aarch64: use symbolic opcodes instead of hex values
src/target/aarch64.c
/***************************************************************************
 *   Copyright (C) 2015 by David Ung                                       *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.                                        *
 *                                                                         *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "aarch64.h"
#include "register.h"
#include "target_request.h"
#include "target_type.h"
#include "armv8_opcodes.h"
#include <helper/time_support.h>

static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_apb_ap_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data);

static int aarch64_restore_system_control_reg(struct target *target)
{
	int retval = ERROR_OK;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

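		/* the target register is the system control register
		 * (SCTLR_ELx); op1 selects the exception level the core is
		 * running at: 0 for EL1, 4 for EL2 and 6 for EL3 */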
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			retval = armv8->arm.msr(target, 3, /*op 0*/
					0, 1,	/* op1, op2 */
					0, 0,	/* CRn, CRm */
					aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			retval = armv8->arm.msr(target, 3, /*op 0*/
					4, 1,	/* op1, op2 */
					0, 0,	/* CRn, CRm */
					aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			retval = armv8->arm.msr(target, 3, /*op 0*/
					6, 1,	/* op1, op2 */
					0, 0,	/* CRn, CRm */
					aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		default:
			LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_state);
		}
	}
	return retval;
}

/* check the address before an aarch64_apb read/write access with the MMU on,
 * to avoid a predictable APB data abort */
static int aarch64_check_address(struct target *target, uint32_t address)
{
	/* TODO */
	return ERROR_OK;
}

/* modify system_control_reg in order to enable or disable the MMU for:
 * - virt2phys address conversion
 * - reading or writing memory at a physical or virtual address */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;

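	/* in SCTLR_ELx, bit 0 is M (MMU enable) and bit 2 is C (data cache
	 * enable); only those bits are manipulated here */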
	if (enable) {
		/* the MMU must already have been enabled when the target
		 * stopped; we only turn it back on here */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
			return ERROR_FAIL;
		}
		if (!(aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr |= 0x1U;
			switch (armv8->arm.core_mode) {
			case ARMV8_64_EL0T:
			case ARMV8_64_EL1T:
			case ARMV8_64_EL1H:
				retval = armv8->arm.msr(target, 3, /*op 0*/
						0, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			case ARMV8_64_EL2T:
			case ARMV8_64_EL2H:
				retval = armv8->arm.msr(target, 3, /*op 0*/
						4, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			case ARMV8_64_EL3H:
			case ARMV8_64_EL3T:
				retval = armv8->arm.msr(target, 3, /*op 0*/
						6, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			default:
				LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_state);
			}
		}
	} else {
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active; flush it before disabling */
			aarch64->system_control_reg_curr &= ~0x4U;
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		if ((aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr &= ~0x1U;
			switch (armv8->arm.core_mode) {
			case ARMV8_64_EL0T:
			case ARMV8_64_EL1T:
			case ARMV8_64_EL1H:
				retval = armv8->arm.msr(target, 3, /*op 0*/
						0, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			case ARMV8_64_EL2T:
			case ARMV8_64_EL2H:
				retval = armv8->arm.msr(target, 3, /*op 0*/
						4, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			case ARMV8_64_EL3H:
			case ARMV8_64_EL3T:
				retval = armv8->arm.msr(target, 3, /*op 0*/
						6, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			default:
				LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_state);
				break;
			}
		}
	}
	return retval;
}

/*
 * Basic debug access, very low level assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Unlocking the debug registers for modification
	 * The debug port might be uninitialised so try twice */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK) {
		/* try again */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
		if (retval == ERROR_OK)
			LOG_USER("Unlocking debug access failed on first, but succeeded on second try.");
	}
	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information */
	return aarch64_poll(target);
}

/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value.  Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_ITE if you
 * happen to know that no instruction is pending.
 */
static int aarch64_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);
	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_ITE) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
			return ERROR_FAIL;
		}
	}

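	/* writing the opcode to ITR starts its execution on the core;
	 * completion is signalled by DSCR.ITE going high again */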
	retval = mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	then = timeval_ms();
	do {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_ITE) == 0);	/* Wait for InstrCompl bit to be set */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}

/* Write to memory mapped registers directly with no cache or mmu handling */
static int aarch64_dap_write_memap_register_u32(struct target *target,
	uint32_t address,
	uint32_t value)
{
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);

	retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);

	return retval;
}

/*
 * AARCH64 implementation of Debug Programmer's Model
 *
 * NOTE the invariant:  these routines return with DSCR_ITE set,
 * so there's no need to poll for it before executing an instruction.
 *
 * NOTE that in several of these cases the "stall" mode might be useful.
 * It'd let us queue a few operations together... prepare/finish might
 * be the places to enable/disable that mode.
 */

static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct aarch64_common, armv8_common.dpm);
}

static int aarch64_write_dcc(struct armv8_common *armv8, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRRX, data);
}

static int aarch64_write_dcc_64(struct armv8_common *armv8, uint64_t data)
{
	int ret;
	LOG_DEBUG("write DCC low word 0x%08" PRIx32, (unsigned)data);
	LOG_DEBUG("write DCC high word 0x%08" PRIx32, (unsigned)(data >> 32));
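	/* a 64-bit DBGDTR_EL0 transfer is split across the two 32-bit DCC
	 * halves: the low word goes through DTRRX, the high word through
	 * DTRTX */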
	ret = mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRRX, data);
	ret += mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
	return ret;
}

static int aarch64_read_dcc(struct armv8_common *armv8, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_ITE;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRTXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX,
			data);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("read DCC 0x%08" PRIx32, *data);

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}

static int aarch64_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_ITE;
	uint32_t higher;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRTXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

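	/* mirror image of the 64-bit write: the low word of DBGDTR_EL0 is
	 * read back through DTRTX and the high word through DTRRX */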
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX,
			(uint32_t *)data);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRRX,
			&higher);
	if (retval != ERROR_OK)
		return retval;

	*data = *(uint32_t *)data | (uint64_t)higher << 32;
	LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}

static int aarch64_dpm_prepare(struct arm_dpm *dpm)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant:  ITE is set after every DPM operation */
	long long then = timeval_ms();
	for (;;) {
		retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_ITE) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX */
		retval = mem_ap_read_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DTRRX, &dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Clear sticky error */
		retval = mem_ap_write_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}

static int aarch64_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}

static int aarch64_instr_execute(struct arm_dpm *dpm,
	uint32_t opcode)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;

	return aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
}

static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	int retval;
	uint32_t dscr = DSCR_ITE;

	retval = aarch64_write_dcc(&a8->armv8_common, data);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
}

static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
	uint32_t opcode, uint64_t data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	int retval;
	uint32_t dscr = DSCR_ITE;

	retval = aarch64_write_dcc_64(&a8->armv8_common, data);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
}

static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	retval = aarch64_write_dcc(&a8->armv8_common, data);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);

	return retval;
}

static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
	uint32_t opcode, uint64_t data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	retval = aarch64_write_dcc_64(&a8->armv8_common, data);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);

	return retval;
}

static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_ITE;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return aarch64_exec_opcode(target,
			DSB_SY,
			&dscr);
}

static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	int retval;
	uint32_t dscr = DSCR_ITE;

	/* the opcode, writing data to DCC */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
}

static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
	uint32_t opcode, uint64_t *data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	int retval;
	uint32_t dscr = DSCR_ITE;

	/* the opcode, writing data to DCC */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
}

static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	/* the opcode, writing data to R0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 0),  /* msr dbgdtr_el0, x0 */
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
}

static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
	uint32_t opcode, uint64_t *data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	/* the opcode, writing data to R0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0),  /* msr dbgdtr_el0, x0 */
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
}

static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv8_common.debug_base;
	uint32_t cr = a8->armv8_common.debug_base;
	int retval;

	switch (index_t) {
	case 0 ... 15:	/* breakpoints */
		vr += CPUV8_DBG_BVR_BASE;
		cr += CPUV8_DBG_BCR_BASE;
		break;
	case 16 ... 31:	/* watchpoints */
		vr += CPUV8_DBG_WVR_BASE;
		cr += CPUV8_DBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}

static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	return ERROR_OK;

#if 0
	struct aarch64_common *a = dpm_to_a8(dpm);
	uint32_t cr;

	switch (index_t) {
	case 0 ... 15:
		cr = a->armv8_common.debug_base + CPUV8_DBG_BCR_BASE;
		break;
	case 16 ... 31:
		cr = a->armv8_common.debug_base + CPUV8_DBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
#endif
}

static int aarch64_dpm_setup(struct aarch64_common *a8, uint32_t debug)
{
	struct arm_dpm *dpm = &a8->armv8_common.dpm;
	int retval;

	dpm->arm = &a8->armv8_common.arm;
	dpm->didr = debug;

	dpm->prepare = aarch64_dpm_prepare;
	dpm->finish = aarch64_dpm_finish;

	dpm->instr_execute = aarch64_instr_execute;
	dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
	dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
	dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
	dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
	dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;

	dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
	dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
	dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
	dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;

	dpm->arm_reg_current = armv8_reg_current;

	dpm->bpwp_enable = aarch64_bpwp_enable;
	dpm->bpwp_disable = aarch64_bpwp_disable;

	retval = armv8_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = armv8_dpm_initialize(dpm);

	return retval;
}

static struct target *get_aarch64(struct target *target, int32_t coreid)
{
	struct target_list *head;
	struct target *curr;

	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
			return curr;
		head = head->next;
	}
	return target;
}

static int aarch64_halt(struct target *target);

static int aarch64_halt_smp(struct target *target)
{
	int retval = 0;
	struct target_list *head;
	struct target *curr;
	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr != target) && (curr->state != TARGET_HALTED))
			retval += aarch64_halt(curr);
		head = head->next;
	}
	return retval;
}

static int update_halt_gdb(struct target *target)
{
	int retval = 0;
	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += aarch64_halt_smp(target);
	}
	return retval;
}

/*
 * AArch64 Run control
 */

static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggling to another core is done by gdb as follows:
	 *   maint packet J core_id
	 *   continue
	 * the next poll then triggers a halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	aarch64->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}

static int aarch64_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv8_common *armv8 = target_to_armv8(target);

	/* enable CTI */
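	/* halt and restart requests travel through the cross-trigger
	 * interface: open the gate for channels 0 and 1, route channel 0 to
	 * output trigger 0 (debug request) and channel 1 to output trigger 1
	 * (restart); the APPPULSE on channel 0 below is what halts the core */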
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_GATE, 3);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_OUTEN0, 1);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_OUTEN1, 2);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enable halting debug mode (HDE)
	 */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, 1);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, 1);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;;) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCRV8_HALT_MASK) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}

static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the ARMv7 gdb thumb fixups do not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	/* call it now, before restoring context, because it uses cpu
	 * register r0 for restoring the system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}
#endif

	return retval;
}

static int aarch64_internal_restart(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started.  Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

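	/* pulse CTI channel 1 to deliver the restart request to the core */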
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, 2);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;;) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_HDE) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}

static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
{
	int retval = 0;
	struct target_list *head;
	struct target *curr;
	uint64_t address;
	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr != target) && (curr->state != TARGET_RUNNING)) {
			/* resume at current address, not in step mode */
			retval += aarch64_internal_restore(curr, 1, &address,
					handle_breakpoints, 0);
			retval += aarch64_internal_restart(curr);
		}
		head = head->next;
	}
	return retval;
}

static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	/* dummy resume for smp toggle in order to reduce gdb impact */
	if ((target->smp) && (target->gdb_service->core[1] != -1)) {
		/* simulate a start and halt of target */
		target->gdb_service->target = NULL;
		target->gdb_service->core[0] = target->gdb_service->core[1];
		/* fake resume: at the next poll we serve core[1] instead, see poll */
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		return 0;
	}
	aarch64_internal_restore(target, current, &addr, handle_breakpoints,
		debug_execution);
	if (target->smp) {
		target->gdb_service->core[0] = -1;
		retval = aarch64_restore_smp(target, handle_breakpoints);
		if (retval != ERROR_OK)
			return retval;
	}
	aarch64_internal_restart(target);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}

static int aarch64_debug_entry(struct target *target)
{
	uint32_t dscr;
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);
	uint32_t tmp;

	LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
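	/* if only EDESR.SS (bit 2) is pending, the halt was produced by the
	 * halting step state machine, i.e. a single step just completed */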
	mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDESR, &tmp);
	if ((tmp & 0x7) == 0x4)
		target->debug_reason = DBG_REASON_SINGLESTEP;

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (armv8->post_debug_entry) {
		retval = armv8->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}

static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;

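	/* clear sticky errors: bit 2 of DRCR is CSE, the same flag written
	 * as DRCR_CSE elsewhere in this file */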
	mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				4, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				6, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	default:
		LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_state);
	}
	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

#if 0
	if (armv8->armv8_mmu.armv8_cache.ctype == -1)
		armv8_identify_cache(target);
#endif

	armv8->armv8_mmu.mmu_enabled =
		(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	aarch64->curr_mode = armv8->arm.core_mode;
	return ERROR_OK;
}

static int aarch64_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t tmp;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, &tmp);
	if (retval != ERROR_OK)
		return retval;

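	/* set EDECR.SS (bit 2) so the core raises a halting step debug event
	 * after executing one instruction on resume */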
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, (tmp | 0x4));
	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_SINGLESTEP;
	retval = aarch64_resume(target, 1, address, 0, 0);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	while (target->state != TARGET_HALTED) {
		mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDESR, &tmp);
		LOG_DEBUG("EDESR = %#x", tmp);
		retval = aarch64_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("timeout waiting for target halt");
			return ERROR_FAIL;
		}
	}

	/* re-read EDECR; tmp was clobbered by the EDESR reads above */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, &tmp);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, (tmp & ~0x4));
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);
	if (target->state == TARGET_HALTED)
		LOG_DEBUG("target stepped");

	return ERROR_OK;
}

static int aarch64_restore_context(struct target *target, bool bpwp)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG(" ");

	if (armv8->pre_restore_context)
		armv8->pre_restore_context(target);

	return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
}

/*
 * AArch64 Breakpoint and watchpoint functions
 */

/* Setup hardware Breakpoint Register Pair */
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;
	uint32_t dscr;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		int64_t bpt_value;
		while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
			brp_i++;
		if (brp_i >= aarch64->brp_num) {
			LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		breakpoint->set = brp_i + 1;
		if (breakpoint->length == 2)
			byte_addr_select = (3 << (breakpoint->address & 0x02));
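		/* DBGBCR layout as programmed here: BT (matchmode) in bits
		 * [23:20], HMC=1 (bit 13), BAS in bits [8:5], PMC=0b11
		 * (bits [2:1]) and E=1 (bit 0) to enable the breakpoint */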
		control = ((matchmode & 0x7) << 20)
			| (1 << 13)
			| (byte_addr_select << 5)
			| (3 << 1) | 1;
		brp_list[brp_i].used = 1;
		brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
		brp_list[brp_i].control = control;
		bpt_value = brp_list[brp_i].value;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value & 0xFFFFFFFF));
		if (retval != ERROR_OK)
			return retval;
		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value >> 32));
		if (retval != ERROR_OK)
			return retval;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
				brp_list[brp_i].control);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
			brp_list[brp_i].control,
			brp_list[brp_i].value);

	} else if (breakpoint->type == BKPT_SOFT) {
		uint8_t code[4];
		buf_set_u32(code, 0, 32, ARMV8_BKPT(0x11));
		retval = target_read_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1, code);
		if (retval != ERROR_OK)
			return retval;
		breakpoint->set = 0x11;	/* Any nice value but 0 */
	}

	/* Ensure that halting debug mode is enabled */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	dscr = dscr | DSCR_HDE;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Failed to set DSCR.HDE");
		return retval;
	}

	return ERROR_OK;
}

static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval = ERROR_FAIL;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_i < aarch64->brp_num) &&
		(brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
		brp_i++;

	if (brp_i >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_i + 1;
	control = ((matchmode & 0x7) << 20)
		| (1 << 13)
		| (byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_i].used = 1;
	brp_list[brp_i].value = (breakpoint->asid);
	brp_list[brp_i].control = control;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].control);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
		brp_list[brp_i].control,
		brp_list[brp_i].value);
	return ERROR_OK;
}

static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval = ERROR_FAIL;
	int brp_1 = 0;	/* holds the contextID pair */
	int brp_2 = 0;	/* holds the IVA pair */
	uint32_t control_CTX, control_IVA;
	uint8_t CTX_byte_addr_select = 0x0F;
	uint8_t IVA_byte_addr_select = 0x0F;
	uint8_t CTX_matchmode = 0x03;
	uint8_t IVA_matchmode = 0x01;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_1 < aarch64->brp_num) &&
		(brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
		brp_1++;

	LOG_DEBUG("brp(CTX) found num: %d", brp_1);
	if (brp_1 >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	while ((brp_2 < aarch64->brp_num) &&
		(brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
		brp_2++;

	LOG_DEBUG("brp(IVA) found num: %d", brp_2);
	if (brp_2 >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_1 + 1;
	breakpoint->linked_BRP = brp_2;
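	/* program the two BRPs as a linked pair: each BCR carries the other
	 * BRP's number in its LBN field (bits [19:16]), so the address (IVA)
	 * match only fires when the context-ID comparison also matches */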
	control_CTX = ((CTX_matchmode & 0x7) << 20)
		| (brp_2 << 16)
		| (0 << 14)
		| (CTX_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_1].used = 1;
	brp_list[brp_1].value = (breakpoint->asid);
	brp_list[brp_1].control = control_CTX;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].control);
	if (retval != ERROR_OK)
		return retval;

	control_IVA = ((IVA_matchmode & 0x7) << 20)
		| (brp_1 << 16)
		| (1 << 13)
		| (IVA_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_2].used = 1;
	brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
	brp_list[brp_2].control = control_IVA;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value & 0xFFFFFFFF);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value >> 32);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].control);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}

static int aarch64_add_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_breakpoint(target, breakpoint, 0x00);	/* Exact match */
}

static int aarch64_add_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_context_breakpoint(target, breakpoint, 0x02);	/* asid match */
}

static int aarch64_add_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_hybrid_breakpoint(target, breakpoint);	/* ??? */
}

static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

#if 0
	/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
#endif

	if (breakpoint->set) {
		aarch64_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			aarch64->brp_num_available++;
	}

	return ERROR_OK;
}

/*
 * AArch64 Reset functions
 */

static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	register_cache_invalidate(armv8->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}

static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	jtag_add_reset(0, 0);

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			retval = target_halt(target);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return ERROR_OK;
}

static int aarch64_write_apb_ap_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;

	LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count %" PRIu32,
		address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);

	/* Mark registers X0 and X1 as dirty, as they will be used
	 * for transferring the data.
	 * They will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* clear any abort */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */
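	/* With DSCR.MA (memory access mode) set, each debugger write to
	 * DTRRX makes the core store that word at the address in X0 and
	 * post-increment X0 by 4, so a whole block can be pushed through
	 * the DCC without issuing further instructions */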
1819
1820 /* The algorithm only copies 32 bit words, so the buffer
1821 * should be expanded to include the words at either end.
1822 * The first and last words will be read first to avoid
1823 * corruption if needed.
1824 */
1825 tmp_buff = malloc(total_u32 * 4);
1826
1827 if ((start_byte != 0) && (total_u32 > 1)) {
1828 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1829 * the other bytes in the word.
1830 */
1831 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1832 if (retval != ERROR_OK)
1833 goto error_free_buff_w;
1834 }
1835
1836 /* If end of write is not aligned, or the write is less than 4 bytes */
1837 if ((end_byte != 0) ||
1838 ((total_u32 == 1) && (total_bytes != 4))) {
1839
1840 /* Read the last word to avoid corruption during 32 bit write */
1841 int mem_offset = (total_u32-1) * 4;
1842 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1843 if (retval != ERROR_OK)
1844 goto error_free_buff_w;
1845 }
1846
1847 /* Copy the write buffer over the top of the temporary buffer */
1848 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1849
1850 /* We now have a 32 bit aligned buffer that can be written */
1851
1852 /* Read DSCR */
1853 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1854 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1855 if (retval != ERROR_OK)
1856 goto error_free_buff_w;
1857
1858 /* Set Normal access mode */
1859 dscr = (dscr & ~DSCR_MA);
1860 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1861 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1862
1863 if (arm->core_state == ARM_STATE_AARCH64) {
1864 /* Write X0 with value 'address' using write procedure */
1865 		/* Step 1.a+b - Write the address for write access into DBGDTR_EL0 */
1866 		retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1867 		/* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
1868 retval += aarch64_exec_opcode(target,
1869 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1870 } else {
1871 /* Write R0 with value 'address' using write procedure */
1872 		/* Step 1.a+b - Write the address for write access into DBGDTRRX */
1873 		retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1874 		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRRXint, r0 */
1875 retval += aarch64_exec_opcode(target,
1876 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1877
1878 }
1879 /* Step 1.d - Change DCC to memory mode */
1880 dscr = dscr | DSCR_MA;
1881 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1882 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1883 if (retval != ERROR_OK)
1884 goto error_unset_dtr_w;
1885
1886
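	/* With DSCR.MA set, each write to DBGDTRRX below makes the core
	 * execute an implied store of the received word to [X0] and then
	 * increment X0 by 4, so the noincr buffer write streams the whole
	 * block (per the DDI0487A.g J9.1 algorithm referenced above).
	 */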
1887 /* Step 2.a - Do the write */
1888 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1889 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1890 if (retval != ERROR_OK)
1891 goto error_unset_dtr_w;
1892
1893 /* Step 3.a - Switch DTR mode back to Normal mode */
1894 dscr = (dscr & ~DSCR_MA);
1895 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1896 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1897 if (retval != ERROR_OK)
1898 goto error_unset_dtr_w;
1899
1900 /* Check for sticky abort flags in the DSCR */
1901 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1902 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1903 if (retval != ERROR_OK)
1904 goto error_free_buff_w;
1905 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1906 /* Abort occurred - clear it and exit */
1907 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1908 		mem_ap_write_atomic_u32(armv8->debug_ap,
1909 			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1910 goto error_free_buff_w;
1911 }
1912
1913 /* Done */
1914 free(tmp_buff);
1915 return ERROR_OK;
1916
1917 error_unset_dtr_w:
1918 /* Unset DTR mode */
1919 mem_ap_read_atomic_u32(armv8->debug_ap,
1920 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1921 dscr = (dscr & ~DSCR_MA);
1922 mem_ap_write_atomic_u32(armv8->debug_ap,
1923 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1924 error_free_buff_w:
1925 	LOG_ERROR("error writing APB-AP memory");
1926 free(tmp_buff);
1927 return ERROR_FAIL;
1928 }
1929
1930 static int aarch64_read_apb_ap_memory(struct target *target,
1931 target_addr_t address, uint32_t size,
1932 uint32_t count, uint8_t *buffer)
1933 {
1934 /* read memory through APB-AP */
1935 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1936 struct armv8_common *armv8 = target_to_armv8(target);
1937 struct arm *arm = &armv8->arm;
1938 int total_bytes = count * size;
1939 int total_u32;
1940 int start_byte = address & 0x3;
1941 int end_byte = (address + total_bytes) & 0x3;
1942 struct reg *reg;
1943 uint32_t dscr;
1944 uint8_t *tmp_buff = NULL;
1945 uint8_t *u8buf_ptr;
1946 uint32_t value;
1947
1948 	LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count %" PRIu32,
1949 address, size, count);
1950 if (target->state != TARGET_HALTED) {
1951 LOG_WARNING("target not halted");
1952 return ERROR_TARGET_NOT_HALTED;
1953 }
1954
1955 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1956 	/* Mark registers X0 and X1 as dirty, as they will be used
1957 	 * for transferring the data.
1958 	 * They will be restored automatically when exiting
1959 	 * debug mode.
1960 	 */
1961 reg = armv8_reg_current(arm, 1);
1962 reg->dirty = true;
1963
1964 reg = armv8_reg_current(arm, 0);
1965 reg->dirty = true;
1966
1967 /* clear any abort */
1968 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1969 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1970 if (retval != ERROR_OK)
1971 goto error_free_buff_r;
1972
1973 /* Read DSCR */
1974 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1975 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1976
1977 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1978
1979 /* Set Normal access mode */
1980 dscr = (dscr & ~DSCR_MA);
1981 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1982 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1983
1984 if (arm->core_state == ARM_STATE_AARCH64) {
1985 /* Write X0 with value 'address' using write procedure */
1986 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1987 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1988 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1989 retval += aarch64_exec_opcode(target, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1990 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1991 retval += aarch64_exec_opcode(target, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1992 /* Step 1.e - Change DCC to memory mode */
1993 dscr = dscr | DSCR_MA;
1994 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1995 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1996 /* Step 1.f - read DBGDTRTX and discard the value */
1997 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1998 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1999 } else {
2000 /* Write R0 with value 'address' using write procedure */
2001 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2002 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
2003 		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRRXint, r0 */
2004 retval += aarch64_exec_opcode(target,
2005 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
2006 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2007 retval += aarch64_exec_opcode(target,
2008 T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr);
2009 /* Step 1.e - Change DCC to memory mode */
2010 dscr = dscr | DSCR_MA;
2011 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
2012 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2013 /* Step 1.f - read DBGDTRTX and discard the value */
2014 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2015 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2016
2017 }
2018 if (retval != ERROR_OK)
2019 goto error_unset_dtr_r;
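	/* Steps 1.d-1.f above prime the DTR: the dummy write back to the
	 * DTR sets EDSCR.TXfull, and the first DBGDTRTX read is discarded,
	 * so that each subsequent read in memory mode returns a word
	 * loaded from [X0].
	 */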
2020
2021 /* Optimize the read as much as we can, either way we read in a single pass */
2022 if ((start_byte) || (end_byte)) {
2023 /* The algorithm only copies 32 bit words, so the buffer
2024 * should be expanded to include the words at either end.
2025 * The first and last words will be read into a temp buffer
2026 * to avoid corruption
2027 */
2028 tmp_buff = malloc(total_u32 * 4);
2029 if (!tmp_buff)
2030 goto error_unset_dtr_r;
2031
2032 /* use the tmp buffer to read the entire data */
2033 u8buf_ptr = tmp_buff;
2034 } else
2035 /* address and read length are aligned so read directly into the passed buffer */
2036 u8buf_ptr = buffer;
2037
2038 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2039 * Abort flags are sticky, so can be read at end of transactions
2040 *
2041 	 * The data is read in words aligned to a 32 bit boundary.
2042 */
2043
2044 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2045 * increments X0 by 4. */
2046 retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
2047 armv8->debug_base + CPUV8_DBG_DTRTX);
2048 if (retval != ERROR_OK)
2049 goto error_unset_dtr_r;
2050
2051 /* Step 3.a - set DTR access mode back to Normal mode */
2052 dscr = (dscr & ~DSCR_MA);
2053 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2054 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2055 if (retval != ERROR_OK)
2056 goto error_free_buff_r;
2057
2058 /* Step 3.b - read DBGDTRTX for the final value */
2059 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2060 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2061 memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);
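	/* Reading DBGDTRTX with DSCR.MA already cleared returns the pending
	 * word without triggering another load from [X0], so the read never
	 * runs past the end of the requested range.
	 */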
2062
2063 /* Check for sticky abort flags in the DSCR */
2064 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2065 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2066 if (retval != ERROR_OK)
2067 goto error_free_buff_r;
2068 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2069 /* Abort occurred - clear it and exit */
2070 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2071 mem_ap_write_atomic_u32(armv8->debug_ap,
2072 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
2073 goto error_free_buff_r;
2074 }
2075
2076 /* check if we need to copy aligned data by applying any shift necessary */
2077 if (tmp_buff) {
2078 memcpy(buffer, tmp_buff + start_byte, total_bytes);
2079 free(tmp_buff);
2080 }
2081
2082 /* Done */
2083 return ERROR_OK;
2084
2085 error_unset_dtr_r:
2086 /* Unset DTR mode */
2087 mem_ap_read_atomic_u32(armv8->debug_ap,
2088 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2089 dscr = (dscr & ~DSCR_MA);
2090 mem_ap_write_atomic_u32(armv8->debug_ap,
2091 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2092 error_free_buff_r:
2093 	LOG_ERROR("error reading APB-AP memory");
2094 free(tmp_buff);
2095 return ERROR_FAIL;
2096 }
2097
2098 static int aarch64_read_phys_memory(struct target *target,
2099 target_addr_t address, uint32_t size,
2100 uint32_t count, uint8_t *buffer)
2101 {
2102 struct armv8_common *armv8 = target_to_armv8(target);
2103 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2104 struct adiv5_dap *swjdp = armv8->arm.dap;
2105 uint8_t apsel = swjdp->apsel;
2106 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
2107 address, size, count);
2108
2109 if (count && buffer) {
2110
2111 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2112
2113 /* read memory through AHB-AP */
2114 retval = mem_ap_read_buf(armv8->memory_ap, buffer, size, count, address);
2115 } else {
2116 /* read memory through APB-AP */
2117 retval = aarch64_mmu_modify(target, 0);
2118 if (retval != ERROR_OK)
2119 return retval;
2120 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2121 }
2122 }
2123 return retval;
2124 }
2125
2126 static int aarch64_read_memory(struct target *target, target_addr_t address,
2127 uint32_t size, uint32_t count, uint8_t *buffer)
2128 {
2129 int mmu_enabled = 0;
2130 target_addr_t virt, phys;
2131 int retval;
2132 struct armv8_common *armv8 = target_to_armv8(target);
2133 struct adiv5_dap *swjdp = armv8->arm.dap;
2134 uint8_t apsel = swjdp->apsel;
2135
2136 /* aarch64 handles unaligned memory access */
2137 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2138 size, count);
2139
2140 /* determine if MMU was enabled on target stop */
2141 if (!armv8->is_armv7r) {
2142 retval = aarch64_mmu(target, &mmu_enabled);
2143 if (retval != ERROR_OK)
2144 return retval;
2145 }
2146
2147 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2148 if (mmu_enabled) {
2149 virt = address;
2150 retval = aarch64_virt2phys(target, virt, &phys);
2151 if (retval != ERROR_OK)
2152 return retval;
2153
2154 LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR,
2155 virt, phys);
2156 address = phys;
2157 }
2158 retval = aarch64_read_phys_memory(target, address, size, count,
2159 buffer);
2160 } else {
2161 if (mmu_enabled) {
2162 retval = aarch64_check_address(target, address);
2163 if (retval != ERROR_OK)
2164 return retval;
2165 			/* enable MMU as we could have disabled it for phys
2166 			 * access */
2167 retval = aarch64_mmu_modify(target, 1);
2168 if (retval != ERROR_OK)
2169 return retval;
2170 }
2171 retval = aarch64_read_apb_ap_memory(target, address, size,
2172 count, buffer);
2173 }
2174 return retval;
2175 }
2176
2177 static int aarch64_write_phys_memory(struct target *target,
2178 target_addr_t address, uint32_t size,
2179 uint32_t count, const uint8_t *buffer)
2180 {
2181 struct armv8_common *armv8 = target_to_armv8(target);
2182 struct adiv5_dap *swjdp = armv8->arm.dap;
2183 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2184 uint8_t apsel = swjdp->apsel;
2185
2186 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2187 size, count);
2188
2189 if (count && buffer) {
2190
2191 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2192
2193 /* write memory through AHB-AP */
2194 retval = mem_ap_write_buf(armv8->memory_ap, buffer, size, count, address);
2195 } else {
2196
2197 /* write memory through APB-AP */
2198 if (!armv8->is_armv7r) {
2199 retval = aarch64_mmu_modify(target, 0);
2200 if (retval != ERROR_OK)
2201 return retval;
2202 }
2203 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2204 }
2205 }
2206
2207
2208 /* REVISIT this op is generic ARMv7-A/R stuff */
2209 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2210 struct arm_dpm *dpm = armv8->arm.dpm;
2211
2212 retval = dpm->prepare(dpm);
2213 if (retval != ERROR_OK)
2214 return retval;
2215
2216 /* The Cache handling will NOT work with MMU active, the
2217 * wrong addresses will be invalidated!
2218 *
2219 * For both ICache and DCache, walk all cache lines in the
2220 	 * address range. A fixed 64 byte cache line length is assumed.
2221 *
2222 * REVISIT per ARMv7, these may trigger watchpoints ...
2223 */
2224
2225 /* invalidate I-Cache */
2226 if (armv8->armv8_mmu.armv8_cache.i_cache_enabled) {
2227 /* ICIMVAU - Invalidate Cache single entry
2228 * with MVA to PoU
2229 * MCR p15, 0, r0, c7, c5, 1
2230 */
2231 for (uint32_t cacheline = address;
2232 cacheline < address + size * count;
2233 cacheline += 64) {
2234 retval = dpm->instr_write_data_r0(dpm,
2235 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2236 cacheline);
2237 if (retval != ERROR_OK)
2238 return retval;
2239 }
2240 }
2241
2242 /* invalidate D-Cache */
2243 if (armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
2244 /* DCIMVAC - Invalidate data Cache line
2245 * with MVA to PoC
2246 * MCR p15, 0, r0, c7, c6, 1
2247 */
2248 for (uint32_t cacheline = address;
2249 cacheline < address + size * count;
2250 cacheline += 64) {
2251 retval = dpm->instr_write_data_r0(dpm,
2252 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2253 cacheline);
2254 if (retval != ERROR_OK)
2255 return retval;
2256 }
2257 }
2258
2259 /* (void) */ dpm->finish(dpm);
2260 }
2261
2262 return retval;
2263 }
2264
2265 static int aarch64_write_memory(struct target *target, target_addr_t address,
2266 uint32_t size, uint32_t count, const uint8_t *buffer)
2267 {
2268 int mmu_enabled = 0;
2269 target_addr_t virt, phys;
2270 int retval;
2271 struct armv8_common *armv8 = target_to_armv8(target);
2272 struct adiv5_dap *swjdp = armv8->arm.dap;
2273 uint8_t apsel = swjdp->apsel;
2274
2275 /* aarch64 handles unaligned memory access */
2276 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
2277 "; count %" PRId32, address, size, count);
2278
2279 /* determine if MMU was enabled on target stop */
2280 if (!armv8->is_armv7r) {
2281 retval = aarch64_mmu(target, &mmu_enabled);
2282 if (retval != ERROR_OK)
2283 return retval;
2284 }
2285
2286 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2287 LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR "; size %"
2288 PRId32 "; count %" PRId32, address, size, count);
2289 if (mmu_enabled) {
2290 virt = address;
2291 retval = aarch64_virt2phys(target, virt, &phys);
2292 if (retval != ERROR_OK)
2293 return retval;
2294
2295 LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2296 TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR, virt, phys);
2297 address = phys;
2298 }
2299 retval = aarch64_write_phys_memory(target, address, size,
2300 count, buffer);
2301 } else {
2302 if (mmu_enabled) {
2303 retval = aarch64_check_address(target, address);
2304 if (retval != ERROR_OK)
2305 return retval;
2306 /* enable MMU as we could have disabled it for phys access */
2307 retval = aarch64_mmu_modify(target, 1);
2308 if (retval != ERROR_OK)
2309 return retval;
2310 }
2311 retval = aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2312 }
2313 return retval;
2314 }
2315
2316 static int aarch64_handle_target_request(void *priv)
2317 {
2318 struct target *target = priv;
2319 struct armv8_common *armv8 = target_to_armv8(target);
2320 int retval;
2321
2322 if (!target_was_examined(target))
2323 return ERROR_OK;
2324 if (!target->dbg_msg_enabled)
2325 return ERROR_OK;
2326
2327 if (target->state == TARGET_RUNNING) {
2328 uint32_t request;
2329 uint32_t dscr;
2330 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2331 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2332
2333 /* check if we have data */
2334 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2335 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2336 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2337 if (retval == ERROR_OK) {
2338 target_request(target, request);
2339 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2340 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2341 }
2342 }
2343 }
2344
2345 return ERROR_OK;
2346 }
2347
2348 static int aarch64_examine_first(struct target *target)
2349 {
2350 struct aarch64_common *aarch64 = target_to_aarch64(target);
2351 struct armv8_common *armv8 = &aarch64->armv8_common;
2352 struct adiv5_dap *swjdp = armv8->arm.dap;
2353 int retval = ERROR_OK;
2354 uint32_t pfr, debug, ctypr, ttypr, cpuid;
2355 int i;
2356
2357 	/* Do one extra read to make sure the DAP is configured
2358 	 * before we search for the access ports
2359 	 */
2360 retval = dap_dp_init(swjdp);
2361 if (retval != ERROR_OK)
2362 return retval;
2363
2364 	/* Search for the APB-AP - it is needed for access to debug registers */
2365 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2366 if (retval != ERROR_OK) {
2367 LOG_ERROR("Could not find APB-AP for debug access");
2368 return retval;
2369 }
2370
2371 retval = mem_ap_init(armv8->debug_ap);
2372 if (retval != ERROR_OK) {
2373 LOG_ERROR("Could not initialize the APB-AP");
2374 return retval;
2375 }
2376
2377 armv8->debug_ap->memaccess_tck = 80;
2378
2379 	/* Search for the AHB-AP */
2380 armv8->memory_ap_available = false;
2381 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv8->memory_ap);
2382 if (retval == ERROR_OK) {
2383 retval = mem_ap_init(armv8->memory_ap);
2384 if (retval == ERROR_OK)
2385 armv8->memory_ap_available = true;
2386 }
2387 if (retval != ERROR_OK) {
2388 /* AHB-AP not found or unavailable - use the CPU */
2389 LOG_DEBUG("No AHB-AP available for memory access");
2390 }
2391
2392
2393 if (!target->dbgbase_set) {
2394 uint32_t dbgbase;
2395 /* Get ROM Table base */
2396 uint32_t apid;
2397 int32_t coreidx = target->coreid;
2398 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2399 if (retval != ERROR_OK)
2400 return retval;
2401 /* Lookup 0x15 -- Processor DAP */
2402 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2403 &armv8->debug_base, &coreidx);
2404 if (retval != ERROR_OK)
2405 return retval;
2406 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2407 coreidx, armv8->debug_base);
2408 } else
2409 armv8->debug_base = target->dbgbase;
2410
2411 LOG_DEBUG("Target ctibase is 0x%x", target->ctibase);
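	/* If the config did not provide a CTI base, assume the CTI block
	 * follows the debug registers at debug_base + 0x1000 (a common ROM
	 * table layout; this is an assumption, not probed).
	 */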
2412 if (target->ctibase == 0)
2413 armv8->cti_base = target->ctibase = armv8->debug_base + 0x1000;
2414 else
2415 armv8->cti_base = target->ctibase;
2416
2417 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2418 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
2419 if (retval != ERROR_OK) {
2420 LOG_DEBUG("Examine %s failed", "oslock");
2421 return retval;
2422 }
2423
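	/* Peek a few raw debug register offsets below; the values are only
	 * logged for bring-up diagnostics and are not otherwise used.
	 */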
2424 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2425 armv8->debug_base + 0x88, &cpuid);
2426 LOG_DEBUG("0x88 = %x", cpuid);
2427
2428 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2429 armv8->debug_base + 0x314, &cpuid);
2430 LOG_DEBUG("0x314 = %x", cpuid);
2431
2432 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2433 armv8->debug_base + 0x310, &cpuid);
2434 LOG_DEBUG("0x310 = %x", cpuid);
2435 if (retval != ERROR_OK)
2436 return retval;
2437
2438 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2439 armv8->debug_base + CPUDBG_CPUID, &cpuid);
2440 if (retval != ERROR_OK) {
2441 LOG_DEBUG("Examine %s failed", "CPUID");
2442 return retval;
2443 }
2444
2445 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2446 armv8->debug_base + CPUDBG_CTYPR, &ctypr);
2447 if (retval != ERROR_OK) {
2448 LOG_DEBUG("Examine %s failed", "CTYPR");
2449 return retval;
2450 }
2451
2452 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2453 armv8->debug_base + CPUDBG_TTYPR, &ttypr);
2454 if (retval != ERROR_OK) {
2455 LOG_DEBUG("Examine %s failed", "TTYPR");
2456 return retval;
2457 }
2458
2459 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2460 armv8->debug_base + ID_AA64PFR0_EL1, &pfr);
2461 if (retval != ERROR_OK) {
2462 		LOG_DEBUG("Examine %s failed", "ID_AA64PFR0_EL1");
2463 return retval;
2464 }
2465 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2466 armv8->debug_base + ID_AA64DFR0_EL1, &debug);
2467 if (retval != ERROR_OK) {
2468 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2469 return retval;
2470 }
2471
2472 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2473 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2474 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2475 LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32, pfr);
2476 LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32, debug);
2477
2478 armv8->arm.core_type = ARM_MODE_MON;
2479 armv8->arm.core_state = ARM_STATE_AARCH64;
2480 retval = aarch64_dpm_setup(aarch64, debug);
2481 if (retval != ERROR_OK)
2482 return retval;
2483
2484 /* Setup Breakpoint Register Pairs */
2485 aarch64->brp_num = ((debug >> 12) & 0x0F) + 1;
2486 aarch64->brp_num_context = ((debug >> 28) & 0x0F) + 1;
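	/* ID_AA64DFR0_EL1.BRPs (bits [15:12]) and .CTX_CMPs (bits [31:28])
	 * encode the number of breakpoint pairs minus one, hence the +1.
	 */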
2487
2488 /* hack - no context bpt support yet */
2489 aarch64->brp_num_context = 0;
2490
2491 aarch64->brp_num_available = aarch64->brp_num;
2492 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2493 for (i = 0; i < aarch64->brp_num; i++) {
2494 aarch64->brp_list[i].used = 0;
2495 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2496 aarch64->brp_list[i].type = BRP_NORMAL;
2497 else
2498 aarch64->brp_list[i].type = BRP_CONTEXT;
2499 aarch64->brp_list[i].value = 0;
2500 aarch64->brp_list[i].control = 0;
2501 aarch64->brp_list[i].BRPn = i;
2502 }
2503
2504 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2505
2506 target_set_examined(target);
2507 return ERROR_OK;
2508 }
2509
2510 static int aarch64_examine(struct target *target)
2511 {
2512 int retval = ERROR_OK;
2513
2514 /* don't re-probe hardware after each reset */
2515 if (!target_was_examined(target))
2516 retval = aarch64_examine_first(target);
2517
2518 /* Configure core debug access */
2519 if (retval == ERROR_OK)
2520 retval = aarch64_init_debug_access(target);
2521
2522 return retval;
2523 }
2524
2525 /*
2526  * aarch64 target creation and initialization
2527  */
2528
2529 static int aarch64_init_target(struct command_context *cmd_ctx,
2530 struct target *target)
2531 {
2532 /* examine_first() does a bunch of this */
2533 return ERROR_OK;
2534 }
2535
2536 static int aarch64_init_arch_info(struct target *target,
2537 struct aarch64_common *aarch64, struct jtag_tap *tap)
2538 {
2539 struct armv8_common *armv8 = &aarch64->armv8_common;
2540 struct adiv5_dap *dap = armv8->arm.dap;
2541
2542 armv8->arm.dap = dap;
2543
2544 /* Setup struct aarch64_common */
2545 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2546 /* tap has no dap initialized */
2547 if (!tap->dap) {
2548 tap->dap = dap_init();
2549
2550 /* Leave (only) generic DAP stuff for debugport_init() */
2551 tap->dap->tap = tap;
2552 }
2553
2554 armv8->arm.dap = tap->dap;
2555
2556 aarch64->fast_reg_read = 0;
2557
2558 /* register arch-specific functions */
2559 armv8->examine_debug_reason = NULL;
2560
2561 armv8->post_debug_entry = aarch64_post_debug_entry;
2562
2563 armv8->pre_restore_context = NULL;
2564
2565 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2566
2567 /* REVISIT v7a setup should be in a v7a-specific routine */
2568 armv8_init_arch_info(target, armv8);
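	/* register a periodic (1 ms) timer callback to service DCC target requests */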
2569 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2570
2571 return ERROR_OK;
2572 }
2573
2574 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2575 {
2576 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2577
2578 aarch64->armv8_common.is_armv7r = false;
2579
2580 return aarch64_init_arch_info(target, aarch64, target->tap);
2581 }
2582
2583 static int aarch64_mmu(struct target *target, int *enabled)
2584 {
2585 if (target->state != TARGET_HALTED) {
2586 LOG_ERROR("%s: target not halted", __func__);
2587 return ERROR_TARGET_INVALID;
2588 }
2589
2590 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2591 return ERROR_OK;
2592 }
2593
2594 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2595 target_addr_t *phys)
2596 {
2597 int retval = ERROR_FAIL;
2598 struct armv8_common *armv8 = target_to_armv8(target);
2599 struct adiv5_dap *swjdp = armv8->arm.dap;
2600 uint8_t apsel = swjdp->apsel;
2601 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2602 uint32_t ret;
2603 retval = armv8_mmu_translate_va(target,
2604 virt, &ret);
2605 if (retval != ERROR_OK)
2606 goto done;
2607 *phys = ret;
2608 	} else {/* use this method if armv8->memory_ap is not selected;
2609 		 * the MMU must be enabled in order to get a correct translation */
2610 retval = aarch64_mmu_modify(target, 1);
2611 if (retval != ERROR_OK)
2612 goto done;
2613 retval = armv8_mmu_translate_va_pa(target, virt, phys, 1);
2614 }
2615 done:
2616 return retval;
2617 }
2618
2619 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2620 {
2621 struct target *target = get_current_target(CMD_CTX);
2622 struct armv8_common *armv8 = target_to_armv8(target);
2623
2624 return armv8_handle_cache_info_command(CMD_CTX,
2625 &armv8->armv8_mmu.armv8_cache);
2626 }
2627
2628
2629 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2630 {
2631 struct target *target = get_current_target(CMD_CTX);
2632 if (!target_was_examined(target)) {
2633 LOG_ERROR("target not examined yet");
2634 return ERROR_FAIL;
2635 }
2636
2637 return aarch64_init_debug_access(target);
2638 }
2639 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2640 {
2641 struct target *target = get_current_target(CMD_CTX);
2642 /* check target is an smp target */
2643 struct target_list *head;
2644 struct target *curr;
2645 head = target->head;
2646 target->smp = 0;
2647 if (head != (struct target_list *)NULL) {
2648 while (head != (struct target_list *)NULL) {
2649 curr = head->target;
2650 curr->smp = 0;
2651 head = head->next;
2652 }
2653 		/* fix the target reported to the debugger */
2654 target->gdb_service->target = target;
2655 }
2656 return ERROR_OK;
2657 }
2658
2659 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2660 {
2661 struct target *target = get_current_target(CMD_CTX);
2662 struct target_list *head;
2663 struct target *curr;
2664 head = target->head;
2665 if (head != (struct target_list *)NULL) {
2666 target->smp = 1;
2667 while (head != (struct target_list *)NULL) {
2668 curr = head->target;
2669 curr->smp = 1;
2670 head = head->next;
2671 }
2672 }
2673 return ERROR_OK;
2674 }
2675
2676 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2677 {
2678 struct target *target = get_current_target(CMD_CTX);
2679 int retval = ERROR_OK;
2680 struct target_list *head;
2681 head = target->head;
2682 if (head != (struct target_list *)NULL) {
2683 if (CMD_ARGC == 1) {
2684 int coreid = 0;
2685 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2686 if (ERROR_OK != retval)
2687 return retval;
2688 target->gdb_service->core[1] = coreid;
2689
2690 }
2691 		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32,
2692 			target->gdb_service->core[0], target->gdb_service->core[1]);
2693 }
2694 return ERROR_OK;
2695 }
2696
2697 static const struct command_registration aarch64_exec_command_handlers[] = {
2698 {
2699 .name = "cache_info",
2700 .handler = aarch64_handle_cache_info_command,
2701 .mode = COMMAND_EXEC,
2702 .help = "display information about target caches",
2703 .usage = "",
2704 },
2705 {
2706 .name = "dbginit",
2707 .handler = aarch64_handle_dbginit_command,
2708 .mode = COMMAND_EXEC,
2709 .help = "Initialize core debug",
2710 .usage = "",
2711 },
2712 	{
		.name = "smp_off",
2713 .handler = aarch64_handle_smp_off_command,
2714 .mode = COMMAND_EXEC,
2715 .help = "Stop smp handling",
2716 .usage = "",
2717 },
2718 {
2719 .name = "smp_on",
2720 .handler = aarch64_handle_smp_on_command,
2721 .mode = COMMAND_EXEC,
2722 .help = "Restart smp handling",
2723 .usage = "",
2724 },
2725 {
2726 .name = "smp_gdb",
2727 .handler = aarch64_handle_smp_gdb_command,
2728 .mode = COMMAND_EXEC,
2729 		.help = "display or set the current core reported to gdb",
2730 .usage = "",
2731 },
2732
2733
2734 COMMAND_REGISTRATION_DONE
2735 };
2736 static const struct command_registration aarch64_command_handlers[] = {
2737 {
2738 .chain = arm_command_handlers,
2739 },
2740 {
2741 .chain = armv8_command_handlers,
2742 },
2743 {
2744 .name = "cortex_a",
2745 .mode = COMMAND_ANY,
2746 .help = "Cortex-A command group",
2747 .usage = "",
2748 .chain = aarch64_exec_command_handlers,
2749 },
2750 COMMAND_REGISTRATION_DONE
2751 };
2752
2753 struct target_type aarch64_target = {
2754 .name = "aarch64",
2755
2756 .poll = aarch64_poll,
2757 .arch_state = armv8_arch_state,
2758
2759 .halt = aarch64_halt,
2760 .resume = aarch64_resume,
2761 .step = aarch64_step,
2762
2763 .assert_reset = aarch64_assert_reset,
2764 .deassert_reset = aarch64_deassert_reset,
2765
2766 /* REVISIT allow exporting VFP3 registers ... */
2767 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2768
2769 .read_memory = aarch64_read_memory,
2770 .write_memory = aarch64_write_memory,
2771
2772 .checksum_memory = arm_checksum_memory,
2773 .blank_check_memory = arm_blank_check_memory,
2774
2775 .run_algorithm = armv4_5_run_algorithm,
2776
2777 .add_breakpoint = aarch64_add_breakpoint,
2778 .add_context_breakpoint = aarch64_add_context_breakpoint,
2779 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2780 .remove_breakpoint = aarch64_remove_breakpoint,
2781 .add_watchpoint = NULL,
2782 .remove_watchpoint = NULL,
2783
2784 .commands = aarch64_command_handlers,
2785 .target_create = aarch64_target_create,
2786 .init_target = aarch64_init_target,
2787 .examine = aarch64_examine,
2788
2789 .read_phys_memory = aarch64_read_phys_memory,
2790 .write_phys_memory = aarch64_write_phys_memory,
2791 .mmu = aarch64_mmu,
2792 .virt2phys = aarch64_virt2phys,
2793 };
