aarch64: fix cache identification
src/target/aarch64.c (openocd.git)
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include <helper/time_support.h>
31
32 static int aarch64_poll(struct target *target);
33 static int aarch64_debug_entry(struct target *target);
34 static int aarch64_restore_context(struct target *target, bool bpwp);
35 static int aarch64_set_breakpoint(struct target *target,
36 struct breakpoint *breakpoint, uint8_t matchmode);
37 static int aarch64_set_context_breakpoint(struct target *target,
38 struct breakpoint *breakpoint, uint8_t matchmode);
39 static int aarch64_set_hybrid_breakpoint(struct target *target,
40 struct breakpoint *breakpoint);
41 static int aarch64_unset_breakpoint(struct target *target,
42 struct breakpoint *breakpoint);
43 static int aarch64_mmu(struct target *target, int *enabled);
44 static int aarch64_virt2phys(struct target *target,
45 target_addr_t virt, target_addr_t *phys);
46 static int aarch64_read_apb_ap_memory(struct target *target,
47 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
48 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
49 uint32_t opcode, uint32_t data);
50
51 static int aarch64_restore_system_control_reg(struct target *target)
52 {
53 int retval = ERROR_OK;
54
55 struct aarch64_common *aarch64 = target_to_aarch64(target);
56 struct armv8_common *armv8 = target_to_armv8(target);
57
58 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
59 aarch64->system_control_reg_curr = aarch64->system_control_reg;
60 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
61
62 switch (armv8->arm.core_mode) {
63 case ARMV8_64_EL0T:
64 case ARMV8_64_EL1T:
65 case ARMV8_64_EL1H:
66 retval = armv8->arm.msr(target, 3, /*op 0*/
67 0, 1, /* op1, op2 */
68 0, 0, /* CRn, CRm */
69 aarch64->system_control_reg);
70 if (retval != ERROR_OK)
71 return retval;
72 break;
73 case ARMV8_64_EL2T:
74 case ARMV8_64_EL2H:
75 retval = armv8->arm.msr(target, 3, /*op 0*/
76 4, 1, /* op1, op2 */
77 0, 0, /* CRn, CRm */
78 aarch64->system_control_reg);
79 if (retval != ERROR_OK)
80 return retval;
81 break;
82 case ARMV8_64_EL3H:
83 case ARMV8_64_EL3T:
84 retval = armv8->arm.msr(target, 3, /*op 0*/
85 6, 1, /* op1, op2 */
86 0, 0, /* CRn, CRm */
87 aarch64->system_control_reg);
88 if (retval != ERROR_OK)
89 return retval;
90 break;
91 default:
92 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
93 }
94 }
95 return retval;
96 }
97
98 /* check address before aarch64_apb read/write access with mmu on
99 * to avoid a predictable apb data abort */
100 static int aarch64_check_address(struct target *target, uint32_t address)
101 {
102 /* TODO */
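/* A possible shape for this check (an assumption, nothing is wired up
 * yet): with the MMU on, run the address through aarch64_virt2phys()
 * and fail early if the translation aborts, instead of taking the
 * predictable abort on the APB access itself. */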
103 return ERROR_OK;
104 }
105 /* modify system_control_reg in order to enable or disable mmu for :
106 * - virt2phys address conversion
107 * - read or write memory in phys or virt address */
108 static int aarch64_mmu_modify(struct target *target, int enable)
109 {
110 struct aarch64_common *aarch64 = target_to_aarch64(target);
111 struct armv8_common *armv8 = &aarch64->armv8_common;
112 int retval = ERROR_OK;
113
114 if (enable) {
115 /* the mmu must have been enabled when the target stopped */
116 if (!(aarch64->system_control_reg & 0x1U)) {
117 LOG_ERROR("trying to enable mmu on target halted with mmu disabled");
118 return ERROR_FAIL;
119 }
120 if (!(aarch64->system_control_reg_curr & 0x1U)) {
121 aarch64->system_control_reg_curr |= 0x1U;
122 switch (armv8->arm.core_mode) {
123 case ARMV8_64_EL0T:
124 case ARMV8_64_EL1T:
125 case ARMV8_64_EL1H:
126 retval = armv8->arm.msr(target, 3, /*op 0*/
127 0, 0, /* op1, op2 */
128 1, 0, /* CRn, CRm */
129 aarch64->system_control_reg_curr);
130 if (retval != ERROR_OK)
131 return retval;
132 break;
133 case ARMV8_64_EL2T:
134 case ARMV8_64_EL2H:
135 retval = armv8->arm.msr(target, 3, /*op 0*/
136 4, 0, /* op1, op2 */
137 1, 0, /* CRn, CRm */
138 aarch64->system_control_reg_curr);
139 if (retval != ERROR_OK)
140 return retval;
141 break;
142 case ARMV8_64_EL3H:
143 case ARMV8_64_EL3T:
144 retval = armv8->arm.msr(target, 3, /*op 0*/
145 6, 0, /* op1, op2 */
146 1, 0, /* CRn, CRm */
147 aarch64->system_control_reg_curr);
148 if (retval != ERROR_OK)
149 return retval;
150 break;
151 default:
152 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
153 }
154 }
155 } else {
156 if (aarch64->system_control_reg_curr & 0x4U) {
157 /* data cache is active */
158 aarch64->system_control_reg_curr &= ~0x4U;
159 /* flush the data cache before disabling it */
160 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
161 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
162 }
163 if ((aarch64->system_control_reg_curr & 0x1U)) {
164 aarch64->system_control_reg_curr &= ~0x1U;
165 switch (armv8->arm.core_mode) {
166 case ARMV8_64_EL0T:
167 case ARMV8_64_EL1T:
168 case ARMV8_64_EL1H:
169 retval = armv8->arm.msr(target, 3, /*op 0*/
170 0, 0, /* op1, op2 */
171 1, 0, /* CRn, CRm */
172 aarch64->system_control_reg_curr);
173 if (retval != ERROR_OK)
174 return retval;
175 break;
176 case ARMV8_64_EL2T:
177 case ARMV8_64_EL2H:
178 retval = armv8->arm.msr(target, 3, /*op 0*/
179 4, 0, /* op1, op2 */
180 1, 0, /* CRn, CRm */
181 aarch64->system_control_reg_curr);
182 if (retval != ERROR_OK)
183 return retval;
184 break;
185 case ARMV8_64_EL3H:
186 case ARMV8_64_EL3T:
187 retval = armv8->arm.msr(target, 3, /*op 0*/
188 6, 0, /* op1, op2 */
189 1, 0, /* CRn, CRm */
190 aarch64->system_control_reg_curr);
191 if (retval != ERROR_OK)
192 return retval;
193 break;
194 default:
195 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
196 break;
197 }
198 }
199 }
200 return retval;
201 }
202
203 /*
204 * Basic debug access, very low level assumes state is saved
205 */
206 static int aarch64_init_debug_access(struct target *target)
207 {
208 struct armv8_common *armv8 = target_to_armv8(target);
209 int retval;
210 uint32_t dummy;
211
212 LOG_DEBUG(" ");
213
214 /* Unlocking the debug registers for modification
215 * The debug port might be uninitialised, so try twice */
216 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
217 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
218 if (retval != ERROR_OK) {
219 /* try again */
220 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
221 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
222 if (retval == ERROR_OK)
223 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
224 }
225 if (retval != ERROR_OK)
226 return retval;
227 /* Clear Sticky Power Down status Bit in PRSR to enable access to
228 the registers in the Core Power Domain */
229 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
230 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
231 if (retval != ERROR_OK)
232 return retval;
233
234 /* Enabling of instruction execution in debug mode is done in debug_entry code */
235
236 /* Resync breakpoint registers */
237
238 /* Since this is likely called from init or reset, update target state information*/
239 return aarch64_poll(target);
240 }
241
242 /* To reduce needless round-trips, pass in a pointer to the current
243 * DSCR value. Initialize it to zero if you just need to know the
244 * value on return from this function; or DSCR_ITE if you
245 * happen to know that no instruction is pending.
246 */
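/* Typical usage, matching the callers below: seed a local
 * "uint32_t dscr = DSCR_ITE;" when the previous operation is known to
 * have completed, then pass &dscr to successive calls so each call can
 * reuse the cached DSCR value instead of re-reading it over the DAP. */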
247 static int aarch64_exec_opcode(struct target *target,
248 uint32_t opcode, uint32_t *dscr_p)
249 {
250 uint32_t dscr;
251 int retval;
252 struct armv8_common *armv8 = target_to_armv8(target);
253 dscr = dscr_p ? *dscr_p : 0;
254
255 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
256
257 /* Wait for InstrCompl bit to be set */
258 long long then = timeval_ms();
259 while ((dscr & DSCR_ITE) == 0) {
260 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
261 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
262 if (retval != ERROR_OK) {
263 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
264 return retval;
265 }
266 if (timeval_ms() > then + 1000) {
267 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
268 return ERROR_FAIL;
269 }
270 }
271
272 retval = mem_ap_write_u32(armv8->debug_ap,
273 armv8->debug_base + CPUV8_DBG_ITR, opcode);
274 if (retval != ERROR_OK)
275 return retval;
276
277 then = timeval_ms();
278 do {
279 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
280 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
281 if (retval != ERROR_OK) {
282 LOG_ERROR("Could not read DSCR register");
283 return retval;
284 }
285 if (timeval_ms() > then + 1000) {
286 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
287 return ERROR_FAIL;
288 }
289 } while ((dscr & DSCR_ITE) == 0); /* Wait for InstrCompl bit to be set */
290
291 if (dscr_p)
292 *dscr_p = dscr;
293
294 return retval;
295 }
296
297 /* Write to memory mapped registers directly with no cache or mmu handling */
298 static int aarch64_dap_write_memap_register_u32(struct target *target,
299 uint32_t address,
300 uint32_t value)
301 {
302 int retval;
303 struct armv8_common *armv8 = target_to_armv8(target);
304
305 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
306
307 return retval;
308 }
309
310 /*
311 * AARCH64 implementation of Debug Programmer's Model
312 *
313 * NOTE the invariant: these routines return with DSCR_ITE set,
314 * so there's no need to poll for it before executing an instruction.
315 *
316 * NOTE that in several of these cases the "stall" mode might be useful.
317 * It'd let us queue a few operations together... prepare/finish might
318 * be the places to enable/disable that mode.
319 */
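/* A typical register read runs through these hooks as wired up in
 * aarch64_dpm_setup() below: prepare() establishes the DSCR_ITE
 * invariant and drains any stale DTRRX data, instr_read_data_r0()
 * executes the given opcode and moves the result from R0/X0 out
 * through the DCC, and finish() is currently a no-op. */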
320
321 static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
322 {
323 return container_of(dpm, struct aarch64_common, armv8_common.dpm);
324 }
325
326 static int aarch64_write_dcc(struct armv8_common *armv8, uint32_t data)
327 {
328 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
329 return mem_ap_write_u32(armv8->debug_ap,
330 armv8->debug_base + CPUV8_DBG_DTRRX, data);
331 }
332
333 static int aarch64_write_dcc_64(struct armv8_common *armv8, uint64_t data)
334 {
335 int ret;
336 LOG_DEBUG("write DCC Low word0x%08" PRIx32, (unsigned)data);
337 LOG_DEBUG("write DCC High word 0x%08" PRIx32, (unsigned)(data >> 32));
338 ret = mem_ap_write_u32(armv8->debug_ap,
339 armv8->debug_base + CPUV8_DBG_DTRRX, data);
340 ret += mem_ap_write_u32(armv8->debug_ap,
341 armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
342 return ret;
343 }
344
345 static int aarch64_read_dcc(struct armv8_common *armv8, uint32_t *data,
346 uint32_t *dscr_p)
347 {
348 uint32_t dscr = DSCR_ITE;
349 int retval;
350
351 if (dscr_p)
352 dscr = *dscr_p;
353
354 /* Wait for DTRTXfull */
355 long long then = timeval_ms();
356 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
357 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
358 armv8->debug_base + CPUV8_DBG_DSCR,
359 &dscr);
360 if (retval != ERROR_OK)
361 return retval;
362 if (timeval_ms() > then + 1000) {
363 LOG_ERROR("Timeout waiting for read dcc");
364 return ERROR_FAIL;
365 }
366 }
367
368 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
369 armv8->debug_base + CPUV8_DBG_DTRTX,
370 data);
371 if (retval != ERROR_OK)
372 return retval;
373 LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
374
375 if (dscr_p)
376 *dscr_p = dscr;
377
378 return retval;
379 }
380
381 static int aarch64_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
382 uint32_t *dscr_p)
383 {
384 uint32_t dscr = DSCR_ITE;
385 uint32_t higher;
386 int retval;
387
388 if (dscr_p)
389 dscr = *dscr_p;
390
391 /* Wait for DTRTXfull */
392 long long then = timeval_ms();
393 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
394 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
395 armv8->debug_base + CPUV8_DBG_DSCR,
396 &dscr);
397 if (retval != ERROR_OK)
398 return retval;
399 if (timeval_ms() > then + 1000) {
400 LOG_ERROR("Timeout waiting for read dcc");
401 return ERROR_FAIL;
402 }
403 }
404
405 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
406 armv8->debug_base + CPUV8_DBG_DTRTX,
407 (uint32_t *)data);
408 if (retval != ERROR_OK)
409 return retval;
410
411 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
412 armv8->debug_base + CPUV8_DBG_DTRRX,
413 &higher);
414 if (retval != ERROR_OK)
415 return retval;
416
417 *data = *(uint32_t *)data | (uint64_t)higher << 32;
418 LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
419
420 if (dscr_p)
421 *dscr_p = dscr;
422
423 return retval;
424 }
425
426 static int aarch64_dpm_prepare(struct arm_dpm *dpm)
427 {
428 struct aarch64_common *a8 = dpm_to_a8(dpm);
429 uint32_t dscr;
430 int retval;
431
432 /* set up invariant: InstrCompl (DSCR_ITE) is set after every DPM operation */
433 long long then = timeval_ms();
434 for (;; ) {
435 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
436 a8->armv8_common.debug_base + CPUV8_DBG_DSCR,
437 &dscr);
438 if (retval != ERROR_OK)
439 return retval;
440 if ((dscr & DSCR_ITE) != 0)
441 break;
442 if (timeval_ms() > then + 1000) {
443 LOG_ERROR("Timeout waiting for dpm prepare");
444 return ERROR_FAIL;
445 }
446 }
447
448 /* this "should never happen" ... */
449 if (dscr & DSCR_DTR_RX_FULL) {
450 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
451 /* Clear DCCRX */
452 retval = mem_ap_read_u32(a8->armv8_common.debug_ap,
453 a8->armv8_common.debug_base + CPUV8_DBG_DTRRX, &dscr);
454 if (retval != ERROR_OK)
455 return retval;
456
457 /* Clear sticky error */
458 retval = mem_ap_write_u32(a8->armv8_common.debug_ap,
459 a8->armv8_common.debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
460 if (retval != ERROR_OK)
461 return retval;
462 }
463
464 return retval;
465 }
466
467 static int aarch64_dpm_finish(struct arm_dpm *dpm)
468 {
469 /* REVISIT what could be done here? */
470 return ERROR_OK;
471 }
472
473 static int aarch64_instr_execute(struct arm_dpm *dpm,
474 uint32_t opcode)
475 {
476 struct aarch64_common *a8 = dpm_to_a8(dpm);
477 uint32_t dscr = DSCR_ITE;
478
479 return aarch64_exec_opcode(
480 a8->armv8_common.arm.target,
481 opcode,
482 &dscr);
483 }
484
485 static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
486 uint32_t opcode, uint32_t data)
487 {
488 struct aarch64_common *a8 = dpm_to_a8(dpm);
489 int retval;
490 uint32_t dscr = DSCR_ITE;
491
492 retval = aarch64_write_dcc(&a8->armv8_common, data);
493 if (retval != ERROR_OK)
494 return retval;
495
496 return aarch64_exec_opcode(
497 a8->armv8_common.arm.target,
498 opcode,
499 &dscr);
500 }
501
502 static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
503 uint32_t opcode, uint64_t data)
504 {
505 struct aarch64_common *a8 = dpm_to_a8(dpm);
506 int retval;
507 uint32_t dscr = DSCR_ITE;
508
509 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
510 if (retval != ERROR_OK)
511 return retval;
512
513 return aarch64_exec_opcode(
514 a8->armv8_common.arm.target,
515 opcode,
516 &dscr);
517 }
518
519 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
520 uint32_t opcode, uint32_t data)
521 {
522 struct aarch64_common *a8 = dpm_to_a8(dpm);
523 uint32_t dscr = DSCR_ITE;
524 int retval;
525
526 retval = aarch64_write_dcc(&a8->armv8_common, data);
527 if (retval != ERROR_OK)
528 return retval;
529
530 retval = aarch64_exec_opcode(
531 a8->armv8_common.arm.target,
532 ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 0),
533 &dscr);
534 if (retval != ERROR_OK)
535 return retval;
536
537 /* then the opcode, taking data from R0 */
538 retval = aarch64_exec_opcode(
539 a8->armv8_common.arm.target,
540 opcode,
541 &dscr);
542
543 return retval;
544 }
545
546 static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
547 uint32_t opcode, uint64_t data)
548 {
549 struct aarch64_common *a8 = dpm_to_a8(dpm);
550 uint32_t dscr = DSCR_ITE;
551 int retval;
552
553 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
554 if (retval != ERROR_OK)
555 return retval;
556
557 retval = aarch64_exec_opcode(
558 a8->armv8_common.arm.target,
559 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0),
560 &dscr);
561 if (retval != ERROR_OK)
562 return retval;
563
564 /* then the opcode, taking data from R0 */
565 retval = aarch64_exec_opcode(
566 a8->armv8_common.arm.target,
567 opcode,
568 &dscr);
569
570 return retval;
571 }
572
573 static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
574 {
575 struct target *target = dpm->arm->target;
576 uint32_t dscr = DSCR_ITE;
577
578 /* "Prefetch flush" after modifying execution status in CPSR */
579 return aarch64_exec_opcode(target,
580 DSB_SY,
581 &dscr);
582 }
583
584 static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
585 uint32_t opcode, uint32_t *data)
586 {
587 struct aarch64_common *a8 = dpm_to_a8(dpm);
588 int retval;
589 uint32_t dscr = DSCR_ITE;
590
591 /* the opcode, writing data to DCC */
592 retval = aarch64_exec_opcode(
593 a8->armv8_common.arm.target,
594 opcode,
595 &dscr);
596 if (retval != ERROR_OK)
597 return retval;
598
599 return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
600 }
601
602 static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
603 uint32_t opcode, uint64_t *data)
604 {
605 struct aarch64_common *a8 = dpm_to_a8(dpm);
606 int retval;
607 uint32_t dscr = DSCR_ITE;
608
609 /* the opcode, writing data to DCC */
610 retval = aarch64_exec_opcode(
611 a8->armv8_common.arm.target,
612 opcode,
613 &dscr);
614 if (retval != ERROR_OK)
615 return retval;
616
617 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
618 }
619
620 static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
621 uint32_t opcode, uint32_t *data)
622 {
623 struct aarch64_common *a8 = dpm_to_a8(dpm);
624 uint32_t dscr = DSCR_ITE;
625 int retval;
626
627 /* the opcode, writing data to R0 */
628 retval = aarch64_exec_opcode(
629 a8->armv8_common.arm.target,
630 opcode,
631 &dscr);
632 if (retval != ERROR_OK)
633 return retval;
634
635 /* write R0 to DCC */
636 retval = aarch64_exec_opcode(
637 a8->armv8_common.arm.target,
638 ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 0), /* msr dbgdtr_el0, x0 */
639 &dscr);
640 if (retval != ERROR_OK)
641 return retval;
642
643 return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
644 }
645
646 static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
647 uint32_t opcode, uint64_t *data)
648 {
649 struct aarch64_common *a8 = dpm_to_a8(dpm);
650 uint32_t dscr = DSCR_ITE;
651 int retval;
652
653 /* the opcode, writing data to R0 */
654 retval = aarch64_exec_opcode(
655 a8->armv8_common.arm.target,
656 opcode,
657 &dscr);
658 if (retval != ERROR_OK)
659 return retval;
660
661 /* write R0 to DCC */
662 retval = aarch64_exec_opcode(
663 a8->armv8_common.arm.target,
664 ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), /* msr dbgdtr_el0, x0 */
665 &dscr);
666 if (retval != ERROR_OK)
667 return retval;
668
669 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
670 }
671
672 static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
673 uint32_t addr, uint32_t control)
674 {
675 struct aarch64_common *a8 = dpm_to_a8(dpm);
676 uint32_t vr = a8->armv8_common.debug_base;
677 uint32_t cr = a8->armv8_common.debug_base;
678 int retval;
679
680 switch (index_t) {
681 case 0 ... 15: /* breakpoints */
682 vr += CPUV8_DBG_BVR_BASE;
683 cr += CPUV8_DBG_BCR_BASE;
684 break;
685 case 16 ... 31: /* watchpoints */
686 vr += CPUV8_DBG_WVR_BASE;
687 cr += CPUV8_DBG_WCR_BASE;
688 index_t -= 16;
689 break;
690 default:
691 return ERROR_FAIL;
692 }
693 vr += 4 * index_t;
694 cr += 4 * index_t;
695
696 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
697 (unsigned) vr, (unsigned) cr);
698
699 retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
700 vr, addr);
701 if (retval != ERROR_OK)
702 return retval;
703 retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
704 cr, control);
705 return retval;
706 }
707
708 static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
709 {
710 return ERROR_OK;
711
712 #if 0
713 struct aarch64_common *a = dpm_to_a8(dpm);
714 uint32_t cr;
715
716 switch (index_t) {
717 case 0 ... 15:
718 cr = a->armv8_common.debug_base + CPUV8_DBG_BCR_BASE;
719 break;
720 case 16 ... 31:
721 cr = a->armv8_common.debug_base + CPUV8_DBG_WCR_BASE;
722 index_t -= 16;
723 break;
724 default:
725 return ERROR_FAIL;
726 }
727 cr += 4 * index_t;
728
729 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
730
731 /* clear control register */
732 return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
733 #endif
734 }
735
736 static int aarch64_dpm_setup(struct aarch64_common *a8, uint32_t debug)
737 {
738 struct arm_dpm *dpm = &a8->armv8_common.dpm;
739 int retval;
740
741 dpm->arm = &a8->armv8_common.arm;
742 dpm->didr = debug;
743
744 dpm->prepare = aarch64_dpm_prepare;
745 dpm->finish = aarch64_dpm_finish;
746
747 dpm->instr_execute = aarch64_instr_execute;
748 dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
749 dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
750 dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
751 dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
752 dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;
753
754 dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
755 dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
756 dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
757 dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;
758
759 dpm->arm_reg_current = armv8_reg_current;
760
761 dpm->bpwp_enable = aarch64_bpwp_enable;
762 dpm->bpwp_disable = aarch64_bpwp_disable;
763
764 retval = armv8_dpm_setup(dpm);
765 if (retval == ERROR_OK)
766 retval = armv8_dpm_initialize(dpm);
767
768 return retval;
769 }
770 static struct target *get_aarch64(struct target *target, int32_t coreid)
771 {
772 struct target_list *head;
773 struct target *curr;
774
775 head = target->head;
776 while (head != (struct target_list *)NULL) {
777 curr = head->target;
778 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
779 return curr;
780 head = head->next;
781 }
782 return target;
783 }
784 static int aarch64_halt(struct target *target);
785
786 static int aarch64_halt_smp(struct target *target)
787 {
788 int retval = 0;
789 struct target_list *head;
790 struct target *curr;
791 head = target->head;
792 while (head != (struct target_list *)NULL) {
793 curr = head->target;
794 if ((curr != target) && (curr->state != TARGET_HALTED))
795 retval += aarch64_halt(curr);
796 head = head->next;
797 }
798 return retval;
799 }
800
801 static int update_halt_gdb(struct target *target)
802 {
803 int retval = 0;
804 if (target->gdb_service && target->gdb_service->core[0] == -1) {
805 target->gdb_service->target = target;
806 target->gdb_service->core[0] = target->coreid;
807 retval += aarch64_halt_smp(target);
808 }
809 return retval;
810 }
811
812 /*
813 * AArch64 Run control
814 */
815
816 static int aarch64_poll(struct target *target)
817 {
818 int retval = ERROR_OK;
819 uint32_t dscr;
820 struct aarch64_common *aarch64 = target_to_aarch64(target);
821 struct armv8_common *armv8 = &aarch64->armv8_common;
822 enum target_state prev_target_state = target->state;
823 /* toggling to another core is done by gdb as follows: */
824 /* maint packet J core_id */
825 /* continue */
826 /* the next poll triggers a halt event sent to gdb */
827 if ((target->state == TARGET_HALTED) && (target->smp) &&
828 (target->gdb_service) &&
829 (target->gdb_service->target == NULL)) {
830 target->gdb_service->target =
831 get_aarch64(target, target->gdb_service->core[1]);
832 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
833 return retval;
834 }
835 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
836 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
837 if (retval != ERROR_OK)
838 return retval;
839 aarch64->cpudbg_dscr = dscr;
840
841 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
842 if (prev_target_state != TARGET_HALTED) {
843 /* We have a halting debug event */
844 LOG_DEBUG("Target halted");
845 target->state = TARGET_HALTED;
846 if ((prev_target_state == TARGET_RUNNING)
847 || (prev_target_state == TARGET_UNKNOWN)
848 || (prev_target_state == TARGET_RESET)) {
849 retval = aarch64_debug_entry(target);
850 if (retval != ERROR_OK)
851 return retval;
852 if (target->smp) {
853 retval = update_halt_gdb(target);
854 if (retval != ERROR_OK)
855 return retval;
856 }
857 target_call_event_callbacks(target,
858 TARGET_EVENT_HALTED);
859 }
860 if (prev_target_state == TARGET_DEBUG_RUNNING) {
861 LOG_DEBUG(" ");
862
863 retval = aarch64_debug_entry(target);
864 if (retval != ERROR_OK)
865 return retval;
866 if (target->smp) {
867 retval = update_halt_gdb(target);
868 if (retval != ERROR_OK)
869 return retval;
870 }
871
872 target_call_event_callbacks(target,
873 TARGET_EVENT_DEBUG_HALTED);
874 }
875 }
876 } else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
877 target->state = TARGET_RUNNING;
878 else {
879 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
880 target->state = TARGET_UNKNOWN;
881 }
882
883 return retval;
884 }
885
886 static int aarch64_halt(struct target *target)
887 {
888 int retval = ERROR_OK;
889 uint32_t dscr;
890 struct armv8_common *armv8 = target_to_armv8(target);
891
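/* Halting goes through the Cross Trigger Interface: enable the CTI,
 * open gate channels 0 and 1 (CTI_GATE = 3), route channel 0 to the
 * debug request output (CTI_OUTEN0 = 1) and channel 1 to the restart
 * output (CTI_OUTEN1 = 2), then pulse channel 0 (CTI_APPPULSE = 1)
 * below to actually request the halt. */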
892 /* enable CTI*/
893 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
894 armv8->cti_base + CTI_CTR, 1);
895 if (retval != ERROR_OK)
896 return retval;
897
898 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
899 armv8->cti_base + CTI_GATE, 3);
900 if (retval != ERROR_OK)
901 return retval;
902
903 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
904 armv8->cti_base + CTI_OUTEN0, 1);
905 if (retval != ERROR_OK)
906 return retval;
907
908 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
909 armv8->cti_base + CTI_OUTEN1, 2);
910 if (retval != ERROR_OK)
911 return retval;
912
913 /*
914 * add HDE in halting debug mode
915 */
916 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
917 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
918 if (retval != ERROR_OK)
919 return retval;
920
921 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
922 armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
923 if (retval != ERROR_OK)
924 return retval;
925
926 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
927 armv8->cti_base + CTI_APPPULSE, 1);
928 if (retval != ERROR_OK)
929 return retval;
930
931 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
932 armv8->cti_base + CTI_INACK, 1);
933 if (retval != ERROR_OK)
934 return retval;
935
936
937 long long then = timeval_ms();
938 for (;; ) {
939 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
940 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
941 if (retval != ERROR_OK)
942 return retval;
943 if ((dscr & DSCRV8_HALT_MASK) != 0)
944 break;
945 if (timeval_ms() > then + 1000) {
946 LOG_ERROR("Timeout waiting for halt");
947 return ERROR_FAIL;
948 }
949 }
950
951 target->debug_reason = DBG_REASON_DBGRQ;
952
953 return ERROR_OK;
954 }
955
956 static int aarch64_internal_restore(struct target *target, int current,
957 uint64_t *address, int handle_breakpoints, int debug_execution)
958 {
959 struct armv8_common *armv8 = target_to_armv8(target);
960 struct arm *arm = &armv8->arm;
961 int retval;
962 uint64_t resume_pc;
963
964 if (!debug_execution)
965 target_free_all_working_areas(target);
966
967 /* current = 1: continue on current pc, otherwise continue at <address> */
968 resume_pc = buf_get_u64(arm->pc->value, 0, 64);
969 if (!current)
970 resume_pc = *address;
971 else
972 *address = resume_pc;
973
974 /* Make sure that the Armv7 gdb thumb fixups do not
975 * kill the return address
976 */
977 switch (arm->core_state) {
978 case ARM_STATE_ARM:
979 resume_pc &= 0xFFFFFFFC;
980 break;
981 case ARM_STATE_AARCH64:
982 resume_pc &= 0xFFFFFFFFFFFFFFFC;
983 break;
984 case ARM_STATE_THUMB:
985 case ARM_STATE_THUMB_EE:
986 /* When the return address is loaded into PC
987 * bit 0 must be 1 to stay in Thumb state
988 */
989 resume_pc |= 0x1;
990 break;
991 case ARM_STATE_JAZELLE:
992 LOG_ERROR("How do I resume into Jazelle state??");
993 return ERROR_FAIL;
994 }
995 LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
996 buf_set_u64(arm->pc->value, 0, 64, resume_pc);
997 arm->pc->dirty = 1;
998 arm->pc->valid = 1;
999 dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);
1000
1001 /* call this now, before restoring context, because it uses cpu
1002 * register r0 for restoring the system control register */
1003 retval = aarch64_restore_system_control_reg(target);
1004 if (retval != ERROR_OK)
1005 return retval;
1006 retval = aarch64_restore_context(target, handle_breakpoints);
1007 if (retval != ERROR_OK)
1008 return retval;
1009 target->debug_reason = DBG_REASON_NOTHALTED;
1010 target->state = TARGET_RUNNING;
1011
1012 /* registers are now invalid */
1013 register_cache_invalidate(arm->core_cache);
1014
1015 #if 0
1016 /* the front-end may request us not to handle breakpoints */
1017 if (handle_breakpoints) {
1018 /* Single step past breakpoint at current address */
1019 breakpoint = breakpoint_find(target, resume_pc);
1020 if (breakpoint) {
1021 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1022 cortex_m3_unset_breakpoint(target, breakpoint);
1023 cortex_m3_single_step_core(target);
1024 cortex_m3_set_breakpoint(target, breakpoint);
1025 }
1026 }
1027 #endif
1028
1029 return retval;
1030 }
1031
1032 static int aarch64_internal_restart(struct target *target)
1033 {
1034 struct armv8_common *armv8 = target_to_armv8(target);
1035 struct arm *arm = &armv8->arm;
1036 int retval;
1037 uint32_t dscr;
1038 /*
1039 * Restart core and wait for it to be started. Clear ITRen and sticky
1040 * exception flags: see ARMv7 ARM, C5.9.
1041 *
1042 * REVISIT: for single stepping, we probably want to
1043 * disable IRQs by default, with optional override...
1044 */
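/* Note: the restart request below is the counterpart of the halt in
 * aarch64_halt(): it pulses CTI channel 1 (CTI_APPPULSE = 2) instead
 * of channel 0. */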
1045
1046 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1047 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1048 if (retval != ERROR_OK)
1049 return retval;
1050
1051 if ((dscr & DSCR_ITE) == 0)
1052 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1053
1054 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1055 armv8->cti_base + CTI_APPPULSE, 2);
1056 if (retval != ERROR_OK)
1057 return retval;
1058
1059 long long then = timeval_ms();
1060 for (;; ) {
1061 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1062 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1063 if (retval != ERROR_OK)
1064 return retval;
1065 if ((dscr & DSCR_HDE) != 0)
1066 break;
1067 if (timeval_ms() > then + 1000) {
1068 LOG_ERROR("Timeout waiting for resume");
1069 return ERROR_FAIL;
1070 }
1071 }
1072
1073 target->debug_reason = DBG_REASON_NOTHALTED;
1074 target->state = TARGET_RUNNING;
1075
1076 /* registers are now invalid */
1077 register_cache_invalidate(arm->core_cache);
1078
1079 return ERROR_OK;
1080 }
1081
1082 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
1083 {
1084 int retval = 0;
1085 struct target_list *head;
1086 struct target *curr;
1087 uint64_t address;
1088 head = target->head;
1089 while (head != (struct target_list *)NULL) {
1090 curr = head->target;
1091 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1092 /* resume at current address, not in step mode */
1093 retval += aarch64_internal_restore(curr, 1, &address,
1094 handle_breakpoints, 0);
1095 retval += aarch64_internal_restart(curr);
1096 }
1097 head = head->next;
1098
1099 }
1100 return retval;
1101 }
1102
1103 static int aarch64_resume(struct target *target, int current,
1104 target_addr_t address, int handle_breakpoints, int debug_execution)
1105 {
1106 int retval = 0;
1107 uint64_t addr = address;
1108
1109 /* dummy resume for smp toggle in order to reduce gdb impact */
1110 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1111 /* simulate a start and halt of target */
1112 target->gdb_service->target = NULL;
1113 target->gdb_service->core[0] = target->gdb_service->core[1];
1114 /* fake a resume; at the next poll we switch to target core[1], see poll */
1115 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1116 return 0;
1117 }
1118 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
1119 debug_execution);
1120 if (target->smp) {
1121 target->gdb_service->core[0] = -1;
1122 retval = aarch64_restore_smp(target, handle_breakpoints);
1123 if (retval != ERROR_OK)
1124 return retval;
1125 }
1126 aarch64_internal_restart(target);
1127
1128 if (!debug_execution) {
1129 target->state = TARGET_RUNNING;
1130 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1131 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
1132 } else {
1133 target->state = TARGET_DEBUG_RUNNING;
1134 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1135 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
1136 }
1137
1138 return ERROR_OK;
1139 }
1140
1141 static int aarch64_debug_entry(struct target *target)
1142 {
1143 int retval = ERROR_OK;
1144 struct aarch64_common *aarch64 = target_to_aarch64(target);
1145 struct armv8_common *armv8 = target_to_armv8(target);
1146
1147 LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
1148
1149 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1150 * imprecise data aborts get discarded by issuing a Data
1151 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1152 */
1153
1154 /* make sure to clear all sticky errors */
1155 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1156 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1157 if (retval != ERROR_OK)
1158 return retval;
1159
1160 /* Examine debug reason */
1161 armv8_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
1162
1163 /* save address of instruction that triggered the watchpoint? */
1164 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1165 uint32_t tmp;
1166 uint64_t wfar = 0;
1167
1168 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1169 armv8->debug_base + CPUV8_DBG_WFAR1,
1170 &tmp);
1171 if (retval != ERROR_OK)
1172 return retval;
1173 wfar = tmp;
1174 wfar = (wfar << 32);
1175 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1176 armv8->debug_base + CPUV8_DBG_WFAR0,
1177 &tmp);
1178 if (retval != ERROR_OK)
1179 return retval;
1180 wfar |= tmp;
1181 armv8_dpm_report_wfar(&armv8->dpm, wfar);
1182 }
1183
1184 retval = armv8_dpm_read_current_registers(&armv8->dpm);
1185
1186 if (armv8->post_debug_entry) {
1187 retval = armv8->post_debug_entry(target);
1188 if (retval != ERROR_OK)
1189 return retval;
1190 }
1191
1192 return retval;
1193 }
1194
1195 static int aarch64_post_debug_entry(struct target *target)
1196 {
1197 struct aarch64_common *aarch64 = target_to_aarch64(target);
1198 struct armv8_common *armv8 = &aarch64->armv8_common;
1199 int retval;
1200
1201 mem_ap_write_atomic_u32(armv8->debug_ap,
1202 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1203 switch (armv8->arm.core_mode) {
1204 case ARMV8_64_EL0T:
1205 case ARMV8_64_EL1T:
1206 case ARMV8_64_EL1H:
1207 retval = armv8->arm.mrs(target, 3, /*op 0*/
1208 0, 0, /* op1, op2 */
1209 1, 0, /* CRn, CRm */
1210 &aarch64->system_control_reg);
1211 if (retval != ERROR_OK)
1212 return retval;
1213 break;
1214 case ARMV8_64_EL2T:
1215 case ARMV8_64_EL2H:
1216 retval = armv8->arm.mrs(target, 3, /*op 0*/
1217 4, 0, /* op1, op2 */
1218 1, 0, /* CRn, CRm */
1219 &aarch64->system_control_reg);
1220 if (retval != ERROR_OK)
1221 return retval;
1222 break;
1223 case ARMV8_64_EL3H:
1224 case ARMV8_64_EL3T:
1225 retval = armv8->arm.mrs(target, 3, /*op 0*/
1226 6, 0, /* op1, op2 */
1227 1, 0, /* CRn, CRm */
1228 &aarch64->system_control_reg);
1229 if (retval != ERROR_OK)
1230 return retval;
1231 break;
1232 default:
1233 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
1234 }
1235 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1236 aarch64->system_control_reg_curr = aarch64->system_control_reg;
1237
1238 if (armv8->armv8_mmu.armv8_cache.ctype == -1)
1239 armv8_identify_cache(target);
1240
1241 armv8->armv8_mmu.mmu_enabled =
1242 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1243 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1244 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1245 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1246 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1247 aarch64->curr_mode = armv8->arm.core_mode;
1248 return ERROR_OK;
1249 }
1250
1251 static int aarch64_step(struct target *target, int current, target_addr_t address,
1252 int handle_breakpoints)
1253 {
1254 struct armv8_common *armv8 = target_to_armv8(target);
1255 int retval;
1256 uint32_t tmp;
1257
1258 if (target->state != TARGET_HALTED) {
1259 LOG_WARNING("target not halted");
1260 return ERROR_TARGET_NOT_HALTED;
1261 }
1262
1263 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1264 armv8->debug_base + CPUV8_DBG_EDECR, &tmp);
1265 if (retval != ERROR_OK)
1266 return retval;
1267
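/* Setting bit 2 (EDECR.SS) enables halting step; it is cleared again
 * further down once the step has completed. */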
1268 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1269 armv8->debug_base + CPUV8_DBG_EDECR, (tmp|0x4));
1270 if (retval != ERROR_OK)
1271 return retval;
1272
1273 target->debug_reason = DBG_REASON_SINGLESTEP;
1274 retval = aarch64_resume(target, 1, address, 0, 0);
1275 if (retval != ERROR_OK)
1276 return retval;
1277
1278 long long then = timeval_ms();
1279 while (target->state != TARGET_HALTED) {
1280 mem_ap_read_atomic_u32(armv8->debug_ap,
1281 armv8->debug_base + CPUV8_DBG_EDESR, &tmp);
1282 LOG_DEBUG("DESR = %#x", tmp);
1283 retval = aarch64_poll(target);
1284 if (retval != ERROR_OK)
1285 return retval;
1286 if (timeval_ms() > then + 1000) {
1287 LOG_ERROR("timeout waiting for target halt");
1288 return ERROR_FAIL;
1289 }
1290 }
1291
1292 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1293 armv8->debug_base + CPUV8_DBG_EDECR, (tmp&(~0x4)));
1294 if (retval != ERROR_OK)
1295 return retval;
1296
1297 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1298 if (target->state == TARGET_HALTED)
1299 LOG_DEBUG("target stepped");
1300
1301 return ERROR_OK;
1302 }
1303
1304 static int aarch64_restore_context(struct target *target, bool bpwp)
1305 {
1306 struct armv8_common *armv8 = target_to_armv8(target);
1307
1308 LOG_DEBUG(" ");
1309
1310 if (armv8->pre_restore_context)
1311 armv8->pre_restore_context(target);
1312
1313 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1314
1315 }
1316
1317 /*
1318 * AArch64 Breakpoint and watchpoint functions
1319 */
1320
1321 /* Setup hardware Breakpoint Register Pair */
1322 static int aarch64_set_breakpoint(struct target *target,
1323 struct breakpoint *breakpoint, uint8_t matchmode)
1324 {
1325 int retval;
1326 int brp_i = 0;
1327 uint32_t control;
1328 uint8_t byte_addr_select = 0x0F;
1329 struct aarch64_common *aarch64 = target_to_aarch64(target);
1330 struct armv8_common *armv8 = &aarch64->armv8_common;
1331 struct aarch64_brp *brp_list = aarch64->brp_list;
1332 uint32_t dscr;
1333
1334 if (breakpoint->set) {
1335 LOG_WARNING("breakpoint already set");
1336 return ERROR_OK;
1337 }
1338
1339 if (breakpoint->type == BKPT_HARD) {
1340 int64_t bpt_value;
1341 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1342 brp_i++;
1343 if (brp_i >= aarch64->brp_num) {
1344 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1345 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1346 }
1347 breakpoint->set = brp_i + 1;
1348 if (breakpoint->length == 2)
1349 byte_addr_select = (3 << (breakpoint->address & 0x02));
1350 control = ((matchmode & 0x7) << 20)
1351 | (1 << 13)
1352 | (byte_addr_select << 5)
1353 | (3 << 1) | 1;
1354 brp_list[brp_i].used = 1;
1355 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1356 brp_list[brp_i].control = control;
1357 bpt_value = brp_list[brp_i].value;
1358
1359 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1360 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1361 (uint32_t)(bpt_value & 0xFFFFFFFF));
1362 if (retval != ERROR_OK)
1363 return retval;
1364 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1365 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1366 (uint32_t)(bpt_value >> 32));
1367 if (retval != ERROR_OK)
1368 return retval;
1369
1370 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1371 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1372 brp_list[brp_i].control);
1373 if (retval != ERROR_OK)
1374 return retval;
1375 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1376 brp_list[brp_i].control,
1377 brp_list[brp_i].value);
1378
1379 } else if (breakpoint->type == BKPT_SOFT) {
1380 uint8_t code[4];
1381 buf_set_u32(code, 0, 32, ARMV8_BKPT(0x11));
1382 retval = target_read_memory(target,
1383 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1384 breakpoint->length, 1,
1385 breakpoint->orig_instr);
1386 if (retval != ERROR_OK)
1387 return retval;
1388 retval = target_write_memory(target,
1389 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1390 breakpoint->length, 1, code);
1391 if (retval != ERROR_OK)
1392 return retval;
1393 breakpoint->set = 0x11; /* Any nice value but 0 */
1394 }
1395
1396 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1397 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1398 /* Ensure that halting debug mode is enabled */
1399 dscr = dscr | DSCR_HDE;
1400 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1401 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1402 if (retval != ERROR_OK) {
1403 LOG_DEBUG("Failed to set DSCR.HDE");
1404 return retval;
1405 }
1406
1407 return ERROR_OK;
1408 }
1409
1410 static int aarch64_set_context_breakpoint(struct target *target,
1411 struct breakpoint *breakpoint, uint8_t matchmode)
1412 {
1413 int retval = ERROR_FAIL;
1414 int brp_i = 0;
1415 uint32_t control;
1416 uint8_t byte_addr_select = 0x0F;
1417 struct aarch64_common *aarch64 = target_to_aarch64(target);
1418 struct armv8_common *armv8 = &aarch64->armv8_common;
1419 struct aarch64_brp *brp_list = aarch64->brp_list;
1420
1421 if (breakpoint->set) {
1422 LOG_WARNING("breakpoint already set");
1423 return retval;
1424 }
1425 /*check available context BRPs*/
1426 while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
1427 (brp_list[brp_i].type != BRP_CONTEXT)))
1428 brp_i++;
1429
1430 if (brp_i >= aarch64->brp_num) {
1431 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1432 return ERROR_FAIL;
1433 }
1434
1435 breakpoint->set = brp_i + 1;
1436 control = ((matchmode & 0x7) << 20)
1437 | (1 << 13)
1438 | (byte_addr_select << 5)
1439 | (3 << 1) | 1;
1440 brp_list[brp_i].used = 1;
1441 brp_list[brp_i].value = (breakpoint->asid);
1442 brp_list[brp_i].control = control;
1443 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1444 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1445 brp_list[brp_i].value);
1446 if (retval != ERROR_OK)
1447 return retval;
1448 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1449 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1450 brp_list[brp_i].control);
1451 if (retval != ERROR_OK)
1452 return retval;
1453 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1454 brp_list[brp_i].control,
1455 brp_list[brp_i].value);
1456 return ERROR_OK;
1457
1458 }
1459
1460 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1461 {
1462 int retval = ERROR_FAIL;
1463 int brp_1 = 0; /* holds the contextID pair */
1464 int brp_2 = 0; /* holds the IVA pair */
1465 uint32_t control_CTX, control_IVA;
1466 uint8_t CTX_byte_addr_select = 0x0F;
1467 uint8_t IVA_byte_addr_select = 0x0F;
1468 uint8_t CTX_machmode = 0x03;
1469 uint8_t IVA_machmode = 0x01;
1470 struct aarch64_common *aarch64 = target_to_aarch64(target);
1471 struct armv8_common *armv8 = &aarch64->armv8_common;
1472 struct aarch64_brp *brp_list = aarch64->brp_list;
1473
1474 if (breakpoint->set) {
1475 LOG_WARNING("breakpoint already set");
1476 return retval;
1477 }
1478 /*check available context BRPs*/
1479 while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
1480 (brp_list[brp_1].type != BRP_CONTEXT)))
1481 brp_1++;
1482
1483 printf("brp(CTX) found num: %d\n", brp_1);
1484 if (brp_1 >= aarch64->brp_num) {
1485 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1486 return ERROR_FAIL;
1487 }
1488
1489 while ((brp_2 < aarch64->brp_num) && (brp_list[brp_2].used ||
1490 (brp_list[brp_2].type != BRP_NORMAL)))
1491 brp_2++;
1492
1493 printf("brp(IVA) found num: %d\n", brp_2);
1494 if (brp_2 >= aarch64->brp_num) {
1495 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1496 return ERROR_FAIL;
1497 }
1498
1499 breakpoint->set = brp_1 + 1;
1500 breakpoint->linked_BRP = brp_2;
1501 control_CTX = ((CTX_machmode & 0x7) << 20)
1502 | (brp_2 << 16)
1503 | (0 << 14)
1504 | (CTX_byte_addr_select << 5)
1505 | (3 << 1) | 1;
1506 brp_list[brp_1].used = 1;
1507 brp_list[brp_1].value = (breakpoint->asid);
1508 brp_list[brp_1].control = control_CTX;
1509 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1510 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1511 brp_list[brp_1].value);
1512 if (retval != ERROR_OK)
1513 return retval;
1514 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1515 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1516 brp_list[brp_1].control);
1517 if (retval != ERROR_OK)
1518 return retval;
1519
1520 control_IVA = ((IVA_machmode & 0x7) << 20)
1521 | (brp_1 << 16)
1522 | (1 << 13)
1523 | (IVA_byte_addr_select << 5)
1524 | (3 << 1) | 1;
1525 brp_list[brp_2].used = 1;
1526 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1527 brp_list[brp_2].control = control_IVA;
1528 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1529 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1530 brp_list[brp_2].value & 0xFFFFFFFF);
1531 if (retval != ERROR_OK)
1532 return retval;
1533 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1534 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1535 brp_list[brp_2].value >> 32);
1536 if (retval != ERROR_OK)
1537 return retval;
1538 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1539 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1540 brp_list[brp_2].control);
1541 if (retval != ERROR_OK)
1542 return retval;
1543
1544 return ERROR_OK;
1545 }
1546
1547 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1548 {
1549 int retval;
1550 struct aarch64_common *aarch64 = target_to_aarch64(target);
1551 struct armv8_common *armv8 = &aarch64->armv8_common;
1552 struct aarch64_brp *brp_list = aarch64->brp_list;
1553
1554 if (!breakpoint->set) {
1555 LOG_WARNING("breakpoint not set");
1556 return ERROR_OK;
1557 }
1558
1559 if (breakpoint->type == BKPT_HARD) {
1560 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1561 int brp_i = breakpoint->set - 1;
1562 int brp_j = breakpoint->linked_BRP;
1563 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1564 LOG_DEBUG("Invalid BRP number in breakpoint");
1565 return ERROR_OK;
1566 }
1567 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1568 brp_list[brp_i].control, brp_list[brp_i].value);
1569 brp_list[brp_i].used = 0;
1570 brp_list[brp_i].value = 0;
1571 brp_list[brp_i].control = 0;
1572 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1573 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1574 brp_list[brp_i].control);
1575 if (retval != ERROR_OK)
1576 return retval;
1577 if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1578 LOG_DEBUG("Invalid BRP number in breakpoint");
1579 return ERROR_OK;
1580 }
1581 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1582 brp_list[brp_j].control, brp_list[brp_j].value);
1583 brp_list[brp_j].used = 0;
1584 brp_list[brp_j].value = 0;
1585 brp_list[brp_j].control = 0;
1586 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1587 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1588 brp_list[brp_j].control);
1589 if (retval != ERROR_OK)
1590 return retval;
1591 breakpoint->linked_BRP = 0;
1592 breakpoint->set = 0;
1593 return ERROR_OK;
1594
1595 } else {
1596 int brp_i = breakpoint->set - 1;
1597 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1598 LOG_DEBUG("Invalid BRP number in breakpoint");
1599 return ERROR_OK;
1600 }
1601 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1602 brp_list[brp_i].control, brp_list[brp_i].value);
1603 brp_list[brp_i].used = 0;
1604 brp_list[brp_i].value = 0;
1605 brp_list[brp_i].control = 0;
1606 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1607 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1608 brp_list[brp_i].control);
1609 if (retval != ERROR_OK)
1610 return retval;
1611 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1612 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1613 brp_list[brp_i].value);
1614 if (retval != ERROR_OK)
1615 return retval;
1616 breakpoint->set = 0;
1617 return ERROR_OK;
1618 }
1619 } else {
1620 /* restore original instruction (kept in target endianness) */
1621 if (breakpoint->length == 4) {
1622 retval = target_write_memory(target,
1623 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1624 4, 1, breakpoint->orig_instr);
1625 if (retval != ERROR_OK)
1626 return retval;
1627 } else {
1628 retval = target_write_memory(target,
1629 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1630 2, 1, breakpoint->orig_instr);
1631 if (retval != ERROR_OK)
1632 return retval;
1633 }
1634 }
1635 breakpoint->set = 0;
1636
1637 return ERROR_OK;
1638 }
1639
1640 static int aarch64_add_breakpoint(struct target *target,
1641 struct breakpoint *breakpoint)
1642 {
1643 struct aarch64_common *aarch64 = target_to_aarch64(target);
1644
1645 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1646 LOG_INFO("no hardware breakpoint available");
1647 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1648 }
1649
1650 if (breakpoint->type == BKPT_HARD)
1651 aarch64->brp_num_available--;
1652
1653 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1654 }
1655
1656 static int aarch64_add_context_breakpoint(struct target *target,
1657 struct breakpoint *breakpoint)
1658 {
1659 struct aarch64_common *aarch64 = target_to_aarch64(target);
1660
1661 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1662 LOG_INFO("no hardware breakpoint available");
1663 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1664 }
1665
1666 if (breakpoint->type == BKPT_HARD)
1667 aarch64->brp_num_available--;
1668
1669 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1670 }
1671
1672 static int aarch64_add_hybrid_breakpoint(struct target *target,
1673 struct breakpoint *breakpoint)
1674 {
1675 struct aarch64_common *aarch64 = target_to_aarch64(target);
1676
1677 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1678 LOG_INFO("no hardware breakpoint available");
1679 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1680 }
1681
1682 if (breakpoint->type == BKPT_HARD)
1683 aarch64->brp_num_available--;
1684
1685 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1686 }
1687
1688
1689 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1690 {
1691 struct aarch64_common *aarch64 = target_to_aarch64(target);
1692
1693 #if 0
1694 /* It is perfectly possible to remove breakpoints while the target is running */
1695 if (target->state != TARGET_HALTED) {
1696 LOG_WARNING("target not halted");
1697 return ERROR_TARGET_NOT_HALTED;
1698 }
1699 #endif
1700
1701 if (breakpoint->set) {
1702 aarch64_unset_breakpoint(target, breakpoint);
1703 if (breakpoint->type == BKPT_HARD)
1704 aarch64->brp_num_available++;
1705 }
1706
1707 return ERROR_OK;
1708 }
1709
1710 /*
1711 * AArch64 Reset functions
1712 */
1713
1714 static int aarch64_assert_reset(struct target *target)
1715 {
1716 struct armv8_common *armv8 = target_to_armv8(target);
1717
1718 LOG_DEBUG(" ");
1719
1720 /* FIXME when halt is requested, make it work somehow... */
1721
1722 /* Issue some kind of warm reset. */
1723 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1724 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1725 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1726 /* REVISIT handle "pulls" cases, if there's
1727 * hardware that needs them to work.
1728 */
1729 jtag_add_reset(0, 1);
1730 } else {
1731 LOG_ERROR("%s: how to reset?", target_name(target));
1732 return ERROR_FAIL;
1733 }
1734
1735 /* registers are now invalid */
1736 register_cache_invalidate(armv8->arm.core_cache);
1737
1738 target->state = TARGET_RESET;
1739
1740 return ERROR_OK;
1741 }
1742
1743 static int aarch64_deassert_reset(struct target *target)
1744 {
1745 int retval;
1746
1747 LOG_DEBUG(" ");
1748
1749 /* be certain SRST is off */
1750 jtag_add_reset(0, 0);
1751
1752 retval = aarch64_poll(target);
1753 if (retval != ERROR_OK)
1754 return retval;
1755
1756 if (target->reset_halt) {
1757 if (target->state != TARGET_HALTED) {
1758 LOG_WARNING("%s: ran after reset and before halt ...",
1759 target_name(target));
1760 retval = target_halt(target);
1761 if (retval != ERROR_OK)
1762 return retval;
1763 }
1764 }
1765
1766 return ERROR_OK;
1767 }
1768
1769 static int aarch64_write_apb_ap_memory(struct target *target,
1770 uint64_t address, uint32_t size,
1771 uint32_t count, const uint8_t *buffer)
1772 {
1773 /* write memory through APB-AP */
1774 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1775 struct armv8_common *armv8 = target_to_armv8(target);
1776 struct arm *arm = &armv8->arm;
1777 int total_bytes = count * size;
1778 int total_u32;
1779 int start_byte = address & 0x3;
1780 int end_byte = (address + total_bytes) & 0x3;
1781 struct reg *reg;
1782 uint32_t dscr;
1783 uint8_t *tmp_buff = NULL;
1784
1785 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1786 address, size, count);
1787 if (target->state != TARGET_HALTED) {
1788 LOG_WARNING("target not halted");
1789 return ERROR_TARGET_NOT_HALTED;
1790 }
1791
1792 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1793
1794 /* Mark registers X0 and X1 as dirty, as they will be used
1795 * for transferring the data.
1796 * They will be restored automatically when exiting
1797 * debug mode
1798 */
1799 reg = armv8_reg_current(arm, 1);
1800 reg->dirty = true;
1801
1802 reg = armv8_reg_current(arm, 0);
1803 reg->dirty = true;
1804
1805 /* clear any abort */
1806 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1807 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1808 if (retval != ERROR_OK)
1809 return retval;
1810
1811
1812 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1813
1814 /* The algorithm only copies 32 bit words, so the buffer
1815 * should be expanded to include the words at either end.
1816 * The first and last words will be read first to avoid
1817 * corruption if needed.
1818 */
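/* Example layout of tmp_buff for an unaligned request (say start_byte
 * is 2 and end_byte is 1): the first word keeps its two low bytes as
 * read back from the target, the last word keeps its three high bytes,
 * and the caller's data fills everything in between, so only whole
 * 32-bit words ever cross the DCC. */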
1819 tmp_buff = malloc(total_u32 * 4);
1820
1821 if ((start_byte != 0) && (total_u32 > 1)) {
1822 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1823 * the other bytes in the word.
1824 */
1825 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1826 if (retval != ERROR_OK)
1827 goto error_free_buff_w;
1828 }
1829
1830 /* If end of write is not aligned, or the write is less than 4 bytes */
1831 if ((end_byte != 0) ||
1832 ((total_u32 == 1) && (total_bytes != 4))) {
1833
1834 /* Read the last word to avoid corruption during 32 bit write */
1835 int mem_offset = (total_u32-1) * 4;
1836 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1837 if (retval != ERROR_OK)
1838 goto error_free_buff_w;
1839 }
1840
1841 /* Copy the write buffer over the top of the temporary buffer */
1842 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1843
1844 /* We now have a 32 bit aligned buffer that can be written */
1845
1846 /* Read DSCR */
1847 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1848 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1849 if (retval != ERROR_OK)
1850 goto error_free_buff_w;
1851
1852 /* Set Normal access mode */
1853 dscr = (dscr & ~DSCR_MA);
1854 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1855 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1856
1857 if (arm->core_state == ARM_STATE_AARCH64) {
1858 /* Write X0 with value 'address' using write procedure */
1859 /* Step 1.a+b - Write the address for write access into DBGDTR_EL0 */
1860 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1861 /* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, dbgdtr_el0 */
1862 retval += aarch64_exec_opcode(target,
1863 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1864 } else {
1865 /* Write R0 with value 'address' using write procedure */
1866 /* Step 1.a+b - Write the address for write access into DBGDTRRX */
1867 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1868 /* Step 1.c - Copy value from DTR to R0 using instruction mrc p14, 0, r0, c0, c5, 0 (DBGDTRRXint) */
1869 retval += aarch64_exec_opcode(target,
1870 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1871
1872 }
1873 /* Step 1.d - Change DCC to memory mode */
1874 dscr = dscr | DSCR_MA;
1875 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1876 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1877 if (retval != ERROR_OK)
1878 goto error_unset_dtr_w;
1879
1880
1881 /* Step 2.a - Do the write */
1882 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1883 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1884 if (retval != ERROR_OK)
1885 goto error_unset_dtr_w;
1886
1887 /* Step 3.a - Switch DTR mode back to Normal mode */
1888 dscr = (dscr & ~DSCR_MA);
1889 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1890 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1891 if (retval != ERROR_OK)
1892 goto error_unset_dtr_w;
1893
1894 /* Check for sticky abort flags in the DSCR */
1895 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1896 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1897 if (retval != ERROR_OK)
1898 goto error_free_buff_w;
1899 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1900 /* Abort occurred - clear it and exit */
1901 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1902 mem_ap_write_atomic_u32(armv8->debug_ap,
1903 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1904 goto error_free_buff_w;
1905 }
1906
1907 /* Done */
1908 free(tmp_buff);
1909 return ERROR_OK;
1910
1911 error_unset_dtr_w:
1912 /* Unset DTR mode */
1913 mem_ap_read_atomic_u32(armv8->debug_ap,
1914 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1915 dscr = (dscr & ~DSCR_MA);
1916 mem_ap_write_atomic_u32(armv8->debug_ap,
1917 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1918 error_free_buff_w:
1919 LOG_ERROR("error");
1920 free(tmp_buff);
1921 return ERROR_FAIL;
1922 }
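/* Illustrative call only (hypothetical values, not used anywhere in
 * this file): patching six bytes at an unaligned address goes through
 * the read-modify-write path above, e.g.
 *
 *	static const uint8_t patch[6] = { 0, 1, 2, 3, 4, 5 };
 *	retval = aarch64_write_apb_ap_memory(target, 0x80001002, 1, 6, patch);
 *
 * Here size = 1 and count = 6; only the word at 0x80001000 has to be
 * read back first, because the write ends on a word boundary.
 */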
1923
1924 static int aarch64_read_apb_ap_memory(struct target *target,
1925 target_addr_t address, uint32_t size,
1926 uint32_t count, uint8_t *buffer)
1927 {
1928 /* read memory through APB-AP */
1929 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1930 struct armv8_common *armv8 = target_to_armv8(target);
1931 struct arm *arm = &armv8->arm;
1932 int total_bytes = count * size;
1933 int total_u32;
1934 int start_byte = address & 0x3;
1935 int end_byte = (address + total_bytes) & 0x3;
1936 struct reg *reg;
1937 uint32_t dscr;
1938 uint8_t *tmp_buff = NULL;
1939 uint8_t *u8buf_ptr;
1940 uint32_t value;
1941
1942 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%" PRIu32,
1943 address, size, count);
1944 if (target->state != TARGET_HALTED) {
1945 LOG_WARNING("target not halted");
1946 return ERROR_TARGET_NOT_HALTED;
1947 }
1948
1949 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1950 /* Mark registers X0 and X1 as dirty, as they will be used
1951 * for transferring the data.
1952 * They will be restored automatically when exiting
1953 * debug mode
1954 */
1955 reg = armv8_reg_current(arm, 1);
1956 reg->dirty = true;
1957
1958 reg = armv8_reg_current(arm, 0);
1959 reg->dirty = true;
1960
1961 /* clear any abort */
1962 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1963 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1964 if (retval != ERROR_OK)
1965 goto error_free_buff_r;
1966
1967 /* Read DSCR */
1968 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1969 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1970
1971 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1972
1973 /* Set Normal access mode */
1974 dscr = (dscr & ~DSCR_MA);
1975 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1976 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1977
1978 if (arm->core_state == ARM_STATE_AARCH64) {
1979 /* Write X0 with value 'address' using write procedure */
1980 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1981 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1982 /* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, dbgdtr_el0 */
1983 retval += aarch64_exec_opcode(target, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1984 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1985 retval += aarch64_exec_opcode(target, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1986 /* Step 1.e - Change DCC to memory mode */
1987 dscr = dscr | DSCR_MA;
1988 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1989 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1990 /* Step 1.f - read DBGDTRTX and discard the value */
1991 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1992 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1993 } else {
1994 /* Write R0 with value 'address' using write procedure */
1995 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1996 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1997 /* Step 1.c - Copy value from DTR to R0 using instruction mrc p14, 0, r0, c0, c5, 0 (DBGDTRRXint) */
1998 retval += aarch64_exec_opcode(target,
1999 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
2000 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2001 retval += aarch64_exec_opcode(target,
2002 T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr);
2003 /* Step 1.e - Change DCC to memory mode */
2004 dscr = dscr | DSCR_MA;
2005 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
2006 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2007 /* Step 1.f - read DBGDTRTX and discard the value */
2008 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2009 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2010
2011 }
2012 if (retval != ERROR_OK)
2013 goto error_unset_dtr_r;
2014
2015 /* Optimize the read as much as we can, either way we read in a single pass */
2016 if ((start_byte) || (end_byte)) {
2017 /* The algorithm only copies 32 bit words, so the buffer
2018 * should be expanded to include the words at either end.
2019 * The first and last words will be read into a temp buffer
2020 * to avoid corruption
2021 */
2022 tmp_buff = malloc(total_u32 * 4);
2023 if (!tmp_buff)
2024 goto error_unset_dtr_r;
2025
2026 /* use the tmp buffer to read the entire data */
2027 u8buf_ptr = tmp_buff;
2028 } else
2029 /* address and read length are aligned so read directly into the passed buffer */
2030 u8buf_ptr = buffer;
2031
2032 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2033 * Abort flags are sticky, so can be read at end of transactions
2034 *
2035 * This data is read in aligned to 32 bit boundary.
2036 */
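/* For example, once X0 holds 0x1000, successive DBGDTRTX reads return
 * the words at 0x1000, 0x1004, 0x1008, ... - one load per read, with
 * X0 post-incremented by 4 each time.
 */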
2037
2038 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2039 * increments X0 by 4. */
2040 retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
2041 armv8->debug_base + CPUV8_DBG_DTRTX);
2042 if (retval != ERROR_OK)
2043 goto error_unset_dtr_r;
2044
2045 /* Step 3.a - set DTR access mode back to Normal mode */
2046 dscr = (dscr & ~DSCR_MA);
2047 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2048 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2049 if (retval != ERROR_OK)
2050 goto error_free_buff_r;
2051
2052 /* Step 3.b - read DBGDTRTX for the final value */
2053 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2054 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2055 memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);
2056
2057 /* Check for sticky abort flags in the DSCR */
2058 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2059 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2060 if (retval != ERROR_OK)
2061 goto error_free_buff_r;
2062 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2063 /* Abort occurred - clear it and exit */
2064 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2065 mem_ap_write_atomic_u32(armv8->debug_ap,
2066 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
2067 goto error_free_buff_r;
2068 }
2069
2070 /* check if we need to copy aligned data by applying any shift necessary */
2071 if (tmp_buff) {
2072 memcpy(buffer, tmp_buff + start_byte, total_bytes);
2073 free(tmp_buff);
2074 }
2075
2076 /* Done */
2077 return ERROR_OK;
2078
2079 error_unset_dtr_r:
2080 /* Unset DTR mode */
2081 mem_ap_read_atomic_u32(armv8->debug_ap,
2082 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2083 dscr = (dscr & ~DSCR_MA);
2084 mem_ap_write_atomic_u32(armv8->debug_ap,
2085 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2086 error_free_buff_r:
2087 LOG_ERROR("error");
2088 free(tmp_buff);
2089 return ERROR_FAIL;
2090 }
2091
2092 static int aarch64_read_phys_memory(struct target *target,
2093 target_addr_t address, uint32_t size,
2094 uint32_t count, uint8_t *buffer)
2095 {
2096 struct armv8_common *armv8 = target_to_armv8(target);
2097 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2098 struct adiv5_dap *swjdp = armv8->arm.dap;
2099 uint8_t apsel = swjdp->apsel;
2100 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
2101 address, size, count);
2102
2103 if (count && buffer) {
2104
2105 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2106
2107 /* read memory through AHB-AP */
2108 retval = mem_ap_read_buf(armv8->memory_ap, buffer, size, count, address);
2109 } else {
2110 /* read memory through APB-AP */
2111 retval = aarch64_mmu_modify(target, 0);
2112 if (retval != ERROR_OK)
2113 return retval;
2114 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2115 }
2116 }
2117 return retval;
2118 }
2119
2120 static int aarch64_read_memory(struct target *target, target_addr_t address,
2121 uint32_t size, uint32_t count, uint8_t *buffer)
2122 {
2123 int mmu_enabled = 0;
2124 target_addr_t virt, phys;
2125 int retval;
2126 struct armv8_common *armv8 = target_to_armv8(target);
2127 struct adiv5_dap *swjdp = armv8->arm.dap;
2128 uint8_t apsel = swjdp->apsel;
2129
2130 /* aarch64 handles unaligned memory access */
2131 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2132 size, count);
2133
2134 /* determine if MMU was enabled on target stop */
2135 if (!armv8->is_armv7r) {
2136 retval = aarch64_mmu(target, &mmu_enabled);
2137 if (retval != ERROR_OK)
2138 return retval;
2139 }
2140
2141 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2142 if (mmu_enabled) {
2143 virt = address;
2144 retval = aarch64_virt2phys(target, virt, &phys);
2145 if (retval != ERROR_OK)
2146 return retval;
2147
2148 LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR,
2149 virt, phys);
2150 address = phys;
2151 }
2152 retval = aarch64_read_phys_memory(target, address, size, count,
2153 buffer);
2154 } else {
2155 if (mmu_enabled) {
2156 retval = aarch64_check_address(target, address);
2157 if (retval != ERROR_OK)
2158 return retval;
2159 /* enable MMU as we could have disabled it for phys
2160 access */
2161 retval = aarch64_mmu_modify(target, 1);
2162 if (retval != ERROR_OK)
2163 return retval;
2164 }
2165 retval = aarch64_read_apb_ap_memory(target, address, size,
2166 count, buffer);
2167 }
2168 return retval;
2169 }
2170
2171 static int aarch64_write_phys_memory(struct target *target,
2172 target_addr_t address, uint32_t size,
2173 uint32_t count, const uint8_t *buffer)
2174 {
2175 struct armv8_common *armv8 = target_to_armv8(target);
2176 struct adiv5_dap *swjdp = armv8->arm.dap;
2177 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2178 uint8_t apsel = swjdp->apsel;
2179
2180 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2181 size, count);
2182
2183 if (count && buffer) {
2184
2185 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2186
2187 /* write memory through AHB-AP */
2188 retval = mem_ap_write_buf(armv8->memory_ap, buffer, size, count, address);
2189 } else {
2190
2191 /* write memory through APB-AP */
2192 if (!armv8->is_armv7r) {
2193 retval = aarch64_mmu_modify(target, 0);
2194 if (retval != ERROR_OK)
2195 return retval;
2196 }
2197 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2198 }
2199 }
2200
2201
2202 /* REVISIT this op is generic ARMv7-A/R stuff */
2203 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2204 struct arm_dpm *dpm = armv8->arm.dpm;
2205
2206 retval = dpm->prepare(dpm);
2207 if (retval != ERROR_OK)
2208 return retval;
2209
2210 /* The Cache handling will NOT work with MMU active, the
2211 * wrong addresses will be invalidated!
2212 *
2213 * For both ICache and DCache, walk all cache lines in the
2214 * address range. A fixed 64 byte cache line length is assumed here.
2215 *
2216 * REVISIT per ARMv7, these may trigger watchpoints ...
2217 */
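/* E.g. (illustrative): a 4 KiB buffer written to 0x80000000 walks 64
 * cache lines, issuing the maintenance operation for 0x80000000,
 * 0x80000040, ... up to and including 0x80000fc0.
 */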
2218
2219 /* invalidate I-Cache */
2220 if (armv8->armv8_mmu.armv8_cache.i_cache_enabled) {
2221 /* ICIMVAU - Invalidate Cache single entry
2222 * with MVA to PoU
2223 * MCR p15, 0, r0, c7, c5, 1
2224 */
2225 for (uint32_t cacheline = address & ~0x3f;
2226 cacheline < address + size * count;
2227 cacheline += 64) {
2228 retval = dpm->instr_write_data_r0(dpm,
2229 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2230 cacheline);
2231 if (retval != ERROR_OK)
2232 return retval;
2233 }
2234 }
2235
2236 /* invalidate D-Cache */
2237 if (armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
2238 /* DCIMVAC - Invalidate data Cache line
2239 * with MVA to PoC
2240 * MCR p15, 0, r0, c7, c6, 1
2241 */
2242 for (uint32_t cacheline = address & ~0x3f;
2243 cacheline < address + size * count;
2244 cacheline += 64) {
2245 retval = dpm->instr_write_data_r0(dpm,
2246 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2247 cacheline);
2248 if (retval != ERROR_OK)
2249 return retval;
2250 }
2251 }
2252
2253 /* (void) */ dpm->finish(dpm);
2254 }
2255
2256 return retval;
2257 }
2258
2259 static int aarch64_write_memory(struct target *target, target_addr_t address,
2260 uint32_t size, uint32_t count, const uint8_t *buffer)
2261 {
2262 int mmu_enabled = 0;
2263 target_addr_t virt, phys;
2264 int retval;
2265 struct armv8_common *armv8 = target_to_armv8(target);
2266 struct adiv5_dap *swjdp = armv8->arm.dap;
2267 uint8_t apsel = swjdp->apsel;
2268
2269 /* aarch64 handles unaligned memory access */
2270 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
2271 "; count %" PRId32, address, size, count);
2272
2273 /* determine if MMU was enabled on target stop */
2274 if (!armv8->is_armv7r) {
2275 retval = aarch64_mmu(target, &mmu_enabled);
2276 if (retval != ERROR_OK)
2277 return retval;
2278 }
2279
2280 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2281 LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR "; size %"
2282 PRId32 "; count %" PRId32, address, size, count);
2283 if (mmu_enabled) {
2284 virt = address;
2285 retval = aarch64_virt2phys(target, virt, &phys);
2286 if (retval != ERROR_OK)
2287 return retval;
2288
2289 LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2290 TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR, virt, phys);
2291 address = phys;
2292 }
2293 retval = aarch64_write_phys_memory(target, address, size,
2294 count, buffer);
2295 } else {
2296 if (mmu_enabled) {
2297 retval = aarch64_check_address(target, address);
2298 if (retval != ERROR_OK)
2299 return retval;
2300 /* enable MMU as we could have disabled it for phys access */
2301 retval = aarch64_mmu_modify(target, 1);
2302 if (retval != ERROR_OK)
2303 return retval;
2304 }
2305 retval = aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2306 }
2307 return retval;
2308 }
2309
2310 static int aarch64_handle_target_request(void *priv)
2311 {
2312 struct target *target = priv;
2313 struct armv8_common *armv8 = target_to_armv8(target);
2314 int retval;
2315
2316 if (!target_was_examined(target))
2317 return ERROR_OK;
2318 if (!target->dbg_msg_enabled)
2319 return ERROR_OK;
2320
2321 if (target->state == TARGET_RUNNING) {
2322 uint32_t request;
2323 uint32_t dscr;
2324 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2325 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2326
2327 /* check if we have data */
2328 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2329 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2330 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2331 if (retval == ERROR_OK) {
2332 target_request(target, request);
2333 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2334 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2335 }
2336 }
2337 }
2338
2339 return ERROR_OK;
2340 }
2341
2342 static int aarch64_examine_first(struct target *target)
2343 {
2344 struct aarch64_common *aarch64 = target_to_aarch64(target);
2345 struct armv8_common *armv8 = &aarch64->armv8_common;
2346 struct adiv5_dap *swjdp = armv8->arm.dap;
2347 int retval = ERROR_OK;
2348 uint32_t pfr, debug, ctypr, ttypr, cpuid;
2349 int i;
2350
2351 /* Make sure the debug port (DP) is initialized before
2352 * any debug register access
2353 */
2354 retval = dap_dp_init(swjdp);
2355 if (retval != ERROR_OK)
2356 return retval;
2357
2358 /* Search for the APB-AP - it is needed for access to debug registers */
2359 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2360 if (retval != ERROR_OK) {
2361 LOG_ERROR("Could not find APB-AP for debug access");
2362 return retval;
2363 }
2364
2365 retval = mem_ap_init(armv8->debug_ap);
2366 if (retval != ERROR_OK) {
2367 LOG_ERROR("Could not initialize the APB-AP");
2368 return retval;
2369 }
2370
2371 armv8->debug_ap->memaccess_tck = 80;
2372
2373 /* Search for the AHB-AP */
2374 armv8->memory_ap_available = false;
2375 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv8->memory_ap);
2376 if (retval == ERROR_OK) {
2377 retval = mem_ap_init(armv8->memory_ap);
2378 if (retval == ERROR_OK)
2379 armv8->memory_ap_available = true;
2380 }
2381 if (retval != ERROR_OK) {
2382 /* AHB-AP not found or unavailable - use the CPU */
2383 LOG_DEBUG("No AHB-AP available for memory access");
2384 }
2385
2386
2387 if (!target->dbgbase_set) {
2388 uint32_t dbgbase;
2389 /* Get ROM Table base */
2390 uint32_t apid;
2391 int32_t coreidx = target->coreid;
2392 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2393 if (retval != ERROR_OK)
2394 return retval;
2395 /* Lookup devtype 0x15 - Debug Logic, Processor core */
2396 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2397 &armv8->debug_base, &coreidx);
2398 if (retval != ERROR_OK)
2399 return retval;
2400 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2401 coreidx, armv8->debug_base);
2402 } else
2403 armv8->debug_base = target->dbgbase;
2404
2405 LOG_DEBUG("Target ctibase is 0x%x", target->ctibase);
2406 if (target->ctibase == 0)
2407 armv8->cti_base = target->ctibase = armv8->debug_base + 0x1000;
2408 else
2409 armv8->cti_base = target->ctibase;
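/* When no explicit ctibase is configured, the CTI is assumed to sit
 * one 4 KiB page above the debug registers (debug_base + 0x1000).
 */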
2410
2411 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2412 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
2413 if (retval != ERROR_OK) {
2414 LOG_DEBUG("Examine %s failed", "oslock");
2415 return retval;
2416 }
2417
2418 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2419 armv8->debug_base + 0x88, &cpuid);
2420 LOG_DEBUG("0x88 = %x", cpuid);
2421
2422 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2423 armv8->debug_base + 0x314, &cpuid);
2424 LOG_DEBUG("0x314 = %x", cpuid);
2425
2426 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2427 armv8->debug_base + 0x310, &cpuid);
2428 LOG_DEBUG("0x310 = %x", cpuid);
2429 if (retval != ERROR_OK)
2430 return retval;
2431
2432 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2433 armv8->debug_base + CPUDBG_CPUID, &cpuid);
2434 if (retval != ERROR_OK) {
2435 LOG_DEBUG("Examine %s failed", "CPUID");
2436 return retval;
2437 }
2438
2439 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2440 armv8->debug_base + CPUDBG_CTYPR, &ctypr);
2441 if (retval != ERROR_OK) {
2442 LOG_DEBUG("Examine %s failed", "CTYPR");
2443 return retval;
2444 }
2445
2446 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2447 armv8->debug_base + CPUDBG_TTYPR, &ttypr);
2448 if (retval != ERROR_OK) {
2449 LOG_DEBUG("Examine %s failed", "TTYPR");
2450 return retval;
2451 }
2452
2453 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2454 armv8->debug_base + ID_AA64PFR0_EL1, &pfr);
2455 if (retval != ERROR_OK) {
2456 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2457 return retval;
2458 }
2459 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2460 armv8->debug_base + ID_AA64DFR0_EL1, &debug);
2461 if (retval != ERROR_OK) {
2462 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2463 return retval;
2464 }
2465
2466 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2467 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2468 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2469 LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32, pfr);
2470 LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32, debug);
2471
2472 armv8->arm.core_type = ARM_MODE_MON;
2473 armv8->arm.core_state = ARM_STATE_AARCH64;
2474 retval = aarch64_dpm_setup(aarch64, debug);
2475 if (retval != ERROR_OK)
2476 return retval;
2477
2478 /* Setup Breakpoint Register Pairs */
2479 aarch64->brp_num = ((debug >> 12) & 0x0F) + 1;
2480 aarch64->brp_num_context = ((debug >> 28) & 0x0F) + 1;
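/* Per the ARMv8 ARM, ID_AA64DFR0_EL1.BRPs is bits [15:12] and
 * CTX_CMPs is bits [31:28]; both fields encode the number of
 * implemented comparators minus one, hence the "+ 1" above.
 */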
2481
2482 /* hack - no context bpt support yet */
2483 aarch64->brp_num_context = 0;
2484
2485 aarch64->brp_num_available = aarch64->brp_num;
2486 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
if (aarch64->brp_list == NULL)
return ERROR_FAIL;
2487 for (i = 0; i < aarch64->brp_num; i++) {
2488 aarch64->brp_list[i].used = 0;
2489 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2490 aarch64->brp_list[i].type = BRP_NORMAL;
2491 else
2492 aarch64->brp_list[i].type = BRP_CONTEXT;
2493 aarch64->brp_list[i].value = 0;
2494 aarch64->brp_list[i].control = 0;
2495 aarch64->brp_list[i].BRPn = i;
2496 }
2497
2498 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2499
2500 target_set_examined(target);
2501 return ERROR_OK;
2502 }
2503
2504 static int aarch64_examine(struct target *target)
2505 {
2506 int retval = ERROR_OK;
2507
2508 /* don't re-probe hardware after each reset */
2509 if (!target_was_examined(target))
2510 retval = aarch64_examine_first(target);
2511
2512 /* Configure core debug access */
2513 if (retval == ERROR_OK)
2514 retval = aarch64_init_debug_access(target);
2515
2516 return retval;
2517 }
2518
2519 /*
2520 * aarch64 target creation and initialization
2521 */
2522
2523 static int aarch64_init_target(struct command_context *cmd_ctx,
2524 struct target *target)
2525 {
2526 /* examine_first() does a bunch of this */
2527 return ERROR_OK;
2528 }
2529
2530 static int aarch64_init_arch_info(struct target *target,
2531 struct aarch64_common *aarch64, struct jtag_tap *tap)
2532 {
2533 struct armv8_common *armv8 = &aarch64->armv8_common;
2537
2538 /* Setup struct aarch64_common */
2539 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2540 /* tap has no dap initialized */
2541 if (!tap->dap) {
2542 tap->dap = dap_init();
2543
2544 /* Leave (only) generic DAP stuff for debugport_init() */
2545 tap->dap->tap = tap;
2546 }
2547
2548 armv8->arm.dap = tap->dap;
2549
2550 aarch64->fast_reg_read = 0;
2551
2552 /* register arch-specific functions */
2553 armv8->examine_debug_reason = NULL;
2554
2555 armv8->post_debug_entry = aarch64_post_debug_entry;
2556
2557 armv8->pre_restore_context = NULL;
2558
2559 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2560
2561 /* REVISIT v7a setup should be in a v7a-specific routine */
2562 armv8_init_arch_info(target, armv8);
2563 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2564
2565 return ERROR_OK;
2566 }
2567
2568 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2569 {
2570 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
if (aarch64 == NULL)
return ERROR_FAIL;
2571
2572 aarch64->armv8_common.is_armv7r = false;
2573
2574 return aarch64_init_arch_info(target, aarch64, target->tap);
2575 }
2576
2577 static int aarch64_mmu(struct target *target, int *enabled)
2578 {
2579 if (target->state != TARGET_HALTED) {
2580 LOG_ERROR("%s: target not halted", __func__);
2581 return ERROR_TARGET_INVALID;
2582 }
2583
2584 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2585 return ERROR_OK;
2586 }
2587
2588 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2589 target_addr_t *phys)
2590 {
2591 int retval = ERROR_FAIL;
2592 struct armv8_common *armv8 = target_to_armv8(target);
2593 struct adiv5_dap *swjdp = armv8->arm.dap;
2594 uint8_t apsel = swjdp->apsel;
2595 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2596 uint32_t ret;
2597 retval = armv8_mmu_translate_va(target,
2598 virt, &ret);
2599 if (retval != ERROR_OK)
2600 goto done;
2601 *phys = ret;
2602 } else { /* use this method if armv8->memory_ap is not selected;
2603 * the MMU must be enabled in order to get a correct translation */
2604 retval = aarch64_mmu_modify(target, 1);
2605 if (retval != ERROR_OK)
2606 goto done;
2607 retval = armv8_mmu_translate_va_pa(target, virt, phys, 1);
2608 }
2609 done:
2610 return retval;
2611 }
2612
2613 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2614 {
2615 struct target *target = get_current_target(CMD_CTX);
2616 struct armv8_common *armv8 = target_to_armv8(target);
2617
2618 return armv8_handle_cache_info_command(CMD_CTX,
2619 &armv8->armv8_mmu.armv8_cache);
2620 }
2621
2622
2623 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2624 {
2625 struct target *target = get_current_target(CMD_CTX);
2626 if (!target_was_examined(target)) {
2627 LOG_ERROR("target not examined yet");
2628 return ERROR_FAIL;
2629 }
2630
2631 return aarch64_init_debug_access(target);
2632 }

2633 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2634 {
2635 struct target *target = get_current_target(CMD_CTX);
2636 /* check target is an smp target */
2637 struct target_list *head;
2638 struct target *curr;
2639 head = target->head;
2640 target->smp = 0;
2641 if (head != (struct target_list *)NULL) {
2642 while (head != (struct target_list *)NULL) {
2643 curr = head->target;
2644 curr->smp = 0;
2645 head = head->next;
2646 }
2647 /* fixes the target display to the debugger */
2648 target->gdb_service->target = target;
2649 }
2650 return ERROR_OK;
2651 }
2652
2653 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2654 {
2655 struct target *target = get_current_target(CMD_CTX);
2656 struct target_list *head;
2657 struct target *curr;
2658 head = target->head;
2659 if (head != (struct target_list *)NULL) {
2660 target->smp = 1;
2661 while (head != (struct target_list *)NULL) {
2662 curr = head->target;
2663 curr->smp = 1;
2664 head = head->next;
2665 }
2666 }
2667 return ERROR_OK;
2668 }
2669
2670 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2671 {
2672 struct target *target = get_current_target(CMD_CTX);
2673 int retval = ERROR_OK;
2674 struct target_list *head;
2675 head = target->head;
2676 if (head != (struct target_list *)NULL) {
2677 if (CMD_ARGC == 1) {
2678 int coreid = 0;
2679 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2680 if (ERROR_OK != retval)
2681 return retval;
2682 target->gdb_service->core[1] = coreid;
2683
2684 }
2685 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2686 , target->gdb_service->core[1]);
2687 }
2688 return ERROR_OK;
2689 }
2690
2691 static const struct command_registration aarch64_exec_command_handlers[] = {
2692 {
2693 .name = "cache_info",
2694 .handler = aarch64_handle_cache_info_command,
2695 .mode = COMMAND_EXEC,
2696 .help = "display information about target caches",
2697 .usage = "",
2698 },
2699 {
2700 .name = "dbginit",
2701 .handler = aarch64_handle_dbginit_command,
2702 .mode = COMMAND_EXEC,
2703 .help = "Initialize core debug",
2704 .usage = "",
2705 },
2706 {
.name = "smp_off",
2707 .handler = aarch64_handle_smp_off_command,
2708 .mode = COMMAND_EXEC,
2709 .help = "Stop smp handling",
2710 .usage = "",
2711 },
2712 {
2713 .name = "smp_on",
2714 .handler = aarch64_handle_smp_on_command,
2715 .mode = COMMAND_EXEC,
2716 .help = "Restart smp handling",
2717 .usage = "",
2718 },
2719 {
2720 .name = "smp_gdb",
2721 .handler = aarch64_handle_smp_gdb_command,
2722 .mode = COMMAND_EXEC,
2723 .help = "display/fix current core played to gdb",
2724 .usage = "",
2725 },
2728 COMMAND_REGISTRATION_DONE
2729 };
2730 static const struct command_registration aarch64_command_handlers[] = {
2731 {
2732 .chain = arm_command_handlers,
2733 },
2734 {
2735 .chain = armv8_command_handlers,
2736 },
2737 {
2738 .name = "cortex_a",
2739 .mode = COMMAND_ANY,
2740 .help = "Cortex-A command group",
2741 .usage = "",
2742 .chain = aarch64_exec_command_handlers,
2743 },
2744 COMMAND_REGISTRATION_DONE
2745 };
2746
2747 struct target_type aarch64_target = {
2748 .name = "aarch64",
2749
2750 .poll = aarch64_poll,
2751 .arch_state = armv8_arch_state,
2752
2753 .halt = aarch64_halt,
2754 .resume = aarch64_resume,
2755 .step = aarch64_step,
2756
2757 .assert_reset = aarch64_assert_reset,
2758 .deassert_reset = aarch64_deassert_reset,
2759
2760 /* REVISIT allow exporting VFP3 registers ... */
2761 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2762
2763 .read_memory = aarch64_read_memory,
2764 .write_memory = aarch64_write_memory,
2765
2766 .checksum_memory = arm_checksum_memory,
2767 .blank_check_memory = arm_blank_check_memory,
2768
2769 .run_algorithm = armv4_5_run_algorithm,
2770
2771 .add_breakpoint = aarch64_add_breakpoint,
2772 .add_context_breakpoint = aarch64_add_context_breakpoint,
2773 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2774 .remove_breakpoint = aarch64_remove_breakpoint,
2775 .add_watchpoint = NULL,
2776 .remove_watchpoint = NULL,
2777
2778 .commands = aarch64_command_handlers,
2779 .target_create = aarch64_target_create,
2780 .init_target = aarch64_init_target,
2781 .examine = aarch64_examine,
2782
2783 .read_phys_memory = aarch64_read_phys_memory,
2784 .write_phys_memory = aarch64_write_phys_memory,
2785 .mmu = aarch64_mmu,
2786 .virt2phys = aarch64_virt2phys,
2787 };
