aarch64: add cache handling when setting/deleting soft breakpoints
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
/* Forward declarations for functions referenced before their definitions. */
static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_apb_ap_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data);
51
52 static int aarch64_restore_system_control_reg(struct target *target)
53 {
54 int retval = ERROR_OK;
55
56 struct aarch64_common *aarch64 = target_to_aarch64(target);
57 struct armv8_common *armv8 = target_to_armv8(target);
58
59 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
60 aarch64->system_control_reg_curr = aarch64->system_control_reg;
61 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
62
63 switch (armv8->arm.core_mode) {
64 case ARMV8_64_EL0T:
65 case ARMV8_64_EL1T:
66 case ARMV8_64_EL1H:
67 retval = armv8->arm.msr(target, 3, /*op 0*/
68 0, 1, /* op1, op2 */
69 0, 0, /* CRn, CRm */
70 aarch64->system_control_reg);
71 if (retval != ERROR_OK)
72 return retval;
73 break;
74 case ARMV8_64_EL2T:
75 case ARMV8_64_EL2H:
76 retval = armv8->arm.msr(target, 3, /*op 0*/
77 4, 1, /* op1, op2 */
78 0, 0, /* CRn, CRm */
79 aarch64->system_control_reg);
80 if (retval != ERROR_OK)
81 return retval;
82 break;
83 case ARMV8_64_EL3H:
84 case ARMV8_64_EL3T:
85 retval = armv8->arm.msr(target, 3, /*op 0*/
86 6, 1, /* op1, op2 */
87 0, 0, /* CRn, CRm */
88 aarch64->system_control_reg);
89 if (retval != ERROR_OK)
90 return retval;
91 break;
92 default:
93 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
94 }
95 }
96 return retval;
97 }
98
/* Check an address before an aarch64 APB read/write access with the MMU on,
 * to avoid a predictable APB data abort.
 *
 * Currently an unimplemented stub: every address is reported as usable
 * and the target/address parameters are not examined. */
static int aarch64_check_address(struct target *target, uint32_t address)
{
	/* TODO */
	return ERROR_OK;
}
106 /* modify system_control_reg in order to enable or disable mmu for :
107 * - virt2phys address conversion
108 * - read or write memory in phys or virt address */
109 static int aarch64_mmu_modify(struct target *target, int enable)
110 {
111 struct aarch64_common *aarch64 = target_to_aarch64(target);
112 struct armv8_common *armv8 = &aarch64->armv8_common;
113 int retval = ERROR_OK;
114
115 if (enable) {
116 /* if mmu enabled at target stop and mmu not enable */
117 if (!(aarch64->system_control_reg & 0x1U)) {
118 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
119 return ERROR_FAIL;
120 }
121 if (!(aarch64->system_control_reg_curr & 0x1U)) {
122 aarch64->system_control_reg_curr |= 0x1U;
123 switch (armv8->arm.core_mode) {
124 case ARMV8_64_EL0T:
125 case ARMV8_64_EL1T:
126 case ARMV8_64_EL1H:
127 retval = armv8->arm.msr(target, 3, /*op 0*/
128 0, 0, /* op1, op2 */
129 1, 0, /* CRn, CRm */
130 aarch64->system_control_reg_curr);
131 if (retval != ERROR_OK)
132 return retval;
133 break;
134 case ARMV8_64_EL2T:
135 case ARMV8_64_EL2H:
136 retval = armv8->arm.msr(target, 3, /*op 0*/
137 4, 0, /* op1, op2 */
138 1, 0, /* CRn, CRm */
139 aarch64->system_control_reg_curr);
140 if (retval != ERROR_OK)
141 return retval;
142 break;
143 case ARMV8_64_EL3H:
144 case ARMV8_64_EL3T:
145 retval = armv8->arm.msr(target, 3, /*op 0*/
146 6, 0, /* op1, op2 */
147 1, 0, /* CRn, CRm */
148 aarch64->system_control_reg_curr);
149 if (retval != ERROR_OK)
150 return retval;
151 break;
152 default:
153 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
154 }
155 }
156 } else {
157 if (aarch64->system_control_reg_curr & 0x4U) {
158 /* data cache is active */
159 aarch64->system_control_reg_curr &= ~0x4U;
160 /* flush data cache armv7 function to be called */
161 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
162 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
163 }
164 if ((aarch64->system_control_reg_curr & 0x1U)) {
165 aarch64->system_control_reg_curr &= ~0x1U;
166 switch (armv8->arm.core_mode) {
167 case ARMV8_64_EL0T:
168 case ARMV8_64_EL1T:
169 case ARMV8_64_EL1H:
170 retval = armv8->arm.msr(target, 3, /*op 0*/
171 0, 0, /* op1, op2 */
172 1, 0, /* CRn, CRm */
173 aarch64->system_control_reg_curr);
174 if (retval != ERROR_OK)
175 return retval;
176 break;
177 case ARMV8_64_EL2T:
178 case ARMV8_64_EL2H:
179 retval = armv8->arm.msr(target, 3, /*op 0*/
180 4, 0, /* op1, op2 */
181 1, 0, /* CRn, CRm */
182 aarch64->system_control_reg_curr);
183 if (retval != ERROR_OK)
184 return retval;
185 break;
186 case ARMV8_64_EL3H:
187 case ARMV8_64_EL3T:
188 retval = armv8->arm.msr(target, 3, /*op 0*/
189 6, 0, /* op1, op2 */
190 1, 0, /* CRn, CRm */
191 aarch64->system_control_reg_curr);
192 if (retval != ERROR_OK)
193 return retval;
194 break;
195 default:
196 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
197 break;
198 }
199 }
200 }
201 return retval;
202 }
203
204 /*
205 * Basic debug access, very low level assumes state is saved
206 */
207 static int aarch64_init_debug_access(struct target *target)
208 {
209 struct armv8_common *armv8 = target_to_armv8(target);
210 int retval;
211 uint32_t dummy;
212
213 LOG_DEBUG(" ");
214
215 /* Unlocking the debug registers for modification
216 * The debugport might be uninitialised so try twice */
217 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
218 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
219 if (retval != ERROR_OK) {
220 /* try again */
221 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
222 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
223 if (retval == ERROR_OK)
224 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
225 }
226 if (retval != ERROR_OK)
227 return retval;
228 /* Clear Sticky Power Down status Bit in PRSR to enable access to
229 the registers in the Core Power Domain */
230 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
231 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
232 if (retval != ERROR_OK)
233 return retval;
234
235 /* Enabling of instruction execution in debug mode is done in debug_entry code */
236
237 /* Resync breakpoint registers */
238
239 /* Since this is likely called from init or reset, update target state information*/
240 return aarch64_poll(target);
241 }
242
/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value.  Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_ITE if you
 * happen to know that no instruction is pending.
 */
static int aarch64_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);
	/* start from the caller's cached DSCR so the first poll can be skipped */
	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_ITE) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* writing the ITR register makes the core execute the opcode */
	retval = mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	then = timeval_ms();
	do {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_ITE) == 0);	/* Wait for InstrCompl bit to be set */

	/* hand the final DSCR back so the caller can chain further operations */
	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
297
298 /* Write to memory mapped registers directly with no cache or mmu handling */
299 static int aarch64_dap_write_memap_register_u32(struct target *target,
300 uint32_t address,
301 uint32_t value)
302 {
303 int retval;
304 struct armv8_common *armv8 = target_to_armv8(target);
305
306 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
307
308 return retval;
309 }
310
/*
 * AARCH64 implementation of Debug Programmer's Model
 *
 * NOTE the invariant: these routines return with DSCR_ITE set,
 * so there's no need to poll for it before executing an instruction.
 *
 * NOTE that in several of these cases the "stall" mode might be useful.
 * It'd let us queue a few operations together... prepare/finish might
 * be the places to enable/disable that mode.
 */

/* Recover the enclosing aarch64_common from its embedded arm_dpm member. */
static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct aarch64_common, armv8_common.dpm);
}
326
/* Push a 32-bit value into the DTRRX register (host-to-core direction
 * of the debug communication channel). */
static int aarch64_write_dcc(struct armv8_common *armv8, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRRX, data);
}
333
334 static int aarch64_write_dcc_64(struct armv8_common *armv8, uint64_t data)
335 {
336 int ret;
337 LOG_DEBUG("write DCC Low word0x%08" PRIx32, (unsigned)data);
338 LOG_DEBUG("write DCC High word 0x%08" PRIx32, (unsigned)(data >> 32));
339 ret = mem_ap_write_u32(armv8->debug_ap,
340 armv8->debug_base + CPUV8_DBG_DTRRX, data);
341 ret += mem_ap_write_u32(armv8->debug_ap,
342 armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
343 return ret;
344 }
345
/* Read a 32-bit value from the DTRTX register (core-to-host direction),
 * waiting up to one second for the core to post data.  An optional DSCR
 * cache is read and written through dscr_p to save round-trips. */
static int aarch64_read_dcc(struct armv8_common *armv8, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_ITE;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX,
			data);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("read DCC 0x%08" PRIx32, *data);

	/* hand the final DSCR back to the caller */
	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
381
382 static int aarch64_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
383 uint32_t *dscr_p)
384 {
385 uint32_t dscr = DSCR_ITE;
386 uint32_t higher;
387 int retval;
388
389 if (dscr_p)
390 dscr = *dscr_p;
391
392 /* Wait for DTRRXfull */
393 long long then = timeval_ms();
394 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
395 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
396 armv8->debug_base + CPUV8_DBG_DSCR,
397 &dscr);
398 if (retval != ERROR_OK)
399 return retval;
400 if (timeval_ms() > then + 1000) {
401 LOG_ERROR("Timeout waiting for read dcc");
402 return ERROR_FAIL;
403 }
404 }
405
406 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
407 armv8->debug_base + CPUV8_DBG_DTRTX,
408 (uint32_t *)data);
409 if (retval != ERROR_OK)
410 return retval;
411
412 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
413 armv8->debug_base + CPUV8_DBG_DTRRX,
414 &higher);
415 if (retval != ERROR_OK)
416 return retval;
417
418 *data = *(uint32_t *)data | (uint64_t)higher << 32;
419 LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
420
421 if (dscr_p)
422 *dscr_p = dscr;
423
424 return retval;
425 }
426
/* Prepare the DPM for instruction execution: wait (up to 1 s) for the
 * ITE invariant, then drain a stuck DTRRX and clear sticky errors if
 * the "should never happen" RX-full condition is seen. */
static int aarch64_dpm_prepare(struct arm_dpm *dpm)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after ever DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_ITE) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by reading it (value is discarded into dscr) */
		retval = mem_ap_read_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DTRRX, &dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Clear sticky error */
		retval = mem_ap_write_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
467
/* Counterpart of aarch64_dpm_prepare; currently nothing to undo. */
static int aarch64_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
473
474 static int aarch64_instr_execute(struct arm_dpm *dpm,
475 uint32_t opcode)
476 {
477 struct aarch64_common *a8 = dpm_to_a8(dpm);
478 uint32_t dscr = DSCR_ITE;
479
480 return aarch64_exec_opcode(
481 a8->armv8_common.arm.target,
482 opcode,
483 &dscr);
484 }
485
486 static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
487 uint32_t opcode, uint32_t data)
488 {
489 struct aarch64_common *a8 = dpm_to_a8(dpm);
490 int retval;
491 uint32_t dscr = DSCR_ITE;
492
493 retval = aarch64_write_dcc(&a8->armv8_common, data);
494 if (retval != ERROR_OK)
495 return retval;
496
497 return aarch64_exec_opcode(
498 a8->armv8_common.arm.target,
499 opcode,
500 &dscr);
501 }
502
503 static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
504 uint32_t opcode, uint64_t data)
505 {
506 struct aarch64_common *a8 = dpm_to_a8(dpm);
507 int retval;
508 uint32_t dscr = DSCR_ITE;
509
510 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
511 if (retval != ERROR_OK)
512 return retval;
513
514 return aarch64_exec_opcode(
515 a8->armv8_common.arm.target,
516 opcode,
517 &dscr);
518 }
519
520 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
521 uint32_t opcode, uint32_t data)
522 {
523 struct aarch64_common *a8 = dpm_to_a8(dpm);
524 uint32_t dscr = DSCR_ITE;
525 int retval;
526
527 retval = aarch64_write_dcc(&a8->armv8_common, data);
528 if (retval != ERROR_OK)
529 return retval;
530
531 retval = aarch64_exec_opcode(
532 a8->armv8_common.arm.target,
533 ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 0),
534 &dscr);
535 if (retval != ERROR_OK)
536 return retval;
537
538 /* then the opcode, taking data from R0 */
539 retval = aarch64_exec_opcode(
540 a8->armv8_common.arm.target,
541 opcode,
542 &dscr);
543
544 return retval;
545 }
546
547 static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
548 uint32_t opcode, uint64_t data)
549 {
550 struct aarch64_common *a8 = dpm_to_a8(dpm);
551 uint32_t dscr = DSCR_ITE;
552 int retval;
553
554 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
555 if (retval != ERROR_OK)
556 return retval;
557
558 retval = aarch64_exec_opcode(
559 a8->armv8_common.arm.target,
560 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0),
561 &dscr);
562 if (retval != ERROR_OK)
563 return retval;
564
565 /* then the opcode, taking data from R0 */
566 retval = aarch64_exec_opcode(
567 a8->armv8_common.arm.target,
568 opcode,
569 &dscr);
570
571 return retval;
572 }
573
/* Synchronize the core after modifying execution state in CPSR.
 * NOTE(review): despite the "prefetch flush" wording below, the opcode
 * issued is DSB_SY (a data barrier), not an ISB -- confirm intent. */
static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_ITE;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return aarch64_exec_opcode(target,
			DSB_SY,
			&dscr);
}
584
585 static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
586 uint32_t opcode, uint32_t *data)
587 {
588 struct aarch64_common *a8 = dpm_to_a8(dpm);
589 int retval;
590 uint32_t dscr = DSCR_ITE;
591
592 /* the opcode, writing data to DCC */
593 retval = aarch64_exec_opcode(
594 a8->armv8_common.arm.target,
595 opcode,
596 &dscr);
597 if (retval != ERROR_OK)
598 return retval;
599
600 return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
601 }
602
603 static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
604 uint32_t opcode, uint64_t *data)
605 {
606 struct aarch64_common *a8 = dpm_to_a8(dpm);
607 int retval;
608 uint32_t dscr = DSCR_ITE;
609
610 /* the opcode, writing data to DCC */
611 retval = aarch64_exec_opcode(
612 a8->armv8_common.arm.target,
613 opcode,
614 &dscr);
615 if (retval != ERROR_OK)
616 return retval;
617
618 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
619 }
620
/* Execute an opcode that leaves a 32-bit result in R0/W0, then move R0
 * through the DCC back to the host. */
static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	/* the opcode, writing data to R0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 0), /* msr dbgdtrtx_el0, x0 */
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
}
646
647 static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
648 uint32_t opcode, uint64_t *data)
649 {
650 struct aarch64_common *a8 = dpm_to_a8(dpm);
651 uint32_t dscr = DSCR_ITE;
652 int retval;
653
654 /* the opcode, writing data to R0 */
655 retval = aarch64_exec_opcode(
656 a8->armv8_common.arm.target,
657 opcode,
658 &dscr);
659 if (retval != ERROR_OK)
660 return retval;
661
662 /* write R0 to DCC */
663 retval = aarch64_exec_opcode(
664 a8->armv8_common.arm.target,
665 ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), /* msr dbgdtr_el0, x0 */
666 &dscr);
667 if (retval != ERROR_OK)
668 return retval;
669
670 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
671 }
672
/* Program one breakpoint (index 0..15) or watchpoint (index 16..31)
 * comparator: write the value register, then the control register.
 * Each BVR/BCR and WVR/WCR pair is 16 bytes apart in the register file. */
static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv8_common.debug_base;
	uint32_t cr = a8->armv8_common.debug_base;
	int retval;

	switch (index_t) {
	case 0 ... 15:	/* breakpoints */
		vr += CPUV8_DBG_BVR_BASE;
		cr += CPUV8_DBG_BCR_BASE;
		break;
	case 16 ... 31:	/* watchpoints */
		vr += CPUV8_DBG_WVR_BASE;
		cr += CPUV8_DBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	/* 16-byte stride per comparator */
	vr += 16 * index_t;
	cr += 16 * index_t;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
708
709 static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
710 {
711 struct aarch64_common *a = dpm_to_a8(dpm);
712 uint32_t cr;
713
714 switch (index_t) {
715 case 0 ... 15:
716 cr = a->armv8_common.debug_base + CPUV8_DBG_BCR_BASE;
717 break;
718 case 16 ... 31:
719 cr = a->armv8_common.debug_base + CPUV8_DBG_WCR_BASE;
720 index_t -= 16;
721 break;
722 default:
723 return ERROR_FAIL;
724 }
725 cr += 16 * index_t;
726
727 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
728
729 /* clear control register */
730 return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
731
732 }
733
/* Populate the DPM vtable with the aarch64 implementations above and
 * hand it to the generic armv8 DPM setup/initialize code. */
static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
{
	struct arm_dpm *dpm = &a8->armv8_common.dpm;
	int retval;

	dpm->arm = &a8->armv8_common.arm;
	dpm->didr = debug;

	dpm->prepare = aarch64_dpm_prepare;
	dpm->finish = aarch64_dpm_finish;

	dpm->instr_execute = aarch64_instr_execute;
	dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
	dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
	dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
	dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
	dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;

	dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
	dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
	dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
	dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;

	dpm->arm_reg_current = armv8_reg_current;

	dpm->bpwp_enable = aarch64_bpwp_enable;
	dpm->bpwp_disable = aarch64_bpwp_disable;

	retval = armv8_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = armv8_dpm_initialize(dpm);

	return retval;
}
768 static struct target *get_aarch64(struct target *target, int32_t coreid)
769 {
770 struct target_list *head;
771 struct target *curr;
772
773 head = target->head;
774 while (head != (struct target_list *)NULL) {
775 curr = head->target;
776 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
777 return curr;
778 head = head->next;
779 }
780 return target;
781 }
/* forward declaration: defined after the SMP helpers below */
static int aarch64_halt(struct target *target);
783
784 static int aarch64_halt_smp(struct target *target)
785 {
786 int retval = 0;
787 struct target_list *head;
788 struct target *curr;
789 head = target->head;
790 while (head != (struct target_list *)NULL) {
791 curr = head->target;
792 if ((curr != target) && (curr->state != TARGET_HALTED))
793 retval += aarch64_halt(curr);
794 head = head->next;
795 }
796 return retval;
797 }
798
799 static int update_halt_gdb(struct target *target)
800 {
801 int retval = 0;
802 if (target->gdb_service && target->gdb_service->core[0] == -1) {
803 target->gdb_service->target = target;
804 target->gdb_service->core[0] = target->coreid;
805 retval += aarch64_halt_smp(target);
806 }
807 return retval;
808 }
809
/*
 * Cortex-A8 Run control
 */

/* Poll the target: read DSCR, update target->state, and on a halting
 * debug event run debug entry, propagate the halt to SMP siblings and
 * fire the halt callbacks. */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later use by debug entry */
	aarch64->cpudbg_dscr = dscr;

	/* run mode 0x3 in DSCR means the core is halted in debug state */
	if (DSCR_RUN_MODE(dscr) == 0x3) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
879
/* Halt the core through the Cross Trigger Interface: enable and route
 * the CTI channels, enable halting debug mode (HDE) in DSCR, pulse the
 * halt request, then wait (up to 1 s) for DSCR to report the halt. */
static int aarch64_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv8_common *armv8 = target_to_armv8(target);

	/* enable CTI*/
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	if (retval != ERROR_OK)
		return retval;

	/* open the channel gate for channels 0 and 1 */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_GATE, 3);
	if (retval != ERROR_OK)
		return retval;

	/* route channel 0 to output trigger 0 (debug request) */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_OUTEN0, 1);
	if (retval != ERROR_OK)
		return retval;

	/* route channel 1 to output trigger 1 (restart request) */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_OUTEN1, 2);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * add HDE in halting debug mode
	 */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* pulse the halt event on channel 0 */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, 1);
	if (retval != ERROR_OK)
		return retval;

	/* acknowledge the halt trigger */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, 1);
	if (retval != ERROR_OK)
		return retval;


	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCRV8_HALT_MASK) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
949
/* Prepare a core for resuming: fix up and write back the pc (at
 * <address> unless current != 0), restore SCTLR and the register
 * context, and mark the register cache invalid.  Does NOT actually
 * restart the core; see aarch64_internal_restart. */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;	/* A32: 4-byte aligned */
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;	/* A64: 4-byte aligned */
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* NOTE(review): return value of the modeswitch is ignored -- confirm */
	dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}
#endif

	return retval;
}
1025
/* Restart a prepared core: pulse the CTI restart channel and wait (up
 * to 1 s) for DSCR to confirm, then mark the target running and the
 * register cache invalid. */
static int aarch64_internal_restart(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started.  Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* pulse the restart event on CTI channel 1 */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, 2);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_HDE) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1075
1076 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
1077 {
1078 int retval = 0;
1079 struct target_list *head;
1080 struct target *curr;
1081 uint64_t address;
1082 head = target->head;
1083 while (head != (struct target_list *)NULL) {
1084 curr = head->target;
1085 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1086 /* resume current address , not in step mode */
1087 retval += aarch64_internal_restore(curr, 1, &address,
1088 handle_breakpoints, 0);
1089 retval += aarch64_internal_restart(curr);
1090 }
1091 head = head->next;
1092
1093 }
1094 return retval;
1095 }
1096
1097 static int aarch64_resume(struct target *target, int current,
1098 target_addr_t address, int handle_breakpoints, int debug_execution)
1099 {
1100 int retval = 0;
1101 uint64_t addr = address;
1102
1103 /* dummy resume for smp toggle in order to reduce gdb impact */
1104 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1105 /* simulate a start and halt of target */
1106 target->gdb_service->target = NULL;
1107 target->gdb_service->core[0] = target->gdb_service->core[1];
1108 /* fake resume at next poll we play the target core[1], see poll*/
1109 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1110 return 0;
1111 }
1112 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
1113 debug_execution);
1114 if (target->smp) {
1115 target->gdb_service->core[0] = -1;
1116 retval = aarch64_restore_smp(target, handle_breakpoints);
1117 if (retval != ERROR_OK)
1118 return retval;
1119 }
1120 aarch64_internal_restart(target);
1121
1122 if (!debug_execution) {
1123 target->state = TARGET_RUNNING;
1124 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1125 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
1126 } else {
1127 target->state = TARGET_DEBUG_RUNNING;
1128 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1129 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
1130 }
1131
1132 return ERROR_OK;
1133 }
1134
1135 static int aarch64_debug_entry(struct target *target)
1136 {
1137 int retval = ERROR_OK;
1138 struct aarch64_common *aarch64 = target_to_aarch64(target);
1139 struct armv8_common *armv8 = target_to_armv8(target);
1140
1141 LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
1142
1143 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1144 * imprecise data aborts get discarded by issuing a Data
1145 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1146 */
1147
1148 /* make sure to clear all sticky errors */
1149 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1150 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1151 if (retval != ERROR_OK)
1152 return retval;
1153
1154 /* Examine debug reason */
1155 armv8_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
1156
1157 /* save address of instruction that triggered the watchpoint? */
1158 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1159 uint32_t tmp;
1160 uint64_t wfar = 0;
1161
1162 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1163 armv8->debug_base + CPUV8_DBG_WFAR1,
1164 &tmp);
1165 if (retval != ERROR_OK)
1166 return retval;
1167 wfar = tmp;
1168 wfar = (wfar << 32);
1169 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1170 armv8->debug_base + CPUV8_DBG_WFAR0,
1171 &tmp);
1172 if (retval != ERROR_OK)
1173 return retval;
1174 wfar |= tmp;
1175 armv8_dpm_report_wfar(&armv8->dpm, wfar);
1176 }
1177
1178 retval = armv8_dpm_read_current_registers(&armv8->dpm);
1179
1180 if (armv8->post_debug_entry) {
1181 retval = armv8->post_debug_entry(target);
1182 if (retval != ERROR_OK)
1183 return retval;
1184 }
1185
1186 return retval;
1187 }
1188
1189 static int aarch64_post_debug_entry(struct target *target)
1190 {
1191 struct aarch64_common *aarch64 = target_to_aarch64(target);
1192 struct armv8_common *armv8 = &aarch64->armv8_common;
1193 int retval;
1194
1195 mem_ap_write_atomic_u32(armv8->debug_ap,
1196 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1197 switch (armv8->arm.core_mode) {
1198 case ARMV8_64_EL0T:
1199 case ARMV8_64_EL1T:
1200 case ARMV8_64_EL1H:
1201 retval = armv8->arm.mrs(target, 3, /*op 0*/
1202 0, 0, /* op1, op2 */
1203 1, 0, /* CRn, CRm */
1204 &aarch64->system_control_reg);
1205 if (retval != ERROR_OK)
1206 return retval;
1207 break;
1208 case ARMV8_64_EL2T:
1209 case ARMV8_64_EL2H:
1210 retval = armv8->arm.mrs(target, 3, /*op 0*/
1211 4, 0, /* op1, op2 */
1212 1, 0, /* CRn, CRm */
1213 &aarch64->system_control_reg);
1214 if (retval != ERROR_OK)
1215 return retval;
1216 break;
1217 case ARMV8_64_EL3H:
1218 case ARMV8_64_EL3T:
1219 retval = armv8->arm.mrs(target, 3, /*op 0*/
1220 6, 0, /* op1, op2 */
1221 1, 0, /* CRn, CRm */
1222 &aarch64->system_control_reg);
1223 if (retval != ERROR_OK)
1224 return retval;
1225 break;
1226 default:
1227 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
1228 }
1229 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1230 aarch64->system_control_reg_curr = aarch64->system_control_reg;
1231
1232 if (armv8->armv8_mmu.armv8_cache.ctype == -1)
1233 armv8_identify_cache(target);
1234
1235 armv8->armv8_mmu.mmu_enabled =
1236 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1237 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1238 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1239 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1240 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1241 aarch64->curr_mode = armv8->arm.core_mode;
1242 return ERROR_OK;
1243 }
1244
1245 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1246 {
1247 struct armv8_common *armv8 = target_to_armv8(target);
1248 uint32_t dscr;
1249
1250 /* Read DSCR */
1251 int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1252 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1253 if (ERROR_OK != retval)
1254 return retval;
1255
1256 /* clear bitfield */
1257 dscr &= ~bit_mask;
1258 /* put new value */
1259 dscr |= value & bit_mask;
1260
1261 /* write new DSCR */
1262 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1263 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1264 return retval;
1265 }
1266
1267 static int aarch64_step(struct target *target, int current, target_addr_t address,
1268 int handle_breakpoints)
1269 {
1270 struct armv8_common *armv8 = target_to_armv8(target);
1271 int retval;
1272 uint32_t edecr;
1273
1274 if (target->state != TARGET_HALTED) {
1275 LOG_WARNING("target not halted");
1276 return ERROR_TARGET_NOT_HALTED;
1277 }
1278
1279 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1280 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1281 if (retval != ERROR_OK)
1282 return retval;
1283
1284 /* make sure EDECR.SS is not set when restoring the register */
1285 edecr &= ~0x4;
1286
1287 /* set EDECR.SS to enter hardware step mode */
1288 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1289 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1290 if (retval != ERROR_OK)
1291 return retval;
1292
1293 /* disable interrupts while stepping */
1294 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1295 if (retval != ERROR_OK)
1296 return ERROR_OK;
1297
1298 /* resume the target */
1299 retval = aarch64_resume(target, current, address, 0, 0);
1300 if (retval != ERROR_OK)
1301 return retval;
1302
1303 long long then = timeval_ms();
1304 while (target->state != TARGET_HALTED) {
1305 retval = aarch64_poll(target);
1306 if (retval != ERROR_OK)
1307 return retval;
1308 if (timeval_ms() > then + 1000) {
1309 LOG_ERROR("timeout waiting for target halt");
1310 return ERROR_FAIL;
1311 }
1312 }
1313
1314 /* restore EDECR */
1315 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1316 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1317 if (retval != ERROR_OK)
1318 return retval;
1319
1320 /* restore interrupts */
1321 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1322 if (retval != ERROR_OK)
1323 return ERROR_OK;
1324
1325 return ERROR_OK;
1326 }
1327
1328 static int aarch64_restore_context(struct target *target, bool bpwp)
1329 {
1330 struct armv8_common *armv8 = target_to_armv8(target);
1331
1332 LOG_DEBUG(" ");
1333
1334 if (armv8->pre_restore_context)
1335 armv8->pre_restore_context(target);
1336
1337 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1338
1339 }
1340
1341 /*
1342 * Cortex-A8 Breakpoint and watchpoint functions
1343 */
1344
1345 /* Setup hardware Breakpoint Register Pair */
1346 static int aarch64_set_breakpoint(struct target *target,
1347 struct breakpoint *breakpoint, uint8_t matchmode)
1348 {
1349 int retval;
1350 int brp_i = 0;
1351 uint32_t control;
1352 uint8_t byte_addr_select = 0x0F;
1353 struct aarch64_common *aarch64 = target_to_aarch64(target);
1354 struct armv8_common *armv8 = &aarch64->armv8_common;
1355 struct aarch64_brp *brp_list = aarch64->brp_list;
1356 uint32_t dscr;
1357
1358 if (breakpoint->set) {
1359 LOG_WARNING("breakpoint already set");
1360 return ERROR_OK;
1361 }
1362
1363 if (breakpoint->type == BKPT_HARD) {
1364 int64_t bpt_value;
1365 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1366 brp_i++;
1367 if (brp_i >= aarch64->brp_num) {
1368 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1369 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1370 }
1371 breakpoint->set = brp_i + 1;
1372 if (breakpoint->length == 2)
1373 byte_addr_select = (3 << (breakpoint->address & 0x02));
1374 control = ((matchmode & 0x7) << 20)
1375 | (1 << 13)
1376 | (byte_addr_select << 5)
1377 | (3 << 1) | 1;
1378 brp_list[brp_i].used = 1;
1379 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1380 brp_list[brp_i].control = control;
1381 bpt_value = brp_list[brp_i].value;
1382
1383 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1384 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1385 (uint32_t)(bpt_value & 0xFFFFFFFF));
1386 if (retval != ERROR_OK)
1387 return retval;
1388 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1389 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1390 (uint32_t)(bpt_value >> 32));
1391 if (retval != ERROR_OK)
1392 return retval;
1393
1394 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1395 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1396 brp_list[brp_i].control);
1397 if (retval != ERROR_OK)
1398 return retval;
1399 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1400 brp_list[brp_i].control,
1401 brp_list[brp_i].value);
1402
1403 } else if (breakpoint->type == BKPT_SOFT) {
1404 uint8_t code[4];
1405
1406 buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
1407 retval = target_read_memory(target,
1408 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1409 breakpoint->length, 1,
1410 breakpoint->orig_instr);
1411 if (retval != ERROR_OK)
1412 return retval;
1413
1414 armv8_cache_d_inner_flush_virt(armv8,
1415 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1416 breakpoint->length);
1417
1418 retval = target_write_memory(target,
1419 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1420 breakpoint->length, 1, code);
1421 if (retval != ERROR_OK)
1422 return retval;
1423
1424 armv8_cache_d_inner_flush_virt(armv8,
1425 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1426 breakpoint->length);
1427
1428 armv8_cache_i_inner_inval_virt(armv8,
1429 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1430 breakpoint->length);
1431
1432 breakpoint->set = 0x11; /* Any nice value but 0 */
1433 }
1434
1435 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1436 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1437 /* Ensure that halting debug mode is enable */
1438 dscr = dscr | DSCR_HDE;
1439 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1440 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1441 if (retval != ERROR_OK) {
1442 LOG_DEBUG("Failed to set DSCR.HDE");
1443 return retval;
1444 }
1445
1446 return ERROR_OK;
1447 }
1448
1449 static int aarch64_set_context_breakpoint(struct target *target,
1450 struct breakpoint *breakpoint, uint8_t matchmode)
1451 {
1452 int retval = ERROR_FAIL;
1453 int brp_i = 0;
1454 uint32_t control;
1455 uint8_t byte_addr_select = 0x0F;
1456 struct aarch64_common *aarch64 = target_to_aarch64(target);
1457 struct armv8_common *armv8 = &aarch64->armv8_common;
1458 struct aarch64_brp *brp_list = aarch64->brp_list;
1459
1460 if (breakpoint->set) {
1461 LOG_WARNING("breakpoint already set");
1462 return retval;
1463 }
1464 /*check available context BRPs*/
1465 while ((brp_list[brp_i].used ||
1466 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1467 brp_i++;
1468
1469 if (brp_i >= aarch64->brp_num) {
1470 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1471 return ERROR_FAIL;
1472 }
1473
1474 breakpoint->set = brp_i + 1;
1475 control = ((matchmode & 0x7) << 20)
1476 | (1 << 13)
1477 | (byte_addr_select << 5)
1478 | (3 << 1) | 1;
1479 brp_list[brp_i].used = 1;
1480 brp_list[brp_i].value = (breakpoint->asid);
1481 brp_list[brp_i].control = control;
1482 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1483 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1484 brp_list[brp_i].value);
1485 if (retval != ERROR_OK)
1486 return retval;
1487 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1488 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1489 brp_list[brp_i].control);
1490 if (retval != ERROR_OK)
1491 return retval;
1492 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1493 brp_list[brp_i].control,
1494 brp_list[brp_i].value);
1495 return ERROR_OK;
1496
1497 }
1498
1499 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1500 {
1501 int retval = ERROR_FAIL;
1502 int brp_1 = 0; /* holds the contextID pair */
1503 int brp_2 = 0; /* holds the IVA pair */
1504 uint32_t control_CTX, control_IVA;
1505 uint8_t CTX_byte_addr_select = 0x0F;
1506 uint8_t IVA_byte_addr_select = 0x0F;
1507 uint8_t CTX_machmode = 0x03;
1508 uint8_t IVA_machmode = 0x01;
1509 struct aarch64_common *aarch64 = target_to_aarch64(target);
1510 struct armv8_common *armv8 = &aarch64->armv8_common;
1511 struct aarch64_brp *brp_list = aarch64->brp_list;
1512
1513 if (breakpoint->set) {
1514 LOG_WARNING("breakpoint already set");
1515 return retval;
1516 }
1517 /*check available context BRPs*/
1518 while ((brp_list[brp_1].used ||
1519 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1520 brp_1++;
1521
1522 printf("brp(CTX) found num: %d\n", brp_1);
1523 if (brp_1 >= aarch64->brp_num) {
1524 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1525 return ERROR_FAIL;
1526 }
1527
1528 while ((brp_list[brp_2].used ||
1529 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1530 brp_2++;
1531
1532 printf("brp(IVA) found num: %d\n", brp_2);
1533 if (brp_2 >= aarch64->brp_num) {
1534 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1535 return ERROR_FAIL;
1536 }
1537
1538 breakpoint->set = brp_1 + 1;
1539 breakpoint->linked_BRP = brp_2;
1540 control_CTX = ((CTX_machmode & 0x7) << 20)
1541 | (brp_2 << 16)
1542 | (0 << 14)
1543 | (CTX_byte_addr_select << 5)
1544 | (3 << 1) | 1;
1545 brp_list[brp_1].used = 1;
1546 brp_list[brp_1].value = (breakpoint->asid);
1547 brp_list[brp_1].control = control_CTX;
1548 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1549 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1550 brp_list[brp_1].value);
1551 if (retval != ERROR_OK)
1552 return retval;
1553 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1554 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1555 brp_list[brp_1].control);
1556 if (retval != ERROR_OK)
1557 return retval;
1558
1559 control_IVA = ((IVA_machmode & 0x7) << 20)
1560 | (brp_1 << 16)
1561 | (1 << 13)
1562 | (IVA_byte_addr_select << 5)
1563 | (3 << 1) | 1;
1564 brp_list[brp_2].used = 1;
1565 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1566 brp_list[brp_2].control = control_IVA;
1567 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1568 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1569 brp_list[brp_2].value & 0xFFFFFFFF);
1570 if (retval != ERROR_OK)
1571 return retval;
1572 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1573 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1574 brp_list[brp_2].value >> 32);
1575 if (retval != ERROR_OK)
1576 return retval;
1577 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1578 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1579 brp_list[brp_2].control);
1580 if (retval != ERROR_OK)
1581 return retval;
1582
1583 return ERROR_OK;
1584 }
1585
/*
 * Remove a previously programmed breakpoint.
 * Hardware breakpoints: clear the BRP bookkeeping entry, then zero
 * BCR (disabling the comparator) and both BVR words; hybrid
 * breakpoints tear down both the context-ID BRP and the linked IVA
 * BRP. Soft breakpoints: write breakpoint->orig_instr back over the
 * HLT opcode, with d-cache flushes and an i-cache invalidation around
 * the write so the core refetches the restored instruction.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* both address and asid set means this was a hybrid breakpoint
		 * occupying two linked BRPs */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* clear bookkeeping first; the zeroed fields are what gets
			 * written to the debug registers below */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): the BVR high word is written with the same
			 * truncated value as the low word; value was zeroed above,
			 * so both writes clear the register either way */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* tear down the linked IVA BRP the same way */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: a single BRP to clear */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* make the d-cache coherent before rewriting the opcode */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* push the restored opcode to memory and drop stale i-cache lines */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1718
1719 static int aarch64_add_breakpoint(struct target *target,
1720 struct breakpoint *breakpoint)
1721 {
1722 struct aarch64_common *aarch64 = target_to_aarch64(target);
1723
1724 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1725 LOG_INFO("no hardware breakpoint available");
1726 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1727 }
1728
1729 if (breakpoint->type == BKPT_HARD)
1730 aarch64->brp_num_available--;
1731
1732 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1733 }
1734
1735 static int aarch64_add_context_breakpoint(struct target *target,
1736 struct breakpoint *breakpoint)
1737 {
1738 struct aarch64_common *aarch64 = target_to_aarch64(target);
1739
1740 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1741 LOG_INFO("no hardware breakpoint available");
1742 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1743 }
1744
1745 if (breakpoint->type == BKPT_HARD)
1746 aarch64->brp_num_available--;
1747
1748 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1749 }
1750
1751 static int aarch64_add_hybrid_breakpoint(struct target *target,
1752 struct breakpoint *breakpoint)
1753 {
1754 struct aarch64_common *aarch64 = target_to_aarch64(target);
1755
1756 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1757 LOG_INFO("no hardware breakpoint available");
1758 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1759 }
1760
1761 if (breakpoint->type == BKPT_HARD)
1762 aarch64->brp_num_available--;
1763
1764 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1765 }
1766
1767
1768 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1769 {
1770 struct aarch64_common *aarch64 = target_to_aarch64(target);
1771
1772 #if 0
1773 /* It is perfectly possible to remove breakpoints while the target is running */
1774 if (target->state != TARGET_HALTED) {
1775 LOG_WARNING("target not halted");
1776 return ERROR_TARGET_NOT_HALTED;
1777 }
1778 #endif
1779
1780 if (breakpoint->set) {
1781 aarch64_unset_breakpoint(target, breakpoint);
1782 if (breakpoint->type == BKPT_HARD)
1783 aarch64->brp_num_available++;
1784 }
1785
1786 return ERROR_OK;
1787 }
1788
1789 /*
1790 * Cortex-A8 Reset functions
1791 */
1792
/*
 * Assert reset on the target: prefer a board-level RESET_ASSERT event
 * handler, fall back to SRST via JTAG, and fail when neither is
 * available. The register cache is invalidated and the target state
 * set to TARGET_RESET on success.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		/* no event handler and no SRST line: nothing we can drive */
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	register_cache_invalidate(armv8->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1821
1822 static int aarch64_deassert_reset(struct target *target)
1823 {
1824 int retval;
1825
1826 LOG_DEBUG(" ");
1827
1828 /* be certain SRST is off */
1829 jtag_add_reset(0, 0);
1830
1831 retval = aarch64_poll(target);
1832 if (retval != ERROR_OK)
1833 return retval;
1834
1835 if (target->reset_halt) {
1836 if (target->state != TARGET_HALTED) {
1837 LOG_WARNING("%s: ran after reset and before halt ...",
1838 target_name(target));
1839 retval = target_halt(target);
1840 if (retval != ERROR_OK)
1841 return retval;
1842 }
1843 }
1844
1845 return ERROR_OK;
1846 }
1847
1848 static int aarch64_write_apb_ap_memory(struct target *target,
1849 uint64_t address, uint32_t size,
1850 uint32_t count, const uint8_t *buffer)
1851 {
1852 /* write memory through APB-AP */
1853 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1854 struct armv8_common *armv8 = target_to_armv8(target);
1855 struct arm *arm = &armv8->arm;
1856 int total_bytes = count * size;
1857 int total_u32;
1858 int start_byte = address & 0x3;
1859 int end_byte = (address + total_bytes) & 0x3;
1860 struct reg *reg;
1861 uint32_t dscr;
1862 uint8_t *tmp_buff = NULL;
1863
1864 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1865 address, size, count);
1866 if (target->state != TARGET_HALTED) {
1867 LOG_WARNING("target not halted");
1868 return ERROR_TARGET_NOT_HALTED;
1869 }
1870
1871 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1872
1873 /* Mark register R0 as dirty, as it will be used
1874 * for transferring the data.
1875 * It will be restored automatically when exiting
1876 * debug mode
1877 */
1878 reg = armv8_reg_current(arm, 1);
1879 reg->dirty = true;
1880
1881 reg = armv8_reg_current(arm, 0);
1882 reg->dirty = true;
1883
1884 /* clear any abort */
1885 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1886 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1887 if (retval != ERROR_OK)
1888 return retval;
1889
1890
1891 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1892
1893 /* The algorithm only copies 32 bit words, so the buffer
1894 * should be expanded to include the words at either end.
1895 * The first and last words will be read first to avoid
1896 * corruption if needed.
1897 */
1898 tmp_buff = malloc(total_u32 * 4);
1899
1900 if ((start_byte != 0) && (total_u32 > 1)) {
1901 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1902 * the other bytes in the word.
1903 */
1904 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1905 if (retval != ERROR_OK)
1906 goto error_free_buff_w;
1907 }
1908
1909 /* If end of write is not aligned, or the write is less than 4 bytes */
1910 if ((end_byte != 0) ||
1911 ((total_u32 == 1) && (total_bytes != 4))) {
1912
1913 /* Read the last word to avoid corruption during 32 bit write */
1914 int mem_offset = (total_u32-1) * 4;
1915 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1916 if (retval != ERROR_OK)
1917 goto error_free_buff_w;
1918 }
1919
1920 /* Copy the write buffer over the top of the temporary buffer */
1921 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1922
1923 /* We now have a 32 bit aligned buffer that can be written */
1924
1925 /* Read DSCR */
1926 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1927 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1928 if (retval != ERROR_OK)
1929 goto error_free_buff_w;
1930
1931 /* Set Normal access mode */
1932 dscr = (dscr & ~DSCR_MA);
1933 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1934 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1935
1936 if (arm->core_state == ARM_STATE_AARCH64) {
1937 /* Write X0 with value 'address' using write procedure */
1938 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1939 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1940 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1941 retval += aarch64_exec_opcode(target,
1942 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1943 } else {
1944 /* Write R0 with value 'address' using write procedure */
1945 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1946 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1947 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1948 retval += aarch64_exec_opcode(target,
1949 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1950
1951 }
1952 /* Step 1.d - Change DCC to memory mode */
1953 dscr = dscr | DSCR_MA;
1954 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1955 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1956 if (retval != ERROR_OK)
1957 goto error_unset_dtr_w;
1958
1959
1960 /* Step 2.a - Do the write */
1961 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1962 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1963 if (retval != ERROR_OK)
1964 goto error_unset_dtr_w;
1965
1966 /* Step 3.a - Switch DTR mode back to Normal mode */
1967 dscr = (dscr & ~DSCR_MA);
1968 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1969 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1970 if (retval != ERROR_OK)
1971 goto error_unset_dtr_w;
1972
1973 /* Check for sticky abort flags in the DSCR */
1974 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1975 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1976 if (retval != ERROR_OK)
1977 goto error_free_buff_w;
1978 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1979 /* Abort occurred - clear it and exit */
1980 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1981 mem_ap_write_atomic_u32(armv8->debug_ap,
1982 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1983 goto error_free_buff_w;
1984 }
1985
1986 /* Done */
1987 free(tmp_buff);
1988 return ERROR_OK;
1989
1990 error_unset_dtr_w:
1991 /* Unset DTR mode */
1992 mem_ap_read_atomic_u32(armv8->debug_ap,
1993 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1994 dscr = (dscr & ~DSCR_MA);
1995 mem_ap_write_atomic_u32(armv8->debug_ap,
1996 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1997 error_free_buff_w:
1998 LOG_ERROR("error");
1999 free(tmp_buff);
2000 return ERROR_FAIL;
2001 }
2002
/*
 * Read target memory through the APB-AP using the DCC in memory-access mode.
 *
 * The core is made to execute a load via the DTR: X0/R0 holds the address and
 * each read of DBGDTRTX re-issues the load and post-increments X0 by 4 (see
 * ARM DDI0487A.g, chapter J9.1, referenced below).  Transfers are always done
 * as aligned 32-bit words; unaligned head/tail bytes are handled by reading
 * into a temporary over-sized buffer and copying out the requested span.
 *
 * @param target  the halted target to read from
 * @param address start address (may be unaligned)
 * @param size    access size in bytes per element
 * @param count   number of elements
 * @param buffer  destination for size*count bytes
 * @return ERROR_OK on success, ERROR_TARGET_NOT_HALTED or ERROR_FAIL otherwise
 */
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	/* misalignment of the first/last byte within a 32-bit word */
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;
	uint32_t value;

	LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%" PRIu32,
			address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* number of whole words needed to cover the (possibly unaligned) span */
	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register X0, X1 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* clear any abort */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += aarch64_exec_opcode(target, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += aarch64_exec_opcode(target, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* AArch32 state: same sequence using the T32-encoded CP14 transfers */
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += aarch64_exec_opcode(target,
				T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += aarch64_exec_opcode(target,
				T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr);
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	}
	/* retval accumulated all step results above; any failure aborts here */
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
			armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode: best-effort restore of Normal access mode on failure */
	mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}
2170
2171 static int aarch64_read_phys_memory(struct target *target,
2172 target_addr_t address, uint32_t size,
2173 uint32_t count, uint8_t *buffer)
2174 {
2175 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2176 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
2177 address, size, count);
2178
2179 if (count && buffer) {
2180 /* read memory through APB-AP */
2181 retval = aarch64_mmu_modify(target, 0);
2182 if (retval != ERROR_OK)
2183 return retval;
2184 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2185 }
2186 return retval;
2187 }
2188
2189 static int aarch64_read_memory(struct target *target, target_addr_t address,
2190 uint32_t size, uint32_t count, uint8_t *buffer)
2191 {
2192 int mmu_enabled = 0;
2193 int retval;
2194
2195 /* aarch64 handles unaligned memory access */
2196 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2197 size, count);
2198
2199 /* determine if MMU was enabled on target stop */
2200 retval = aarch64_mmu(target, &mmu_enabled);
2201 if (retval != ERROR_OK)
2202 return retval;
2203
2204 if (mmu_enabled) {
2205 retval = aarch64_check_address(target, address);
2206 if (retval != ERROR_OK)
2207 return retval;
2208 /* enable MMU as we could have disabled it for phys access */
2209 retval = aarch64_mmu_modify(target, 1);
2210 if (retval != ERROR_OK)
2211 return retval;
2212 }
2213 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2214 }
2215
2216 static int aarch64_write_phys_memory(struct target *target,
2217 target_addr_t address, uint32_t size,
2218 uint32_t count, const uint8_t *buffer)
2219 {
2220 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2221
2222 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2223 size, count);
2224
2225 if (count && buffer) {
2226 /* write memory through APB-AP */
2227 retval = aarch64_mmu_modify(target, 0);
2228 if (retval != ERROR_OK)
2229 return retval;
2230 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2231 }
2232
2233 return retval;
2234 }
2235
2236 static int aarch64_write_memory(struct target *target, target_addr_t address,
2237 uint32_t size, uint32_t count, const uint8_t *buffer)
2238 {
2239 int mmu_enabled = 0;
2240 int retval;
2241
2242 /* aarch64 handles unaligned memory access */
2243 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
2244 "; count %" PRId32, address, size, count);
2245
2246 /* determine if MMU was enabled on target stop */
2247 retval = aarch64_mmu(target, &mmu_enabled);
2248 if (retval != ERROR_OK)
2249 return retval;
2250
2251 if (mmu_enabled) {
2252 retval = aarch64_check_address(target, address);
2253 if (retval != ERROR_OK)
2254 return retval;
2255 /* enable MMU as we could have disabled it for phys access */
2256 retval = aarch64_mmu_modify(target, 1);
2257 if (retval != ERROR_OK)
2258 return retval;
2259 }
2260 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2261 }
2262
2263 static int aarch64_handle_target_request(void *priv)
2264 {
2265 struct target *target = priv;
2266 struct armv8_common *armv8 = target_to_armv8(target);
2267 int retval;
2268
2269 if (!target_was_examined(target))
2270 return ERROR_OK;
2271 if (!target->dbg_msg_enabled)
2272 return ERROR_OK;
2273
2274 if (target->state == TARGET_RUNNING) {
2275 uint32_t request;
2276 uint32_t dscr;
2277 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2278 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2279
2280 /* check if we have data */
2281 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2282 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2283 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2284 if (retval == ERROR_OK) {
2285 target_request(target, request);
2286 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2287 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2288 }
2289 }
2290 }
2291
2292 return ERROR_OK;
2293 }
2294
2295 static int aarch64_examine_first(struct target *target)
2296 {
2297 struct aarch64_common *aarch64 = target_to_aarch64(target);
2298 struct armv8_common *armv8 = &aarch64->armv8_common;
2299 struct adiv5_dap *swjdp = armv8->arm.dap;
2300 int i;
2301 int retval = ERROR_OK;
2302 uint64_t debug, ttypr;
2303 uint32_t cpuid;
2304 uint32_t tmp0, tmp1;
2305 debug = ttypr = cpuid = 0;
2306
2307 /* We do one extra read to ensure DAP is configured,
2308 * we call ahbap_debugport_init(swjdp) instead
2309 */
2310 retval = dap_dp_init(swjdp);
2311 if (retval != ERROR_OK)
2312 return retval;
2313
2314 /* Search for the APB-AB - it is needed for access to debug registers */
2315 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2316 if (retval != ERROR_OK) {
2317 LOG_ERROR("Could not find APB-AP for debug access");
2318 return retval;
2319 }
2320
2321 retval = mem_ap_init(armv8->debug_ap);
2322 if (retval != ERROR_OK) {
2323 LOG_ERROR("Could not initialize the APB-AP");
2324 return retval;
2325 }
2326
2327 armv8->debug_ap->memaccess_tck = 80;
2328
2329 if (!target->dbgbase_set) {
2330 uint32_t dbgbase;
2331 /* Get ROM Table base */
2332 uint32_t apid;
2333 int32_t coreidx = target->coreid;
2334 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2335 if (retval != ERROR_OK)
2336 return retval;
2337 /* Lookup 0x15 -- Processor DAP */
2338 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2339 &armv8->debug_base, &coreidx);
2340 if (retval != ERROR_OK)
2341 return retval;
2342 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2343 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2344 } else
2345 armv8->debug_base = target->dbgbase;
2346
2347 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2348 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
2349 if (retval != ERROR_OK) {
2350 LOG_DEBUG("LOCK debug access fail");
2351 return retval;
2352 }
2353
2354 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2355 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2356 if (retval != ERROR_OK) {
2357 LOG_DEBUG("Examine %s failed", "oslock");
2358 return retval;
2359 }
2360
2361 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2362 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2363 if (retval != ERROR_OK) {
2364 LOG_DEBUG("Examine %s failed", "CPUID");
2365 return retval;
2366 }
2367
2368 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2369 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2370 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2371 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2372 if (retval != ERROR_OK) {
2373 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2374 return retval;
2375 }
2376 ttypr |= tmp1;
2377 ttypr = (ttypr << 32) | tmp0;
2378
2379 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2380 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
2381 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2382 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
2383 if (retval != ERROR_OK) {
2384 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2385 return retval;
2386 }
2387 debug |= tmp1;
2388 debug = (debug << 32) | tmp0;
2389
2390 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2391 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2392 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2393
2394 if (target->ctibase == 0) {
2395 /* assume a v8 rom table layout */
2396 armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
2397 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
2398 } else
2399 armv8->cti_base = target->ctibase;
2400
2401 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2402 armv8->cti_base + CTI_UNLOCK , 0xC5ACCE55);
2403 if (retval != ERROR_OK)
2404 return retval;
2405
2406
2407 armv8->arm.core_type = ARM_MODE_MON;
2408 retval = aarch64_dpm_setup(aarch64, debug);
2409 if (retval != ERROR_OK)
2410 return retval;
2411
2412 /* Setup Breakpoint Register Pairs */
2413 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2414 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2415 aarch64->brp_num_available = aarch64->brp_num;
2416 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2417 for (i = 0; i < aarch64->brp_num; i++) {
2418 aarch64->brp_list[i].used = 0;
2419 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2420 aarch64->brp_list[i].type = BRP_NORMAL;
2421 else
2422 aarch64->brp_list[i].type = BRP_CONTEXT;
2423 aarch64->brp_list[i].value = 0;
2424 aarch64->brp_list[i].control = 0;
2425 aarch64->brp_list[i].BRPn = i;
2426 }
2427
2428 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2429
2430 target_set_examined(target);
2431 return ERROR_OK;
2432 }
2433
2434 static int aarch64_examine(struct target *target)
2435 {
2436 int retval = ERROR_OK;
2437
2438 /* don't re-probe hardware after each reset */
2439 if (!target_was_examined(target))
2440 retval = aarch64_examine_first(target);
2441
2442 /* Configure core debug access */
2443 if (retval == ERROR_OK)
2444 retval = aarch64_init_debug_access(target);
2445
2446 return retval;
2447 }
2448
/*
 * aarch64 target creation and initialization
 */

2453 static int aarch64_init_target(struct command_context *cmd_ctx,
2454 struct target *target)
2455 {
2456 /* examine_first() does a bunch of this */
2457 return ERROR_OK;
2458 }
2459
2460 static int aarch64_init_arch_info(struct target *target,
2461 struct aarch64_common *aarch64, struct jtag_tap *tap)
2462 {
2463 struct armv8_common *armv8 = &aarch64->armv8_common;
2464 struct adiv5_dap *dap = armv8->arm.dap;
2465
2466 armv8->arm.dap = dap;
2467
2468 /* Setup struct aarch64_common */
2469 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2470 /* tap has no dap initialized */
2471 if (!tap->dap) {
2472 tap->dap = dap_init();
2473
2474 /* Leave (only) generic DAP stuff for debugport_init() */
2475 tap->dap->tap = tap;
2476 }
2477
2478 armv8->arm.dap = tap->dap;
2479
2480 aarch64->fast_reg_read = 0;
2481
2482 /* register arch-specific functions */
2483 armv8->examine_debug_reason = NULL;
2484
2485 armv8->post_debug_entry = aarch64_post_debug_entry;
2486
2487 armv8->pre_restore_context = NULL;
2488
2489 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2490
2491 /* REVISIT v7a setup should be in a v7a-specific routine */
2492 armv8_init_arch_info(target, armv8);
2493 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2494
2495 return ERROR_OK;
2496 }
2497
2498 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2499 {
2500 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2501
2502 return aarch64_init_arch_info(target, aarch64, target->tap);
2503 }
2504
2505 static int aarch64_mmu(struct target *target, int *enabled)
2506 {
2507 if (target->state != TARGET_HALTED) {
2508 LOG_ERROR("%s: target not halted", __func__);
2509 return ERROR_TARGET_INVALID;
2510 }
2511
2512 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2513 return ERROR_OK;
2514 }
2515
2516 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2517 target_addr_t *phys)
2518 {
2519 return armv8_mmu_translate_va(target, virt, phys);
2520 }
2521
2522 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2523 {
2524 struct target *target = get_current_target(CMD_CTX);
2525 struct armv8_common *armv8 = target_to_armv8(target);
2526
2527 return armv8_handle_cache_info_command(CMD_CTX,
2528 &armv8->armv8_mmu.armv8_cache);
2529 }
2530
2531
2532 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2533 {
2534 struct target *target = get_current_target(CMD_CTX);
2535 if (!target_was_examined(target)) {
2536 LOG_ERROR("target not examined yet");
2537 return ERROR_FAIL;
2538 }
2539
2540 return aarch64_init_debug_access(target);
2541 }
2542 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2543 {
2544 struct target *target = get_current_target(CMD_CTX);
2545 /* check target is an smp target */
2546 struct target_list *head;
2547 struct target *curr;
2548 head = target->head;
2549 target->smp = 0;
2550 if (head != (struct target_list *)NULL) {
2551 while (head != (struct target_list *)NULL) {
2552 curr = head->target;
2553 curr->smp = 0;
2554 head = head->next;
2555 }
2556 /* fixes the target display to the debugger */
2557 target->gdb_service->target = target;
2558 }
2559 return ERROR_OK;
2560 }
2561
2562 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2563 {
2564 struct target *target = get_current_target(CMD_CTX);
2565 struct target_list *head;
2566 struct target *curr;
2567 head = target->head;
2568 if (head != (struct target_list *)NULL) {
2569 target->smp = 1;
2570 while (head != (struct target_list *)NULL) {
2571 curr = head->target;
2572 curr->smp = 1;
2573 head = head->next;
2574 }
2575 }
2576 return ERROR_OK;
2577 }
2578
2579 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2580 {
2581 struct target *target = get_current_target(CMD_CTX);
2582 int retval = ERROR_OK;
2583 struct target_list *head;
2584 head = target->head;
2585 if (head != (struct target_list *)NULL) {
2586 if (CMD_ARGC == 1) {
2587 int coreid = 0;
2588 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2589 if (ERROR_OK != retval)
2590 return retval;
2591 target->gdb_service->core[1] = coreid;
2592
2593 }
2594 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2595 , target->gdb_service->core[1]);
2596 }
2597 return ERROR_OK;
2598 }
2599
2600 static const struct command_registration aarch64_exec_command_handlers[] = {
2601 {
2602 .name = "cache_info",
2603 .handler = aarch64_handle_cache_info_command,
2604 .mode = COMMAND_EXEC,
2605 .help = "display information about target caches",
2606 .usage = "",
2607 },
2608 {
2609 .name = "dbginit",
2610 .handler = aarch64_handle_dbginit_command,
2611 .mode = COMMAND_EXEC,
2612 .help = "Initialize core debug",
2613 .usage = "",
2614 },
2615 { .name = "smp_off",
2616 .handler = aarch64_handle_smp_off_command,
2617 .mode = COMMAND_EXEC,
2618 .help = "Stop smp handling",
2619 .usage = "",
2620 },
2621 {
2622 .name = "smp_on",
2623 .handler = aarch64_handle_smp_on_command,
2624 .mode = COMMAND_EXEC,
2625 .help = "Restart smp handling",
2626 .usage = "",
2627 },
2628 {
2629 .name = "smp_gdb",
2630 .handler = aarch64_handle_smp_gdb_command,
2631 .mode = COMMAND_EXEC,
2632 .help = "display/fix current core played to gdb",
2633 .usage = "",
2634 },
2635
2636
2637 COMMAND_REGISTRATION_DONE
2638 };
/* Top-level command chains for the aarch64 target: generic arm and armv8
 * commands plus the target-specific group above.
 * NOTE(review): the group is registered as "cortex_a" although this is the
 * aarch64 target — likely copied from the cortex_a driver.  Renaming it to
 * "aarch64" would be clearer but would break existing user scripts; confirm
 * before changing. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2655
/* Target type descriptor wiring the aarch64 implementation into the
 * generic OpenOCD target layer. */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* checksum/blank-check reuse the generic arm algorithm runners */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	/* watchpoints not implemented for this target yet */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)