aarch64: Correct target state for hardware step
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "arm_opcodes.h"
30 #include <helper/time_support.h>
31
32 static int aarch64_poll(struct target *target);
33 static int aarch64_debug_entry(struct target *target);
34 static int aarch64_restore_context(struct target *target, bool bpwp);
35 static int aarch64_set_breakpoint(struct target *target,
36 struct breakpoint *breakpoint, uint8_t matchmode);
37 static int aarch64_set_context_breakpoint(struct target *target,
38 struct breakpoint *breakpoint, uint8_t matchmode);
39 static int aarch64_set_hybrid_breakpoint(struct target *target,
40 struct breakpoint *breakpoint);
41 static int aarch64_unset_breakpoint(struct target *target,
42 struct breakpoint *breakpoint);
43 static int aarch64_mmu(struct target *target, int *enabled);
44 static int aarch64_virt2phys(struct target *target,
45 target_addr_t virt, target_addr_t *phys);
46 static int aarch64_read_apb_ab_memory(struct target *target,
47 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
48 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
49 uint32_t opcode, uint32_t data);
50
51 static int aarch64_restore_system_control_reg(struct target *target)
52 {
53 int retval = ERROR_OK;
54
55 struct aarch64_common *aarch64 = target_to_aarch64(target);
56 struct armv8_common *armv8 = target_to_armv8(target);
57
58 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
59 aarch64->system_control_reg_curr = aarch64->system_control_reg;
60 retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
61 0xd5181000, /* msr sctlr_el1, x0 */
62 aarch64->system_control_reg);
63 }
64
65 return retval;
66 }
67
68 /* Check the address before an aarch64_apb read/write access with the MMU
69 * on, to avoid a predictable APB data abort */
70 static int aarch64_check_address(struct target *target, uint32_t address)
71 {
72 /* TODO */
73 return ERROR_OK;
74 }
75 /* Modify system_control_reg (SCTLR_EL1) to enable or disable the MMU for:
76 * - virt2phys address conversion
77 * - reading or writing memory at a physical or virtual address */
78 static int aarch64_mmu_modify(struct target *target, int enable)
79 {
80 struct aarch64_common *aarch64 = target_to_aarch64(target);
81 struct armv8_common *armv8 = &aarch64->armv8_common;
82 int retval = ERROR_OK;
83
84 if (enable) {
85 /* refuse: the MMU was disabled when the target stopped */
86 if (!(aarch64->system_control_reg & 0x1U)) {
87 LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
88 return ERROR_FAIL;
89 }
90 if (!(aarch64->system_control_reg_curr & 0x1U)) {
91 aarch64->system_control_reg_curr |= 0x1U;
92 retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
93 0xd5181000,
94 aarch64->system_control_reg_curr);
95 }
96 } else {
97 if (aarch64->system_control_reg_curr & 0x4U) {
98 /* data cache is active */
99 aarch64->system_control_reg_curr &= ~0x4U;
100 /* the data cache must be flushed before it is disabled */
101 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
102 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
103 }
104 if ((aarch64->system_control_reg_curr & 0x1U)) {
105 aarch64->system_control_reg_curr &= ~0x1U;
106 retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
107 0xd5181000,
108 aarch64->system_control_reg_curr);
109 }
110 }
111 return retval;
112 }
113
114 /*
115 * Basic debug access, very low level; assumes state is saved
116 */
117 static int aarch64_init_debug_access(struct target *target)
118 {
119 struct armv8_common *armv8 = target_to_armv8(target);
120 int retval;
121 uint32_t dummy;
122
123 LOG_DEBUG(" ");
124
125 /* Unlock the debug registers for modification.
126 * The debug port might be uninitialised, so try twice. */
127 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
128 armv8->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
129 if (retval != ERROR_OK) {
130 /* try again */
131 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
132 armv8->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
133 if (retval == ERROR_OK)
134 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
135 }
136 if (retval != ERROR_OK)
137 return retval;
138 /* Clear Sticky Power Down status Bit in PRSR to enable access to
139 the registers in the Core Power Domain */
140 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
141 armv8->debug_base + CPUDBG_PRSR, &dummy);
142 if (retval != ERROR_OK)
143 return retval;
144
145 /* Enabling of instruction execution in debug mode is done in debug_entry code */
146
147 /* Resync breakpoint registers */
148
149 /* Since this is likely called from init or reset, update target state information*/
150 return aarch64_poll(target);
151 }
152
153 /* To reduce needless round-trips, pass in a pointer to the current
154 * DSCR value. Initialize it to zero if you just need to know the
155 * value on return from this function; or DSCR_INSTR_COMP if you
156 * happen to know that no instruction is pending.
157 */
158 static int aarch64_exec_opcode(struct target *target,
159 uint32_t opcode, uint32_t *dscr_p)
160 {
161 uint32_t dscr;
162 int retval;
163 struct armv8_common *armv8 = target_to_armv8(target);
164 dscr = dscr_p ? *dscr_p : 0;
165
166 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
167
168 /* Wait for InstrCompl bit to be set */
169 long long then = timeval_ms();
170 while ((dscr & DSCR_INSTR_COMP) == 0) {
171 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
172 armv8->debug_base + CPUDBG_DSCR, &dscr);
173 if (retval != ERROR_OK) {
174 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
175 return retval;
176 }
177 if (timeval_ms() > then + 1000) {
178 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
179 return ERROR_FAIL;
180 }
181 }
182
183 retval = mem_ap_write_u32(armv8->debug_ap,
184 armv8->debug_base + CPUDBG_ITR, opcode);
185 if (retval != ERROR_OK)
186 return retval;
187
188 then = timeval_ms();
189 do {
190 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
191 armv8->debug_base + CPUDBG_DSCR, &dscr);
192 if (retval != ERROR_OK) {
193 LOG_ERROR("Could not read DSCR register");
194 return retval;
195 }
196 if (timeval_ms() > then + 1000) {
197 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
198 return ERROR_FAIL;
199 }
200 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
201
202 if (dscr_p)
203 *dscr_p = dscr;
204
205 return retval;
206 }
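
/* Minimal usage sketch (hypothetical caller): track DSCR locally across
 * several operations so each call can skip the initial InstrCompl poll:
 *
 *	uint32_t dscr = DSCR_INSTR_COMP;	// no instruction pending
 *	int retval = aarch64_exec_opcode(target, opcode1, &dscr);
 *	if (retval == ERROR_OK)
 *		retval = aarch64_exec_opcode(target, opcode2, &dscr);
 */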
207
208 /* Write to memory mapped registers directly with no cache or mmu handling */
209 static int aarch64_dap_write_memap_register_u32(struct target *target,
210 uint32_t address,
211 uint32_t value)
212 {
213 int retval;
214 struct armv8_common *armv8 = target_to_armv8(target);
215
216 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
217
218 return retval;
219 }
220
221 /*
222 * AARCH64 implementation of Debug Programmer's Model
223 *
224 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
225 * so there's no need to poll for it before executing an instruction.
226 *
227 * NOTE that in several of these cases the "stall" mode might be useful.
228 * It'd let us queue a few operations together... prepare/finish might
229 * be the places to enable/disable that mode.
230 */
231
232 static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
233 {
234 return container_of(dpm, struct aarch64_common, armv8_common.dpm);
235 }
236
237 static int aarch64_write_dcc(struct aarch64_common *a8, uint32_t data)
238 {
239 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
240 return mem_ap_write_u32(a8->armv8_common.debug_ap,
241 a8->armv8_common.debug_base + CPUDBG_DTRRX, data);
242 }
243
244 static int aarch64_write_dcc_64(struct aarch64_common *a8, uint64_t data)
245 {
246 int ret;
247 LOG_DEBUG("write DCC 0x%08" PRIx32, (unsigned)data);
248 LOG_DEBUG("write DCC 0x%08" PRIx32, (unsigned)(data >> 32));
249 ret = mem_ap_write_u32(a8->armv8_common.debug_ap,
250 a8->armv8_common.debug_base + CPUDBG_DTRRX, data);
251 ret += mem_ap_write_u32(a8->armv8_common.debug_ap,
252 a8->armv8_common.debug_base + CPUDBG_DTRTX, data >> 32);
253 return ret;
254 }
255
256 static int aarch64_read_dcc(struct aarch64_common *a8, uint32_t *data,
257 uint32_t *dscr_p)
258 {
259 uint32_t dscr = DSCR_INSTR_COMP;
260 int retval;
261
262 if (dscr_p)
263 dscr = *dscr_p;
264
265 /* Wait for DTRTXfull (data from the core is ready) */
266 long long then = timeval_ms();
267 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
268 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
269 a8->armv8_common.debug_base + CPUDBG_DSCR,
270 &dscr);
271 if (retval != ERROR_OK)
272 return retval;
273 if (timeval_ms() > then + 1000) {
274 LOG_ERROR("Timeout waiting for read dcc");
275 return ERROR_FAIL;
276 }
277 }
278
279 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
280 a8->armv8_common.debug_base + CPUDBG_DTRTX,
281 data);
282 if (retval != ERROR_OK)
283 return retval;
284 LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
285
286 if (dscr_p)
287 *dscr_p = dscr;
288
289 return retval;
290 }
291 static int aarch64_read_dcc_64(struct aarch64_common *a8, uint64_t *data,
292 uint32_t *dscr_p)
293 {
294 uint32_t dscr = DSCR_INSTR_COMP;
295 uint32_t higher;
296 int retval;
297
298 if (dscr_p)
299 dscr = *dscr_p;
300
301 /* Wait for DTRTXfull (data from the core is ready) */
302 long long then = timeval_ms();
303 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
304 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
305 a8->armv8_common.debug_base + CPUDBG_DSCR,
306 &dscr);
307 if (retval != ERROR_OK)
308 return retval;
309 if (timeval_ms() > then + 1000) {
310 LOG_ERROR("Timeout waiting for read dcc");
311 return ERROR_FAIL;
312 }
313 }
314
315 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
316 a8->armv8_common.debug_base + CPUDBG_DTRTX,
317 (uint32_t *)data);
318 if (retval != ERROR_OK)
319 return retval;
320
321 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
322 a8->armv8_common.debug_base + CPUDBG_DTRRX,
323 &higher);
324 if (retval != ERROR_OK)
325 return retval;
326
327 *data = *(uint32_t *)data | (uint64_t)higher << 32;
328 LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
329
330 if (dscr_p)
331 *dscr_p = dscr;
332
333 return retval;
334 }
335
336 static int aarch64_dpm_prepare(struct arm_dpm *dpm)
337 {
338 struct aarch64_common *a8 = dpm_to_a8(dpm);
339 uint32_t dscr;
340 int retval;
341
342 /* set up invariant: INSTR_COMP is set after every DPM operation */
343 long long then = timeval_ms();
344 for (;; ) {
345 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
346 a8->armv8_common.debug_base + CPUDBG_DSCR,
347 &dscr);
348 if (retval != ERROR_OK)
349 return retval;
350 if ((dscr & DSCR_INSTR_COMP) != 0)
351 break;
352 if (timeval_ms() > then + 1000) {
353 LOG_ERROR("Timeout waiting for dpm prepare");
354 return ERROR_FAIL;
355 }
356 }
357
358 /* this "should never happen" ... */
359 if (dscr & DSCR_DTR_RX_FULL) {
360 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
361 /* Clear DCCRX */
362 retval = aarch64_exec_opcode(
363 a8->armv8_common.arm.target,
364 0xd5130400,
365 &dscr);
366 if (retval != ERROR_OK)
367 return retval;
368 }
369
370 return retval;
371 }
372
373 static int aarch64_dpm_finish(struct arm_dpm *dpm)
374 {
375 /* REVISIT what could be done here? */
376 return ERROR_OK;
377 }
378
379 static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
380 uint32_t opcode, uint32_t data)
381 {
382 struct aarch64_common *a8 = dpm_to_a8(dpm);
383 int retval;
384 uint32_t dscr = DSCR_INSTR_COMP;
385
386 retval = aarch64_write_dcc(a8, data);
387 if (retval != ERROR_OK)
388 return retval;
389
390 return aarch64_exec_opcode(
391 a8->armv8_common.arm.target,
392 opcode,
393 &dscr);
394 }
395
396 static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
397 uint32_t opcode, uint64_t data)
398 {
399 struct aarch64_common *a8 = dpm_to_a8(dpm);
400 int retval;
401 uint32_t dscr = DSCR_INSTR_COMP;
402
403 retval = aarch64_write_dcc_64(a8, data);
404 if (retval != ERROR_OK)
405 return retval;
406
407 return aarch64_exec_opcode(
408 a8->armv8_common.arm.target,
409 opcode,
410 &dscr);
411 }
412
413 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
414 uint32_t opcode, uint32_t data)
415 {
416 struct aarch64_common *a8 = dpm_to_a8(dpm);
417 uint32_t dscr = DSCR_INSTR_COMP;
418 int retval;
419
420 retval = aarch64_write_dcc(a8, data);
421 if (retval != ERROR_OK)
422 return retval;
423
424 retval = aarch64_exec_opcode(
425 a8->armv8_common.arm.target,
426 0xd5330500, /* mrs x0, dbgdtrrx_el0 */
427 &dscr);
428 if (retval != ERROR_OK)
429 return retval;
430
431 /* then the opcode, taking data from R0 */
432 retval = aarch64_exec_opcode(
433 a8->armv8_common.arm.target,
434 opcode,
435 &dscr);
436
437 return retval;
438 }
439
440 static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
441 uint32_t opcode, uint64_t data)
442 {
443 struct aarch64_common *a8 = dpm_to_a8(dpm);
444 uint32_t dscr = DSCR_INSTR_COMP;
445 int retval;
446
447 retval = aarch64_write_dcc_64(a8, data);
448 if (retval != ERROR_OK)
449 return retval;
450
451 retval = aarch64_exec_opcode(
452 a8->armv8_common.arm.target,
453 0xd5330400, /* mrs x0, dbgdtr_el0 */
454 &dscr);
455 if (retval != ERROR_OK)
456 return retval;
457
458 /* then the opcode, taking data from R0 */
459 retval = aarch64_exec_opcode(
460 a8->armv8_common.arm.target,
461 opcode,
462 &dscr);
463
464 return retval;
465 }
466
467 static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
468 {
469 struct target *target = dpm->arm->target;
470 uint32_t dscr = DSCR_INSTR_COMP;
471
472 /* "Prefetch flush" after modifying execution status in CPSR */
473 return aarch64_exec_opcode(target,
474 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
475 &dscr);
476 }
477
478 static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
479 uint32_t opcode, uint32_t *data)
480 {
481 struct aarch64_common *a8 = dpm_to_a8(dpm);
482 int retval;
483 uint32_t dscr = DSCR_INSTR_COMP;
484
485 /* the opcode, writing data to DCC */
486 retval = aarch64_exec_opcode(
487 a8->armv8_common.arm.target,
488 opcode,
489 &dscr);
490 if (retval != ERROR_OK)
491 return retval;
492
493 return aarch64_read_dcc(a8, data, &dscr);
494 }
495
496 static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
497 uint32_t opcode, uint64_t *data)
498 {
499 struct aarch64_common *a8 = dpm_to_a8(dpm);
500 int retval;
501 uint32_t dscr = DSCR_INSTR_COMP;
502
503 /* the opcode, writing data to DCC */
504 retval = aarch64_exec_opcode(
505 a8->armv8_common.arm.target,
506 opcode,
507 &dscr);
508 if (retval != ERROR_OK)
509 return retval;
510
511 return aarch64_read_dcc_64(a8, data, &dscr);
512 }
513
514 static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
515 uint32_t opcode, uint32_t *data)
516 {
517 struct aarch64_common *a8 = dpm_to_a8(dpm);
518 uint32_t dscr = DSCR_INSTR_COMP;
519 int retval;
520
521 /* the opcode, writing data to R0 */
522 retval = aarch64_exec_opcode(
523 a8->armv8_common.arm.target,
524 opcode,
525 &dscr);
526 if (retval != ERROR_OK)
527 return retval;
528
529 /* write R0 to DCC */
530 retval = aarch64_exec_opcode(
531 a8->armv8_common.arm.target,
532 0xd5130400, /* msr dbgdtr_el0, x0 */
533 &dscr);
534 if (retval != ERROR_OK)
535 return retval;
536
537 return aarch64_read_dcc(a8, data, &dscr);
538 }
539
540 static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
541 uint32_t opcode, uint64_t *data)
542 {
543 struct aarch64_common *a8 = dpm_to_a8(dpm);
544 uint32_t dscr = DSCR_INSTR_COMP;
545 int retval;
546
547 /* the opcode, writing data to R0 */
548 retval = aarch64_exec_opcode(
549 a8->armv8_common.arm.target,
550 opcode,
551 &dscr);
552 if (retval != ERROR_OK)
553 return retval;
554
555 /* write R0 to DCC */
556 retval = aarch64_exec_opcode(
557 a8->armv8_common.arm.target,
558 0xd5130400, /* msr dbgdtr_el0, x0 */
559 &dscr);
560 if (retval != ERROR_OK)
561 return retval;
562
563 return aarch64_read_dcc_64(a8, data, &dscr);
564 }
565
566 static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
567 uint32_t addr, uint32_t control)
568 {
569 struct aarch64_common *a8 = dpm_to_a8(dpm);
570 uint32_t vr = a8->armv8_common.debug_base;
571 uint32_t cr = a8->armv8_common.debug_base;
572 int retval;
573
574 switch (index_t) {
575 case 0 ... 15: /* breakpoints */
576 vr += CPUDBG_BVR_BASE;
577 cr += CPUDBG_BCR_BASE;
578 break;
579 case 16 ... 31: /* watchpoints */
580 vr += CPUDBG_WVR_BASE;
581 cr += CPUDBG_WCR_BASE;
582 index_t -= 16;
583 break;
584 default:
585 return ERROR_FAIL;
586 }
587 vr += 4 * index_t;
588 cr += 4 * index_t;
589
590 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
591 (unsigned) vr, (unsigned) cr);
592
593 retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
594 vr, addr);
595 if (retval != ERROR_OK)
596 return retval;
597 retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
598 cr, control);
599 return retval;
600 }
601
602 static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
603 {
604 return ERROR_OK;
605
606 #if 0
607 struct aarch64_common *a8 = dpm_to_a8(dpm);
608 uint32_t cr;
609
610 switch (index_t) {
611 case 0 ... 15:
612 cr = a8->armv8_common.debug_base + CPUDBG_BCR_BASE;
613 break;
614 case 16 ... 31:
615 cr = a8->armv8_common.debug_base + CPUDBG_WCR_BASE;
616 index_t -= 16;
617 break;
618 default:
619 return ERROR_FAIL;
620 }
621 cr += 4 * index_t;
622
623 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
624
625 /* clear control register */
626 return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
627 #endif
628 }
629
630 static int aarch64_dpm_setup(struct aarch64_common *a8, uint32_t debug)
631 {
632 struct arm_dpm *dpm = &a8->armv8_common.dpm;
633 int retval;
634
635 dpm->arm = &a8->armv8_common.arm;
636 dpm->didr = debug;
637
638 dpm->prepare = aarch64_dpm_prepare;
639 dpm->finish = aarch64_dpm_finish;
640
641 dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
642 dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
643 dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
644 dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
645 dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;
646
647 dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
648 dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
649 dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
650 dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;
651
652 dpm->arm_reg_current = armv8_reg_current;
653
654 dpm->bpwp_enable = aarch64_bpwp_enable;
655 dpm->bpwp_disable = aarch64_bpwp_disable;
656
657 retval = arm_dpm_setup(dpm);
658 if (retval == ERROR_OK)
659 retval = arm_dpm_initialize(dpm);
660
661 return retval;
662 }
663 static struct target *get_aarch64(struct target *target, int32_t coreid)
664 {
665 struct target_list *head;
666 struct target *curr;
667
668 head = target->head;
669 while (head != (struct target_list *)NULL) {
670 curr = head->target;
671 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
672 return curr;
673 head = head->next;
674 }
675 return target;
676 }
677 static int aarch64_halt(struct target *target);
678
679 static int aarch64_halt_smp(struct target *target)
680 {
681 int retval = 0;
682 struct target_list *head;
683 struct target *curr;
684 head = target->head;
685 while (head != (struct target_list *)NULL) {
686 curr = head->target;
687 if ((curr != target) && (curr->state != TARGET_HALTED))
688 retval += aarch64_halt(curr);
689 head = head->next;
690 }
691 return retval;
692 }
693
694 static int update_halt_gdb(struct target *target)
695 {
696 int retval = 0;
697 if (target->gdb_service && target->gdb_service->core[0] == -1) {
698 target->gdb_service->target = target;
699 target->gdb_service->core[0] = target->coreid;
700 retval += aarch64_halt_smp(target);
701 }
702 return retval;
703 }
704
705 /*
706 * AArch64 Run control
707 */
708
709 static int aarch64_poll(struct target *target)
710 {
711 int retval = ERROR_OK;
712 uint32_t dscr;
713 struct aarch64_common *aarch64 = target_to_aarch64(target);
714 struct armv8_common *armv8 = &aarch64->armv8_common;
715 enum target_state prev_target_state = target->state;
716 /* toggling to another core is done by gdb as follows: */
717 /* maint packet J core_id */
718 /* continue */
719 /* the next poll triggers a halt event sent to gdb */
720 if ((target->state == TARGET_HALTED) && (target->smp) &&
721 (target->gdb_service) &&
722 (target->gdb_service->target == NULL)) {
723 target->gdb_service->target =
724 get_aarch64(target, target->gdb_service->core[1]);
725 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
726 return retval;
727 }
728 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
729 armv8->debug_base + CPUDBG_DSCR, &dscr);
730 if (retval != ERROR_OK)
731 return retval;
732 aarch64->cpudbg_dscr = dscr;
733
734 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
735 if (prev_target_state != TARGET_HALTED) {
736 /* We have a halting debug event */
737 LOG_DEBUG("Target halted");
738 target->state = TARGET_HALTED;
739 if ((prev_target_state == TARGET_RUNNING)
740 || (prev_target_state == TARGET_UNKNOWN)
741 || (prev_target_state == TARGET_RESET)) {
742 retval = aarch64_debug_entry(target);
743 if (retval != ERROR_OK)
744 return retval;
745 if (target->smp) {
746 retval = update_halt_gdb(target);
747 if (retval != ERROR_OK)
748 return retval;
749 }
750 target_call_event_callbacks(target,
751 TARGET_EVENT_HALTED);
752 }
753 if (prev_target_state == TARGET_DEBUG_RUNNING) {
754 LOG_DEBUG(" ");
755
756 retval = aarch64_debug_entry(target);
757 if (retval != ERROR_OK)
758 return retval;
759 if (target->smp) {
760 retval = update_halt_gdb(target);
761 if (retval != ERROR_OK)
762 return retval;
763 }
764
765 target_call_event_callbacks(target,
766 TARGET_EVENT_DEBUG_HALTED);
767 }
768 }
769 } else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
770 target->state = TARGET_RUNNING;
771 else {
772 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
773 target->state = TARGET_UNKNOWN;
774 }
775
776 return retval;
777 }
778
779 static int aarch64_halt(struct target *target)
780 {
781 int retval = ERROR_OK;
782 uint32_t dscr;
783 struct armv8_common *armv8 = target_to_armv8(target);
784
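/* The magic offsets below appear to program the cross-trigger interface
 * (CTI), assuming the standard CoreSight CTI register map at
 * debug_base + 0x10000: 0x000 CTICONTROL (enable), 0x140 CTIGATE,
 * 0x0a0/0x0a4 CTIOUTEN0/1, 0x020 CTIINEN0, 0x01c CTIAPPPULSE (pulse
 * the halt-request channel) and 0x134 CTITRIGOUTSTATUS. */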
785 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
786 armv8->debug_base + 0x10000 + 0, &dscr);
787 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
788 armv8->debug_base + 0x10000 + 0, 1);
789 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
790 armv8->debug_base + 0x10000 + 0, &dscr);
791
792 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
793 armv8->debug_base + 0x10000 + 0x140, &dscr);
794 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
795 armv8->debug_base + 0x10000 + 0x140, 6);
796 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
797 armv8->debug_base + 0x10000 + 0x140, &dscr);
798
799 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
800 armv8->debug_base + 0x10000 + 0xa0, &dscr);
801 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
802 armv8->debug_base + 0x10000 + 0xa0, 5);
803 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
804 armv8->debug_base + 0x10000 + 0xa0, &dscr);
805
806 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
807 armv8->debug_base + 0x10000 + 0xa4, &dscr);
808 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
809 armv8->debug_base + 0x10000 + 0xa4, 2);
810 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
811 armv8->debug_base + 0x10000 + 0xa4, &dscr);
812
813 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
814 armv8->debug_base + 0x10000 + 0x20, &dscr);
815 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
816 armv8->debug_base + 0x10000 + 0x20, 4);
817 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
818 armv8->debug_base + 0x10000 + 0x20, &dscr);
819
820 /*
821 * enter halting debug mode
822 */
823 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
824 armv8->debug_base + CPUDBG_DSCR, &dscr);
825 if (retval != ERROR_OK)
826 return retval;
827
828 /* STATUS */
829 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
830 armv8->debug_base + 0x10000 + 0x134, &dscr);
831
832 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
833 armv8->debug_base + 0x10000 + 0x1c, &dscr);
834 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
835 armv8->debug_base + 0x10000 + 0x1c, 1);
836 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
837 armv8->debug_base + 0x10000 + 0x1c, &dscr);
838
839
840 long long then = timeval_ms();
841 for (;; ) {
842 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
843 armv8->debug_base + CPUDBG_DSCR, &dscr);
844 if (retval != ERROR_OK)
845 return retval;
846 if ((dscr & DSCR_CORE_HALTED) != 0)
847 break;
848 if (timeval_ms() > then + 1000) {
849 LOG_ERROR("Timeout waiting for halt");
850 return ERROR_FAIL;
851 }
852 }
853
854 target->debug_reason = DBG_REASON_DBGRQ;
855
856 return ERROR_OK;
857 }
858
859 static int aarch64_internal_restore(struct target *target, int current,
860 uint64_t *address, int handle_breakpoints, int debug_execution)
861 {
862 struct armv8_common *armv8 = target_to_armv8(target);
863 struct arm *arm = &armv8->arm;
864 int retval;
865 uint64_t resume_pc;
866
867 if (!debug_execution)
868 target_free_all_working_areas(target);
869
870 /* current = 1: continue on current pc, otherwise continue at <address> */
871 resume_pc = buf_get_u64(arm->pc->value, 0, 64);
872 if (!current)
873 resume_pc = *address;
874 else
875 *address = resume_pc;
876
877 /* Make sure that the ARMv7 gdb thumb fixups do not
878 * kill the return address
879 */
880 switch (arm->core_state) {
881 case ARM_STATE_ARM:
882 case ARM_STATE_AARCH64:
883 resume_pc &= 0xFFFFFFFFFFFFFFFC;
884 break;
885 case ARM_STATE_THUMB:
886 case ARM_STATE_THUMB_EE:
887 /* When the return address is loaded into PC
888 * bit 0 must be 1 to stay in Thumb state
889 */
890 resume_pc |= 0x1;
891 break;
892 case ARM_STATE_JAZELLE:
893 LOG_ERROR("How do I resume into Jazelle state??");
894 return ERROR_FAIL;
895 }
896 LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
897 buf_set_u64(arm->pc->value, 0, 64, resume_pc);
898 arm->pc->dirty = 1;
899 arm->pc->valid = 1;
900 #if 0
901 /* restore dpm_mode at system halt */
902 dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
903 #endif
904 /* call it now, before restoring context, because it uses cpu
905 * register r0 to restore the system control register */
906 retval = aarch64_restore_system_control_reg(target);
907 if (retval != ERROR_OK)
908 return retval;
909 retval = aarch64_restore_context(target, handle_breakpoints);
910 if (retval != ERROR_OK)
911 return retval;
912 target->debug_reason = DBG_REASON_NOTHALTED;
913 target->state = TARGET_RUNNING;
914
915 /* registers are now invalid */
916 register_cache_invalidate(arm->core_cache);
917
918 #if 0
919 /* the front-end may request us not to handle breakpoints */
920 if (handle_breakpoints) {
921 /* Single step past breakpoint at current address */
922 breakpoint = breakpoint_find(target, resume_pc);
923 if (breakpoint) {
924 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
925 cortex_m3_unset_breakpoint(target, breakpoint);
926 cortex_m3_single_step_core(target);
927 cortex_m3_set_breakpoint(target, breakpoint);
928 }
929 }
930 #endif
931
932 return retval;
933 }
934
935 static int aarch64_internal_restart(struct target *target)
936 {
937 struct armv8_common *armv8 = target_to_armv8(target);
938 struct arm *arm = &armv8->arm;
939 int retval;
940 uint32_t dscr;
941 /*
942 * Restart core and wait for it to be started. Clear ITRen and sticky
943 * exception flags: see ARMv7 ARM, C5.9.
944 *
945 * REVISIT: for single stepping, we probably want to
946 * disable IRQs by default, with optional override...
947 */
948
949 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
950 armv8->debug_base + CPUDBG_DSCR, &dscr);
951 if (retval != ERROR_OK)
952 return retval;
953
954 if ((dscr & DSCR_INSTR_COMP) == 0)
955 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
956
957 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
958 armv8->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
959 if (retval != ERROR_OK)
960 return retval;
961
962 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
963 armv8->debug_base + CPUDBG_DRCR, DRCR_RESTART |
964 DRCR_CLEAR_EXCEPTIONS);
965 if (retval != ERROR_OK)
966 return retval;
967
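/* Assuming the same CTI mapping as in aarch64_halt(): acknowledge the
 * pending debug-request trigger (0x010 CTIINTACK) and pulse the
 * restart channel (0x01c CTIAPPPULSE, channel 1). */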
968 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
969 armv8->debug_base + 0x10000 + 0x10, 1);
970 if (retval != ERROR_OK)
971 return retval;
972
973 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
974 armv8->debug_base + 0x10000 + 0x1c, 2);
975 if (retval != ERROR_OK)
976 return retval;
977
978 long long then = timeval_ms();
979 for (;; ) {
980 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
981 armv8->debug_base + CPUDBG_DSCR, &dscr);
982 if (retval != ERROR_OK)
983 return retval;
984 if ((dscr & DSCR_CORE_RESTARTED) != 0)
985 break;
986 if (timeval_ms() > then + 1000) {
987 LOG_ERROR("Timeout waiting for resume");
988 return ERROR_FAIL;
989 }
990 }
991
992 target->debug_reason = DBG_REASON_NOTHALTED;
993 target->state = TARGET_RUNNING;
994
995 /* registers are now invalid */
996 register_cache_invalidate(arm->core_cache);
997
998 return ERROR_OK;
999 }
1000
1001 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
1002 {
1003 int retval = 0;
1004 struct target_list *head;
1005 struct target *curr;
1006 uint64_t address;
1007 head = target->head;
1008 while (head != (struct target_list *)NULL) {
1009 curr = head->target;
1010 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1011 /* resume at current address, not in step mode */
1012 retval += aarch64_internal_restore(curr, 1, &address,
1013 handle_breakpoints, 0);
1014 retval += aarch64_internal_restart(curr);
1015 }
1016 head = head->next;
1017
1018 }
1019 return retval;
1020 }
1021
1022 static int aarch64_resume(struct target *target, int current,
1023 target_addr_t address, int handle_breakpoints, int debug_execution)
1024 {
1025 int retval = 0;
1026 uint64_t addr = address;
1027
1028 /* dummy resume for smp toggle in order to reduce gdb impact */
1029 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1030 /* simulate a start and halt of target */
1031 target->gdb_service->target = NULL;
1032 target->gdb_service->core[0] = target->gdb_service->core[1];
1033 /* fake resume: at the next poll we report the halt of target core[1], see poll */
1034 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1035 return 0;
1036 }
1037 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
1038 debug_execution);
1039 if (target->smp) {
1040 target->gdb_service->core[0] = -1;
1041 retval = aarch64_restore_smp(target, handle_breakpoints);
1042 if (retval != ERROR_OK)
1043 return retval;
1044 }
1045 aarch64_internal_restart(target);
1046
1047 if (!debug_execution) {
1048 target->state = TARGET_RUNNING;
1049 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1050 LOG_DEBUG("target resumed at 0x%" PRIu64, addr);
1051 } else {
1052 target->state = TARGET_DEBUG_RUNNING;
1053 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1054 LOG_DEBUG("target debug resumed at 0x%" PRIu64, addr);
1055 }
1056
1057 return ERROR_OK;
1058 }
1059
1060 static int aarch64_debug_entry(struct target *target)
1061 {
1062 uint32_t dscr;
1063 int retval = ERROR_OK;
1064 struct aarch64_common *aarch64 = target_to_aarch64(target);
1065 struct armv8_common *armv8 = target_to_armv8(target);
1066 uint32_t tmp;
1067
1068 LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
1069
1070 /* REVISIT surely we should not re-read DSCR !! */
1071 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1072 armv8->debug_base + CPUDBG_DSCR, &dscr);
1073 if (retval != ERROR_OK)
1074 return retval;
1075
1076 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1077 * imprecise data aborts get discarded by issuing a Data
1078 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1079 */
1080
1081 /* Enable the ITR execution once we are in debug mode */
1082 dscr |= DSCR_ITR_EN;
1083 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1084 armv8->debug_base + CPUDBG_DSCR, dscr);
1085 if (retval != ERROR_OK)
1086 return retval;
1087
1088 /* Examine debug reason */
1089 arm_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
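/* CPUDBG_DESR is presumably the external debug event status register
 * (EDESR); bit 2 pending with no other event ((tmp & 0x7) == 0x4)
 * means a halting step completed, so report a single step. */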
1090 mem_ap_read_atomic_u32(armv8->debug_ap,
1091 armv8->debug_base + CPUDBG_DESR, &tmp);
1092 if ((tmp & 0x7) == 0x4)
1093 target->debug_reason = DBG_REASON_SINGLESTEP;
1094
1095 /* save address of instruction that triggered the watchpoint? */
1096 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1097 uint32_t wfar;
1098
1099 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1100 armv8->debug_base + CPUDBG_WFAR,
1101 &wfar);
1102 if (retval != ERROR_OK)
1103 return retval;
1104 arm_dpm_report_wfar(&armv8->dpm, wfar);
1105 }
1106
1107 retval = arm_dpm_read_current_registers_64(&armv8->dpm);
1108
1109 if (armv8->post_debug_entry) {
1110 retval = armv8->post_debug_entry(target);
1111 if (retval != ERROR_OK)
1112 return retval;
1113 }
1114
1115 return retval;
1116 }
1117
1118 static int aarch64_post_debug_entry(struct target *target)
1119 {
1120 struct aarch64_common *aarch64 = target_to_aarch64(target);
1121 struct armv8_common *armv8 = &aarch64->armv8_common;
1122 struct armv8_mmu_common *armv8_mmu = &armv8->armv8_mmu;
1123 uint32_t sctlr_el1 = 0;
1124 int retval;
1125
1126 mem_ap_write_atomic_u32(armv8->debug_ap,
1127 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1128 retval = aarch64_instr_read_data_r0(armv8->arm.dpm,
1129 0xd5381000, &sctlr_el1);
1130 if (retval != ERROR_OK)
1131 return retval;
1132
1133 LOG_DEBUG("sctlr_el1 = %#8.8x", sctlr_el1);
1134 aarch64->system_control_reg = sctlr_el1;
1135 aarch64->system_control_reg_curr = sctlr_el1;
1136 aarch64->curr_mode = armv8->arm.core_mode;
1137
1138 armv8_mmu->mmu_enabled = sctlr_el1 & 0x1U ? 1 : 0;
1139 armv8_mmu->armv8_cache.d_u_cache_enabled = sctlr_el1 & 0x4U ? 1 : 0;
1140 armv8_mmu->armv8_cache.i_cache_enabled = sctlr_el1 & 0x1000U ? 1 : 0;
1141
1142 #if 0
1143 if (armv8->armv8_mmu.armv8_cache.ctype == -1)
1144 armv8_identify_cache(target);
1145 #endif
1146
1147 return ERROR_OK;
1148 }
1149
1150 static int aarch64_step(struct target *target, int current, target_addr_t address,
1151 int handle_breakpoints)
1152 {
1153 struct armv8_common *armv8 = target_to_armv8(target);
1154 int retval;
1155 uint32_t tmp;
1156
1157 if (target->state != TARGET_HALTED) {
1158 LOG_WARNING("target not halted");
1159 return ERROR_TARGET_NOT_HALTED;
1160 }
1161
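/* Set the halting-step bit (bit 2, presumably EDECR.SS in the Debug
 * Execution Control Register) so the core halts again after executing
 * a single instruction once resumed below. */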
1162 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1163 armv8->debug_base + CPUDBG_DECR, &tmp);
1164 if (retval != ERROR_OK)
1165 return retval;
1166
1167 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1168 armv8->debug_base + CPUDBG_DECR, (tmp|0x4));
1169 if (retval != ERROR_OK)
1170 return retval;
1171
1172 target->debug_reason = DBG_REASON_SINGLESTEP;
1173 retval = aarch64_resume(target, 1, address, 0, 0);
1174 if (retval != ERROR_OK)
1175 return retval;
1176
1177 long long then = timeval_ms();
1178 while (target->state != TARGET_HALTED) {
1179 mem_ap_read_atomic_u32(armv8->debug_ap,
1180 armv8->debug_base + CPUDBG_DESR, &tmp);
1181 LOG_DEBUG("DESR = %#x", tmp);
1182 retval = aarch64_poll(target);
1183 if (retval != ERROR_OK)
1184 return retval;
1185 if (timeval_ms() > then + 1000) {
1186 LOG_ERROR("timeout waiting for target halt");
1187 return ERROR_FAIL;
1188 }
1189 }
1190
1191 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1192 armv8->debug_base + CPUDBG_DECR, (tmp&(~0x4)));
1193 if (retval != ERROR_OK)
1194 return retval;
1195
1196 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1197 if (target->state == TARGET_HALTED)
1198 LOG_DEBUG("target stepped");
1199
1200 return ERROR_OK;
1201 }
1202
1203 static int aarch64_restore_context(struct target *target, bool bpwp)
1204 {
1205 struct armv8_common *armv8 = target_to_armv8(target);
1206
1207 LOG_DEBUG(" ");
1208
1209 if (armv8->pre_restore_context)
1210 armv8->pre_restore_context(target);
1211
1212 return arm_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1215 }
1216
1217 /*
1218 * AArch64 Breakpoint and watchpoint functions
1219 */
1220
1221 /* Setup hardware Breakpoint Register Pair */
1222 static int aarch64_set_breakpoint(struct target *target,
1223 struct breakpoint *breakpoint, uint8_t matchmode)
1224 {
1225 int retval;
1226 int brp_i = 0;
1227 uint32_t control;
1228 uint8_t byte_addr_select = 0x0F;
1229 struct aarch64_common *aarch64 = target_to_aarch64(target);
1230 struct armv8_common *armv8 = &aarch64->armv8_common;
1231 struct aarch64_brp *brp_list = aarch64->brp_list;
1232 uint32_t dscr;
1233
1234 if (breakpoint->set) {
1235 LOG_WARNING("breakpoint already set");
1236 return ERROR_OK;
1237 }
1238
1239 if (breakpoint->type == BKPT_HARD) {
1240 int64_t bpt_value;
1241 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1242 brp_i++;
1243 if (brp_i >= aarch64->brp_num) {
1244 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1245 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1246 }
1247 breakpoint->set = brp_i + 1;
1248 if (breakpoint->length == 2)
1249 byte_addr_select = (3 << (breakpoint->address & 0x02));
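/* DBGBCRn_EL1 field layout assumed below: BT (matchmode) in bits[23:20],
 * HMC in bit 13, BAS (byte_addr_select) in bits[8:5], PMC in bits[2:1]
 * and the enable bit E in bit 0. */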
1250 control = ((matchmode & 0x7) << 20)
1251 | (1 << 13)
1252 | (byte_addr_select << 5)
1253 | (3 << 1) | 1;
1254 brp_list[brp_i].used = 1;
1255 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1256 brp_list[brp_i].control = control;
1257 bpt_value = brp_list[brp_i].value;
1258
1259 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1260 + CPUDBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1261 (uint32_t)(bpt_value & 0xFFFFFFFF));
1262 if (retval != ERROR_OK)
1263 return retval;
1264 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1265 + CPUDBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1266 (uint32_t)(bpt_value >> 32));
1267 if (retval != ERROR_OK)
1268 return retval;
1269
1270 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1271 + CPUDBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1272 brp_list[brp_i].control);
1273 if (retval != ERROR_OK)
1274 return retval;
1275 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1276 brp_list[brp_i].control,
1277 brp_list[brp_i].value);
1278
1279 } else if (breakpoint->type == BKPT_SOFT) {
1280 uint8_t code[4];
1281 buf_set_u32(code, 0, 32, 0xD4400000); /* A64 HLT #0 */
1282
1283 retval = target_read_memory(target,
1284 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1285 breakpoint->length, 1,
1286 breakpoint->orig_instr);
1287 if (retval != ERROR_OK)
1288 return retval;
1289 retval = target_write_memory(target,
1290 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1291 breakpoint->length, 1, code);
1292 if (retval != ERROR_OK)
1293 return retval;
1294 breakpoint->set = 0x11; /* Any nice value but 0 */
1295 }
1296
1297 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1298 armv8->debug_base + CPUDBG_DSCR, &dscr);
1299 /* Ensure that halting debug mode is enabled */
1300 dscr = dscr | DSCR_HALT_DBG_MODE;
1301 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1302 armv8->debug_base + CPUDBG_DSCR, dscr);
1303 if (retval != ERROR_OK) {
1304 LOG_DEBUG("Failed to set DSCR.HDE");
1305 return retval;
1306 }
1307
1308 return ERROR_OK;
1309 }
1310
1311 static int aarch64_set_context_breakpoint(struct target *target,
1312 struct breakpoint *breakpoint, uint8_t matchmode)
1313 {
1314 int retval = ERROR_FAIL;
1315 int brp_i = 0;
1316 uint32_t control;
1317 uint8_t byte_addr_select = 0x0F;
1318 struct aarch64_common *aarch64 = target_to_aarch64(target);
1319 struct armv8_common *armv8 = &aarch64->armv8_common;
1320 struct aarch64_brp *brp_list = aarch64->brp_list;
1321
1322 if (breakpoint->set) {
1323 LOG_WARNING("breakpoint already set");
1324 return retval;
1325 }
1326 /*check available context BRPs*/
1327 while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
1328 (brp_list[brp_i].type != BRP_CONTEXT)))
1329 brp_i++;
1330
1331 if (brp_i >= aarch64->brp_num) {
1332 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1333 return ERROR_FAIL;
1334 }
1335
1336 breakpoint->set = brp_i + 1;
1337 control = ((matchmode & 0x7) << 20)
1338 | (byte_addr_select << 5)
1339 | (3 << 1) | 1;
1340 brp_list[brp_i].used = 1;
1341 brp_list[brp_i].value = (breakpoint->asid);
1342 brp_list[brp_i].control = control;
1343 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1344 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1345 brp_list[brp_i].value);
1346 if (retval != ERROR_OK)
1347 return retval;
1348 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1349 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1350 brp_list[brp_i].control);
1351 if (retval != ERROR_OK)
1352 return retval;
1353 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1354 brp_list[brp_i].control,
1355 brp_list[brp_i].value);
1356 return ERROR_OK;
1357
1358 }
1359
1360 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1361 {
1362 int retval = ERROR_FAIL;
1363 int brp_1 = 0; /* holds the contextID pair */
1364 int brp_2 = 0; /* holds the IVA pair */
1365 uint32_t control_CTX, control_IVA;
1366 uint8_t CTX_byte_addr_select = 0x0F;
1367 uint8_t IVA_byte_addr_select = 0x0F;
1368 uint8_t CTX_machmode = 0x03;
1369 uint8_t IVA_machmode = 0x01;
1370 struct aarch64_common *aarch64 = target_to_aarch64(target);
1371 struct armv8_common *armv8 = &aarch64->armv8_common;
1372 struct aarch64_brp *brp_list = aarch64->brp_list;
1373
1374 if (breakpoint->set) {
1375 LOG_WARNING("breakpoint already set");
1376 return retval;
1377 }
1378 /*check available context BRPs*/
1379 while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
1380 (brp_list[brp_1].type != BRP_CONTEXT)))
1381 brp_1++;
1382
1383 printf("brp(CTX) found num: %d\n", brp_1);
1384 if (brp_1 >= aarch64->brp_num) {
1385 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1386 return ERROR_FAIL;
1387 }
1388
1389 while ((brp_2 < aarch64->brp_num) && (brp_list[brp_2].used ||
1390 (brp_list[brp_2].type != BRP_NORMAL)))
1391 brp_2++;
1392
1393 printf("brp(IVA) found num: %d\n", brp_2);
1394 if (brp_2 >= aarch64->brp_num) {
1395 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1396 return ERROR_FAIL;
1397 }
1398
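/* A hybrid breakpoint links a context-ID BRP with an IVA BRP: each
 * control word below carries its partner's BRP number in the LBN field
 * (bits[19:16], assumed), so both comparisons must match before the
 * core halts. */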
1399 breakpoint->set = brp_1 + 1;
1400 breakpoint->linked_BRP = brp_2;
1401 control_CTX = ((CTX_machmode & 0x7) << 20)
1402 | (brp_2 << 16)
1403 | (0 << 14)
1404 | (CTX_byte_addr_select << 5)
1405 | (3 << 1) | 1;
1406 brp_list[brp_1].used = 1;
1407 brp_list[brp_1].value = (breakpoint->asid);
1408 brp_list[brp_1].control = control_CTX;
1409 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1410 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1411 brp_list[brp_1].value);
1412 if (retval != ERROR_OK)
1413 return retval;
1414 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1415 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1416 brp_list[brp_1].control);
1417 if (retval != ERROR_OK)
1418 return retval;
1419
1420 control_IVA = ((IVA_machmode & 0x7) << 20)
1421 | (brp_1 << 16)
1422 | (IVA_byte_addr_select << 5)
1423 | (3 << 1) | 1;
1424 brp_list[brp_2].used = 1;
1425 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1426 brp_list[brp_2].control = control_IVA;
1427 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1428 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1429 brp_list[brp_2].value);
1430 if (retval != ERROR_OK)
1431 return retval;
1432 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1433 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1434 brp_list[brp_2].control);
1435 if (retval != ERROR_OK)
1436 return retval;
1437
1438 return ERROR_OK;
1439 }
1440
1441 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1442 {
1443 int retval;
1444 struct aarch64_common *aarch64 = target_to_aarch64(target);
1445 struct armv8_common *armv8 = &aarch64->armv8_common;
1446 struct aarch64_brp *brp_list = aarch64->brp_list;
1447
1448 if (!breakpoint->set) {
1449 LOG_WARNING("breakpoint not set");
1450 return ERROR_OK;
1451 }
1452
1453 if (breakpoint->type == BKPT_HARD) {
1454 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1455 int brp_i = breakpoint->set - 1;
1456 int brp_j = breakpoint->linked_BRP;
1457 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1458 LOG_DEBUG("Invalid BRP number in breakpoint");
1459 return ERROR_OK;
1460 }
1461 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1462 brp_list[brp_i].control, brp_list[brp_i].value);
1463 brp_list[brp_i].used = 0;
1464 brp_list[brp_i].value = 0;
1465 brp_list[brp_i].control = 0;
1466 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1467 + CPUDBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1468 brp_list[brp_i].control);
1469 if (retval != ERROR_OK)
1470 return retval;
1471 if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1472 LOG_DEBUG("Invalid BRP number in breakpoint");
1473 return ERROR_OK;
1474 }
1475 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1476 brp_list[brp_j].control, brp_list[brp_j].value);
1477 brp_list[brp_j].used = 0;
1478 brp_list[brp_j].value = 0;
1479 brp_list[brp_j].control = 0;
1480 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1481 + CPUDBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1482 brp_list[brp_j].control);
1483 if (retval != ERROR_OK)
1484 return retval;
1485 breakpoint->linked_BRP = 0;
1486 breakpoint->set = 0;
1487 return ERROR_OK;
1488
1489 } else {
1490 int brp_i = breakpoint->set - 1;
1491 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1492 LOG_DEBUG("Invalid BRP number in breakpoint");
1493 return ERROR_OK;
1494 }
1495 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1496 brp_list[brp_i].control, brp_list[brp_i].value);
1497 brp_list[brp_i].used = 0;
1498 brp_list[brp_i].value = 0;
1499 brp_list[brp_i].control = 0;
1500 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1501 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1502 brp_list[brp_i].control);
1503 if (retval != ERROR_OK)
1504 return retval;
1505 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1506 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1507 brp_list[brp_i].value);
1508 if (retval != ERROR_OK)
1509 return retval;
1510 breakpoint->set = 0;
1511 return ERROR_OK;
1512 }
1513 } else {
1514 /* restore original instruction (kept in target endianness) */
1515 if (breakpoint->length == 4) {
1516 retval = target_write_memory(target,
1517 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1518 4, 1, breakpoint->orig_instr);
1519 if (retval != ERROR_OK)
1520 return retval;
1521 } else {
1522 retval = target_write_memory(target,
1523 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1524 2, 1, breakpoint->orig_instr);
1525 if (retval != ERROR_OK)
1526 return retval;
1527 }
1528 }
1529 breakpoint->set = 0;
1530
1531 return ERROR_OK;
1532 }
1533
1534 static int aarch64_add_breakpoint(struct target *target,
1535 struct breakpoint *breakpoint)
1536 {
1537 struct aarch64_common *aarch64 = target_to_aarch64(target);
1538
1539 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1540 LOG_INFO("no hardware breakpoint available");
1541 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1542 }
1543
1544 if (breakpoint->type == BKPT_HARD)
1545 aarch64->brp_num_available--;
1546
1547 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1548 }
1549
1550 static int aarch64_add_context_breakpoint(struct target *target,
1551 struct breakpoint *breakpoint)
1552 {
1553 struct aarch64_common *aarch64 = target_to_aarch64(target);
1554
1555 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1556 LOG_INFO("no hardware breakpoint available");
1557 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1558 }
1559
1560 if (breakpoint->type == BKPT_HARD)
1561 aarch64->brp_num_available--;
1562
1563 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1564 }
1565
1566 static int aarch64_add_hybrid_breakpoint(struct target *target,
1567 struct breakpoint *breakpoint)
1568 {
1569 struct aarch64_common *aarch64 = target_to_aarch64(target);
1570
1571 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1572 LOG_INFO("no hardware breakpoint available");
1573 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1574 }
1575
1576 if (breakpoint->type == BKPT_HARD)
1577 aarch64->brp_num_available--;
1578
1579 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1580 }
1581
1582
1583 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1584 {
1585 struct aarch64_common *aarch64 = target_to_aarch64(target);
1586
1587 #if 0
1588 /* It is perfectly possible to remove breakpoints while the target is running */
1589 if (target->state != TARGET_HALTED) {
1590 LOG_WARNING("target not halted");
1591 return ERROR_TARGET_NOT_HALTED;
1592 }
1593 #endif
1594
1595 if (breakpoint->set) {
1596 aarch64_unset_breakpoint(target, breakpoint);
1597 if (breakpoint->type == BKPT_HARD)
1598 aarch64->brp_num_available++;
1599 }
1600
1601 return ERROR_OK;
1602 }
1603
1604 /*
1605 * AArch64 Reset functions
1606 */
1607
1608 static int aarch64_assert_reset(struct target *target)
1609 {
1610 struct armv8_common *armv8 = target_to_armv8(target);
1611
1612 LOG_DEBUG(" ");
1613
1614 /* FIXME when halt is requested, make it work somehow... */
1615
1616 /* Issue some kind of warm reset. */
1617 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1618 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1619 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1620 /* REVISIT handle "pulls" cases, if there's
1621 * hardware that needs them to work.
1622 */
1623 jtag_add_reset(0, 1);
1624 } else {
1625 LOG_ERROR("%s: how to reset?", target_name(target));
1626 return ERROR_FAIL;
1627 }
1628
1629 /* registers are now invalid */
1630 register_cache_invalidate(armv8->arm.core_cache);
1631
1632 target->state = TARGET_RESET;
1633
1634 return ERROR_OK;
1635 }
1636
1637 static int aarch64_deassert_reset(struct target *target)
1638 {
1639 int retval;
1640
1641 LOG_DEBUG(" ");
1642
1643 /* be certain SRST is off */
1644 jtag_add_reset(0, 0);
1645
1646 retval = aarch64_poll(target);
1647 if (retval != ERROR_OK)
1648 return retval;
1649
1650 if (target->reset_halt) {
1651 if (target->state != TARGET_HALTED) {
1652 LOG_WARNING("%s: ran after reset and before halt ...",
1653 target_name(target));
1654 retval = target_halt(target);
1655 if (retval != ERROR_OK)
1656 return retval;
1657 }
1658 }
1659
1660 return ERROR_OK;
1661 }
1662
1663 static int aarch64_write_apb_ab_memory(struct target *target,
1664 uint64_t address, uint32_t size,
1665 uint32_t count, const uint8_t *buffer)
1666 {
1667 /* write memory through APB-AP */
1668 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1669 struct armv8_common *armv8 = target_to_armv8(target);
1670 struct arm *arm = &armv8->arm;
1671 int total_bytes = count * size;
1672 int total_u32;
1673 int start_byte = address & 0x3;
1674 int end_byte = (address + total_bytes) & 0x3;
1675 struct reg *reg;
1676 uint32_t dscr;
1677 uint8_t *tmp_buff = NULL;
1678 uint32_t i = 0;
1679
1680 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1681 address, size, count);
1682 if (target->state != TARGET_HALTED) {
1683 LOG_WARNING("target not halted");
1684 return ERROR_TARGET_NOT_HALTED;
1685 }
1686
1687 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1688
1689 /* Mark registers X0 and X1 as dirty, as they will be used
1690 * for transferring the data.
1691 * They will be restored automatically when exiting
1692 * debug mode
1693 */
1694 reg = armv8_reg_current(arm, 1);
1695 reg->dirty = true;
1696
1697 reg = armv8_reg_current(arm, 0);
1698 reg->dirty = true;
1699
1700 /* clear any abort */
1701 retval = mem_ap_write_atomic_u32(armv8->debug_ap, armv8->debug_base + CPUDBG_DRCR, 1<<2);
1702 if (retval != ERROR_OK)
1703 return retval;
1704
1705 /* This algorithm comes from either:
1706 * Cortex-A8 TRM Example 12-25
1707 * Cortex-R4 TRM Example 11-26
1708 * (slight differences)
1709 */
1710
1711 /* The algorithm only copies 32 bit words, so the buffer
1712 * should be expanded to include the words at either end.
1713 * The first and last words will be read first to avoid
1714 * corruption if needed.
1715 */
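/* Worked example with hypothetical values address = 0x1001, size = 1,
 * count = 6: start_byte = 1, end_byte = (0x1007 & 3) = 3 and
 * total_u32 = DIV_ROUND_UP(1 + 6, 4) = 2, so both the first and the
 * last 32-bit words are read back first to preserve their outer bytes. */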
1716 tmp_buff = malloc(total_u32 * 4);
1717
1718 if ((start_byte != 0) && (total_u32 > 1)) {
1719 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1720 * the other bytes in the word.
1721 */
1722 retval = aarch64_read_apb_ab_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1723 if (retval != ERROR_OK)
1724 goto error_free_buff_w;
1725 }
1726
1727 /* If end of write is not aligned, or the write is less than 4 bytes */
1728 if ((end_byte != 0) ||
1729 ((total_u32 == 1) && (total_bytes != 4))) {
1730
1731 /* Read the last word to avoid corruption during 32 bit write */
1732 int mem_offset = (total_u32-1) * 4;
1733 retval = aarch64_read_apb_ab_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1734 if (retval != ERROR_OK)
1735 goto error_free_buff_w;
1736 }
1737
1738 /* Copy the write buffer over the top of the temporary buffer */
1739 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1740
1741 /* We now have a 32 bit aligned buffer that can be written */
1742
1743 /* Read DSCR */
1744 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1745 armv8->debug_base + CPUDBG_DSCR, &dscr);
1746 if (retval != ERROR_OK)
1747 goto error_free_buff_w;
1748
1749 /* Set DTR mode to Non-blocking */
1750 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1751 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1752 armv8->debug_base + CPUDBG_DSCR, dscr);
1753 if (retval != ERROR_OK)
1754 goto error_free_buff_w;
1755
1756 if (size > 4) {
1757 LOG_WARNING("reading size >4 bytes not yet supported");
1758 goto error_unset_dtr_w;
1759 }
1760
1761 retval = aarch64_instr_write_data_dcc_64(arm->dpm, 0xd5330401, address+4);
1762 if (retval != ERROR_OK)
1763 goto error_unset_dtr_w;
1764
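/* Write loop sketch: X1 was loaded with address + 4 above
 * (0xd5330401 = mrs x1, dbgdtr_el0). Each pass moves one word through
 * DTRRX into W0 (0xd5330500 = mrs x0, dbgdtrrx_el0), stores it with
 * 0xb81fc020 = stur w0, [x1, #-4], then advances X1 by four with
 * 0x91001021 = add x1, x1, #4. */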
1765 dscr = DSCR_INSTR_COMP;
1766 while (i < count * size) {
1767 uint32_t val;
1768
1769 memcpy(&val, &buffer[i], size);
1770 retval = aarch64_instr_write_data_dcc(arm->dpm, 0xd5330500, val);
1771 if (retval != ERROR_OK)
1772 goto error_unset_dtr_w;
1773
1774 retval = aarch64_exec_opcode(target, 0xb81fc020, &dscr);
1775 if (retval != ERROR_OK)
1776 goto error_unset_dtr_w;
1777
1778 retval = aarch64_exec_opcode(target, 0x91001021, &dscr);
1779 if (retval != ERROR_OK)
1780 goto error_unset_dtr_w;
1781
1782 i += 4;
1783 }
1784
1785 /* Check for sticky abort flags in the DSCR */
1786 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1787 armv8->debug_base + CPUDBG_DSCR, &dscr);
1788 if (retval != ERROR_OK)
1789 goto error_free_buff_w;
1790 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
1791 /* Abort occurred - clear it and exit */
1792 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1793 mem_ap_write_atomic_u32(armv8->debug_ap,
1794 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1795 goto error_free_buff_w;
1796 }
1797
1798 /* Done */
1799 free(tmp_buff);
1800 return ERROR_OK;
1801
1802 error_unset_dtr_w:
1803 /* Unset DTR mode */
1804 mem_ap_read_atomic_u32(armv8->debug_ap,
1805 armv8->debug_base + CPUDBG_DSCR, &dscr);
1806 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1807 mem_ap_write_atomic_u32(armv8->debug_ap,
1808 armv8->debug_base + CPUDBG_DSCR, dscr);
1809 error_free_buff_w:
1810 LOG_ERROR("error");
1811 free(tmp_buff);
1812 return ERROR_FAIL;
1813 }
1814
1815 static int aarch64_read_apb_ab_memory(struct target *target,
1816 target_addr_t address, uint32_t size,
1817 uint32_t count, uint8_t *buffer)
1818 {
1819 /* read memory through APB-AP */
1820
1821 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1822 struct armv8_common *armv8 = target_to_armv8(target);
1823 struct arm *arm = &armv8->arm;
1824 struct reg *reg;
1825 uint32_t dscr, val;
1826 uint8_t *tmp_buff = NULL;
1827 uint32_t i = 0;
1828
1829 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count %" PRIu32,
1830 address, size, count);
1831 if (target->state != TARGET_HALTED) {
1832 LOG_WARNING("target not halted");
1833 return ERROR_TARGET_NOT_HALTED;
1834 }
1835
1836 /* Mark register R0 as dirty, as it will be used
1837 * for transferring the data.
1838 * It will be restored automatically when exiting
1839 * debug mode
1840 */
1841 reg = armv8_reg_current(arm, 0);
1842 reg->dirty = true;
1843
1844 /* clear any abort */
1845 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1846 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1847 if (retval != ERROR_OK)
1848 goto error_free_buff_r;
1849
1850 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1851 armv8->debug_base + CPUDBG_DSCR, &dscr);
1852 if (retval != ERROR_OK)
1853 goto error_unset_dtr_r;
1854
1855 if (size > 4) {
1856 LOG_WARNING("reading size >4 bytes not yet supported");
1857 goto error_unset_dtr_r;
1858 }
1859
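/* Each iteration loads X0 with (address + 4) through the DCC, executes
 * "ldur w0, [x0, #-4]" (0xb85fc000) to fetch the word at the current
 * address, then issues "msr dbgdtr_el0, x0" (0xd5130400) so the host
 * can collect the value from the DCC. */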
1860 while (i < count * size) {
1861
1862 retval = aarch64_instr_write_data_dcc_64(arm->dpm, 0xd5330400, address+4);
1863 if (retval != ERROR_OK)
1864 goto error_unset_dtr_r;
1865 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1866 armv8->debug_base + CPUDBG_DSCR, &dscr);
if (retval != ERROR_OK)
goto error_unset_dtr_r;
1867
1868 dscr = DSCR_INSTR_COMP;
1869 retval = aarch64_exec_opcode(target, 0xb85fc000, &dscr);
1870 if (retval != ERROR_OK)
1871 goto error_unset_dtr_r;
1872 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1873 armv8->debug_base + CPUDBG_DSCR, &dscr);
if (retval != ERROR_OK)
goto error_unset_dtr_r;
1874
1875 retval = aarch64_instr_read_data_dcc(arm->dpm, 0xd5130400, &val);
1876 if (retval != ERROR_OK)
1877 goto error_unset_dtr_r;
1878 memcpy(&buffer[i], &val, size);
1879 i += 4;
1880 address += 4;
1881 }
1882
1883 /* Clear any sticky error */
1884 mem_ap_write_atomic_u32(armv8->debug_ap,
1885 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1886
1887 /* Done */
1888 return ERROR_OK;
1889
1890 error_unset_dtr_r:
1891 LOG_WARNING("DSCR = 0x%" PRIx32, dscr);
1892 /* TODO: unset DTR mode */
1893
1894 error_free_buff_r:
1895 LOG_ERROR("error reading memory through APB-AP");
1896 free(tmp_buff);
1897
1898 /* Clear any sticky error */
1899 mem_ap_write_atomic_u32(armv8->debug_ap,
1900 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1901
1902 return ERROR_FAIL;
1903 }
1904
1905 static int aarch64_read_phys_memory(struct target *target,
1906 target_addr_t address, uint32_t size,
1907 uint32_t count, uint8_t *buffer)
1908 {
1909 struct armv8_common *armv8 = target_to_armv8(target);
1910 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1911 struct adiv5_dap *swjdp = armv8->arm.dap;
1912 uint8_t apsel = swjdp->apsel;
1913 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
1914 address, size, count);
1915
1916 if (count && buffer) {
1917
1918 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
1919
1920 /* read memory through AHB-AP */
1921 retval = mem_ap_read_buf(armv8->memory_ap, buffer, size, count, address);
1922 } else {
1923 /* read memory through APB-AP */
1924 retval = aarch64_mmu_modify(target, 0);
1925 if (retval != ERROR_OK)
1926 return retval;
1927 retval = aarch64_read_apb_ab_memory(target, address, size, count, buffer);
1928 }
1929 }
1930 return retval;
1931 }
1932
1933 static int aarch64_read_memory(struct target *target, target_addr_t address,
1934 uint32_t size, uint32_t count, uint8_t *buffer)
1935 {
1936 int mmu_enabled = 0;
1937 target_addr_t virt, phys;
1938 int retval;
1939 struct armv8_common *armv8 = target_to_armv8(target);
1940 struct adiv5_dap *swjdp = armv8->arm.dap;
1941 uint8_t apsel = swjdp->apsel;
1942
1943 /* aarch64 handles unaligned memory access */
1944 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1945 size, count);
1946
1947 /* determine if MMU was enabled on target stop */
1948 if (!armv8->is_armv7r) {
1949 retval = aarch64_mmu(target, &mmu_enabled);
1950 if (retval != ERROR_OK)
1951 return retval;
1952 }
1953
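/* If a memory AP is selected and available, translate the virtual
 * address and use the fast direct path; otherwise read through the
 * core via the slower APB-AP. */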
1954 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
1955 if (mmu_enabled) {
1956 virt = address;
1957 retval = aarch64_virt2phys(target, virt, &phys);
1958 if (retval != ERROR_OK)
1959 return retval;
1960
1961 LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR,
1962 virt, phys);
1963 address = phys;
1964 }
1965 retval = aarch64_read_phys_memory(target, address, size, count,
1966 buffer);
1967 } else {
1968 if (mmu_enabled) {
1969 retval = aarch64_check_address(target, address);
1970 if (retval != ERROR_OK)
1971 return retval;
1972 /* enable MMU as we could have disabled it for phys
1973 access */
1974 retval = aarch64_mmu_modify(target, 1);
1975 if (retval != ERROR_OK)
1976 return retval;
1977 }
1978 retval = aarch64_read_apb_ab_memory(target, address, size,
1979 count, buffer);
1980 }
1981 return retval;
1982 }
1983
1984 static int aarch64_write_phys_memory(struct target *target,
1985 target_addr_t address, uint32_t size,
1986 uint32_t count, const uint8_t *buffer)
1987 {
1988 struct armv8_common *armv8 = target_to_armv8(target);
1989 struct adiv5_dap *swjdp = armv8->arm.dap;
1990 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1991 uint8_t apsel = swjdp->apsel;
1992
1993 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1994 size, count);
1995
1996 if (count && buffer) {
1997
1998 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
1999
2000 /* write memory through AHB-AP */
2001 retval = mem_ap_write_buf(armv8->memory_ap, buffer, size, count, address);
2002 } else {
2003
2004 /* write memory through APB-AP */
2005 if (!armv8->is_armv7r) {
2006 retval = aarch64_mmu_modify(target, 0);
2007 if (retval != ERROR_OK)
2008 return retval;
2009 }
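/* Note: this early return bypasses the cache maintenance performed
 * below for the AHB-AP write path. */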
2010 return aarch64_write_apb_ab_memory(target, address, size, count, buffer);
2011 }
2012 }
2013
2014
2015 /* REVISIT this op is generic ARMv7-A/R stuff */
2016 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2017 struct arm_dpm *dpm = armv8->arm.dpm;
2018
2019 retval = dpm->prepare(dpm);
2020 if (retval != ERROR_OK)
2021 return retval;
2022
2023 /* The Cache handling will NOT work with MMU active, the
2024 * wrong addresses will be invalidated!
2025 *
2026 * For both ICache and DCache, walk all cache lines in the
2027 * address range. This assumes a fixed 64 byte line length, as on Cortex-A8.
2028 *
2029 * REVISIT per ARMv7, these may trigger watchpoints ...
2030 */
2031
2032 /* invalidate I-Cache */
2033 if (armv8->armv8_mmu.armv8_cache.i_cache_enabled) {
2034 /* ICIMVAU - Invalidate Cache single entry
2035 * with MVA to PoU
2036 * MCR p15, 0, r0, c7, c5, 1
2037 */
2038 for (uint32_t cacheline = address;
2039 cacheline < address + size * count;
2040 cacheline += 64) {
2041 retval = dpm->instr_write_data_r0(dpm,
2042 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2043 cacheline);
2044 if (retval != ERROR_OK)
2045 return retval;
2046 }
2047 }
2048
2049 /* invalidate D-Cache */
2050 if (armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
2051 /* DCIMVAC - Invalidate data Cache line
2052 * with MVA to PoC
2053 * MCR p15, 0, r0, c7, c6, 1
2054 */
2055 for (uint32_t cacheline = address;
2056 cacheline < address + size * count;
2057 cacheline += 64) {
2058 retval = dpm->instr_write_data_r0(dpm,
2059 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2060 cacheline);
2061 if (retval != ERROR_OK)
2062 return retval;
2063 }
2064 }
2065
2066 /* (void) */ dpm->finish(dpm);
2067 }
2068
2069 return retval;
2070 }
2071
2072 static int aarch64_write_memory(struct target *target, target_addr_t address,
2073 uint32_t size, uint32_t count, const uint8_t *buffer)
2074 {
2075 int mmu_enabled = 0;
2076 target_addr_t virt, phys;
2077 int retval;
2078 struct armv8_common *armv8 = target_to_armv8(target);
2079 struct adiv5_dap *swjdp = armv8->arm.dap;
2080 uint8_t apsel = swjdp->apsel;
2081
2082 /* aarch64 handles unaligned memory access */
2083 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
2084 "; count %" PRId32, address, size, count);
2085
2086 /* determine if MMU was enabled on target stop */
2087 if (!armv8->is_armv7r) {
2088 retval = aarch64_mmu(target, &mmu_enabled);
2089 if (retval != ERROR_OK)
2090 return retval;
2091 }
2092
2093 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2094 LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR "; size %"
2095 PRId32 "; count %" PRId32, address, size, count);
2096 if (mmu_enabled) {
2097 virt = address;
2098 retval = aarch64_virt2phys(target, virt, &phys);
2099 if (retval != ERROR_OK)
2100 return retval;
2101
2102 LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2103 TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR, virt, phys);
2104 address = phys;
2105 }
2106 retval = aarch64_write_phys_memory(target, address, size,
2107 count, buffer);
2108 } else {
2109 if (mmu_enabled) {
2110 retval = aarch64_check_address(target, address);
2111 if (retval != ERROR_OK)
2112 return retval;
2113 /* enable MMU as we could have disabled it for phys access */
2114 retval = aarch64_mmu_modify(target, 1);
2115 if (retval != ERROR_OK)
2116 return retval;
2117 }
2118 retval = aarch64_write_apb_ab_memory(target, address, size, count, buffer);
2119 }
2120 return retval;
2121 }
2122
2123 static int aarch64_handle_target_request(void *priv)
2124 {
2125 struct target *target = priv;
2126 struct armv8_common *armv8 = target_to_armv8(target);
2127 int retval;
2128
2129 if (!target_was_examined(target))
2130 return ERROR_OK;
2131 if (!target->dbg_msg_enabled)
2132 return ERROR_OK;
2133
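/* While the core is running, poll DSCR and drain any pending
 * target-to-host DCC data, one word at a time. */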
2134 if (target->state == TARGET_RUNNING) {
2135 uint32_t request;
2136 uint32_t dscr;
2137 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2138 armv8->debug_base + CPUDBG_DSCR, &dscr);
2139
2140 /* check if we have data */
2141 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2142 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2143 armv8->debug_base + CPUDBG_DTRTX, &request);
2144 if (retval == ERROR_OK) {
2145 target_request(target, request);
2146 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2147 armv8->debug_base + CPUDBG_DSCR, &dscr);
2148 }
2149 }
2150 }
2151
2152 return ERROR_OK;
2153 }
2154
2155 static int aarch64_examine_first(struct target *target)
2156 {
2157 struct aarch64_common *aarch64 = target_to_aarch64(target);
2158 struct armv8_common *armv8 = &aarch64->armv8_common;
2159 struct adiv5_dap *swjdp = armv8->arm.dap;
2160 int retval = ERROR_OK;
2161 uint32_t pfr, debug, ctypr, ttypr, cpuid;
2162 int i;
2163
2164 /* Make sure the debug port is initialized before touching any
2165 * debug registers; dap_dp_init() performs the generic DAP setup.
2166 */
2167 retval = dap_dp_init(swjdp);
2168 if (retval != ERROR_OK)
2169 return retval;
2170
2171 /* Search for the APB-AP - it is needed for access to debug registers */
2172 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2173 if (retval != ERROR_OK) {
2174 LOG_ERROR("Could not find APB-AP for debug access");
2175 return retval;
2176 }
2177
2178 retval = mem_ap_init(armv8->debug_ap);
2179 if (retval != ERROR_OK) {
2180 LOG_ERROR("Could not initialize the APB-AP");
2181 return retval;
2182 }
2183
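/* Extra TCK cycles between memory accesses, so that slow debug
 * logic has time to complete (a conservative default). */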
2184 armv8->debug_ap->memaccess_tck = 80;
2185
2186 /* Search for the AHB-AP */
2187 armv8->memory_ap_available = false;
2188 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv8->memory_ap);
2189 if (retval == ERROR_OK) {
2190 retval = mem_ap_init(armv8->memory_ap);
2191 if (retval == ERROR_OK)
2192 armv8->memory_ap_available = true;
2193 }
2194 if (retval != ERROR_OK) {
2195 /* AHB-AP not found or unavailable - use the CPU */
2196 LOG_DEBUG("No AHB-AP available for memory access");
2197 }
2198
2199
2200 if (!target->dbgbase_set) {
2201 uint32_t dbgbase;
2202 /* Get ROM Table base */
2203 uint32_t apid;
2204 int32_t coreidx = target->coreid;
2205 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2206 if (retval != ERROR_OK)
2207 return retval;
2208 /* Lookup 0x15 -- Processor DAP */
2209 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2210 &armv8->debug_base, &coreidx);
2211 if (retval != ERROR_OK)
2212 return retval;
2213 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2214 coreidx, armv8->debug_base);
2215 } else
2216 armv8->debug_base = target->dbgbase;
2217
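/* Clear the OS Lock (OSLAR_EL1, offset 0x300 in the external debug
 * register space) so the debug registers become accessible. */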
2218 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2219 armv8->debug_base + 0x300, 0);
2220 if (retval != ERROR_OK) {
2221 LOG_DEBUG("Examine %s failed", "oslock");
2222 return retval;
2223 }
2224
2225 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2226 armv8->debug_base + 0x88, &cpuid);
2227 LOG_DEBUG("0x88 = %x", cpuid);
2228
2229 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2230 armv8->debug_base + 0x314, &cpuid);
2231 LOG_DEBUG("0x314 = %x", cpuid);
2232
2233 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2234 armv8->debug_base + 0x310, &cpuid);
2235 LOG_DEBUG("0x310 = %x", cpuid);
2236 if (retval != ERROR_OK)
2237 return retval;
2238
2239 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2240 armv8->debug_base + CPUDBG_CPUID, &cpuid);
2241 if (retval != ERROR_OK) {
2242 LOG_DEBUG("Examine %s failed", "CPUID");
2243 return retval;
2244 }
2245
2246 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2247 armv8->debug_base + CPUDBG_CTYPR, &ctypr);
2248 if (retval != ERROR_OK) {
2249 LOG_DEBUG("Examine %s failed", "CTYPR");
2250 return retval;
2251 }
2252
2253 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2254 armv8->debug_base + CPUDBG_TTYPR, &ttypr);
2255 if (retval != ERROR_OK) {
2256 LOG_DEBUG("Examine %s failed", "TTYPR");
2257 return retval;
2258 }
2259
2260 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2261 armv8->debug_base + ID_AA64PFR0_EL1, &pfr);
2262 if (retval != ERROR_OK) {
2263 LOG_DEBUG("Examine %s failed", "ID_AA64PFR0_EL1");
2264 return retval;
2265 }
2266 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2267 armv8->debug_base + ID_AA64DFR0_EL1, &debug);
2268 if (retval != ERROR_OK) {
2269 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2270 return retval;
2271 }
2272
2273 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2274 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2275 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2276 LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32, pfr);
2277 LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32, debug);
2278
2279 armv8->arm.core_type = ARM_MODE_MON;
2280 armv8->arm.core_state = ARM_STATE_AARCH64;
2281 retval = aarch64_dpm_setup(aarch64, debug);
2282 if (retval != ERROR_OK)
2283 return retval;
2284
2285 /* Setup Breakpoint Register Pairs */
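/* ID_AA64DFR0_EL1: BRPs (bits [15:12]) and CTX_CMPs (bits [31:28])
 * each encode the number of comparators minus one. */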
2286 aarch64->brp_num = ((debug >> 12) & 0x0F) + 1;
2287 aarch64->brp_num_context = ((debug >> 28) & 0x0F) + 1;
2288
2289 /* hack - no context bpt support yet */
2290 aarch64->brp_num_context = 0;
2291
2292 aarch64->brp_num_available = aarch64->brp_num;
2293 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
if (aarch64->brp_list == NULL)
return ERROR_FAIL;
2294 for (i = 0; i < aarch64->brp_num; i++) {
2295 aarch64->brp_list[i].used = 0;
2296 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2297 aarch64->brp_list[i].type = BRP_NORMAL;
2298 else
2299 aarch64->brp_list[i].type = BRP_CONTEXT;
2300 aarch64->brp_list[i].value = 0;
2301 aarch64->brp_list[i].control = 0;
2302 aarch64->brp_list[i].BRPn = i;
2303 }
2304
2305 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2306
2307 target_set_examined(target);
2308 return ERROR_OK;
2309 }
2310
2311 static int aarch64_examine(struct target *target)
2312 {
2313 int retval = ERROR_OK;
2314
2315 /* don't re-probe hardware after each reset */
2316 if (!target_was_examined(target))
2317 retval = aarch64_examine_first(target);
2318
2319 /* Configure core debug access */
2320 if (retval == ERROR_OK)
2321 retval = aarch64_init_debug_access(target);
2322
2323 return retval;
2324 }
2325
2326 /*
2327 * aarch64 target creation and initialization
2328 */
2329
2330 static int aarch64_init_target(struct command_context *cmd_ctx,
2331 struct target *target)
2332 {
2333 /* examine_first() does a bunch of this */
2334 return ERROR_OK;
2335 }
2336
2337 static int aarch64_init_arch_info(struct target *target,
2338 struct aarch64_common *aarch64, struct jtag_tap *tap)
2339 {
2340 struct armv8_common *armv8 = &aarch64->armv8_common;
2344
2345 /* Setup struct aarch64_common */
2346 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2347 /* tap has no dap initialized */
2348 if (!tap->dap) {
2349 tap->dap = dap_init();
2350
2351 /* Leave (only) generic DAP stuff for debugport_init() */
2352 tap->dap->tap = tap;
2353 }
2354
2355 armv8->arm.dap = tap->dap;
2356
2357 aarch64->fast_reg_read = 0;
2358
2359 /* register arch-specific functions */
2360 armv8->examine_debug_reason = NULL;
2361
2362 armv8->post_debug_entry = aarch64_post_debug_entry;
2363
2364 armv8->pre_restore_context = NULL;
2365
2366 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2367
2368 /* REVISIT v7a setup should be in a v7a-specific routine */
2369 armv8_init_arch_info(target, armv8);
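/* Poll for pending DCC target messages every 1 ms (periodic callback) */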
2370 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2371
2372 return ERROR_OK;
2373 }
2374
2375 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2376 {
2377 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
if (aarch64 == NULL)
return ERROR_FAIL;
2378
2379 aarch64->armv8_common.is_armv7r = false;
2380
2381 return aarch64_init_arch_info(target, aarch64, target->tap);
2382 }
2383
2384 static int aarch64_mmu(struct target *target, int *enabled)
2385 {
2386 if (target->state != TARGET_HALTED) {
2387 LOG_ERROR("%s: target not halted", __func__);
2388 return ERROR_TARGET_INVALID;
2389 }
2390
2391 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2392 return ERROR_OK;
2393 }
2394
2395 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2396 target_addr_t *phys)
2397 {
2398 int retval = ERROR_FAIL;
2399 struct armv8_common *armv8 = target_to_armv8(target);
2400 struct adiv5_dap *swjdp = armv8->arm.dap;
2401 uint8_t apsel = swjdp->apsel;
2402 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2403 uint32_t ret;
2404 retval = armv8_mmu_translate_va(target,
2405 virt, &ret);
2406 if (retval != ERROR_OK)
2407 goto done;
2408 *phys = ret;
2409 } else {
/* Use this method if armv8->memory_ap is not selected;
 * the MMU must be enabled in order to get a correct translation. */
2411 retval = aarch64_mmu_modify(target, 1);
2412 if (retval != ERROR_OK)
2413 goto done;
2414 retval = armv8_mmu_translate_va_pa(target, virt, phys, 1);
2415 }
2416 done:
2417 return retval;
2418 }
2419
2420 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2421 {
2422 struct target *target = get_current_target(CMD_CTX);
2423 struct armv8_common *armv8 = target_to_armv8(target);
2424
2425 return armv8_handle_cache_info_command(CMD_CTX,
2426 &armv8->armv8_mmu.armv8_cache);
2427 }
2428
2429
2430 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2431 {
2432 struct target *target = get_current_target(CMD_CTX);
2433 if (!target_was_examined(target)) {
2434 LOG_ERROR("target not examined yet");
2435 return ERROR_FAIL;
2436 }
2437
2438 return aarch64_init_debug_access(target);
2439 }
2440 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2441 {
2442 struct target *target = get_current_target(CMD_CTX);
2443 /* check target is an smp target */
2444 struct target_list *head;
2445 struct target *curr;
2446 head = target->head;
2447 target->smp = 0;
2448 if (head != (struct target_list *)NULL) {
2449 while (head != (struct target_list *)NULL) {
2450 curr = head->target;
2451 curr->smp = 0;
2452 head = head->next;
2453 }
2454 /* restore the target reported to the debugger */
2455 target->gdb_service->target = target;
2456 }
2457 return ERROR_OK;
2458 }
2459
2460 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2461 {
2462 struct target *target = get_current_target(CMD_CTX);
2463 struct target_list *head;
2464 struct target *curr;
2465 head = target->head;
2466 if (head != (struct target_list *)NULL) {
2467 target->smp = 1;
2468 while (head != (struct target_list *)NULL) {
2469 curr = head->target;
2470 curr->smp = 1;
2471 head = head->next;
2472 }
2473 }
2474 return ERROR_OK;
2475 }
2476
2477 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2478 {
2479 struct target *target = get_current_target(CMD_CTX);
2480 int retval = ERROR_OK;
2481 struct target_list *head;
2482 head = target->head;
2483 if (head != (struct target_list *)NULL) {
2484 if (CMD_ARGC == 1) {
2485 int coreid = 0;
2486 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2487 if (ERROR_OK != retval)
2488 return retval;
2489 target->gdb_service->core[1] = coreid;
2490
2491 }
2492 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2493 , target->gdb_service->core[1]);
2494 }
2495 return ERROR_OK;
2496 }
2497
2498 static const struct command_registration aarch64_exec_command_handlers[] = {
2499 {
2500 .name = "cache_info",
2501 .handler = aarch64_handle_cache_info_command,
2502 .mode = COMMAND_EXEC,
2503 .help = "display information about target caches",
2504 .usage = "",
2505 },
2506 {
2507 .name = "dbginit",
2508 .handler = aarch64_handle_dbginit_command,
2509 .mode = COMMAND_EXEC,
2510 .help = "Initialize core debug",
2511 .usage = "",
2512 },
2513 { .name = "smp_off",
2514 .handler = aarch64_handle_smp_off_command,
2515 .mode = COMMAND_EXEC,
2516 .help = "Stop smp handling",
2517 .usage = "",
2518 },
2519 {
2520 .name = "smp_on",
2521 .handler = aarch64_handle_smp_on_command,
2522 .mode = COMMAND_EXEC,
2523 .help = "Restart smp handling",
2524 .usage = "",
2525 },
2526 {
2527 .name = "smp_gdb",
2528 .handler = aarch64_handle_smp_gdb_command,
2529 .mode = COMMAND_EXEC,
2530 .help = "display/fix current core played to gdb",
2531 .usage = "",
2532 },
2533
2534
2535 COMMAND_REGISTRATION_DONE
2536 };
2537 static const struct command_registration aarch64_command_handlers[] = {
2538 {
2539 .chain = arm_command_handlers,
2540 },
2541 {
2542 .chain = armv8_command_handlers,
2543 },
2544 {
2545 .name = "cortex_a",
2546 .mode = COMMAND_ANY,
2547 .help = "Cortex-A command group",
2548 .usage = "",
2549 .chain = aarch64_exec_command_handlers,
2550 },
2551 COMMAND_REGISTRATION_DONE
2552 };
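/* Illustrative usage from the OpenOCD command line (hypothetical target
 * name "cpu0"; exact invocation depends on the configuration):
 *   cpu0 cache_info    ; show cache information
 *   cpu0 dbginit       ; re-initialize core debug access
 *   cpu0 smp_gdb 1     ; pin gdb to core 1
 */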
2553
2554 struct target_type aarch64_target = {
2555 .name = "aarch64",
2556
2557 .poll = aarch64_poll,
2558 .arch_state = armv8_arch_state,
2559
2560 .halt = aarch64_halt,
2561 .resume = aarch64_resume,
2562 .step = aarch64_step,
2563
2564 .assert_reset = aarch64_assert_reset,
2565 .deassert_reset = aarch64_deassert_reset,
2566
2567 /* REVISIT allow exporting VFP3 registers ... */
2568 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2569
2570 .read_memory = aarch64_read_memory,
2571 .write_memory = aarch64_write_memory,
2572
2573 .checksum_memory = arm_checksum_memory,
2574 .blank_check_memory = arm_blank_check_memory,
2575
2576 .run_algorithm = armv4_5_run_algorithm,
2577
2578 .add_breakpoint = aarch64_add_breakpoint,
2579 .add_context_breakpoint = aarch64_add_context_breakpoint,
2580 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2581 .remove_breakpoint = aarch64_remove_breakpoint,
2582 .add_watchpoint = NULL,
2583 .remove_watchpoint = NULL,
2584
2585 .commands = aarch64_command_handlers,
2586 .target_create = aarch64_target_create,
2587 .init_target = aarch64_init_target,
2588 .examine = aarch64_examine,
2589
2590 .read_phys_memory = aarch64_read_phys_memory,
2591 .write_phys_memory = aarch64_write_phys_memory,
2592 .mmu = aarch64_mmu,
2593 .virt2phys = aarch64_virt2phys,
2594 };
