aarch64: add basic Aarch32 support
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
/* Forward declarations for functions defined later in this file. */
static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_apb_ap_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data);
51
52 static int aarch64_restore_system_control_reg(struct target *target)
53 {
54 int retval = ERROR_OK;
55
56 struct aarch64_common *aarch64 = target_to_aarch64(target);
57 struct armv8_common *armv8 = target_to_armv8(target);
58
59 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
60 aarch64->system_control_reg_curr = aarch64->system_control_reg;
61 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
62
63 switch (armv8->arm.core_mode) {
64 case ARMV8_64_EL0T:
65 case ARMV8_64_EL1T:
66 case ARMV8_64_EL1H:
67 retval = armv8->arm.msr(target, 3, /*op 0*/
68 0, 1, /* op1, op2 */
69 0, 0, /* CRn, CRm */
70 aarch64->system_control_reg);
71 if (retval != ERROR_OK)
72 return retval;
73 break;
74 case ARMV8_64_EL2T:
75 case ARMV8_64_EL2H:
76 retval = armv8->arm.msr(target, 3, /*op 0*/
77 4, 1, /* op1, op2 */
78 0, 0, /* CRn, CRm */
79 aarch64->system_control_reg);
80 if (retval != ERROR_OK)
81 return retval;
82 break;
83 case ARMV8_64_EL3H:
84 case ARMV8_64_EL3T:
85 retval = armv8->arm.msr(target, 3, /*op 0*/
86 6, 1, /* op1, op2 */
87 0, 0, /* CRn, CRm */
88 aarch64->system_control_reg);
89 if (retval != ERROR_OK)
90 return retval;
91 break;
92 default:
93 retval = armv8->arm.mcr(target, 15, 0, 0, 1, 0, aarch64->system_control_reg);
94 if (retval != ERROR_OK)
95 return retval;
96 break;
97 }
98 }
99 return retval;
100 }
101
/* check address before aarch64_apb read write access with mmu on
 * remove apb predictible data abort */
/*
 * Intended to validate @address before an APB memory access while the MMU
 * is on, so an unmapped address does not cause a predictable data abort.
 * Currently a stub that accepts every address.
 */
static int aarch64_check_address(struct target *target, uint32_t address)
{
	/* TODO: implement the validation described above */
	return ERROR_OK;
}
109 /* modify system_control_reg in order to enable or disable mmu for :
110 * - virt2phys address conversion
111 * - read or write memory in phys or virt address */
112 static int aarch64_mmu_modify(struct target *target, int enable)
113 {
114 struct aarch64_common *aarch64 = target_to_aarch64(target);
115 struct armv8_common *armv8 = &aarch64->armv8_common;
116 int retval = ERROR_OK;
117
118 if (enable) {
119 /* if mmu enabled at target stop and mmu not enable */
120 if (!(aarch64->system_control_reg & 0x1U)) {
121 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
122 return ERROR_FAIL;
123 }
124 if (!(aarch64->system_control_reg_curr & 0x1U)) {
125 aarch64->system_control_reg_curr |= 0x1U;
126 switch (armv8->arm.core_mode) {
127 case ARMV8_64_EL0T:
128 case ARMV8_64_EL1T:
129 case ARMV8_64_EL1H:
130 retval = armv8->arm.msr(target, 3, /*op 0*/
131 0, 0, /* op1, op2 */
132 1, 0, /* CRn, CRm */
133 aarch64->system_control_reg_curr);
134 if (retval != ERROR_OK)
135 return retval;
136 break;
137 case ARMV8_64_EL2T:
138 case ARMV8_64_EL2H:
139 retval = armv8->arm.msr(target, 3, /*op 0*/
140 4, 0, /* op1, op2 */
141 1, 0, /* CRn, CRm */
142 aarch64->system_control_reg_curr);
143 if (retval != ERROR_OK)
144 return retval;
145 break;
146 case ARMV8_64_EL3H:
147 case ARMV8_64_EL3T:
148 retval = armv8->arm.msr(target, 3, /*op 0*/
149 6, 0, /* op1, op2 */
150 1, 0, /* CRn, CRm */
151 aarch64->system_control_reg_curr);
152 if (retval != ERROR_OK)
153 return retval;
154 break;
155 default:
156 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
157 }
158 }
159 } else {
160 if (aarch64->system_control_reg_curr & 0x4U) {
161 /* data cache is active */
162 aarch64->system_control_reg_curr &= ~0x4U;
163 /* flush data cache armv7 function to be called */
164 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
165 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
166 }
167 if ((aarch64->system_control_reg_curr & 0x1U)) {
168 aarch64->system_control_reg_curr &= ~0x1U;
169 switch (armv8->arm.core_mode) {
170 case ARMV8_64_EL0T:
171 case ARMV8_64_EL1T:
172 case ARMV8_64_EL1H:
173 retval = armv8->arm.msr(target, 3, /*op 0*/
174 0, 0, /* op1, op2 */
175 1, 0, /* CRn, CRm */
176 aarch64->system_control_reg_curr);
177 if (retval != ERROR_OK)
178 return retval;
179 break;
180 case ARMV8_64_EL2T:
181 case ARMV8_64_EL2H:
182 retval = armv8->arm.msr(target, 3, /*op 0*/
183 4, 0, /* op1, op2 */
184 1, 0, /* CRn, CRm */
185 aarch64->system_control_reg_curr);
186 if (retval != ERROR_OK)
187 return retval;
188 break;
189 case ARMV8_64_EL3H:
190 case ARMV8_64_EL3T:
191 retval = armv8->arm.msr(target, 3, /*op 0*/
192 6, 0, /* op1, op2 */
193 1, 0, /* CRn, CRm */
194 aarch64->system_control_reg_curr);
195 if (retval != ERROR_OK)
196 return retval;
197 break;
198 default:
199 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
200 break;
201 }
202 }
203 }
204 return retval;
205 }
206
/*
 * Basic debug access, very low level assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain.  The read itself has the
	   clearing side effect; the value is discarded. */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	/* By default, gate all channel triggers to and from the CTM */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, 0);
	/* output halt requests to PE on channel 0 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return aarch64_poll(target);
}
255
256 /* To reduce needless round-trips, pass in a pointer to the current
257 * DSCR value. Initialize it to zero if you just need to know the
258 * value on return from this function; or DSCR_ITE if you
259 * happen to know that no instruction is pending.
260 */
261 static int aarch64_exec_opcode(struct target *target,
262 uint32_t opcode, uint32_t *dscr_p)
263 {
264 uint32_t dscr;
265 int retval;
266 struct armv8_common *armv8 = target_to_armv8(target);
267 dscr = dscr_p ? *dscr_p : 0;
268
269 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
270
271 /* Wait for InstrCompl bit to be set */
272 long long then = timeval_ms();
273 while ((dscr & DSCR_ITE) == 0) {
274 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
275 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
276 if (retval != ERROR_OK) {
277 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
278 return retval;
279 }
280 if (timeval_ms() > then + 1000) {
281 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
282 return ERROR_FAIL;
283 }
284 }
285
286 retval = mem_ap_write_u32(armv8->debug_ap,
287 armv8->debug_base + CPUV8_DBG_ITR, opcode);
288 if (retval != ERROR_OK)
289 return retval;
290
291 then = timeval_ms();
292 do {
293 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
294 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
295 if (retval != ERROR_OK) {
296 LOG_ERROR("Could not read DSCR register");
297 return retval;
298 }
299 if (timeval_ms() > then + 1000) {
300 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
301 return ERROR_FAIL;
302 }
303 } while ((dscr & DSCR_ITE) == 0); /* Wait for InstrCompl bit to be set */
304
305 if (dscr_p)
306 *dscr_p = dscr;
307
308 return retval;
309 }
310
311 /* Write to memory mapped registers directly with no cache or mmu handling */
312 static int aarch64_dap_write_memap_register_u32(struct target *target,
313 uint32_t address,
314 uint32_t value)
315 {
316 int retval;
317 struct armv8_common *armv8 = target_to_armv8(target);
318
319 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
320
321 return retval;
322 }
323
324 /*
325 * AARCH64 implementation of Debug Programmer's Model
326 *
327 * NOTE the invariant: these routines return with DSCR_ITE set,
328 * so there's no need to poll for it before executing an instruction.
329 *
330 * NOTE that in several of these cases the "stall" mode might be useful.
331 * It'd let us queue a few operations together... prepare/finish might
332 * be the places to enable/disable that mode.
333 */
334
/* Recover the enclosing aarch64_common from its embedded arm_dpm member. */
static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct aarch64_common, armv8_common.dpm);
}
339
/* Push a 32-bit value into the DCC via DTRRX (debugger-to-core direction). */
static int aarch64_write_dcc(struct armv8_common *armv8, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRRX, data);
}
346
347 static int aarch64_write_dcc_64(struct armv8_common *armv8, uint64_t data)
348 {
349 int ret;
350 LOG_DEBUG("write DCC Low word0x%08" PRIx32, (unsigned)data);
351 LOG_DEBUG("write DCC High word 0x%08" PRIx32, (unsigned)(data >> 32));
352 ret = mem_ap_write_u32(armv8->debug_ap,
353 armv8->debug_base + CPUV8_DBG_DTRRX, data);
354 ret += mem_ap_write_u32(armv8->debug_ap,
355 armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
356 return ret;
357 }
358
359 static int aarch64_read_dcc(struct armv8_common *armv8, uint32_t *data,
360 uint32_t *dscr_p)
361 {
362 uint32_t dscr = DSCR_ITE;
363 int retval;
364
365 if (dscr_p)
366 dscr = *dscr_p;
367
368 /* Wait for DTRRXfull */
369 long long then = timeval_ms();
370 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
371 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
372 armv8->debug_base + CPUV8_DBG_DSCR,
373 &dscr);
374 if (retval != ERROR_OK)
375 return retval;
376 if (timeval_ms() > then + 1000) {
377 LOG_ERROR("Timeout waiting for read dcc");
378 return ERROR_FAIL;
379 }
380 }
381
382 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
383 armv8->debug_base + CPUV8_DBG_DTRTX,
384 data);
385 if (retval != ERROR_OK)
386 return retval;
387 LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
388
389 if (dscr_p)
390 *dscr_p = dscr;
391
392 return retval;
393 }
394
395 static int aarch64_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
396 uint32_t *dscr_p)
397 {
398 uint32_t dscr = DSCR_ITE;
399 uint32_t higher;
400 int retval;
401
402 if (dscr_p)
403 dscr = *dscr_p;
404
405 /* Wait for DTRRXfull */
406 long long then = timeval_ms();
407 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
408 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
409 armv8->debug_base + CPUV8_DBG_DSCR,
410 &dscr);
411 if (retval != ERROR_OK)
412 return retval;
413 if (timeval_ms() > then + 1000) {
414 LOG_ERROR("Timeout waiting for read dcc");
415 return ERROR_FAIL;
416 }
417 }
418
419 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
420 armv8->debug_base + CPUV8_DBG_DTRTX,
421 (uint32_t *)data);
422 if (retval != ERROR_OK)
423 return retval;
424
425 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
426 armv8->debug_base + CPUV8_DBG_DTRRX,
427 &higher);
428 if (retval != ERROR_OK)
429 return retval;
430
431 *data = *(uint32_t *)data | (uint64_t)higher << 32;
432 LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
433
434 if (dscr_p)
435 *dscr_p = dscr;
436
437 return retval;
438 }
439
/*
 * Prepare for a batch of DPM operations: wait until the ITR is idle and
 * make sure the DCC receive path is clean, establishing the module-wide
 * invariant that DSCR_ITE is set when a DPM routine starts.
 */
static int aarch64_dpm_prepare(struct arm_dpm *dpm)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_ITE) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by draining it; the value read is discarded
		 * (dscr is reused here only as scratch storage) */
		retval = mem_ap_read_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DTRRX, &dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Clear sticky error */
		retval = mem_ap_write_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
480
/* Counterpart of aarch64_dpm_prepare(); currently nothing to tear down. */
static int aarch64_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
486
487 static int aarch64_instr_execute(struct arm_dpm *dpm,
488 uint32_t opcode)
489 {
490 struct aarch64_common *a8 = dpm_to_a8(dpm);
491 uint32_t dscr = DSCR_ITE;
492
493 return aarch64_exec_opcode(
494 a8->armv8_common.arm.target,
495 opcode,
496 &dscr);
497 }
498
499 static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
500 uint32_t opcode, uint32_t data)
501 {
502 struct aarch64_common *a8 = dpm_to_a8(dpm);
503 int retval;
504 uint32_t dscr = DSCR_ITE;
505
506 retval = aarch64_write_dcc(&a8->armv8_common, data);
507 if (retval != ERROR_OK)
508 return retval;
509
510 return aarch64_exec_opcode(
511 a8->armv8_common.arm.target,
512 opcode,
513 &dscr);
514 }
515
516 static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
517 uint32_t opcode, uint64_t data)
518 {
519 struct aarch64_common *a8 = dpm_to_a8(dpm);
520 int retval;
521 uint32_t dscr = DSCR_ITE;
522
523 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
524 if (retval != ERROR_OK)
525 return retval;
526
527 return aarch64_exec_opcode(
528 a8->armv8_common.arm.target,
529 opcode,
530 &dscr);
531 }
532
/*
 * Write a 32-bit value through R0/W0: place it in the DCC, move it into the
 * register with the DTRRX-read opcode, then execute @opcode which takes its
 * operand from R0.  The register is clobbered; callers must restore it.
 */
static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);

	uint32_t dscr = DSCR_ITE;
	int retval;

	retval = aarch64_write_dcc(&a8->armv8_common, data);
	if (retval != ERROR_OK)
		return retval;

	/* move the staged DCC value into R0/W0 (opcode depends on core state) */
	retval = aarch64_exec_opcode(
		a8->armv8_common.arm.target, armv8_opcode(&a8->armv8_common, READ_REG_DTRRX), &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = aarch64_exec_opcode(
		a8->armv8_common.arm.target,
		opcode,
		&dscr);

	return retval;
}
558
/*
 * 64-bit variant of aarch64_instr_write_data_r0: stage the value in the
 * DCC, load it into X0 with "mrs x0, dbgdtr_el0", then run @opcode which
 * takes its operand from X0.  X0 is clobbered; callers must restore it.
 */
static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
	uint32_t opcode, uint64_t data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	retval = aarch64_write_dcc_64(&a8->armv8_common, data);
	if (retval != ERROR_OK)
		return retval;

	/* mrs x0, dbgdtr_el0: pull the staged 64-bit value into X0 */
	retval = aarch64_exec_opcode(
		a8->armv8_common.arm.target,
		ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0),
		&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = aarch64_exec_opcode(
		a8->armv8_common.arm.target,
		opcode,
		&dscr);

	return retval;
}
585
/*
 * Synchronize the core after a CPSR/PSTATE change by issuing a barrier
 * (DSB SY via armv8_opcode, which selects the encoding for the core state).
 */
static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	struct armv8_common *armv8 = target_to_armv8(target);
	uint32_t dscr = DSCR_ITE;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return aarch64_exec_opcode(target, armv8_opcode(armv8, ARMV8_OPC_DSB_SY), &dscr);
}
595
596 static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
597 uint32_t opcode, uint32_t *data)
598 {
599 struct aarch64_common *a8 = dpm_to_a8(dpm);
600 int retval;
601 uint32_t dscr = DSCR_ITE;
602
603 /* the opcode, writing data to DCC */
604 retval = aarch64_exec_opcode(
605 a8->armv8_common.arm.target,
606 opcode,
607 &dscr);
608 if (retval != ERROR_OK)
609 return retval;
610
611 return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
612 }
613
614 static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
615 uint32_t opcode, uint64_t *data)
616 {
617 struct aarch64_common *a8 = dpm_to_a8(dpm);
618 int retval;
619 uint32_t dscr = DSCR_ITE;
620
621 /* the opcode, writing data to DCC */
622 retval = aarch64_exec_opcode(
623 a8->armv8_common.arm.target,
624 opcode,
625 &dscr);
626 if (retval != ERROR_OK)
627 return retval;
628
629 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
630 }
631
/*
 * Read a 32-bit value through R0/W0: run @opcode which leaves its result in
 * R0, copy R0 to the DCC with the DTRTX-write opcode, then read the DCC.
 * R0 is clobbered; callers must restore it.
 */
static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	/* the opcode, writing data to R0 */
	retval = aarch64_exec_opcode(
		a8->armv8_common.arm.target,
		opcode,
		&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	retval = aarch64_exec_opcode(
		a8->armv8_common.arm.target, armv8_opcode(&a8->armv8_common, WRITE_REG_DTRTX), &dscr);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
}
655
/*
 * 64-bit variant of aarch64_instr_read_data_r0: run @opcode which leaves its
 * result in X0, move X0 to the DCC with "msr dbgdtr_el0, x0", then read both
 * DCC halves.  X0 is clobbered; callers must restore it.
 */
static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
	uint32_t opcode, uint64_t *data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	/* the opcode, writing data to R0 */
	retval = aarch64_exec_opcode(
		a8->armv8_common.arm.target,
		opcode,
		&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	retval = aarch64_exec_opcode(
		a8->armv8_common.arm.target,
		ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), /* msr dbgdtr_el0, x0 */
		&dscr);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
}
681
/*
 * Program one breakpoint (index 0..15) or watchpoint (index 16..31) unit:
 * write the value register, then the control register.  Registers are laid
 * out at a 16-byte stride from the BVR/BCR (or WVR/WCR) base.
 * NOTE: the "case 0 ... 15" range syntax is a GCC/Clang extension.
 */
static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv8_common.debug_base;
	uint32_t cr = a8->armv8_common.debug_base;
	int retval;

	switch (index_t) {
	case 0 ... 15:	/* breakpoints */
		vr += CPUV8_DBG_BVR_BASE;
		cr += CPUV8_DBG_BCR_BASE;
		break;
	case 16 ... 31:	/* watchpoints */
		vr += CPUV8_DBG_WVR_BASE;
		cr += CPUV8_DBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	vr += 16 * index_t;
	cr += 16 * index_t;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	/* value first, then control: the unit arms when control is written */
	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
717
/*
 * Disable one breakpoint (index 0..15) or watchpoint (index 16..31) unit by
 * clearing its control register; the value register is left untouched.
 */
static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	struct aarch64_common *a = dpm_to_a8(dpm);
	uint32_t cr;

	switch (index_t) {
	case 0 ... 15:
		cr = a->armv8_common.debug_base + CPUV8_DBG_BCR_BASE;
		break;
	case 16 ... 31:
		cr = a->armv8_common.debug_base + CPUV8_DBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	cr += 16 * index_t;

	LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);

}
742
/*
 * Wire this file's DCC/ITR primitives into the generic ARMv8 Debug
 * Programmer's Model dispatch table, then run the common setup and
 * initialization.  @debug is stored as the DIDR value.
 */
static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
{
	struct arm_dpm *dpm = &a8->armv8_common.dpm;
	int retval;

	dpm->arm = &a8->armv8_common.arm;
	dpm->didr = debug;

	dpm->prepare = aarch64_dpm_prepare;
	dpm->finish = aarch64_dpm_finish;

	dpm->instr_execute = aarch64_instr_execute;
	dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
	dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
	dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
	dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
	dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;

	dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
	dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
	dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
	dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;

	dpm->arm_reg_current = armv8_reg_current;

	dpm->bpwp_enable = aarch64_bpwp_enable;
	dpm->bpwp_disable = aarch64_bpwp_disable;

	retval = armv8_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = armv8_dpm_initialize(dpm);

	return retval;
}
777 static struct target *get_aarch64(struct target *target, int32_t coreid)
778 {
779 struct target_list *head;
780 struct target *curr;
781
782 head = target->head;
783 while (head != (struct target_list *)NULL) {
784 curr = head->target;
785 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
786 return curr;
787 head = head->next;
788 }
789 return target;
790 }
791 static int aarch64_halt(struct target *target);
792
793 static int aarch64_halt_smp(struct target *target)
794 {
795 int retval = ERROR_OK;
796 struct target_list *head = target->head;
797
798 while (head != (struct target_list *)NULL) {
799 struct target *curr = head->target;
800 struct armv8_common *armv8 = target_to_armv8(curr);
801
802 /* open the gate for channel 0 to let HALT requests pass to the CTM */
803 if (curr->smp)
804 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
805 armv8->cti_base + CTI_GATE, CTI_CHNL(0));
806 if (retval != ERROR_OK)
807 break;
808
809 head = head->next;
810 }
811
812 /* halt the target PE */
813 if (retval == ERROR_OK)
814 retval = aarch64_halt(target);
815
816 return retval;
817 }
818
819 static int update_halt_gdb(struct target *target)
820 {
821 int retval = 0;
822 if (target->gdb_service && target->gdb_service->core[0] == -1) {
823 target->gdb_service->target = target;
824 target->gdb_service->core[0] = target->coreid;
825 retval += aarch64_halt_smp(target);
826 }
827 return retval;
828 }
829
/*
 * Cortex-A8 Run control
 */

/*
 * Poll the target: read DSCR, derive run/halt state and fire the matching
 * gdb/target events.  Also implements the SMP core-toggle handshake with
 * gdb (see the comment below).
 */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for use by debug entry */
	aarch64->cpudbg_dscr = dscr;

	/* RUN_MODE == 0x3 indicates the PE is halted in debug state */
	if (DSCR_RUN_MODE(dscr) == 0x3) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
899
/*
 * Halt the PE: enable halting debug mode (HDE) in DSCR, pulse CTI channel 0
 * to issue the halt request, then wait up to one second for DSCR to report
 * the halted state.
 */
static int aarch64_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv8_common *armv8 = target_to_armv8(target);

	/*
	 * add HDE in halting debug mode
	 */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
	if (retval != ERROR_OK)
		return retval;

	/* wait for the PE to enter debug state */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCRV8_HALT_MASK) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
941
/*
 * Prepare the core for resuming: fix up and write back the PC, restore the
 * system control register and the register context, and mark the target as
 * running.  @current selects resume-at-PC; otherwise *address is used, and
 * in either case *address is left holding the actual resume PC.
 * The PE is not actually restarted here; see aarch64_internal_restart().
 */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32: instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64: instructions are word aligned */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* NOTE(review): return value of dpmv8_modeswitch is ignored here —
	 * a failed mode switch would go unnoticed; consider checking it */
	dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return retval;
}
1003
/*
 * Restart the PE after aarch64_internal_restore(): acknowledge the pending
 * halt event, open the CTI gate for the restart channel and, unless this is
 * a slave PE of an SMP group (@slave_pe), pulse channel 1 and wait for the
 * core to leave debug state.
 */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	if (!slave_pe) {
		/* trigger an event on channel 1, generates a restart request to the PE */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
		if (retval != ERROR_OK)
			return retval;

		/* wait for the PE to report it has restarted */
		long long then = timeval_ms();
		for (;; ) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			if (retval != ERROR_OK)
				return retval;
			if ((dscr & DSCR_HDE) != 0)
				break;
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for resume");
				return ERROR_FAIL;
			}
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1070
/*
 * Resume every non-running core of the SMP group except @target itself,
 * each at its current PC (not stepping), as slave PEs of the restart event.
 * NOTE(review): errors are accumulated with "+=", so a failure on one core
 * does not stop the loop and the returned code may be a sum of error codes.
 */
static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
{
	int retval = 0;
	struct target_list *head;
	struct target *curr;
	uint64_t address;
	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr != target) && (curr->state != TARGET_RUNNING)) {
			/* resume current address , not in step mode */
			/* address is an output here since current == 1 */
			retval += aarch64_internal_restore(curr, 1, &address,
					handle_breakpoints, 0);
			retval += aarch64_internal_restart(curr, true);
		}
		head = head->next;

	}
	return retval;
}
1091
1092 static int aarch64_resume(struct target *target, int current,
1093 target_addr_t address, int handle_breakpoints, int debug_execution)
1094 {
1095 int retval = 0;
1096 uint64_t addr = address;
1097
1098 /* dummy resume for smp toggle in order to reduce gdb impact */
1099 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1100 /* simulate a start and halt of target */
1101 target->gdb_service->target = NULL;
1102 target->gdb_service->core[0] = target->gdb_service->core[1];
1103 /* fake resume at next poll we play the target core[1], see poll*/
1104 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1105 return 0;
1106 }
1107 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
1108 debug_execution);
1109 if (target->smp) {
1110 target->gdb_service->core[0] = -1;
1111 retval = aarch64_restore_smp(target, handle_breakpoints);
1112 if (retval != ERROR_OK)
1113 return retval;
1114 }
1115 aarch64_internal_restart(target, false);
1116
1117 if (!debug_execution) {
1118 target->state = TARGET_RUNNING;
1119 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1120 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
1121 } else {
1122 target->state = TARGET_DEBUG_RUNNING;
1123 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1124 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
1125 }
1126
1127 return ERROR_OK;
1128 }
1129
1130 static int aarch64_debug_entry(struct target *target)
1131 {
1132 int retval = ERROR_OK;
1133 struct aarch64_common *aarch64 = target_to_aarch64(target);
1134 struct armv8_common *armv8 = target_to_armv8(target);
1135
1136 LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
1137
1138 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1139 * imprecise data aborts get discarded by issuing a Data
1140 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1141 */
1142
1143 /* make sure to clear all sticky errors */
1144 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1145 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1146 if (retval != ERROR_OK)
1147 return retval;
1148
1149 /* Examine debug reason */
1150 armv8_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
1151
1152 /* save address of instruction that triggered the watchpoint? */
1153 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1154 uint32_t tmp;
1155 uint64_t wfar = 0;
1156
1157 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1158 armv8->debug_base + CPUV8_DBG_WFAR1,
1159 &tmp);
1160 if (retval != ERROR_OK)
1161 return retval;
1162 wfar = tmp;
1163 wfar = (wfar << 32);
1164 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1165 armv8->debug_base + CPUV8_DBG_WFAR0,
1166 &tmp);
1167 if (retval != ERROR_OK)
1168 return retval;
1169 wfar |= tmp;
1170 armv8_dpm_report_wfar(&armv8->dpm, wfar);
1171 }
1172
1173 retval = armv8_dpm_read_current_registers(&armv8->dpm);
1174
1175 if (armv8->post_debug_entry) {
1176 retval = armv8->post_debug_entry(target);
1177 if (retval != ERROR_OK)
1178 return retval;
1179 }
1180
1181 return retval;
1182 }
1183
1184 static int aarch64_post_debug_entry(struct target *target)
1185 {
1186 struct aarch64_common *aarch64 = target_to_aarch64(target);
1187 struct armv8_common *armv8 = &aarch64->armv8_common;
1188 int retval;
1189
1190 /* clear sticky errors */
1191 mem_ap_write_atomic_u32(armv8->debug_ap,
1192 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1193
1194 switch (armv8->arm.core_mode) {
1195 case ARMV8_64_EL0T:
1196 case ARMV8_64_EL1T:
1197 case ARMV8_64_EL1H:
1198 retval = armv8->arm.mrs(target, 3, /*op 0*/
1199 0, 0, /* op1, op2 */
1200 1, 0, /* CRn, CRm */
1201 &aarch64->system_control_reg);
1202 if (retval != ERROR_OK)
1203 return retval;
1204 break;
1205 case ARMV8_64_EL2T:
1206 case ARMV8_64_EL2H:
1207 retval = armv8->arm.mrs(target, 3, /*op 0*/
1208 4, 0, /* op1, op2 */
1209 1, 0, /* CRn, CRm */
1210 &aarch64->system_control_reg);
1211 if (retval != ERROR_OK)
1212 return retval;
1213 break;
1214 case ARMV8_64_EL3H:
1215 case ARMV8_64_EL3T:
1216 retval = armv8->arm.mrs(target, 3, /*op 0*/
1217 6, 0, /* op1, op2 */
1218 1, 0, /* CRn, CRm */
1219 &aarch64->system_control_reg);
1220 if (retval != ERROR_OK)
1221 return retval;
1222 break;
1223 default:
1224 retval = armv8->arm.mrc(target, 15, 0, 0, 1, 0, &aarch64->system_control_reg);
1225 if (retval != ERROR_OK)
1226 return retval;
1227 break;
1228 }
1229
1230 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1231 aarch64->system_control_reg_curr = aarch64->system_control_reg;
1232
1233 if (armv8->armv8_mmu.armv8_cache.ctype == -1)
1234 armv8_identify_cache(target);
1235
1236 armv8->armv8_mmu.mmu_enabled =
1237 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1238 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1239 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1240 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1241 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1242 aarch64->curr_mode = armv8->arm.core_mode;
1243 return ERROR_OK;
1244 }
1245
1246 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1247 {
1248 struct armv8_common *armv8 = target_to_armv8(target);
1249 uint32_t dscr;
1250
1251 /* Read DSCR */
1252 int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1253 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1254 if (ERROR_OK != retval)
1255 return retval;
1256
1257 /* clear bitfield */
1258 dscr &= ~bit_mask;
1259 /* put new value */
1260 dscr |= value & bit_mask;
1261
1262 /* write new DSCR */
1263 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1264 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1265 return retval;
1266 }
1267
1268 static int aarch64_step(struct target *target, int current, target_addr_t address,
1269 int handle_breakpoints)
1270 {
1271 struct armv8_common *armv8 = target_to_armv8(target);
1272 int retval;
1273 uint32_t edecr;
1274
1275 if (target->state != TARGET_HALTED) {
1276 LOG_WARNING("target not halted");
1277 return ERROR_TARGET_NOT_HALTED;
1278 }
1279
1280 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1281 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1282 if (retval != ERROR_OK)
1283 return retval;
1284
1285 /* make sure EDECR.SS is not set when restoring the register */
1286 edecr &= ~0x4;
1287
1288 /* set EDECR.SS to enter hardware step mode */
1289 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1290 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1291 if (retval != ERROR_OK)
1292 return retval;
1293
1294 /* disable interrupts while stepping */
1295 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1296 if (retval != ERROR_OK)
1297 return ERROR_OK;
1298
1299 /* resume the target */
1300 retval = aarch64_resume(target, current, address, 0, 0);
1301 if (retval != ERROR_OK)
1302 return retval;
1303
1304 long long then = timeval_ms();
1305 while (target->state != TARGET_HALTED) {
1306 retval = aarch64_poll(target);
1307 if (retval != ERROR_OK)
1308 return retval;
1309 if (timeval_ms() > then + 1000) {
1310 LOG_ERROR("timeout waiting for target halt");
1311 return ERROR_FAIL;
1312 }
1313 }
1314
1315 /* restore EDECR */
1316 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1317 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1318 if (retval != ERROR_OK)
1319 return retval;
1320
1321 /* restore interrupts */
1322 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1323 if (retval != ERROR_OK)
1324 return ERROR_OK;
1325
1326 return ERROR_OK;
1327 }
1328
1329 static int aarch64_restore_context(struct target *target, bool bpwp)
1330 {
1331 struct armv8_common *armv8 = target_to_armv8(target);
1332
1333 LOG_DEBUG(" ");
1334
1335 if (armv8->pre_restore_context)
1336 armv8->pre_restore_context(target);
1337
1338 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1339
1340 }
1341
/*
 * AArch64 breakpoint and watchpoint functions
 */
1345
1346 /* Setup hardware Breakpoint Register Pair */
1347 static int aarch64_set_breakpoint(struct target *target,
1348 struct breakpoint *breakpoint, uint8_t matchmode)
1349 {
1350 int retval;
1351 int brp_i = 0;
1352 uint32_t control;
1353 uint8_t byte_addr_select = 0x0F;
1354 struct aarch64_common *aarch64 = target_to_aarch64(target);
1355 struct armv8_common *armv8 = &aarch64->armv8_common;
1356 struct aarch64_brp *brp_list = aarch64->brp_list;
1357 uint32_t dscr;
1358
1359 if (breakpoint->set) {
1360 LOG_WARNING("breakpoint already set");
1361 return ERROR_OK;
1362 }
1363
1364 if (breakpoint->type == BKPT_HARD) {
1365 int64_t bpt_value;
1366 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1367 brp_i++;
1368 if (brp_i >= aarch64->brp_num) {
1369 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1370 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1371 }
1372 breakpoint->set = brp_i + 1;
1373 if (breakpoint->length == 2)
1374 byte_addr_select = (3 << (breakpoint->address & 0x02));
1375 control = ((matchmode & 0x7) << 20)
1376 | (1 << 13)
1377 | (byte_addr_select << 5)
1378 | (3 << 1) | 1;
1379 brp_list[brp_i].used = 1;
1380 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1381 brp_list[brp_i].control = control;
1382 bpt_value = brp_list[brp_i].value;
1383
1384 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1385 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1386 (uint32_t)(bpt_value & 0xFFFFFFFF));
1387 if (retval != ERROR_OK)
1388 return retval;
1389 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1390 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1391 (uint32_t)(bpt_value >> 32));
1392 if (retval != ERROR_OK)
1393 return retval;
1394
1395 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1396 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1397 brp_list[brp_i].control);
1398 if (retval != ERROR_OK)
1399 return retval;
1400 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1401 brp_list[brp_i].control,
1402 brp_list[brp_i].value);
1403
1404 } else if (breakpoint->type == BKPT_SOFT) {
1405 uint8_t code[4];
1406
1407 buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
1408 retval = target_read_memory(target,
1409 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1410 breakpoint->length, 1,
1411 breakpoint->orig_instr);
1412 if (retval != ERROR_OK)
1413 return retval;
1414
1415 armv8_cache_d_inner_flush_virt(armv8,
1416 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1417 breakpoint->length);
1418
1419 retval = target_write_memory(target,
1420 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1421 breakpoint->length, 1, code);
1422 if (retval != ERROR_OK)
1423 return retval;
1424
1425 armv8_cache_d_inner_flush_virt(armv8,
1426 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1427 breakpoint->length);
1428
1429 armv8_cache_i_inner_inval_virt(armv8,
1430 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1431 breakpoint->length);
1432
1433 breakpoint->set = 0x11; /* Any nice value but 0 */
1434 }
1435
1436 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1437 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1438 /* Ensure that halting debug mode is enable */
1439 dscr = dscr | DSCR_HDE;
1440 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1441 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1442 if (retval != ERROR_OK) {
1443 LOG_DEBUG("Failed to set DSCR.HDE");
1444 return retval;
1445 }
1446
1447 return ERROR_OK;
1448 }
1449
1450 static int aarch64_set_context_breakpoint(struct target *target,
1451 struct breakpoint *breakpoint, uint8_t matchmode)
1452 {
1453 int retval = ERROR_FAIL;
1454 int brp_i = 0;
1455 uint32_t control;
1456 uint8_t byte_addr_select = 0x0F;
1457 struct aarch64_common *aarch64 = target_to_aarch64(target);
1458 struct armv8_common *armv8 = &aarch64->armv8_common;
1459 struct aarch64_brp *brp_list = aarch64->brp_list;
1460
1461 if (breakpoint->set) {
1462 LOG_WARNING("breakpoint already set");
1463 return retval;
1464 }
1465 /*check available context BRPs*/
1466 while ((brp_list[brp_i].used ||
1467 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1468 brp_i++;
1469
1470 if (brp_i >= aarch64->brp_num) {
1471 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1472 return ERROR_FAIL;
1473 }
1474
1475 breakpoint->set = brp_i + 1;
1476 control = ((matchmode & 0x7) << 20)
1477 | (1 << 13)
1478 | (byte_addr_select << 5)
1479 | (3 << 1) | 1;
1480 brp_list[brp_i].used = 1;
1481 brp_list[brp_i].value = (breakpoint->asid);
1482 brp_list[brp_i].control = control;
1483 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1484 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1485 brp_list[brp_i].value);
1486 if (retval != ERROR_OK)
1487 return retval;
1488 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1489 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1490 brp_list[brp_i].control);
1491 if (retval != ERROR_OK)
1492 return retval;
1493 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1494 brp_list[brp_i].control,
1495 brp_list[brp_i].value);
1496 return ERROR_OK;
1497
1498 }
1499
1500 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1501 {
1502 int retval = ERROR_FAIL;
1503 int brp_1 = 0; /* holds the contextID pair */
1504 int brp_2 = 0; /* holds the IVA pair */
1505 uint32_t control_CTX, control_IVA;
1506 uint8_t CTX_byte_addr_select = 0x0F;
1507 uint8_t IVA_byte_addr_select = 0x0F;
1508 uint8_t CTX_machmode = 0x03;
1509 uint8_t IVA_machmode = 0x01;
1510 struct aarch64_common *aarch64 = target_to_aarch64(target);
1511 struct armv8_common *armv8 = &aarch64->armv8_common;
1512 struct aarch64_brp *brp_list = aarch64->brp_list;
1513
1514 if (breakpoint->set) {
1515 LOG_WARNING("breakpoint already set");
1516 return retval;
1517 }
1518 /*check available context BRPs*/
1519 while ((brp_list[brp_1].used ||
1520 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1521 brp_1++;
1522
1523 printf("brp(CTX) found num: %d\n", brp_1);
1524 if (brp_1 >= aarch64->brp_num) {
1525 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1526 return ERROR_FAIL;
1527 }
1528
1529 while ((brp_list[brp_2].used ||
1530 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1531 brp_2++;
1532
1533 printf("brp(IVA) found num: %d\n", brp_2);
1534 if (brp_2 >= aarch64->brp_num) {
1535 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1536 return ERROR_FAIL;
1537 }
1538
1539 breakpoint->set = brp_1 + 1;
1540 breakpoint->linked_BRP = brp_2;
1541 control_CTX = ((CTX_machmode & 0x7) << 20)
1542 | (brp_2 << 16)
1543 | (0 << 14)
1544 | (CTX_byte_addr_select << 5)
1545 | (3 << 1) | 1;
1546 brp_list[brp_1].used = 1;
1547 brp_list[brp_1].value = (breakpoint->asid);
1548 brp_list[brp_1].control = control_CTX;
1549 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1550 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1551 brp_list[brp_1].value);
1552 if (retval != ERROR_OK)
1553 return retval;
1554 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1555 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1556 brp_list[brp_1].control);
1557 if (retval != ERROR_OK)
1558 return retval;
1559
1560 control_IVA = ((IVA_machmode & 0x7) << 20)
1561 | (brp_1 << 16)
1562 | (1 << 13)
1563 | (IVA_byte_addr_select << 5)
1564 | (3 << 1) | 1;
1565 brp_list[brp_2].used = 1;
1566 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1567 brp_list[brp_2].control = control_IVA;
1568 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1569 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1570 brp_list[brp_2].value & 0xFFFFFFFF);
1571 if (retval != ERROR_OK)
1572 return retval;
1573 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1574 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1575 brp_list[brp_2].value >> 32);
1576 if (retval != ERROR_OK)
1577 return retval;
1578 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1579 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1580 brp_list[brp_2].control);
1581 if (retval != ERROR_OK)
1582 return retval;
1583
1584 return ERROR_OK;
1585 }
1586
/* Remove a breakpoint previously installed by one of the set_* functions:
 * clears and disables the hardware BRP(s) for BKPT_HARD (both the linked
 * context-ID and IVA pairs for hybrid breakpoints), or restores the saved
 * original instruction for software breakpoints.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* address and asid both non-zero means this is a hybrid
		 * breakpoint using two linked BRPs */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;	/* context-ID pair */
			int brp_j = breakpoint->linked_BRP;	/* IVA pair */
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* clear the bookkeeping first, then write the zeroed
			 * control/value back to the debug registers */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): writes the low word of .value to the
			 * upper BVR half; harmless here since .value is 0, but
			 * presumably (value >> 32) was intended — confirm */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* same teardown for the linked IVA pair */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* single BRP breakpoint */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* flush D-cache and invalidate I-cache so the core fetches
		 * the restored instruction */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1719
1720 static int aarch64_add_breakpoint(struct target *target,
1721 struct breakpoint *breakpoint)
1722 {
1723 struct aarch64_common *aarch64 = target_to_aarch64(target);
1724
1725 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1726 LOG_INFO("no hardware breakpoint available");
1727 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1728 }
1729
1730 if (breakpoint->type == BKPT_HARD)
1731 aarch64->brp_num_available--;
1732
1733 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1734 }
1735
1736 static int aarch64_add_context_breakpoint(struct target *target,
1737 struct breakpoint *breakpoint)
1738 {
1739 struct aarch64_common *aarch64 = target_to_aarch64(target);
1740
1741 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1742 LOG_INFO("no hardware breakpoint available");
1743 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1744 }
1745
1746 if (breakpoint->type == BKPT_HARD)
1747 aarch64->brp_num_available--;
1748
1749 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1750 }
1751
1752 static int aarch64_add_hybrid_breakpoint(struct target *target,
1753 struct breakpoint *breakpoint)
1754 {
1755 struct aarch64_common *aarch64 = target_to_aarch64(target);
1756
1757 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1758 LOG_INFO("no hardware breakpoint available");
1759 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1760 }
1761
1762 if (breakpoint->type == BKPT_HARD)
1763 aarch64->brp_num_available--;
1764
1765 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1766 }
1767
1768
1769 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1770 {
1771 struct aarch64_common *aarch64 = target_to_aarch64(target);
1772
1773 #if 0
1774 /* It is perfectly possible to remove breakpoints while the target is running */
1775 if (target->state != TARGET_HALTED) {
1776 LOG_WARNING("target not halted");
1777 return ERROR_TARGET_NOT_HALTED;
1778 }
1779 #endif
1780
1781 if (breakpoint->set) {
1782 aarch64_unset_breakpoint(target, breakpoint);
1783 if (breakpoint->type == BKPT_HARD)
1784 aarch64->brp_num_available++;
1785 }
1786
1787 return ERROR_OK;
1788 }
1789
/*
 * AArch64 reset functions
 */
1793
1794 static int aarch64_assert_reset(struct target *target)
1795 {
1796 struct armv8_common *armv8 = target_to_armv8(target);
1797
1798 LOG_DEBUG(" ");
1799
1800 /* FIXME when halt is requested, make it work somehow... */
1801
1802 /* Issue some kind of warm reset. */
1803 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1804 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1805 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1806 /* REVISIT handle "pulls" cases, if there's
1807 * hardware that needs them to work.
1808 */
1809 jtag_add_reset(0, 1);
1810 } else {
1811 LOG_ERROR("%s: how to reset?", target_name(target));
1812 return ERROR_FAIL;
1813 }
1814
1815 /* registers are now invalid */
1816 register_cache_invalidate(armv8->arm.core_cache);
1817
1818 target->state = TARGET_RESET;
1819
1820 return ERROR_OK;
1821 }
1822
1823 static int aarch64_deassert_reset(struct target *target)
1824 {
1825 int retval;
1826
1827 LOG_DEBUG(" ");
1828
1829 /* be certain SRST is off */
1830 jtag_add_reset(0, 0);
1831
1832 retval = aarch64_poll(target);
1833 if (retval != ERROR_OK)
1834 return retval;
1835
1836 if (target->reset_halt) {
1837 if (target->state != TARGET_HALTED) {
1838 LOG_WARNING("%s: ran after reset and before halt ...",
1839 target_name(target));
1840 retval = target_halt(target);
1841 if (retval != ERROR_OK)
1842 return retval;
1843 }
1844 }
1845
1846 return ERROR_OK;
1847 }
1848
1849 static int aarch64_write_apb_ap_memory(struct target *target,
1850 uint64_t address, uint32_t size,
1851 uint32_t count, const uint8_t *buffer)
1852 {
1853 /* write memory through APB-AP */
1854 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1855 struct armv8_common *armv8 = target_to_armv8(target);
1856 struct arm *arm = &armv8->arm;
1857 int total_bytes = count * size;
1858 int total_u32;
1859 int start_byte = address & 0x3;
1860 int end_byte = (address + total_bytes) & 0x3;
1861 struct reg *reg;
1862 uint32_t dscr;
1863 uint8_t *tmp_buff = NULL;
1864
1865 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1866 address, size, count);
1867 if (target->state != TARGET_HALTED) {
1868 LOG_WARNING("target not halted");
1869 return ERROR_TARGET_NOT_HALTED;
1870 }
1871
1872 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1873
1874 /* Mark register R0 as dirty, as it will be used
1875 * for transferring the data.
1876 * It will be restored automatically when exiting
1877 * debug mode
1878 */
1879 reg = armv8_reg_current(arm, 1);
1880 reg->dirty = true;
1881
1882 reg = armv8_reg_current(arm, 0);
1883 reg->dirty = true;
1884
1885 /* clear any abort */
1886 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1887 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1888 if (retval != ERROR_OK)
1889 return retval;
1890
1891
1892 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1893
1894 /* The algorithm only copies 32 bit words, so the buffer
1895 * should be expanded to include the words at either end.
1896 * The first and last words will be read first to avoid
1897 * corruption if needed.
1898 */
1899 tmp_buff = malloc(total_u32 * 4);
1900
1901 if ((start_byte != 0) && (total_u32 > 1)) {
1902 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1903 * the other bytes in the word.
1904 */
1905 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1906 if (retval != ERROR_OK)
1907 goto error_free_buff_w;
1908 }
1909
1910 /* If end of write is not aligned, or the write is less than 4 bytes */
1911 if ((end_byte != 0) ||
1912 ((total_u32 == 1) && (total_bytes != 4))) {
1913
1914 /* Read the last word to avoid corruption during 32 bit write */
1915 int mem_offset = (total_u32-1) * 4;
1916 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1917 if (retval != ERROR_OK)
1918 goto error_free_buff_w;
1919 }
1920
1921 /* Copy the write buffer over the top of the temporary buffer */
1922 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1923
1924 /* We now have a 32 bit aligned buffer that can be written */
1925
1926 /* Read DSCR */
1927 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1928 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1929 if (retval != ERROR_OK)
1930 goto error_free_buff_w;
1931
1932 /* Set Normal access mode */
1933 dscr = (dscr & ~DSCR_MA);
1934 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1935 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1936
1937 if (arm->core_state == ARM_STATE_AARCH64) {
1938 /* Write X0 with value 'address' using write procedure */
1939 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1940 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1941 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1942 retval += aarch64_exec_opcode(target,
1943 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1944 } else {
1945 /* Write R0 with value 'address' using write procedure */
1946 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1947 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1948 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1949 retval += aarch64_exec_opcode(target,
1950 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1951
1952 }
1953 /* Step 1.d - Change DCC to memory mode */
1954 dscr = dscr | DSCR_MA;
1955 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1956 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1957 if (retval != ERROR_OK)
1958 goto error_unset_dtr_w;
1959
1960
1961 /* Step 2.a - Do the write */
1962 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1963 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1964 if (retval != ERROR_OK)
1965 goto error_unset_dtr_w;
1966
1967 /* Step 3.a - Switch DTR mode back to Normal mode */
1968 dscr = (dscr & ~DSCR_MA);
1969 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1970 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1971 if (retval != ERROR_OK)
1972 goto error_unset_dtr_w;
1973
1974 /* Check for sticky abort flags in the DSCR */
1975 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1976 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1977 if (retval != ERROR_OK)
1978 goto error_free_buff_w;
1979 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1980 /* Abort occurred - clear it and exit */
1981 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1982 mem_ap_write_atomic_u32(armv8->debug_ap,
1983 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1984 goto error_free_buff_w;
1985 }
1986
1987 /* Done */
1988 free(tmp_buff);
1989 return ERROR_OK;
1990
1991 error_unset_dtr_w:
1992 /* Unset DTR mode */
1993 mem_ap_read_atomic_u32(armv8->debug_ap,
1994 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1995 dscr = (dscr & ~DSCR_MA);
1996 mem_ap_write_atomic_u32(armv8->debug_ap,
1997 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1998 error_free_buff_w:
1999 LOG_ERROR("error");
2000 free(tmp_buff);
2001 return ERROR_FAIL;
2002 }
2003
/*
 * Read target memory through the APB-AP using the core's Debug
 * Communications Channel (DCC) in memory-access mode.
 *
 * Algorithm per ARM DDI0487A.g, chapter J9.1: X0 (R0 in AArch32) is
 * loaded with the word-aligned start address and DSCR.MA is set; each
 * subsequent read of DBGDTRTX then returns one word from [X0] and
 * advances X0.  The transfer is always done in aligned 32-bit words;
 * for unaligned requests the whole covering range is read into a
 * temporary buffer and the requested bytes are copied out at the end.
 *
 * target:  target to read from; must be halted
 * address: start address (any alignment)
 * size:    element size in bytes
 * count:   number of elements
 * buffer:  receives size*count bytes
 *
 * Returns ERROR_OK on success, ERROR_TARGET_NOT_HALTED if the core is
 * running, or ERROR_FAIL on any debug-port error or sticky abort.
 */
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;	/* offset of first byte within its word */
	int end_byte = (address + total_bytes) & 0x3;	/* offset past last byte */
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;
	uint32_t value;

	LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%" PRIu32,
			address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* number of whole 32-bit words covering the (possibly unaligned) range */
	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register X0, X1 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* clear any abort */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += aarch64_exec_opcode(target, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += aarch64_exec_opcode(target, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* AArch32 variant of the same sequence, using the T32
		 * encodings of the CP14 DCC transfer instructions. */
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += aarch64_exec_opcode(target,
				T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += aarch64_exec_opcode(target,
				T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr);
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	}
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
									armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode	*/
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}
2171
2172 static int aarch64_read_phys_memory(struct target *target,
2173 target_addr_t address, uint32_t size,
2174 uint32_t count, uint8_t *buffer)
2175 {
2176 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2177 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
2178 address, size, count);
2179
2180 if (count && buffer) {
2181 /* read memory through APB-AP */
2182 retval = aarch64_mmu_modify(target, 0);
2183 if (retval != ERROR_OK)
2184 return retval;
2185 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2186 }
2187 return retval;
2188 }
2189
2190 static int aarch64_read_memory(struct target *target, target_addr_t address,
2191 uint32_t size, uint32_t count, uint8_t *buffer)
2192 {
2193 int mmu_enabled = 0;
2194 int retval;
2195
2196 /* aarch64 handles unaligned memory access */
2197 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2198 size, count);
2199
2200 /* determine if MMU was enabled on target stop */
2201 retval = aarch64_mmu(target, &mmu_enabled);
2202 if (retval != ERROR_OK)
2203 return retval;
2204
2205 if (mmu_enabled) {
2206 retval = aarch64_check_address(target, address);
2207 if (retval != ERROR_OK)
2208 return retval;
2209 /* enable MMU as we could have disabled it for phys access */
2210 retval = aarch64_mmu_modify(target, 1);
2211 if (retval != ERROR_OK)
2212 return retval;
2213 }
2214 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2215 }
2216
2217 static int aarch64_write_phys_memory(struct target *target,
2218 target_addr_t address, uint32_t size,
2219 uint32_t count, const uint8_t *buffer)
2220 {
2221 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2222
2223 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2224 size, count);
2225
2226 if (count && buffer) {
2227 /* write memory through APB-AP */
2228 retval = aarch64_mmu_modify(target, 0);
2229 if (retval != ERROR_OK)
2230 return retval;
2231 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2232 }
2233
2234 return retval;
2235 }
2236
2237 static int aarch64_write_memory(struct target *target, target_addr_t address,
2238 uint32_t size, uint32_t count, const uint8_t *buffer)
2239 {
2240 int mmu_enabled = 0;
2241 int retval;
2242
2243 /* aarch64 handles unaligned memory access */
2244 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
2245 "; count %" PRId32, address, size, count);
2246
2247 /* determine if MMU was enabled on target stop */
2248 retval = aarch64_mmu(target, &mmu_enabled);
2249 if (retval != ERROR_OK)
2250 return retval;
2251
2252 if (mmu_enabled) {
2253 retval = aarch64_check_address(target, address);
2254 if (retval != ERROR_OK)
2255 return retval;
2256 /* enable MMU as we could have disabled it for phys access */
2257 retval = aarch64_mmu_modify(target, 1);
2258 if (retval != ERROR_OK)
2259 return retval;
2260 }
2261 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2262 }
2263
2264 static int aarch64_handle_target_request(void *priv)
2265 {
2266 struct target *target = priv;
2267 struct armv8_common *armv8 = target_to_armv8(target);
2268 int retval;
2269
2270 if (!target_was_examined(target))
2271 return ERROR_OK;
2272 if (!target->dbg_msg_enabled)
2273 return ERROR_OK;
2274
2275 if (target->state == TARGET_RUNNING) {
2276 uint32_t request;
2277 uint32_t dscr;
2278 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2279 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2280
2281 /* check if we have data */
2282 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2283 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2284 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2285 if (retval == ERROR_OK) {
2286 target_request(target, request);
2287 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2288 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2289 }
2290 }
2291 }
2292
2293 return ERROR_OK;
2294 }
2295
/*
 * First-time examination of the target: initialize the DAP, locate the
 * APB-AP and the core's debug base address, unlock and probe the debug
 * ID registers, set up the debug PM and the breakpoint bookkeeping.
 *
 * Returns ERROR_OK on success, or the first failing DAP/MEM-AP error.
 */
static int aarch64_examine_first(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct adiv5_dap *swjdp = armv8->arm.dap;
	int i;
	int retval = ERROR_OK;
	uint64_t debug, ttypr;
	uint32_t cpuid;
	uint32_t tmp0, tmp1;
	debug = ttypr = cpuid = 0;

	/* We do one extra read to ensure DAP is configured,
	 * we call ahbap_debugport_init(swjdp) instead
	 */
	retval = dap_dp_init(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* Search for the APB-AB - it is needed for access to debug registers */
	retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not find APB-AP for debug access");
		return retval;
	}

	retval = mem_ap_init(armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not initialize the APB-AP");
		return retval;
	}

	/* extra tck clocks per memory access through this AP */
	armv8->debug_ap->memaccess_tck = 80;

	if (!target->dbgbase_set) {
		/* debug base not given in the config: discover it from
		 * the CoreSight ROM table */
		uint32_t dbgbase;
		/* Get ROM Table base */
		uint32_t apid;
		int32_t coreidx = target->coreid;
		retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
		if (retval != ERROR_OK)
			return retval;
		/* Lookup 0x15 -- Processor DAP */
		retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
				&armv8->debug_base, &coreidx);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
				" apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
	} else
		armv8->debug_base = target->dbgbase;

	/* 0xC5ACCE55 is the CoreSight software lock-access key */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK) {
		LOG_DEBUG("LOCK debug access fail");
		return retval;
	}

	/* clear the OS lock so debug register access is permitted */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "CPUID");
		return retval;
	}

	/* assemble the 64-bit memory model register from its two
	 * 32-bit halves (ttypr starts at 0, so |= only sets the low
	 * half before the shift) */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "Memory Model Type");
		return retval;
	}
	ttypr |= tmp1;
	ttypr = (ttypr << 32) | tmp0;

	/* same two-halves assembly for the debug feature register */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
		return retval;
	}
	debug |= tmp1;
	debug = (debug << 32) | tmp0;

	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
	LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
	LOG_DEBUG("debug = 0x%08" PRIx64, debug);

	if (target->ctibase == 0) {
		/* assume a v8 rom table layout */
		armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
		LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
	} else
		armv8->cti_base = target->ctibase;

	armv8->arm.core_type = ARM_MODE_MON;
	retval = aarch64_dpm_setup(aarch64, debug);
	if (retval != ERROR_OK)
		return retval;

	/* Setup Breakpoint Register Pairs: counts come from the debug
	 * feature register fields read above (value is field + 1) */
	aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
	aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
	aarch64->brp_num_available = aarch64->brp_num;
	aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
	for (i = 0; i < aarch64->brp_num; i++) {
		aarch64->brp_list[i].used = 0;
		/* context-matching pairs occupy the highest-numbered slots */
		if (i < (aarch64->brp_num-aarch64->brp_num_context))
			aarch64->brp_list[i].type = BRP_NORMAL;
		else
			aarch64->brp_list[i].type = BRP_CONTEXT;
		aarch64->brp_list[i].value = 0;
		aarch64->brp_list[i].control = 0;
		aarch64->brp_list[i].BRPn = i;
	}

	LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);

	target_set_examined(target);
	return ERROR_OK;
}
2428
2429 static int aarch64_examine(struct target *target)
2430 {
2431 int retval = ERROR_OK;
2432
2433 /* don't re-probe hardware after each reset */
2434 if (!target_was_examined(target))
2435 retval = aarch64_examine_first(target);
2436
2437 /* Configure core debug access */
2438 if (retval == ERROR_OK)
2439 retval = aarch64_init_debug_access(target);
2440
2441 return retval;
2442 }
2443
/*
 * AArch64 target creation and initialization
 */
2447
2448 static int aarch64_init_target(struct command_context *cmd_ctx,
2449 struct target *target)
2450 {
2451 /* examine_first() does a bunch of this */
2452 return ERROR_OK;
2453 }
2454
2455 static int aarch64_init_arch_info(struct target *target,
2456 struct aarch64_common *aarch64, struct jtag_tap *tap)
2457 {
2458 struct armv8_common *armv8 = &aarch64->armv8_common;
2459 struct adiv5_dap *dap = armv8->arm.dap;
2460
2461 armv8->arm.dap = dap;
2462
2463 /* Setup struct aarch64_common */
2464 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2465 /* tap has no dap initialized */
2466 if (!tap->dap) {
2467 tap->dap = dap_init();
2468
2469 /* Leave (only) generic DAP stuff for debugport_init() */
2470 tap->dap->tap = tap;
2471 }
2472
2473 armv8->arm.dap = tap->dap;
2474
2475 aarch64->fast_reg_read = 0;
2476
2477 /* register arch-specific functions */
2478 armv8->examine_debug_reason = NULL;
2479
2480 armv8->post_debug_entry = aarch64_post_debug_entry;
2481
2482 armv8->pre_restore_context = NULL;
2483
2484 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2485
2486 /* REVISIT v7a setup should be in a v7a-specific routine */
2487 armv8_init_arch_info(target, armv8);
2488 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2489
2490 return ERROR_OK;
2491 }
2492
2493 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2494 {
2495 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2496
2497 return aarch64_init_arch_info(target, aarch64, target->tap);
2498 }
2499
2500 static int aarch64_mmu(struct target *target, int *enabled)
2501 {
2502 if (target->state != TARGET_HALTED) {
2503 LOG_ERROR("%s: target not halted", __func__);
2504 return ERROR_TARGET_INVALID;
2505 }
2506
2507 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2508 return ERROR_OK;
2509 }
2510
2511 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2512 target_addr_t *phys)
2513 {
2514 return armv8_mmu_translate_va(target, virt, phys);
2515 }
2516
2517 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2518 {
2519 struct target *target = get_current_target(CMD_CTX);
2520 struct armv8_common *armv8 = target_to_armv8(target);
2521
2522 return armv8_handle_cache_info_command(CMD_CTX,
2523 &armv8->armv8_mmu.armv8_cache);
2524 }
2525
2526
2527 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2528 {
2529 struct target *target = get_current_target(CMD_CTX);
2530 if (!target_was_examined(target)) {
2531 LOG_ERROR("target not examined yet");
2532 return ERROR_FAIL;
2533 }
2534
2535 return aarch64_init_debug_access(target);
2536 }
2537 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2538 {
2539 struct target *target = get_current_target(CMD_CTX);
2540 /* check target is an smp target */
2541 struct target_list *head;
2542 struct target *curr;
2543 head = target->head;
2544 target->smp = 0;
2545 if (head != (struct target_list *)NULL) {
2546 while (head != (struct target_list *)NULL) {
2547 curr = head->target;
2548 curr->smp = 0;
2549 head = head->next;
2550 }
2551 /* fixes the target display to the debugger */
2552 target->gdb_service->target = target;
2553 }
2554 return ERROR_OK;
2555 }
2556
2557 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2558 {
2559 struct target *target = get_current_target(CMD_CTX);
2560 struct target_list *head;
2561 struct target *curr;
2562 head = target->head;
2563 if (head != (struct target_list *)NULL) {
2564 target->smp = 1;
2565 while (head != (struct target_list *)NULL) {
2566 curr = head->target;
2567 curr->smp = 1;
2568 head = head->next;
2569 }
2570 }
2571 return ERROR_OK;
2572 }
2573
/*
 * "smp_gdb" command: display, and optionally set, which core of an SMP
 * group is presented to gdb.  With one argument the requested core id
 * is stored in gdb_service->core[1]; the current -> requested mapping
 * is always printed.  Only meaningful for targets in an SMP group.
 */
COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			/* NOTE(review): COMMAND_PARSE_NUMBER returns from the
			 * handler itself on a parse error, so retval is still
			 * ERROR_OK here and this check looks dead — confirm
			 * against the macro in command.h. */
			if (ERROR_OK != retval)
				return retval;
			target->gdb_service->core[1] = coreid;

		}
		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
2594
2595 static const struct command_registration aarch64_exec_command_handlers[] = {
2596 {
2597 .name = "cache_info",
2598 .handler = aarch64_handle_cache_info_command,
2599 .mode = COMMAND_EXEC,
2600 .help = "display information about target caches",
2601 .usage = "",
2602 },
2603 {
2604 .name = "dbginit",
2605 .handler = aarch64_handle_dbginit_command,
2606 .mode = COMMAND_EXEC,
2607 .help = "Initialize core debug",
2608 .usage = "",
2609 },
2610 { .name = "smp_off",
2611 .handler = aarch64_handle_smp_off_command,
2612 .mode = COMMAND_EXEC,
2613 .help = "Stop smp handling",
2614 .usage = "",
2615 },
2616 {
2617 .name = "smp_on",
2618 .handler = aarch64_handle_smp_on_command,
2619 .mode = COMMAND_EXEC,
2620 .help = "Restart smp handling",
2621 .usage = "",
2622 },
2623 {
2624 .name = "smp_gdb",
2625 .handler = aarch64_handle_smp_gdb_command,
2626 .mode = COMMAND_EXEC,
2627 .help = "display/fix current core played to gdb",
2628 .usage = "",
2629 },
2630
2631
2632 COMMAND_REGISTRATION_DONE
2633 };
/* Top-level command registration: chains the generic ARM and ARMv8
 * command groups, plus this driver's own exec commands. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		/* NOTE(review): the group is named "cortex_a" even though
		 * this is the aarch64 driver — presumably inherited from
		 * the cortex_a code this file derives from.  Renaming to
		 * "aarch64" would break existing user scripts; confirm
		 * before changing. */
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2650
/* OpenOCD target driver vtable for ARMv8-A (AArch64) cores. */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* virtual-address accessors; physical ones below */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	/* hardware watchpoints not implemented in this driver yet */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)