587cbba0e91b36a3d1b7bd4afaaa8e5fb734ac8c
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex-R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 * *
39 * Cortex-A8(tm) TRM, ARM DDI 0344H *
40 * Cortex-A9(tm) TRM, ARM DDI 0407F *
41  *   Cortex-R4(tm) TRM, ARM DDI 0363E                                       *
42 * Cortex-A15(tm)TRM, ARM DDI 0438C *
43 * *
44 ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "armv7a_mmu.h"
54 #include "target_request.h"
55 #include "target_type.h"
56 #include "arm_opcodes.h"
57 #include "arm_semihosting.h"
58 #include "transport/transport.h"
59 #include "smp.h"
60 #include <helper/time_support.h>
61
62 static int cortex_a_poll(struct target *target);
63 static int cortex_a_debug_entry(struct target *target);
64 static int cortex_a_restore_context(struct target *target, bool bpwp);
65 static int cortex_a_set_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_context_breakpoint(struct target *target,
68 struct breakpoint *breakpoint, uint8_t matchmode);
69 static int cortex_a_set_hybrid_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_unset_breakpoint(struct target *target,
72 struct breakpoint *breakpoint);
73 static int cortex_a_mmu(struct target *target, int *enabled);
74 static int cortex_a_mmu_modify(struct target *target, int enable);
75 static int cortex_a_virt2phys(struct target *target,
76 target_addr_t virt, target_addr_t *phys);
77 static int cortex_a_read_cpu_memory(struct target *target,
78 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
79
80
81 /* restore cp15_control_reg at resume */
82 static int cortex_a_restore_cp15_control_reg(struct target *target)
83 {
84 int retval = ERROR_OK;
85 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
86 struct armv7a_common *armv7a = target_to_armv7a(target);
87
88 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
89 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
90 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
91 retval = armv7a->arm.mcr(target, 15,
92 0, 0, /* op1, op2 */
93 1, 0, /* CRn, CRm */
94 cortex_a->cp15_control_reg);
95 }
96 return retval;
97 }
98
99 /*
100 * Set up ARM core for memory access.
101 * If !phys_access, switch to SVC mode and make sure MMU is on
102 * If phys_access, switch off mmu
103 */
104 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
105 {
106 struct armv7a_common *armv7a = target_to_armv7a(target);
107 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
108 int mmu_enabled = 0;
109
110 if (phys_access == 0) {
111 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
112 cortex_a_mmu(target, &mmu_enabled);
113 if (mmu_enabled)
114 cortex_a_mmu_modify(target, 1);
115 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
116 /* overwrite DACR to all-manager */
117 armv7a->arm.mcr(target, 15,
118 0, 0, 3, 0,
119 0xFFFFFFFF);
120 }
121 } else {
122 cortex_a_mmu(target, &mmu_enabled);
123 if (mmu_enabled)
124 cortex_a_mmu_modify(target, 0);
125 }
126 return ERROR_OK;
127 }
128
129 /*
130 * Restore ARM core after memory access.
131 * If !phys_access, switch to previous mode
132 * If phys_access, restore MMU setting
133 */
134 static int cortex_a_post_memaccess(struct target *target, int phys_access)
135 {
136 struct armv7a_common *armv7a = target_to_armv7a(target);
137 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
138
139 if (phys_access == 0) {
140 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
141 /* restore */
142 armv7a->arm.mcr(target, 15,
143 0, 0, 3, 0,
144 cortex_a->cp15_dacr_reg);
145 }
146 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
147 } else {
148 int mmu_enabled = 0;
149 cortex_a_mmu(target, &mmu_enabled);
150 if (mmu_enabled)
151 cortex_a_mmu_modify(target, 1);
152 }
153 return ERROR_OK;
154 }
155
156
157 /* modify cp15_control_reg in order to enable or disable mmu for :
158 * - virt2phys address conversion
159 * - read or write memory in phys or virt address */
160 static int cortex_a_mmu_modify(struct target *target, int enable)
161 {
162 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
163 struct armv7a_common *armv7a = target_to_armv7a(target);
164 int retval = ERROR_OK;
165 int need_write = 0;
166
167 if (enable) {
168 /* if mmu enabled at target stop and mmu not enable */
169 if (!(cortex_a->cp15_control_reg & 0x1U)) {
170 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
171 return ERROR_FAIL;
172 }
173 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
174 cortex_a->cp15_control_reg_curr |= 0x1U;
175 need_write = 1;
176 }
177 } else {
178 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
179 cortex_a->cp15_control_reg_curr &= ~0x1U;
180 need_write = 1;
181 }
182 }
183
184 if (need_write) {
185 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
186 enable ? "enable mmu" : "disable mmu",
187 cortex_a->cp15_control_reg_curr);
188
189 retval = armv7a->arm.mcr(target, 15,
190 0, 0, /* op1, op2 */
191 1, 0, /* CRn, CRm */
192 cortex_a->cp15_control_reg_curr);
193 }
194 return retval;
195 }
196
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t dscr;
	int retval;

	/* lock memory-mapped access to debug registers to prevent
	 * software interference */
	retval = mem_ap_write_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* The three writes above used the queued (non-atomic) API;
	 * flush them to the target now. */
	retval = dap_run(armv7a->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Enable halt for breakpoint, watchpoint and vector catch
	 * (read-modify-write of DSCR, setting only DSCR_HALT_DBG_MODE) */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
246
247 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
248 {
249 /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
250 * Writes final value of DSCR into *dscr. Pass force to force always
251 * reading DSCR at least once. */
252 struct armv7a_common *armv7a = target_to_armv7a(target);
253 int64_t then = timeval_ms();
254 while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
255 force = false;
256 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
257 armv7a->debug_base + CPUDBG_DSCR, dscr);
258 if (retval != ERROR_OK) {
259 LOG_ERROR("Could not read DSCR register");
260 return retval;
261 }
262 if (timeval_ms() > then + 1000) {
263 LOG_ERROR("Timeout waiting for InstrCompl=1");
264 return ERROR_FAIL;
265 }
266 }
267 return ERROR_OK;
268 }
269
270 /* To reduce needless round-trips, pass in a pointer to the current
271 * DSCR value. Initialize it to zero if you just need to know the
272 * value on return from this function; or DSCR_INSTR_COMP if you
273 * happen to know that no instruction is pending.
274 */
275 static int cortex_a_exec_opcode(struct target *target,
276 uint32_t opcode, uint32_t *dscr_p)
277 {
278 uint32_t dscr;
279 int retval;
280 struct armv7a_common *armv7a = target_to_armv7a(target);
281
282 dscr = dscr_p ? *dscr_p : 0;
283
284 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
285
286 /* Wait for InstrCompl bit to be set */
287 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
288 if (retval != ERROR_OK)
289 return retval;
290
291 retval = mem_ap_write_u32(armv7a->debug_ap,
292 armv7a->debug_base + CPUDBG_ITR, opcode);
293 if (retval != ERROR_OK)
294 return retval;
295
296 int64_t then = timeval_ms();
297 do {
298 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
299 armv7a->debug_base + CPUDBG_DSCR, &dscr);
300 if (retval != ERROR_OK) {
301 LOG_ERROR("Could not read DSCR register");
302 return retval;
303 }
304 if (timeval_ms() > then + 1000) {
305 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
306 return ERROR_FAIL;
307 }
308 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
309
310 if (dscr_p)
311 *dscr_p = dscr;
312
313 return retval;
314 }
315
316 /* Write to memory mapped registers directly with no cache or mmu handling */
317 static int cortex_a_dap_write_memap_register_u32(struct target *target,
318 uint32_t address,
319 uint32_t value)
320 {
321 int retval;
322 struct armv7a_common *armv7a = target_to_armv7a(target);
323
324 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
325
326 return retval;
327 }
328
329 /*
330 * Cortex-A implementation of Debug Programmer's Model
331 *
332 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
333 * so there's no need to poll for it before executing an instruction.
334 *
335 * NOTE that in several of these cases the "stall" mode might be useful.
336 * It'd let us queue a few operations together... prepare/finish might
337 * be the places to enable/disable that mode.
338 */
339
/* Recover the enclosing cortex_a_common from its embedded arm_dpm member. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
344
345 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
346 {
347 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
348 return mem_ap_write_u32(a->armv7a_common.debug_ap,
349 a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
350 }
351
352 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
353 uint32_t *dscr_p)
354 {
355 uint32_t dscr = DSCR_INSTR_COMP;
356 int retval;
357
358 if (dscr_p)
359 dscr = *dscr_p;
360
361 /* Wait for DTRRXfull */
362 int64_t then = timeval_ms();
363 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
364 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
365 a->armv7a_common.debug_base + CPUDBG_DSCR,
366 &dscr);
367 if (retval != ERROR_OK)
368 return retval;
369 if (timeval_ms() > then + 1000) {
370 LOG_ERROR("Timeout waiting for read dcc");
371 return ERROR_FAIL;
372 }
373 }
374
375 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
376 a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
377 if (retval != ERROR_OK)
378 return retval;
379 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
380
381 if (dscr_p)
382 *dscr_p = dscr;
383
384 return retval;
385 }
386
/* DPM prepare hook: establish the invariant that DSCR_INSTR_COMP is set
 * (no instruction pending) before any DPM operation runs, and drain a
 * stale DCC RX word if one is unexpectedly present. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by reading it into R0 (MRC p14,0,R0,c0,c5,0) */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
423
424 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
425 {
426 /* REVISIT what could be done here? */
427 return ERROR_OK;
428 }
429
430 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
431 uint32_t opcode, uint32_t data)
432 {
433 struct cortex_a_common *a = dpm_to_a(dpm);
434 int retval;
435 uint32_t dscr = DSCR_INSTR_COMP;
436
437 retval = cortex_a_write_dcc(a, data);
438 if (retval != ERROR_OK)
439 return retval;
440
441 return cortex_a_exec_opcode(
442 a->armv7a_common.arm.target,
443 opcode,
444 &dscr);
445 }
446
447 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
448 uint32_t opcode, uint32_t data)
449 {
450 struct cortex_a_common *a = dpm_to_a(dpm);
451 uint32_t dscr = DSCR_INSTR_COMP;
452 int retval;
453
454 retval = cortex_a_write_dcc(a, data);
455 if (retval != ERROR_OK)
456 return retval;
457
458 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
459 retval = cortex_a_exec_opcode(
460 a->armv7a_common.arm.target,
461 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
462 &dscr);
463 if (retval != ERROR_OK)
464 return retval;
465
466 /* then the opcode, taking data from R0 */
467 retval = cortex_a_exec_opcode(
468 a->armv7a_common.arm.target,
469 opcode,
470 &dscr);
471
472 return retval;
473 }
474
/* Synchronize the pipeline after a CPSR write.
 * MCR p15, 0, <Rt>, c7, c5, 4 is the cp15 "prefetch flush" operation. */
static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return cortex_a_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}
485
486 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
487 uint32_t opcode, uint32_t *data)
488 {
489 struct cortex_a_common *a = dpm_to_a(dpm);
490 int retval;
491 uint32_t dscr = DSCR_INSTR_COMP;
492
493 /* the opcode, writing data to DCC */
494 retval = cortex_a_exec_opcode(
495 a->armv7a_common.arm.target,
496 opcode,
497 &dscr);
498 if (retval != ERROR_OK)
499 return retval;
500
501 return cortex_a_read_dcc(a, data, &dscr);
502 }
503
504
505 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
506 uint32_t opcode, uint32_t *data)
507 {
508 struct cortex_a_common *a = dpm_to_a(dpm);
509 uint32_t dscr = DSCR_INSTR_COMP;
510 int retval;
511
512 /* the opcode, writing data to R0 */
513 retval = cortex_a_exec_opcode(
514 a->armv7a_common.arm.target,
515 opcode,
516 &dscr);
517 if (retval != ERROR_OK)
518 return retval;
519
520 /* write R0 to DCC */
521 retval = cortex_a_exec_opcode(
522 a->armv7a_common.arm.target,
523 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
524 &dscr);
525 if (retval != ERROR_OK)
526 return retval;
527
528 return cortex_a_read_dcc(a, data, &dscr);
529 }
530
531 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
532 uint32_t addr, uint32_t control)
533 {
534 struct cortex_a_common *a = dpm_to_a(dpm);
535 uint32_t vr = a->armv7a_common.debug_base;
536 uint32_t cr = a->armv7a_common.debug_base;
537 int retval;
538
539 switch (index_t) {
540 case 0 ... 15: /* breakpoints */
541 vr += CPUDBG_BVR_BASE;
542 cr += CPUDBG_BCR_BASE;
543 break;
544 case 16 ... 31: /* watchpoints */
545 vr += CPUDBG_WVR_BASE;
546 cr += CPUDBG_WCR_BASE;
547 index_t -= 16;
548 break;
549 default:
550 return ERROR_FAIL;
551 }
552 vr += 4 * index_t;
553 cr += 4 * index_t;
554
555 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
556 (unsigned) vr, (unsigned) cr);
557
558 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
559 vr, addr);
560 if (retval != ERROR_OK)
561 return retval;
562 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
563 cr, control);
564 return retval;
565 }
566
567 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
568 {
569 struct cortex_a_common *a = dpm_to_a(dpm);
570 uint32_t cr;
571
572 switch (index_t) {
573 case 0 ... 15:
574 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
575 break;
576 case 16 ... 31:
577 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
578 index_t -= 16;
579 break;
580 default:
581 return ERROR_FAIL;
582 }
583 cr += 4 * index_t;
584
585 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
586
587 /* clear control register */
588 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
589 }
590
/* Wire up the Cortex-A implementation of the Debug Programmer's Model:
 * install the per-operation callbacks, then run the generic DPM setup
 * and initialization. didr is the Debug ID Register value. */
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	/* instruction execution with data passed via DCC or R0 */
	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	/* breakpoint/watchpoint register programming */
	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
618 static struct target *get_cortex_a(struct target *target, int32_t coreid)
619 {
620 struct target_list *head;
621 struct target *curr;
622
623 head = target->head;
624 while (head != (struct target_list *)NULL) {
625 curr = head->target;
626 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
627 return curr;
628 head = head->next;
629 }
630 return target;
631 }
632 static int cortex_a_halt(struct target *target);
633
634 static int cortex_a_halt_smp(struct target *target)
635 {
636 int retval = 0;
637 struct target_list *head;
638 struct target *curr;
639 head = target->head;
640 while (head != (struct target_list *)NULL) {
641 curr = head->target;
642 if ((curr != target) && (curr->state != TARGET_HALTED)
643 && target_was_examined(curr))
644 retval += cortex_a_halt(curr);
645 head = head->next;
646 }
647 return retval;
648 }
649
/* After one SMP core halts, bring the rest of the group in sync:
 * halt the siblings if needed, poll each of them, and poll the target
 * that serves gdb last so its halt event is reported only after all
 * states are up to date. */
static int update_halt_gdb(struct target *target)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;
	int retval = 0;

	/* core[0] == -1 means no core is designated to serve gdb yet;
	 * claim that role for this target and halt the other cores */
	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += cortex_a_halt_smp(target);
	}

	if (target->gdb_service)
		gdb_target = target->gdb_service->target;

	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* Skip gdb_target; it alerts GDB so has to be polled as last one */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in cortex_a_poll() */
		curr->smp = 0;
		cortex_a_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		cortex_a_poll(gdb_target);
	return retval;
}
691
692 /*
693 * Cortex-A Run control
694 */
695
/* Poll the core's DSCR and update the cached target state, firing
 * halt/debug events and entering debug state as needed. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		/* gdb_service->target was cleared by the fake resume in
		 * cortex_a_resume(); pick the requested core[1] (or fall
		 * back to this target) and report it halted to gdb */
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later inspection (e.g. debug_entry) */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;

			retval = cortex_a_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp) {
				/* propagate the halt to the rest of the group */
				retval = update_halt_gdb(target);
				if (retval != ERROR_OK)
					return retval;
			}

			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
			} else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
				/* semihosting may service the halt and resume the
				 * core; in that case report no HALTED event */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
752
753 static int cortex_a_halt(struct target *target)
754 {
755 int retval = ERROR_OK;
756 uint32_t dscr;
757 struct armv7a_common *armv7a = target_to_armv7a(target);
758
759 /*
760 * Tell the core to be halted by writing DRCR with 0x1
761 * and then wait for the core to be halted.
762 */
763 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
764 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
765 if (retval != ERROR_OK)
766 return retval;
767
768 int64_t then = timeval_ms();
769 for (;; ) {
770 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
771 armv7a->debug_base + CPUDBG_DSCR, &dscr);
772 if (retval != ERROR_OK)
773 return retval;
774 if ((dscr & DSCR_CORE_HALTED) != 0)
775 break;
776 if (timeval_ms() > then + 1000) {
777 LOG_ERROR("Timeout waiting for halt");
778 return ERROR_FAIL;
779 }
780 }
781
782 target->debug_reason = DBG_REASON_DBGRQ;
783
784 return ERROR_OK;
785 }
786
787 static int cortex_a_internal_restore(struct target *target, int current,
788 target_addr_t *address, int handle_breakpoints, int debug_execution)
789 {
790 struct armv7a_common *armv7a = target_to_armv7a(target);
791 struct arm *arm = &armv7a->arm;
792 int retval;
793 uint32_t resume_pc;
794
795 if (!debug_execution)
796 target_free_all_working_areas(target);
797
798 #if 0
799 if (debug_execution) {
800 /* Disable interrupts */
801 /* We disable interrupts in the PRIMASK register instead of
802 * masking with C_MASKINTS,
803 * This is probably the same issue as Cortex-M3 Errata 377493:
804 * C_MASKINTS in parallel with disabled interrupts can cause
805 * local faults to not be taken. */
806 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
807 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
808 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;
809
810 /* Make sure we are in Thumb mode */
811 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
812 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
813 32) | (1 << 24));
814 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = true;
815 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = true;
816 }
817 #endif
818
819 /* current = 1: continue on current pc, otherwise continue at <address> */
820 resume_pc = buf_get_u32(arm->pc->value, 0, 32);
821 if (!current)
822 resume_pc = *address;
823 else
824 *address = resume_pc;
825
826 /* Make sure that the Armv7 gdb thumb fixups does not
827 * kill the return address
828 */
829 switch (arm->core_state) {
830 case ARM_STATE_ARM:
831 resume_pc &= 0xFFFFFFFC;
832 break;
833 case ARM_STATE_THUMB:
834 case ARM_STATE_THUMB_EE:
835 /* When the return address is loaded into PC
836 * bit 0 must be 1 to stay in Thumb state
837 */
838 resume_pc |= 0x1;
839 break;
840 case ARM_STATE_JAZELLE:
841 LOG_ERROR("How do I resume into Jazelle state??");
842 return ERROR_FAIL;
843 case ARM_STATE_AARCH64:
844 LOG_ERROR("Shoudn't be in AARCH64 state");
845 return ERROR_FAIL;
846 }
847 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
848 buf_set_u32(arm->pc->value, 0, 32, resume_pc);
849 arm->pc->dirty = true;
850 arm->pc->valid = true;
851
852 /* restore dpm_mode at system halt */
853 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
854 /* called it now before restoring context because it uses cpu
855 * register r0 for restoring cp15 control register */
856 retval = cortex_a_restore_cp15_control_reg(target);
857 if (retval != ERROR_OK)
858 return retval;
859 retval = cortex_a_restore_context(target, handle_breakpoints);
860 if (retval != ERROR_OK)
861 return retval;
862 target->debug_reason = DBG_REASON_NOTHALTED;
863 target->state = TARGET_RUNNING;
864
865 /* registers are now invalid */
866 register_cache_invalidate(arm->core_cache);
867
868 #if 0
869 /* the front-end may request us not to handle breakpoints */
870 if (handle_breakpoints) {
871 /* Single step past breakpoint at current address */
872 breakpoint = breakpoint_find(target, resume_pc);
873 if (breakpoint) {
874 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
875 cortex_m3_unset_breakpoint(target, breakpoint);
876 cortex_m3_single_step_core(target);
877 cortex_m3_set_breakpoint(target, breakpoint);
878 }
879 }
880
881 #endif
882 return retval;
883 }
884
885 static int cortex_a_internal_restart(struct target *target)
886 {
887 struct armv7a_common *armv7a = target_to_armv7a(target);
888 struct arm *arm = &armv7a->arm;
889 int retval;
890 uint32_t dscr;
891 /*
892 * * Restart core and wait for it to be started. Clear ITRen and sticky
893 * * exception flags: see ARMv7 ARM, C5.9.
894 *
895 * REVISIT: for single stepping, we probably want to
896 * disable IRQs by default, with optional override...
897 */
898
899 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
900 armv7a->debug_base + CPUDBG_DSCR, &dscr);
901 if (retval != ERROR_OK)
902 return retval;
903
904 if ((dscr & DSCR_INSTR_COMP) == 0)
905 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
906
907 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
908 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
909 if (retval != ERROR_OK)
910 return retval;
911
912 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
913 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
914 DRCR_CLEAR_EXCEPTIONS);
915 if (retval != ERROR_OK)
916 return retval;
917
918 int64_t then = timeval_ms();
919 for (;; ) {
920 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
921 armv7a->debug_base + CPUDBG_DSCR, &dscr);
922 if (retval != ERROR_OK)
923 return retval;
924 if ((dscr & DSCR_CORE_RESTARTED) != 0)
925 break;
926 if (timeval_ms() > then + 1000) {
927 LOG_ERROR("Timeout waiting for resume");
928 return ERROR_FAIL;
929 }
930 }
931
932 target->debug_reason = DBG_REASON_NOTHALTED;
933 target->state = TARGET_RUNNING;
934
935 /* registers are now invalid */
936 register_cache_invalidate(arm->core_cache);
937
938 return ERROR_OK;
939 }
940
941 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
942 {
943 int retval = 0;
944 struct target_list *head;
945 struct target *curr;
946 target_addr_t address;
947 head = target->head;
948 while (head != (struct target_list *)NULL) {
949 curr = head->target;
950 if ((curr != target) && (curr->state != TARGET_RUNNING)
951 && target_was_examined(curr)) {
952 /* resume current address , not in step mode */
953 retval += cortex_a_internal_restore(curr, 1, &address,
954 handle_breakpoints, 0);
955 retval += cortex_a_internal_restart(curr);
956 }
957 head = head->next;
958
959 }
960 return retval;
961 }
962
963 static int cortex_a_resume(struct target *target, int current,
964 target_addr_t address, int handle_breakpoints, int debug_execution)
965 {
966 int retval = 0;
967 /* dummy resume for smp toggle in order to reduce gdb impact */
968 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
969 /* simulate a start and halt of target */
970 target->gdb_service->target = NULL;
971 target->gdb_service->core[0] = target->gdb_service->core[1];
972 /* fake resume at next poll we play the target core[1], see poll*/
973 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
974 return 0;
975 }
976 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
977 if (target->smp) {
978 target->gdb_service->core[0] = -1;
979 retval = cortex_a_restore_smp(target, handle_breakpoints);
980 if (retval != ERROR_OK)
981 return retval;
982 }
983 cortex_a_internal_restart(target);
984
985 if (!debug_execution) {
986 target->state = TARGET_RUNNING;
987 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
988 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
989 } else {
990 target->state = TARGET_DEBUG_RUNNING;
991 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
992 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
993 }
994
995 return ERROR_OK;
996 }
997
/* Enter debug state after a halt: enable ITR so instructions can be fed
 * to the core, determine the debug reason, capture WFAR for watchpoints,
 * and read the core registers into the cache. */
static int cortex_a_debug_entry(struct target *target)
{
	uint32_t dscr;
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason (sets target->debug_reason from the DSCR
	 * snapshot taken in cortex_a_poll) */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* First load register accessible through core debug port */
	retval = arm_dpm_read_current_registers(&armv7a->dpm);
	if (retval != ERROR_OK)
		return retval;

	if (arm->spsr) {
		/* read SPSR */
		retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
		if (retval != ERROR_OK)
			return retval;
	}

#if 0
/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
/*	armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1076
1077 static int cortex_a_post_debug_entry(struct target *target)
1078 {
1079 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1080 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1081 int retval;
1082
1083 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1084 retval = armv7a->arm.mrc(target, 15,
1085 0, 0, /* op1, op2 */
1086 1, 0, /* CRn, CRm */
1087 &cortex_a->cp15_control_reg);
1088 if (retval != ERROR_OK)
1089 return retval;
1090 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1091 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1092
1093 if (!armv7a->is_armv7r)
1094 armv7a_read_ttbcr(target);
1095
1096 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1097 armv7a_identify_cache(target);
1098
1099 if (armv7a->is_armv7r) {
1100 armv7a->armv7a_mmu.mmu_enabled = 0;
1101 } else {
1102 armv7a->armv7a_mmu.mmu_enabled =
1103 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1104 }
1105 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1106 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1107 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1108 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1109 cortex_a->curr_mode = armv7a->arm.core_mode;
1110
1111 /* switch to SVC mode to read DACR */
1112 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1113 armv7a->arm.mrc(target, 15,
1114 0, 0, 3, 0,
1115 &cortex_a->cp15_dacr_reg);
1116
1117 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1118 cortex_a->cp15_dacr_reg);
1119
1120 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1121 return ERROR_OK;
1122 }
1123
1124 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1125 {
1126 struct armv7a_common *armv7a = target_to_armv7a(target);
1127 uint32_t dscr;
1128
1129 /* Read DSCR */
1130 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1131 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1132 if (ERROR_OK != retval)
1133 return retval;
1134
1135 /* clear bitfield */
1136 dscr &= ~bit_mask;
1137 /* put new value */
1138 dscr |= value & bit_mask;
1139
1140 /* write new DSCR */
1141 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1142 armv7a->debug_base + CPUDBG_DSCR, dscr);
1143 return retval;
1144 }
1145
1146 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1147 int handle_breakpoints)
1148 {
1149 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1150 struct armv7a_common *armv7a = target_to_armv7a(target);
1151 struct arm *arm = &armv7a->arm;
1152 struct breakpoint *breakpoint = NULL;
1153 struct breakpoint stepbreakpoint;
1154 struct reg *r;
1155 int retval;
1156
1157 if (target->state != TARGET_HALTED) {
1158 LOG_WARNING("target not halted");
1159 return ERROR_TARGET_NOT_HALTED;
1160 }
1161
1162 /* current = 1: continue on current pc, otherwise continue at <address> */
1163 r = arm->pc;
1164 if (!current)
1165 buf_set_u32(r->value, 0, 32, address);
1166 else
1167 address = buf_get_u32(r->value, 0, 32);
1168
1169 /* The front-end may request us not to handle breakpoints.
1170 * But since Cortex-A uses breakpoint for single step,
1171 * we MUST handle breakpoints.
1172 */
1173 handle_breakpoints = 1;
1174 if (handle_breakpoints) {
1175 breakpoint = breakpoint_find(target, address);
1176 if (breakpoint)
1177 cortex_a_unset_breakpoint(target, breakpoint);
1178 }
1179
1180 /* Setup single step breakpoint */
1181 stepbreakpoint.address = address;
1182 stepbreakpoint.asid = 0;
1183 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1184 ? 2 : 4;
1185 stepbreakpoint.type = BKPT_HARD;
1186 stepbreakpoint.set = 0;
1187
1188 /* Disable interrupts during single step if requested */
1189 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1190 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1191 if (ERROR_OK != retval)
1192 return retval;
1193 }
1194
1195 /* Break on IVA mismatch */
1196 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1197
1198 target->debug_reason = DBG_REASON_SINGLESTEP;
1199
1200 retval = cortex_a_resume(target, 1, address, 0, 0);
1201 if (retval != ERROR_OK)
1202 return retval;
1203
1204 int64_t then = timeval_ms();
1205 while (target->state != TARGET_HALTED) {
1206 retval = cortex_a_poll(target);
1207 if (retval != ERROR_OK)
1208 return retval;
1209 if (target->state == TARGET_HALTED)
1210 break;
1211 if (timeval_ms() > then + 1000) {
1212 LOG_ERROR("timeout waiting for target halt");
1213 return ERROR_FAIL;
1214 }
1215 }
1216
1217 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1218
1219 /* Re-enable interrupts if they were disabled */
1220 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1221 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1222 if (ERROR_OK != retval)
1223 return retval;
1224 }
1225
1226
1227 target->debug_reason = DBG_REASON_BREAKPOINT;
1228
1229 if (breakpoint)
1230 cortex_a_set_breakpoint(target, breakpoint, 0);
1231
1232 if (target->state != TARGET_HALTED)
1233 LOG_DEBUG("target stepped");
1234
1235 return ERROR_OK;
1236 }
1237
1238 static int cortex_a_restore_context(struct target *target, bool bpwp)
1239 {
1240 struct armv7a_common *armv7a = target_to_armv7a(target);
1241
1242 LOG_DEBUG(" ");
1243
1244 if (armv7a->pre_restore_context)
1245 armv7a->pre_restore_context(target);
1246
1247 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1248 }
1249
1250 /*
1251 * Cortex-A Breakpoint and watchpoint functions
1252 */
1253
1254 /* Setup hardware Breakpoint Register Pair */
1255 static int cortex_a_set_breakpoint(struct target *target,
1256 struct breakpoint *breakpoint, uint8_t matchmode)
1257 {
1258 int retval;
1259 int brp_i = 0;
1260 uint32_t control;
1261 uint8_t byte_addr_select = 0x0F;
1262 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1263 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1264 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1265
1266 if (breakpoint->set) {
1267 LOG_WARNING("breakpoint already set");
1268 return ERROR_OK;
1269 }
1270
1271 if (breakpoint->type == BKPT_HARD) {
1272 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1273 brp_i++;
1274 if (brp_i >= cortex_a->brp_num) {
1275 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1276 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1277 }
1278 breakpoint->set = brp_i + 1;
1279 if (breakpoint->length == 2)
1280 byte_addr_select = (3 << (breakpoint->address & 0x02));
1281 control = ((matchmode & 0x7) << 20)
1282 | (byte_addr_select << 5)
1283 | (3 << 1) | 1;
1284 brp_list[brp_i].used = 1;
1285 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1286 brp_list[brp_i].control = control;
1287 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1288 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1289 brp_list[brp_i].value);
1290 if (retval != ERROR_OK)
1291 return retval;
1292 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1293 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1294 brp_list[brp_i].control);
1295 if (retval != ERROR_OK)
1296 return retval;
1297 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1298 brp_list[brp_i].control,
1299 brp_list[brp_i].value);
1300 } else if (breakpoint->type == BKPT_SOFT) {
1301 uint8_t code[4];
1302 /* length == 2: Thumb breakpoint */
1303 if (breakpoint->length == 2)
1304 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1305 else
1306 /* length == 3: Thumb-2 breakpoint, actual encoding is
1307 * a regular Thumb BKPT instruction but we replace a
1308 * 32bit Thumb-2 instruction, so fix-up the breakpoint
1309 * length
1310 */
1311 if (breakpoint->length == 3) {
1312 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1313 breakpoint->length = 4;
1314 } else
1315 /* length == 4, normal ARM breakpoint */
1316 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1317
1318 retval = target_read_memory(target,
1319 breakpoint->address & 0xFFFFFFFE,
1320 breakpoint->length, 1,
1321 breakpoint->orig_instr);
1322 if (retval != ERROR_OK)
1323 return retval;
1324
1325 /* make sure data cache is cleaned & invalidated down to PoC */
1326 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1327 armv7a_cache_flush_virt(target, breakpoint->address,
1328 breakpoint->length);
1329 }
1330
1331 retval = target_write_memory(target,
1332 breakpoint->address & 0xFFFFFFFE,
1333 breakpoint->length, 1, code);
1334 if (retval != ERROR_OK)
1335 return retval;
1336
1337 /* update i-cache at breakpoint location */
1338 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1339 breakpoint->length);
1340 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1341 breakpoint->length);
1342
1343 breakpoint->set = 0x11; /* Any nice value but 0 */
1344 }
1345
1346 return ERROR_OK;
1347 }
1348
1349 static int cortex_a_set_context_breakpoint(struct target *target,
1350 struct breakpoint *breakpoint, uint8_t matchmode)
1351 {
1352 int retval = ERROR_FAIL;
1353 int brp_i = 0;
1354 uint32_t control;
1355 uint8_t byte_addr_select = 0x0F;
1356 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1357 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1358 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1359
1360 if (breakpoint->set) {
1361 LOG_WARNING("breakpoint already set");
1362 return retval;
1363 }
1364 /*check available context BRPs*/
1365 while ((brp_list[brp_i].used ||
1366 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1367 brp_i++;
1368
1369 if (brp_i >= cortex_a->brp_num) {
1370 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1371 return ERROR_FAIL;
1372 }
1373
1374 breakpoint->set = brp_i + 1;
1375 control = ((matchmode & 0x7) << 20)
1376 | (byte_addr_select << 5)
1377 | (3 << 1) | 1;
1378 brp_list[brp_i].used = 1;
1379 brp_list[brp_i].value = (breakpoint->asid);
1380 brp_list[brp_i].control = control;
1381 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1382 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1383 brp_list[brp_i].value);
1384 if (retval != ERROR_OK)
1385 return retval;
1386 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1387 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1388 brp_list[brp_i].control);
1389 if (retval != ERROR_OK)
1390 return retval;
1391 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1392 brp_list[brp_i].control,
1393 brp_list[brp_i].value);
1394 return ERROR_OK;
1395
1396 }
1397
1398 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1399 {
1400 int retval = ERROR_FAIL;
1401 int brp_1 = 0; /* holds the contextID pair */
1402 int brp_2 = 0; /* holds the IVA pair */
1403 uint32_t control_CTX, control_IVA;
1404 uint8_t CTX_byte_addr_select = 0x0F;
1405 uint8_t IVA_byte_addr_select = 0x0F;
1406 uint8_t CTX_machmode = 0x03;
1407 uint8_t IVA_machmode = 0x01;
1408 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1409 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1410 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1411
1412 if (breakpoint->set) {
1413 LOG_WARNING("breakpoint already set");
1414 return retval;
1415 }
1416 /*check available context BRPs*/
1417 while ((brp_list[brp_1].used ||
1418 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1419 brp_1++;
1420
1421 printf("brp(CTX) found num: %d\n", brp_1);
1422 if (brp_1 >= cortex_a->brp_num) {
1423 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1424 return ERROR_FAIL;
1425 }
1426
1427 while ((brp_list[brp_2].used ||
1428 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1429 brp_2++;
1430
1431 printf("brp(IVA) found num: %d\n", brp_2);
1432 if (brp_2 >= cortex_a->brp_num) {
1433 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1434 return ERROR_FAIL;
1435 }
1436
1437 breakpoint->set = brp_1 + 1;
1438 breakpoint->linked_BRP = brp_2;
1439 control_CTX = ((CTX_machmode & 0x7) << 20)
1440 | (brp_2 << 16)
1441 | (0 << 14)
1442 | (CTX_byte_addr_select << 5)
1443 | (3 << 1) | 1;
1444 brp_list[brp_1].used = 1;
1445 brp_list[brp_1].value = (breakpoint->asid);
1446 brp_list[brp_1].control = control_CTX;
1447 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1448 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1449 brp_list[brp_1].value);
1450 if (retval != ERROR_OK)
1451 return retval;
1452 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1453 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1454 brp_list[brp_1].control);
1455 if (retval != ERROR_OK)
1456 return retval;
1457
1458 control_IVA = ((IVA_machmode & 0x7) << 20)
1459 | (brp_1 << 16)
1460 | (IVA_byte_addr_select << 5)
1461 | (3 << 1) | 1;
1462 brp_list[brp_2].used = 1;
1463 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1464 brp_list[brp_2].control = control_IVA;
1465 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1466 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1467 brp_list[brp_2].value);
1468 if (retval != ERROR_OK)
1469 return retval;
1470 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1471 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1472 brp_list[brp_2].control);
1473 if (retval != ERROR_OK)
1474 return retval;
1475
1476 return ERROR_OK;
1477 }
1478
/* Remove a previously installed breakpoint.
 * Hard breakpoints: disable and clear the backing BRP(s).  A hard
 * breakpoint with both a non-zero address and a non-zero asid is a
 * hybrid one and owns two linked BRPs (context + IVA), both released.
 * Soft breakpoints: write the saved original instruction back to
 * memory and invalidate the caches around it.
 * Invalid (stale) BRP indices are only logged; the function still
 * returns ERROR_OK in that case. */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: release the context BRP and the
			 * linked IVA BRP */
			int brp_i = breakpoint->set - 1;	/* set stores index + 1 */
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* clear the bookkeeping, then write BCR=0 (disable)
			 * and BVR=0 back to the core */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: release its single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
				breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
			breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
			breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1594
1595 static int cortex_a_add_breakpoint(struct target *target,
1596 struct breakpoint *breakpoint)
1597 {
1598 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1599
1600 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1601 LOG_INFO("no hardware breakpoint available");
1602 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1603 }
1604
1605 if (breakpoint->type == BKPT_HARD)
1606 cortex_a->brp_num_available--;
1607
1608 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1609 }
1610
1611 static int cortex_a_add_context_breakpoint(struct target *target,
1612 struct breakpoint *breakpoint)
1613 {
1614 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1615
1616 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1617 LOG_INFO("no hardware breakpoint available");
1618 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1619 }
1620
1621 if (breakpoint->type == BKPT_HARD)
1622 cortex_a->brp_num_available--;
1623
1624 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1625 }
1626
1627 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1628 struct breakpoint *breakpoint)
1629 {
1630 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1631
1632 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1633 LOG_INFO("no hardware breakpoint available");
1634 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1635 }
1636
1637 if (breakpoint->type == BKPT_HARD)
1638 cortex_a->brp_num_available--;
1639
1640 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1641 }
1642
1643
1644 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1645 {
1646 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1647
1648 #if 0
1649 /* It is perfectly possible to remove breakpoints while the target is running */
1650 if (target->state != TARGET_HALTED) {
1651 LOG_WARNING("target not halted");
1652 return ERROR_TARGET_NOT_HALTED;
1653 }
1654 #endif
1655
1656 if (breakpoint->set) {
1657 cortex_a_unset_breakpoint(target, breakpoint);
1658 if (breakpoint->type == BKPT_HARD)
1659 cortex_a->brp_num_available++;
1660 }
1661
1662
1663 return ERROR_OK;
1664 }
1665
1666 /*
1667 * Cortex-A Reset functions
1668 */
1669
/* Assert reset on the target.  Prefers a user-supplied reset-assert
 * event handler; otherwise pulses SRST when the adapter supports it.
 * May be called before the target has been examined; the register
 * cache is only invalidated when it exists. */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */

		/*
		 * FIXME: fix reset when transport is SWD. This is a temporary
		 * work-around for release v0.10 that is not intended to stay!
		 */
		if (transport_is_swd() ||
				(target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
			jtag_add_reset(0, 1);

	} else {
		/* no event handler and no SRST line: nothing we can do */
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1709
1710 static int cortex_a_deassert_reset(struct target *target)
1711 {
1712 int retval;
1713
1714 LOG_DEBUG(" ");
1715
1716 /* be certain SRST is off */
1717 jtag_add_reset(0, 0);
1718
1719 if (target_was_examined(target)) {
1720 retval = cortex_a_poll(target);
1721 if (retval != ERROR_OK)
1722 return retval;
1723 }
1724
1725 if (target->reset_halt) {
1726 if (target->state != TARGET_HALTED) {
1727 LOG_WARNING("%s: ran after reset and before halt ...",
1728 target_name(target));
1729 if (target_was_examined(target)) {
1730 retval = target_halt(target);
1731 if (retval != ERROR_OK)
1732 return retval;
1733 } else
1734 target->state = TARGET_UNKNOWN;
1735 }
1736 }
1737
1738 return ERROR_OK;
1739 }
1740
1741 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1742 {
1743 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1744 * New desired mode must be in mode. Current value of DSCR must be in
1745 * *dscr, which is updated with new value.
1746 *
1747 * This function elides actually sending the mode-change over the debug
1748 * interface if the mode is already set as desired.
1749 */
1750 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1751 if (new_dscr != *dscr) {
1752 struct armv7a_common *armv7a = target_to_armv7a(target);
1753 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1754 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1755 if (retval == ERROR_OK)
1756 *dscr = new_dscr;
1757 return retval;
1758 } else {
1759 return ERROR_OK;
1760 }
1761 }
1762
1763 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1764 uint32_t value, uint32_t *dscr)
1765 {
1766 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1767 struct armv7a_common *armv7a = target_to_armv7a(target);
1768 int64_t then;
1769 int retval;
1770
1771 if ((*dscr & mask) == value)
1772 return ERROR_OK;
1773
1774 then = timeval_ms();
1775 while (1) {
1776 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1777 armv7a->debug_base + CPUDBG_DSCR, dscr);
1778 if (retval != ERROR_OK) {
1779 LOG_ERROR("Could not read DSCR register");
1780 return retval;
1781 }
1782 if ((*dscr & mask) == value)
1783 break;
1784 if (timeval_ms() > then + 1000) {
1785 LOG_ERROR("timeout waiting for DSCR bit change");
1786 return ERROR_FAIL;
1787 }
1788 }
1789 return ERROR_OK;
1790 }
1791
/* Read a coprocessor register into *data by executing <opcode> (an MRC
 * that targets R0) on the core and draining R0 through DTRTX.
 * *dscr holds the cached DSCR value and is updated as a side effect.
 * NOTE(review): clobbers R0 on the target; the caller appears to be
 * responsible for having marked R0 dirty beforehand — confirm against
 * the callers in the memory access path. */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1825
1826 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1827 uint32_t *dfsr, uint32_t *dscr)
1828 {
1829 int retval;
1830
1831 if (dfar) {
1832 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1833 if (retval != ERROR_OK)
1834 return retval;
1835 }
1836
1837 if (dfsr) {
1838 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
1839 if (retval != ERROR_OK)
1840 return retval;
1841 }
1842
1843 return ERROR_OK;
1844 }
1845
/* Write data into a coprocessor register by loading it into DTRRX,
 * pulling it into R0 on the core, then executing <opcode> (an MCR
 * that reads R0).  *dscr holds the cached DSCR value and is updated
 * as a side effect.
 * NOTE(review): clobbers R0 on the target; the caller appears to be
 * responsible for having marked R0 dirty beforehand — confirm against
 * the callers in the memory access path. */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
	uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Write the value into DTRRX. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1878
1879 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
1880 uint32_t dfsr, uint32_t *dscr)
1881 {
1882 int retval;
1883
1884 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
1885 if (retval != ERROR_OK)
1886 return retval;
1887
1888 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
1889 if (retval != ERROR_OK)
1890 return retval;
1891
1892 return ERROR_OK;
1893 }
1894
1895 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
1896 {
1897 uint32_t status, upper4;
1898
1899 if (dfsr & (1 << 9)) {
1900 /* LPAE format. */
1901 status = dfsr & 0x3f;
1902 upper4 = status >> 2;
1903 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
1904 return ERROR_TARGET_TRANSLATION_FAULT;
1905 else if (status == 33)
1906 return ERROR_TARGET_UNALIGNED_ACCESS;
1907 else
1908 return ERROR_TARGET_DATA_ABORT;
1909 } else {
1910 /* Normal format. */
1911 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
1912 if (status == 1)
1913 return ERROR_TARGET_UNALIGNED_ACCESS;
1914 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
1915 status == 9 || status == 11 || status == 13 || status == 15)
1916 return ERROR_TARGET_TRANSLATION_FAULT;
1917 else
1918 return ERROR_TARGET_DATA_ABORT;
1919 }
1920 }
1921
static int cortex_a_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX, using the target's
		 * endianness for multi-byte objects. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1 (p14,c5 is the DCC
		 * data register). */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory; the *_IP
		 * store forms advance the address held in R0. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
1996
1997 static int cortex_a_write_cpu_memory_fast(struct target *target,
1998 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1999 {
2000 /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2001 * in *dscr; updated to new value. This is fast but only works for
2002 * word-sized objects at aligned addresses.
2003 * Preconditions:
2004 * - Address is in R0 and must be a multiple of 4.
2005 * - R0 is marked dirty.
2006 */
2007 struct armv7a_common *armv7a = target_to_armv7a(target);
2008 int retval;
2009
2010 /* Switch to fast mode if not already in that mode. */
2011 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2012 if (retval != ERROR_OK)
2013 return retval;
2014
2015 /* Latch STC instruction. */
2016 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2017 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2018 if (retval != ERROR_OK)
2019 return retval;
2020
2021 /* Transfer all the data and issue all the instructions. */
2022 return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2023 4, count, armv7a->debug_base + CPUDBG_DTRRX);
2024 }
2025
2026 static int cortex_a_write_cpu_memory(struct target *target,
2027 uint32_t address, uint32_t size,
2028 uint32_t count, const uint8_t *buffer)
2029 {
2030 /* Write memory through the CPU. */
2031 int retval, final_retval;
2032 struct armv7a_common *armv7a = target_to_armv7a(target);
2033 struct arm *arm = &armv7a->arm;
2034 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2035
2036 LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2037 address, size, count);
2038 if (target->state != TARGET_HALTED) {
2039 LOG_WARNING("target not halted");
2040 return ERROR_TARGET_NOT_HALTED;
2041 }
2042
2043 if (!count)
2044 return ERROR_OK;
2045
2046 /* Clear any abort. */
2047 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2048 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2049 if (retval != ERROR_OK)
2050 return retval;
2051
2052 /* Read DSCR. */
2053 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2054 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2055 if (retval != ERROR_OK)
2056 return retval;
2057
2058 /* Switch to non-blocking mode if not already in that mode. */
2059 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2060 if (retval != ERROR_OK)
2061 goto out;
2062
2063 /* Mark R0 as dirty. */
2064 arm_reg_current(arm, 0)->dirty = true;
2065
2066 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2067 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2068 if (retval != ERROR_OK)
2069 goto out;
2070
2071 /* Get the memory address into R0. */
2072 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2073 armv7a->debug_base + CPUDBG_DTRRX, address);
2074 if (retval != ERROR_OK)
2075 goto out;
2076 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2077 if (retval != ERROR_OK)
2078 goto out;
2079
2080 if (size == 4 && (address % 4) == 0) {
2081 /* We are doing a word-aligned transfer, so use fast mode. */
2082 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2083 } else {
2084 /* Use slow path. */
2085 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2086 }
2087
2088 out:
2089 final_retval = retval;
2090
2091 /* Switch to non-blocking mode if not already in that mode. */
2092 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2093 if (final_retval == ERROR_OK)
2094 final_retval = retval;
2095
2096 /* Wait for last issued instruction to complete. */
2097 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2098 if (final_retval == ERROR_OK)
2099 final_retval = retval;
2100
2101 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2102 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2103 * check RXfull_l). Most of the time this will be free because RXfull_l
2104 * will be cleared immediately and cached in dscr. However, don't do this
2105 * if there is fault, because then the instruction might not have completed
2106 * successfully. */
2107 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2108 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2109 if (retval != ERROR_OK)
2110 return retval;
2111 }
2112
2113 /* If there were any sticky abort flags, clear them. */
2114 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2115 fault_dscr = dscr;
2116 mem_ap_write_atomic_u32(armv7a->debug_ap,
2117 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2118 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2119 } else {
2120 fault_dscr = 0;
2121 }
2122
2123 /* Handle synchronous data faults. */
2124 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2125 if (final_retval == ERROR_OK) {
2126 /* Final return value will reflect cause of fault. */
2127 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2128 if (retval == ERROR_OK) {
2129 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2130 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2131 } else
2132 final_retval = retval;
2133 }
2134 /* Fault destroyed DFAR/DFSR; restore them. */
2135 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2136 if (retval != ERROR_OK)
2137 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2138 }
2139
2140 /* Handle asynchronous data faults. */
2141 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2142 if (final_retval == ERROR_OK)
2143 /* No other error has been recorded so far, so keep this one. */
2144 final_retval = ERROR_TARGET_DATA_ABORT;
2145 }
2146
2147 /* If the DCC is nonempty, clear it. */
2148 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2149 uint32_t dummy;
2150 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2151 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2152 if (final_retval == ERROR_OK)
2153 final_retval = retval;
2154 }
2155 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2156 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2157 if (final_retval == ERROR_OK)
2158 final_retval = retval;
2159 }
2160
2161 /* Done. */
2162 return final_retval;
2163 }
2164
2165 static int cortex_a_read_cpu_memory_slow(struct target *target,
2166 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2167 {
2168 /* Reads count objects of size size into *buffer. Old value of DSCR must be
2169 * in *dscr; updated to new value. This is slow because it works for
2170 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2171 * the address is aligned, cortex_a_read_cpu_memory_fast should be
2172 * preferred.
2173 * Preconditions:
2174 * - Address is in R0.
2175 * - R0 is marked dirty.
2176 */
2177 struct armv7a_common *armv7a = target_to_armv7a(target);
2178 struct arm *arm = &armv7a->arm;
2179 int retval;
2180
2181 /* Mark register R1 as dirty, to use for transferring data. */
2182 arm_reg_current(arm, 1)->dirty = true;
2183
2184 /* Switch to non-blocking mode if not already in that mode. */
2185 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2186 if (retval != ERROR_OK)
2187 return retval;
2188
2189 /* Go through the objects. */
2190 while (count) {
2191 /* Issue a load of the appropriate size to R1. */
2192 uint32_t opcode, data;
2193 if (size == 1)
2194 opcode = ARMV4_5_LDRB_IP(1, 0);
2195 else if (size == 2)
2196 opcode = ARMV4_5_LDRH_IP(1, 0);
2197 else
2198 opcode = ARMV4_5_LDRW_IP(1, 0);
2199 retval = cortex_a_exec_opcode(target, opcode, dscr);
2200 if (retval != ERROR_OK)
2201 return retval;
2202
2203 /* Issue a write of R1 to DTRTX. */
2204 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2205 if (retval != ERROR_OK)
2206 return retval;
2207
2208 /* Check for faults and return early. */
2209 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2210 return ERROR_OK; /* A data fault is not considered a system failure. */
2211
2212 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2213 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2214 * must also check TXfull_l). Most of the time this will be free
2215 * because TXfull_l will be set immediately and cached in dscr. */
2216 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2217 DSCR_DTRTX_FULL_LATCHED, dscr);
2218 if (retval != ERROR_OK)
2219 return retval;
2220
2221 /* Read the value transferred to DTRTX into the buffer. */
2222 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2223 armv7a->debug_base + CPUDBG_DTRTX, &data);
2224 if (retval != ERROR_OK)
2225 return retval;
2226 if (size == 1)
2227 *buffer = (uint8_t) data;
2228 else if (size == 2)
2229 target_buffer_set_u16(target, buffer, (uint16_t) data);
2230 else
2231 target_buffer_set_u32(target, buffer, data);
2232
2233 /* Advance. */
2234 buffer += size;
2235 --count;
2236 }
2237
2238 return ERROR_OK;
2239 }
2240
static int cortex_a_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. This loads the first
	 * word from [R0], #4 into DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* One word is already in flight from the LDC above; the remaining
	 * count - 1 words are pipelined through fast mode below. */
	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. In fast mode, each DTRTX read below
		 * causes the core to re-execute this latched instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance past everything read so far; one word remains in DTRTX. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. Leaving fast
	 * mode is required before the final DTRTX read, or it would re-trigger
	 * the latched LDC. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2327
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through the CPU: load the address into R0, dispatch to the
	 * fast (word-aligned) or slow (any size) helper, then decode and clean
	 * up any sticky fault state before returning. Errors encountered during
	 * cleanup are folded into final_retval, first error wins. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty so it is restored on resume. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

	out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. fault_dscr keeps a
	 * snapshot so the fault can still be decoded below. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it so a stale word cannot corrupt the
	 * next DCC transaction. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2454
2455
2456 /*
2457 * Cortex-A Memory access
2458 *
2459 * This is same Cortex-M3 but we must also use the correct
2460 * ap number for every access.
2461 */
2462
2463 static int cortex_a_read_phys_memory(struct target *target,
2464 target_addr_t address, uint32_t size,
2465 uint32_t count, uint8_t *buffer)
2466 {
2467 int retval;
2468
2469 if (!count || !buffer)
2470 return ERROR_COMMAND_SYNTAX_ERROR;
2471
2472 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2473 address, size, count);
2474
2475 /* read memory through the CPU */
2476 cortex_a_prep_memaccess(target, 1);
2477 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2478 cortex_a_post_memaccess(target, 1);
2479
2480 return retval;
2481 }
2482
2483 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2484 uint32_t size, uint32_t count, uint8_t *buffer)
2485 {
2486 int retval;
2487
2488 /* cortex_a handles unaligned memory access */
2489 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2490 address, size, count);
2491
2492 cortex_a_prep_memaccess(target, 0);
2493 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2494 cortex_a_post_memaccess(target, 0);
2495
2496 return retval;
2497 }
2498
2499 static int cortex_a_write_phys_memory(struct target *target,
2500 target_addr_t address, uint32_t size,
2501 uint32_t count, const uint8_t *buffer)
2502 {
2503 int retval;
2504
2505 if (!count || !buffer)
2506 return ERROR_COMMAND_SYNTAX_ERROR;
2507
2508 LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2509 address, size, count);
2510
2511 /* write memory through the CPU */
2512 cortex_a_prep_memaccess(target, 1);
2513 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2514 cortex_a_post_memaccess(target, 1);
2515
2516 return retval;
2517 }
2518
2519 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2520 uint32_t size, uint32_t count, const uint8_t *buffer)
2521 {
2522 int retval;
2523
2524 /* cortex_a handles unaligned memory access */
2525 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2526 address, size, count);
2527
2528 /* memory writes bypass the caches, must flush before writing */
2529 armv7a_cache_auto_flush_on_write(target, address, size * count);
2530
2531 cortex_a_prep_memaccess(target, 0);
2532 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2533 cortex_a_post_memaccess(target, 0);
2534 return retval;
2535 }
2536
2537 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2538 uint32_t count, uint8_t *buffer)
2539 {
2540 uint32_t size;
2541
2542 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2543 * will have something to do with the size we leave to it. */
2544 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2545 if (address & size) {
2546 int retval = target_read_memory(target, address, size, 1, buffer);
2547 if (retval != ERROR_OK)
2548 return retval;
2549 address += size;
2550 count -= size;
2551 buffer += size;
2552 }
2553 }
2554
2555 /* Read the data with as large access size as possible. */
2556 for (; size > 0; size /= 2) {
2557 uint32_t aligned = count - count % size;
2558 if (aligned > 0) {
2559 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2560 if (retval != ERROR_OK)
2561 return retval;
2562 address += aligned;
2563 count -= aligned;
2564 buffer += aligned;
2565 }
2566 }
2567
2568 return ERROR_OK;
2569 }
2570
2571 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2572 uint32_t count, const uint8_t *buffer)
2573 {
2574 uint32_t size;
2575
2576 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2577 * will have something to do with the size we leave to it. */
2578 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2579 if (address & size) {
2580 int retval = target_write_memory(target, address, size, 1, buffer);
2581 if (retval != ERROR_OK)
2582 return retval;
2583 address += size;
2584 count -= size;
2585 buffer += size;
2586 }
2587 }
2588
2589 /* Write the data with as large access size as possible. */
2590 for (; size > 0; size /= 2) {
2591 uint32_t aligned = count - count % size;
2592 if (aligned > 0) {
2593 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2594 if (retval != ERROR_OK)
2595 return retval;
2596 address += aligned;
2597 count -= aligned;
2598 buffer += aligned;
2599 }
2600 }
2601
2602 return ERROR_OK;
2603 }
2604
2605 static int cortex_a_handle_target_request(void *priv)
2606 {
2607 struct target *target = priv;
2608 struct armv7a_common *armv7a = target_to_armv7a(target);
2609 int retval;
2610
2611 if (!target_was_examined(target))
2612 return ERROR_OK;
2613 if (!target->dbg_msg_enabled)
2614 return ERROR_OK;
2615
2616 if (target->state == TARGET_RUNNING) {
2617 uint32_t request;
2618 uint32_t dscr;
2619 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2620 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2621
2622 /* check if we have data */
2623 int64_t then = timeval_ms();
2624 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2625 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2626 armv7a->debug_base + CPUDBG_DTRTX, &request);
2627 if (retval == ERROR_OK) {
2628 target_request(target, request);
2629 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2630 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2631 }
2632 if (timeval_ms() > then + 1000) {
2633 LOG_ERROR("Timeout waiting for dtr tx full");
2634 return ERROR_FAIL;
2635 }
2636 }
2637 }
2638
2639 return ERROR_OK;
2640 }
2641
2642 /*
2643 * Cortex-A target information and configuration
2644 */
2645
2646 static int cortex_a_examine_first(struct target *target)
2647 {
2648 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2649 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2650 struct adiv5_dap *swjdp = armv7a->arm.dap;
2651
2652 int i;
2653 int retval = ERROR_OK;
2654 uint32_t didr, cpuid, dbg_osreg;
2655
2656 /* Search for the APB-AP - it is needed for access to debug registers */
2657 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2658 if (retval != ERROR_OK) {
2659 LOG_ERROR("Could not find APB-AP for debug access");
2660 return retval;
2661 }
2662
2663 retval = mem_ap_init(armv7a->debug_ap);
2664 if (retval != ERROR_OK) {
2665 LOG_ERROR("Could not initialize the APB-AP");
2666 return retval;
2667 }
2668
2669 armv7a->debug_ap->memaccess_tck = 80;
2670
2671 if (!target->dbgbase_set) {
2672 uint32_t dbgbase;
2673 /* Get ROM Table base */
2674 uint32_t apid;
2675 int32_t coreidx = target->coreid;
2676 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2677 target->cmd_name);
2678 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2679 if (retval != ERROR_OK)
2680 return retval;
2681 /* Lookup 0x15 -- Processor DAP */
2682 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2683 &armv7a->debug_base, &coreidx);
2684 if (retval != ERROR_OK) {
2685 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2686 target->cmd_name);
2687 return retval;
2688 }
2689 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2690 target->coreid, armv7a->debug_base);
2691 } else
2692 armv7a->debug_base = target->dbgbase;
2693
2694 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2695 armv7a->debug_base + CPUDBG_DIDR, &didr);
2696 if (retval != ERROR_OK) {
2697 LOG_DEBUG("Examine %s failed", "DIDR");
2698 return retval;
2699 }
2700
2701 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2702 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2703 if (retval != ERROR_OK) {
2704 LOG_DEBUG("Examine %s failed", "CPUID");
2705 return retval;
2706 }
2707
2708 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2709 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2710
2711 cortex_a->didr = didr;
2712 cortex_a->cpuid = cpuid;
2713
2714 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2715 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2716 if (retval != ERROR_OK)
2717 return retval;
2718 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2719
2720 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2721 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2722 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2723 return ERROR_TARGET_INIT_FAILED;
2724 }
2725
2726 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2727 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2728
2729 /* Read DBGOSLSR and check if OSLK is implemented */
2730 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2731 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2732 if (retval != ERROR_OK)
2733 return retval;
2734 LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2735
2736 /* check if OS Lock is implemented */
2737 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2738 /* check if OS Lock is set */
2739 if (dbg_osreg & OSLSR_OSLK) {
2740 LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2741
2742 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2743 armv7a->debug_base + CPUDBG_OSLAR,
2744 0);
2745 if (retval == ERROR_OK)
2746 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2747 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2748
2749 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2750 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2751 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2752 target->coreid);
2753 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2754 return ERROR_TARGET_INIT_FAILED;
2755 }
2756 }
2757 }
2758
2759 armv7a->arm.core_type = ARM_MODE_MON;
2760
2761 /* Avoid recreating the registers cache */
2762 if (!target_was_examined(target)) {
2763 retval = cortex_a_dpm_setup(cortex_a, didr);
2764 if (retval != ERROR_OK)
2765 return retval;
2766 }
2767
2768 /* Setup Breakpoint Register Pairs */
2769 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2770 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2771 cortex_a->brp_num_available = cortex_a->brp_num;
2772 free(cortex_a->brp_list);
2773 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2774 /* cortex_a->brb_enabled = ????; */
2775 for (i = 0; i < cortex_a->brp_num; i++) {
2776 cortex_a->brp_list[i].used = 0;
2777 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2778 cortex_a->brp_list[i].type = BRP_NORMAL;
2779 else
2780 cortex_a->brp_list[i].type = BRP_CONTEXT;
2781 cortex_a->brp_list[i].value = 0;
2782 cortex_a->brp_list[i].control = 0;
2783 cortex_a->brp_list[i].BRPn = i;
2784 }
2785
2786 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2787
2788 /* select debug_ap as default */
2789 swjdp->apsel = armv7a->debug_ap->ap_num;
2790
2791 target_set_examined(target);
2792 return ERROR_OK;
2793 }
2794
2795 static int cortex_a_examine(struct target *target)
2796 {
2797 int retval = ERROR_OK;
2798
2799 /* Reestablish communication after target reset */
2800 retval = cortex_a_examine_first(target);
2801
2802 /* Configure core debug access */
2803 if (retval == ERROR_OK)
2804 retval = cortex_a_init_debug_access(target);
2805
2806 return retval;
2807 }
2808
2809 /*
2810 * Cortex-A target creation and initialization
2811 */
2812
2813 static int cortex_a_init_target(struct command_context *cmd_ctx,
2814 struct target *target)
2815 {
2816 /* examine_first() does a bunch of this */
2817 arm_semihosting_init(target);
2818 return ERROR_OK;
2819 }
2820
2821 static int cortex_a_init_arch_info(struct target *target,
2822 struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
2823 {
2824 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2825
2826 /* Setup struct cortex_a_common */
2827 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2828 armv7a->arm.dap = dap;
2829
2830 /* register arch-specific functions */
2831 armv7a->examine_debug_reason = NULL;
2832
2833 armv7a->post_debug_entry = cortex_a_post_debug_entry;
2834
2835 armv7a->pre_restore_context = NULL;
2836
2837 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
2838
2839
2840 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
2841
2842 /* REVISIT v7a setup should be in a v7a-specific routine */
2843 armv7a_init_arch_info(target, armv7a);
2844 target_register_timer_callback(cortex_a_handle_target_request, 1,
2845 TARGET_TIMER_TYPE_PERIODIC, target);
2846
2847 return ERROR_OK;
2848 }
2849
2850 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
2851 {
2852 struct cortex_a_common *cortex_a;
2853 struct adiv5_private_config *pc;
2854
2855 if (target->private_config == NULL)
2856 return ERROR_FAIL;
2857
2858 pc = (struct adiv5_private_config *)target->private_config;
2859
2860 cortex_a = calloc(1, sizeof(struct cortex_a_common));
2861 if (cortex_a == NULL) {
2862 LOG_ERROR("Out of memory");
2863 return ERROR_FAIL;
2864 }
2865 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2866 cortex_a->armv7a_common.is_armv7r = false;
2867 cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
2868
2869 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2870 }
2871
2872 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2873 {
2874 struct cortex_a_common *cortex_a;
2875 struct adiv5_private_config *pc;
2876
2877 pc = (struct adiv5_private_config *)target->private_config;
2878 if (adiv5_verify_config(pc) != ERROR_OK)
2879 return ERROR_FAIL;
2880
2881 cortex_a = calloc(1, sizeof(struct cortex_a_common));
2882 if (cortex_a == NULL) {
2883 LOG_ERROR("Out of memory");
2884 return ERROR_FAIL;
2885 }
2886 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2887 cortex_a->armv7a_common.is_armv7r = true;
2888
2889 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2890 }
2891
2892 static void cortex_a_deinit_target(struct target *target)
2893 {
2894 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2895 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2896 struct arm_dpm *dpm = &armv7a->dpm;
2897 uint32_t dscr;
2898 int retval;
2899
2900 if (target_was_examined(target)) {
2901 /* Disable halt for breakpoint, watchpoint and vector catch */
2902 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2903 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2904 if (retval == ERROR_OK)
2905 mem_ap_write_atomic_u32(armv7a->debug_ap,
2906 armv7a->debug_base + CPUDBG_DSCR,
2907 dscr & ~DSCR_HALT_DBG_MODE);
2908 }
2909
2910 free(cortex_a->brp_list);
2911 free(dpm->dbp);
2912 free(dpm->dwp);
2913 free(target->private_config);
2914 free(cortex_a);
2915 }
2916
2917 static int cortex_a_mmu(struct target *target, int *enabled)
2918 {
2919 struct armv7a_common *armv7a = target_to_armv7a(target);
2920
2921 if (target->state != TARGET_HALTED) {
2922 LOG_ERROR("%s: target not halted", __func__);
2923 return ERROR_TARGET_INVALID;
2924 }
2925
2926 if (armv7a->is_armv7r)
2927 *enabled = 0;
2928 else
2929 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2930
2931 return ERROR_OK;
2932 }
2933
2934 static int cortex_a_virt2phys(struct target *target,
2935 target_addr_t virt, target_addr_t *phys)
2936 {
2937 int retval;
2938 int mmu_enabled = 0;
2939
2940 /*
2941 * If the MMU was not enabled at debug entry, there is no
2942 * way of knowing if there was ever a valid configuration
2943 * for it and thus it's not safe to enable it. In this case,
2944 * just return the virtual address as physical.
2945 */
2946 cortex_a_mmu(target, &mmu_enabled);
2947 if (!mmu_enabled) {
2948 *phys = virt;
2949 return ERROR_OK;
2950 }
2951
2952 /* mmu must be enable in order to get a correct translation */
2953 retval = cortex_a_mmu_modify(target, 1);
2954 if (retval != ERROR_OK)
2955 return retval;
2956 return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
2957 (uint32_t *)phys, 1);
2958 }
2959
2960 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
2961 {
2962 struct target *target = get_current_target(CMD_CTX);
2963 struct armv7a_common *armv7a = target_to_armv7a(target);
2964
2965 return armv7a_handle_cache_info_command(CMD_CTX,
2966 &armv7a->armv7a_mmu.armv7a_cache);
2967 }
2968
2969
2970 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
2971 {
2972 struct target *target = get_current_target(CMD_CTX);
2973 if (!target_was_examined(target)) {
2974 LOG_ERROR("target not examined yet");
2975 return ERROR_FAIL;
2976 }
2977
2978 return cortex_a_init_debug_access(target);
2979 }
2980
2981 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
2982 {
2983 struct target *target = get_current_target(CMD_CTX);
2984 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2985
2986 static const Jim_Nvp nvp_maskisr_modes[] = {
2987 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
2988 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
2989 { .name = NULL, .value = -1 },
2990 };
2991 const Jim_Nvp *n;
2992
2993 if (CMD_ARGC > 0) {
2994 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2995 if (n->name == NULL) {
2996 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2997 return ERROR_COMMAND_SYNTAX_ERROR;
2998 }
2999
3000 cortex_a->isrmasking_mode = n->value;
3001 }
3002
3003 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3004 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3005
3006 return ERROR_OK;
3007 }
3008
3009 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3010 {
3011 struct target *target = get_current_target(CMD_CTX);
3012 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3013
3014 static const Jim_Nvp nvp_dacrfixup_modes[] = {
3015 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3016 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3017 { .name = NULL, .value = -1 },
3018 };
3019 const Jim_Nvp *n;
3020
3021 if (CMD_ARGC > 0) {
3022 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3023 if (n->name == NULL)
3024 return ERROR_COMMAND_SYNTAX_ERROR;
3025 cortex_a->dacrfixup_mode = n->value;
3026
3027 }
3028
3029 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3030 command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3031
3032 return ERROR_OK;
3033 }
3034
/* Subcommands registered under "cortex_a" (see cortex_a_command_handlers). */
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		/* COMMAND_ANY: may be set from a config script before examine */
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask cortex_a interrupts",
		.usage = "['on'|'off']",
	},
	{
		.name = "dacrfixup",
		.handler = handle_cortex_a_dacrfixup_command,
		.mode = COMMAND_ANY,
		.help = "set domain access control (DACR) to all-manager "
			"on memory access",
		.usage = "['on'|'off']",
	},
	{
		/* shared ARMv7-A MMU inspection commands */
		.chain = armv7a_mmu_command_handlers,
	},
	{
		/* generic SMP group commands */
		.chain = smp_command_handlers,
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command set for Cortex-A targets: generic ARM and ARMv7-A
 * command groups, plus the "cortex_a" group defined above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3090
/* Target-type vtable for ARMv7-A (Cortex-A) cores. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* Virtual-address accessors; physical counterparts are below. */
	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* Watchpoint hooks are intentionally unset (not implemented). */
	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	/* MMU-aware physical access and address translation. */
	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3139
/* Subcommands registered under "cortex_r4" — a subset of the cortex_a
 * set (no cache/MMU/DACR commands on the R profile). */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		/* NOTE(review): mode is COMMAND_EXEC here while the cortex_a
		 * maskisr uses COMMAND_ANY — confirm whether R4 should also
		 * allow setting this during config stage. */
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command set for Cortex-R4 targets: generic ARM commands plus
 * the "cortex_r4" group (no ARMv7-A MMU chain — R profile has no MMU). */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3171
/* Target-type vtable for ARMv7-R (Cortex-R4) cores. Shares most handlers
 * with cortexa_target; differences stem from the absence of an MMU. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* No MMU: plain memory accesses go straight to the physical
	 * accessors, and no virt2phys/mmu/phys hooks are installed. */
	.read_memory = cortex_a_read_phys_memory,
	.write_memory = cortex_a_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* Watchpoint hooks are intentionally unset (not implemented). */
	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account. Then change the URL to https://review.openocd.org/login/?link and revisit this page; this time it will link the new login method to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)