smp: replace commands smp_on/smp_off with "smp [on|off]"
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex-R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 * *
39 * Cortex-A8(tm) TRM, ARM DDI 0344H *
40 * Cortex-A9(tm) TRM, ARM DDI 0407F *
41 * Cortex-R4(tm) TRM, ARM DDI 0363E *
42 * Cortex-A15(tm) TRM, ARM DDI 0438C *
43 * *
44 ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "armv7a_mmu.h"
54 #include "target_request.h"
55 #include "target_type.h"
56 #include "arm_opcodes.h"
57 #include "arm_semihosting.h"
58 #include "transport/transport.h"
59 #include "smp.h"
60 #include <helper/time_support.h>
61
62 static int cortex_a_poll(struct target *target);
63 static int cortex_a_debug_entry(struct target *target);
64 static int cortex_a_restore_context(struct target *target, bool bpwp);
65 static int cortex_a_set_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_context_breakpoint(struct target *target,
68 struct breakpoint *breakpoint, uint8_t matchmode);
69 static int cortex_a_set_hybrid_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_unset_breakpoint(struct target *target,
72 struct breakpoint *breakpoint);
73 static int cortex_a_mmu(struct target *target, int *enabled);
74 static int cortex_a_mmu_modify(struct target *target, int enable);
75 static int cortex_a_virt2phys(struct target *target,
76 target_addr_t virt, target_addr_t *phys);
77 static int cortex_a_read_cpu_memory(struct target *target,
78 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
79
80
81 /* restore cp15_control_reg at resume */
82 static int cortex_a_restore_cp15_control_reg(struct target *target)
83 {
84 int retval = ERROR_OK;
85 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
86 struct armv7a_common *armv7a = target_to_armv7a(target);
87
88 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
89 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
90 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
91 retval = armv7a->arm.mcr(target, 15,
92 0, 0, /* op1, op2 */
93 1, 0, /* CRn, CRm */
94 cortex_a->cp15_control_reg);
95 }
96 return retval;
97 }
98
99 /*
100 * Set up ARM core for memory access.
101 * If !phys_access, switch to SVC mode and make sure the MMU is on
102 * If phys_access, switch the MMU off
103 */
104 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
105 {
106 struct armv7a_common *armv7a = target_to_armv7a(target);
107 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
108 int mmu_enabled = 0;
109
110 if (phys_access == 0) {
111 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
112 cortex_a_mmu(target, &mmu_enabled);
113 if (mmu_enabled)
114 cortex_a_mmu_modify(target, 1);
115 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
116 /* overwrite DACR to all-manager */
117 armv7a->arm.mcr(target, 15,
118 0, 0, 3, 0,
119 0xFFFFFFFF);
120 }
121 } else {
122 cortex_a_mmu(target, &mmu_enabled);
123 if (mmu_enabled)
124 cortex_a_mmu_modify(target, 0);
125 }
126 return ERROR_OK;
127 }
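
/* Note: calls to cortex_a_prep_memaccess() are expected to be paired with
 * cortex_a_post_memaccess() below, using the same phys_access flag, around
 * each memory access (a convention inferred from the two functions here). */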
128
129 /*
130 * Restore ARM core after memory access.
131 * If !phys_access, switch to previous mode
132 * If phys_access, restore MMU setting
133 */
134 static int cortex_a_post_memaccess(struct target *target, int phys_access)
135 {
136 struct armv7a_common *armv7a = target_to_armv7a(target);
137 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
138
139 if (phys_access == 0) {
140 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
141 /* restore */
142 armv7a->arm.mcr(target, 15,
143 0, 0, 3, 0,
144 cortex_a->cp15_dacr_reg);
145 }
146 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
147 } else {
148 int mmu_enabled = 0;
149 cortex_a_mmu(target, &mmu_enabled);
150 if (mmu_enabled)
151 cortex_a_mmu_modify(target, 1);
152 }
153 return ERROR_OK;
154 }
155
156
157 /* modify cp15_control_reg to enable or disable the MMU for:
158 * - virt2phys address conversion
159 * - reading or writing memory at physical or virtual addresses */
160 static int cortex_a_mmu_modify(struct target *target, int enable)
161 {
162 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
163 struct armv7a_common *armv7a = target_to_armv7a(target);
164 int retval = ERROR_OK;
165 int need_write = 0;
166
167 if (enable) {
168 /* the MMU cannot be enabled here if it was off when the target stopped */
169 if (!(cortex_a->cp15_control_reg & 0x1U)) {
170 LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
171 return ERROR_FAIL;
172 }
173 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
174 cortex_a->cp15_control_reg_curr |= 0x1U;
175 need_write = 1;
176 }
177 } else {
178 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
179 cortex_a->cp15_control_reg_curr &= ~0x1U;
180 need_write = 1;
181 }
182 }
183
184 if (need_write) {
185 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
186 enable ? "enable mmu" : "disable mmu",
187 cortex_a->cp15_control_reg_curr);
188
189 retval = armv7a->arm.mcr(target, 15,
190 0, 0, /* op1, op2 */
191 1, 0, /* CRn, CRm */
192 cortex_a->cp15_control_reg_curr);
193 }
194 return retval;
195 }
196
197 /*
198 * Cortex-A basic debug access; very low level, assumes state is saved
199 */
200 static int cortex_a_init_debug_access(struct target *target)
201 {
202 struct armv7a_common *armv7a = target_to_armv7a(target);
203 uint32_t dscr;
204 int retval;
205
206 /* lock memory-mapped access to debug registers to prevent
207 * software interference */
208 retval = mem_ap_write_u32(armv7a->debug_ap,
209 armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
210 if (retval != ERROR_OK)
211 return retval;
212
213 /* Disable cacheline fills and force cache write-through in debug state */
214 retval = mem_ap_write_u32(armv7a->debug_ap,
215 armv7a->debug_base + CPUDBG_DSCCR, 0);
216 if (retval != ERROR_OK)
217 return retval;
218
219 /* Disable TLB lookup and refill/eviction in debug state */
220 retval = mem_ap_write_u32(armv7a->debug_ap,
221 armv7a->debug_base + CPUDBG_DSMCR, 0);
222 if (retval != ERROR_OK)
223 return retval;
224
225 retval = dap_run(armv7a->debug_ap->dap);
226 if (retval != ERROR_OK)
227 return retval;
228
229 /* Enabling of instruction execution in debug mode is done in debug_entry code */
230
231 /* Resync breakpoint registers */
232
233 /* Enable halt for breakpoint, watchpoint and vector catch */
234 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
235 armv7a->debug_base + CPUDBG_DSCR, &dscr);
236 if (retval != ERROR_OK)
237 return retval;
238 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
239 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
240 if (retval != ERROR_OK)
241 return retval;
242
243 /* Since this is likely called from init or reset, update target state information */
244 return cortex_a_poll(target);
245 }
246
247 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
248 {
249 /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
250 * Writes the final value of DSCR into *dscr. Pass force=true to
251 * re-read DSCR at least once even if *dscr already has INSTR_COMP set. */
252 struct armv7a_common *armv7a = target_to_armv7a(target);
253 int64_t then = timeval_ms();
254 while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
255 force = false;
256 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
257 armv7a->debug_base + CPUDBG_DSCR, dscr);
258 if (retval != ERROR_OK) {
259 LOG_ERROR("Could not read DSCR register");
260 return retval;
261 }
262 if (timeval_ms() > then + 1000) {
263 LOG_ERROR("Timeout waiting for InstrCompl=1");
264 return ERROR_FAIL;
265 }
266 }
267 return ERROR_OK;
268 }
269
270 /* To reduce needless round-trips, pass in a pointer to the current
271 * DSCR value. Initialize it to zero if you just need to know the
272 * value on return from this function; or DSCR_INSTR_COMP if you
273 * happen to know that no instruction is pending.
274 */
275 static int cortex_a_exec_opcode(struct target *target,
276 uint32_t opcode, uint32_t *dscr_p)
277 {
278 uint32_t dscr;
279 int retval;
280 struct armv7a_common *armv7a = target_to_armv7a(target);
281
282 dscr = dscr_p ? *dscr_p : 0;
283
284 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
285
286 /* Wait for InstrCompl bit to be set */
287 retval = cortex_a_wait_instrcmpl(target, &dscr, false); /* dscr_p may be NULL */
288 if (retval != ERROR_OK)
289 return retval;
290
291 retval = mem_ap_write_u32(armv7a->debug_ap,
292 armv7a->debug_base + CPUDBG_ITR, opcode);
293 if (retval != ERROR_OK)
294 return retval;
295
296 int64_t then = timeval_ms();
297 do {
298 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
299 armv7a->debug_base + CPUDBG_DSCR, &dscr);
300 if (retval != ERROR_OK) {
301 LOG_ERROR("Could not read DSCR register");
302 return retval;
303 }
304 if (timeval_ms() > then + 1000) {
305 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
306 return ERROR_FAIL;
307 }
308 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
309
310 if (dscr_p)
311 *dscr_p = dscr;
312
313 return retval;
314 }
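
/* Usage sketch, mirroring the call sites in this file: execute
 * "MRC p14, 0, r0, c0, c5, 0" to move DTRRX into r0 while keeping the
 * cached DSCR value across calls:
 *
 *   uint32_t dscr = DSCR_INSTR_COMP;
 *   retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
 */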
315
316 /* Write to memory mapped registers directly with no cache or mmu handling */
317 static int cortex_a_dap_write_memap_register_u32(struct target *target,
318 uint32_t address,
319 uint32_t value)
320 {
321 int retval;
322 struct armv7a_common *armv7a = target_to_armv7a(target);
323
324 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
325
326 return retval;
327 }
328
329 /*
330 * Cortex-A implementation of Debug Programmer's Model
331 *
332 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
333 * so there's no need to poll for it before executing an instruction.
334 *
335 * NOTE that in several of these cases the "stall" mode might be useful.
336 * It'd let us queue a few operations together... prepare/finish might
337 * be the places to enable/disable that mode.
338 */
339
340 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
341 {
342 return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
343 }
344
345 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
346 {
347 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
348 return mem_ap_write_u32(a->armv7a_common.debug_ap,
349 a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
350 }
351
352 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
353 uint32_t *dscr_p)
354 {
355 uint32_t dscr = DSCR_INSTR_COMP;
356 int retval;
357
358 if (dscr_p)
359 dscr = *dscr_p;
360
361 /* Wait for DTRRXfull */
362 int64_t then = timeval_ms();
363 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
364 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
365 a->armv7a_common.debug_base + CPUDBG_DSCR,
366 &dscr);
367 if (retval != ERROR_OK)
368 return retval;
369 if (timeval_ms() > then + 1000) {
370 LOG_ERROR("Timeout waiting for read dcc");
371 return ERROR_FAIL;
372 }
373 }
374
375 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
376 a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
377 if (retval != ERROR_OK)
378 return retval;
379 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
380
381 if (dscr_p)
382 *dscr_p = dscr;
383
384 return retval;
385 }
386
387 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
388 {
389 struct cortex_a_common *a = dpm_to_a(dpm);
390 uint32_t dscr;
391 int retval;
392
393 /* set up invariant: INSTR_COMP is set after every DPM operation */
394 int64_t then = timeval_ms();
395 for (;; ) {
396 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
397 a->armv7a_common.debug_base + CPUDBG_DSCR,
398 &dscr);
399 if (retval != ERROR_OK)
400 return retval;
401 if ((dscr & DSCR_INSTR_COMP) != 0)
402 break;
403 if (timeval_ms() > then + 1000) {
404 LOG_ERROR("Timeout waiting for dpm prepare");
405 return ERROR_FAIL;
406 }
407 }
408
409 /* this "should never happen" ... */
410 if (dscr & DSCR_DTR_RX_FULL) {
411 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
412 /* Clear DCCRX */
413 retval = cortex_a_exec_opcode(
414 a->armv7a_common.arm.target,
415 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
416 &dscr);
417 if (retval != ERROR_OK)
418 return retval;
419 }
420
421 return retval;
422 }
423
424 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
425 {
426 /* REVISIT what could be done here? */
427 return ERROR_OK;
428 }
429
430 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
431 uint32_t opcode, uint32_t data)
432 {
433 struct cortex_a_common *a = dpm_to_a(dpm);
434 int retval;
435 uint32_t dscr = DSCR_INSTR_COMP;
436
437 retval = cortex_a_write_dcc(a, data);
438 if (retval != ERROR_OK)
439 return retval;
440
441 return cortex_a_exec_opcode(
442 a->armv7a_common.arm.target,
443 opcode,
444 &dscr);
445 }
446
447 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
448 uint32_t opcode, uint32_t data)
449 {
450 struct cortex_a_common *a = dpm_to_a(dpm);
451 uint32_t dscr = DSCR_INSTR_COMP;
452 int retval;
453
454 retval = cortex_a_write_dcc(a, data);
455 if (retval != ERROR_OK)
456 return retval;
457
458 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
459 retval = cortex_a_exec_opcode(
460 a->armv7a_common.arm.target,
461 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
462 &dscr);
463 if (retval != ERROR_OK)
464 return retval;
465
466 /* then the opcode, taking data from R0 */
467 retval = cortex_a_exec_opcode(
468 a->armv7a_common.arm.target,
469 opcode,
470 &dscr);
471
472 return retval;
473 }
474
475 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
476 {
477 struct target *target = dpm->arm->target;
478 uint32_t dscr = DSCR_INSTR_COMP;
479
480 /* "Prefetch flush" after modifying execution status in CPSR */
481 return cortex_a_exec_opcode(target,
482 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
483 &dscr);
484 }
485
486 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
487 uint32_t opcode, uint32_t *data)
488 {
489 struct cortex_a_common *a = dpm_to_a(dpm);
490 int retval;
491 uint32_t dscr = DSCR_INSTR_COMP;
492
493 /* the opcode, writing data to DCC */
494 retval = cortex_a_exec_opcode(
495 a->armv7a_common.arm.target,
496 opcode,
497 &dscr);
498 if (retval != ERROR_OK)
499 return retval;
500
501 return cortex_a_read_dcc(a, data, &dscr);
502 }
503
504
505 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
506 uint32_t opcode, uint32_t *data)
507 {
508 struct cortex_a_common *a = dpm_to_a(dpm);
509 uint32_t dscr = DSCR_INSTR_COMP;
510 int retval;
511
512 /* the opcode, writing data to R0 */
513 retval = cortex_a_exec_opcode(
514 a->armv7a_common.arm.target,
515 opcode,
516 &dscr);
517 if (retval != ERROR_OK)
518 return retval;
519
520 /* write R0 to DCC */
521 retval = cortex_a_exec_opcode(
522 a->armv7a_common.arm.target,
523 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
524 &dscr);
525 if (retval != ERROR_OK)
526 return retval;
527
528 return cortex_a_read_dcc(a, data, &dscr);
529 }
530
531 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
532 uint32_t addr, uint32_t control)
533 {
534 struct cortex_a_common *a = dpm_to_a(dpm);
535 uint32_t vr = a->armv7a_common.debug_base;
536 uint32_t cr = a->armv7a_common.debug_base;
537 int retval;
538
539 switch (index_t) {
540 case 0 ... 15: /* breakpoints */
541 vr += CPUDBG_BVR_BASE;
542 cr += CPUDBG_BCR_BASE;
543 break;
544 case 16 ... 31: /* watchpoints */
545 vr += CPUDBG_WVR_BASE;
546 cr += CPUDBG_WCR_BASE;
547 index_t -= 16;
548 break;
549 default:
550 return ERROR_FAIL;
551 }
552 vr += 4 * index_t;
553 cr += 4 * index_t;
554
555 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
556 (unsigned) vr, (unsigned) cr);
557
558 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
559 vr, addr);
560 if (retval != ERROR_OK)
561 return retval;
562 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
563 cr, control);
564 return retval;
565 }
566
567 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
568 {
569 struct cortex_a_common *a = dpm_to_a(dpm);
570 uint32_t cr;
571
572 switch (index_t) {
573 case 0 ... 15:
574 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
575 break;
576 case 16 ... 31:
577 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
578 index_t -= 16;
579 break;
580 default:
581 return ERROR_FAIL;
582 }
583 cr += 4 * index_t;
584
585 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
586
587 /* clear control register */
588 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
589 }
590
591 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
592 {
593 struct arm_dpm *dpm = &a->armv7a_common.dpm;
594 int retval;
595
596 dpm->arm = &a->armv7a_common.arm;
597 dpm->didr = didr;
598
599 dpm->prepare = cortex_a_dpm_prepare;
600 dpm->finish = cortex_a_dpm_finish;
601
602 dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
603 dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
604 dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
605
606 dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
607 dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
608
609 dpm->bpwp_enable = cortex_a_bpwp_enable;
610 dpm->bpwp_disable = cortex_a_bpwp_disable;
611
612 retval = arm_dpm_setup(dpm);
613 if (retval == ERROR_OK)
614 retval = arm_dpm_initialize(dpm);
615
616 return retval;
617 }
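
/* Return the halted core of the SMP cluster whose coreid matches, or the
 * calling target itself if no such halted core exists. Used below when gdb
 * toggles between cores. */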
618 static struct target *get_cortex_a(struct target *target, int32_t coreid)
619 {
620 struct target_list *head;
621 struct target *curr;
622
623 head = target->head;
624 while (head != (struct target_list *)NULL) {
625 curr = head->target;
626 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
627 return curr;
628 head = head->next;
629 }
630 return target;
631 }
632 static int cortex_a_halt(struct target *target);
633
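/* Halt every other examined core of the SMP cluster; the per-core
 * cortex_a_halt() results are accumulated into the return value. */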
634 static int cortex_a_halt_smp(struct target *target)
635 {
636 int retval = 0;
637 struct target_list *head;
638 struct target *curr;
639 head = target->head;
640 while (head != (struct target_list *)NULL) {
641 curr = head->target;
642 if ((curr != target) && (curr->state != TARGET_HALTED)
643 && target_was_examined(curr))
644 retval += cortex_a_halt(curr);
645 head = head->next;
646 }
647 return retval;
648 }
649
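/* Bring the remaining SMP cores into the halted state, then poll the
 * gdb-serving target last, so gdb sees the halt event only after all
 * cores have been updated. */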
650 static int update_halt_gdb(struct target *target)
651 {
652 struct target *gdb_target = NULL;
653 struct target_list *head;
654 struct target *curr;
655 int retval = 0;
656
657 if (target->gdb_service && target->gdb_service->core[0] == -1) {
658 target->gdb_service->target = target;
659 target->gdb_service->core[0] = target->coreid;
660 retval += cortex_a_halt_smp(target);
661 }
662
663 if (target->gdb_service)
664 gdb_target = target->gdb_service->target;
665
666 foreach_smp_target(head, target->head) {
667 curr = head->target;
668 /* skip calling context */
669 if (curr == target)
670 continue;
671 if (!target_was_examined(curr))
672 continue;
673 /* skip targets that were already halted */
674 if (curr->state == TARGET_HALTED)
675 continue;
676 /* Skip gdb_target; it alerts GDB so has to be polled as last one */
677 if (curr == gdb_target)
678 continue;
679
680 /* avoid recursion in cortex_a_poll() */
681 curr->smp = 0;
682 cortex_a_poll(curr);
683 curr->smp = 1;
684 }
685
686 /* after all targets were updated, poll the gdb serving target */
687 if (gdb_target != NULL && gdb_target != target)
688 cortex_a_poll(gdb_target);
689 return retval;
690 }
691
692 /*
693 * Cortex-A Run control
694 */
695
696 static int cortex_a_poll(struct target *target)
697 {
698 int retval = ERROR_OK;
699 uint32_t dscr;
700 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
701 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
702 enum target_state prev_target_state = target->state;
703 /* toggling to another core is done by gdb as follows: */
704 /* maint packet J core_id */
705 /* continue */
706 /* the next poll triggers a halt event sent to gdb */
707 if ((target->state == TARGET_HALTED) && (target->smp) &&
708 (target->gdb_service) &&
709 (target->gdb_service->target == NULL)) {
710 target->gdb_service->target =
711 get_cortex_a(target, target->gdb_service->core[1]);
712 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
713 return retval;
714 }
715 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
716 armv7a->debug_base + CPUDBG_DSCR, &dscr);
717 if (retval != ERROR_OK)
718 return retval;
719 cortex_a->cpudbg_dscr = dscr;
720
721 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
722 if (prev_target_state != TARGET_HALTED) {
723 /* We have a halting debug event */
724 LOG_DEBUG("Target halted");
725 target->state = TARGET_HALTED;
726
727 retval = cortex_a_debug_entry(target);
728 if (retval != ERROR_OK)
729 return retval;
730
731 if (target->smp) {
732 retval = update_halt_gdb(target);
733 if (retval != ERROR_OK)
734 return retval;
735 }
736
737 if (prev_target_state == TARGET_DEBUG_RUNNING) {
738 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
739 } else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
740 if (arm_semihosting(target, &retval) != 0)
741 return retval;
742
743 target_call_event_callbacks(target,
744 TARGET_EVENT_HALTED);
745 }
746 }
747 } else
748 target->state = TARGET_RUNNING;
749
750 return retval;
751 }
752
753 static int cortex_a_halt(struct target *target)
754 {
755 int retval = ERROR_OK;
756 uint32_t dscr;
757 struct armv7a_common *armv7a = target_to_armv7a(target);
758
759 /*
760 * Tell the core to be halted by writing DRCR with 0x1
761 * and then wait for the core to be halted.
762 */
763 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
764 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
765 if (retval != ERROR_OK)
766 return retval;
767
768 int64_t then = timeval_ms();
769 for (;; ) {
770 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
771 armv7a->debug_base + CPUDBG_DSCR, &dscr);
772 if (retval != ERROR_OK)
773 return retval;
774 if ((dscr & DSCR_CORE_HALTED) != 0)
775 break;
776 if (timeval_ms() > then + 1000) {
777 LOG_ERROR("Timeout waiting for halt");
778 return ERROR_FAIL;
779 }
780 }
781
782 target->debug_reason = DBG_REASON_DBGRQ;
783
784 return ERROR_OK;
785 }
786
787 static int cortex_a_internal_restore(struct target *target, int current,
788 target_addr_t *address, int handle_breakpoints, int debug_execution)
789 {
790 struct armv7a_common *armv7a = target_to_armv7a(target);
791 struct arm *arm = &armv7a->arm;
792 int retval;
793 uint32_t resume_pc;
794
795 if (!debug_execution)
796 target_free_all_working_areas(target);
797
798 #if 0
799 if (debug_execution) {
800 /* Disable interrupts */
801 /* We disable interrupts in the PRIMASK register instead of
802 * masking with C_MASKINTS,
803 * This is probably the same issue as Cortex-M3 Errata 377493:
804 * C_MASKINTS in parallel with disabled interrupts can cause
805 * local faults to not be taken. */
806 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
807 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
808 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;
809
810 /* Make sure we are in Thumb mode */
811 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
812 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
813 32) | (1 << 24));
814 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = true;
815 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = true;
816 }
817 #endif
818
819 /* current = 1: continue on current pc, otherwise continue at <address> */
820 resume_pc = buf_get_u32(arm->pc->value, 0, 32);
821 if (!current)
822 resume_pc = *address;
823 else
824 *address = resume_pc;
825
826 /* Make sure that the Armv7 gdb thumb fixups do not
827 * kill the return address
828 */
829 switch (arm->core_state) {
830 case ARM_STATE_ARM:
831 resume_pc &= 0xFFFFFFFC;
832 break;
833 case ARM_STATE_THUMB:
834 case ARM_STATE_THUMB_EE:
835 /* When the return address is loaded into PC
836 * bit 0 must be 1 to stay in Thumb state
837 */
838 resume_pc |= 0x1;
839 break;
840 case ARM_STATE_JAZELLE:
841 LOG_ERROR("How do I resume into Jazelle state??");
842 return ERROR_FAIL;
843 case ARM_STATE_AARCH64:
844 LOG_ERROR("Shouldn't be in AARCH64 state");
845 return ERROR_FAIL;
846 }
847 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
848 buf_set_u32(arm->pc->value, 0, 32, resume_pc);
849 arm->pc->dirty = true;
850 arm->pc->valid = true;
851
852 /* restore dpm_mode at system halt */
853 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
854 /* call it now, before restoring context, because it uses cpu
855 * register r0 for restoring the cp15 control register */
856 retval = cortex_a_restore_cp15_control_reg(target);
857 if (retval != ERROR_OK)
858 return retval;
859 retval = cortex_a_restore_context(target, handle_breakpoints);
860 if (retval != ERROR_OK)
861 return retval;
862 target->debug_reason = DBG_REASON_NOTHALTED;
863 target->state = TARGET_RUNNING;
864
865 /* registers are now invalid */
866 register_cache_invalidate(arm->core_cache);
867
868 #if 0
869 /* the front-end may request us not to handle breakpoints */
870 if (handle_breakpoints) {
871 /* Single step past breakpoint at current address */
872 breakpoint = breakpoint_find(target, resume_pc);
873 if (breakpoint) {
874 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
875 cortex_m3_unset_breakpoint(target, breakpoint);
876 cortex_m3_single_step_core(target);
877 cortex_m3_set_breakpoint(target, breakpoint);
878 }
879 }
880
881 #endif
882 return retval;
883 }
884
885 static int cortex_a_internal_restart(struct target *target)
886 {
887 struct armv7a_common *armv7a = target_to_armv7a(target);
888 struct arm *arm = &armv7a->arm;
889 int retval;
890 uint32_t dscr;
891 /*
892 * Restart core and wait for it to be started. Clear ITRen and sticky
893 * exception flags: see ARMv7 ARM, C5.9.
894 *
895 * REVISIT: for single stepping, we probably want to
896 * disable IRQs by default, with optional override...
897 */
898
899 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
900 armv7a->debug_base + CPUDBG_DSCR, &dscr);
901 if (retval != ERROR_OK)
902 return retval;
903
904 if ((dscr & DSCR_INSTR_COMP) == 0)
905 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
906
907 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
908 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
909 if (retval != ERROR_OK)
910 return retval;
911
912 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
913 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
914 DRCR_CLEAR_EXCEPTIONS);
915 if (retval != ERROR_OK)
916 return retval;
917
918 int64_t then = timeval_ms();
919 for (;; ) {
920 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
921 armv7a->debug_base + CPUDBG_DSCR, &dscr);
922 if (retval != ERROR_OK)
923 return retval;
924 if ((dscr & DSCR_CORE_RESTARTED) != 0)
925 break;
926 if (timeval_ms() > then + 1000) {
927 LOG_ERROR("Timeout waiting for resume");
928 return ERROR_FAIL;
929 }
930 }
931
932 target->debug_reason = DBG_REASON_NOTHALTED;
933 target->state = TARGET_RUNNING;
934
935 /* registers are now invalid */
936 register_cache_invalidate(arm->core_cache);
937
938 return ERROR_OK;
939 }
940
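/* Resume every other examined, non-running core of the SMP cluster at its
 * current pc (not in step mode); per-core errors are accumulated. */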
941 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
942 {
943 int retval = 0;
944 struct target_list *head;
945 struct target *curr;
946 target_addr_t address;
947 head = target->head;
948 while (head != (struct target_list *)NULL) {
949 curr = head->target;
950 if ((curr != target) && (curr->state != TARGET_RUNNING)
951 && target_was_examined(curr)) {
952 /* resume at current address, not in step mode */
953 retval += cortex_a_internal_restore(curr, 1, &address,
954 handle_breakpoints, 0);
955 retval += cortex_a_internal_restart(curr);
956 }
957 head = head->next;
958
959 }
960 return retval;
961 }
962
963 static int cortex_a_resume(struct target *target, int current,
964 target_addr_t address, int handle_breakpoints, int debug_execution)
965 {
966 int retval = 0;
967 /* dummy resume for smp toggle in order to reduce gdb impact */
968 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
969 /* simulate a start and halt of target */
970 target->gdb_service->target = NULL;
971 target->gdb_service->core[0] = target->gdb_service->core[1];
972 /* fake resume; at the next poll we serve target core[1], see poll */
973 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
974 return 0;
975 }
976 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
977 if (target->smp) {
978 target->gdb_service->core[0] = -1;
979 retval = cortex_a_restore_smp(target, handle_breakpoints);
980 if (retval != ERROR_OK)
981 return retval;
982 }
983 cortex_a_internal_restart(target);
984
985 if (!debug_execution) {
986 target->state = TARGET_RUNNING;
987 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
988 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
989 } else {
990 target->state = TARGET_DEBUG_RUNNING;
991 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
992 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
993 }
994
995 return ERROR_OK;
996 }
997
998 static int cortex_a_debug_entry(struct target *target)
999 {
1000 uint32_t dscr;
1001 int retval = ERROR_OK;
1002 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1003 struct armv7a_common *armv7a = target_to_armv7a(target);
1004 struct arm *arm = &armv7a->arm;
1005
1006 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);
1007
1008 /* REVISIT surely we should not re-read DSCR !! */
1009 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1010 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1011 if (retval != ERROR_OK)
1012 return retval;
1013
1014 /* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
1015 * imprecise data aborts get discarded by issuing a Data
1016 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1017 */
1018
1019 /* Enable the ITR execution once we are in debug mode */
1020 dscr |= DSCR_ITR_EN;
1021 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1022 armv7a->debug_base + CPUDBG_DSCR, dscr);
1023 if (retval != ERROR_OK)
1024 return retval;
1025
1026 /* Examine debug reason */
1027 arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);
1028
1029 /* save address of instruction that triggered the watchpoint? */
1030 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1031 uint32_t wfar;
1032
1033 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1034 armv7a->debug_base + CPUDBG_WFAR,
1035 &wfar);
1036 if (retval != ERROR_OK)
1037 return retval;
1038 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1039 }
1040
1041 /* First load register accessible through core debug port */
1042 retval = arm_dpm_read_current_registers(&armv7a->dpm);
1043 if (retval != ERROR_OK)
1044 return retval;
1045
1046 if (arm->spsr) {
1047 /* read SPSR */
1048 retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
1049 if (retval != ERROR_OK)
1050 return retval;
1051 }
1052
1053 #if 0
1054 /* TODO, Move this */
1055 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1056 cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1057 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1058
1059 cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1060 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1061
1062 cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1063 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1064 #endif
1065
1066 /* Are we in an exception handler? */
1067 /* armv4_5->exception_number = 0; */
1068 if (armv7a->post_debug_entry) {
1069 retval = armv7a->post_debug_entry(target);
1070 if (retval != ERROR_OK)
1071 return retval;
1072 }
1073
1074 return retval;
1075 }
1076
1077 static int cortex_a_post_debug_entry(struct target *target)
1078 {
1079 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1080 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1081 int retval;
1082
1083 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1084 retval = armv7a->arm.mrc(target, 15,
1085 0, 0, /* op1, op2 */
1086 1, 0, /* CRn, CRm */
1087 &cortex_a->cp15_control_reg);
1088 if (retval != ERROR_OK)
1089 return retval;
1090 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1091 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1092
1093 if (!armv7a->is_armv7r)
1094 armv7a_read_ttbcr(target);
1095
1096 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1097 armv7a_identify_cache(target);
1098
1099 if (armv7a->is_armv7r) {
1100 armv7a->armv7a_mmu.mmu_enabled = 0;
1101 } else {
1102 armv7a->armv7a_mmu.mmu_enabled =
1103 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1104 }
1105 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1106 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1107 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1108 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1109 cortex_a->curr_mode = armv7a->arm.core_mode;
1110
1111 /* switch to SVC mode to read DACR */
1112 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1113 armv7a->arm.mrc(target, 15,
1114 0, 0, 3, 0,
1115 &cortex_a->cp15_dacr_reg);
1116
1117 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1118 cortex_a->cp15_dacr_reg);
1119
1120 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1121 return ERROR_OK;
1122 }
1123
1124 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1125 {
1126 struct armv7a_common *armv7a = target_to_armv7a(target);
1127 uint32_t dscr;
1128
1129 /* Read DSCR */
1130 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1131 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1132 if (retval != ERROR_OK)
1133 return retval;
1134
1135 /* clear bitfield */
1136 dscr &= ~bit_mask;
1137 /* put new value */
1138 dscr |= value & bit_mask;
1139
1140 /* write new DSCR */
1141 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1142 armv7a->debug_base + CPUDBG_DSCR, dscr);
1143 return retval;
1144 }
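
/* Example, as used by cortex_a_step() below: mask interrupts around a
 * single step, then restore them:
 *
 *   retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
 *   ...
 *   retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
 */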
1145
1146 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1147 int handle_breakpoints)
1148 {
1149 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1150 struct armv7a_common *armv7a = target_to_armv7a(target);
1151 struct arm *arm = &armv7a->arm;
1152 struct breakpoint *breakpoint = NULL;
1153 struct breakpoint stepbreakpoint;
1154 struct reg *r;
1155 int retval;
1156
1157 if (target->state != TARGET_HALTED) {
1158 LOG_WARNING("target not halted");
1159 return ERROR_TARGET_NOT_HALTED;
1160 }
1161
1162 /* current = 1: continue on current pc, otherwise continue at <address> */
1163 r = arm->pc;
1164 if (!current)
1165 buf_set_u32(r->value, 0, 32, address);
1166 else
1167 address = buf_get_u32(r->value, 0, 32);
1168
1169 /* The front-end may request us not to handle breakpoints.
1170 * But since Cortex-A uses a breakpoint for single stepping,
1171 * we MUST handle breakpoints.
1172 */
1173 handle_breakpoints = 1;
1174 if (handle_breakpoints) {
1175 breakpoint = breakpoint_find(target, address);
1176 if (breakpoint)
1177 cortex_a_unset_breakpoint(target, breakpoint);
1178 }
1179
1180 /* Setup single step breakpoint */
1181 stepbreakpoint.address = address;
1182 stepbreakpoint.asid = 0;
1183 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1184 ? 2 : 4;
1185 stepbreakpoint.type = BKPT_HARD;
1186 stepbreakpoint.set = 0;
1187
1188 /* Disable interrupts during single step if requested */
1189 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1190 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1191 if (retval != ERROR_OK)
1192 return retval;
1193 }
1194
1195 /* Break on IVA mismatch */
1196 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1197
1198 target->debug_reason = DBG_REASON_SINGLESTEP;
1199
1200 retval = cortex_a_resume(target, 1, address, 0, 0);
1201 if (retval != ERROR_OK)
1202 return retval;
1203
1204 int64_t then = timeval_ms();
1205 while (target->state != TARGET_HALTED) {
1206 retval = cortex_a_poll(target);
1207 if (retval != ERROR_OK)
1208 return retval;
1209 if (timeval_ms() > then + 1000) {
1210 LOG_ERROR("timeout waiting for target halt");
1211 return ERROR_FAIL;
1212 }
1213 }
1214
1215 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1216
1217 /* Re-enable interrupts if they were disabled */
1218 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1219 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1220 if (retval != ERROR_OK)
1221 return retval;
1222 }
1223
1224
1225 target->debug_reason = DBG_REASON_BREAKPOINT;
1226
1227 if (breakpoint)
1228 cortex_a_set_breakpoint(target, breakpoint, 0);
1229
1230 if (target->state == TARGET_HALTED)
1231 LOG_DEBUG("target stepped");
1232
1233 return ERROR_OK;
1234 }
1235
1236 static int cortex_a_restore_context(struct target *target, bool bpwp)
1237 {
1238 struct armv7a_common *armv7a = target_to_armv7a(target);
1239
1240 LOG_DEBUG(" ");
1241
1242 if (armv7a->pre_restore_context)
1243 armv7a->pre_restore_context(target);
1244
1245 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1246 }
1247
1248 /*
1249 * Cortex-A Breakpoint and watchpoint functions
1250 */
1251
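/* The matchmode argument below selects the BCR match encoding used by the
 * callers in this file: 0x00 for an exact address match, 0x02 for a
 * context-ID (ASID) match, and 0x04 for an address mismatch, which
 * cortex_a_step() uses to implement single stepping. */
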
1252 /* Setup hardware Breakpoint Register Pair */
1253 static int cortex_a_set_breakpoint(struct target *target,
1254 struct breakpoint *breakpoint, uint8_t matchmode)
1255 {
1256 int retval;
1257 int brp_i = 0;
1258 uint32_t control;
1259 uint8_t byte_addr_select = 0x0F;
1260 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1261 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1262 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1263
1264 if (breakpoint->set) {
1265 LOG_WARNING("breakpoint already set");
1266 return ERROR_OK;
1267 }
1268
1269 if (breakpoint->type == BKPT_HARD) {
1270 while ((brp_i < cortex_a->brp_num) && brp_list[brp_i].used)
1271 brp_i++;
1272 if (brp_i >= cortex_a->brp_num) {
1273 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1274 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1275 }
1276 breakpoint->set = brp_i + 1;
1277 if (breakpoint->length == 2)
1278 byte_addr_select = (3 << (breakpoint->address & 0x02));
1279 control = ((matchmode & 0x7) << 20)
1280 | (byte_addr_select << 5)
1281 | (3 << 1) | 1;
1282 brp_list[brp_i].used = 1;
1283 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1284 brp_list[brp_i].control = control;
1285 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1286 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1287 brp_list[brp_i].value);
1288 if (retval != ERROR_OK)
1289 return retval;
1290 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1291 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1292 brp_list[brp_i].control);
1293 if (retval != ERROR_OK)
1294 return retval;
1295 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1296 brp_list[brp_i].control,
1297 brp_list[brp_i].value);
1298 } else if (breakpoint->type == BKPT_SOFT) {
1299 uint8_t code[4];
1300 /* length == 2: Thumb breakpoint */
1301 if (breakpoint->length == 2)
1302 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1303 else
1304 /* length == 3: Thumb-2 breakpoint, actual encoding is
1305 * a regular Thumb BKPT instruction but we replace a
1306 * 32bit Thumb-2 instruction, so fix-up the breakpoint
1307 * length
1308 */
1309 if (breakpoint->length == 3) {
1310 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1311 breakpoint->length = 4;
1312 } else
1313 /* length == 4, normal ARM breakpoint */
1314 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1315
1316 retval = target_read_memory(target,
1317 breakpoint->address & 0xFFFFFFFE,
1318 breakpoint->length, 1,
1319 breakpoint->orig_instr);
1320 if (retval != ERROR_OK)
1321 return retval;
1322
1323 /* make sure data cache is cleaned & invalidated down to PoC */
1324 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1325 armv7a_cache_flush_virt(target, breakpoint->address,
1326 breakpoint->length);
1327 }
1328
1329 retval = target_write_memory(target,
1330 breakpoint->address & 0xFFFFFFFE,
1331 breakpoint->length, 1, code);
1332 if (retval != ERROR_OK)
1333 return retval;
1334
1335 /* update i-cache at breakpoint location */
1336 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1337 breakpoint->length);
1338 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1339 breakpoint->length);
1340
1341 breakpoint->set = 0x11; /* Any nice value but 0 */
1342 }
1343
1344 return ERROR_OK;
1345 }
1346
1347 static int cortex_a_set_context_breakpoint(struct target *target,
1348 struct breakpoint *breakpoint, uint8_t matchmode)
1349 {
1350 int retval = ERROR_FAIL;
1351 int brp_i = 0;
1352 uint32_t control;
1353 uint8_t byte_addr_select = 0x0F;
1354 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1355 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1356 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1357
1358 if (breakpoint->set) {
1359 LOG_WARNING("breakpoint already set");
1360 return retval;
1361 }
1362 /*check available context BRPs*/
1363 while ((brp_i < cortex_a->brp_num) && (brp_list[brp_i].used ||
1364 (brp_list[brp_i].type != BRP_CONTEXT)))
1365 brp_i++;
1366
1367 if (brp_i >= cortex_a->brp_num) {
1368 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1369 return ERROR_FAIL;
1370 }
1371
1372 breakpoint->set = brp_i + 1;
1373 control = ((matchmode & 0x7) << 20)
1374 | (byte_addr_select << 5)
1375 | (3 << 1) | 1;
1376 brp_list[brp_i].used = 1;
1377 brp_list[brp_i].value = (breakpoint->asid);
1378 brp_list[brp_i].control = control;
1379 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1380 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1381 brp_list[brp_i].value);
1382 if (retval != ERROR_OK)
1383 return retval;
1384 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1385 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1386 brp_list[brp_i].control);
1387 if (retval != ERROR_OK)
1388 return retval;
1389 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1390 brp_list[brp_i].control,
1391 brp_list[brp_i].value);
1392 return ERROR_OK;
1393
1394 }
1395
1396 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1397 {
1398 int retval = ERROR_FAIL;
1399 int brp_1 = 0; /* holds the contextID pair */
1400 int brp_2 = 0; /* holds the IVA pair */
1401 uint32_t control_CTX, control_IVA;
1402 uint8_t CTX_byte_addr_select = 0x0F;
1403 uint8_t IVA_byte_addr_select = 0x0F;
1404 uint8_t CTX_matchmode = 0x03;
1405 uint8_t IVA_matchmode = 0x01;
1406 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1407 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1408 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1409
1410 if (breakpoint->set) {
1411 LOG_WARNING("breakpoint already set");
1412 return retval;
1413 }
1414 /*check available context BRPs*/
1415 while ((brp_1 < cortex_a->brp_num) && (brp_list[brp_1].used ||
1416 (brp_list[brp_1].type != BRP_CONTEXT)))
1417 brp_1++;
1418
1419 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1420 if (brp_1 >= cortex_a->brp_num) {
1421 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1422 return ERROR_FAIL;
1423 }
1424
1425 while ((brp_2 < cortex_a->brp_num) && (brp_list[brp_2].used ||
1426 (brp_list[brp_2].type != BRP_NORMAL)))
1427 brp_2++;
1428
1429 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1430 if (brp_2 >= cortex_a->brp_num) {
1431 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1432 return ERROR_FAIL;
1433 }
1434
1435 breakpoint->set = brp_1 + 1;
1436 breakpoint->linked_BRP = brp_2;
1437 control_CTX = ((CTX_matchmode & 0x7) << 20)
1438 | (brp_2 << 16)
1439 | (0 << 14)
1440 | (CTX_byte_addr_select << 5)
1441 | (3 << 1) | 1;
1442 brp_list[brp_1].used = 1;
1443 brp_list[brp_1].value = (breakpoint->asid);
1444 brp_list[brp_1].control = control_CTX;
1445 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1446 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1447 brp_list[brp_1].value);
1448 if (retval != ERROR_OK)
1449 return retval;
1450 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1451 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1452 brp_list[brp_1].control);
1453 if (retval != ERROR_OK)
1454 return retval;
1455
1456 control_IVA = ((IVA_matchmode & 0x7) << 20)
1457 | (brp_1 << 16)
1458 | (IVA_byte_addr_select << 5)
1459 | (3 << 1) | 1;
1460 brp_list[brp_2].used = 1;
1461 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1462 brp_list[brp_2].control = control_IVA;
1463 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1464 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1465 brp_list[brp_2].value);
1466 if (retval != ERROR_OK)
1467 return retval;
1468 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1469 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1470 brp_list[brp_2].control);
1471 if (retval != ERROR_OK)
1472 return retval;
1473
1474 return ERROR_OK;
1475 }
1476
1477 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1478 {
1479 int retval;
1480 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1481 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1482 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1483
1484 if (!breakpoint->set) {
1485 LOG_WARNING("breakpoint not set");
1486 return ERROR_OK;
1487 }
1488
1489 if (breakpoint->type == BKPT_HARD) {
1490 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1491 int brp_i = breakpoint->set - 1;
1492 int brp_j = breakpoint->linked_BRP;
1493 if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1494 LOG_DEBUG("Invalid BRP number in breakpoint");
1495 return ERROR_OK;
1496 }
1497 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1498 brp_list[brp_i].control, brp_list[brp_i].value);
1499 brp_list[brp_i].used = 0;
1500 brp_list[brp_i].value = 0;
1501 brp_list[brp_i].control = 0;
1502 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1503 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1504 brp_list[brp_i].control);
1505 if (retval != ERROR_OK)
1506 return retval;
1507 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1508 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1509 brp_list[brp_i].value);
1510 if (retval != ERROR_OK)
1511 return retval;
1512 if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1513 LOG_DEBUG("Invalid BRP number in breakpoint");
1514 return ERROR_OK;
1515 }
1516 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1517 brp_list[brp_j].control, brp_list[brp_j].value);
1518 brp_list[brp_j].used = 0;
1519 brp_list[brp_j].value = 0;
1520 brp_list[brp_j].control = 0;
1521 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1522 + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1523 brp_list[brp_j].control);
1524 if (retval != ERROR_OK)
1525 return retval;
1526 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1527 + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1528 brp_list[brp_j].value);
1529 if (retval != ERROR_OK)
1530 return retval;
1531 breakpoint->linked_BRP = 0;
1532 breakpoint->set = 0;
1533 return ERROR_OK;
1534
1535 } else {
1536 int brp_i = breakpoint->set - 1;
1537 if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1538 LOG_DEBUG("Invalid BRP number in breakpoint");
1539 return ERROR_OK;
1540 }
1541 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1542 brp_list[brp_i].control, brp_list[brp_i].value);
1543 brp_list[brp_i].used = 0;
1544 brp_list[brp_i].value = 0;
1545 brp_list[brp_i].control = 0;
1546 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1547 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1548 brp_list[brp_i].control);
1549 if (retval != ERROR_OK)
1550 return retval;
1551 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1552 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1553 brp_list[brp_i].value);
1554 if (retval != ERROR_OK)
1555 return retval;
1556 breakpoint->set = 0;
1557 return ERROR_OK;
1558 }
1559 } else {
1560
1561 /* make sure data cache is cleaned & invalidated down to PoC */
1562 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1563 armv7a_cache_flush_virt(target, breakpoint->address,
1564 breakpoint->length);
1565 }
1566
1567 /* restore original instruction (kept in target endianness) */
1568 if (breakpoint->length == 4) {
1569 retval = target_write_memory(target,
1570 breakpoint->address & 0xFFFFFFFE,
1571 4, 1, breakpoint->orig_instr);
1572 if (retval != ERROR_OK)
1573 return retval;
1574 } else {
1575 retval = target_write_memory(target,
1576 breakpoint->address & 0xFFFFFFFE,
1577 2, 1, breakpoint->orig_instr);
1578 if (retval != ERROR_OK)
1579 return retval;
1580 }
1581
1582 /* update i-cache at breakpoint location */
1583 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1584 breakpoint->length);
1585 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1586 breakpoint->length);
1587 }
1588 breakpoint->set = 0;
1589
1590 return ERROR_OK;
1591 }
1592
1593 static int cortex_a_add_breakpoint(struct target *target,
1594 struct breakpoint *breakpoint)
1595 {
1596 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1597
1598 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1599 LOG_INFO("no hardware breakpoint available");
1600 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1601 }
1602
1603 if (breakpoint->type == BKPT_HARD)
1604 cortex_a->brp_num_available--;
1605
1606 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1607 }
1608
1609 static int cortex_a_add_context_breakpoint(struct target *target,
1610 struct breakpoint *breakpoint)
1611 {
1612 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1613
1614 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1615 LOG_INFO("no hardware breakpoint available");
1616 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1617 }
1618
1619 if (breakpoint->type == BKPT_HARD)
1620 cortex_a->brp_num_available--;
1621
1622 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1623 }
1624
1625 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1626 struct breakpoint *breakpoint)
1627 {
1628 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1629
1630 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1631 LOG_INFO("no hardware breakpoint available");
1632 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1633 }
1634
1635 if (breakpoint->type == BKPT_HARD)
1636 cortex_a->brp_num_available--;
1637
1638 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1639 }
1640
1641
1642 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1643 {
1644 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1645
1646 #if 0
1647 /* It is perfectly possible to remove breakpoints while the target is running */
1648 if (target->state != TARGET_HALTED) {
1649 LOG_WARNING("target not halted");
1650 return ERROR_TARGET_NOT_HALTED;
1651 }
1652 #endif
1653
1654 if (breakpoint->set) {
1655 cortex_a_unset_breakpoint(target, breakpoint);
1656 if (breakpoint->type == BKPT_HARD)
1657 cortex_a->brp_num_available++;
1658 }
1659
1660
1661 return ERROR_OK;
1662 }
1663
1664 /*
1665 * Cortex-A Reset functions
1666 */
1667
1668 static int cortex_a_assert_reset(struct target *target)
1669 {
1670 struct armv7a_common *armv7a = target_to_armv7a(target);
1671
1672 LOG_DEBUG(" ");
1673
1674 /* FIXME when halt is requested, make it work somehow... */
1675
1676 /* This function can be called in "target not examined" state */
1677
1678 /* Issue some kind of warm reset. */
1679 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1680 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1681 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1682 /* REVISIT handle "pulls" cases, if there's
1683 * hardware that needs them to work.
1684 */
1685
1686 /*
1687 * FIXME: fix reset when transport is SWD. This is a temporary
1688 * work-around for release v0.10 that is not intended to stay!
1689 */
1690 if (transport_is_swd() ||
1691 (target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
1692 jtag_add_reset(0, 1);
1693
1694 } else {
1695 LOG_ERROR("%s: how to reset?", target_name(target));
1696 return ERROR_FAIL;
1697 }
1698
1699 /* registers are now invalid */
1700 if (target_was_examined(target))
1701 register_cache_invalidate(armv7a->arm.core_cache);
1702
1703 target->state = TARGET_RESET;
1704
1705 return ERROR_OK;
1706 }
1707
1708 static int cortex_a_deassert_reset(struct target *target)
1709 {
1710 int retval;
1711
1712 LOG_DEBUG(" ");
1713
1714 /* be certain SRST is off */
1715 jtag_add_reset(0, 0);
1716
1717 if (target_was_examined(target)) {
1718 retval = cortex_a_poll(target);
1719 if (retval != ERROR_OK)
1720 return retval;
1721 }
1722
1723 if (target->reset_halt) {
1724 if (target->state != TARGET_HALTED) {
1725 LOG_WARNING("%s: ran after reset and before halt ...",
1726 target_name(target));
1727 if (target_was_examined(target)) {
1728 retval = target_halt(target);
1729 if (retval != ERROR_OK)
1730 return retval;
1731 } else
1732 target->state = TARGET_UNKNOWN;
1733 }
1734 }
1735
1736 return ERROR_OK;
1737 }
1738
1739 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1740 {
1741 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1742 * The new mode must be passed in 'mode'. The current value of DSCR must
1743 * be in *dscr, which is updated with the new value.
1744 *
1745 * This function elides actually sending the mode-change over the debug
1746 * interface if the mode is already set as desired.
1747 */
1748 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1749 if (new_dscr != *dscr) {
1750 struct armv7a_common *armv7a = target_to_armv7a(target);
1751 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1752 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1753 if (retval == ERROR_OK)
1754 *dscr = new_dscr;
1755 return retval;
1756 } else {
1757 return ERROR_OK;
1758 }
1759 }
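
/* Typical call, with the DCC mode constants from cortex_a.h (e.g.
 * DSCR_EXT_DCC_NON_BLOCKING); dscr caches the last value read:
 *
 *   retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
 */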
1760
1761 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1762 uint32_t value, uint32_t *dscr)
1763 {
1764 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1765 struct armv7a_common *armv7a = target_to_armv7a(target);
1766 int64_t then = timeval_ms();
1767 int retval;
1768
1769 while ((*dscr & mask) != value) {
1770 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1771 armv7a->debug_base + CPUDBG_DSCR, dscr);
1772 if (retval != ERROR_OK)
1773 return retval;
1774 if (timeval_ms() > then + 1000) {
1775 LOG_ERROR("timeout waiting for DSCR bit change");
1776 return ERROR_FAIL;
1777 }
1778 }
1779 return ERROR_OK;
1780 }
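
/* Example, as used by cortex_a_read_copro() below: wait for TXfull_l
 * before reading DTRTX:
 *
 *   retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
 *           DSCR_DTRTX_FULL_LATCHED, &dscr);
 */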
1781
1782 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
1783 uint32_t *data, uint32_t *dscr)
1784 {
1785 int retval;
1786 struct armv7a_common *armv7a = target_to_armv7a(target);
1787
1788 /* Move from coprocessor to R0. */
1789 retval = cortex_a_exec_opcode(target, opcode, dscr);
1790 if (retval != ERROR_OK)
1791 return retval;
1792
1793 /* Move from R0 to DTRTX. */
1794 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
1795 if (retval != ERROR_OK)
1796 return retval;
1797
1798 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
1799 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1800 * must also check TXfull_l). Most of the time this will be free
1801 * because TXfull_l will be set immediately and cached in dscr. */
1802 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
1803 DSCR_DTRTX_FULL_LATCHED, dscr);
1804 if (retval != ERROR_OK)
1805 return retval;
1806
1807 /* Read the value transferred to DTRTX. */
1808 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1809 armv7a->debug_base + CPUDBG_DTRTX, data);
1810 if (retval != ERROR_OK)
1811 return retval;
1812
1813 return ERROR_OK;
1814 }
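
/* Illustrative sketch (not part of the driver): cortex_a_read_copro() accepts
 * any "MRC ..., R0, ..." encoding. Reading SCTLR (MRC p15, 0, R0, c1, c0, 0),
 * using the same ARMV4_5_MRC operand order as the DFAR/DFSR reads below,
 * would look like:
 *
 *	uint32_t sctlr;
 *	retval = cortex_a_read_copro(target,
 *			ARMV4_5_MRC(15, 0, 0, 1, 0, 0), &sctlr, &dscr);
 */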
1815
1816 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1817 uint32_t *dfsr, uint32_t *dscr)
1818 {
1819 int retval;
1820
1821 if (dfar) {
1822 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1823 if (retval != ERROR_OK)
1824 return retval;
1825 }
1826
1827 if (dfsr) {
1828 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
1829 if (retval != ERROR_OK)
1830 return retval;
1831 }
1832
1833 return ERROR_OK;
1834 }
1835
1836 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
1837 uint32_t data, uint32_t *dscr)
1838 {
1839 int retval;
1840 struct armv7a_common *armv7a = target_to_armv7a(target);
1841
1842 /* Write the value into DTRRX. */
1843 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1844 armv7a->debug_base + CPUDBG_DTRRX, data);
1845 if (retval != ERROR_OK)
1846 return retval;
1847
1848 /* Move from DTRRX to R0. */
1849 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
1850 if (retval != ERROR_OK)
1851 return retval;
1852
1853 /* Move from R0 to coprocessor. */
1854 retval = cortex_a_exec_opcode(target, opcode, dscr);
1855 if (retval != ERROR_OK)
1856 return retval;
1857
1858 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
1859 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
1860 * check RXfull_l). Most of the time this will be free because RXfull_l
1861 * will be cleared immediately and cached in dscr. */
1862 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
1863 if (retval != ERROR_OK)
1864 return retval;
1865
1866 return ERROR_OK;
1867 }
1868
1869 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
1870 uint32_t dfsr, uint32_t *dscr)
1871 {
1872 int retval;
1873
1874 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
1875 if (retval != ERROR_OK)
1876 return retval;
1877
1878 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
1879 if (retval != ERROR_OK)
1880 return retval;
1881
1882 return ERROR_OK;
1883 }
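
/* Illustrative sketch (not part of the driver): the two helpers above form a
 * save/restore pair around code that may fault, which is exactly how the
 * memory access paths below use them:
 *
 *	uint32_t orig_dfar, orig_dfsr;
 *	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
 *	... run instructions that may abort and clobber DFAR/DFSR ...
 *	retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
 */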
1884
1885 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
1886 {
1887 uint32_t status, upper4;
1888
1889 if (dfsr & (1 << 9)) {
1890 /* LPAE format. */
1891 status = dfsr & 0x3f;
1892 upper4 = status >> 2;
1893 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
1894 return ERROR_TARGET_TRANSLATION_FAULT;
1895 else if (status == 33)
1896 return ERROR_TARGET_UNALIGNED_ACCESS;
1897 else
1898 return ERROR_TARGET_DATA_ABORT;
1899 } else {
1900 /* Normal format. */
1901 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
1902 if (status == 1)
1903 return ERROR_TARGET_UNALIGNED_ACCESS;
1904 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
1905 status == 9 || status == 11 || status == 13 || status == 15)
1906 return ERROR_TARGET_TRANSLATION_FAULT;
1907 else
1908 return ERROR_TARGET_DATA_ABORT;
1909 }
1910 }
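
/* Worked examples for the decoder above (per the ARMv7-A/-R DFSR encodings):
 * a normal-format DFSR of 0x001 has status 0b00001, an alignment fault, and
 * maps to ERROR_TARGET_UNALIGNED_ACCESS; normal-format 0x005 is a section
 * translation fault and maps to ERROR_TARGET_TRANSLATION_FAULT; an
 * LPAE-format DFSR (bit 9 set) of 0x221 has status 0x21, again an alignment
 * fault. Anything unrecognized is reported as ERROR_TARGET_DATA_ABORT. */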
1911
1912 static int cortex_a_write_cpu_memory_slow(struct target *target,
1913 uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1914 {
1915 /* Writes count objects of size size from *buffer. Old value of DSCR must
1916 * be in *dscr; updated to new value. This is slow because it works for
1917 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
1918 * the address is aligned, cortex_a_write_cpu_memory_fast should be
1919 * preferred.
1920 * Preconditions:
1921 * - Address is in R0.
1922 * - R0 is marked dirty.
1923 */
1924 struct armv7a_common *armv7a = target_to_armv7a(target);
1925 struct arm *arm = &armv7a->arm;
1926 int retval;
1927
1928 /* Mark register R1 as dirty, to use for transferring data. */
1929 arm_reg_current(arm, 1)->dirty = true;
1930
1931 /* Switch to non-blocking mode if not already in that mode. */
1932 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
1933 if (retval != ERROR_OK)
1934 return retval;
1935
1936 /* Go through the objects. */
1937 while (count) {
1938 /* Write the value to store into DTRRX. */
1939 uint32_t data, opcode;
1940 if (size == 1)
1941 data = *buffer;
1942 else if (size == 2)
1943 data = target_buffer_get_u16(target, buffer);
1944 else
1945 data = target_buffer_get_u32(target, buffer);
1946 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1947 armv7a->debug_base + CPUDBG_DTRRX, data);
1948 if (retval != ERROR_OK)
1949 return retval;
1950
1951 /* Transfer the value from DTRRX to R1. */
1952 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
1953 if (retval != ERROR_OK)
1954 return retval;
1955
1956 /* Write the value transferred to R1 into memory. */
1957 if (size == 1)
1958 opcode = ARMV4_5_STRB_IP(1, 0);
1959 else if (size == 2)
1960 opcode = ARMV4_5_STRH_IP(1, 0);
1961 else
1962 opcode = ARMV4_5_STRW_IP(1, 0);
1963 retval = cortex_a_exec_opcode(target, opcode, dscr);
1964 if (retval != ERROR_OK)
1965 return retval;
1966
1967 /* Check for faults and return early. */
1968 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
1969 return ERROR_OK; /* A data fault is not considered a system failure. */
1970
1971 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
1972 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1973 * must also check RXfull_l). Most of the time this will be free
1974 * because RXfull_l will be cleared immediately and cached in dscr. */
1975 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
1976 if (retval != ERROR_OK)
1977 return retval;
1978
1979 /* Advance. */
1980 buffer += size;
1981 --count;
1982 }
1983
1984 return ERROR_OK;
1985 }
1986
1987 static int cortex_a_write_cpu_memory_fast(struct target *target,
1988 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1989 {
1990 /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
1991 * in *dscr; updated to new value. This is fast but only works for
1992 * word-sized objects at aligned addresses.
1993 * Preconditions:
1994 * - Address is in R0 and must be a multiple of 4.
1995 * - R0 is marked dirty.
1996 */
1997 struct armv7a_common *armv7a = target_to_armv7a(target);
1998 int retval;
1999
2000 /* Switch to fast mode if not already in that mode. */
2001 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2002 if (retval != ERROR_OK)
2003 return retval;
2004
2005 /* Latch STC instruction. */
2006 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2007 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2008 if (retval != ERROR_OK)
2009 return retval;
2010
2011 /* Transfer all the data and issue all the instructions. */
2012 return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2013 4, count, armv7a->debug_base + CPUDBG_DTRRX);
2014 }
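
/* Note on the fast path above: per the ARMv7 debug architecture, latching
 * "STC p14, c5, [R0], #4" in ITR while in fast mode makes the core re-execute
 * the store every time DTRRX is written, storing the new word to [R0] and
 * post-incrementing R0 by 4. The mem_ap_write_buf_noincr() call can therefore
 * stream the whole buffer through a single DAP address without per-word
 * handshaking on the host side. */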
2015
2016 static int cortex_a_write_cpu_memory(struct target *target,
2017 uint32_t address, uint32_t size,
2018 uint32_t count, const uint8_t *buffer)
2019 {
2020 /* Write memory through the CPU. */
2021 int retval, final_retval;
2022 struct armv7a_common *armv7a = target_to_armv7a(target);
2023 struct arm *arm = &armv7a->arm;
2024 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2025
2026 LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2027 address, size, count);
2028 if (target->state != TARGET_HALTED) {
2029 LOG_WARNING("target not halted");
2030 return ERROR_TARGET_NOT_HALTED;
2031 }
2032
2033 if (!count)
2034 return ERROR_OK;
2035
2036 /* Clear any abort. */
2037 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2038 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2039 if (retval != ERROR_OK)
2040 return retval;
2041
2042 /* Read DSCR. */
2043 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2044 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2045 if (retval != ERROR_OK)
2046 return retval;
2047
2048 /* Switch to non-blocking mode if not already in that mode. */
2049 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2050 if (retval != ERROR_OK)
2051 goto out;
2052
2053 /* Mark R0 as dirty. */
2054 arm_reg_current(arm, 0)->dirty = true;
2055
2056 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2057 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2058 if (retval != ERROR_OK)
2059 goto out;
2060
2061 /* Get the memory address into R0. */
2062 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2063 armv7a->debug_base + CPUDBG_DTRRX, address);
2064 if (retval != ERROR_OK)
2065 goto out;
2066 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2067 if (retval != ERROR_OK)
2068 goto out;
2069
2070 if (size == 4 && (address % 4) == 0) {
2071 /* We are doing a word-aligned transfer, so use fast mode. */
2072 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2073 } else {
2074 /* Use slow path. */
2075 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2076 }
2077
2078 out:
2079 final_retval = retval;
2080
2081 /* Switch to non-blocking mode if not already in that mode. */
2082 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2083 if (final_retval == ERROR_OK)
2084 final_retval = retval;
2085
2086 /* Wait for last issued instruction to complete. */
2087 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2088 if (final_retval == ERROR_OK)
2089 final_retval = retval;
2090
2091 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2092 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2093 * check RXfull_l). Most of the time this will be free because RXfull_l
2094 * will be cleared immediately and cached in dscr. However, don't do this
2095 	 * if there is a fault, because then the instruction might not have completed
2096 * successfully. */
2097 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2098 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2099 if (retval != ERROR_OK)
2100 return retval;
2101 }
2102
2103 /* If there were any sticky abort flags, clear them. */
2104 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2105 fault_dscr = dscr;
2106 mem_ap_write_atomic_u32(armv7a->debug_ap,
2107 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2108 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2109 } else {
2110 fault_dscr = 0;
2111 }
2112
2113 /* Handle synchronous data faults. */
2114 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2115 if (final_retval == ERROR_OK) {
2116 /* Final return value will reflect cause of fault. */
2117 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2118 if (retval == ERROR_OK) {
2119 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2120 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2121 } else
2122 final_retval = retval;
2123 }
2124 /* Fault destroyed DFAR/DFSR; restore them. */
2125 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2126 if (retval != ERROR_OK)
2127 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2128 }
2129
2130 /* Handle asynchronous data faults. */
2131 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2132 if (final_retval == ERROR_OK)
2133 /* No other error has been recorded so far, so keep this one. */
2134 final_retval = ERROR_TARGET_DATA_ABORT;
2135 }
2136
2137 /* If the DCC is nonempty, clear it. */
2138 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2139 uint32_t dummy;
2140 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2141 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2142 if (final_retval == ERROR_OK)
2143 final_retval = retval;
2144 }
2145 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2146 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2147 if (final_retval == ERROR_OK)
2148 final_retval = retval;
2149 }
2150
2151 /* Done. */
2152 return final_retval;
2153 }
2154
2155 static int cortex_a_read_cpu_memory_slow(struct target *target,
2156 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2157 {
2158 /* Reads count objects of size size into *buffer. Old value of DSCR must be
2159 * in *dscr; updated to new value. This is slow because it works for
2160 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2161 * the address is aligned, cortex_a_read_cpu_memory_fast should be
2162 * preferred.
2163 * Preconditions:
2164 * - Address is in R0.
2165 * - R0 is marked dirty.
2166 */
2167 struct armv7a_common *armv7a = target_to_armv7a(target);
2168 struct arm *arm = &armv7a->arm;
2169 int retval;
2170
2171 /* Mark register R1 as dirty, to use for transferring data. */
2172 arm_reg_current(arm, 1)->dirty = true;
2173
2174 /* Switch to non-blocking mode if not already in that mode. */
2175 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2176 if (retval != ERROR_OK)
2177 return retval;
2178
2179 /* Go through the objects. */
2180 while (count) {
2181 /* Issue a load of the appropriate size to R1. */
2182 uint32_t opcode, data;
2183 if (size == 1)
2184 opcode = ARMV4_5_LDRB_IP(1, 0);
2185 else if (size == 2)
2186 opcode = ARMV4_5_LDRH_IP(1, 0);
2187 else
2188 opcode = ARMV4_5_LDRW_IP(1, 0);
2189 retval = cortex_a_exec_opcode(target, opcode, dscr);
2190 if (retval != ERROR_OK)
2191 return retval;
2192
2193 /* Issue a write of R1 to DTRTX. */
2194 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2195 if (retval != ERROR_OK)
2196 return retval;
2197
2198 /* Check for faults and return early. */
2199 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2200 return ERROR_OK; /* A data fault is not considered a system failure. */
2201
2202 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2203 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2204 * must also check TXfull_l). Most of the time this will be free
2205 * because TXfull_l will be set immediately and cached in dscr. */
2206 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2207 DSCR_DTRTX_FULL_LATCHED, dscr);
2208 if (retval != ERROR_OK)
2209 return retval;
2210
2211 /* Read the value transferred to DTRTX into the buffer. */
2212 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2213 armv7a->debug_base + CPUDBG_DTRTX, &data);
2214 if (retval != ERROR_OK)
2215 return retval;
2216 if (size == 1)
2217 *buffer = (uint8_t) data;
2218 else if (size == 2)
2219 target_buffer_set_u16(target, buffer, (uint16_t) data);
2220 else
2221 target_buffer_set_u32(target, buffer, data);
2222
2223 /* Advance. */
2224 buffer += size;
2225 --count;
2226 }
2227
2228 return ERROR_OK;
2229 }
2230
2231 static int cortex_a_read_cpu_memory_fast(struct target *target,
2232 uint32_t count, uint8_t *buffer, uint32_t *dscr)
2233 {
2234 /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2235 * *dscr; updated to new value. This is fast but only works for word-sized
2236 * objects at aligned addresses.
2237 * Preconditions:
2238 * - Address is in R0 and must be a multiple of 4.
2239 * - R0 is marked dirty.
2240 */
2241 struct armv7a_common *armv7a = target_to_armv7a(target);
2242 uint32_t u32;
2243 int retval;
2244
2245 /* Switch to non-blocking mode if not already in that mode. */
2246 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2247 if (retval != ERROR_OK)
2248 return retval;
2249
2250 /* Issue the LDC instruction via a write to ITR. */
2251 retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2252 if (retval != ERROR_OK)
2253 return retval;
2254
2255 count--;
2256
2257 if (count > 0) {
2258 /* Switch to fast mode if not already in that mode. */
2259 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2260 if (retval != ERROR_OK)
2261 return retval;
2262
2263 /* Latch LDC instruction. */
2264 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2265 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2266 if (retval != ERROR_OK)
2267 return retval;
2268
2269 /* Read the value transferred to DTRTX into the buffer. Due to fast
2270 * mode rules, this blocks until the instruction finishes executing and
2271 * then reissues the read instruction to read the next word from
2272 * memory. The last read of DTRTX in this call reads the second-to-last
2273 * word from memory and issues the read instruction for the last word.
2274 */
2275 retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2276 4, count, armv7a->debug_base + CPUDBG_DTRTX);
2277 if (retval != ERROR_OK)
2278 return retval;
2279
2280 /* Advance. */
2281 buffer += count * 4;
2282 }
2283
2284 /* Wait for last issued instruction to complete. */
2285 retval = cortex_a_wait_instrcmpl(target, dscr, false);
2286 if (retval != ERROR_OK)
2287 return retval;
2288
2289 /* Switch to non-blocking mode if not already in that mode. */
2290 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2291 if (retval != ERROR_OK)
2292 return retval;
2293
2294 /* Check for faults and return early. */
2295 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2296 return ERROR_OK; /* A data fault is not considered a system failure. */
2297
2298 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2299 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2300 * check TXfull_l). Most of the time this will be free because TXfull_l
2301 * will be set immediately and cached in dscr. */
2302 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2303 DSCR_DTRTX_FULL_LATCHED, dscr);
2304 if (retval != ERROR_OK)
2305 return retval;
2306
2307 /* Read the value transferred to DTRTX into the buffer. This is the last
2308 * word. */
2309 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2310 armv7a->debug_base + CPUDBG_DTRTX, &u32);
2311 if (retval != ERROR_OK)
2312 return retval;
2313 target_buffer_set_u32(target, buffer, u32);
2314
2315 return ERROR_OK;
2316 }
2317
2318 static int cortex_a_read_cpu_memory(struct target *target,
2319 uint32_t address, uint32_t size,
2320 uint32_t count, uint8_t *buffer)
2321 {
2322 /* Read memory through the CPU. */
2323 int retval, final_retval;
2324 struct armv7a_common *armv7a = target_to_armv7a(target);
2325 struct arm *arm = &armv7a->arm;
2326 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2327
2328 LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2329 address, size, count);
2330 if (target->state != TARGET_HALTED) {
2331 LOG_WARNING("target not halted");
2332 return ERROR_TARGET_NOT_HALTED;
2333 }
2334
2335 if (!count)
2336 return ERROR_OK;
2337
2338 /* Clear any abort. */
2339 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2340 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2341 if (retval != ERROR_OK)
2342 return retval;
2343
2344 /* Read DSCR */
2345 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2346 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2347 if (retval != ERROR_OK)
2348 return retval;
2349
2350 /* Switch to non-blocking mode if not already in that mode. */
2351 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2352 if (retval != ERROR_OK)
2353 goto out;
2354
2355 /* Mark R0 as dirty. */
2356 arm_reg_current(arm, 0)->dirty = true;
2357
2358 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2359 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2360 if (retval != ERROR_OK)
2361 goto out;
2362
2363 /* Get the memory address into R0. */
2364 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2365 armv7a->debug_base + CPUDBG_DTRRX, address);
2366 if (retval != ERROR_OK)
2367 goto out;
2368 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2369 if (retval != ERROR_OK)
2370 goto out;
2371
2372 if (size == 4 && (address % 4) == 0) {
2373 /* We are doing a word-aligned transfer, so use fast mode. */
2374 retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2375 } else {
2376 /* Use slow path. */
2377 retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2378 }
2379
2380 out:
2381 final_retval = retval;
2382
2383 /* Switch to non-blocking mode if not already in that mode. */
2384 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2385 if (final_retval == ERROR_OK)
2386 final_retval = retval;
2387
2388 /* Wait for last issued instruction to complete. */
2389 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2390 if (final_retval == ERROR_OK)
2391 final_retval = retval;
2392
2393 /* If there were any sticky abort flags, clear them. */
2394 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2395 fault_dscr = dscr;
2396 mem_ap_write_atomic_u32(armv7a->debug_ap,
2397 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2398 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2399 } else {
2400 fault_dscr = 0;
2401 }
2402
2403 /* Handle synchronous data faults. */
2404 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2405 if (final_retval == ERROR_OK) {
2406 /* Final return value will reflect cause of fault. */
2407 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2408 if (retval == ERROR_OK) {
2409 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2410 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2411 } else
2412 final_retval = retval;
2413 }
2414 /* Fault destroyed DFAR/DFSR; restore them. */
2415 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2416 if (retval != ERROR_OK)
2417 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2418 }
2419
2420 /* Handle asynchronous data faults. */
2421 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2422 if (final_retval == ERROR_OK)
2423 /* No other error has been recorded so far, so keep this one. */
2424 final_retval = ERROR_TARGET_DATA_ABORT;
2425 }
2426
2427 /* If the DCC is nonempty, clear it. */
2428 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2429 uint32_t dummy;
2430 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2431 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2432 if (final_retval == ERROR_OK)
2433 final_retval = retval;
2434 }
2435 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2436 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2437 if (final_retval == ERROR_OK)
2438 final_retval = retval;
2439 }
2440
2441 /* Done. */
2442 return final_retval;
2443 }
2444
2445
2446 /*
2447 * Cortex-A Memory access
2448 *
2449  * This is the same as for Cortex-M3, but we must also use the
2450  * correct AP number for every access.
2451 */
2452
2453 static int cortex_a_read_phys_memory(struct target *target,
2454 target_addr_t address, uint32_t size,
2455 uint32_t count, uint8_t *buffer)
2456 {
2457 int retval;
2458
2459 if (!count || !buffer)
2460 return ERROR_COMMAND_SYNTAX_ERROR;
2461
2462 	LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2463 address, size, count);
2464
2465 /* read memory through the CPU */
2466 cortex_a_prep_memaccess(target, 1);
2467 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2468 cortex_a_post_memaccess(target, 1);
2469
2470 return retval;
2471 }
2472
2473 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2474 uint32_t size, uint32_t count, uint8_t *buffer)
2475 {
2476 int retval;
2477
2478 /* cortex_a handles unaligned memory access */
2479 	LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2480 address, size, count);
2481
2482 cortex_a_prep_memaccess(target, 0);
2483 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2484 cortex_a_post_memaccess(target, 0);
2485
2486 return retval;
2487 }
2488
2489 static int cortex_a_write_phys_memory(struct target *target,
2490 target_addr_t address, uint32_t size,
2491 uint32_t count, const uint8_t *buffer)
2492 {
2493 int retval;
2494
2495 if (!count || !buffer)
2496 return ERROR_COMMAND_SYNTAX_ERROR;
2497
2498 	LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2499 address, size, count);
2500
2501 /* write memory through the CPU */
2502 cortex_a_prep_memaccess(target, 1);
2503 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2504 cortex_a_post_memaccess(target, 1);
2505
2506 return retval;
2507 }
2508
2509 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2510 uint32_t size, uint32_t count, const uint8_t *buffer)
2511 {
2512 int retval;
2513
2514 /* cortex_a handles unaligned memory access */
2515 	LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2516 address, size, count);
2517
2518 /* memory writes bypass the caches, must flush before writing */
2519 armv7a_cache_auto_flush_on_write(target, address, size * count);
2520
2521 cortex_a_prep_memaccess(target, 0);
2522 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2523 cortex_a_post_memaccess(target, 0);
2524 return retval;
2525 }
2526
2527 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2528 uint32_t count, uint8_t *buffer)
2529 {
2530 uint32_t size;
2531
2532 	/* Align the address up to a maximum access size of 4 bytes. The loop
2533 	 * condition ensures the next, larger pass still has enough data left. */
2534 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2535 if (address & size) {
2536 int retval = target_read_memory(target, address, size, 1, buffer);
2537 if (retval != ERROR_OK)
2538 return retval;
2539 address += size;
2540 count -= size;
2541 buffer += size;
2542 }
2543 }
2544
2545 /* Read the data with as large access size as possible. */
2546 for (; size > 0; size /= 2) {
2547 uint32_t aligned = count - count % size;
2548 if (aligned > 0) {
2549 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2550 if (retval != ERROR_OK)
2551 return retval;
2552 address += aligned;
2553 count -= aligned;
2554 buffer += aligned;
2555 }
2556 }
2557
2558 return ERROR_OK;
2559 }
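
/* Worked example for the alignment logic above (illustrative): reading
 * count = 10 bytes from address = 0x1003 is split into
 *
 *	1 byte  @ 0x1003	(head access, aligns the address)
 *	4 bytes @ 0x1004	\  one two-word read
 *	4 bytes @ 0x1008	/
 *	1 byte  @ 0x100c	(tail access)
 *
 * i.e. single-byte accesses on the ragged ends and word-sized accesses for
 * the aligned middle. cortex_a_write_buffer below splits writes identically. */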
2560
2561 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2562 uint32_t count, const uint8_t *buffer)
2563 {
2564 uint32_t size;
2565
2566 	/* Align the address up to a maximum access size of 4 bytes. The loop
2567 	 * condition ensures the next, larger pass still has enough data left. */
2568 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2569 if (address & size) {
2570 int retval = target_write_memory(target, address, size, 1, buffer);
2571 if (retval != ERROR_OK)
2572 return retval;
2573 address += size;
2574 count -= size;
2575 buffer += size;
2576 }
2577 }
2578
2579 /* Write the data with as large access size as possible. */
2580 for (; size > 0; size /= 2) {
2581 uint32_t aligned = count - count % size;
2582 if (aligned > 0) {
2583 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2584 if (retval != ERROR_OK)
2585 return retval;
2586 address += aligned;
2587 count -= aligned;
2588 buffer += aligned;
2589 }
2590 }
2591
2592 return ERROR_OK;
2593 }
2594
2595 static int cortex_a_handle_target_request(void *priv)
2596 {
2597 struct target *target = priv;
2598 struct armv7a_common *armv7a = target_to_armv7a(target);
2599 int retval;
2600
2601 if (!target_was_examined(target))
2602 return ERROR_OK;
2603 if (!target->dbg_msg_enabled)
2604 return ERROR_OK;
2605
2606 if (target->state == TARGET_RUNNING) {
2607 uint32_t request;
2608 uint32_t dscr;
2609 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2610 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2611
2612 /* check if we have data */
2613 int64_t then = timeval_ms();
2614 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2615 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2616 armv7a->debug_base + CPUDBG_DTRTX, &request);
2617 if (retval == ERROR_OK) {
2618 target_request(target, request);
2619 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2620 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2621 }
2622 if (timeval_ms() > then + 1000) {
2623 LOG_ERROR("Timeout waiting for dtr tx full");
2624 return ERROR_FAIL;
2625 }
2626 }
2627 }
2628
2629 return ERROR_OK;
2630 }
2631
2632 /*
2633 * Cortex-A target information and configuration
2634 */
2635
2636 static int cortex_a_examine_first(struct target *target)
2637 {
2638 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2639 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2640 struct adiv5_dap *swjdp = armv7a->arm.dap;
2641
2642 int i;
2643 int retval = ERROR_OK;
2644 uint32_t didr, cpuid, dbg_osreg;
2645
2646 /* Search for the APB-AP - it is needed for access to debug registers */
2647 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2648 if (retval != ERROR_OK) {
2649 LOG_ERROR("Could not find APB-AP for debug access");
2650 return retval;
2651 }
2652
2653 retval = mem_ap_init(armv7a->debug_ap);
2654 if (retval != ERROR_OK) {
2655 LOG_ERROR("Could not initialize the APB-AP");
2656 return retval;
2657 }
2658
2659 armv7a->debug_ap->memaccess_tck = 80;
2660
2661 if (!target->dbgbase_set) {
2662 uint32_t dbgbase;
2663 /* Get ROM Table base */
2664 uint32_t apid;
2665 int32_t coreidx = target->coreid;
2666 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2667 target->cmd_name);
2668 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2669 if (retval != ERROR_OK)
2670 return retval;
2671 /* Lookup 0x15 -- Processor DAP */
2672 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2673 &armv7a->debug_base, &coreidx);
2674 if (retval != ERROR_OK) {
2675 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2676 target->cmd_name);
2677 return retval;
2678 }
2679 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2680 target->coreid, armv7a->debug_base);
2681 } else
2682 armv7a->debug_base = target->dbgbase;
2683
2684 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2685 armv7a->debug_base + CPUDBG_DIDR, &didr);
2686 if (retval != ERROR_OK) {
2687 LOG_DEBUG("Examine %s failed", "DIDR");
2688 return retval;
2689 }
2690
2691 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2692 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2693 if (retval != ERROR_OK) {
2694 LOG_DEBUG("Examine %s failed", "CPUID");
2695 return retval;
2696 }
2697
2698 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2699 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2700
2701 cortex_a->didr = didr;
2702 cortex_a->cpuid = cpuid;
2703
2704 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2705 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2706 if (retval != ERROR_OK)
2707 return retval;
2708 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2709
2710 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2711 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2712 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2713 return ERROR_TARGET_INIT_FAILED;
2714 }
2715
2716 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2717 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2718
2719 /* Read DBGOSLSR and check if OSLK is implemented */
2720 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2721 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2722 if (retval != ERROR_OK)
2723 return retval;
2724 LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2725
2726 /* check if OS Lock is implemented */
2727 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2728 /* check if OS Lock is set */
2729 if (dbg_osreg & OSLSR_OSLK) {
2730 LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2731
2732 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2733 armv7a->debug_base + CPUDBG_OSLAR,
2734 0);
2735 if (retval == ERROR_OK)
2736 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2737 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2738
2739 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2740 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2741 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2742 target->coreid);
2743 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2744 return ERROR_TARGET_INIT_FAILED;
2745 }
2746 }
2747 }
2748
2749 armv7a->arm.core_type = ARM_MODE_MON;
2750
2751 /* Avoid recreating the registers cache */
2752 if (!target_was_examined(target)) {
2753 retval = cortex_a_dpm_setup(cortex_a, didr);
2754 if (retval != ERROR_OK)
2755 return retval;
2756 }
2757
2758 /* Setup Breakpoint Register Pairs */
2759 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2760 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2761 cortex_a->brp_num_available = cortex_a->brp_num;
2762 free(cortex_a->brp_list);
2763 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2764 /* cortex_a->brb_enabled = ????; */
2765 for (i = 0; i < cortex_a->brp_num; i++) {
2766 cortex_a->brp_list[i].used = 0;
2767 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2768 cortex_a->brp_list[i].type = BRP_NORMAL;
2769 else
2770 cortex_a->brp_list[i].type = BRP_CONTEXT;
2771 cortex_a->brp_list[i].value = 0;
2772 cortex_a->brp_list[i].control = 0;
2773 cortex_a->brp_list[i].BRPn = i;
2774 }
2775
2776 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2777
2778 /* select debug_ap as default */
2779 swjdp->apsel = armv7a->debug_ap->ap_num;
2780
2781 target_set_examined(target);
2782 return ERROR_OK;
2783 }
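
/* Note (illustrative): if the ROM table lookup above fails, the debug base
 * can instead be given in the configuration, e.g. via the standard
 * "-dbgbase <address>" option of "target create", which sets the
 * target->dbgbase / target->dbgbase_set fields consumed above. */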
2784
2785 static int cortex_a_examine(struct target *target)
2786 {
2787 int retval = ERROR_OK;
2788
2789 /* Reestablish communication after target reset */
2790 retval = cortex_a_examine_first(target);
2791
2792 /* Configure core debug access */
2793 if (retval == ERROR_OK)
2794 retval = cortex_a_init_debug_access(target);
2795
2796 return retval;
2797 }
2798
2799 /*
2800 * Cortex-A target creation and initialization
2801 */
2802
2803 static int cortex_a_init_target(struct command_context *cmd_ctx,
2804 struct target *target)
2805 {
2806 /* examine_first() does a bunch of this */
2807 arm_semihosting_init(target);
2808 return ERROR_OK;
2809 }
2810
2811 static int cortex_a_init_arch_info(struct target *target,
2812 struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
2813 {
2814 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2815
2816 /* Setup struct cortex_a_common */
2817 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2818 armv7a->arm.dap = dap;
2819
2820 /* register arch-specific functions */
2821 armv7a->examine_debug_reason = NULL;
2822
2823 armv7a->post_debug_entry = cortex_a_post_debug_entry;
2824
2825 armv7a->pre_restore_context = NULL;
2826
2827 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
2828
2829
2830 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
2831
2832 /* REVISIT v7a setup should be in a v7a-specific routine */
2833 armv7a_init_arch_info(target, armv7a);
2834 target_register_timer_callback(cortex_a_handle_target_request, 1,
2835 TARGET_TIMER_TYPE_PERIODIC, target);
2836
2837 return ERROR_OK;
2838 }
2839
2840 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
2841 {
2842 struct cortex_a_common *cortex_a;
2843 struct adiv5_private_config *pc;
2844
2845 if (target->private_config == NULL)
2846 return ERROR_FAIL;
2847
2848 pc = (struct adiv5_private_config *)target->private_config;
2849
2850 cortex_a = calloc(1, sizeof(struct cortex_a_common));
2851 if (cortex_a == NULL) {
2852 LOG_ERROR("Out of memory");
2853 return ERROR_FAIL;
2854 }
2855 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2856 cortex_a->armv7a_common.is_armv7r = false;
2857 cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
2858
2859 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2860 }
2861
2862 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2863 {
2864 struct cortex_a_common *cortex_a;
2865 struct adiv5_private_config *pc;
2866
2867 pc = (struct adiv5_private_config *)target->private_config;
2868 if (adiv5_verify_config(pc) != ERROR_OK)
2869 return ERROR_FAIL;
2870
2871 cortex_a = calloc(1, sizeof(struct cortex_a_common));
2872 if (cortex_a == NULL) {
2873 LOG_ERROR("Out of memory");
2874 return ERROR_FAIL;
2875 }
2876 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2877 cortex_a->armv7a_common.is_armv7r = true;
2878
2879 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2880 }
2881
2882 static void cortex_a_deinit_target(struct target *target)
2883 {
2884 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2885 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2886 struct arm_dpm *dpm = &armv7a->dpm;
2887 uint32_t dscr;
2888 int retval;
2889
2890 if (target_was_examined(target)) {
2891 /* Disable halt for breakpoint, watchpoint and vector catch */
2892 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2893 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2894 if (retval == ERROR_OK)
2895 mem_ap_write_atomic_u32(armv7a->debug_ap,
2896 armv7a->debug_base + CPUDBG_DSCR,
2897 dscr & ~DSCR_HALT_DBG_MODE);
2898 }
2899
2900 free(cortex_a->brp_list);
2901 free(dpm->dbp);
2902 free(dpm->dwp);
2903 free(target->private_config);
2904 free(cortex_a);
2905 }
2906
2907 static int cortex_a_mmu(struct target *target, int *enabled)
2908 {
2909 struct armv7a_common *armv7a = target_to_armv7a(target);
2910
2911 if (target->state != TARGET_HALTED) {
2912 LOG_ERROR("%s: target not halted", __func__);
2913 return ERROR_TARGET_INVALID;
2914 }
2915
2916 if (armv7a->is_armv7r)
2917 *enabled = 0;
2918 else
2919 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2920
2921 return ERROR_OK;
2922 }
2923
2924 static int cortex_a_virt2phys(struct target *target,
2925 target_addr_t virt, target_addr_t *phys)
2926 {
2927 int retval;
2928 int mmu_enabled = 0;
2929
2930 /*
2931 * If the MMU was not enabled at debug entry, there is no
2932 * way of knowing if there was ever a valid configuration
2933 * for it and thus it's not safe to enable it. In this case,
2934 * just return the virtual address as physical.
2935 */
2936 cortex_a_mmu(target, &mmu_enabled);
2937 if (!mmu_enabled) {
2938 *phys = virt;
2939 return ERROR_OK;
2940 }
2941
2942 	/* the MMU must be enabled in order to get a correct translation */
2943 retval = cortex_a_mmu_modify(target, 1);
2944 if (retval != ERROR_OK)
2945 return retval;
2946 return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
2947 (uint32_t *)phys, 1);
2948 }
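
/* Usage note (illustrative): this is what backs the generic "virt2phys"
 * command, so a translation can be checked from the OpenOCD console with,
 * e.g.:
 *
 *	> virt2phys 0xc0008000
 *
 * With the MMU off at debug entry, the virtual address is simply returned
 * unchanged, as explained in the comment above. */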
2949
2950 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
2951 {
2952 struct target *target = get_current_target(CMD_CTX);
2953 struct armv7a_common *armv7a = target_to_armv7a(target);
2954
2955 return armv7a_handle_cache_info_command(CMD_CTX,
2956 &armv7a->armv7a_mmu.armv7a_cache);
2957 }
2958
2959
2960 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
2961 {
2962 struct target *target = get_current_target(CMD_CTX);
2963 if (!target_was_examined(target)) {
2964 LOG_ERROR("target not examined yet");
2965 return ERROR_FAIL;
2966 }
2967
2968 return cortex_a_init_debug_access(target);
2969 }
2970
2971 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
2972 {
2973 struct target *target = get_current_target(CMD_CTX);
2974 int retval = ERROR_OK;
2975 struct target_list *head;
2976 head = target->head;
2977 if (head != (struct target_list *)NULL) {
2978 if (CMD_ARGC == 1) {
2979 int coreid = 0;
2980 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2981 if (ERROR_OK != retval)
2982 return retval;
2983 target->gdb_service->core[1] = coreid;
2984
2985 }
2986 		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32,
2987 			target->gdb_service->core[0], target->gdb_service->core[1]);
2988 }
2989 return ERROR_OK;
2990 }
2991
2992 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
2993 {
2994 struct target *target = get_current_target(CMD_CTX);
2995 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2996
2997 static const Jim_Nvp nvp_maskisr_modes[] = {
2998 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
2999 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3000 { .name = NULL, .value = -1 },
3001 };
3002 const Jim_Nvp *n;
3003
3004 if (CMD_ARGC > 0) {
3005 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3006 if (n->name == NULL) {
3007 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3008 return ERROR_COMMAND_SYNTAX_ERROR;
3009 }
3010
3011 cortex_a->isrmasking_mode = n->value;
3012 }
3013
3014 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3015 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3016
3017 return ERROR_OK;
3018 }
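
/* Usage note (illustrative): from the OpenOCD console,
 *
 *	> cortex_a maskisr on
 *	cortex_a interrupt mask on
 *
 * enables masking of interrupts while single-stepping; with no argument the
 * handler only prints the current setting, as the code above shows. */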
3019
3020 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3021 {
3022 struct target *target = get_current_target(CMD_CTX);
3023 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3024
3025 static const Jim_Nvp nvp_dacrfixup_modes[] = {
3026 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3027 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3028 { .name = NULL, .value = -1 },
3029 };
3030 const Jim_Nvp *n;
3031
3032 if (CMD_ARGC > 0) {
3033 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3034 if (n->name == NULL)
3035 return ERROR_COMMAND_SYNTAX_ERROR;
3036 cortex_a->dacrfixup_mode = n->value;
3037
3038 }
3039
3040 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3041 command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3042
3043 return ERROR_OK;
3044 }
3045
3046 static const struct command_registration cortex_a_exec_command_handlers[] = {
3047 {
3048 .name = "cache_info",
3049 .handler = cortex_a_handle_cache_info_command,
3050 .mode = COMMAND_EXEC,
3051 .help = "display information about target caches",
3052 .usage = "",
3053 },
3054 {
3055 .name = "dbginit",
3056 .handler = cortex_a_handle_dbginit_command,
3057 .mode = COMMAND_EXEC,
3058 .help = "Initialize core debug",
3059 .usage = "",
3060 },
3061 {
3062 .name = "smp_gdb",
3063 .handler = cortex_a_handle_smp_gdb_command,
3064 .mode = COMMAND_EXEC,
3065 		.help = "display/set the current core presented to gdb",
3066 .usage = "",
3067 },
3068 {
3069 .name = "maskisr",
3070 .handler = handle_cortex_a_mask_interrupts_command,
3071 .mode = COMMAND_ANY,
3072 .help = "mask cortex_a interrupts",
3073 .usage = "['on'|'off']",
3074 },
3075 {
3076 .name = "dacrfixup",
3077 .handler = handle_cortex_a_dacrfixup_command,
3078 .mode = COMMAND_ANY,
3079 .help = "set domain access control (DACR) to all-manager "
3080 "on memory access",
3081 .usage = "['on'|'off']",
3082 },
3083 {
3084 .chain = armv7a_mmu_command_handlers,
3085 },
3086 {
3087 .chain = smp_command_handlers,
3088 },
3089
3090 COMMAND_REGISTRATION_DONE
3091 };
3092 static const struct command_registration cortex_a_command_handlers[] = {
3093 {
3094 .chain = arm_command_handlers,
3095 },
3096 {
3097 .chain = armv7a_command_handlers,
3098 },
3099 {
3100 .name = "cortex_a",
3101 .mode = COMMAND_ANY,
3102 .help = "Cortex-A command group",
3103 .usage = "",
3104 		.chain = cortex_a_exec_command_handlers,