target/cortex_a: enable DSCR_HALT_DBG_MODE during examine
src/target/cortex_a.c

/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2006 by Magnus Lundin                                   *
 *   lundin@mlu.mine.nu                                                    *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   Copyright (C) 2009 by Dirk Behme                                      *
 *   dirk.behme@gmail.com - copy from cortex_m3                            *
 *                                                                         *
 *   Copyright (C) 2010 Øyvind Harboe                                      *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   Copyright (C) ST-Ericsson SA 2011                                     *
 *   michel.jaouen@stericsson.com : smp minimum support                    *
 *                                                                         *
 *   Copyright (C) Broadcom 2012                                           *
 *   ehunter@broadcom.com : Cortex-R4 support                              *
 *                                                                         *
 *   Copyright (C) 2013 Kamal Dasu                                         *
 *   kdasu.kdev@gmail.com                                                  *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,      *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of       *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        *
 *   GNU General Public License for more details.                         *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License    *
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.*
 *                                                                         *
 *   Cortex-A8(tm) TRM, ARM DDI 0344H                                      *
 *   Cortex-A9(tm) TRM, ARM DDI 0407F                                      *
 *   Cortex-R4(tm) TRM, ARM DDI 0363E                                      *
 *   Cortex-A15(tm) TRM, ARM DDI 0438C                                     *
 *                                                                         *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "cortex_a.h"
#include "register.h"
#include "armv7a_mmu.h"
#include "target_request.h"
#include "target_type.h"
#include "arm_opcodes.h"
#include "arm_semihosting.h"
#include "transport/transport.h"
#include <helper/time_support.h>

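/* Iterate over all targets in an SMP group; 'head' is the group's
 * target_list head and 'pos' the list cursor. */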
#define foreach_smp_target(pos, head) \
	for (pos = head; (pos != NULL); pos = pos->next)

static int cortex_a_poll(struct target *target);
static int cortex_a_debug_entry(struct target *target);
static int cortex_a_restore_context(struct target *target, bool bpwp);
static int cortex_a_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_mmu(struct target *target, int *enabled);
static int cortex_a_mmu_modify(struct target *target, int enable);
static int cortex_a_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);

/* restore cp15_control_reg at resume */
static int cortex_a_restore_cp15_control_reg(struct target *target)
{
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
		cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg);
	}
	return retval;
}

/*
 * Set up ARM core for memory access.
 * If !phys_access, switch to SVC mode and make sure MMU is on
 * If phys_access, switch off mmu
 */
static int cortex_a_prep_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	int mmu_enabled = 0;

	if (phys_access == 0) {
		arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* overwrite DACR to all-manager */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					0xFFFFFFFF);
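			/* Note: DACR = 0xFFFFFFFF puts all 16 protection
			 * domains in "manager" mode (0b11), so MMU access
			 * permission checks are bypassed and the debugger can
			 * reach memory regardless of page protections. */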
		}
	} else {
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 0);
	}
	return ERROR_OK;
}

/*
 * Restore ARM core after memory access.
 * If !phys_access, switch to previous mode
 * If phys_access, restore MMU setting
 */
static int cortex_a_post_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if (phys_access == 0) {
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* restore */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					cortex_a->cp15_dacr_reg);
		}
		arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	} else {
		int mmu_enabled = 0;
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
	}
	return ERROR_OK;
}

/* Modify cp15_control_reg in order to enable or disable the MMU for:
 * - virt2phys address conversion
 * - reading or writing memory at physical or virtual addresses */
static int cortex_a_mmu_modify(struct target *target, int enable)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;
	int need_write = 0;

	if (enable) {
		/* if the target stopped with the MMU disabled, we cannot
		 * enable it now */
		if (!(cortex_a->cp15_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
			return ERROR_FAIL;
		}
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
			cortex_a->cp15_control_reg_curr |= 0x1U;
			need_write = 1;
		}
	} else {
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
			cortex_a->cp15_control_reg_curr &= ~0x1U;
			need_write = 1;
		}
	}

	if (need_write) {
		LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
			enable ? "enable mmu" : "disable mmu",
			cortex_a->cp15_control_reg_curr);

		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg_curr);
	}
	return retval;
}

/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t dscr;
	int retval;

	/* lock memory-mapped access to debug registers to prevent
	 * software interference */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = dap_run(armv7a->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Enable halt for breakpoint, watchpoint and vector catch */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
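	/* DSCR_HALT_DBG_MODE enables "halting debug-mode": breakpoint,
	 * watchpoint and vector catch debug events halt the core into debug
	 * state instead of raising a debug exception on the target. */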
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* Since this is likely called from init or reset, update target state information */
	return cortex_a_poll(target);
}

static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
{
	/* Waits until InstrCmpl_l becomes 1, indicating the instruction is done.
	 * Writes the final value of DSCR into *dscr. Pass force=true to force
	 * reading DSCR at least once, even if *dscr already has INSTR_COMP set. */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int64_t then = timeval_ms();
	while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
		force = false;
		int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for InstrCompl=1");
			return ERROR_FAIL;
		}
	}
	return ERROR_OK;
}

/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value. Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 */
static int cortex_a_exec_opcode(struct target *target,
		uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set; use the local copy so a NULL
	 * dscr_p is never dereferenced */
	retval = cortex_a_wait_instrcmpl(target, &dscr, false);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	int64_t then = timeval_ms();
	do {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_INSTR_COMP) == 0);	/* Wait for InstrCompl bit to be set */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}

/* Write to memory mapped registers directly with no cache or mmu handling */
static int cortex_a_dap_write_memap_register_u32(struct target *target,
	uint32_t address,
	uint32_t value)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);

	return retval;
}

/*
 * Cortex-A implementation of Debug Programmer's Model
 *
 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
 * so there's no need to poll for it before executing an instruction.
 *
 * NOTE that in several of these cases the "stall" mode might be useful.
 * It'd let us queue a few operations together... prepare/finish might
 * be the places to enable/disable that mode.
 */

static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}

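/* Note on naming: the DTR registers are named from the core's point of
 * view. The debugger writes DTRRX (which the core receives) and reads
 * DTRTX (which the core transmits); DSCR carries matching RXfull/TXfull
 * status flags. */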
static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
}

static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRTXfull */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}

static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	int64_t then = timeval_ms();
	for (;;) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}

static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}

static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
}

static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}

static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return cortex_a_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}

static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* the opcode, writing data to DCC */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}

static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}

static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}

static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t cr;

	switch (index_t) {
		case 0 ... 15:
			cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:
			cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
}

static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}

static struct target *get_cortex_a(struct target *target, int32_t coreid)
{
	struct target_list *head;
	struct target *curr;

	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
			return curr;
		head = head->next;
	}
	return target;
}

static int cortex_a_halt(struct target *target);

static int cortex_a_halt_smp(struct target *target)
{
	int retval = 0;
	struct target_list *head;
	struct target *curr;
	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr != target) && (curr->state != TARGET_HALTED)
			&& target_was_examined(curr))
			retval += cortex_a_halt(curr);
		head = head->next;
	}
	return retval;
}

static int update_halt_gdb(struct target *target)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;
	int retval = 0;

	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += cortex_a_halt_smp(target);
	}

	if (target->gdb_service)
		gdb_target = target->gdb_service->target;

	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* Skip gdb_target; it alerts GDB so has to be polled as last one */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in cortex_a_poll() */
		curr->smp = 0;
		cortex_a_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		cortex_a_poll(gdb_target);
	return retval;
}

/*
 * Cortex-A Run control
 */

static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggling to another core is done by gdb as follows:
	 *   maint packet J core_id
	 *   continue
	 * the next poll then triggers a halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}

static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	int64_t then = timeval_ms();
	for (;;) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}

static int cortex_a_internal_restore(struct target *target, int current,
	target_addr_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the ARMv7 gdb thumb fixups do not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
		case ARM_STATE_AARCH64:
			LOG_ERROR("Shouldn't be in AARCH64 state");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* restore dpm_mode at system halt */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* call it now, before restoring context, because it uses cpu
	 * register r0 for restoring the cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}
#endif
	return retval;
}

static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	int64_t then = timeval_ms();
	for (;;) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}

static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
{
	int retval = 0;
	struct target_list *head;
	struct target *curr;
	target_addr_t address;
	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr != target) && (curr->state != TARGET_RUNNING)
			&& target_was_examined(curr)) {
			/* resume at current address, not in step mode */
			retval += cortex_a_internal_restore(curr, 1, &address,
					handle_breakpoints, 0);
			retval += cortex_a_internal_restart(curr);
		}
		head = head->next;
	}
	return retval;
}

static int cortex_a_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	/* dummy resume for smp toggle in order to reduce gdb impact */
	if ((target->smp) && (target->gdb_service->core[1] != -1)) {
		/* simulate a start and halt of target */
		target->gdb_service->target = NULL;
		target->gdb_service->core[0] = target->gdb_service->core[1];
		/* fake resume: at the next poll we play target core[1], see poll */
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		return 0;
	}
	cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
	if (target->smp) {
		target->gdb_service->core[0] = -1;
		retval = cortex_a_restore_smp(target, handle_breakpoints);
		if (retval != ERROR_OK)
			return retval;
	}
	cortex_a_internal_restart(target);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
	}

	return ERROR_OK;
}

static int cortex_a_debug_entry(struct target *target)
{
	uint32_t dscr;
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* First load register accessible through core debug port */
	retval = arm_dpm_read_current_registers(&armv7a->dpm);
	if (retval != ERROR_OK)
		return retval;

	if (arm->spsr) {
		/* read SPSR */
		retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
		if (retval != ERROR_OK)
			return retval;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}

static int cortex_a_post_debug_entry(struct target *target)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	retval = armv7a->arm.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cortex_a->cp15_control_reg);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
	cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;

	if (!armv7a->is_armv7r)
		armv7a_read_ttbcr(target);

	if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
		armv7a_identify_cache(target);

	if (armv7a->is_armv7r) {
		armv7a->armv7a_mmu.mmu_enabled = 0;
	} else {
		armv7a->armv7a_mmu.mmu_enabled =
			(cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
	}
	armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
		(cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
	armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
		(cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
	cortex_a->curr_mode = armv7a->arm.core_mode;

	/* switch to SVC mode to read DACR */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
	armv7a->arm.mrc(target, 15,
			0, 0, 3, 0,
			&cortex_a->cp15_dacr_reg);

	LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
		cortex_a->cp15_dacr_reg);

	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	return ERROR_OK;
}

int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t dscr;

	/* Read DSCR */
	int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (ERROR_OK != retval)
		return retval;

	/* clear bitfield */
	dscr &= ~bit_mask;
	/* put new value */
	dscr |= value & bit_mask;

	/* write new DSCR */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	return retval;
}

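/* Example use (see cortex_a_step below): interrupts are masked during a
 * single step with cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS)
 * and unmasked again with cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0). */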
static int cortex_a_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct breakpoint *breakpoint = NULL;
	struct breakpoint stepbreakpoint;
	struct reg *r;
	int retval;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = arm->pc;
	if (!current)
		buf_set_u32(r->value, 0, 32, address);
	else
		address = buf_get_u32(r->value, 0, 32);

	/* The front-end may request us not to handle breakpoints.
	 * But since Cortex-A uses breakpoint for single step,
	 * we MUST handle breakpoints.
	 */
	handle_breakpoints = 1;
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, address);
		if (breakpoint)
			cortex_a_unset_breakpoint(target, breakpoint);
	}

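	/* Cortex-A has no dedicated single-step bit: stepping is emulated by
	 * programming a hardware breakpoint in IVA *mismatch* mode (matchmode
	 * 0x04 below), so the core halts at the first instruction whose
	 * address differs from the current pc. */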
	/* Setup single step breakpoint */
	stepbreakpoint.address = address;
	stepbreakpoint.asid = 0;
	stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
		? 2 : 4;
	stepbreakpoint.type = BKPT_HARD;
	stepbreakpoint.set = 0;

	/* Disable interrupts during single step if requested */
	if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
		retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
		if (ERROR_OK != retval)
			return retval;
	}

	/* Break on IVA mismatch */
	cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	retval = cortex_a_resume(target, 1, address, 0, 0);
	if (retval != ERROR_OK)
		return retval;

	int64_t then = timeval_ms();
	while (target->state != TARGET_HALTED) {
		retval = cortex_a_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("timeout waiting for target halt");
			return ERROR_FAIL;
		}
	}

	cortex_a_unset_breakpoint(target, &stepbreakpoint);

	/* Re-enable interrupts if they were disabled */
	if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
		retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
		if (ERROR_OK != retval)
			return retval;
	}

	target->debug_reason = DBG_REASON_BREAKPOINT;

	if (breakpoint)
		cortex_a_set_breakpoint(target, breakpoint, 0);

	if (target->state == TARGET_HALTED)
		LOG_DEBUG("target stepped");

	return ERROR_OK;
}

static int cortex_a_restore_context(struct target *target, bool bpwp)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	if (armv7a->pre_restore_context)
		armv7a->pre_restore_context(target);

	return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
}

/*
 * Cortex-A Breakpoint and watchpoint functions
 */

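/* Each hardware breakpoint occupies one Breakpoint Register Pair (BRP):
 * a value register (BVR) holding an address or context ID, and a control
 * register (BCR) with enable, byte-address-select and match-mode fields. */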
/* Setup hardware Breakpoint Register Pair */
static int cortex_a_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* bounds check before dereferencing brp_list */
		while ((brp_i < cortex_a->brp_num) && brp_list[brp_i].used)
			brp_i++;
		if (brp_i >= cortex_a->brp_num) {
			LOG_ERROR("ERROR Cannot find free Breakpoint Register Pair");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		breakpoint->set = brp_i + 1;
		if (breakpoint->length == 2)
			byte_addr_select = (3 << (breakpoint->address & 0x02));
		control = ((matchmode & 0x7) << 20)
			| (byte_addr_select << 5)
			| (3 << 1) | 1;
		brp_list[brp_i].used = 1;
		brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
		brp_list[brp_i].control = control;
		retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
				brp_list[brp_i].value);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
				brp_list[brp_i].control);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
			brp_list[brp_i].control,
			brp_list[brp_i].value);
	} else if (breakpoint->type == BKPT_SOFT) {
		uint8_t code[4];
		if (breakpoint->length == 2) {
			/* length == 2: Thumb breakpoint */
			buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
		} else if (breakpoint->length == 3) {
			/* length == 3: Thumb-2 breakpoint, actual encoding is
			 * a regular Thumb BKPT instruction but we replace a
			 * 32bit Thumb-2 instruction, so fix-up the breakpoint
			 * length
			 */
			buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
			breakpoint->length = 4;
		} else {
			/* length == 4, normal ARM breakpoint */
			buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
		}

		retval = target_read_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
				breakpoint->length);
		}

		retval = target_write_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1, code);
		if (retval != ERROR_OK)
			return retval;

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
			breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
			breakpoint->length);

		breakpoint->set = 0x11;	/* Any nice value but 0 */
	}

	return ERROR_OK;
}

static int cortex_a_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval = ERROR_FAIL;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs; bounds check before dereferencing */
	while ((brp_i < cortex_a->brp_num) &&
		(brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
		brp_i++;

	if (brp_i >= cortex_a->brp_num) {
		LOG_ERROR("ERROR Cannot find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_i + 1;
	control = ((matchmode & 0x7) << 20)
		| (byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_i].used = 1;
	brp_list[brp_i].value = (breakpoint->asid);
	brp_list[brp_i].control = control;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
			brp_list[brp_i].value);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
			brp_list[brp_i].control);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
		brp_list[brp_i].control,
		brp_list[brp_i].value);
	return ERROR_OK;
}

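/* A hybrid breakpoint links two BRPs: one programmed with a context ID
 * (ASID) and one with an instruction virtual address. The linked pair
 * only triggers when both match, i.e. when the given address is hit in
 * the given address space. */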
static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval = ERROR_FAIL;
	int brp_1 = 0;	/* holds the contextID pair */
	int brp_2 = 0;	/* holds the IVA pair */
	uint32_t control_CTX, control_IVA;
	uint8_t CTX_byte_addr_select = 0x0F;
	uint8_t IVA_byte_addr_select = 0x0F;
	uint8_t CTX_machmode = 0x03;
	uint8_t IVA_machmode = 0x01;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs; bounds check before dereferencing */
	while ((brp_1 < cortex_a->brp_num) &&
		(brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
		brp_1++;

	LOG_DEBUG("brp(CTX) found num: %d", brp_1);
	if (brp_1 >= cortex_a->brp_num) {
		LOG_ERROR("ERROR Cannot find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	while ((brp_2 < cortex_a->brp_num) &&
		(brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
		brp_2++;

	LOG_DEBUG("brp(IVA) found num: %d", brp_2);
	if (brp_2 >= cortex_a->brp_num) {
		LOG_ERROR("ERROR Cannot find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_1 + 1;
	breakpoint->linked_BRP = brp_2;
	control_CTX = ((CTX_machmode & 0x7) << 20)
		| (brp_2 << 16)
		| (0 << 14)
		| (CTX_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_1].used = 1;
	brp_list[brp_1].value = (breakpoint->asid);
	brp_list[brp_1].control = control_CTX;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
			brp_list[brp_1].value);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
			brp_list[brp_1].control);
	if (retval != ERROR_OK)
		return retval;

	control_IVA = ((IVA_machmode & 0x7) << 20)
		| (brp_1 << 16)
		| (IVA_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_2].used = 1;
	brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
	brp_list[brp_2].control = control_IVA;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
			brp_list[brp_2].control);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
				breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
			breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
			breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}

static int cortex_a_add_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		cortex_a->brp_num_available--;

	return cortex_a_set_breakpoint(target, breakpoint, 0x00);	/* Exact match */
}

static int cortex_a_add_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		cortex_a->brp_num_available--;

	return cortex_a_set_context_breakpoint(target, breakpoint, 0x02);	/* asid match */
}

static int cortex_a_add_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		cortex_a->brp_num_available--;

	return cortex_a_set_hybrid_breakpoint(target, breakpoint);	/* ??? */
}

static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

#if 0
	/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
#endif

	if (breakpoint->set) {
		cortex_a_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			cortex_a->brp_num_available++;
	}

	return ERROR_OK;
}

/*
 * Cortex-A Reset functions
 */

static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */

		/*
		 * FIXME: fix reset when transport is SWD. This is a temporary
		 * work-around for release v0.10 that is not intended to stay!
		 */
		if (transport_is_swd() ||
			(target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
			jtag_add_reset(0, 1);

	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}

static int cortex_a_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	jtag_add_reset(0, 0);

	if (target_was_examined(target)) {
		retval = cortex_a_poll(target);
		if (retval != ERROR_OK)
			return retval;
	}

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			if (target_was_examined(target)) {
				retval = target_halt(target);
				if (retval != ERROR_OK)
					return retval;
			} else
				target->state = TARGET_UNKNOWN;
		}
	}

	return ERROR_OK;
}

static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
{
	/* Changes the mode of the DCC between non-blocking, stall, and fast mode.
	 * New desired mode must be in mode. Current value of DSCR must be in
	 * *dscr, which is updated with new value.
	 *
	 * This function elides actually sending the mode-change over the debug
	 * interface if the mode is already set as desired.
	 */
	uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
	if (new_dscr != *dscr) {
		struct armv7a_common *armv7a = target_to_armv7a(target);
		int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, new_dscr);
		if (retval == ERROR_OK)
			*dscr = new_dscr;
		return retval;
	} else {
		return ERROR_OK;
	}
}
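
/* The mode argument is expected to be one of the DSCR external DCC access
 * mode values (the DSCR_EXT_DCC_* definitions, assumed to live alongside
 * DSCR_EXT_DCC_MASK in cortex_a.h), already shifted into the mask field. */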

static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
	uint32_t value, uint32_t *dscr)
{
	/* Waits until the specified bit(s) of DSCR take on a specified value. */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int64_t then = timeval_ms();
	int retval;

	while ((*dscr & mask) != value) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("timeout waiting for DSCR bit change");
			return ERROR_FAIL;
		}
	}
	return ERROR_OK;
}
1796
1797 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
1798 uint32_t *data, uint32_t *dscr)
1799 {
1800 int retval;
1801 struct armv7a_common *armv7a = target_to_armv7a(target);
1802
1803 /* Move from coprocessor to R0. */
1804 retval = cortex_a_exec_opcode(target, opcode, dscr);
1805 if (retval != ERROR_OK)
1806 return retval;
1807
1808 /* Move from R0 to DTRTX. */
1809 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
1810 if (retval != ERROR_OK)
1811 return retval;
1812
1813 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
1814 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1815 * must also check TXfull_l). Most of the time this will be free
1816 * because TXfull_l will be set immediately and cached in dscr. */
1817 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
1818 DSCR_DTRTX_FULL_LATCHED, dscr);
1819 if (retval != ERROR_OK)
1820 return retval;
1821
1822 /* Read the value transferred to DTRTX. */
1823 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1824 armv7a->debug_base + CPUDBG_DTRTX, data);
1825 if (retval != ERROR_OK)
1826 return retval;
1827
1828 return ERROR_OK;
1829 }
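/*
 * Example (illustration only): read MIDR, i.e. "MRC p15, 0, R0, c0, c0, 0",
 * through the DCC while the core is halted:
 *
 *   uint32_t midr;
 *   retval = cortex_a_read_copro(target,
 *           ARMV4_5_MRC(15, 0, 0, 0, 0, 0), &midr, &dscr);
 */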
1830
1831 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1832 uint32_t *dfsr, uint32_t *dscr)
1833 {
1834 int retval;
1835
1836 if (dfar) {
1837 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1838 if (retval != ERROR_OK)
1839 return retval;
1840 }
1841
1842 if (dfsr) {
1843 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
1844 if (retval != ERROR_OK)
1845 return retval;
1846 }
1847
1848 return ERROR_OK;
1849 }
1850
1851 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
1852 uint32_t data, uint32_t *dscr)
1853 {
1854 int retval;
1855 struct armv7a_common *armv7a = target_to_armv7a(target);
1856
1857 /* Write the value into DTRRX. */
1858 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1859 armv7a->debug_base + CPUDBG_DTRRX, data);
1860 if (retval != ERROR_OK)
1861 return retval;
1862
1863 /* Move from DTRRX to R0. */
1864 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
1865 if (retval != ERROR_OK)
1866 return retval;
1867
1868 /* Move from R0 to coprocessor. */
1869 retval = cortex_a_exec_opcode(target, opcode, dscr);
1870 if (retval != ERROR_OK)
1871 return retval;
1872
1873 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
1874 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
1875 * check RXfull_l). Most of the time this will be free because RXfull_l
1876 * will be cleared immediately and cached in dscr. */
1877 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
1878 if (retval != ERROR_OK)
1879 return retval;
1880
1881 return ERROR_OK;
1882 }
1883
1884 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
1885 uint32_t dfsr, uint32_t *dscr)
1886 {
1887 int retval;
1888
1889 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
1890 if (retval != ERROR_OK)
1891 return retval;
1892
1893 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
1894 if (retval != ERROR_OK)
1895 return retval;
1896
1897 return ERROR_OK;
1898 }
1899
1900 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
1901 {
1902 uint32_t status, upper4;
1903
1904 if (dfsr & (1 << 9)) {
1905 /* LPAE format. */
1906 status = dfsr & 0x3f;
1907 upper4 = status >> 2;
1908 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
1909 return ERROR_TARGET_TRANSLATION_FAULT;
1910 else if (status == 33)
1911 return ERROR_TARGET_UNALIGNED_ACCESS;
1912 else
1913 return ERROR_TARGET_DATA_ABORT;
1914 } else {
1915 /* Normal format. */
1916 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
1917 if (status == 1)
1918 return ERROR_TARGET_UNALIGNED_ACCESS;
1919 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
1920 status == 9 || status == 11 || status == 13 || status == 15)
1921 return ERROR_TARGET_TRANSLATION_FAULT;
1922 else
1923 return ERROR_TARGET_DATA_ABORT;
1924 }
1925 }
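/*
 * Worked examples (per the ARMv7-A fault status encodings):
 * - DFSR = 0x001 (short format): status = 0x01, alignment fault
 *   -> ERROR_TARGET_UNALIGNED_ACCESS.
 * - DFSR = 0x005 (short format): status = 0x05, section translation fault
 *   -> ERROR_TARGET_TRANSLATION_FAULT.
 * - DFSR = 0x205 (bit 9 set, LPAE format): status = 0x05, upper4 = 1,
 *   first level translation fault -> ERROR_TARGET_TRANSLATION_FAULT.
 */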
1926
1927 static int cortex_a_write_cpu_memory_slow(struct target *target,
1928 uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1929 {
1930 /* Writes count objects of size size from *buffer. Old value of DSCR must
1931 * be in *dscr; updated to new value. This is slow because it works for
1932 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
1933 * the address is aligned, cortex_a_write_cpu_memory_fast should be
1934 * preferred.
1935 * Preconditions:
1936 * - Address is in R0.
1937 * - R0 is marked dirty.
1938 */
1939 struct armv7a_common *armv7a = target_to_armv7a(target);
1940 struct arm *arm = &armv7a->arm;
1941 int retval;
1942
1943 /* Mark register R1 as dirty, to use for transferring data. */
1944 arm_reg_current(arm, 1)->dirty = true;
1945
1946 /* Switch to non-blocking mode if not already in that mode. */
1947 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
1948 if (retval != ERROR_OK)
1949 return retval;
1950
1951 /* Go through the objects. */
1952 while (count) {
1953 /* Write the value to store into DTRRX. */
1954 uint32_t data, opcode;
1955 if (size == 1)
1956 data = *buffer;
1957 else if (size == 2)
1958 data = target_buffer_get_u16(target, buffer);
1959 else
1960 data = target_buffer_get_u32(target, buffer);
1961 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1962 armv7a->debug_base + CPUDBG_DTRRX, data);
1963 if (retval != ERROR_OK)
1964 return retval;
1965
1966 /* Transfer the value from DTRRX to R1. */
1967 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
1968 if (retval != ERROR_OK)
1969 return retval;
1970
1971 /* Write the value transferred to R1 into memory. */
1972 if (size == 1)
1973 opcode = ARMV4_5_STRB_IP(1, 0);
1974 else if (size == 2)
1975 opcode = ARMV4_5_STRH_IP(1, 0);
1976 else
1977 opcode = ARMV4_5_STRW_IP(1, 0);
1978 retval = cortex_a_exec_opcode(target, opcode, dscr);
1979 if (retval != ERROR_OK)
1980 return retval;
1981
1982 /* Check for faults and return early. */
1983 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
1984 return ERROR_OK; /* A data fault is not considered a system failure. */
1985
1986 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
1987 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1988 * must also check RXfull_l). Most of the time this will be free
1989 * because RXfull_l will be cleared immediately and cached in dscr. */
1990 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
1991 if (retval != ERROR_OK)
1992 return retval;
1993
1994 /* Advance. */
1995 buffer += size;
1996 --count;
1997 }
1998
1999 return ERROR_OK;
2000 }
2001
2002 static int cortex_a_write_cpu_memory_fast(struct target *target,
2003 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2004 {
2005 /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2006 * in *dscr; updated to new value. This is fast but only works for
2007 * word-sized objects at aligned addresses.
2008 * Preconditions:
2009 * - Address is in R0 and must be a multiple of 4.
2010 * - R0 is marked dirty.
2011 */
2012 struct armv7a_common *armv7a = target_to_armv7a(target);
2013 int retval;
2014
2015 /* Switch to fast mode if not already in that mode. */
2016 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2017 if (retval != ERROR_OK)
2018 return retval;
2019
2020 /* Latch STC instruction. */
2021 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2022 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2023 if (retval != ERROR_OK)
2024 return retval;
2025
2026 /* Transfer all the data and issue all the instructions. */
2027 return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2028 4, count, armv7a->debug_base + CPUDBG_DTRRX);
2029 }
2030
2031 static int cortex_a_write_cpu_memory(struct target *target,
2032 uint32_t address, uint32_t size,
2033 uint32_t count, const uint8_t *buffer)
2034 {
2035 /* Write memory through the CPU. */
2036 int retval, final_retval;
2037 struct armv7a_common *armv7a = target_to_armv7a(target);
2038 struct arm *arm = &armv7a->arm;
2039 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2040
2041 LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2042 address, size, count);
2043 if (target->state != TARGET_HALTED) {
2044 LOG_WARNING("target not halted");
2045 return ERROR_TARGET_NOT_HALTED;
2046 }
2047
2048 if (!count)
2049 return ERROR_OK;
2050
2051 /* Clear any abort. */
2052 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2053 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2054 if (retval != ERROR_OK)
2055 return retval;
2056
2057 /* Read DSCR. */
2058 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2059 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2060 if (retval != ERROR_OK)
2061 return retval;
2062
2063 /* Switch to non-blocking mode if not already in that mode. */
2064 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2065 if (retval != ERROR_OK)
2066 goto out;
2067
2068 /* Mark R0 as dirty. */
2069 arm_reg_current(arm, 0)->dirty = true;
2070
2071 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2072 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2073 if (retval != ERROR_OK)
2074 goto out;
2075
2076 /* Get the memory address into R0. */
2077 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2078 armv7a->debug_base + CPUDBG_DTRRX, address);
2079 if (retval != ERROR_OK)
2080 goto out;
2081 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2082 if (retval != ERROR_OK)
2083 goto out;
2084
2085 if (size == 4 && (address % 4) == 0) {
2086 /* We are doing a word-aligned transfer, so use fast mode. */
2087 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2088 } else {
2089 /* Use slow path. */
2090 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2091 }
2092
2093 out:
2094 final_retval = retval;
2095
2096 /* Switch to non-blocking mode if not already in that mode. */
2097 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2098 if (final_retval == ERROR_OK)
2099 final_retval = retval;
2100
2101 /* Wait for last issued instruction to complete. */
2102 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2103 if (final_retval == ERROR_OK)
2104 final_retval = retval;
2105
2106 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2107 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2108 * check RXfull_l). Most of the time this will be free because RXfull_l
2109 * will be cleared immediately and cached in dscr. However, don't do this
2110 * if there is a fault, because then the instruction might not have completed
2111 * successfully. */
2112 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2113 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2114 if (retval != ERROR_OK)
2115 return retval;
2116 }
2117
2118 /* If there were any sticky abort flags, clear them. */
2119 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2120 fault_dscr = dscr;
2121 mem_ap_write_atomic_u32(armv7a->debug_ap,
2122 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2123 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2124 } else {
2125 fault_dscr = 0;
2126 }
2127
2128 /* Handle synchronous data faults. */
2129 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2130 if (final_retval == ERROR_OK) {
2131 /* Final return value will reflect cause of fault. */
2132 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2133 if (retval == ERROR_OK) {
2134 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2135 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2136 } else
2137 final_retval = retval;
2138 }
2139 /* Fault destroyed DFAR/DFSR; restore them. */
2140 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2141 if (retval != ERROR_OK)
2142 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2143 }
2144
2145 /* Handle asynchronous data faults. */
2146 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2147 if (final_retval == ERROR_OK)
2148 /* No other error has been recorded so far, so keep this one. */
2149 final_retval = ERROR_TARGET_DATA_ABORT;
2150 }
2151
2152 /* If the DCC is nonempty, clear it. */
2153 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2154 uint32_t dummy;
2155 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2156 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2157 if (final_retval == ERROR_OK)
2158 final_retval = retval;
2159 }
2160 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2161 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2162 if (final_retval == ERROR_OK)
2163 final_retval = retval;
2164 }
2165
2166 /* Done. */
2167 return final_retval;
2168 }
2169
2170 static int cortex_a_read_cpu_memory_slow(struct target *target,
2171 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2172 {
2173 /* Reads count objects of size size into *buffer. Old value of DSCR must be
2174 * in *dscr; updated to new value. This is slow because it works for
2175 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2176 * the address is aligned, cortex_a_read_cpu_memory_fast should be
2177 * preferred.
2178 * Preconditions:
2179 * - Address is in R0.
2180 * - R0 is marked dirty.
2181 */
2182 struct armv7a_common *armv7a = target_to_armv7a(target);
2183 struct arm *arm = &armv7a->arm;
2184 int retval;
2185
2186 /* Mark register R1 as dirty, to use for transferring data. */
2187 arm_reg_current(arm, 1)->dirty = true;
2188
2189 /* Switch to non-blocking mode if not already in that mode. */
2190 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2191 if (retval != ERROR_OK)
2192 return retval;
2193
2194 /* Go through the objects. */
2195 while (count) {
2196 /* Issue a load of the appropriate size to R1. */
2197 uint32_t opcode, data;
2198 if (size == 1)
2199 opcode = ARMV4_5_LDRB_IP(1, 0);
2200 else if (size == 2)
2201 opcode = ARMV4_5_LDRH_IP(1, 0);
2202 else
2203 opcode = ARMV4_5_LDRW_IP(1, 0);
2204 retval = cortex_a_exec_opcode(target, opcode, dscr);
2205 if (retval != ERROR_OK)
2206 return retval;
2207
2208 /* Issue a write of R1 to DTRTX. */
2209 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2210 if (retval != ERROR_OK)
2211 return retval;
2212
2213 /* Check for faults and return early. */
2214 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2215 return ERROR_OK; /* A data fault is not considered a system failure. */
2216
2217 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2218 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2219 * must also check TXfull_l). Most of the time this will be free
2220 * because TXfull_l will be set immediately and cached in dscr. */
2221 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2222 DSCR_DTRTX_FULL_LATCHED, dscr);
2223 if (retval != ERROR_OK)
2224 return retval;
2225
2226 /* Read the value transferred to DTRTX into the buffer. */
2227 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2228 armv7a->debug_base + CPUDBG_DTRTX, &data);
2229 if (retval != ERROR_OK)
2230 return retval;
2231 if (size == 1)
2232 *buffer = (uint8_t) data;
2233 else if (size == 2)
2234 target_buffer_set_u16(target, buffer, (uint16_t) data);
2235 else
2236 target_buffer_set_u32(target, buffer, data);
2237
2238 /* Advance. */
2239 buffer += size;
2240 --count;
2241 }
2242
2243 return ERROR_OK;
2244 }
2245
2246 static int cortex_a_read_cpu_memory_fast(struct target *target,
2247 uint32_t count, uint8_t *buffer, uint32_t *dscr)
2248 {
2249 /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2250 * *dscr; updated to new value. This is fast but only works for word-sized
2251 * objects at aligned addresses.
2252 * Preconditions:
2253 * - Address is in R0 and must be a multiple of 4.
2254 * - R0 is marked dirty.
2255 */
2256 struct armv7a_common *armv7a = target_to_armv7a(target);
2257 uint32_t u32;
2258 int retval;
2259
2260 /* Switch to non-blocking mode if not already in that mode. */
2261 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2262 if (retval != ERROR_OK)
2263 return retval;
2264
2265 /* Issue the LDC instruction via a write to ITR. */
2266 retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2267 if (retval != ERROR_OK)
2268 return retval;
2269
2270 count--;
2271
2272 if (count > 0) {
2273 /* Switch to fast mode if not already in that mode. */
2274 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2275 if (retval != ERROR_OK)
2276 return retval;
2277
2278 /* Latch LDC instruction. */
2279 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2280 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2281 if (retval != ERROR_OK)
2282 return retval;
2283
2284 /* Read the value transferred to DTRTX into the buffer. Due to fast
2285 * mode rules, this blocks until the instruction finishes executing and
2286 * then reissues the read instruction to read the next word from
2287 * memory. The last read of DTRTX in this call reads the second-to-last
2288 * word from memory and issues the read instruction for the last word.
2289 */
2290 retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2291 4, count, armv7a->debug_base + CPUDBG_DTRTX);
2292 if (retval != ERROR_OK)
2293 return retval;
2294
2295 /* Advance. */
2296 buffer += count * 4;
2297 }
2298
2299 /* Wait for last issued instruction to complete. */
2300 retval = cortex_a_wait_instrcmpl(target, dscr, false);
2301 if (retval != ERROR_OK)
2302 return retval;
2303
2304 /* Switch to non-blocking mode if not already in that mode. */
2305 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2306 if (retval != ERROR_OK)
2307 return retval;
2308
2309 /* Check for faults and return early. */
2310 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2311 return ERROR_OK; /* A data fault is not considered a system failure. */
2312
2313 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2314 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2315 * check TXfull_l). Most of the time this will be free because TXfull_l
2316 * will be set immediately and cached in dscr. */
2317 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2318 DSCR_DTRTX_FULL_LATCHED, dscr);
2319 if (retval != ERROR_OK)
2320 return retval;
2321
2322 /* Read the value transferred to DTRTX into the buffer. This is the last
2323 * word. */
2324 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2325 armv7a->debug_base + CPUDBG_DTRTX, &u32);
2326 if (retval != ERROR_OK)
2327 return retval;
2328 target_buffer_set_u32(target, buffer, u32);
2329
2330 return ERROR_OK;
2331 }
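/*
 * The LDC opcode used above ("LDC p14, c5, [R0], #4") is the load analogue of
 * the STC used for writes: each execution loads a word from [R0] into DTRTX
 * and post-increments R0. In fast mode every read of DTRTX re-issues the load
 * for the next word, which is why the bulk loop stays one word behind and the
 * final word is drained separately above.
 */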
2332
2333 static int cortex_a_read_cpu_memory(struct target *target,
2334 uint32_t address, uint32_t size,
2335 uint32_t count, uint8_t *buffer)
2336 {
2337 /* Read memory through the CPU. */
2338 int retval, final_retval;
2339 struct armv7a_common *armv7a = target_to_armv7a(target);
2340 struct arm *arm = &armv7a->arm;
2341 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2342
2343 LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2344 address, size, count);
2345 if (target->state != TARGET_HALTED) {
2346 LOG_WARNING("target not halted");
2347 return ERROR_TARGET_NOT_HALTED;
2348 }
2349
2350 if (!count)
2351 return ERROR_OK;
2352
2353 /* Clear any abort. */
2354 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2355 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2356 if (retval != ERROR_OK)
2357 return retval;
2358
2359 /* Read DSCR */
2360 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2361 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2362 if (retval != ERROR_OK)
2363 return retval;
2364
2365 /* Switch to non-blocking mode if not already in that mode. */
2366 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2367 if (retval != ERROR_OK)
2368 goto out;
2369
2370 /* Mark R0 as dirty. */
2371 arm_reg_current(arm, 0)->dirty = true;
2372
2373 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2374 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2375 if (retval != ERROR_OK)
2376 goto out;
2377
2378 /* Get the memory address into R0. */
2379 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2380 armv7a->debug_base + CPUDBG_DTRRX, address);
2381 if (retval != ERROR_OK)
2382 goto out;
2383 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2384 if (retval != ERROR_OK)
2385 goto out;
2386
2387 if (size == 4 && (address % 4) == 0) {
2388 /* We are doing a word-aligned transfer, so use fast mode. */
2389 retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2390 } else {
2391 /* Use slow path. */
2392 retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2393 }
2394
2395 out:
2396 final_retval = retval;
2397
2398 /* Switch to non-blocking mode if not already in that mode. */
2399 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2400 if (final_retval == ERROR_OK)
2401 final_retval = retval;
2402
2403 /* Wait for last issued instruction to complete. */
2404 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2405 if (final_retval == ERROR_OK)
2406 final_retval = retval;
2407
2408 /* If there were any sticky abort flags, clear them. */
2409 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2410 fault_dscr = dscr;
2411 mem_ap_write_atomic_u32(armv7a->debug_ap,
2412 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2413 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2414 } else {
2415 fault_dscr = 0;
2416 }
2417
2418 /* Handle synchronous data faults. */
2419 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2420 if (final_retval == ERROR_OK) {
2421 /* Final return value will reflect cause of fault. */
2422 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2423 if (retval == ERROR_OK) {
2424 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2425 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2426 } else
2427 final_retval = retval;
2428 }
2429 /* Fault destroyed DFAR/DFSR; restore them. */
2430 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2431 if (retval != ERROR_OK)
2432 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2433 }
2434
2435 /* Handle asynchronous data faults. */
2436 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2437 if (final_retval == ERROR_OK)
2438 /* No other error has been recorded so far, so keep this one. */
2439 final_retval = ERROR_TARGET_DATA_ABORT;
2440 }
2441
2442 /* If the DCC is nonempty, clear it. */
2443 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2444 uint32_t dummy;
2445 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2446 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2447 if (final_retval == ERROR_OK)
2448 final_retval = retval;
2449 }
2450 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2451 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2452 if (final_retval == ERROR_OK)
2453 final_retval = retval;
2454 }
2455
2456 /* Done. */
2457 return final_retval;
2458 }
2459
2460
2461 /*
2462 * Cortex-A Memory access
2463 *
2464 * This is the same as for Cortex-M3, but we must also use the correct
2465 * AP number for every access.
2466 */
2467
2468 static int cortex_a_read_phys_memory(struct target *target,
2469 target_addr_t address, uint32_t size,
2470 uint32_t count, uint8_t *buffer)
2471 {
2472 int retval;
2473
2474 if (!count || !buffer)
2475 return ERROR_COMMAND_SYNTAX_ERROR;
2476
2477 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2478 address, size, count);
2479
2480 /* read memory through the CPU */
2481 cortex_a_prep_memaccess(target, 1);
2482 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2483 cortex_a_post_memaccess(target, 1);
2484
2485 return retval;
2486 }
2487
2488 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2489 uint32_t size, uint32_t count, uint8_t *buffer)
2490 {
2491 int retval;
2492
2493 /* cortex_a handles unaligned memory access */
2494 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2495 address, size, count);
2496
2497 cortex_a_prep_memaccess(target, 0);
2498 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2499 cortex_a_post_memaccess(target, 0);
2500
2501 return retval;
2502 }
2503
2504 static int cortex_a_write_phys_memory(struct target *target,
2505 target_addr_t address, uint32_t size,
2506 uint32_t count, const uint8_t *buffer)
2507 {
2508 int retval;
2509
2510 if (!count || !buffer)
2511 return ERROR_COMMAND_SYNTAX_ERROR;
2512
2513 LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2514 address, size, count);
2515
2516 /* write memory through the CPU */
2517 cortex_a_prep_memaccess(target, 1);
2518 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2519 cortex_a_post_memaccess(target, 1);
2520
2521 return retval;
2522 }
2523
2524 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2525 uint32_t size, uint32_t count, const uint8_t *buffer)
2526 {
2527 int retval;
2528
2529 /* cortex_a handles unaligned memory access */
2530 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2531 address, size, count);
2532
2533 /* memory writes bypass the caches, must flush before writing */
2534 armv7a_cache_auto_flush_on_write(target, address, size * count);
2535
2536 cortex_a_prep_memaccess(target, 0);
2537 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2538 cortex_a_post_memaccess(target, 0);
2539 return retval;
2540 }
2541
2542 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2543 uint32_t count, uint8_t *buffer)
2544 {
2545 uint32_t size;
2546
2547 /* Align the address upwards with accesses of increasing size, up to 4 bytes.
2548 * The loop condition makes sure the next, larger pass still has work to do. */
2549 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2550 if (address & size) {
2551 int retval = target_read_memory(target, address, size, 1, buffer);
2552 if (retval != ERROR_OK)
2553 return retval;
2554 address += size;
2555 count -= size;
2556 buffer += size;
2557 }
2558 }
2559
2560 /* Read the data with as large access size as possible. */
2561 for (; size > 0; size /= 2) {
2562 uint32_t aligned = count - count % size;
2563 if (aligned > 0) {
2564 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2565 if (retval != ERROR_OK)
2566 return retval;
2567 address += aligned;
2568 count -= aligned;
2569 buffer += aligned;
2570 }
2571 }
2572
2573 return ERROR_OK;
2574 }
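/*
 * Worked example (illustration only): address = 0x1001, count = 7.
 * The align-up loop reads 1 byte at 0x1001 and a halfword at 0x1002; the
 * descending loop then reads one word at 0x1004. The same carving is used by
 * cortex_a_write_buffer() below.
 */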
2575
2576 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2577 uint32_t count, const uint8_t *buffer)
2578 {
2579 uint32_t size;
2580
2581 /* Align the address upwards with accesses of increasing size, up to 4 bytes.
2582 * The loop condition makes sure the next, larger pass still has work to do. */
2583 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2584 if (address & size) {
2585 int retval = target_write_memory(target, address, size, 1, buffer);
2586 if (retval != ERROR_OK)
2587 return retval;
2588 address += size;
2589 count -= size;
2590 buffer += size;
2591 }
2592 }
2593
2594 /* Write the data with as large access size as possible. */
2595 for (; size > 0; size /= 2) {
2596 uint32_t aligned = count - count % size;
2597 if (aligned > 0) {
2598 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2599 if (retval != ERROR_OK)
2600 return retval;
2601 address += aligned;
2602 count -= aligned;
2603 buffer += aligned;
2604 }
2605 }
2606
2607 return ERROR_OK;
2608 }
2609
2610 static int cortex_a_handle_target_request(void *priv)
2611 {
2612 struct target *target = priv;
2613 struct armv7a_common *armv7a = target_to_armv7a(target);
2614 int retval;
2615
2616 if (!target_was_examined(target))
2617 return ERROR_OK;
2618 if (!target->dbg_msg_enabled)
2619 return ERROR_OK;
2620
2621 if (target->state == TARGET_RUNNING) {
2622 uint32_t request;
2623 uint32_t dscr;
2624 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2625 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2626
2627 /* check if we have data; test retval first, as dscr is only valid if the read above succeeded */
2628 int64_t then = timeval_ms();
2629 while ((retval == ERROR_OK) && (dscr & DSCR_DTR_TX_FULL)) {
2630 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2631 armv7a->debug_base + CPUDBG_DTRTX, &request);
2632 if (retval == ERROR_OK) {
2633 target_request(target, request);
2634 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2635 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2636 }
2637 if (timeval_ms() > then + 1000) {
2638 LOG_ERROR("Timeout waiting for dtr tx full");
2639 return ERROR_FAIL;
2640 }
2641 }
2642 }
2643
2644 return ERROR_OK;
2645 }
2646
2647 /*
2648 * Cortex-A target information and configuration
2649 */
2650
2651 static int cortex_a_examine_first(struct target *target)
2652 {
2653 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2654 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2655 struct adiv5_dap *swjdp = armv7a->arm.dap;
2656
2657 int i;
2658 int retval = ERROR_OK;
2659 uint32_t didr, cpuid, dbg_osreg;
2660
2661 /* Search for the APB-AP - it is needed for access to debug registers */
2662 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2663 if (retval != ERROR_OK) {
2664 LOG_ERROR("Could not find APB-AP for debug access");
2665 return retval;
2666 }
2667
2668 retval = mem_ap_init(armv7a->debug_ap);
2669 if (retval != ERROR_OK) {
2670 LOG_ERROR("Could not initialize the APB-AP");
2671 return retval;
2672 }
2673
2674 armv7a->debug_ap->memaccess_tck = 80;
2675
2676 if (!target->dbgbase_set) {
2677 uint32_t dbgbase;
2678 /* Get ROM Table base */
2679 uint32_t apid;
2680 int32_t coreidx = target->coreid;
2681 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2682 target->cmd_name);
2683 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2684 if (retval != ERROR_OK)
2685 return retval;
2686 /* Lookup 0x15 -- Processor DAP */
2687 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2688 &armv7a->debug_base, &coreidx);
2689 if (retval != ERROR_OK) {
2690 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2691 target->cmd_name);
2692 return retval;
2693 }
2694 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2695 target->coreid, armv7a->debug_base);
2696 } else
2697 armv7a->debug_base = target->dbgbase;
2698
2699 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2700 armv7a->debug_base + CPUDBG_DIDR, &didr);
2701 if (retval != ERROR_OK) {
2702 LOG_DEBUG("Examine %s failed", "DIDR");
2703 return retval;
2704 }
2705
2706 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2707 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2708 if (retval != ERROR_OK) {
2709 LOG_DEBUG("Examine %s failed", "CPUID");
2710 return retval;
2711 }
2712
2713 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2714 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2715
2716 cortex_a->didr = didr;
2717 cortex_a->cpuid = cpuid;
2718
2719 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2720 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2721 if (retval != ERROR_OK)
2722 return retval;
2723 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2724
2725 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2726 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2727 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2728 return ERROR_TARGET_INIT_FAILED;
2729 }
2730
2731 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2732 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2733
2734 /* Read DBGOSLSR and check if OSLK is implemented */
2735 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2736 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2737 if (retval != ERROR_OK)
2738 return retval;
2739 LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2740
2741 /* check if OS Lock is implemented */
2742 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2743 /* check if OS Lock is set */
2744 if (dbg_osreg & OSLSR_OSLK) {
2745 LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2746
2747 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2748 armv7a->debug_base + CPUDBG_OSLAR,
2749 0);
2750 if (retval == ERROR_OK)
2751 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2752 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2753
2754 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2755 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2756 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2757 target->coreid);
2758 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2759 return ERROR_TARGET_INIT_FAILED;
2760 }
2761 }
2762 }
2763
2764 armv7a->arm.core_type = ARM_MODE_MON;
2765
2766 /* Avoid recreating the registers cache */
2767 if (!target_was_examined(target)) {
2768 retval = cortex_a_dpm_setup(cortex_a, didr);
2769 if (retval != ERROR_OK)
2770 return retval;
2771 }
2772
2773 /* Setup Breakpoint Register Pairs */
2774 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2775 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
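/* For example, DIDR[27:24] = 0x5 and DIDR[23:20] = 0x1 would yield six
 * breakpoint register pairs, two of which support context matching. */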
2776 cortex_a->brp_num_available = cortex_a->brp_num;
2777 free(cortex_a->brp_list);
2778 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2779 /* cortex_a->brb_enabled = ????; */
2780 for (i = 0; i < cortex_a->brp_num; i++) {
2781 cortex_a->brp_list[i].used = 0;
2782 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2783 cortex_a->brp_list[i].type = BRP_NORMAL;
2784 else
2785 cortex_a->brp_list[i].type = BRP_CONTEXT;
2786 cortex_a->brp_list[i].value = 0;
2787 cortex_a->brp_list[i].control = 0;
2788 cortex_a->brp_list[i].BRPn = i;
2789 }
2790
2791 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2792
2793 /* select debug_ap as default */
2794 swjdp->apsel = armv7a->debug_ap->ap_num;
2795
2796 target_set_examined(target);
2797 return ERROR_OK;
2798 }
2799
2800 static int cortex_a_examine(struct target *target)
2801 {
2802 int retval = ERROR_OK;
2803
2804 /* Reestablish communication after target reset */
2805 retval = cortex_a_examine_first(target);
2806
2807 /* Configure core debug access */
2808 if (retval == ERROR_OK)
2809 retval = cortex_a_init_debug_access(target);
2810
2811 return retval;
2812 }
2813
2814 /*
2815 * Cortex-A target creation and initialization
2816 */
2817
2818 static int cortex_a_init_target(struct command_context *cmd_ctx,
2819 struct target *target)
2820 {
2821 /* examine_first() does a bunch of this */
2822 arm_semihosting_init(target);
2823 return ERROR_OK;
2824 }
2825
2826 static int cortex_a_init_arch_info(struct target *target,
2827 struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
2828 {
2829 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2830
2831 /* Setup struct cortex_a_common */
2832 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2833 armv7a->arm.dap = dap;
2834
2835 /* register arch-specific functions */
2836 armv7a->examine_debug_reason = NULL;
2837
2838 armv7a->post_debug_entry = cortex_a_post_debug_entry;
2839
2840 armv7a->pre_restore_context = NULL;
2841
2842 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
2843
2844
2845 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
2846
2847 /* REVISIT v7a setup should be in a v7a-specific routine */
2848 armv7a_init_arch_info(target, armv7a);
2849 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
2850
2851 return ERROR_OK;
2852 }
2853
2854 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
2855 {
2856 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
2857 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2858 struct adiv5_private_config *pc;
2859
2860 if (target->private_config == NULL)
2861 return ERROR_FAIL;
2862
2863 pc = (struct adiv5_private_config *)target->private_config;
2864
2865 cortex_a->armv7a_common.is_armv7r = false;
2866
2867 cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
2868
2869 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2870 }
2871
2872 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2873 {
2874 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
2875 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2876 struct adiv5_private_config *pc;
2877
2878 pc = (struct adiv5_private_config *)target->private_config;
2879 if (adiv5_verify_config(pc) != ERROR_OK)
2880 return ERROR_FAIL;
2881
2882 cortex_a->armv7a_common.is_armv7r = true;
2883
2884 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2885 }
2886
2887 static void cortex_a_deinit_target(struct target *target)
2888 {
2889 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2890 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2891 struct arm_dpm *dpm = &armv7a->dpm;
2892 uint32_t dscr;
2893 int retval;
2894
2895 if (target_was_examined(target)) {
2896 /* Disable halt for breakpoint, watchpoint and vector catch */
2897 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2898 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2899 if (retval == ERROR_OK)
2900 mem_ap_write_atomic_u32(armv7a->debug_ap,
2901 armv7a->debug_base + CPUDBG_DSCR,
2902 dscr & ~DSCR_HALT_DBG_MODE);
2903 }
2904
2905 free(cortex_a->brp_list);
2906 free(dpm->dbp);
2907 free(dpm->dwp);
2908 free(target->private_config);
2909 free(cortex_a);
2910 }
2911
2912 static int cortex_a_mmu(struct target *target, int *enabled)
2913 {
2914 struct armv7a_common *armv7a = target_to_armv7a(target);
2915
2916 if (target->state != TARGET_HALTED) {
2917 LOG_ERROR("%s: target not halted", __func__);
2918 return ERROR_TARGET_INVALID;
2919 }
2920
2921 if (armv7a->is_armv7r)
2922 *enabled = 0;
2923 else
2924 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2925
2926 return ERROR_OK;
2927 }
2928
2929 static int cortex_a_virt2phys(struct target *target,
2930 target_addr_t virt, target_addr_t *phys)
2931 {
2932 int retval;
2933 int mmu_enabled = 0;
2934
2935 /*
2936 * If the MMU was not enabled at debug entry, there is no
2937 * way of knowing if there was ever a valid configuration
2938 * for it and thus it's not safe to enable it. In this case,
2939 * just return the virtual address as physical.
2940 */
2941 cortex_a_mmu(target, &mmu_enabled);
2942 if (!mmu_enabled) {
2943 *phys = virt;
2944 return ERROR_OK;
2945 }
2946
2947 /* The MMU must be enabled in order to get a correct translation */
2948 retval = cortex_a_mmu_modify(target, 1);
2949 if (retval != ERROR_OK)
2950 return retval;
2951 return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
2952 (uint32_t *)phys, 1);
2953 }
2954
2955 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
2956 {
2957 struct target *target = get_current_target(CMD_CTX);
2958 struct armv7a_common *armv7a = target_to_armv7a(target);
2959
2960 return armv7a_handle_cache_info_command(CMD_CTX,
2961 &armv7a->armv7a_mmu.armv7a_cache);
2962 }
2963
2964
2965 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
2966 {
2967 struct target *target = get_current_target(CMD_CTX);
2968 if (!target_was_examined(target)) {
2969 LOG_ERROR("target not examined yet");
2970 return ERROR_FAIL;
2971 }
2972
2973 return cortex_a_init_debug_access(target);
2974 }
2975 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
2976 {
2977 struct target *target = get_current_target(CMD_CTX);
2978 /* check that the target is an SMP target */
2979 struct target_list *head;
2980 struct target *curr;
2981 head = target->head;
2982 target->smp = 0;
2983 if (head != (struct target_list *)NULL) {
2984 while (head != (struct target_list *)NULL) {
2985 curr = head->target;
2986 curr->smp = 0;
2987 head = head->next;
2988 }
2989 /* fixes the target display to the debugger */
2990 target->gdb_service->target = target;
2991 }
2992 return ERROR_OK;
2993 }
2994
2995 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
2996 {
2997 struct target *target = get_current_target(CMD_CTX);
2998 struct target_list *head;
2999 struct target *curr;
3000 head = target->head;
3001 if (head != (struct target_list *)NULL) {
3002 target->smp = 1;
3003 while (head != (struct target_list *)NULL) {
3004 curr = head->target;
3005 curr->smp = 1;
3006 head = head->next;
3007 }
3008 }
3009 return ERROR_OK;
3010 }
3011
3012 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3013 {
3014 struct target *target = get_current_target(CMD_CTX);
3015 int retval = ERROR_OK;
3016 struct target_list *head;
3017 head = target->head;
3018 if (head != (struct target_list *)NULL) {
3019 if (CMD_ARGC == 1) {
3020 int coreid = 0;
3021 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3022 if (ERROR_OK != retval)
3023 return retval;
3024 target->gdb_service->core[1] = coreid;
3025
3026 }
3027 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32,
3028 target->gdb_service->core[0], target->gdb_service->core[1]);
3029 }
3030 return ERROR_OK;
3031 }
3032
3033 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3034 {
3035 struct target *target = get_current_target(CMD_CTX);
3036 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3037
3038 static const Jim_Nvp nvp_maskisr_modes[] = {
3039 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3040 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3041 { .name = NULL, .value = -1 },
3042 };
3043 const Jim_Nvp *n;
3044
3045 if (CMD_ARGC > 0) {
3046 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3047 if (n->name == NULL) {
3048 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3049 return ERROR_COMMAND_SYNTAX_ERROR;
3050 }
3051
3052 cortex_a->isrmasking_mode = n->value;
3053 }
3054
3055 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3056 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3057
3058 return ERROR_OK;
3059 }
3060
3061 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3062 {
3063 struct target *target = get_current_target(CMD_CTX);
3064 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3065
3066 static const Jim_Nvp nvp_dacrfixup_modes[] = {
3067 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3068 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3069 { .name = NULL, .value = -1 },
3070 };
3071 const Jim_Nvp *n;
3072
3073 if (CMD_ARGC > 0) {
3074 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3075 if (n->name == NULL)
3076 return ERROR_COMMAND_SYNTAX_ERROR;
3077 cortex_a->dacrfixup_mode = n->value;
3078
3079 }
3080
3081 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3082 command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3083
3084 return ERROR_OK;
3085 }
3086
3087 static const struct command_registration cortex_a_exec_command_handlers[] = {
3088 {
3089 .name = "cache_info",
3090 .handler = cortex_a_handle_cache_info_command,
3091 .mode = COMMAND_EXEC,
3092 .help = "display information about target caches",
3093 .usage = "",
3094 },
3095 {
3096 .name = "dbginit",
3097 .handler = cortex_a_handle_dbginit_command,
3098 .mode = COMMAND_EXEC,
3099 .help = "Initialize core debug",
3100 .usage = "",
3101 },<