1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 √ėyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex-R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 * *
39 * Cortex-A8(tm) TRM, ARM DDI 0344H *
40 * Cortex-A9(tm) TRM, ARM DDI 0407F *
41 * Cortex-R4(tm) TRM, ARM DDI 0363E *
42 * Cortex-A15(tm) TRM, ARM DDI 0438C *
43 * *
44 ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "armv7a_mmu.h"
54 #include "target_request.h"
55 #include "target_type.h"
56 #include "arm_opcodes.h"
57 #include "arm_semihosting.h"
58 #include "transport/transport.h"
59 #include <helper/time_support.h>
60
61 #define foreach_smp_target(pos, head) \
62 for (pos = head; (pos != NULL); pos = pos->next)
63
64 static int cortex_a_poll(struct target *target);
65 static int cortex_a_debug_entry(struct target *target);
66 static int cortex_a_restore_context(struct target *target, bool bpwp);
67 static int cortex_a_set_breakpoint(struct target *target,
68 struct breakpoint *breakpoint, uint8_t matchmode);
69 static int cortex_a_set_context_breakpoint(struct target *target,
70 struct breakpoint *breakpoint, uint8_t matchmode);
71 static int cortex_a_set_hybrid_breakpoint(struct target *target,
72 struct breakpoint *breakpoint);
73 static int cortex_a_unset_breakpoint(struct target *target,
74 struct breakpoint *breakpoint);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_mmu_modify(struct target *target, int enable);
77 static int cortex_a_virt2phys(struct target *target,
78 target_addr_t virt, target_addr_t *phys);
79 static int cortex_a_read_cpu_memory(struct target *target,
80 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
81
82
83 /* restore cp15_control_reg at resume */
84 static int cortex_a_restore_cp15_control_reg(struct target *target)
85 {
86 int retval = ERROR_OK;
87 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
88 struct armv7a_common *armv7a = target_to_armv7a(target);
89
90 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
91 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
92 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
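/* MCR p15, 0, <Rt>, c1, c0, 0 writes the System Control Register
 * (SCTLR); this restores the value saved by cortex_a_post_debug_entry()
 * when the core halted. */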
93 retval = armv7a->arm.mcr(target, 15,
94 0, 0, /* op1, op2 */
95 1, 0, /* CRn, CRm */
96 cortex_a->cp15_control_reg);
97 }
98 return retval;
99 }
100
101 /*
102 * Set up ARM core for memory access.
103 * If !phys_access, switch to SVC mode and make sure MMU is on
104 * If phys_access, switch off mmu
105 */
106 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
107 {
108 struct armv7a_common *armv7a = target_to_armv7a(target);
109 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
110 int mmu_enabled = 0;
111
112 if (phys_access == 0) {
113 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
114 cortex_a_mmu(target, &mmu_enabled);
115 if (mmu_enabled)
116 cortex_a_mmu_modify(target, 1);
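/* Note: DACR = 0xFFFFFFFF sets all 16 protection domains to "manager",
 * so access permission checks are bypassed while the debugger touches
 * virtual memory; cortex_a_post_memaccess() restores the DACR value
 * saved in cp15_dacr_reg. */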
117 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
118 /* overwrite DACR to all-manager */
119 armv7a->arm.mcr(target, 15,
120 0, 0, 3, 0,
121 0xFFFFFFFF);
122 }
123 } else {
124 cortex_a_mmu(target, &mmu_enabled);
125 if (mmu_enabled)
126 cortex_a_mmu_modify(target, 0);
127 }
128 return ERROR_OK;
129 }
130
131 /*
132 * Restore ARM core after memory access.
133 * If !phys_access, switch to previous mode
134 * If phys_access, restore MMU setting
135 */
136 static int cortex_a_post_memaccess(struct target *target, int phys_access)
137 {
138 struct armv7a_common *armv7a = target_to_armv7a(target);
139 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
140
141 if (phys_access == 0) {
142 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
143 /* restore */
144 armv7a->arm.mcr(target, 15,
145 0, 0, 3, 0,
146 cortex_a->cp15_dacr_reg);
147 }
148 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
149 } else {
150 int mmu_enabled = 0;
151 cortex_a_mmu(target, &mmu_enabled);
152 if (mmu_enabled)
153 cortex_a_mmu_modify(target, 1);
154 }
155 return ERROR_OK;
156 }
157
158
159 /* modify cp15_control_reg in order to enable or disable mmu for :
160 * - virt2phys address conversion
161 * - read or write memory in phys or virt address */
162 static int cortex_a_mmu_modify(struct target *target, int enable)
163 {
164 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
165 struct armv7a_common *armv7a = target_to_armv7a(target);
166 int retval = ERROR_OK;
167 int need_write = 0;
168
169 if (enable) {
170 /* the MMU cannot be enabled now if it was disabled when the target stopped */
171 if (!(cortex_a->cp15_control_reg & 0x1U)) {
172 LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
173 return ERROR_FAIL;
174 }
175 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
176 cortex_a->cp15_control_reg_curr |= 0x1U;
177 need_write = 1;
178 }
179 } else {
180 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
181 cortex_a->cp15_control_reg_curr &= ~0x1U;
182 need_write = 1;
183 }
184 }
185
186 if (need_write) {
187 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
188 enable ? "enable mmu" : "disable mmu",
189 cortex_a->cp15_control_reg_curr);
190
191 retval = armv7a->arm.mcr(target, 15,
192 0, 0, /* op1, op2 */
193 1, 0, /* CRn, CRm */
194 cortex_a->cp15_control_reg_curr);
195 }
196 return retval;
197 }
198
199 /*
200 * Cortex-A Basic debug access, very low level assumes state is saved
201 */
202 static int cortex_a_init_debug_access(struct target *target)
203 {
204 struct armv7a_common *armv7a = target_to_armv7a(target);
205 int retval;
206
207 /* lock memory-mapped access to debug registers to prevent
208 * software interference */
209 retval = mem_ap_write_u32(armv7a->debug_ap,
210 armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
211 if (retval != ERROR_OK)
212 return retval;
213
214 /* Disable cacheline fills and force cache write-through in debug state */
215 retval = mem_ap_write_u32(armv7a->debug_ap,
216 armv7a->debug_base + CPUDBG_DSCCR, 0);
217 if (retval != ERROR_OK)
218 return retval;
219
220 /* Disable TLB lookup and refill/eviction in debug state */
221 retval = mem_ap_write_u32(armv7a->debug_ap,
222 armv7a->debug_base + CPUDBG_DSMCR, 0);
223 if (retval != ERROR_OK)
224 return retval;
225
226 retval = dap_run(armv7a->debug_ap->dap);
227 if (retval != ERROR_OK)
228 return retval;
229
230 /* Enabling of instruction execution in debug mode is done in debug_entry code */
231
232 /* Resync breakpoint registers */
233
234 /* Since this is likely called from init or reset, update target state information */
235 return cortex_a_poll(target);
236 }
237
238 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
239 {
240 /* Waits until InstrCmpl_l becomes 1, indicating the instruction is done.
241 * Writes the final value of DSCR into *dscr. Pass force=true to read
242 * DSCR at least once, even if it already shows InstrCmpl set. */
243 struct armv7a_common *armv7a = target_to_armv7a(target);
244 int64_t then = timeval_ms();
245 while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
246 force = false;
247 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
248 armv7a->debug_base + CPUDBG_DSCR, dscr);
249 if (retval != ERROR_OK) {
250 LOG_ERROR("Could not read DSCR register");
251 return retval;
252 }
253 if (timeval_ms() > then + 1000) {
254 LOG_ERROR("Timeout waiting for InstrCompl=1");
255 return ERROR_FAIL;
256 }
257 }
258 return ERROR_OK;
259 }
260
261 /* To reduce needless round-trips, pass in a pointer to the current
262 * DSCR value. Initialize it to zero if you just need to know the
263 * value on return from this function; or DSCR_INSTR_COMP if you
264 * happen to know that no instruction is pending.
265 */
266 static int cortex_a_exec_opcode(struct target *target,
267 uint32_t opcode, uint32_t *dscr_p)
268 {
269 uint32_t dscr;
270 int retval;
271 struct armv7a_common *armv7a = target_to_armv7a(target);
272
273 dscr = dscr_p ? *dscr_p : 0;
274
275 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
276
277 /* Wait for InstrCompl bit to be set */
278 retval = cortex_a_wait_instrcmpl(target, &dscr, false);
279 if (retval != ERROR_OK)
280 return retval;
281
282 retval = mem_ap_write_u32(armv7a->debug_ap,
283 armv7a->debug_base + CPUDBG_ITR, opcode);
284 if (retval != ERROR_OK)
285 return retval;
286
287 int64_t then = timeval_ms();
288 do {
289 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
290 armv7a->debug_base + CPUDBG_DSCR, &dscr);
291 if (retval != ERROR_OK) {
292 LOG_ERROR("Could not read DSCR register");
293 return retval;
294 }
295 if (timeval_ms() > then + 1000) {
296 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
297 return ERROR_FAIL;
298 }
299 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
300
301 if (dscr_p)
302 *dscr_p = dscr;
303
304 return retval;
305 }
306
307 /* Write to memory mapped registers directly with no cache or mmu handling */
308 static int cortex_a_dap_write_memap_register_u32(struct target *target,
309 uint32_t address,
310 uint32_t value)
311 {
312 int retval;
313 struct armv7a_common *armv7a = target_to_armv7a(target);
314
315 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
316
317 return retval;
318 }
319
320 /*
321 * Cortex-A implementation of Debug Programmer's Model
322 *
323 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
324 * so there's no need to poll for it before executing an instruction.
325 *
326 * NOTE that in several of these cases the "stall" mode might be useful.
327 * It'd let us queue a few operations together... prepare/finish might
328 * be the places to enable/disable that mode.
329 */
330
331 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
332 {
333 return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
334 }
335
336 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
337 {
338 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
339 return mem_ap_write_u32(a->armv7a_common.debug_ap,
340 a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
341 }
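/* Naming note: DTRRX and DTRTX are named from the core's perspective.
 * The debugger writes CPUDBG_DTRRX (data the core will receive) and
 * reads CPUDBG_DTRTX (data the core has transmitted); the DSCR flags
 * DSCR_DTR_RX_FULL and DSCR_DTR_TX_FULL track each direction. */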
342
343 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
344 uint32_t *dscr_p)
345 {
346 uint32_t dscr = DSCR_INSTR_COMP;
347 int retval;
348
349 if (dscr_p)
350 dscr = *dscr_p;
351
352 /* Wait for DTRRXfull */
353 int64_t then = timeval_ms();
354 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
355 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
356 a->armv7a_common.debug_base + CPUDBG_DSCR,
357 &dscr);
358 if (retval != ERROR_OK)
359 return retval;
360 if (timeval_ms() > then + 1000) {
361 LOG_ERROR("Timeout waiting for read dcc");
362 return ERROR_FAIL;
363 }
364 }
365
366 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
367 a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
368 if (retval != ERROR_OK)
369 return retval;
370 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
371
372 if (dscr_p)
373 *dscr_p = dscr;
374
375 return retval;
376 }
377
378 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
379 {
380 struct cortex_a_common *a = dpm_to_a(dpm);
381 uint32_t dscr;
382 int retval;
383
384 /* set up invariant: INSTR_COMP is set after every DPM operation */
385 int64_t then = timeval_ms();
386 for (;; ) {
387 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
388 a->armv7a_common.debug_base + CPUDBG_DSCR,
389 &dscr);
390 if (retval != ERROR_OK)
391 return retval;
392 if ((dscr & DSCR_INSTR_COMP) != 0)
393 break;
394 if (timeval_ms() > then + 1000) {
395 LOG_ERROR("Timeout waiting for dpm prepare");
396 return ERROR_FAIL;
397 }
398 }
399
400 /* this "should never happen" ... */
401 if (dscr & DSCR_DTR_RX_FULL) {
402 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
403 /* Clear DCCRX */
404 retval = cortex_a_exec_opcode(
405 a->armv7a_common.arm.target,
406 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
407 &dscr);
408 if (retval != ERROR_OK)
409 return retval;
410 }
411
412 return retval;
413 }
414
415 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
416 {
417 /* REVISIT what could be done here? */
418 return ERROR_OK;
419 }
420
421 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
422 uint32_t opcode, uint32_t data)
423 {
424 struct cortex_a_common *a = dpm_to_a(dpm);
425 int retval;
426 uint32_t dscr = DSCR_INSTR_COMP;
427
428 retval = cortex_a_write_dcc(a, data);
429 if (retval != ERROR_OK)
430 return retval;
431
432 return cortex_a_exec_opcode(
433 a->armv7a_common.arm.target,
434 opcode,
435 &dscr);
436 }
437
438 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
439 uint32_t opcode, uint32_t data)
440 {
441 struct cortex_a_common *a = dpm_to_a(dpm);
442 uint32_t dscr = DSCR_INSTR_COMP;
443 int retval;
444
445 retval = cortex_a_write_dcc(a, data);
446 if (retval != ERROR_OK)
447 return retval;
448
449 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
450 retval = cortex_a_exec_opcode(
451 a->armv7a_common.arm.target,
452 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
453 &dscr);
454 if (retval != ERROR_OK)
455 return retval;
456
457 /* then the opcode, taking data from R0 */
458 retval = cortex_a_exec_opcode(
459 a->armv7a_common.arm.target,
460 opcode,
461 &dscr);
462
463 return retval;
464 }
465
466 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
467 {
468 struct target *target = dpm->arm->target;
469 uint32_t dscr = DSCR_INSTR_COMP;
470
471 /* "Prefetch flush" after modifying execution status in CPSR */
472 return cortex_a_exec_opcode(target,
473 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
474 &dscr);
475 }
476
477 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
478 uint32_t opcode, uint32_t *data)
479 {
480 struct cortex_a_common *a = dpm_to_a(dpm);
481 int retval;
482 uint32_t dscr = DSCR_INSTR_COMP;
483
484 /* the opcode, writing data to DCC */
485 retval = cortex_a_exec_opcode(
486 a->armv7a_common.arm.target,
487 opcode,
488 &dscr);
489 if (retval != ERROR_OK)
490 return retval;
491
492 return cortex_a_read_dcc(a, data, &dscr);
493 }
494
495
496 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
497 uint32_t opcode, uint32_t *data)
498 {
499 struct cortex_a_common *a = dpm_to_a(dpm);
500 uint32_t dscr = DSCR_INSTR_COMP;
501 int retval;
502
503 /* the opcode, writing data to R0 */
504 retval = cortex_a_exec_opcode(
505 a->armv7a_common.arm.target,
506 opcode,
507 &dscr);
508 if (retval != ERROR_OK)
509 return retval;
510
511 /* write R0 to DCC */
512 retval = cortex_a_exec_opcode(
513 a->armv7a_common.arm.target,
514 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
515 &dscr);
516 if (retval != ERROR_OK)
517 return retval;
518
519 return cortex_a_read_dcc(a, data, &dscr);
520 }
521
522 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
523 uint32_t addr, uint32_t control)
524 {
525 struct cortex_a_common *a = dpm_to_a(dpm);
526 uint32_t vr = a->armv7a_common.debug_base;
527 uint32_t cr = a->armv7a_common.debug_base;
528 int retval;
529
530 switch (index_t) {
531 case 0 ... 15: /* breakpoints */
532 vr += CPUDBG_BVR_BASE;
533 cr += CPUDBG_BCR_BASE;
534 break;
535 case 16 ... 31: /* watchpoints */
536 vr += CPUDBG_WVR_BASE;
537 cr += CPUDBG_WCR_BASE;
538 index_t -= 16;
539 break;
540 default:
541 return ERROR_FAIL;
542 }
543 vr += 4 * index_t;
544 cr += 4 * index_t;
545
546 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
547 (unsigned) vr, (unsigned) cr);
548
549 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
550 vr, addr);
551 if (retval != ERROR_OK)
552 return retval;
553 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
554 cr, control);
555 return retval;
556 }
557
558 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
559 {
560 struct cortex_a_common *a = dpm_to_a(dpm);
561 uint32_t cr;
562
563 switch (index_t) {
564 case 0 ... 15:
565 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
566 break;
567 case 16 ... 31:
568 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
569 index_t -= 16;
570 break;
571 default:
572 return ERROR_FAIL;
573 }
574 cr += 4 * index_t;
575
576 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
577
578 /* clear control register */
579 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
580 }
581
582 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
583 {
584 struct arm_dpm *dpm = &a->armv7a_common.dpm;
585 int retval;
586
587 dpm->arm = &a->armv7a_common.arm;
588 dpm->didr = didr;
589
590 dpm->prepare = cortex_a_dpm_prepare;
591 dpm->finish = cortex_a_dpm_finish;
592
593 dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
594 dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
595 dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
596
597 dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
598 dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
599
600 dpm->bpwp_enable = cortex_a_bpwp_enable;
601 dpm->bpwp_disable = cortex_a_bpwp_disable;
602
603 retval = arm_dpm_setup(dpm);
604 if (retval == ERROR_OK)
605 retval = arm_dpm_initialize(dpm);
606
607 return retval;
608 }
609 static struct target *get_cortex_a(struct target *target, int32_t coreid)
610 {
611 struct target_list *head;
612 struct target *curr;
613
614 head = target->head;
615 while (head != (struct target_list *)NULL) {
616 curr = head->target;
617 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
618 return curr;
619 head = head->next;
620 }
621 return target;
622 }
623 static int cortex_a_halt(struct target *target);
624
625 static int cortex_a_halt_smp(struct target *target)
626 {
627 int retval = 0;
628 struct target_list *head;
629 struct target *curr;
630 head = target->head;
631 while (head != (struct target_list *)NULL) {
632 curr = head->target;
633 if ((curr != target) && (curr->state != TARGET_HALTED)
634 && target_was_examined(curr))
635 retval += cortex_a_halt(curr);
636 head = head->next;
637 }
638 return retval;
639 }
640
641 static int update_halt_gdb(struct target *target)
642 {
643 struct target *gdb_target = NULL;
644 struct target_list *head;
645 struct target *curr;
646 int retval = 0;
647
648 if (target->gdb_service && target->gdb_service->core[0] == -1) {
649 target->gdb_service->target = target;
650 target->gdb_service->core[0] = target->coreid;
651 retval += cortex_a_halt_smp(target);
652 }
653
654 if (target->gdb_service)
655 gdb_target = target->gdb_service->target;
656
657 foreach_smp_target(head, target->head) {
658 curr = head->target;
659 /* skip calling context */
660 if (curr == target)
661 continue;
662 if (!target_was_examined(curr))
663 continue;
664 /* skip targets that were already halted */
665 if (curr->state == TARGET_HALTED)
666 continue;
667 /* Skip gdb_target; it alerts GDB so has to be polled as last one */
668 if (curr == gdb_target)
669 continue;
670
671 /* avoid recursion in cortex_a_poll() */
672 curr->smp = 0;
673 cortex_a_poll(curr);
674 curr->smp = 1;
675 }
676
677 /* after all targets were updated, poll the gdb serving target */
678 if (gdb_target != NULL && gdb_target != target)
679 cortex_a_poll(gdb_target);
680 return retval;
681 }
682
683 /*
684 * Cortex-A Run control
685 */
686
687 static int cortex_a_poll(struct target *target)
688 {
689 int retval = ERROR_OK;
690 uint32_t dscr;
691 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
692 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
693 enum target_state prev_target_state = target->state;
694 /* toggling to another core is done by gdb as follows: */
695 /* maint packet J core_id */
696 /* continue */
697 /* the next poll triggers a halt event sent to gdb */
698 if ((target->state == TARGET_HALTED) && (target->smp) &&
699 (target->gdb_service) &&
700 (target->gdb_service->target == NULL)) {
701 target->gdb_service->target =
702 get_cortex_a(target, target->gdb_service->core[1]);
703 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
704 return retval;
705 }
706 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
707 armv7a->debug_base + CPUDBG_DSCR, &dscr);
708 if (retval != ERROR_OK)
709 return retval;
710 cortex_a->cpudbg_dscr = dscr;
711
712 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
713 if (prev_target_state != TARGET_HALTED) {
714 /* We have a halting debug event */
715 LOG_DEBUG("Target halted");
716 target->state = TARGET_HALTED;
717 if ((prev_target_state == TARGET_RUNNING)
718 || (prev_target_state == TARGET_UNKNOWN)
719 || (prev_target_state == TARGET_RESET)) {
720 retval = cortex_a_debug_entry(target);
721 if (retval != ERROR_OK)
722 return retval;
723 if (target->smp) {
724 retval = update_halt_gdb(target);
725 if (retval != ERROR_OK)
726 return retval;
727 }
728
729 if (arm_semihosting(target, &retval) != 0)
730 return retval;
731
732 target_call_event_callbacks(target,
733 TARGET_EVENT_HALTED);
734 }
735 if (prev_target_state == TARGET_DEBUG_RUNNING) {
736 LOG_DEBUG(" ");
737
738 retval = cortex_a_debug_entry(target);
739 if (retval != ERROR_OK)
740 return retval;
741 if (target->smp) {
742 retval = update_halt_gdb(target);
743 if (retval != ERROR_OK)
744 return retval;
745 }
746
747 target_call_event_callbacks(target,
748 TARGET_EVENT_DEBUG_HALTED);
749 }
750 }
751 } else
752 target->state = TARGET_RUNNING;
753
754 return retval;
755 }
756
757 static int cortex_a_halt(struct target *target)
758 {
759 int retval = ERROR_OK;
760 uint32_t dscr;
761 struct armv7a_common *armv7a = target_to_armv7a(target);
762
763 /*
764 * Tell the core to be halted by writing DRCR with 0x1
765 * and then wait for the core to be halted.
766 */
767 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
768 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
769 if (retval != ERROR_OK)
770 return retval;
771
772 /*
773 * enter halting debug mode
774 */
775 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
776 armv7a->debug_base + CPUDBG_DSCR, &dscr);
777 if (retval != ERROR_OK)
778 return retval;
779
780 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
781 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
782 if (retval != ERROR_OK)
783 return retval;
784
785 int64_t then = timeval_ms();
786 for (;; ) {
787 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
788 armv7a->debug_base + CPUDBG_DSCR, &dscr);
789 if (retval != ERROR_OK)
790 return retval;
791 if ((dscr & DSCR_CORE_HALTED) != 0)
792 break;
793 if (timeval_ms() > then + 1000) {
794 LOG_ERROR("Timeout waiting for halt");
795 return ERROR_FAIL;
796 }
797 }
798
799 target->debug_reason = DBG_REASON_DBGRQ;
800
801 return ERROR_OK;
802 }
803
804 static int cortex_a_internal_restore(struct target *target, int current,
805 target_addr_t *address, int handle_breakpoints, int debug_execution)
806 {
807 struct armv7a_common *armv7a = target_to_armv7a(target);
808 struct arm *arm = &armv7a->arm;
809 int retval;
810 uint32_t resume_pc;
811
812 if (!debug_execution)
813 target_free_all_working_areas(target);
814
815 #if 0
816 if (debug_execution) {
817 /* Disable interrupts */
818 /* We disable interrupts in the PRIMASK register instead of
819 * masking with C_MASKINTS,
820 * This is probably the same issue as Cortex-M3 Errata 377493:
821 * C_MASKINTS in parallel with disabled interrupts can cause
822 * local faults to not be taken. */
823 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
824 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
825 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
826
827 /* Make sure we are in Thumb mode */
828 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
829 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
830 32) | (1 << 24));
831 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
832 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
833 }
834 #endif
835
836 /* current = 1: continue on current pc, otherwise continue at <address> */
837 resume_pc = buf_get_u32(arm->pc->value, 0, 32);
838 if (!current)
839 resume_pc = *address;
840 else
841 *address = resume_pc;
842
843 /* Make sure that the Armv7 gdb thumb fixups do not
844 * kill the return address
845 */
846 switch (arm->core_state) {
847 case ARM_STATE_ARM:
848 resume_pc &= 0xFFFFFFFC;
849 break;
850 case ARM_STATE_THUMB:
851 case ARM_STATE_THUMB_EE:
852 /* When the return address is loaded into PC
853 * bit 0 must be 1 to stay in Thumb state
854 */
855 resume_pc |= 0x1;
856 break;
857 case ARM_STATE_JAZELLE:
858 LOG_ERROR("How do I resume into Jazelle state??");
859 return ERROR_FAIL;
860 case ARM_STATE_AARCH64:
861 LOG_ERROR("Shoudn't be in AARCH64 state");
862 return ERROR_FAIL;
863 }
864 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
865 buf_set_u32(arm->pc->value, 0, 32, resume_pc);
866 arm->pc->dirty = 1;
867 arm->pc->valid = 1;
868
869 /* restore dpm_mode at system halt */
870 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
871 /* call it now, before restoring the context, because it uses CPU
872 * register r0 to restore the cp15 control register */
873 retval = cortex_a_restore_cp15_control_reg(target);
874 if (retval != ERROR_OK)
875 return retval;
876 retval = cortex_a_restore_context(target, handle_breakpoints);
877 if (retval != ERROR_OK)
878 return retval;
879 target->debug_reason = DBG_REASON_NOTHALTED;
880 target->state = TARGET_RUNNING;
881
882 /* registers are now invalid */
883 register_cache_invalidate(arm->core_cache);
884
885 #if 0
886 /* the front-end may request us not to handle breakpoints */
887 if (handle_breakpoints) {
888 /* Single step past breakpoint at current address */
889 breakpoint = breakpoint_find(target, resume_pc);
890 if (breakpoint) {
891 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
892 cortex_m3_unset_breakpoint(target, breakpoint);
893 cortex_m3_single_step_core(target);
894 cortex_m3_set_breakpoint(target, breakpoint);
895 }
896 }
897
898 #endif
899 return retval;
900 }
901
902 static int cortex_a_internal_restart(struct target *target)
903 {
904 struct armv7a_common *armv7a = target_to_armv7a(target);
905 struct arm *arm = &armv7a->arm;
906 int retval;
907 uint32_t dscr;
908 /*
909 * Restart core and wait for it to be started. Clear ITRen and sticky
910 * exception flags: see ARMv7 ARM, C5.9.
911 *
912 * REVISIT: for single stepping, we probably want to
913 * disable IRQs by default, with optional override...
914 */
915
916 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
917 armv7a->debug_base + CPUDBG_DSCR, &dscr);
918 if (retval != ERROR_OK)
919 return retval;
920
921 if ((dscr & DSCR_INSTR_COMP) == 0)
922 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
923
924 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
925 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
926 if (retval != ERROR_OK)
927 return retval;
928
929 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
930 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
931 DRCR_CLEAR_EXCEPTIONS);
932 if (retval != ERROR_OK)
933 return retval;
934
935 int64_t then = timeval_ms();
936 for (;; ) {
937 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
938 armv7a->debug_base + CPUDBG_DSCR, &dscr);
939 if (retval != ERROR_OK)
940 return retval;
941 if ((dscr & DSCR_CORE_RESTARTED) != 0)
942 break;
943 if (timeval_ms() > then + 1000) {
944 LOG_ERROR("Timeout waiting for resume");
945 return ERROR_FAIL;
946 }
947 }
948
949 target->debug_reason = DBG_REASON_NOTHALTED;
950 target->state = TARGET_RUNNING;
951
952 /* registers are now invalid */
953 register_cache_invalidate(arm->core_cache);
954
955 return ERROR_OK;
956 }
957
958 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
959 {
960 int retval = 0;
961 struct target_list *head;
962 struct target *curr;
963 target_addr_t address;
964 head = target->head;
965 while (head != (struct target_list *)NULL) {
966 curr = head->target;
967 if ((curr != target) && (curr->state != TARGET_RUNNING)
968 && target_was_examined(curr)) {
969 /* resume at current address, not in step mode */
970 retval += cortex_a_internal_restore(curr, 1, &address,
971 handle_breakpoints, 0);
972 retval += cortex_a_internal_restart(curr);
973 }
974 head = head->next;
975
976 }
977 return retval;
978 }
979
980 static int cortex_a_resume(struct target *target, int current,
981 target_addr_t address, int handle_breakpoints, int debug_execution)
982 {
983 int retval = 0;
984 /* dummy resume for smp toggle in order to reduce gdb impact */
985 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
986 /* simulate a start and halt of target */
987 target->gdb_service->target = NULL;
988 target->gdb_service->core[0] = target->gdb_service->core[1];
989 /* fake resume: at the next poll we play target core[1], see poll */
990 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
991 return 0;
992 }
993 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
994 if (target->smp) {
995 target->gdb_service->core[0] = -1;
996 retval = cortex_a_restore_smp(target, handle_breakpoints);
997 if (retval != ERROR_OK)
998 return retval;
999 }
1000 cortex_a_internal_restart(target);
1001
1002 if (!debug_execution) {
1003 target->state = TARGET_RUNNING;
1004 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1005 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
1006 } else {
1007 target->state = TARGET_DEBUG_RUNNING;
1008 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1009 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
1010 }
1011
1012 return ERROR_OK;
1013 }
1014
1015 static int cortex_a_debug_entry(struct target *target)
1016 {
1017 uint32_t dscr;
1018 int retval = ERROR_OK;
1019 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1020 struct armv7a_common *armv7a = target_to_armv7a(target);
1021 struct arm *arm = &armv7a->arm;
1022
1023 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);
1024
1025 /* REVISIT surely we should not re-read DSCR !! */
1026 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1027 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1028 if (retval != ERROR_OK)
1029 return retval;
1030
1031 /* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
1032 * imprecise data aborts get discarded by issuing a Data
1033 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1034 */
1035
1036 /* Enable the ITR execution once we are in debug mode */
1037 dscr |= DSCR_ITR_EN;
1038 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1039 armv7a->debug_base + CPUDBG_DSCR, dscr);
1040 if (retval != ERROR_OK)
1041 return retval;
1042
1043 /* Examine debug reason */
1044 arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);
1045
1046 /* save address of instruction that triggered the watchpoint? */
1047 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1048 uint32_t wfar;
1049
1050 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1051 armv7a->debug_base + CPUDBG_WFAR,
1052 &wfar);
1053 if (retval != ERROR_OK)
1054 return retval;
1055 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1056 }
1057
1058 /* First load register accessible through core debug port */
1059 retval = arm_dpm_read_current_registers(&armv7a->dpm);
1060 if (retval != ERROR_OK)
1061 return retval;
1062
1063 if (arm->spsr) {
1064 /* read SPSR */
1065 retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
1066 if (retval != ERROR_OK)
1067 return retval;
1068 }
1069
1070 #if 0
1071 /* TODO, Move this */
1072 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1073 cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1074 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1075
1076 cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1077 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1078
1079 cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1080 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1081 #endif
1082
1083 /* Are we in an exception handler */
1084 /* armv4_5->exception_number = 0; */
1085 if (armv7a->post_debug_entry) {
1086 retval = armv7a->post_debug_entry(target);
1087 if (retval != ERROR_OK)
1088 return retval;
1089 }
1090
1091 return retval;
1092 }
1093
1094 static int cortex_a_post_debug_entry(struct target *target)
1095 {
1096 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1097 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1098 int retval;
1099
1100 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1101 retval = armv7a->arm.mrc(target, 15,
1102 0, 0, /* op1, op2 */
1103 1, 0, /* CRn, CRm */
1104 &cortex_a->cp15_control_reg);
1105 if (retval != ERROR_OK)
1106 return retval;
1107 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1108 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1109
1110 if (!armv7a->is_armv7r)
1111 armv7a_read_ttbcr(target);
1112
1113 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1114 armv7a_identify_cache(target);
1115
1116 if (armv7a->is_armv7r) {
1117 armv7a->armv7a_mmu.mmu_enabled = 0;
1118 } else {
1119 armv7a->armv7a_mmu.mmu_enabled =
1120 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1121 }
1122 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1123 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1124 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1125 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1126 cortex_a->curr_mode = armv7a->arm.core_mode;
1127
1128 /* switch to SVC mode to read DACR */
1129 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1130 armv7a->arm.mrc(target, 15,
1131 0, 0, 3, 0,
1132 &cortex_a->cp15_dacr_reg);
1133
1134 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1135 cortex_a->cp15_dacr_reg);
1136
1137 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1138 return ERROR_OK;
1139 }
1140
1141 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1142 {
1143 struct armv7a_common *armv7a = target_to_armv7a(target);
1144 uint32_t dscr;
1145
1146 /* Read DSCR */
1147 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1148 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1149 if (ERROR_OK != retval)
1150 return retval;
1151
1152 /* clear bitfield */
1153 dscr &= ~bit_mask;
1154 /* put new value */
1155 dscr |= value & bit_mask;
1156
1157 /* write new DSCR */
1158 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1159 armv7a->debug_base + CPUDBG_DSCR, dscr);
1160 return retval;
1161 }
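/* Typical use (see cortex_a_step below): mask interrupts around a single
 * step with cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS)
 * and clear the bit again with a value of 0 once the step completed. */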
1162
1163 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1164 int handle_breakpoints)
1165 {
1166 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1167 struct armv7a_common *armv7a = target_to_armv7a(target);
1168 struct arm *arm = &armv7a->arm;
1169 struct breakpoint *breakpoint = NULL;
1170 struct breakpoint stepbreakpoint;
1171 struct reg *r;
1172 int retval;
1173
1174 if (target->state != TARGET_HALTED) {
1175 LOG_WARNING("target not halted");
1176 return ERROR_TARGET_NOT_HALTED;
1177 }
1178
1179 /* current = 1: continue on current pc, otherwise continue at <address> */
1180 r = arm->pc;
1181 if (!current)
1182 buf_set_u32(r->value, 0, 32, address);
1183 else
1184 address = buf_get_u32(r->value, 0, 32);
1185
1186 /* The front-end may request us not to handle breakpoints.
1187 * But since Cortex-A uses breakpoint for single step,
1188 * we MUST handle breakpoints.
1189 */
1190 handle_breakpoints = 1;
1191 if (handle_breakpoints) {
1192 breakpoint = breakpoint_find(target, address);
1193 if (breakpoint)
1194 cortex_a_unset_breakpoint(target, breakpoint);
1195 }
1196
1197 /* Setup single step breakpoint */
1198 stepbreakpoint.address = address;
1199 stepbreakpoint.asid = 0;
1200 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1201 ? 2 : 4;
1202 stepbreakpoint.type = BKPT_HARD;
1203 stepbreakpoint.set = 0;
1204
1205 /* Disable interrupts during single step if requested */
1206 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1207 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1208 if (ERROR_OK != retval)
1209 return retval;
1210 }
1211
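/* Single stepping is implemented as "break on address mismatch": the
 * hardware breakpoint set below with matchmode 0x04 fires on the first
 * instruction fetched from any address other than the current one,
 * i.e. right after exactly one instruction has executed. */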
1212 /* Break on IVA mismatch */
1213 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1214
1215 target->debug_reason = DBG_REASON_SINGLESTEP;
1216
1217 retval = cortex_a_resume(target, 1, address, 0, 0);
1218 if (retval != ERROR_OK)
1219 return retval;
1220
1221 int64_t then = timeval_ms();
1222 while (target->state != TARGET_HALTED) {
1223 retval = cortex_a_poll(target);
1224 if (retval != ERROR_OK)
1225 return retval;
1226 if (timeval_ms() > then + 1000) {
1227 LOG_ERROR("timeout waiting for target halt");
1228 return ERROR_FAIL;
1229 }
1230 }
1231
1232 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1233
1234 /* Re-enable interrupts if they were disabled */
1235 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1236 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1237 if (ERROR_OK != retval)
1238 return retval;
1239 }
1240
1241
1242 target->debug_reason = DBG_REASON_BREAKPOINT;
1243
1244 if (breakpoint)
1245 cortex_a_set_breakpoint(target, breakpoint, 0);
1246
1247 if (target->state != TARGET_HALTED)
1248 LOG_DEBUG("target stepped");
1249
1250 return ERROR_OK;
1251 }
1252
1253 static int cortex_a_restore_context(struct target *target, bool bpwp)
1254 {
1255 struct armv7a_common *armv7a = target_to_armv7a(target);
1256
1257 LOG_DEBUG(" ");
1258
1259 if (armv7a->pre_restore_context)
1260 armv7a->pre_restore_context(target);
1261
1262 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1263 }
1264
1265 /*
1266 * Cortex-A Breakpoint and watchpoint functions
1267 */
1268
1269 /* Setup hardware Breakpoint Register Pair */
1270 static int cortex_a_set_breakpoint(struct target *target,
1271 struct breakpoint *breakpoint, uint8_t matchmode)
1272 {
1273 int retval;
1274 int brp_i = 0;
1275 uint32_t control;
1276 uint8_t byte_addr_select = 0x0F;
1277 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1278 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1279 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1280
1281 if (breakpoint->set) {
1282 LOG_WARNING("breakpoint already set");
1283 return ERROR_OK;
1284 }
1285
1286 if (breakpoint->type == BKPT_HARD) {
1287 while ((brp_i < cortex_a->brp_num) && brp_list[brp_i].used)
1288 brp_i++;
1289 if (brp_i >= cortex_a->brp_num) {
1290 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1291 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1292 }
1293 breakpoint->set = brp_i + 1;
1294 if (breakpoint->length == 2)
1295 byte_addr_select = (3 << (breakpoint->address & 0x02));
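/* DBGBCR layout (ARMv7 debug): bit 0 enables the breakpoint, bits [2:1]
 * = 0b11 match in privileged and user mode, bits [8:5] are the byte
 * address select (which bytes of the word-aligned BVR address match),
 * and bits [22:20] carry the match mode, e.g. 0x0 for an exact address
 * match or 0x4 for the address mismatch used by single stepping. */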
1296 control = ((matchmode & 0x7) << 20)
1297 | (byte_addr_select << 5)
1298 | (3 << 1) | 1;
1299 brp_list[brp_i].used = 1;
1300 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1301 brp_list[brp_i].control = control;
1302 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1303 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1304 brp_list[brp_i].value);
1305 if (retval != ERROR_OK)
1306 return retval;
1307 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1308 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1309 brp_list[brp_i].control);
1310 if (retval != ERROR_OK)
1311 return retval;
1312 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1313 brp_list[brp_i].control,
1314 brp_list[brp_i].value);
1315 } else if (breakpoint->type == BKPT_SOFT) {
1316 uint8_t code[4];
1317 /* length == 2: Thumb breakpoint */
1318 if (breakpoint->length == 2)
1319 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1320 else
1321 /* length == 3: Thumb-2 breakpoint, actual encoding is
1322 * a regular Thumb BKPT instruction but we replace a
1323 * 32bit Thumb-2 instruction, so fix-up the breakpoint
1324 * length
1325 */
1326 if (breakpoint->length == 3) {
1327 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1328 breakpoint->length = 4;
1329 } else
1330 /* length == 4, normal ARM breakpoint */
1331 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1332
1333 retval = target_read_memory(target,
1334 breakpoint->address & 0xFFFFFFFE,
1335 breakpoint->length, 1,
1336 breakpoint->orig_instr);
1337 if (retval != ERROR_OK)
1338 return retval;
1339
1340 /* make sure data cache is cleaned & invalidated down to PoC */
1341 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1342 armv7a_cache_flush_virt(target, breakpoint->address,
1343 breakpoint->length);
1344 }
1345
1346 retval = target_write_memory(target,
1347 breakpoint->address & 0xFFFFFFFE,
1348 breakpoint->length, 1, code);
1349 if (retval != ERROR_OK)
1350 return retval;
1351
1352 /* update i-cache at breakpoint location */
1353 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1354 breakpoint->length);
1355 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1356 breakpoint->length);
1357
1358 breakpoint->set = 0x11; /* Any nice value but 0 */
1359 }
1360
1361 return ERROR_OK;
1362 }
1363
1364 static int cortex_a_set_context_breakpoint(struct target *target,
1365 struct breakpoint *breakpoint, uint8_t matchmode)
1366 {
1367 int retval = ERROR_FAIL;
1368 int brp_i = 0;
1369 uint32_t control;
1370 uint8_t byte_addr_select = 0x0F;
1371 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1372 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1373 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1374
1375 if (breakpoint->set) {
1376 LOG_WARNING("breakpoint already set");
1377 return retval;
1378 }
1379 /*check available context BRPs*/
1380 while ((brp_i < cortex_a->brp_num) && (brp_list[brp_i].used ||
1381 (brp_list[brp_i].type != BRP_CONTEXT)))
1382 brp_i++;
1383
1384 if (brp_i >= cortex_a->brp_num) {
1385 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1386 return ERROR_FAIL;
1387 }
1388
1389 breakpoint->set = brp_i + 1;
1390 control = ((matchmode & 0x7) << 20)
1391 | (byte_addr_select << 5)
1392 | (3 << 1) | 1;
1393 brp_list[brp_i].used = 1;
1394 brp_list[brp_i].value = (breakpoint->asid);
1395 brp_list[brp_i].control = control;
1396 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1397 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1398 brp_list[brp_i].value);
1399 if (retval != ERROR_OK)
1400 return retval;
1401 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1402 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1403 brp_list[brp_i].control);
1404 if (retval != ERROR_OK)
1405 return retval;
1406 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1407 brp_list[brp_i].control,
1408 brp_list[brp_i].value);
1409 return ERROR_OK;
1410
1411 }
1412
1413 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1414 {
1415 int retval = ERROR_FAIL;
1416 int brp_1 = 0; /* holds the contextID pair */
1417 int brp_2 = 0; /* holds the IVA pair */
1418 uint32_t control_CTX, control_IVA;
1419 uint8_t CTX_byte_addr_select = 0x0F;
1420 uint8_t IVA_byte_addr_select = 0x0F;
1421 uint8_t CTX_matchmode = 0x03;
1422 uint8_t IVA_matchmode = 0x01;
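/* A hybrid breakpoint links two BRPs: one matching the context ID
 * (matchmode 0x03, linked context ID match) and one matching the
 * instruction address (matchmode 0x01, linked address match). Each
 * BCR names its partner in the linked-BRP-number field (bits [19:16],
 * the "<< 16" below), so the core halts only when both match. */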
1423 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1424 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1425 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1426
1427 if (breakpoint->set) {
1428 LOG_WARNING("breakpoint already set");
1429 return retval;
1430 }
1431 /*check available context BRPs*/
1432 while ((brp_1 < cortex_a->brp_num) && (brp_list[brp_1].used ||
1433 (brp_list[brp_1].type != BRP_CONTEXT)))
1434 brp_1++;
1435
1436 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1437 if (brp_1 >= cortex_a->brp_num) {
1438 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1439 return ERROR_FAIL;
1440 }
1441
1442 while ((brp_2 < cortex_a->brp_num) && (brp_list[brp_2].used ||
1443 (brp_list[brp_2].type != BRP_NORMAL)))
1444 brp_2++;
1445
1446 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1447 if (brp_2 >= cortex_a->brp_num) {
1448 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1449 return ERROR_FAIL;
1450 }
1451
1452 breakpoint->set = brp_1 + 1;
1453 breakpoint->linked_BRP = brp_2;
1454 control_CTX = ((CTX_matchmode & 0x7) << 20)
1455 | (brp_2 << 16)
1456 | (0 << 14)
1457 | (CTX_byte_addr_select << 5)
1458 | (3 << 1) | 1;
1459 brp_list[brp_1].used = 1;
1460 brp_list[brp_1].value = (breakpoint->asid);
1461 brp_list[brp_1].control = control_CTX;
1462 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1463 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1464 brp_list[brp_1].value);
1465 if (retval != ERROR_OK)
1466 return retval;
1467 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1468 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1469 brp_list[brp_1].control);
1470 if (retval != ERROR_OK)
1471 return retval;
1472
1473 control_IVA = ((IVA_matchmode & 0x7) << 20)
1474 | (brp_1 << 16)
1475 | (IVA_byte_addr_select << 5)
1476 | (3 << 1) | 1;
1477 brp_list[brp_2].used = 1;
1478 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1479 brp_list[brp_2].control = control_IVA;
1480 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1481 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1482 brp_list[brp_2].value);
1483 if (retval != ERROR_OK)
1484 return retval;
1485 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1486 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1487 brp_list[brp_2].control);
1488 if (retval != ERROR_OK)
1489 return retval;
1490
1491 return ERROR_OK;
1492 }
1493
1494 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1495 {
1496 int retval;
1497 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1498 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1499 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1500
1501 if (!breakpoint->set) {
1502 LOG_WARNING("breakpoint not set");
1503 return ERROR_OK;
1504 }
1505
1506 if (breakpoint->type == BKPT_HARD) {
1507 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1508 int brp_i = breakpoint->set - 1;
1509 int brp_j = breakpoint->linked_BRP;
1510 if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1511 LOG_DEBUG("Invalid BRP number in breakpoint");
1512 return ERROR_OK;
1513 }
1514 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1515 brp_list[brp_i].control, brp_list[brp_i].value);
1516 brp_list[brp_i].used = 0;
1517 brp_list[brp_i].value = 0;
1518 brp_list[brp_i].control = 0;
1519 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1520 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1521 brp_list[brp_i].control);
1522 if (retval != ERROR_OK)
1523 return retval;
1524 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1525 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1526 brp_list[brp_i].value);
1527 if (retval != ERROR_OK)
1528 return retval;
1529 if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1530 LOG_DEBUG("Invalid BRP number in breakpoint");
1531 return ERROR_OK;
1532 }
1533 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1534 brp_list[brp_j].control, brp_list[brp_j].value);
1535 brp_list[brp_j].used = 0;
1536 brp_list[brp_j].value = 0;
1537 brp_list[brp_j].control = 0;
1538 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1539 + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1540 brp_list[brp_j].control);
1541 if (retval != ERROR_OK)
1542 return retval;
1543 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1544 + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1545 brp_list[brp_j].value);
1546 if (retval != ERROR_OK)
1547 return retval;
1548 breakpoint->linked_BRP = 0;
1549 breakpoint->set = 0;
1550 return ERROR_OK;
1551
1552 } else {
1553 int brp_i = breakpoint->set - 1;
1554 if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1555 LOG_DEBUG("Invalid BRP number in breakpoint");
1556 return ERROR_OK;
1557 }
1558 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1559 brp_list[brp_i].control, brp_list[brp_i].value);
1560 brp_list[brp_i].used = 0;
1561 brp_list[brp_i].value = 0;
1562 brp_list[brp_i].control = 0;
1563 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1564 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1565 brp_list[brp_i].control);
1566 if (retval != ERROR_OK)
1567 return retval;
1568 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1569 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1570 brp_list[brp_i].value);
1571 if (retval != ERROR_OK)
1572 return retval;
1573 breakpoint->set = 0;
1574 return ERROR_OK;
1575 }
1576 } else {
1577
1578 /* make sure data cache is cleaned & invalidated down to PoC */
1579 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1580 armv7a_cache_flush_virt(target, breakpoint->address,
1581 breakpoint->length);
1582 }
1583
1584 /* restore original instruction (kept in target endianness) */
1585 if (breakpoint->length == 4) {
1586 retval = target_write_memory(target,
1587 breakpoint->address & 0xFFFFFFFE,
1588 4, 1, breakpoint->orig_instr);
1589 if (retval != ERROR_OK)
1590 return retval;
1591 } else {
1592 retval = target_write_memory(target,
1593 breakpoint->address & 0xFFFFFFFE,
1594 2, 1, breakpoint->orig_instr);
1595 if (retval != ERROR_OK)
1596 return retval;
1597 }
1598
1599 /* update i-cache at breakpoint location */
1600 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1601 breakpoint->length);
1602 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1603 breakpoint->length);
1604 }
1605 breakpoint->set = 0;
1606
1607 return ERROR_OK;
1608 }
1609
1610 static int cortex_a_add_breakpoint(struct target *target,
1611 struct breakpoint *breakpoint)
1612 {
1613 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1614
1615 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1616 LOG_INFO("no hardware breakpoint available");
1617 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1618 }
1619
1620 if (breakpoint->type == BKPT_HARD)
1621 cortex_a->brp_num_available--;
1622
1623 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1624 }
1625
1626 static int cortex_a_add_context_breakpoint(struct target *target,
1627 struct breakpoint *breakpoint)
1628 {
1629 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1630
1631 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1632 LOG_INFO("no hardware breakpoint available");
1633 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1634 }
1635
1636 if (breakpoint->type == BKPT_HARD)
1637 cortex_a->brp_num_available--;
1638
1639 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1640 }
1641
1642 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1643 struct breakpoint *breakpoint)
1644 {
1645 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1646
1647 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1648 LOG_INFO("no hardware breakpoint available");
1649 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1650 }
1651
1652 if (breakpoint->type == BKPT_HARD)
1653 cortex_a->brp_num_available--;
1654
1655 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1656 }
1657
1658
1659 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1660 {
1661 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1662
1663 #if 0
1664 /* It is perfectly possible to remove breakpoints while the target is running */
1665 if (target->state != TARGET_HALTED) {
1666 LOG_WARNING("target not halted");
1667 return ERROR_TARGET_NOT_HALTED;
1668 }
1669 #endif
1670
1671 if (breakpoint->set) {
1672 cortex_a_unset_breakpoint(target, breakpoint);
1673 if (breakpoint->type == BKPT_HARD)
1674 cortex_a->brp_num_available++;
1675 }
1676
1677
1678 return ERROR_OK;
1679 }
1680
1681 /*
1682 * Cortex-A Reset functions
1683 */
1684
1685 static int cortex_a_assert_reset(struct target *target)
1686 {
1687 struct armv7a_common *armv7a = target_to_armv7a(target);
1688
1689 LOG_DEBUG(" ");
1690
1691 /* FIXME when halt is requested, make it work somehow... */
1692
1693 /* This function can be called in "target not examined" state */
1694
1695 /* Issue some kind of warm reset. */
1696 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1697 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1698 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1699 /* REVISIT handle "pulls" cases, if there's
1700 * hardware that needs them to work.
1701 */
1702
1703 /*
1704 * FIXME: fix reset when transport is SWD. This is a temporary
1705 * work-around for release v0.10 that is not intended to stay!
1706 */
1707 if (transport_is_swd() ||
1708 (target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
1709 jtag_add_reset(0, 1);
1710
1711 } else {
1712 LOG_ERROR("%s: how to reset?", target_name(target));
1713 return ERROR_FAIL;
1714 }
1715
1716 /* registers are now invalid */
1717 if (target_was_examined(target))
1718 register_cache_invalidate(armv7a->arm.core_cache);
1719
1720 target->state = TARGET_RESET;
1721
1722 return ERROR_OK;
1723 }
1724
1725 static int cortex_a_deassert_reset(struct target *target)
1726 {
1727 int retval;
1728
1729 LOG_DEBUG(" ");
1730
1731 /* be certain SRST is off */
1732 jtag_add_reset(0, 0);
1733
1734 if (target_was_examined(target)) {
1735 retval = cortex_a_poll(target);
1736 if (retval != ERROR_OK)
1737 return retval;
1738 }
1739
1740 if (target->reset_halt) {
1741 if (target->state != TARGET_HALTED) {
1742 LOG_WARNING("%s: ran after reset and before halt ...",
1743 target_name(target));
1744 if (target_was_examined(target)) {
1745 retval = target_halt(target);
1746 if (retval != ERROR_OK)
1747 return retval;
1748 } else
1749 target->state = TARGET_UNKNOWN;
1750 }
1751 }
1752
1753 return ERROR_OK;
1754 }
1755
1756 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1757 {
1758 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1759 * New desired mode must be in mode. Current value of DSCR must be in
1760 * *dscr, which is updated with new value.
1761 *
1762 * This function elides actually sending the mode-change over the debug
1763 * interface if the mode is already set as desired.
1764 */
1765 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1766 if (new_dscr != *dscr) {
1767 struct armv7a_common *armv7a = target_to_armv7a(target);
1768 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1769 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1770 if (retval == ERROR_OK)
1771 *dscr = new_dscr;
1772 return retval;
1773 } else {
1774 return ERROR_OK;
1775 }
1776 }
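/* Hypothetical call site, assuming the DSCR_EXT_DCC_* mode encodings
 * from cortex_a.h: switch the DCC to fast mode before a bulk transfer,
 * keeping the cached DSCR value in sync:
 *
 *   retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, &dscr);
 */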
1777
1778 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1779 uint32_t value, uint32_t *dscr)
1780 {
1781 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1782 struct armv7a_common *armv7a = target_to_armv7a(target);
1783 int64_t then = timeval_ms();
1784 int retval;
1785
1786 while ((*dscr & mask) != value) {
1787 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1788 armv7a->debug_base + CPUDBG_DSCR, dscr);
1789 if (retval != ERROR_OK)
1790 return retval;
1791 if (timeval_ms() > then + 1000) {
1792 LOG_ERROR("timeout waiting for DSCR bit change");
1793 return ERROR_FAIL;
1794 }
1795 }
1796 return ERROR_OK;
1797 }
1798
1799 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
1800 uint32_t *data, uint32_t *dscr)
1801 {
1802 int retval;
1803 struct armv7a_common *armv7a = target_to_armv7a(target);
1804
1805 /* Move from coprocessor to R0. */
1806 retval = cortex_a_exec_opcode(target, opcode, dscr);
1807 if (retval != ERROR_OK)
1808 return retval;
1809
1810 /* Move from R0 to DTRTX. */
1811 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
1812 if (retval != ERROR_OK)
1813 return retval;
1814
1815 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
1816 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1817 * must also check TXfull_l). Most of the time this will be free
1818 * because TXfull_l will be set immediately and cached in dscr. */
1819 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
1820 DSCR_DTRTX_FULL_LATCHED, dscr);
1821 if (retval != ERROR_OK)
1822 return retval;
1823
1824 /* Read the value transferred to DTRTX. */
1825 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1826 armv7a->debug_base + CPUDBG_DTRTX, data);
1827 if (retval != ERROR_OK)
1828 return retval;
1829
1830 return ERROR_OK;
1831 }
1832
1833 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1834 uint32_t *dfsr, uint32_t *dscr)
1835 {
1836 int retval;
1837
1838 if (dfar) {
1839 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1840 if (retval != ERROR_OK)
1841 return retval;
1842 }
1843
1844 if (dfsr) {
1845 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
1846 if (retval != ERROR_OK)
1847 return retval;
1848 }
1849
1850 return ERROR_OK;
1851 }
1852
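/* Writes 'data' to a coprocessor register: the value is loaded into R0 via
 * DTRRX, then 'opcode' (expected to be an MCR that reads R0) is executed. */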
1853 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
1854 uint32_t data, uint32_t *dscr)
1855 {
1856 int retval;
1857 struct armv7a_common *armv7a = target_to_armv7a(target);
1858
1859 /* Write the value into DTRRX. */
1860 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1861 armv7a->debug_base + CPUDBG_DTRRX, data);
1862 if (retval != ERROR_OK)
1863 return retval;
1864
1865 /* Move from DTRRX to R0. */
1866 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
1867 if (retval != ERROR_OK)
1868 return retval;
1869
1870 /* Move from R0 to coprocessor. */
1871 retval = cortex_a_exec_opcode(target, opcode, dscr);
1872 if (retval != ERROR_OK)
1873 return retval;
1874
1875 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
1876 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
1877 * check RXfull_l). Most of the time this will be free because RXfull_l
1878 * will be cleared immediately and cached in dscr. */
1879 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
1880 if (retval != ERROR_OK)
1881 return retval;
1882
1883 return ERROR_OK;
1884 }
1885
1886 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
1887 uint32_t dfsr, uint32_t *dscr)
1888 {
1889 int retval;
1890
1891 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
1892 if (retval != ERROR_OK)
1893 return retval;
1894
1895 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
1896 if (retval != ERROR_OK)
1897 return retval;
1898
1899 return ERROR_OK;
1900 }
1901
1902 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
1903 {
1904 uint32_t status, upper4;
1905
1906 if (dfsr & (1 << 9)) {
1907 /* LPAE format. */
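/* With the long-descriptor format, the fault status is DFSR[5:0]. */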
1908 status = dfsr & 0x3f;
1909 upper4 = status >> 2;
1910 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
1911 return ERROR_TARGET_TRANSLATION_FAULT;
1912 else if (status == 33)
1913 return ERROR_TARGET_UNALIGNED_ACCESS;
1914 else
1915 return ERROR_TARGET_DATA_ABORT;
1916 } else {
1917 /* Normal format. */
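/* With the short-descriptor format, the fault status field is split
 * across DFSR[10] and DFSR[3:0]. */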
1918 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
1919 if (status == 1)
1920 return ERROR_TARGET_UNALIGNED_ACCESS;
1921 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
1922 status == 9 || status == 11 || status == 13 || status == 15)
1923 return ERROR_TARGET_TRANSLATION_FAULT;
1924 else
1925 return ERROR_TARGET_DATA_ABORT;
1926 }
1927 }
1928
1929 static int cortex_a_write_cpu_memory_slow(struct target *target,
1930 uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1931 {
1932 /* Writes count objects of size size from *buffer. Old value of DSCR must
1933 * be in *dscr; updated to new value. This is slow because it works for
1934 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
1935 * the address is aligned, cortex_a_write_cpu_memory_fast should be
1936 * preferred.
1937 * Preconditions:
1938 * - Address is in R0.
1939 * - R0 is marked dirty.
1940 */
1941 struct armv7a_common *armv7a = target_to_armv7a(target);
1942 struct arm *arm = &armv7a->arm;
1943 int retval;
1944
1945 /* Mark register R1 as dirty, to use for transferring data. */
1946 arm_reg_current(arm, 1)->dirty = true;
1947
1948 /* Switch to non-blocking mode if not already in that mode. */
1949 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
1950 if (retval != ERROR_OK)
1951 return retval;
1952
1953 /* Go through the objects. */
1954 while (count) {
1955 /* Write the value to store into DTRRX. */
1956 uint32_t data, opcode;
1957 if (size == 1)
1958 data = *buffer;
1959 else if (size == 2)
1960 data = target_buffer_get_u16(target, buffer);
1961 else
1962 data = target_buffer_get_u32(target, buffer);
1963 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1964 armv7a->debug_base + CPUDBG_DTRRX, data);
1965 if (retval != ERROR_OK)
1966 return retval;
1967
1968 /* Transfer the value from DTRRX to R1. */
1969 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
1970 if (retval != ERROR_OK)
1971 return retval;
1972
1973 /* Write the value transferred to R1 into memory. */
1974 if (size == 1)
1975 opcode = ARMV4_5_STRB_IP(1, 0);
1976 else if (size == 2)
1977 opcode = ARMV4_5_STRH_IP(1, 0);
1978 else
1979 opcode = ARMV4_5_STRW_IP(1, 0);
1980 retval = cortex_a_exec_opcode(target, opcode, dscr);
1981 if (retval != ERROR_OK)
1982 return retval;
1983
1984 /* Check for faults and return early. */
1985 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
1986 return ERROR_OK; /* A data fault is not considered a system failure. */
1987
1988 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
1989 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1990 * must also check RXfull_l). Most of the time this will be free
1991 * because RXfull_l will be cleared immediately and cached in dscr. */
1992 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
1993 if (retval != ERROR_OK)
1994 return retval;
1995
1996 /* Advance. */
1997 buffer += size;
1998 --count;
1999 }
2000
2001 return ERROR_OK;
2002 }
2003
2004 static int cortex_a_write_cpu_memory_fast(struct target *target,
2005 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2006 {
2007 /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2008 * in *dscr; updated to new value. This is fast but only works for
2009 * word-sized objects at aligned addresses.
2010 * Preconditions:
2011 * - Address is in R0 and must be a multiple of 4.
2012 * - R0 is marked dirty.
2013 */
2014 struct armv7a_common *armv7a = target_to_armv7a(target);
2015 int retval;
2016
2017 /* Switch to fast mode if not already in that mode. */
2018 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2019 if (retval != ERROR_OK)
2020 return retval;
2021
2022 /* Latch STC instruction. */
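/* This encodes "STC p14, c5, [R0], #4": each execution stores the word
 * currently in DTRRX to [R0] and post-increments R0 by 4. In fast mode,
 * every subsequent write to DTRRX re-issues the latched instruction. */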
2023 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2024 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2025 if (retval != ERROR_OK)
2026 return retval;
2027
2028 /* Transfer all the data and issue all the instructions. */
2029 return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2030 4, count, armv7a->debug_base + CPUDBG_DTRRX);
2031 }
2032
2033 static int cortex_a_write_cpu_memory(struct target *target,
2034 uint32_t address, uint32_t size,
2035 uint32_t count, const uint8_t *buffer)
2036 {
2037 /* Write memory through the CPU. */
2038 int retval, final_retval;
2039 struct armv7a_common *armv7a = target_to_armv7a(target);
2040 struct arm *arm = &armv7a->arm;
2041 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2042
2043 LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2044 address, size, count);
2045 if (target->state != TARGET_HALTED) {
2046 LOG_WARNING("target not halted");
2047 return ERROR_TARGET_NOT_HALTED;
2048 }
2049
2050 if (!count)
2051 return ERROR_OK;
2052
2053 /* Clear any abort. */
2054 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2055 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2056 if (retval != ERROR_OK)
2057 return retval;
2058
2059 /* Read DSCR. */
2060 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2061 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2062 if (retval != ERROR_OK)
2063 return retval;
2064
2065 /* Switch to non-blocking mode if not already in that mode. */
2066 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2067 if (retval != ERROR_OK)
2068 goto out;
2069
2070 /* Mark R0 as dirty. */
2071 arm_reg_current(arm, 0)->dirty = true;
2072
2073 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2074 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2075 if (retval != ERROR_OK)
2076 goto out;
2077
2078 /* Get the memory address into R0. */
2079 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2080 armv7a->debug_base + CPUDBG_DTRRX, address);
2081 if (retval != ERROR_OK)
2082 goto out;
2083 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2084 if (retval != ERROR_OK)
2085 goto out;
2086
2087 if (size == 4 && (address % 4) == 0) {
2088 /* We are doing a word-aligned transfer, so use fast mode. */
2089 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2090 } else {
2091 /* Use slow path. */
2092 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2093 }
2094
2095 out:
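/* Whether or not the transfer succeeded, bring the DCC back to a sane
 * state: restore non-blocking mode, let the last instruction finish and
 * clear any sticky faults, keeping the first error seen in final_retval. */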
2096 final_retval = retval;
2097
2098 /* Switch to non-blocking mode if not already in that mode. */
2099 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2100 if (final_retval == ERROR_OK)
2101 final_retval = retval;
2102
2103 /* Wait for last issued instruction to complete. */
2104 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2105 if (final_retval == ERROR_OK)
2106 final_retval = retval;
2107
2108 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2109 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2110 * check RXfull_l). Most of the time this will be free because RXfull_l
2111 * will be cleared immediately and cached in dscr. However, don't do this
2112 * if there is a fault, because then the instruction might not have completed
2113 * successfully. */
2114 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2115 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2116 if (retval != ERROR_OK)
2117 return retval;
2118 }
2119
2120 /* If there were any sticky abort flags, clear them. */
2121 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2122 fault_dscr = dscr;
2123 mem_ap_write_atomic_u32(armv7a->debug_ap,
2124 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2125 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2126 } else {
2127 fault_dscr = 0;
2128 }
2129
2130 /* Handle synchronous data faults. */
2131 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2132 if (final_retval == ERROR_OK) {
2133 /* Final return value will reflect cause of fault. */
2134 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2135 if (retval == ERROR_OK) {
2136 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2137 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2138 } else
2139 final_retval = retval;
2140 }
2141 /* Fault destroyed DFAR/DFSR; restore them. */
2142 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2143 if (retval != ERROR_OK)
2144 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2145 }
2146
2147 /* Handle asynchronous data faults. */
2148 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2149 if (final_retval == ERROR_OK)
2150 /* No other error has been recorded so far, so keep this one. */
2151 final_retval = ERROR_TARGET_DATA_ABORT;
2152 }
2153
2154 /* If the DCC is nonempty, clear it. */
2155 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2156 uint32_t dummy;
2157 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2158 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2159 if (final_retval == ERROR_OK)
2160 final_retval = retval;
2161 }
2162 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2163 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2164 if (final_retval == ERROR_OK)
2165 final_retval = retval;
2166 }
2167
2168 /* Done. */
2169 return final_retval;
2170 }
2171
2172 static int cortex_a_read_cpu_memory_slow(struct target *target,
2173 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2174 {
2175 /* Reads count objects of size size into *buffer. Old value of DSCR must be
2176 * in *dscr; updated to new value. This is slow because it works for
2177 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2178 * the address is aligned, cortex_a_read_cpu_memory_fast should be
2179 * preferred.
2180 * Preconditions:
2181 * - Address is in R0.
2182 * - R0 is marked dirty.
2183 */
2184 struct armv7a_common *armv7a = target_to_armv7a(target);
2185 struct arm *arm = &armv7a->arm;
2186 int retval;
2187
2188 /* Mark register R1 as dirty, to use for transferring data. */
2189 arm_reg_current(arm, 1)->dirty = true;
2190
2191 /* Switch to non-blocking mode if not already in that mode. */
2192 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2193 if (retval != ERROR_OK)
2194 return retval;
2195
2196 /* Go through the objects. */
2197 while (count) {
2198 /* Issue a load of the appropriate size to R1. */
2199 uint32_t opcode, data;
2200 if (size == 1)
2201 opcode = ARMV4_5_LDRB_IP(1, 0);
2202 else if (size == 2)
2203 opcode = ARMV4_5_LDRH_IP(1, 0);
2204 else
2205 opcode = ARMV4_5_LDRW_IP(1, 0);
2206 retval = cortex_a_exec_opcode(target, opcode, dscr);
2207 if (retval != ERROR_OK)
2208 return retval;
2209
2210 /* Issue a write of R1 to DTRTX. */
2211 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2212 if (retval != ERROR_OK)
2213 return retval;
2214
2215 /* Check for faults and return early. */
2216 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2217 return ERROR_OK; /* A data fault is not considered a system failure. */
2218
2219 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2220 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2221 * must also check TXfull_l). Most of the time this will be free
2222 * because TXfull_l will be set immediately and cached in dscr. */
2223 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2224 DSCR_DTRTX_FULL_LATCHED, dscr);
2225 if (retval != ERROR_OK)
2226 return retval;
2227
2228 /* Read the value transferred to DTRTX into the buffer. */
2229 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2230 armv7a->debug_base + CPUDBG_DTRTX, &data);
2231 if (retval != ERROR_OK)
2232 return retval;
2233 if (size == 1)
2234 *buffer = (uint8_t) data;
2235 else if (size == 2)
2236 target_buffer_set_u16(target, buffer, (uint16_t) data);
2237 else
2238 target_buffer_set_u32(target, buffer, data);
2239
2240 /* Advance. */
2241 buffer += size;
2242 --count;
2243 }
2244
2245 return ERROR_OK;
2246 }
2247
2248 static int cortex_a_read_cpu_memory_fast(struct target *target,
2249 uint32_t count, uint8_t *buffer, uint32_t *dscr)
2250 {
2251 /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2252 * *dscr; updated to new value. This is fast but only works for word-sized
2253 * objects at aligned addresses.
2254 * Preconditions:
2255 * - Address is in R0 and must be a multiple of 4.
2256 * - R0 is marked dirty.
2257 */
2258 struct armv7a_common *armv7a = target_to_armv7a(target);
2259 uint32_t u32;
2260 int retval;
2261
2262 /* Switch to non-blocking mode if not already in that mode. */
2263 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2264 if (retval != ERROR_OK)
2265 return retval;
2266
2267 /* Issue the LDC instruction via a write to ITR. */
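/* This encodes "LDC p14, c5, [R0], #4": it loads one word from [R0] into
 * DTRTX and post-increments R0 by 4. */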
2268 retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2269 if (retval != ERROR_OK)
2270 return retval;
2271
2272 count--;
2273
2274 if (count > 0) {
2275 /* Switch to fast mode if not already in that mode. */
2276 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2277 if (retval != ERROR_OK)
2278 return retval;
2279
2280 /* Latch LDC instruction. */
2281 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2282 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2283 if (retval != ERROR_OK)
2284 return retval;
2285
2286 /* Read the value transferred to DTRTX into the buffer. Due to fast
2287 * mode rules, this blocks until the instruction finishes executing and
2288 * then reissues the read instruction to read the next word from
2289 * memory. The last read of DTRTX in this call reads the second-to-last
2290 * word from memory and issues the read instruction for the last word.
2291 */
2292 retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2293 4, count, armv7a->debug_base + CPUDBG_DTRTX);
2294 if (retval != ERROR_OK)
2295 return retval;
2296
2297 /* Advance. */
2298 buffer += count * 4;
2299 }
2300
2301 /* Wait for last issued instruction to complete. */
2302 retval = cortex_a_wait_instrcmpl(target, dscr, false);
2303 if (retval != ERROR_OK)
2304 return retval;
2305
2306 /* Switch to non-blocking mode if not already in that mode. */
2307 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2308 if (retval != ERROR_OK)
2309 return retval;
2310
2311 /* Check for faults and return early. */
2312 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2313 return ERROR_OK; /* A data fault is not considered a system failure. */
2314
2315 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2316 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2317 * check TXfull_l). Most of the time this will be free because TXfull_l
2318 * will be set immediately and cached in dscr. */
2319 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2320 DSCR_DTRTX_FULL_LATCHED, dscr);
2321 if (retval != ERROR_OK)
2322 return retval;
2323
2324 /* Read the value transferred to DTRTX into the buffer. This is the last
2325 * word. */
2326 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2327 armv7a->debug_base + CPUDBG_DTRTX, &u32);
2328 if (retval != ERROR_OK)
2329 return retval;
2330 target_buffer_set_u32(target, buffer, u32);
2331
2332 return ERROR_OK;
2333 }
2334
2335 static int cortex_a_read_cpu_memory(struct target *target,
2336 uint32_t address, uint32_t size,
2337 uint32_t count, uint8_t *buffer)
2338 {
2339 /* Read memory through the CPU. */
2340 int retval, final_retval;
2341 struct armv7a_common *armv7a = target_to_armv7a(target);
2342 struct arm *arm = &armv7a->arm;
2343 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2344
2345 LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2346 address, size, count);
2347 if (target->state != TARGET_HALTED) {
2348 LOG_WARNING("target not halted");
2349 return ERROR_TARGET_NOT_HALTED;
2350 }
2351
2352 if (!count)
2353 return ERROR_OK;
2354
2355 /* Clear any abort. */
2356 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2357 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2358 if (retval != ERROR_OK)
2359 return retval;
2360
2361 /* Read DSCR */
2362 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2363 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2364 if (retval != ERROR_OK)
2365 return retval;
2366
2367 /* Switch to non-blocking mode if not already in that mode. */
2368 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2369 if (retval != ERROR_OK)
2370 goto out;
2371
2372 /* Mark R0 as dirty. */
2373 arm_reg_current(arm, 0)->dirty = true;
2374
2375 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2376 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2377 if (retval != ERROR_OK)
2378 goto out;
2379
2380 /* Get the memory address into R0. */
2381 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2382 armv7a->debug_base + CPUDBG_DTRRX, address);
2383 if (retval != ERROR_OK)
2384 goto out;
2385 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2386 if (retval != ERROR_OK)
2387 goto out;
2388
2389 if (size == 4 && (address % 4) == 0) {
2390 /* We are doing a word-aligned transfer, so use fast mode. */
2391 retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2392 } else {
2393 /* Use slow path. */
2394 retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2395 }
2396
2397 out:
2398 final_retval = retval;
2399
2400 /* Switch to non-blocking mode if not already in that mode. */
2401 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2402 if (final_retval == ERROR_OK)
2403 final_retval = retval;
2404
2405 /* Wait for last issued instruction to complete. */
2406 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2407 if (final_retval == ERROR_OK)
2408 final_retval = retval;
2409
2410 /* If there were any sticky abort flags, clear them. */
2411 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2412 fault_dscr = dscr;
2413 mem_ap_write_atomic_u32(armv7a->debug_ap,
2414 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2415 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2416 } else {
2417 fault_dscr = 0;
2418 }
2419
2420 /* Handle synchronous data faults. */
2421 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2422 if (final_retval == ERROR_OK) {
2423 /* Final return value will reflect cause of fault. */
2424 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2425 if (retval == ERROR_OK) {
2426 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2427 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2428 } else
2429 final_retval = retval;
2430 }
2431 /* Fault destroyed DFAR/DFSR; restore them. */
2432 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2433 if (retval != ERROR_OK)
2434 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2435 }
2436
2437 /* Handle asynchronous data faults. */
2438 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2439 if (final_retval == ERROR_OK)
2440 /* No other error has been recorded so far, so keep this one. */
2441 final_retval = ERROR_TARGET_DATA_ABORT;
2442 }
2443
2444 /* If the DCC is nonempty, clear it. */
2445 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2446 uint32_t dummy;
2447 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2448 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2449 if (final_retval == ERROR_OK)
2450 final_retval = retval;
2451 }
2452 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2453 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2454 if (final_retval == ERROR_OK)
2455 final_retval = retval;
2456 }
2457
2458 /* Done. */
2459 return final_retval;
2460 }
2461
2462
2463 /*
2464 * Cortex-A Memory access
2465 *
2466 * This is the same as for Cortex-M3, but we must also use the
2467 * correct AP number for every access.
2468 */
2469
2470 static int cortex_a_read_phys_memory(struct target *target,
2471 target_addr_t address, uint32_t size,
2472 uint32_t count, uint8_t *buffer)
2473 {
2474 int retval;
2475
2476 if (!count || !buffer)
2477 return ERROR_COMMAND_SYNTAX_ERROR;
2478
2479 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2480 address, size, count);
2481
2482 /* read memory through the CPU */
2483 cortex_a_prep_memaccess(target, 1);
2484 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2485 cortex_a_post_memaccess(target, 1);
2486
2487 return retval;
2488 }
2489
2490 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2491 uint32_t size, uint32_t count, uint8_t *buffer)
2492 {
2493 int retval;
2494
2495 /* cortex_a handles unaligned memory access */
2496 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2497 address, size, count);
2498
2499 cortex_a_prep_memaccess(target, 0);
2500 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2501 cortex_a_post_memaccess(target, 0);
2502
2503 return retval;
2504 }
2505
2506 static int cortex_a_write_phys_memory(struct target *target,
2507 target_addr_t address, uint32_t size,
2508 uint32_t count, const uint8_t *buffer)
2509 {
2510 int retval;
2511
2512 if (!count || !buffer)
2513 return ERROR_COMMAND_SYNTAX_ERROR;
2514
2515 LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2516 address, size, count);
2517
2518 /* write memory through the CPU */
2519 cortex_a_prep_memaccess(target, 1);
2520 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2521 cortex_a_post_memaccess(target, 1);
2522
2523 return retval;
2524 }
2525
2526 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2527 uint32_t size, uint32_t count, const uint8_t *buffer)
2528 {
2529 int retval;
2530
2531 /* cortex_a handles unaligned memory access */
2532 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2533 address, size, count);
2534
2535 /* memory writes bypass the caches, must flush before writing */
2536 armv7a_cache_auto_flush_on_write(target, address, size * count);
2537
2538 cortex_a_prep_memaccess(target, 0);
2539 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2540 cortex_a_post_memaccess(target, 0);
2541 return retval;
2542 }
2543
2544 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2545 uint32_t count, uint8_t *buffer)
2546 {
2547 uint32_t size;
2548
2549 /* Carve off small leading accesses until the address is 4-byte aligned;
2550 * the loop condition ensures each larger size still has work left to do. */
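/* For example, address 0x1001 with count 7 issues a 1-byte read at 0x1001
 * and a 2-byte read at 0x1002 here, leaving one 4-byte read at 0x1004 for
 * the loop below. */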
2551 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2552 if (address & size) {
2553 int retval = target_read_memory(target, address, size, 1, buffer);
2554 if (retval != ERROR_OK)
2555 return retval;
2556 address += size;
2557 count -= size;
2558 buffer += size;
2559 }
2560 }
2561
2562 /* Read the data with as large access size as possible. */
2563 for (; size > 0; size /= 2) {
2564 uint32_t aligned = count - count % size;
2565 if (aligned > 0) {
2566 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2567 if (retval != ERROR_OK)
2568 return retval;
2569 address += aligned;
2570 count -= aligned;
2571 buffer += aligned;
2572 }
2573 }
2574
2575 return ERROR_OK;
2576 }
2577
2578 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2579 uint32_t count, const uint8_t *buffer)
2580 {
2581 uint32_t size;
2582
2583 /* Carve off small leading accesses until the address is 4-byte aligned,
2584 * exactly as in cortex_a_read_buffer() above. */
2585 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2586 if (address & size) {
2587 int retval = target_write_memory(target, address, size, 1, buffer);
2588 if (retval != ERROR_OK)
2589 return retval;
2590 address += size;
2591 count -= size;
2592 buffer += size;
2593 }
2594 }
2595
2596 /* Write the data with as large access size as possible. */
2597 for (; size > 0; size /= 2) {
2598 uint32_t aligned = count - count % size;
2599 if (aligned > 0) {
2600 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2601 if (retval != ERROR_OK)
2602 return retval;
2603 address += aligned;
2604 count -= aligned;
2605 buffer += aligned;
2606 }
2607 }
2608
2609 return ERROR_OK;
2610 }
2611
2612 static int cortex_a_handle_target_request(void *priv)
2613 {
2614 struct target *target = priv;
2615 struct armv7a_common *armv7a = target_to_armv7a(target);
2616 int retval;
2617
2618 if (!target_was_examined(target))
2619 return ERROR_OK;
2620 if (!target->dbg_msg_enabled)
2621 return ERROR_OK;
2622
2623 if (target->state == TARGET_RUNNING) {
2624 uint32_t request;
2625 uint32_t dscr;
2626 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2627 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2628
2629 /* check if we have data */
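/* Drain pending debug messages: while DTRTX is full, each word read from
 * it is handed to the generic target_request layer for decoding. */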
2630 int64_t then = timeval_ms();
2631 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2632 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2633 armv7a->debug_base + CPUDBG_DTRTX, &request);
2634 if (retval == ERROR_OK) {
2635 target_request(target, request);
2636 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2637 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2638 }
2639 if (timeval_ms() > then + 1000) {
2640 LOG_ERROR("Timeout waiting for dtr tx full");
2641 return ERROR_FAIL;
2642 }
2643 }
2644 }
2645
2646 return ERROR_OK;
2647 }
2648
2649 /*
2650 * Cortex-A target information and configuration
2651 */
2652
2653 static int cortex_a_examine_first(struct target *target)
2654 {
2655 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2656 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2657 struct adiv5_dap *swjdp = armv7a->arm.dap;
2658
2659 int i;
2660 int retval = ERROR_OK;
2661 uint32_t didr, cpuid, dbg_osreg;
2662
2663 /* Search for the APB-AP - it is needed for access to debug registers */
2664 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2665 if (retval != ERROR_OK) {
2666 LOG_ERROR("Could not find APB-AP for debug access");
2667 return retval;
2668 }
2669
2670 retval = mem_ap_init(armv7a->debug_ap);
2671 if (retval != ERROR_OK) {
2672 LOG_ERROR("Could not initialize the APB-AP");
2673 return retval;
2674 }
2675
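/* Extra TCK clocks inserted per memory-bus access, giving slow debug
 * register accesses time to complete. */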
2676 armv7a->debug_ap->memaccess_tck = 80;
2677
2678 if (!target->dbgbase_set) {
2679 uint32_t dbgbase;
2680 /* Get ROM Table base */
2681 uint32_t apid;
2682 int32_t coreidx = target->coreid;
2683 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2684 target->cmd_name);
2685 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2686 if (retval != ERROR_OK)
2687 return retval;
2688 /* Lookup 0x15 -- Processor DAP */
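/* 0x15 is the CoreSight DEVTYPE for "debug logic, processor core", i.e.
 * the per-core debug register block. */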
2689 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2690 &armv7a->debug_base, &coreidx);
2691 if (retval != ERROR_OK) {
2692 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2693 target->cmd_name);
2694 return retval;
2695 }
2696 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2697 target->coreid, armv7a->debug_base);
2698 } else
2699 armv7a->debug_base = target->dbgbase;
2700
2701 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2702 armv7a->debug_base + CPUDBG_DIDR, &didr);
2703 if (retval != ERROR_OK) {
2704 LOG_DEBUG("Examine %s failed", "DIDR");
2705 return retval;
2706 }
2707
2708 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2709 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2710 if (retval != ERROR_OK) {
2711 LOG_DEBUG("Examine %s failed", "CPUID");
2712 return retval;
2713 }
2714
2715 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2716 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2717
2718 cortex_a->didr = didr;
2719 cortex_a->cpuid = cpuid;
2720
2721 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2722 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2723 if (retval != ERROR_OK)
2724 return retval;
2725 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2726
2727 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2728 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2729 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2730 return ERROR_TARGET_INIT_FAILED;
2731 }
2732
2733 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2734 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2735
2736 /* Read DBGOSLSR and check if OSLK is implemented */
2737 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2738 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2739 if (retval != ERROR_OK)
2740 return retval;
2741 LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2742
2743 /* check if OS Lock is implemented */
2744 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2745 /* check if OS Lock is set */
2746 if (dbg_osreg & OSLSR_OSLK) {
2747 LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2748
2749 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2750 armv7a->debug_base + CPUDBG_OSLAR,
2751 0);
2752 if (retval == ERROR_OK)
2753 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2754 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2755
2756 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2757 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2758 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2759 target->coreid);
2760 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2761 return ERROR_TARGET_INIT_FAILED;
2762 }
2763 }
2764 }
2765
2766 armv7a->arm.core_type = ARM_MODE_MON;
2767
2768 /* Avoid recreating the registers cache */
2769 if (!target_was_examined(target)) {
2770 retval = cortex_a_dpm_setup(cortex_a, didr);
2771 if (retval != ERROR_OK)
2772 return retval;
2773 }
2774
2775 /* Setup Breakpoint Register Pairs */
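/* DIDR[27:24] holds the number of breakpoint register pairs minus one;
 * DIDR[23:20] holds the number of context-matching pairs minus one. */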
2776 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2777 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2778 cortex_a->brp_num_available = cortex_a->brp_num;
2779 free(cortex_a->brp_list);
2780 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2781 /* cortex_a->brb_enabled = ????; */
2782 for (i = 0; i < cortex_a->brp_num; i++) {
2783 cortex_a->brp_list[i].used = 0;
2784 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2785 cortex_a->brp_list[i].type = BRP_NORMAL;
2786 else
2787 cortex_a->brp_list[i].type = BRP_CONTEXT;
2788 cortex_a->brp_list[i].value = 0;
2789 cortex_a->brp_list[i].control = 0;
2790 cortex_a->brp_list[i].BRPn = i;
2791 }
2792
2793 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2794
2795 /* select debug_ap as default */
2796 swjdp->apsel = armv7a->debug_ap->ap_num;
2797
2798 target_set_examined(target);
2799 return ERROR_OK;
2800 }
2801
2802 static int cortex_a_examine(struct target *target)
2803 {
2804 int retval = ERROR_OK;
2805
2806 /* Reestablish communication after target reset */
2807 retval = cortex_a_examine_first(target);
2808
2809 /* Configure core debug access */
2810 if (retval == ERROR_OK)
2811 retval = cortex_a_init_debug_access(target);
2812
2813 return retval;
2814 }
2815
2816 /*
2817 * Cortex-A target creation and initialization
2818 */
2819
2820 static int cortex_a_init_target(struct command_context *cmd_ctx,
2821 struct target *target)
2822 {
2823 /* examine_first() does a bunch of this */
2824 arm_semihosting_init(target);
2825 return ERROR_OK;
2826 }
2827
2828 static int cortex_a_init_arch_info(struct target *target,
2829 struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
2830 {
2831 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2832
2833 /* Setup struct cortex_a_common */
2834 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2835 armv7a->arm.dap = dap;
2836
2837 /* register arch-specific functions */
2838 armv7a->examine_debug_reason = NULL;
2839
2840 armv7a->post_debug_entry = cortex_a_post_debug_entry;
2841
2842 armv7a->pre_restore_context = NULL;
2843
2844 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
2845
2847 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
2848
2849 /* REVISIT v7a setup should be in a v7a-specific routine */
2850 armv7a_init_arch_info(target, armv7a);
2851 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
2852
2853 return ERROR_OK;
2854 }
2855
2856 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
2857 {
struct adiv5_private_config *pc;
struct cortex_a_common *cortex_a;

if (target->private_config == NULL)
return ERROR_FAIL;

pc = (struct adiv5_private_config *)target->private_config;

/* Allocate only after the config check so a bad config cannot leak the
 * allocation, and guard against calloc failure. */
cortex_a = calloc(1, sizeof(struct cortex_a_common));
if (cortex_a == NULL)
return ERROR_FAIL;
cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2866
2867 cortex_a->armv7a_common.is_armv7r = false;
2868
2869 cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
2870
2871 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2872 }
2873
2874 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2875 {
struct adiv5_private_config *pc;
struct cortex_a_common *cortex_a;

pc = (struct adiv5_private_config *)target->private_config;
if (adiv5_verify_config(pc) != ERROR_OK)
return ERROR_FAIL;

/* As in cortex_a_target_create(): allocate after validating the config
 * and check the calloc result. */
cortex_a = calloc(1, sizeof(struct cortex_a_common));
if (cortex_a == NULL)
return ERROR_FAIL;
cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2883
2884 cortex_a->armv7a_common.is_armv7r = true;
2885
2886 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2887 }
2888
2889 static void cortex_a_deinit_target(struct target *target)
2890 {
2891 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2892 struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
2893
2894 free(cortex_a->brp_list);
2895 free(dpm->dbp);
2896 free(dpm->dwp);
2897 free(target->private_config);
2898 free(cortex_a);
2899 }
2900
2901 static int cortex_a_mmu(struct target *target, int *enabled)
2902 {
2903 struct armv7a_common *armv7a = target_to_armv7a(target);
2904
2905 if (target->state != TARGET_HALTED) {
2906 LOG_ERROR("%s: target not halted", __func__);
2907 return ERROR_TARGET_INVALID;
2908 }
2909
2910 if (armv7a->is_armv7r)
2911 *enabled = 0;
2912 else
2913 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2914
2915 return ERROR_OK;
2916 }
2917
2918 static int cortex_a_virt2phys(struct target *target,
2919 target_addr_t virt, target_addr_t *phys)
2920 {
2921 int retval;
2922 int mmu_enabled = 0;
2923
2924 /*
2925 * If the MMU was not enabled at debug entry, there is no
2926 * way of knowing if there was ever a valid configuration
2927 * for it and thus it's not safe to enable it. In this case,
2928 * just return the virtual address as physical.
2929 */
2930 cortex_a_mmu(target, &mmu_enabled);
2931 if (!mmu_enabled) {
2932 *phys = virt;
2933 return ERROR_OK;
2934 }
2935
2936 /* The MMU must be enabled in order to get a correct translation. */
2937 retval = cortex_a_mmu_modify(target, 1);
2938 if (retval != ERROR_OK)
2939 return retval;
/* Translate into a local uint32_t: casting 'phys' (a pointer to a 64-bit
 * target_addr_t) to uint32_t * would fill only half of the result, and
 * the wrong half on big-endian hosts. */
uint32_t pa;
retval = armv7a_mmu_translate_va_pa(target, (uint32_t)virt, &pa, 1);
if (retval == ERROR_OK)
*phys = pa;
return retval;
2942 }
2943
2944 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
2945 {
2946 struct target *target = get_current_target(CMD_CTX);
2947 struct armv7a_common *armv7a = target_to_armv7a(target);
2948
2949 return armv7a_handle_cache_info_command(CMD_CTX,
2950 &armv7a->armv7a_mmu.armv7a_cache);
2951 }
2952
2954 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
2955 {
2956 struct target *target = get_current_target(CMD_CTX);
2957 if (!target_was_examined(target)) {
2958 LOG_ERROR("target not examined yet");
2959 return ERROR_FAIL;
2960 }
2961
2962 return cortex_a_init_debug_access(target);
2963 }

2964 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
2965 {
2966 struct target *target = get_current_target(CMD_CTX);
2967 /* check target is an smp target */
2968 struct target_list *head;
2969 struct target *curr;
2970 head = target->head;
2971 target->smp = 0;
2972 if (head != (struct target_list *)NULL) {
2973 while (head != (struct target_list *)NULL) {
2974 curr = head->target;
2975 curr->smp = 0;
2976 head = head->next;
2977 }
2978 /* fixes the target display to the debugger */
2979 target->gdb_service->target = target;
2980 }
2981 return ERROR_OK;
2982 }
2983
2984 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
2985 {
2986 struct target *target = get_current_target(CMD_CTX);
2987 struct target_list *head;
2988 struct target *curr;
2989 head = target->head;
2990 if (head != (struct target_list *)NULL) {
2991 target->smp = 1;
2992 while (head != (struct target_list *)NULL) {
2993 curr = head->target;
2994 curr->smp = 1;
2995 head = head->next;
2996 }
2997 }
2998 return ERROR_OK;
2999 }
3000
3001 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3002 {
3003 struct target *target = get_current_target(CMD_CTX);
3004 int retval = ERROR_OK;
3005 struct target_list *head;
3006 head = target->head;
3007 if (head != (struct target_list *)NULL) {
3008 if (CMD_ARGC == 1) {
3009 int coreid = 0;
3010 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3011 if (ERROR_OK != retval)
3012 return retval;
3013 target->gdb_service->core[1] = coreid;
3015 }
3016 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32,
3017 target->gdb_service->core[0], target->gdb_service->core[1]);
3018 }
3019 return ERROR_OK;
3020 }
3021
3022 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3023 {
3024 struct target *target = get_current_target(CMD_CTX);
3025 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3026
3027 static const Jim_Nvp nvp_maskisr_modes[] = {
3028 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3029 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3030 { .name = NULL, .value = -1 },
3031 };
3032 const Jim_Nvp *n;
3033
3034 if (CMD_ARGC > 0) {
3035 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3036 if (n->name == NULL) {
3037 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3038 return ERROR_COMMAND_SYNTAX_ERROR;
3039 }
3040
3041 cortex_a->isrmasking_mode = n->value;
3042 }
3043
3044 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3045 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3046
3047 return ERROR_OK;
3048 }
3049
3050 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3051 {
3052 struct target *target = get_current_target(CMD_CTX);
3053 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3054
3055 static const Jim_Nvp nvp_dacrfixup_modes[] = {
3056 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3057 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3058 { .name = NULL, .value = -1 },
3059 };
3060 const Jim_Nvp *n;
3061
3062 if (CMD_ARGC > 0) {
3063 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3064 if (n->name == NULL)
3065 return ERROR_COMMAND_SYNTAX_ERROR;
3066 cortex_a->dacrfixup_mode = n->value;
3067
3068 }
3069
3070 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3071 command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3072
3073 return ERROR_OK;
3074 }
3075
3076 static const struct command_registration cortex_a_exec_command_handlers[] = {
3077 {
3078 .name = "cache_info",
3079 .handler = cortex_a_handle_cache_info_command,
3080 .mode = COMMAND_EXEC,
3081 .help = "display information about target caches",
3082 .usage = "",
3083 },
3084 {
3085 .name = "dbginit",
3086 .handler = cortex_a_handle_dbginit_command,
3087 .mode = COMMAND_EXEC,
3088 .help = "Initialize core debug",
3089 .usage = "",
3090 },
{
.name = "smp_off",
.handler = cortex_a_handle_smp_off_command,
.mode = COMMAND_EXEC,
.help = "Stop smp handling",
.usage = "",
},
3096 {
3097 .name = "smp_on",
3098 .handler = cortex_a_handle_smp_on_command,
3099 .mode = COMMAND_EXEC,
3100 .help = "Restart smp handling",
3101 .usage = "",
3102 },
3103 {
3104 .name = "smp_gdb",
3105 .handler = cortex_a_handle_smp_gdb_command,
3106 .mode = COMMAND_EXEC,
3107 .help = "display/fix current core played to gdb",
3108 .usage = "",
3109 },
3110 {
3111 .name = "maskisr",
3112 .handler = handle_cortex_a_mask_interrupts_command,
3113 .mode = COMMAND_ANY,
3114 .help = "mask cortex_a interrupts",
3115 .usage = "['on'|'off']",
3116 },
3117 {
3118 .name = "dacrfixup",
3119 .handler = handle_cortex_a_dacrfixup_command,
3120 .mode = COMMAND_ANY,
3121 .help = "set domain access control (DACR) to all-manager "
3122 "on memory access",
3123 .usage = "['on'|'off']",
3124 },
3125 {
3126 .chain = armv7a_mmu_command_handlers,
3127 },
3128
3129 COMMAND_REGISTRATION_DONE
3130 };
3131 static const struct command_registration cortex_a_command_handlers[] = {
3132 {
3133 .chain = arm_command_handlers,
3134 },
3135 {
3136 .chain = armv7a_command_handlers,
3137 },
3138 {
3139 .name = "cortex_a",
3140 .mode = COMMAND_ANY,
3141 .help = "Cortex-A command group",
3142 .usage = "",
3143 .chain = cortex_a_exec_command_handlers,
3144 },
3145 COMMAND_REGISTRATION_DONE
3146 };
3147
3148 struct target_type cortexa_target = {
3149 .name = "cortex_a",
3150 .deprecated_name = "cortex_a8",
3151
3152 .poll = cortex_a_poll,
3153 .arch_state = armv7a_arch_state,
3154
3155 .halt = cortex_a_halt,
3156 .resume = cortex_a_resume,
3157 .step = cortex_a_step,
3158
3159 .assert_reset = cortex_a_assert_reset,
3160 .deassert_reset = cortex_a_deassert_reset,
3161
3162 /* REVISIT allow exporting VFP3 registers ... */
3163 .get_gdb_reg_list = arm_get_gdb_reg_list,
3164
3165 .read_memory = cortex_a_read_memory,
3166 .write_memory = cortex_a_write_memory,
3167
3168 .read_buffer = cortex_a_read_buffer,
3169 .write_buffer = cortex_a_write_buffer,
3170
3171 .checksum_memory = arm_checksum_memory,
3172 .blank_check_memory = arm_blank_check_memory,
3173
3174 .run_algorithm = armv4_5_run_algorithm,
3175
3176 .add_breakpoint = cortex_a_add_breakpoint,
3177 .add_context_breakpoint = cortex_a_add_context_breakpoint,
3178 .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3179 .remove_breakpoint = cortex_a_remove_breakpoint,
3180 .add_watchpoint = NULL,
3181 .remove_watchpoint = NULL,
3182
3183 .commands = cortex_a_command_handlers,
3184 .target_create = cortex_a_target_create,
3185 .target_jim_configure = adiv5_jim_configure,
3186 .init_target = cortex_a_init_target,
3187 .examine = cortex_a_examine,
3188 .deinit_target = cortex_a_deinit_target,
3189
3190 .read_phys_memory = cortex_a_read_phys_memory,
3191 .write_phys_memory = cortex_a_write_phys_memory,
3192 .mmu = cortex_a_mmu,
3193 .virt2phys = cortex_a_virt2phys,
3194 };
3195
3196 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3197 {
3198 .name = "dbginit",
3199 .handler = cortex_a_handle_dbginit_command,
3200 .mode = COMMAND_EXEC,
3201 .help = "Initialize core debug",
3202 .usage = "",
3203 },
3204 {
3205 .name = "maskisr",
3206 .handler = handle_cortex_a_mask_interrupts_command,
3207 .mode = COMMAND_EXEC,
3208 .help = "mask cortex_r4 interrupts",
3209 .usage = "['on'|'off']",
3210 },
3211
3212 COMMAND_REGISTRATION_DONE
3213 };
3214 static const struct command_registration cortex_r4_command_handlers[] = {
3215 {
3216 .chain = arm_command_handlers,
3217 },
3218 {
3219 .name = "cortex_r4",
3220 .mode = COMMAND_ANY,
3221 .help = "Cortex-R4 command group",
3222 .usage = "",
3223 .chain = cortex_r4_exec_command_handlers,
3224 },
3225 COMMAND_REGISTRATION_DONE
3226 };
3227
3228 struct target_type cortexr4_target = {
3229 .name = "cortex_r4",
3230
3231 .poll = cortex_a_poll,
3232 .arch_state = armv7a_arch_state,
3233
3234 .halt = cortex_a_halt,
3235 .resume = cortex_a_resume,
3236 .step = cortex_a_step,
3237
3238 .assert_reset = cortex_a_assert_reset,
3239 .deassert_reset = cortex_a_deassert_reset,
3240
3241 /* REVISIT allow exporting VFP3 registers ... */
3242 .get_gdb_reg_list = arm_get_gdb_reg_list,
3243
3244 .read_memory = cortex_a_read_phys_memory,
3245 .write_memory = cortex_a_write_phys_memory,
3246
3247 .checksum_memory = arm_checksum_memory,
3248 .blank_check_memory = arm_blank_check_memory,
3249
3250 .run_algorithm = armv4_5_run_algorithm,
3251
3252 .add_breakpoint = cortex_a_add_breakpoint,
3253 .add_context_breakpoint = cortex_a_add_context_breakpoint,
3254 .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3255 .remove_breakpoint = cortex_a_remove_breakpoint,
3256 .add_watchpoint = NULL,
3257 .remove_watchpoint = NULL,
3258
3259 .commands = cortex_r4_command_handlers,
3260 .target_create = cortex_r4_target_create,
3261 .target_jim_configure = adiv5_jim_configure,
3262 .init_target = cortex_a_init_target,
3263 .examine = cortex_a_examine,
3264 .deinit_target = cortex_a_deinit_target,
3265 };