jtag: linuxgpiod: drop extra parenthesis
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex-R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 * *
39 * Cortex-A8(tm) TRM, ARM DDI 0344H *
40 * Cortex-A9(tm) TRM, ARM DDI 0407F *
41 * Cortex-R4(tm) TRM, ARM DDI 0363E *
42 * Cortex-A15(tm)TRM, ARM DDI 0438C *
43 * *
44 ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "armv7a_mmu.h"
54 #include "target_request.h"
55 #include "target_type.h"
56 #include "arm_opcodes.h"
57 #include "arm_semihosting.h"
58 #include "jtag/interface.h"
59 #include "transport/transport.h"
60 #include "smp.h"
61 #include <helper/time_support.h>
62
63 static int cortex_a_poll(struct target *target);
64 static int cortex_a_debug_entry(struct target *target);
65 static int cortex_a_restore_context(struct target *target, bool bpwp);
66 static int cortex_a_set_breakpoint(struct target *target,
67 struct breakpoint *breakpoint, uint8_t matchmode);
68 static int cortex_a_set_context_breakpoint(struct target *target,
69 struct breakpoint *breakpoint, uint8_t matchmode);
70 static int cortex_a_set_hybrid_breakpoint(struct target *target,
71 struct breakpoint *breakpoint);
72 static int cortex_a_unset_breakpoint(struct target *target,
73 struct breakpoint *breakpoint);
74 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
75 uint32_t value, uint32_t *dscr);
76 static int cortex_a_mmu(struct target *target, int *enabled);
77 static int cortex_a_mmu_modify(struct target *target, int enable);
78 static int cortex_a_virt2phys(struct target *target,
79 target_addr_t virt, target_addr_t *phys);
80 static int cortex_a_read_cpu_memory(struct target *target,
81 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
82
83
84 /* restore cp15_control_reg at resume */
85 static int cortex_a_restore_cp15_control_reg(struct target *target)
86 {
87 int retval = ERROR_OK;
88 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
89 struct armv7a_common *armv7a = target_to_armv7a(target);
90
91 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
92 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
93 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
94 retval = armv7a->arm.mcr(target, 15,
95 0, 0, /* op1, op2 */
96 1, 0, /* CRn, CRm */
97 cortex_a->cp15_control_reg);
98 }
99 return retval;
100 }
101
102 /*
103 * Set up ARM core for memory access.
104 * If !phys_access, switch to SVC mode and make sure MMU is on
105 * If phys_access, switch off mmu
106 */
107 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
108 {
109 struct armv7a_common *armv7a = target_to_armv7a(target);
110 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
111 int mmu_enabled = 0;
112
113 if (phys_access == 0) {
114 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
115 cortex_a_mmu(target, &mmu_enabled);
116 if (mmu_enabled)
117 cortex_a_mmu_modify(target, 1);
118 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
119 /* overwrite DACR to all-manager */
120 armv7a->arm.mcr(target, 15,
121 0, 0, 3, 0,
122 0xFFFFFFFF);
123 }
124 } else {
125 cortex_a_mmu(target, &mmu_enabled);
126 if (mmu_enabled)
127 cortex_a_mmu_modify(target, 0);
128 }
129 return ERROR_OK;
130 }
131
132 /*
133 * Restore ARM core after memory access.
134 * If !phys_access, switch to previous mode
135 * If phys_access, restore MMU setting
136 */
137 static int cortex_a_post_memaccess(struct target *target, int phys_access)
138 {
139 struct armv7a_common *armv7a = target_to_armv7a(target);
140 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
141
142 if (phys_access == 0) {
143 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
144 /* restore */
145 armv7a->arm.mcr(target, 15,
146 0, 0, 3, 0,
147 cortex_a->cp15_dacr_reg);
148 }
149 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
150 } else {
151 int mmu_enabled = 0;
152 cortex_a_mmu(target, &mmu_enabled);
153 if (mmu_enabled)
154 cortex_a_mmu_modify(target, 1);
155 }
156 return ERROR_OK;
157 }
158
159
160 /* modify cp15_control_reg in order to enable or disable mmu for :
161 * - virt2phys address conversion
162 * - read or write memory in phys or virt address */
163 static int cortex_a_mmu_modify(struct target *target, int enable)
164 {
165 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
166 struct armv7a_common *armv7a = target_to_armv7a(target);
167 int retval = ERROR_OK;
168 int need_write = 0;
169
170 if (enable) {
171 /* if mmu enabled at target stop and mmu not enable */
172 if (!(cortex_a->cp15_control_reg & 0x1U)) {
173 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
174 return ERROR_FAIL;
175 }
176 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
177 cortex_a->cp15_control_reg_curr |= 0x1U;
178 need_write = 1;
179 }
180 } else {
181 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
182 cortex_a->cp15_control_reg_curr &= ~0x1U;
183 need_write = 1;
184 }
185 }
186
187 if (need_write) {
188 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
189 enable ? "enable mmu" : "disable mmu",
190 cortex_a->cp15_control_reg_curr);
191
192 retval = armv7a->arm.mcr(target, 15,
193 0, 0, /* op1, op2 */
194 1, 0, /* CRn, CRm */
195 cortex_a->cp15_control_reg_curr);
196 }
197 return retval;
198 }
199
/*
 * Cortex-A Basic debug access, very low level, assumes state is saved.
 * Unlocks and configures the memory-mapped debug registers, then polls
 * the target so cached state matches reality.
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t dscr;
	int retval;

	/* lock memory-mapped access to debug registers to prevent
	 * software interference */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* the three writes above were only queued; execute them now */
	retval = dap_run(armv7a->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Enable halt for breakpoint, watchpoint and vector catch */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
249
250 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
251 {
252 /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
253 * Writes final value of DSCR into *dscr. Pass force to force always
254 * reading DSCR at least once. */
255 struct armv7a_common *armv7a = target_to_armv7a(target);
256 int retval;
257
258 if (force) {
259 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
260 armv7a->debug_base + CPUDBG_DSCR, dscr);
261 if (retval != ERROR_OK) {
262 LOG_ERROR("Could not read DSCR register");
263 return retval;
264 }
265 }
266
267 retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
268 if (retval != ERROR_OK)
269 LOG_ERROR("Error waiting for InstrCompl=1");
270 return retval;
271 }
272
273 /* To reduce needless round-trips, pass in a pointer to the current
274 * DSCR value. Initialize it to zero if you just need to know the
275 * value on return from this function; or DSCR_INSTR_COMP if you
276 * happen to know that no instruction is pending.
277 */
278 static int cortex_a_exec_opcode(struct target *target,
279 uint32_t opcode, uint32_t *dscr_p)
280 {
281 uint32_t dscr;
282 int retval;
283 struct armv7a_common *armv7a = target_to_armv7a(target);
284
285 dscr = dscr_p ? *dscr_p : 0;
286
287 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
288
289 /* Wait for InstrCompl bit to be set */
290 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
291 if (retval != ERROR_OK)
292 return retval;
293
294 retval = mem_ap_write_u32(armv7a->debug_ap,
295 armv7a->debug_base + CPUDBG_ITR, opcode);
296 if (retval != ERROR_OK)
297 return retval;
298
299 /* Wait for InstrCompl bit to be set */
300 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
301 if (retval != ERROR_OK) {
302 LOG_ERROR("Error waiting for cortex_a_exec_opcode");
303 return retval;
304 }
305
306 if (dscr_p)
307 *dscr_p = dscr;
308
309 return retval;
310 }
311
312 /* Write to memory mapped registers directly with no cache or mmu handling */
313 static int cortex_a_dap_write_memap_register_u32(struct target *target,
314 uint32_t address,
315 uint32_t value)
316 {
317 int retval;
318 struct armv7a_common *armv7a = target_to_armv7a(target);
319
320 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
321
322 return retval;
323 }
324
325 /*
326 * Cortex-A implementation of Debug Programmer's Model
327 *
328 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
329 * so there's no need to poll for it before executing an instruction.
330 *
331 * NOTE that in several of these cases the "stall" mode might be useful.
332 * It'd let us queue a few operations together... prepare/finish might
333 * be the places to enable/disable that mode.
334 */
335
/* Convert a generic DPM pointer back to its enclosing Cortex-A instance. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
340
341 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
342 {
343 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
344 return mem_ap_write_u32(a->armv7a_common.debug_ap,
345 a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
346 }
347
/* Read one 32-bit word the core pushed into the DCC (via DTRTX).
 *
 * Waits for DSCR.DTRTXfull before reading.  *dscr_p, when non-NULL,
 * provides the cached DSCR value on entry and receives the updated
 * value on success.
 */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
			DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for read dcc");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
376
/* DPM "prepare" hook: establish the invariant that DSCR_INSTR_COMP is
 * set (no instruction pending) before a sequence of DPM operations, and
 * drain a stale DCC RX word if one is unexpectedly pending. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for dpm prepare");
		return retval;
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by reading it into R0 */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
404
/* DPM "finish" hook: no per-transaction cleanup is needed on Cortex-A. */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
410
/* DPM hook: load DCC with 'data', then execute 'opcode', which is
 * expected to consume the value from DCC. */
static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;	/* no instruction pending */

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
}
427
/* DPM hook: load DCC with 'data', move it into R0, then execute
 * 'opcode', which is expected to take its operand from R0.
 * NOTE: clobbers the core's R0 (DPM framework saves/restores it). */
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;	/* no instruction pending */
	int retval;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
455
/* DPM hook: resynchronize the pipeline after modifying execution state
 * in CPSR, via a CP15 "prefetch flush" (ISB equivalent). */
static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;	/* no instruction pending */

	/* "Prefetch flush" after modifying execution status in CPSR */
	return cortex_a_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}
466
/* DPM hook: execute 'opcode' (expected to write its result to DCC),
 * then read that result back into *data. */
static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;	/* no instruction pending */

	/* the opcode, writing data to DCC */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
484
485
/* DPM hook: execute 'opcode' (expected to leave its result in R0),
 * move R0 into DCC, then read it back into *data.
 * NOTE: clobbers the core's R0 (DPM framework saves/restores it). */
static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;	/* no instruction pending */
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
511
/* DPM hook: program one hardware breakpoint (index 0..15) or
 * watchpoint (index 16..31) unit with a value/control register pair.
 * NOTE: the switch uses the GCC case-range extension. */
static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	/* each unit's registers are 4 bytes apart */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
547
548 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
549 {
550 struct cortex_a_common *a = dpm_to_a(dpm);
551 uint32_t cr;
552
553 switch (index_t) {
554 case 0 ... 15:
555 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
556 break;
557 case 16 ... 31:
558 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
559 index_t -= 16;
560 break;
561 default:
562 return ERROR_FAIL;
563 }
564 cr += 4 * index_t;
565
566 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
567
568 /* clear control register */
569 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
570 }
571
/* Wire up the Cortex-A implementations of the generic ARM DPM hooks and
 * run the common DPM setup and initialization.  'didr' is the Debug ID
 * Register value passed through to the generic layer. */
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
599 static struct target *get_cortex_a(struct target *target, int32_t coreid)
600 {
601 struct target_list *head;
602 struct target *curr;
603
604 head = target->head;
605 while (head != (struct target_list *)NULL) {
606 curr = head->target;
607 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
608 return curr;
609 head = head->next;
610 }
611 return target;
612 }
613 static int cortex_a_halt(struct target *target);
614
615 static int cortex_a_halt_smp(struct target *target)
616 {
617 int retval = 0;
618 struct target_list *head;
619 struct target *curr;
620 head = target->head;
621 while (head != (struct target_list *)NULL) {
622 curr = head->target;
623 if ((curr != target) && (curr->state != TARGET_HALTED)
624 && target_was_examined(curr))
625 retval += cortex_a_halt(curr);
626 head = head->next;
627 }
628 return retval;
629 }
630
/* After one core of an SMP group halted: halt every other core, then
 * re-poll them so their cached state is refreshed.  The core currently
 * serving gdb is polled last, because its poll is what reports the halt
 * event to gdb. */
static int update_halt_gdb(struct target *target)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;
	int retval = 0;

	/* first halt of the group: claim the gdb service for this core */
	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += cortex_a_halt_smp(target);
	}

	if (target->gdb_service)
		gdb_target = target->gdb_service->target;

	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* Skip gdb_target; it alerts GDB so has to be polled as last one */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in cortex_a_poll() */
		curr->smp = 0;
		cortex_a_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		cortex_a_poll(gdb_target);
	return retval;
}
672
673 /*
674 * Cortex-A Run control
675 */
676
/* Poll the target: read DSCR, detect halt/run transitions, and run
 * debug entry plus event callbacks on a fresh halt.  Also implements
 * the gdb SMP core-toggle dance (fake halt event after a core switch). */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for debug_entry and reporting */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;

			retval = cortex_a_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp) {
				retval = update_halt_gdb(target);
				if (retval != ERROR_OK)
					return retval;
			}

			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
			} else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
				/* semihosting may service the halt and resume the
				 * core itself; in that case no HALTED event is sent */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
733
734 static int cortex_a_halt(struct target *target)
735 {
736 int retval;
737 uint32_t dscr;
738 struct armv7a_common *armv7a = target_to_armv7a(target);
739
740 /*
741 * Tell the core to be halted by writing DRCR with 0x1
742 * and then wait for the core to be halted.
743 */
744 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
745 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
746 if (retval != ERROR_OK)
747 return retval;
748
749 dscr = 0; /* force read of dscr */
750 retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
751 DSCR_CORE_HALTED, &dscr);
752 if (retval != ERROR_OK) {
753 LOG_ERROR("Error waiting for halt");
754 return retval;
755 }
756
757 target->debug_reason = DBG_REASON_DBGRQ;
758
759 return ERROR_OK;
760 }
761
762 static int cortex_a_internal_restore(struct target *target, int current,
763 target_addr_t *address, int handle_breakpoints, int debug_execution)
764 {
765 struct armv7a_common *armv7a = target_to_armv7a(target);
766 struct arm *arm = &armv7a->arm;
767 int retval;
768 uint32_t resume_pc;
769
770 if (!debug_execution)
771 target_free_all_working_areas(target);
772
773 #if 0
774 if (debug_execution) {
775 /* Disable interrupts */
776 /* We disable interrupts in the PRIMASK register instead of
777 * masking with C_MASKINTS,
778 * This is probably the same issue as Cortex-M3 Errata 377493:
779 * C_MASKINTS in parallel with disabled interrupts can cause
780 * local faults to not be taken. */
781 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
782 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
783 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;
784
785 /* Make sure we are in Thumb mode */
786 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
787 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
788 32) | (1 << 24));
789 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = true;
790 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = true;
791 }
792 #endif
793
794 /* current = 1: continue on current pc, otherwise continue at <address> */
795 resume_pc = buf_get_u32(arm->pc->value, 0, 32);
796 if (!current)
797 resume_pc = *address;
798 else
799 *address = resume_pc;
800
801 /* Make sure that the Armv7 gdb thumb fixups does not
802 * kill the return address
803 */
804 switch (arm->core_state) {
805 case ARM_STATE_ARM:
806 resume_pc &= 0xFFFFFFFC;
807 break;
808 case ARM_STATE_THUMB:
809 case ARM_STATE_THUMB_EE:
810 /* When the return address is loaded into PC
811 * bit 0 must be 1 to stay in Thumb state
812 */
813 resume_pc |= 0x1;
814 break;
815 case ARM_STATE_JAZELLE:
816 LOG_ERROR("How do I resume into Jazelle state??");
817 return ERROR_FAIL;
818 case ARM_STATE_AARCH64:
819 LOG_ERROR("Shoudn't be in AARCH64 state");
820 return ERROR_FAIL;
821 }
822 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
823 buf_set_u32(arm->pc->value, 0, 32, resume_pc);
824 arm->pc->dirty = true;
825 arm->pc->valid = true;
826
827 /* restore dpm_mode at system halt */
828 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
829 /* called it now before restoring context because it uses cpu
830 * register r0 for restoring cp15 control register */
831 retval = cortex_a_restore_cp15_control_reg(target);
832 if (retval != ERROR_OK)
833 return retval;
834 retval = cortex_a_restore_context(target, handle_breakpoints);
835 if (retval != ERROR_OK)
836 return retval;
837 target->debug_reason = DBG_REASON_NOTHALTED;
838 target->state = TARGET_RUNNING;
839
840 /* registers are now invalid */
841 register_cache_invalidate(arm->core_cache);
842
843 #if 0
844 /* the front-end may request us not to handle breakpoints */
845 if (handle_breakpoints) {
846 /* Single step past breakpoint at current address */
847 breakpoint = breakpoint_find(target, resume_pc);
848 if (breakpoint) {
849 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
850 cortex_m3_unset_breakpoint(target, breakpoint);
851 cortex_m3_single_step_core(target);
852 cortex_m3_set_breakpoint(target, breakpoint);
853 }
854 }
855
856 #endif
857 return retval;
858 }
859
/* Kick the halted core back into execution and wait for DSCR to report
 * it restarted.  Complements cortex_a_internal_restore(). */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* disable ITR execution before restarting */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	/* request restart and clear sticky exception flags */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	dscr = 0;	/* force read of dscr */
	retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
			DSCR_CORE_RESTARTED, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for resume");
		return retval;
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
909
910 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
911 {
912 int retval = 0;
913 struct target_list *head;
914 struct target *curr;
915 target_addr_t address;
916 head = target->head;
917 while (head != (struct target_list *)NULL) {
918 curr = head->target;
919 if ((curr != target) && (curr->state != TARGET_RUNNING)
920 && target_was_examined(curr)) {
921 /* resume current address , not in step mode */
922 retval += cortex_a_internal_restore(curr, 1, &address,
923 handle_breakpoints, 0);
924 retval += cortex_a_internal_restart(curr);
925 }
926 head = head->next;
927
928 }
929 return retval;
930 }
931
/* Resume execution, honoring the gdb SMP core-toggle protocol and
 * resuming the whole SMP group when applicable.
 *
 * current != 0: resume at the current pc, else at 'address'.
 * NOTE(review): return values of cortex_a_internal_restore() and
 * cortex_a_internal_restart() for the primary core are ignored here —
 * matches long-standing behavior, but worth confirming upstream. */
static int cortex_a_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	/* dummy resume for smp toggle in order to reduce gdb impact */
	if ((target->smp) && (target->gdb_service->core[1] != -1)) {
		/* simulate a start and halt of target */
		target->gdb_service->target = NULL;
		target->gdb_service->core[0] = target->gdb_service->core[1];
		/* fake resume at next poll we play the target core[1], see poll*/
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		return 0;
	}
	cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
	if (target->smp) {
		target->gdb_service->core[0] = -1;
		retval = cortex_a_restore_smp(target, handle_breakpoints);
		if (retval != ERROR_OK)
			return retval;
	}
	cortex_a_internal_restart(target);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
	}

	return ERROR_OK;
}
966
/* Runs right after a halt is detected: enable ITR execution, determine
 * the debug reason from DSCR, record the watchpoint fault address if
 * applicable, read the core registers, and invoke the post-debug-entry
 * hook (which caches CP15 state). */
static int cortex_a_debug_entry(struct target *target)
{
	uint32_t dscr;
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason (sets target->debug_reason from the
	 * DSCR value cached by cortex_a_poll) */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* First load register accessible through core debug port */
	retval = arm_dpm_read_current_registers(&armv7a->dpm);
	if (retval != ERROR_OK)
		return retval;

	if (arm->spsr) {
		/* read SPSR */
		retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
		if (retval != ERROR_OK)
			return retval;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1045
1046 static int cortex_a_post_debug_entry(struct target *target)
1047 {
1048 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1049 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1050 int retval;
1051
1052 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1053 retval = armv7a->arm.mrc(target, 15,
1054 0, 0, /* op1, op2 */
1055 1, 0, /* CRn, CRm */
1056 &cortex_a->cp15_control_reg);
1057 if (retval != ERROR_OK)
1058 return retval;
1059 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1060 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1061
1062 if (!armv7a->is_armv7r)
1063 armv7a_read_ttbcr(target);
1064
1065 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1066 armv7a_identify_cache(target);
1067
1068 if (armv7a->is_armv7r) {
1069 armv7a->armv7a_mmu.mmu_enabled = 0;
1070 } else {
1071 armv7a->armv7a_mmu.mmu_enabled =
1072 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1073 }
1074 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1075 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1076 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1077 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1078 cortex_a->curr_mode = armv7a->arm.core_mode;
1079
1080 /* switch to SVC mode to read DACR */
1081 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1082 armv7a->arm.mrc(target, 15,
1083 0, 0, 3, 0,
1084 &cortex_a->cp15_dacr_reg);
1085
1086 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1087 cortex_a->cp15_dacr_reg);
1088
1089 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1090 return ERROR_OK;
1091 }
1092
1093 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1094 {
1095 struct armv7a_common *armv7a = target_to_armv7a(target);
1096 uint32_t dscr;
1097
1098 /* Read DSCR */
1099 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1100 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1101 if (ERROR_OK != retval)
1102 return retval;
1103
1104 /* clear bitfield */
1105 dscr &= ~bit_mask;
1106 /* put new value */
1107 dscr |= value & bit_mask;
1108
1109 /* write new DSCR */
1110 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1111 armv7a->debug_base + CPUDBG_DSCR, dscr);
1112 return retval;
1113 }
1114
1115 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1116 int handle_breakpoints)
1117 {
1118 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1119 struct armv7a_common *armv7a = target_to_armv7a(target);
1120 struct arm *arm = &armv7a->arm;
1121 struct breakpoint *breakpoint = NULL;
1122 struct breakpoint stepbreakpoint;
1123 struct reg *r;
1124 int retval;
1125
1126 if (target->state != TARGET_HALTED) {
1127 LOG_WARNING("target not halted");
1128 return ERROR_TARGET_NOT_HALTED;
1129 }
1130
1131 /* current = 1: continue on current pc, otherwise continue at <address> */
1132 r = arm->pc;
1133 if (!current)
1134 buf_set_u32(r->value, 0, 32, address);
1135 else
1136 address = buf_get_u32(r->value, 0, 32);
1137
1138 /* The front-end may request us not to handle breakpoints.
1139 * But since Cortex-A uses breakpoint for single step,
1140 * we MUST handle breakpoints.
1141 */
1142 handle_breakpoints = 1;
1143 if (handle_breakpoints) {
1144 breakpoint = breakpoint_find(target, address);
1145 if (breakpoint)
1146 cortex_a_unset_breakpoint(target, breakpoint);
1147 }
1148
1149 /* Setup single step breakpoint */
1150 stepbreakpoint.address = address;
1151 stepbreakpoint.asid = 0;
1152 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1153 ? 2 : 4;
1154 stepbreakpoint.type = BKPT_HARD;
1155 stepbreakpoint.set = 0;
1156
1157 /* Disable interrupts during single step if requested */
1158 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1159 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1160 if (ERROR_OK != retval)
1161 return retval;
1162 }
1163
1164 /* Break on IVA mismatch */
1165 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1166
1167 target->debug_reason = DBG_REASON_SINGLESTEP;
1168
1169 retval = cortex_a_resume(target, 1, address, 0, 0);
1170 if (retval != ERROR_OK)
1171 return retval;
1172
1173 int64_t then = timeval_ms();
1174 while (target->state != TARGET_HALTED) {
1175 retval = cortex_a_poll(target);
1176 if (retval != ERROR_OK)
1177 return retval;
1178 if (target->state == TARGET_HALTED)
1179 break;
1180 if (timeval_ms() > then + 1000) {
1181 LOG_ERROR("timeout waiting for target halt");
1182 return ERROR_FAIL;
1183 }
1184 }
1185
1186 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1187
1188 /* Re-enable interrupts if they were disabled */
1189 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1190 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1191 if (ERROR_OK != retval)
1192 return retval;
1193 }
1194
1195
1196 target->debug_reason = DBG_REASON_BREAKPOINT;
1197
1198 if (breakpoint)
1199 cortex_a_set_breakpoint(target, breakpoint, 0);
1200
1201 if (target->state != TARGET_HALTED)
1202 LOG_DEBUG("target stepped");
1203
1204 return ERROR_OK;
1205 }
1206
1207 static int cortex_a_restore_context(struct target *target, bool bpwp)
1208 {
1209 struct armv7a_common *armv7a = target_to_armv7a(target);
1210
1211 LOG_DEBUG(" ");
1212
1213 if (armv7a->pre_restore_context)
1214 armv7a->pre_restore_context(target);
1215
1216 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1217 }
1218
1219 /*
1220 * Cortex-A Breakpoint and watchpoint functions
1221 */
1222
1223 /* Setup hardware Breakpoint Register Pair */
1224 static int cortex_a_set_breakpoint(struct target *target,
1225 struct breakpoint *breakpoint, uint8_t matchmode)
1226 {
1227 int retval;
1228 int brp_i = 0;
1229 uint32_t control;
1230 uint8_t byte_addr_select = 0x0F;
1231 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1232 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1233 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1234
1235 if (breakpoint->set) {
1236 LOG_WARNING("breakpoint already set");
1237 return ERROR_OK;
1238 }
1239
1240 if (breakpoint->type == BKPT_HARD) {
1241 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1242 brp_i++;
1243 if (brp_i >= cortex_a->brp_num) {
1244 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1245 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1246 }
1247 breakpoint->set = brp_i + 1;
1248 if (breakpoint->length == 2)
1249 byte_addr_select = (3 << (breakpoint->address & 0x02));
1250 control = ((matchmode & 0x7) << 20)
1251 | (byte_addr_select << 5)
1252 | (3 << 1) | 1;
1253 brp_list[brp_i].used = 1;
1254 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1255 brp_list[brp_i].control = control;
1256 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1257 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1258 brp_list[brp_i].value);
1259 if (retval != ERROR_OK)
1260 return retval;
1261 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1262 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1263 brp_list[brp_i].control);
1264 if (retval != ERROR_OK)
1265 return retval;
1266 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1267 brp_list[brp_i].control,
1268 brp_list[brp_i].value);
1269 } else if (breakpoint->type == BKPT_SOFT) {
1270 uint8_t code[4];
1271 /* length == 2: Thumb breakpoint */
1272 if (breakpoint->length == 2)
1273 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1274 else
1275 /* length == 3: Thumb-2 breakpoint, actual encoding is
1276 * a regular Thumb BKPT instruction but we replace a
1277 * 32bit Thumb-2 instruction, so fix-up the breakpoint
1278 * length
1279 */
1280 if (breakpoint->length == 3) {
1281 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1282 breakpoint->length = 4;
1283 } else
1284 /* length == 4, normal ARM breakpoint */
1285 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1286
1287 retval = target_read_memory(target,
1288 breakpoint->address & 0xFFFFFFFE,
1289 breakpoint->length, 1,
1290 breakpoint->orig_instr);
1291 if (retval != ERROR_OK)
1292 return retval;
1293
1294 /* make sure data cache is cleaned & invalidated down to PoC */
1295 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1296 armv7a_cache_flush_virt(target, breakpoint->address,
1297 breakpoint->length);
1298 }
1299
1300 retval = target_write_memory(target,
1301 breakpoint->address & 0xFFFFFFFE,
1302 breakpoint->length, 1, code);
1303 if (retval != ERROR_OK)
1304 return retval;
1305
1306 /* update i-cache at breakpoint location */
1307 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1308 breakpoint->length);
1309 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1310 breakpoint->length);
1311
1312 breakpoint->set = 0x11; /* Any nice value but 0 */
1313 }
1314
1315 return ERROR_OK;
1316 }
1317
1318 static int cortex_a_set_context_breakpoint(struct target *target,
1319 struct breakpoint *breakpoint, uint8_t matchmode)
1320 {
1321 int retval = ERROR_FAIL;
1322 int brp_i = 0;
1323 uint32_t control;
1324 uint8_t byte_addr_select = 0x0F;
1325 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1326 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1327 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1328
1329 if (breakpoint->set) {
1330 LOG_WARNING("breakpoint already set");
1331 return retval;
1332 }
1333 /*check available context BRPs*/
1334 while ((brp_list[brp_i].used ||
1335 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1336 brp_i++;
1337
1338 if (brp_i >= cortex_a->brp_num) {
1339 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1340 return ERROR_FAIL;
1341 }
1342
1343 breakpoint->set = brp_i + 1;
1344 control = ((matchmode & 0x7) << 20)
1345 | (byte_addr_select << 5)
1346 | (3 << 1) | 1;
1347 brp_list[brp_i].used = 1;
1348 brp_list[brp_i].value = (breakpoint->asid);
1349 brp_list[brp_i].control = control;
1350 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1351 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1352 brp_list[brp_i].value);
1353 if (retval != ERROR_OK)
1354 return retval;
1355 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1356 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1357 brp_list[brp_i].control);
1358 if (retval != ERROR_OK)
1359 return retval;
1360 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1361 brp_list[brp_i].control,
1362 brp_list[brp_i].value);
1363 return ERROR_OK;
1364
1365 }
1366
1367 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1368 {
1369 int retval = ERROR_FAIL;
1370 int brp_1 = 0; /* holds the contextID pair */
1371 int brp_2 = 0; /* holds the IVA pair */
1372 uint32_t control_CTX, control_IVA;
1373 uint8_t CTX_byte_addr_select = 0x0F;
1374 uint8_t IVA_byte_addr_select = 0x0F;
1375 uint8_t CTX_machmode = 0x03;
1376 uint8_t IVA_machmode = 0x01;
1377 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1378 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1379 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1380
1381 if (breakpoint->set) {
1382 LOG_WARNING("breakpoint already set");
1383 return retval;
1384 }
1385 /*check available context BRPs*/
1386 while ((brp_list[brp_1].used ||
1387 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1388 brp_1++;
1389
1390 printf("brp(CTX) found num: %d\n", brp_1);
1391 if (brp_1 >= cortex_a->brp_num) {
1392 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1393 return ERROR_FAIL;
1394 }
1395
1396 while ((brp_list[brp_2].used ||
1397 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1398 brp_2++;
1399
1400 printf("brp(IVA) found num: %d\n", brp_2);
1401 if (brp_2 >= cortex_a->brp_num) {
1402 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1403 return ERROR_FAIL;
1404 }
1405
1406 breakpoint->set = brp_1 + 1;
1407 breakpoint->linked_BRP = brp_2;
1408 control_CTX = ((CTX_machmode & 0x7) << 20)
1409 | (brp_2 << 16)
1410 | (0 << 14)
1411 | (CTX_byte_addr_select << 5)
1412 | (3 << 1) | 1;
1413 brp_list[brp_1].used = 1;
1414 brp_list[brp_1].value = (breakpoint->asid);
1415 brp_list[brp_1].control = control_CTX;
1416 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1417 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1418 brp_list[brp_1].value);
1419 if (retval != ERROR_OK)
1420 return retval;
1421 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1422 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1423 brp_list[brp_1].control);
1424 if (retval != ERROR_OK)
1425 return retval;
1426
1427 control_IVA = ((IVA_machmode & 0x7) << 20)
1428 | (brp_1 << 16)
1429 | (IVA_byte_addr_select << 5)
1430 | (3 << 1) | 1;
1431 brp_list[brp_2].used = 1;
1432 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1433 brp_list[brp_2].control = control_IVA;
1434 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1435 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1436 brp_list[brp_2].value);
1437 if (retval != ERROR_OK)
1438 return retval;
1439 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1440 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1441 brp_list[brp_2].control);
1442 if (retval != ERROR_OK)
1443 return retval;
1444
1445 return ERROR_OK;
1446 }
1447
1448 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1449 {
1450 int retval;
1451 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1452 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1453 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1454
1455 if (!breakpoint->set) {
1456 LOG_WARNING("breakpoint not set");
1457 return ERROR_OK;
1458 }
1459
1460 if (breakpoint->type == BKPT_HARD) {
1461 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1462 int brp_i = breakpoint->set - 1;
1463 int brp_j = breakpoint->linked_BRP;
1464 if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1465 LOG_DEBUG("Invalid BRP number in breakpoint");
1466 return ERROR_OK;
1467 }
1468 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1469 brp_list[brp_i].control, brp_list[brp_i].value);
1470 brp_list[brp_i].used = 0;
1471 brp_list[brp_i].value = 0;
1472 brp_list[brp_i].control = 0;
1473 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1474 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1475 brp_list[brp_i].control);
1476 if (retval != ERROR_OK)
1477 return retval;
1478 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1479 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1480 brp_list[brp_i].value);
1481 if (retval != ERROR_OK)
1482 return retval;
1483 if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1484 LOG_DEBUG("Invalid BRP number in breakpoint");
1485 return ERROR_OK;
1486 }
1487 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1488 brp_list[brp_j].control, brp_list[brp_j].value);
1489 brp_list[brp_j].used = 0;
1490 brp_list[brp_j].value = 0;
1491 brp_list[brp_j].control = 0;
1492 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1493 + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1494 brp_list[brp_j].control);
1495 if (retval != ERROR_OK)
1496 return retval;
1497 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1498 + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1499 brp_list[brp_j].value);
1500 if (retval != ERROR_OK)
1501 return retval;
1502 breakpoint->linked_BRP = 0;
1503 breakpoint->set = 0;
1504 return ERROR_OK;
1505
1506 } else {
1507 int brp_i = breakpoint->set - 1;
1508 if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1509 LOG_DEBUG("Invalid BRP number in breakpoint");
1510 return ERROR_OK;
1511 }
1512 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1513 brp_list[brp_i].control, brp_list[brp_i].value);
1514 brp_list[brp_i].used = 0;
1515 brp_list[brp_i].value = 0;
1516 brp_list[brp_i].control = 0;
1517 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1518 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1519 brp_list[brp_i].control);
1520 if (retval != ERROR_OK)
1521 return retval;
1522 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1523 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1524 brp_list[brp_i].value);
1525 if (retval != ERROR_OK)
1526 return retval;
1527 breakpoint->set = 0;
1528 return ERROR_OK;
1529 }
1530 } else {
1531
1532 /* make sure data cache is cleaned & invalidated down to PoC */
1533 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1534 armv7a_cache_flush_virt(target, breakpoint->address,
1535 breakpoint->length);
1536 }
1537
1538 /* restore original instruction (kept in target endianness) */
1539 if (breakpoint->length == 4) {
1540 retval = target_write_memory(target,
1541 breakpoint->address & 0xFFFFFFFE,
1542 4, 1, breakpoint->orig_instr);
1543 if (retval != ERROR_OK)
1544 return retval;
1545 } else {
1546 retval = target_write_memory(target,
1547 breakpoint->address & 0xFFFFFFFE,
1548 2, 1, breakpoint->orig_instr);
1549 if (retval != ERROR_OK)
1550 return retval;
1551 }
1552
1553 /* update i-cache at breakpoint location */
1554 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1555 breakpoint->length);
1556 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1557 breakpoint->length);
1558 }
1559 breakpoint->set = 0;
1560
1561 return ERROR_OK;
1562 }
1563
1564 static int cortex_a_add_breakpoint(struct target *target,
1565 struct breakpoint *breakpoint)
1566 {
1567 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1568
1569 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1570 LOG_INFO("no hardware breakpoint available");
1571 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1572 }
1573
1574 if (breakpoint->type == BKPT_HARD)
1575 cortex_a->brp_num_available--;
1576
1577 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1578 }
1579
1580 static int cortex_a_add_context_breakpoint(struct target *target,
1581 struct breakpoint *breakpoint)
1582 {
1583 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1584
1585 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1586 LOG_INFO("no hardware breakpoint available");
1587 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1588 }
1589
1590 if (breakpoint->type == BKPT_HARD)
1591 cortex_a->brp_num_available--;
1592
1593 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1594 }
1595
1596 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1597 struct breakpoint *breakpoint)
1598 {
1599 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1600
1601 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1602 LOG_INFO("no hardware breakpoint available");
1603 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1604 }
1605
1606 if (breakpoint->type == BKPT_HARD)
1607 cortex_a->brp_num_available--;
1608
1609 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1610 }
1611
1612
1613 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1614 {
1615 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1616
1617 #if 0
1618 /* It is perfectly possible to remove breakpoints while the target is running */
1619 if (target->state != TARGET_HALTED) {
1620 LOG_WARNING("target not halted");
1621 return ERROR_TARGET_NOT_HALTED;
1622 }
1623 #endif
1624
1625 if (breakpoint->set) {
1626 cortex_a_unset_breakpoint(target, breakpoint);
1627 if (breakpoint->type == BKPT_HARD)
1628 cortex_a->brp_num_available++;
1629 }
1630
1631
1632 return ERROR_OK;
1633 }
1634
1635 /*
1636 * Cortex-A Reset functions
1637 */
1638
1639 static int cortex_a_assert_reset(struct target *target)
1640 {
1641 struct armv7a_common *armv7a = target_to_armv7a(target);
1642
1643 LOG_DEBUG(" ");
1644
1645 /* FIXME when halt is requested, make it work somehow... */
1646
1647 /* This function can be called in "target not examined" state */
1648
1649 /* Issue some kind of warm reset. */
1650 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1651 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1652 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1653 /* REVISIT handle "pulls" cases, if there's
1654 * hardware that needs them to work.
1655 */
1656
1657 /*
1658 * FIXME: fix reset when transport is SWD. This is a temporary
1659 * work-around for release v0.10 that is not intended to stay!
1660 */
1661 if (transport_is_swd() ||
1662 (target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
1663 adapter_assert_reset();
1664
1665 } else {
1666 LOG_ERROR("%s: how to reset?", target_name(target));
1667 return ERROR_FAIL;
1668 }
1669
1670 /* registers are now invalid */
1671 if (target_was_examined(target))
1672 register_cache_invalidate(armv7a->arm.core_cache);
1673
1674 target->state = TARGET_RESET;
1675
1676 return ERROR_OK;
1677 }
1678
1679 static int cortex_a_deassert_reset(struct target *target)
1680 {
1681 int retval;
1682
1683 LOG_DEBUG(" ");
1684
1685 /* be certain SRST is off */
1686 adapter_deassert_reset();
1687
1688 if (target_was_examined(target)) {
1689 retval = cortex_a_poll(target);
1690 if (retval != ERROR_OK)
1691 return retval;
1692 }
1693
1694 if (target->reset_halt) {
1695 if (target->state != TARGET_HALTED) {
1696 LOG_WARNING("%s: ran after reset and before halt ...",
1697 target_name(target));
1698 if (target_was_examined(target)) {
1699 retval = target_halt(target);
1700 if (retval != ERROR_OK)
1701 return retval;
1702 } else
1703 target->state = TARGET_UNKNOWN;
1704 }
1705 }
1706
1707 return ERROR_OK;
1708 }
1709
1710 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1711 {
1712 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1713 * New desired mode must be in mode. Current value of DSCR must be in
1714 * *dscr, which is updated with new value.
1715 *
1716 * This function elides actually sending the mode-change over the debug
1717 * interface if the mode is already set as desired.
1718 */
1719 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1720 if (new_dscr != *dscr) {
1721 struct armv7a_common *armv7a = target_to_armv7a(target);
1722 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1723 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1724 if (retval == ERROR_OK)
1725 *dscr = new_dscr;
1726 return retval;
1727 } else {
1728 return ERROR_OK;
1729 }
1730 }
1731
1732 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1733 uint32_t value, uint32_t *dscr)
1734 {
1735 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1736 struct armv7a_common *armv7a = target_to_armv7a(target);
1737 int64_t then;
1738 int retval;
1739
1740 if ((*dscr & mask) == value)
1741 return ERROR_OK;
1742
1743 then = timeval_ms();
1744 while (1) {
1745 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1746 armv7a->debug_base + CPUDBG_DSCR, dscr);
1747 if (retval != ERROR_OK) {
1748 LOG_ERROR("Could not read DSCR register");
1749 return retval;
1750 }
1751 if ((*dscr & mask) == value)
1752 break;
1753 if (timeval_ms() > then + 1000) {
1754 LOG_ERROR("timeout waiting for DSCR bit change");
1755 return ERROR_FAIL;
1756 }
1757 }
1758 return ERROR_OK;
1759 }
1760
/* Read a coprocessor register into *data by executing the supplied opcode
 * (an MRC-form instruction that leaves the result in R0) on the core, then
 * draining the value out through DTRTX. *dscr caches the DSCR value and is
 * updated as a side effect. Requires DCC non-blocking mode (callers set it). */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1794
1795 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1796 uint32_t *dfsr, uint32_t *dscr)
1797 {
1798 int retval;
1799
1800 if (dfar) {
1801 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1802 if (retval != ERROR_OK)
1803 return retval;
1804 }
1805
1806 if (dfsr) {
1807 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
1808 if (retval != ERROR_OK)
1809 return retval;
1810 }
1811
1812 return ERROR_OK;
1813 }
1814
/* Write data to a coprocessor register: the value is pushed in through DTRRX,
 * moved to R0, then the supplied opcode (an MCR-form instruction consuming R0)
 * is executed on the core. *dscr caches the DSCR value and is updated as a
 * side effect. Mirror image of cortex_a_read_copro. */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
	uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Write the value into DTRRX. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1847
1848 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
1849 uint32_t dfsr, uint32_t *dscr)
1850 {
1851 int retval;
1852
1853 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
1854 if (retval != ERROR_OK)
1855 return retval;
1856
1857 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
1858 if (retval != ERROR_OK)
1859 return retval;
1860
1861 return ERROR_OK;
1862 }
1863
/* Translate a Data Fault Status Register value into an OpenOCD error code.
 * Bit 9 of the DFSR selects between the LPAE (long-descriptor) status layout
 * in bits [5:0] and the classic layout spread over bits {10, 3:0}. */
static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
{
	if (dfsr & (1 << 9)) {
		/* LPAE format: status field in the low six bits. */
		uint32_t status = dfsr & 0x3f;
		switch (status >> 2) {
			case 1:
			case 2:
			case 3:
			case 15:
				return ERROR_TARGET_TRANSLATION_FAULT;
		}
		if (status == 33)
			return ERROR_TARGET_UNALIGNED_ACCESS;
		return ERROR_TARGET_DATA_ABORT;
	}

	/* Normal (short-descriptor) format. */
	switch (((dfsr >> 6) & 0x10) | (dfsr & 0xf)) {
		case 1:
			return ERROR_TARGET_UNALIGNED_ACCESS;
		case 3:
		case 5:
		case 6:
		case 7:
		case 9:
		case 11:
		case 13:
		case 15:
			return ERROR_TARGET_TRANSLATION_FAULT;
		default:
			return ERROR_TARGET_DATA_ABORT;
	}
}
1890
static int cortex_a_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory.
		 * NOTE(review): the _IP opcode forms appear to post-increment the
		 * R0 address -- R0 is never re-written inside this loop -- confirm
		 * against the ARMV4_5_STR*_IP macro definitions. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early; the caller inspects the
		 * sticky abort bits in *dscr to report the fault. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
1965
static int cortex_a_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is fast but only works for
	 * word-sized objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* Switch to fast mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Latch STC instruction in the ITR; in fast mode the core re-issues
	 * the latched instruction for each subsequent DTRRX write. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
	if (retval != ERROR_OK)
		return retval;

	/* Transfer all the data and issue all the instructions: a single
	 * non-incrementing buffer write to DTRRX streams the whole transfer. */
	return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
		4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
1994
static int cortex_a_write_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* Write memory through the CPU.
	 *
	 * Writes `count` objects of `size` bytes from `buffer` starting at
	 * `address`, by executing store instructions on the halted core via
	 * the DCC.  Word-aligned word transfers use the fast path; everything
	 * else uses the slow path.  Data aborts raised by the stores are
	 * detected via the DSCR sticky abort bits and converted into an error
	 * code; DFAR/DFSR are saved up front and restored after a precise
	 * fault.  Returns ERROR_OK, ERROR_TARGET_NOT_HALTED, a fault-derived
	 * error code, or an adapter error. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty; it is clobbered below to hold the address. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0 (MRC from DTRRX). */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

	/* Cleanup runs even when the transfer failed; final_retval keeps the
	 * first error seen while later steps still execute best-effort. */
out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. However, don't do this
	 * if there is fault, because then the instruction might not have completed
	 * successfully. */
	if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
		/* NOTE(review): this early return skips the abort-clearing and
		 * DFAR/DFSR restore below — confirm that is intended. */
		if (retval != ERROR_OK)
			return retval;
	}

	/* If there were any sticky abort flags, snapshot then clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it: drain DTRTX with a read, and
	 * consume DTRRX by executing an MRC into R1. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2133
2134 static int cortex_a_read_cpu_memory_slow(struct target *target,
2135 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2136 {
2137 /* Reads count objects of size size into *buffer. Old value of DSCR must be
2138 * in *dscr; updated to new value. This is slow because it works for
2139 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2140 * the address is aligned, cortex_a_read_cpu_memory_fast should be
2141 * preferred.
2142 * Preconditions:
2143 * - Address is in R0.
2144 * - R0 is marked dirty.
2145 */
2146 struct armv7a_common *armv7a = target_to_armv7a(target);
2147 struct arm *arm = &armv7a->arm;
2148 int retval;
2149
2150 /* Mark register R1 as dirty, to use for transferring data. */
2151 arm_reg_current(arm, 1)->dirty = true;
2152
2153 /* Switch to non-blocking mode if not already in that mode. */
2154 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2155 if (retval != ERROR_OK)
2156 return retval;
2157
2158 /* Go through the objects. */
2159 while (count) {
2160 /* Issue a load of the appropriate size to R1. */
2161 uint32_t opcode, data;
2162 if (size == 1)
2163 opcode = ARMV4_5_LDRB_IP(1, 0);
2164 else if (size == 2)
2165 opcode = ARMV4_5_LDRH_IP(1, 0);
2166 else
2167 opcode = ARMV4_5_LDRW_IP(1, 0);
2168 retval = cortex_a_exec_opcode(target, opcode, dscr);
2169 if (retval != ERROR_OK)
2170 return retval;
2171
2172 /* Issue a write of R1 to DTRTX. */
2173 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2174 if (retval != ERROR_OK)
2175 return retval;
2176
2177 /* Check for faults and return early. */
2178 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2179 return ERROR_OK; /* A data fault is not considered a system failure. */
2180
2181 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2182 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2183 * must also check TXfull_l). Most of the time this will be free
2184 * because TXfull_l will be set immediately and cached in dscr. */
2185 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2186 DSCR_DTRTX_FULL_LATCHED, dscr);
2187 if (retval != ERROR_OK)
2188 return retval;
2189
2190 /* Read the value transferred to DTRTX into the buffer. */
2191 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2192 armv7a->debug_base + CPUDBG_DTRTX, &data);
2193 if (retval != ERROR_OK)
2194 return retval;
2195 if (size == 1)
2196 *buffer = (uint8_t) data;
2197 else if (size == 2)
2198 target_buffer_set_u16(target, buffer, (uint16_t) data);
2199 else
2200 target_buffer_set_u32(target, buffer, data);
2201
2202 /* Advance. */
2203 buffer += size;
2204 --count;
2205 }
2206
2207 return ERROR_OK;
2208 }
2209
static int cortex_a_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR.  This primes the
	 * pipeline: the first word is now on its way to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* One LDC has already been issued above; the remaining count-1 words
	 * are produced by fast-mode re-issues, and the final word is read
	 * separately at the bottom of this function. */
	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early.  A data fault is reported via
	 * DSCR to the caller, not treated as a system failure here. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2296
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through the CPU.
	 *
	 * Reads `count` objects of `size` bytes into `buffer` starting at
	 * `address`, by executing load instructions on the halted core via
	 * the DCC.  Word-aligned word transfers use the fast path; everything
	 * else uses the slow path.  Data aborts raised by the loads are
	 * detected via the DSCR sticky abort bits and converted into an error
	 * code; DFAR/DFSR are saved up front and restored after a precise
	 * fault.  Returns ERROR_OK, ERROR_TARGET_NOT_HALTED, a fault-derived
	 * error code, or an adapter error. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty; it is clobbered below to hold the address. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0 (MRC from DTRRX). */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

	/* Cleanup runs even when the transfer failed; final_retval keeps the
	 * first error seen while later steps still execute best-effort. */
out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, snapshot then clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it: drain DTRTX with a read, and
	 * consume DTRRX by executing an MRC into R1. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2423
2424
/*
 * Cortex-A memory access
 *
 * This works the same way as for Cortex-M3, except that we must also
 * use the correct AP number for every access.
 */
2431
2432 static int cortex_a_read_phys_memory(struct target *target,
2433 target_addr_t address, uint32_t size,
2434 uint32_t count, uint8_t *buffer)
2435 {
2436 int retval;
2437
2438 if (!count || !buffer)
2439 return ERROR_COMMAND_SYNTAX_ERROR;
2440
2441 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2442 address, size, count);
2443
2444 /* read memory through the CPU */
2445 cortex_a_prep_memaccess(target, 1);
2446 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2447 cortex_a_post_memaccess(target, 1);
2448
2449 return retval;
2450 }
2451
2452 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2453 uint32_t size, uint32_t count, uint8_t *buffer)
2454 {
2455 int retval;
2456
2457 /* cortex_a handles unaligned memory access */
2458 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2459 address, size, count);
2460
2461 cortex_a_prep_memaccess(target, 0);
2462 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2463 cortex_a_post_memaccess(target, 0);
2464
2465 return retval;
2466 }
2467
2468 static int cortex_a_write_phys_memory(struct target *target,
2469 target_addr_t address, uint32_t size,
2470 uint32_t count, const uint8_t *buffer)
2471 {
2472 int retval;
2473
2474 if (!count || !buffer)
2475 return ERROR_COMMAND_SYNTAX_ERROR;
2476
2477 LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2478 address, size, count);
2479
2480 /* write memory through the CPU */
2481 cortex_a_prep_memaccess(target, 1);
2482 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2483 cortex_a_post_memaccess(target, 1);
2484
2485 return retval;
2486 }
2487
2488 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2489 uint32_t size, uint32_t count, const uint8_t *buffer)
2490 {
2491 int retval;
2492
2493 /* cortex_a handles unaligned memory access */
2494 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2495 address, size, count);
2496
2497 /* memory writes bypass the caches, must flush before writing */
2498 armv7a_cache_auto_flush_on_write(target, address, size * count);
2499
2500 cortex_a_prep_memaccess(target, 0);
2501 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2502 cortex_a_post_memaccess(target, 0);
2503 return retval;
2504 }
2505
2506 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2507 uint32_t count, uint8_t *buffer)
2508 {
2509 uint32_t size;
2510
2511 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2512 * will have something to do with the size we leave to it. */
2513 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2514 if (address & size) {
2515 int retval = target_read_memory(target, address, size, 1, buffer);
2516 if (retval != ERROR_OK)
2517 return retval;
2518 address += size;
2519 count -= size;
2520 buffer += size;
2521 }
2522 }
2523
2524 /* Read the data with as large access size as possible. */
2525 for (; size > 0; size /= 2) {
2526 uint32_t aligned = count - count % size;
2527 if (aligned > 0) {
2528 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2529 if (retval != ERROR_OK)
2530 return retval;
2531 address += aligned;
2532 count -= aligned;
2533 buffer += aligned;
2534 }
2535 }
2536
2537 return ERROR_OK;
2538 }
2539
2540 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2541 uint32_t count, const uint8_t *buffer)
2542 {
2543 uint32_t size;
2544
2545 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2546 * will have something to do with the size we leave to it. */
2547 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2548 if (address & size) {
2549 int retval = target_write_memory(target, address, size, 1, buffer);
2550 if (retval != ERROR_OK)
2551 return retval;
2552 address += size;
2553 count -= size;
2554 buffer += size;
2555 }
2556 }
2557
2558 /* Write the data with as large access size as possible. */
2559 for (; size > 0; size /= 2) {
2560 uint32_t aligned = count - count % size;
2561 if (aligned > 0) {
2562 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2563 if (retval != ERROR_OK)
2564 return retval;
2565 address += aligned;
2566 count -= aligned;
2567 buffer += aligned;
2568 }
2569 }
2570
2571 return ERROR_OK;
2572 }
2573
static int cortex_a_handle_target_request(void *priv)
{
	/* Periodic timer callback (registered in cortex_a_init_arch_info):
	 * while the core is running, drains target-to-host debug-message
	 * words from DTRTX and hands each one to target_request().  Does
	 * nothing before examine or when debug messages are disabled.
	 * Gives up with ERROR_FAIL if the core keeps DTRTX full for more
	 * than one second. */
	struct target *target = priv;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint32_t request;
		uint32_t dscr;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);

		/* check if we have data; stop on the first adapter error, since
		 * the loop condition also requires retval == ERROR_OK */
		int64_t then = timeval_ms();
		while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
			retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_DTRTX, &request);
			if (retval == ERROR_OK) {
				target_request(target, request);
				/* Re-read DSCR to see whether another word arrived. */
				retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
						armv7a->debug_base + CPUDBG_DSCR, &dscr);
			}
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for dtr tx full");
				return ERROR_FAIL;
			}
		}
	}

	return ERROR_OK;
}
2610
2611 /*
2612 * Cortex-A target information and configuration
2613 */
2614
2615 static int cortex_a_examine_first(struct target *target)
2616 {
2617 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2618 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2619 struct adiv5_dap *swjdp = armv7a->arm.dap;
2620
2621 int i;
2622 int retval = ERROR_OK;
2623 uint32_t didr, cpuid, dbg_osreg;
2624
2625 /* Search for the APB-AP - it is needed for access to debug registers */
2626 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2627 if (retval != ERROR_OK) {
2628 LOG_ERROR("Could not find APB-AP for debug access");
2629 return retval;
2630 }
2631
2632 retval = mem_ap_init(armv7a->debug_ap);
2633 if (retval != ERROR_OK) {
2634 LOG_ERROR("Could not initialize the APB-AP");
2635 return retval;
2636 }
2637
2638 armv7a->debug_ap->memaccess_tck = 80;
2639
2640 if (!target->dbgbase_set) {
2641 uint32_t dbgbase;
2642 /* Get ROM Table base */
2643 uint32_t apid;
2644 int32_t coreidx = target->coreid;
2645 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2646 target->cmd_name);
2647 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2648 if (retval != ERROR_OK)
2649 return retval;
2650 /* Lookup 0x15 -- Processor DAP */
2651 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2652 &armv7a->debug_base, &coreidx);
2653 if (retval != ERROR_OK) {
2654 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2655 target->cmd_name);
2656 return retval;
2657 }
2658 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2659 target->coreid, armv7a->debug_base);
2660 } else
2661 armv7a->debug_base = target->dbgbase;
2662
2663 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2664 armv7a->debug_base + CPUDBG_DIDR, &didr);
2665 if (retval != ERROR_OK) {
2666 LOG_DEBUG("Examine %s failed", "DIDR");
2667 return retval;
2668 }
2669
2670 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2671 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2672 if (retval != ERROR_OK) {
2673 LOG_DEBUG("Examine %s failed", "CPUID");
2674 return retval;
2675 }
2676
2677 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2678 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2679
2680 cortex_a->didr = didr;
2681 cortex_a->cpuid = cpuid;
2682
2683 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2684 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2685 if (retval != ERROR_OK)
2686 return retval;
2687 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2688
2689 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2690 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2691 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2692 return ERROR_TARGET_INIT_FAILED;
2693 }
2694
2695 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2696 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2697
2698 /* Read DBGOSLSR and check if OSLK is implemented */
2699 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2700 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2701 if (retval != ERROR_OK)
2702 return retval;
2703 LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2704
2705 /* check if OS Lock is implemented */
2706 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2707 /* check if OS Lock is set */
2708 if (dbg_osreg & OSLSR_OSLK) {
2709 LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2710
2711 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2712 armv7a->debug_base + CPUDBG_OSLAR,
2713 0);
2714 if (retval == ERROR_OK)
2715 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2716 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2717
2718 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2719 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2720 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2721 target->coreid);
2722 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2723 return ERROR_TARGET_INIT_FAILED;
2724 }
2725 }
2726 }
2727
2728 armv7a->arm.core_type = ARM_MODE_MON;
2729
2730 /* Avoid recreating the registers cache */
2731 if (!target_was_examined(target)) {
2732 retval = cortex_a_dpm_setup(cortex_a, didr);
2733 if (retval != ERROR_OK)
2734 return retval;
2735 }
2736
2737 /* Setup Breakpoint Register Pairs */
2738 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2739 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2740 cortex_a->brp_num_available = cortex_a->brp_num;
2741 free(cortex_a->brp_list);
2742 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2743 /* cortex_a->brb_enabled = ????; */
2744 for (i = 0; i < cortex_a->brp_num; i++) {
2745 cortex_a->brp_list[i].used = 0;
2746 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2747 cortex_a->brp_list[i].type = BRP_NORMAL;
2748 else
2749 cortex_a->brp_list[i].type = BRP_CONTEXT;
2750 cortex_a->brp_list[i].value = 0;
2751 cortex_a->brp_list[i].control = 0;
2752 cortex_a->brp_list[i].BRPn = i;
2753 }
2754
2755 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2756
2757 /* select debug_ap as default */
2758 swjdp->apsel = armv7a->debug_ap->ap_num;
2759
2760 target_set_examined(target);
2761 return ERROR_OK;
2762 }
2763
2764 static int cortex_a_examine(struct target *target)
2765 {
2766 int retval = ERROR_OK;
2767
2768 /* Reestablish communication after target reset */
2769 retval = cortex_a_examine_first(target);
2770
2771 /* Configure core debug access */
2772 if (retval == ERROR_OK)
2773 retval = cortex_a_init_debug_access(target);
2774
2775 return retval;
2776 }
2777
2778 /*
2779 * Cortex-A target creation and initialization
2780 */
2781
static int cortex_a_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* Per-target init hook: most hardware setup happens later in
	 * cortex_a_examine_first(); here we only prepare semihosting. */
	/* examine_first() does a bunch of this */
	arm_semihosting_init(target);
	return ERROR_OK;
}
2789
2790 static int cortex_a_init_arch_info(struct target *target,
2791 struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
2792 {
2793 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2794
2795 /* Setup struct cortex_a_common */
2796 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2797 armv7a->arm.dap = dap;
2798
2799 /* register arch-specific functions */
2800 armv7a->examine_debug_reason = NULL;
2801
2802 armv7a->post_debug_entry = cortex_a_post_debug_entry;
2803
2804 armv7a->pre_restore_context = NULL;
2805
2806 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
2807
2808
2809 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
2810
2811 /* REVISIT v7a setup should be in a v7a-specific routine */
2812 armv7a_init_arch_info(target, armv7a);
2813 target_register_timer_callback(cortex_a_handle_target_request, 1,
2814 TARGET_TIMER_TYPE_PERIODIC, target);
2815
2816 return ERROR_OK;
2817 }
2818
2819 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
2820 {
2821 struct cortex_a_common *cortex_a;
2822 struct adiv5_private_config *pc;
2823
2824 if (target->private_config == NULL)
2825 return ERROR_FAIL;
2826
2827 pc = (struct adiv5_private_config *)target->private_config;
2828
2829 cortex_a = calloc(1, sizeof(struct cortex_a_common));
2830 if (cortex_a == NULL) {
2831 LOG_ERROR("Out of memory");
2832 return ERROR_FAIL;
2833 }
2834 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2835 cortex_a->armv7a_common.is_armv7r = false;
2836 cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
2837
2838 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2839 }
2840
2841 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2842 {
2843 struct cortex_a_common *cortex_a;
2844 struct adiv5_private_config *pc;
2845
2846 pc = (struct adiv5_private_config *)target->private_config;
2847 if (adiv5_verify_config(pc) != ERROR_OK)
2848 return ERROR_FAIL;
2849
2850 cortex_a = calloc(1, sizeof(struct cortex_a_common));
2851 if (cortex_a == NULL) {
2852 LOG_ERROR("Out of memory");
2853 return ERROR_FAIL;
2854 }
2855 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2856 cortex_a->armv7a_common.is_armv7r = true;
2857
2858 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2859 }
2860
2861 static void cortex_a_deinit_target(struct target *target)
2862 {
2863 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2864 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2865 struct arm_dpm *dpm = &armv7a->dpm;
2866 uint32_t dscr;
2867 int retval;
2868
2869 if (target_was_examined(target)) {
2870 /* Disable halt for breakpoint, watchpoint and vector catch */
2871 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2872 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2873 if (retval == ERROR_OK)
2874 mem_ap_write_atomic_u32(armv7a->debug_ap,
2875 armv7a->debug_base + CPUDBG_DSCR,
2876 dscr & ~DSCR_HALT_DBG_MODE);
2877 }
2878
2879 free(cortex_a->brp_list);
2880 free(dpm->dbp);
2881 free(dpm->dwp);
2882 free(target->private_config);
2883 free(cortex_a);
2884 }
2885
2886 static int cortex_a_mmu(struct target *target, int *enabled)
2887 {
2888 struct armv7a_common *armv7a = target_to_armv7a(target);
2889
2890 if (target->state != TARGET_HALTED) {
2891 LOG_ERROR("%s: target not halted", __func__);
2892 return ERROR_TARGET_INVALID;
2893 }
2894
2895 if (armv7a->is_armv7r)
2896 *enabled = 0;
2897 else
2898 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2899
2900 return ERROR_OK;
2901 }
2902
2903 static int cortex_a_virt2phys(struct target *target,
2904 target_addr_t virt, target_addr_t *phys)
2905 {
2906 int retval;
2907 int mmu_enabled = 0;
2908
2909 /*
2910 * If the MMU was not enabled at debug entry, there is no
2911 * way of knowing if there was ever a valid configuration
2912 * for it and thus it's not safe to enable it. In this case,
2913 * just return the virtual address as physical.
2914 */
2915 cortex_a_mmu(target, &mmu_enabled);
2916 if (!mmu_enabled) {
2917 *phys = virt;
2918 return ERROR_OK;
2919 }
2920
2921 /* mmu must be enable in order to get a correct translation */
2922 retval = cortex_a_mmu_modify(target, 1);
2923 if (retval != ERROR_OK)
2924 return retval;
2925 return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
2926 phys, 1);
2927 }
2928
2929 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
2930 {
2931 struct target *target = get_current_target(CMD_CTX);
2932 struct armv7a_common *armv7a = target_to_armv7a(target);
2933
2934 return armv7a_handle_cache_info_command(CMD,
2935 &armv7a->armv7a_mmu.armv7a_cache);
2936 }
2937
2938
2939 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
2940 {
2941 struct target *target = get_current_target(CMD_CTX);
2942 if (!target_was_examined(target)) {
2943 LOG_ERROR("target not examined yet");
2944 return ERROR_FAIL;
2945 }
2946
2947 return cortex_a_init_debug_access(target);
2948 }
2949
2950 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
2951 {
2952 struct target *target = get_current_target(CMD_CTX);
2953 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2954
2955 static const Jim_Nvp nvp_maskisr_modes[] = {
2956 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
2957 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
2958 { .name = NULL, .value = -1 },
2959 };
2960 const Jim_Nvp *n;
2961
2962 if (CMD_ARGC > 0) {
2963 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2964 if (n->name == NULL) {
2965 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2966 return ERROR_COMMAND_SYNTAX_ERROR;
2967 }
2968
2969 cortex_a->isrmasking_mode = n->value;
2970 }
2971
2972 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
2973 command_print(CMD, "cortex_a interrupt mask %s", n->name);
2974
2975 return ERROR_OK;
2976 }
2977
2978 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
2979 {
2980 struct target *target = get_current_target(CMD_CTX);
2981 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2982
2983 static const Jim_Nvp nvp_dacrfixup_modes[] = {
2984 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
2985 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
2986 { .name = NULL, .value = -1 },
2987 };
2988 const Jim_Nvp *n;
2989
2990 if (CMD_ARGC > 0) {
2991 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
2992 if (n->name == NULL)
2993 return ERROR_COMMAND_SYNTAX_ERROR;
2994 cortex_a->dacrfixup_mode = n->value;
2995
2996 }
2997
2998 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
2999 command_print(CMD, "cortex_a domain access control fixup %s", n->name);
3000
3001 return ERROR_OK;
3002 }
3003
/* Subcommands of the "cortex_a" command group, plus the chained
 * generic MMU and SMP command sets. */
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask cortex_a interrupts",
		.usage = "['on'|'off']",
	},
	{
		.name = "dacrfixup",
		.handler = handle_cortex_a_dacrfixup_command,
		.mode = COMMAND_ANY,
		.help = "set domain access control (DACR) to all-manager "
			"on memory access",
		.usage = "['on'|'off']",
	},
	{
		.chain = armv7a_mmu_command_handlers,
	},
	{
		.chain = smp_command_handlers,
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-A targets: the generic ARM
 * and ARMv7-A command sets plus the "cortex_a" group defined above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3059
/* Target vtable for ARMv7-A (Cortex-A) cores. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* No dedicated watchpoint hooks; NOTE(review): presumably handled
	 * by a generic layer — confirm before relying on watchpoints. */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	/* Physical accessors and virt2phys enable GDB/user access
	 * through or around the MMU. */
	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3108
/* Subcommands of the "cortex_r4" command group; reuses the Cortex-A
 * handlers, which also cover the ARMv7-R case. */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-R4 targets. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3140
/* Target vtable for ARMv7-R (Cortex-R4) cores.  Mostly shares the
 * Cortex-A implementation; differences are noted inline. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* R4 wires the physical accessors directly into read/write
	 * memory; cortex_a_mmu() reports the MMU as disabled for
	 * ARMv7-R, so no virt2phys/mmu hooks are installed here. */
	.read_memory = cortex_a_read_phys_memory,
	.write_memory = cortex_a_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* No dedicated watchpoint hooks; NOTE(review): presumably handled
	 * by a generic layer — confirm before relying on watchpoints. */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time the sign-in will be linked to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)