target/armv7a: change prototype of armv7a_handle_cache_info_command()
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex-R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 * *
39 * Cortex-A8(tm) TRM, ARM DDI 0344H *
40 * Cortex-A9(tm) TRM, ARM DDI 0407F *
41 * Cortex-R4(tm) TRM, ARM DDI 0363E *
42 * Cortex-A15(tm)TRM, ARM DDI 0438C *
43 * *
44 ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "armv7a_mmu.h"
54 #include "target_request.h"
55 #include "target_type.h"
56 #include "arm_opcodes.h"
57 #include "arm_semihosting.h"
58 #include "transport/transport.h"
59 #include "smp.h"
60 #include <helper/time_support.h>
61
62 static int cortex_a_poll(struct target *target);
63 static int cortex_a_debug_entry(struct target *target);
64 static int cortex_a_restore_context(struct target *target, bool bpwp);
65 static int cortex_a_set_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_context_breakpoint(struct target *target,
68 struct breakpoint *breakpoint, uint8_t matchmode);
69 static int cortex_a_set_hybrid_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_unset_breakpoint(struct target *target,
72 struct breakpoint *breakpoint);
73 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
74 uint32_t value, uint32_t *dscr);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_mmu_modify(struct target *target, int enable);
77 static int cortex_a_virt2phys(struct target *target,
78 target_addr_t virt, target_addr_t *phys);
79 static int cortex_a_read_cpu_memory(struct target *target,
80 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
81
82
83 /* restore cp15_control_reg at resume */
84 static int cortex_a_restore_cp15_control_reg(struct target *target)
85 {
86 int retval = ERROR_OK;
87 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
88 struct armv7a_common *armv7a = target_to_armv7a(target);
89
90 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
91 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
92 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
93 retval = armv7a->arm.mcr(target, 15,
94 0, 0, /* op1, op2 */
95 1, 0, /* CRn, CRm */
96 cortex_a->cp15_control_reg);
97 }
98 return retval;
99 }
100
101 /*
102 * Set up ARM core for memory access.
103 * If !phys_access, switch to SVC mode and make sure MMU is on
104 * If phys_access, switch off mmu
105 */
106 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
107 {
108 struct armv7a_common *armv7a = target_to_armv7a(target);
109 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
110 int mmu_enabled = 0;
111
112 if (phys_access == 0) {
113 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
114 cortex_a_mmu(target, &mmu_enabled);
115 if (mmu_enabled)
116 cortex_a_mmu_modify(target, 1);
117 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
118 /* overwrite DACR to all-manager */
119 armv7a->arm.mcr(target, 15,
120 0, 0, 3, 0,
121 0xFFFFFFFF);
122 }
123 } else {
124 cortex_a_mmu(target, &mmu_enabled);
125 if (mmu_enabled)
126 cortex_a_mmu_modify(target, 0);
127 }
128 return ERROR_OK;
129 }
130
131 /*
132 * Restore ARM core after memory access.
133 * If !phys_access, switch to previous mode
134 * If phys_access, restore MMU setting
135 */
136 static int cortex_a_post_memaccess(struct target *target, int phys_access)
137 {
138 struct armv7a_common *armv7a = target_to_armv7a(target);
139 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
140
141 if (phys_access == 0) {
142 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
143 /* restore */
144 armv7a->arm.mcr(target, 15,
145 0, 0, 3, 0,
146 cortex_a->cp15_dacr_reg);
147 }
148 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
149 } else {
150 int mmu_enabled = 0;
151 cortex_a_mmu(target, &mmu_enabled);
152 if (mmu_enabled)
153 cortex_a_mmu_modify(target, 1);
154 }
155 return ERROR_OK;
156 }
157
158
159 /* modify cp15_control_reg in order to enable or disable mmu for :
160 * - virt2phys address conversion
161 * - read or write memory in phys or virt address */
162 static int cortex_a_mmu_modify(struct target *target, int enable)
163 {
164 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
165 struct armv7a_common *armv7a = target_to_armv7a(target);
166 int retval = ERROR_OK;
167 int need_write = 0;
168
169 if (enable) {
170 /* if mmu enabled at target stop and mmu not enable */
171 if (!(cortex_a->cp15_control_reg & 0x1U)) {
172 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
173 return ERROR_FAIL;
174 }
175 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
176 cortex_a->cp15_control_reg_curr |= 0x1U;
177 need_write = 1;
178 }
179 } else {
180 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
181 cortex_a->cp15_control_reg_curr &= ~0x1U;
182 need_write = 1;
183 }
184 }
185
186 if (need_write) {
187 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
188 enable ? "enable mmu" : "disable mmu",
189 cortex_a->cp15_control_reg_curr);
190
191 retval = armv7a->arm.mcr(target, 15,
192 0, 0, /* op1, op2 */
193 1, 0, /* CRn, CRm */
194 cortex_a->cp15_control_reg_curr);
195 }
196 return retval;
197 }
198
199 /*
200 * Cortex-A Basic debug access, very low level assumes state is saved
201 */
/*
 * Cortex-A Basic debug access, very low level assumes state is saved.
 * Unlocks the memory-mapped debug registers, configures debug-state
 * cache/TLB behavior, enables halting debug mode, then polls once to
 * refresh the cached target state.
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t dscr;
	int retval;

	/* lock memory-mapped access to debug registers to prevent
	 * software interference */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* flush the three queued (non-atomic) writes above in one transaction */
	retval = dap_run(armv7a->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Enable halt for breakpoint, watchpoint and vector catch */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
248
249 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
250 {
251 /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
252 * Writes final value of DSCR into *dscr. Pass force to force always
253 * reading DSCR at least once. */
254 struct armv7a_common *armv7a = target_to_armv7a(target);
255 int retval;
256
257 if (force) {
258 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
259 armv7a->debug_base + CPUDBG_DSCR, dscr);
260 if (retval != ERROR_OK) {
261 LOG_ERROR("Could not read DSCR register");
262 return retval;
263 }
264 }
265
266 retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
267 if (retval != ERROR_OK)
268 LOG_ERROR("Error waiting for InstrCompl=1");
269 return retval;
270 }
271
272 /* To reduce needless round-trips, pass in a pointer to the current
273 * DSCR value. Initialize it to zero if you just need to know the
274 * value on return from this function; or DSCR_INSTR_COMP if you
275 * happen to know that no instruction is pending.
276 */
/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value. Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 *
 * Executes one opcode through the ITR: waits for any pending
 * instruction to complete, writes the opcode, then waits for it to
 * finish.  dscr_p may be NULL.
 */
static int cortex_a_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	/* NOTE(review): dscr_p may be NULL here; cortex_a_wait_instrcmpl() is
	 * then called with a NULL dscr pointer and force=false, so it only
	 * works because the wait helper tolerates that — confirm */
	retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	/* Wait for InstrCompl bit to be set (force a fresh DSCR read) */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for cortex_a_exec_opcode");
		return retval;
	}

	/* hand the final DSCR back to the caller to save a round-trip later */
	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
310
311 /* Write to memory mapped registers directly with no cache or mmu handling */
312 static int cortex_a_dap_write_memap_register_u32(struct target *target,
313 uint32_t address,
314 uint32_t value)
315 {
316 int retval;
317 struct armv7a_common *armv7a = target_to_armv7a(target);
318
319 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
320
321 return retval;
322 }
323
324 /*
325 * Cortex-A implementation of Debug Programmer's Model
326 *
327 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
328 * so there's no need to poll for it before executing an instruction.
329 *
330 * NOTE that in several of these cases the "stall" mode might be useful.
331 * It'd let us queue a few operations together... prepare/finish might
332 * be the places to enable/disable that mode.
333 */
334
/* Map a generic arm_dpm pointer back to its enclosing Cortex-A
 * target-private structure. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
339
340 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
341 {
342 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
343 return mem_ap_write_u32(a->armv7a_common.debug_ap,
344 a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
345 }
346
347 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
348 uint32_t *dscr_p)
349 {
350 uint32_t dscr = DSCR_INSTR_COMP;
351 int retval;
352
353 if (dscr_p)
354 dscr = *dscr_p;
355
356 /* Wait for DTRRXfull */
357 retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
358 DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
359 if (retval != ERROR_OK) {
360 LOG_ERROR("Error waiting for read dcc");
361 return retval;
362 }
363
364 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
365 a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
366 if (retval != ERROR_OK)
367 return retval;
368 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
369
370 if (dscr_p)
371 *dscr_p = dscr;
372
373 return retval;
374 }
375
/* DPM "prepare" hook: establish the invariant that DSCR_INSTR_COMP is
 * set before any DPM operation runs, and drain a stuck DCC RX word if
 * one is unexpectedly pending. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for dpm prepare");
		return retval;
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by reading it into R0:
		 * MRC p14, 0, R0, c0, c5, 0 */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
403
/* DPM "finish" hook: counterpart to cortex_a_dpm_prepare(); currently a
 * no-op. */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
409
410 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
411 uint32_t opcode, uint32_t data)
412 {
413 struct cortex_a_common *a = dpm_to_a(dpm);
414 int retval;
415 uint32_t dscr = DSCR_INSTR_COMP;
416
417 retval = cortex_a_write_dcc(a, data);
418 if (retval != ERROR_OK)
419 return retval;
420
421 return cortex_a_exec_opcode(
422 a->armv7a_common.arm.target,
423 opcode,
424 &dscr);
425 }
426
427 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
428 uint32_t opcode, uint32_t data)
429 {
430 struct cortex_a_common *a = dpm_to_a(dpm);
431 uint32_t dscr = DSCR_INSTR_COMP;
432 int retval;
433
434 retval = cortex_a_write_dcc(a, data);
435 if (retval != ERROR_OK)
436 return retval;
437
438 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
439 retval = cortex_a_exec_opcode(
440 a->armv7a_common.arm.target,
441 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
442 &dscr);
443 if (retval != ERROR_OK)
444 return retval;
445
446 /* then the opcode, taking data from R0 */
447 retval = cortex_a_exec_opcode(
448 a->armv7a_common.arm.target,
449 opcode,
450 &dscr);
451
452 return retval;
453 }
454
455 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
456 {
457 struct target *target = dpm->arm->target;
458 uint32_t dscr = DSCR_INSTR_COMP;
459
460 /* "Prefetch flush" after modifying execution status in CPSR */
461 return cortex_a_exec_opcode(target,
462 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
463 &dscr);
464 }
465
466 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
467 uint32_t opcode, uint32_t *data)
468 {
469 struct cortex_a_common *a = dpm_to_a(dpm);
470 int retval;
471 uint32_t dscr = DSCR_INSTR_COMP;
472
473 /* the opcode, writing data to DCC */
474 retval = cortex_a_exec_opcode(
475 a->armv7a_common.arm.target,
476 opcode,
477 &dscr);
478 if (retval != ERROR_OK)
479 return retval;
480
481 return cortex_a_read_dcc(a, data, &dscr);
482 }
483
484
485 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
486 uint32_t opcode, uint32_t *data)
487 {
488 struct cortex_a_common *a = dpm_to_a(dpm);
489 uint32_t dscr = DSCR_INSTR_COMP;
490 int retval;
491
492 /* the opcode, writing data to R0 */
493 retval = cortex_a_exec_opcode(
494 a->armv7a_common.arm.target,
495 opcode,
496 &dscr);
497 if (retval != ERROR_OK)
498 return retval;
499
500 /* write R0 to DCC */
501 retval = cortex_a_exec_opcode(
502 a->armv7a_common.arm.target,
503 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
504 &dscr);
505 if (retval != ERROR_OK)
506 return retval;
507
508 return cortex_a_read_dcc(a, data, &dscr);
509 }
510
511 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
512 uint32_t addr, uint32_t control)
513 {
514 struct cortex_a_common *a = dpm_to_a(dpm);
515 uint32_t vr = a->armv7a_common.debug_base;
516 uint32_t cr = a->armv7a_common.debug_base;
517 int retval;
518
519 switch (index_t) {
520 case 0 ... 15: /* breakpoints */
521 vr += CPUDBG_BVR_BASE;
522 cr += CPUDBG_BCR_BASE;
523 break;
524 case 16 ... 31: /* watchpoints */
525 vr += CPUDBG_WVR_BASE;
526 cr += CPUDBG_WCR_BASE;
527 index_t -= 16;
528 break;
529 default:
530 return ERROR_FAIL;
531 }
532 vr += 4 * index_t;
533 cr += 4 * index_t;
534
535 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
536 (unsigned) vr, (unsigned) cr);
537
538 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
539 vr, addr);
540 if (retval != ERROR_OK)
541 return retval;
542 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
543 cr, control);
544 return retval;
545 }
546
547 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
548 {
549 struct cortex_a_common *a = dpm_to_a(dpm);
550 uint32_t cr;
551
552 switch (index_t) {
553 case 0 ... 15:
554 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
555 break;
556 case 16 ... 31:
557 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
558 index_t -= 16;
559 break;
560 default:
561 return ERROR_FAIL;
562 }
563 cr += 4 * index_t;
564
565 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
566
567 /* clear control register */
568 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
569 }
570
571 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
572 {
573 struct arm_dpm *dpm = &a->armv7a_common.dpm;
574 int retval;
575
576 dpm->arm = &a->armv7a_common.arm;
577 dpm->didr = didr;
578
579 dpm->prepare = cortex_a_dpm_prepare;
580 dpm->finish = cortex_a_dpm_finish;
581
582 dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
583 dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
584 dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
585
586 dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
587 dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
588
589 dpm->bpwp_enable = cortex_a_bpwp_enable;
590 dpm->bpwp_disable = cortex_a_bpwp_disable;
591
592 retval = arm_dpm_setup(dpm);
593 if (retval == ERROR_OK)
594 retval = arm_dpm_initialize(dpm);
595
596 return retval;
597 }
598 static struct target *get_cortex_a(struct target *target, int32_t coreid)
599 {
600 struct target_list *head;
601 struct target *curr;
602
603 head = target->head;
604 while (head != (struct target_list *)NULL) {
605 curr = head->target;
606 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
607 return curr;
608 head = head->next;
609 }
610 return target;
611 }
612 static int cortex_a_halt(struct target *target);
613
614 static int cortex_a_halt_smp(struct target *target)
615 {
616 int retval = 0;
617 struct target_list *head;
618 struct target *curr;
619 head = target->head;
620 while (head != (struct target_list *)NULL) {
621 curr = head->target;
622 if ((curr != target) && (curr->state != TARGET_HALTED)
623 && target_was_examined(curr))
624 retval += cortex_a_halt(curr);
625 head = head->next;
626 }
627 return retval;
628 }
629
/* After one SMP core halts: halt the siblings, poll them to refresh
 * their state, and poll the core GDB is attached to last so the halt
 * event reaches GDB only after the whole group is consistent. */
static int update_halt_gdb(struct target *target)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;
	int retval = 0;

	/* no core claimed by gdb yet: take it and halt the rest of the group */
	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += cortex_a_halt_smp(target);
	}

	if (target->gdb_service)
		gdb_target = target->gdb_service->target;

	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* Skip gdb_target; it alerts GDB so has to be polled as last one */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in cortex_a_poll() by temporarily clearing smp */
		curr->smp = 0;
		cortex_a_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		cortex_a_poll(gdb_target);
	return retval;
}
671
672 /*
673 * Cortex-A Run control
674 */
675
/* Poll the core's DSCR and update the cached target state; on a new
 * halt, run the debug-entry sequence and fire the matching events. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for cortex_a_debug_entry() */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;

			retval = cortex_a_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp) {
				retval = update_halt_gdb(target);
				if (retval != ERROR_OK)
					return retval;
			}

			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
			} else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
				/* semihosting may resume the target; then no halt event */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
732
733 static int cortex_a_halt(struct target *target)
734 {
735 int retval;
736 uint32_t dscr;
737 struct armv7a_common *armv7a = target_to_armv7a(target);
738
739 /*
740 * Tell the core to be halted by writing DRCR with 0x1
741 * and then wait for the core to be halted.
742 */
743 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
744 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
745 if (retval != ERROR_OK)
746 return retval;
747
748 dscr = 0; /* force read of dscr */
749 retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
750 DSCR_CORE_HALTED, &dscr);
751 if (retval != ERROR_OK) {
752 LOG_ERROR("Error waiting for halt");
753 return retval;
754 }
755
756 target->debug_reason = DBG_REASON_DBGRQ;
757
758 return ERROR_OK;
759 }
760
761 static int cortex_a_internal_restore(struct target *target, int current,
762 target_addr_t *address, int handle_breakpoints, int debug_execution)
763 {
764 struct armv7a_common *armv7a = target_to_armv7a(target);
765 struct arm *arm = &armv7a->arm;
766 int retval;
767 uint32_t resume_pc;
768
769 if (!debug_execution)
770 target_free_all_working_areas(target);
771
772 #if 0
773 if (debug_execution) {
774 /* Disable interrupts */
775 /* We disable interrupts in the PRIMASK register instead of
776 * masking with C_MASKINTS,
777 * This is probably the same issue as Cortex-M3 Errata 377493:
778 * C_MASKINTS in parallel with disabled interrupts can cause
779 * local faults to not be taken. */
780 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
781 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
782 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;
783
784 /* Make sure we are in Thumb mode */
785 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
786 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
787 32) | (1 << 24));
788 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = true;
789 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = true;
790 }
791 #endif
792
793 /* current = 1: continue on current pc, otherwise continue at <address> */
794 resume_pc = buf_get_u32(arm->pc->value, 0, 32);
795 if (!current)
796 resume_pc = *address;
797 else
798 *address = resume_pc;
799
800 /* Make sure that the Armv7 gdb thumb fixups does not
801 * kill the return address
802 */
803 switch (arm->core_state) {
804 case ARM_STATE_ARM:
805 resume_pc &= 0xFFFFFFFC;
806 break;
807 case ARM_STATE_THUMB:
808 case ARM_STATE_THUMB_EE:
809 /* When the return address is loaded into PC
810 * bit 0 must be 1 to stay in Thumb state
811 */
812 resume_pc |= 0x1;
813 break;
814 case ARM_STATE_JAZELLE:
815 LOG_ERROR("How do I resume into Jazelle state??");
816 return ERROR_FAIL;
817 case ARM_STATE_AARCH64:
818 LOG_ERROR("Shoudn't be in AARCH64 state");
819 return ERROR_FAIL;
820 }
821 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
822 buf_set_u32(arm->pc->value, 0, 32, resume_pc);
823 arm->pc->dirty = true;
824 arm->pc->valid = true;
825
826 /* restore dpm_mode at system halt */
827 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
828 /* called it now before restoring context because it uses cpu
829 * register r0 for restoring cp15 control register */
830 retval = cortex_a_restore_cp15_control_reg(target);
831 if (retval != ERROR_OK)
832 return retval;
833 retval = cortex_a_restore_context(target, handle_breakpoints);
834 if (retval != ERROR_OK)
835 return retval;
836 target->debug_reason = DBG_REASON_NOTHALTED;
837 target->state = TARGET_RUNNING;
838
839 /* registers are now invalid */
840 register_cache_invalidate(arm->core_cache);
841
842 #if 0
843 /* the front-end may request us not to handle breakpoints */
844 if (handle_breakpoints) {
845 /* Single step past breakpoint at current address */
846 breakpoint = breakpoint_find(target, resume_pc);
847 if (breakpoint) {
848 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
849 cortex_m3_unset_breakpoint(target, breakpoint);
850 cortex_m3_single_step_core(target);
851 cortex_m3_set_breakpoint(target, breakpoint);
852 }
853 }
854
855 #endif
856 return retval;
857 }
858
/* Restart the core after cortex_a_internal_restore(): clear ITRen and
 * sticky exception flags, issue DRCR restart, and wait for DSCR to
 * confirm the core is running again. */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* warn only: restart is attempted regardless */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* disable ITR before restarting */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	dscr = 0; /* force read of dscr */
	retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
			DSCR_CORE_RESTARTED, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for resume");
		return retval;
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
908
909 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
910 {
911 int retval = 0;
912 struct target_list *head;
913 struct target *curr;
914 target_addr_t address;
915 head = target->head;
916 while (head != (struct target_list *)NULL) {
917 curr = head->target;
918 if ((curr != target) && (curr->state != TARGET_RUNNING)
919 && target_was_examined(curr)) {
920 /* resume current address , not in step mode */
921 retval += cortex_a_internal_restore(curr, 1, &address,
922 handle_breakpoints, 0);
923 retval += cortex_a_internal_restart(curr);
924 }
925 head = head->next;
926
927 }
928 return retval;
929 }
930
931 static int cortex_a_resume(struct target *target, int current,
932 target_addr_t address, int handle_breakpoints, int debug_execution)
933 {
934 int retval = 0;
935 /* dummy resume for smp toggle in order to reduce gdb impact */
936 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
937 /* simulate a start and halt of target */
938 target->gdb_service->target = NULL;
939 target->gdb_service->core[0] = target->gdb_service->core[1];
940 /* fake resume at next poll we play the target core[1], see poll*/
941 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
942 return 0;
943 }
944 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
945 if (target->smp) {
946 target->gdb_service->core[0] = -1;
947 retval = cortex_a_restore_smp(target, handle_breakpoints);
948 if (retval != ERROR_OK)
949 return retval;
950 }
951 cortex_a_internal_restart(target);
952
953 if (!debug_execution) {
954 target->state = TARGET_RUNNING;
955 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
956 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
957 } else {
958 target->state = TARGET_DEBUG_RUNNING;
959 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
960 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
961 }
962
963 return ERROR_OK;
964 }
965
/* Enter debug state bookkeeping after a halt: enable the ITR, decode
 * the debug reason from the cached DSCR, read WFAR on watchpoint hits,
 * pull the core registers, and run the post-debug-entry hook. */
static int cortex_a_debug_entry(struct target *target)
{
	uint32_t dscr;
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason (uses the DSCR snapshot taken by the poll) */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* First load register accessible through core debug port */
	retval = arm_dpm_read_current_registers(&armv7a->dpm);
	if (retval != ERROR_OK)
		return retval;

	if (arm->spsr) {
		/* read SPSR */
		retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
		if (retval != ERROR_OK)
			return retval;
	}

#if 0
/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
/*	armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1044
1045 static int cortex_a_post_debug_entry(struct target *target)
1046 {
1047 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1048 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1049 int retval;
1050
1051 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1052 retval = armv7a->arm.mrc(target, 15,
1053 0, 0, /* op1, op2 */
1054 1, 0, /* CRn, CRm */
1055 &cortex_a->cp15_control_reg);
1056 if (retval != ERROR_OK)
1057 return retval;
1058 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1059 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1060
1061 if (!armv7a->is_armv7r)
1062 armv7a_read_ttbcr(target);
1063
1064 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1065 armv7a_identify_cache(target);
1066
1067 if (armv7a->is_armv7r) {
1068 armv7a->armv7a_mmu.mmu_enabled = 0;
1069 } else {
1070 armv7a->armv7a_mmu.mmu_enabled =
1071 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1072 }
1073 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1074 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1075 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1076 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1077 cortex_a->curr_mode = armv7a->arm.core_mode;
1078
1079 /* switch to SVC mode to read DACR */
1080 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1081 armv7a->arm.mrc(target, 15,
1082 0, 0, 3, 0,
1083 &cortex_a->cp15_dacr_reg);
1084
1085 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1086 cortex_a->cp15_dacr_reg);
1087
1088 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1089 return ERROR_OK;
1090 }
1091
1092 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1093 {
1094 struct armv7a_common *armv7a = target_to_armv7a(target);
1095 uint32_t dscr;
1096
1097 /* Read DSCR */
1098 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1099 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1100 if (ERROR_OK != retval)
1101 return retval;
1102
1103 /* clear bitfield */
1104 dscr &= ~bit_mask;
1105 /* put new value */
1106 dscr |= value & bit_mask;
1107
1108 /* write new DSCR */
1109 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1110 armv7a->debug_base + CPUDBG_DSCR, dscr);
1111 return retval;
1112 }
1113
1114 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1115 int handle_breakpoints)
1116 {
1117 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1118 struct armv7a_common *armv7a = target_to_armv7a(target);
1119 struct arm *arm = &armv7a->arm;
1120 struct breakpoint *breakpoint = NULL;
1121 struct breakpoint stepbreakpoint;
1122 struct reg *r;
1123 int retval;
1124
1125 if (target->state != TARGET_HALTED) {
1126 LOG_WARNING("target not halted");
1127 return ERROR_TARGET_NOT_HALTED;
1128 }
1129
1130 /* current = 1: continue on current pc, otherwise continue at <address> */
1131 r = arm->pc;
1132 if (!current)
1133 buf_set_u32(r->value, 0, 32, address);
1134 else
1135 address = buf_get_u32(r->value, 0, 32);
1136
1137 /* The front-end may request us not to handle breakpoints.
1138 * But since Cortex-A uses breakpoint for single step,
1139 * we MUST handle breakpoints.
1140 */
1141 handle_breakpoints = 1;
1142 if (handle_breakpoints) {
1143 breakpoint = breakpoint_find(target, address);
1144 if (breakpoint)
1145 cortex_a_unset_breakpoint(target, breakpoint);
1146 }
1147
1148 /* Setup single step breakpoint */
1149 stepbreakpoint.address = address;
1150 stepbreakpoint.asid = 0;
1151 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1152 ? 2 : 4;
1153 stepbreakpoint.type = BKPT_HARD;
1154 stepbreakpoint.set = 0;
1155
1156 /* Disable interrupts during single step if requested */
1157 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1158 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1159 if (ERROR_OK != retval)
1160 return retval;
1161 }
1162
1163 /* Break on IVA mismatch */
1164 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1165
1166 target->debug_reason = DBG_REASON_SINGLESTEP;
1167
1168 retval = cortex_a_resume(target, 1, address, 0, 0);
1169 if (retval != ERROR_OK)
1170 return retval;
1171
1172 int64_t then = timeval_ms();
1173 while (target->state != TARGET_HALTED) {
1174 retval = cortex_a_poll(target);
1175 if (retval != ERROR_OK)
1176 return retval;
1177 if (target->state == TARGET_HALTED)
1178 break;
1179 if (timeval_ms() > then + 1000) {
1180 LOG_ERROR("timeout waiting for target halt");
1181 return ERROR_FAIL;
1182 }
1183 }
1184
1185 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1186
1187 /* Re-enable interrupts if they were disabled */
1188 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1189 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1190 if (ERROR_OK != retval)
1191 return retval;
1192 }
1193
1194
1195 target->debug_reason = DBG_REASON_BREAKPOINT;
1196
1197 if (breakpoint)
1198 cortex_a_set_breakpoint(target, breakpoint, 0);
1199
1200 if (target->state != TARGET_HALTED)
1201 LOG_DEBUG("target stepped");
1202
1203 return ERROR_OK;
1204 }
1205
1206 static int cortex_a_restore_context(struct target *target, bool bpwp)
1207 {
1208 struct armv7a_common *armv7a = target_to_armv7a(target);
1209
1210 LOG_DEBUG(" ");
1211
1212 if (armv7a->pre_restore_context)
1213 armv7a->pre_restore_context(target);
1214
1215 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1216 }
1217
1218 /*
1219 * Cortex-A Breakpoint and watchpoint functions
1220 */
1221
1222 /* Setup hardware Breakpoint Register Pair */
1223 static int cortex_a_set_breakpoint(struct target *target,
1224 struct breakpoint *breakpoint, uint8_t matchmode)
1225 {
1226 int retval;
1227 int brp_i = 0;
1228 uint32_t control;
1229 uint8_t byte_addr_select = 0x0F;
1230 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1231 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1232 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1233
1234 if (breakpoint->set) {
1235 LOG_WARNING("breakpoint already set");
1236 return ERROR_OK;
1237 }
1238
1239 if (breakpoint->type == BKPT_HARD) {
1240 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1241 brp_i++;
1242 if (brp_i >= cortex_a->brp_num) {
1243 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1244 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1245 }
1246 breakpoint->set = brp_i + 1;
1247 if (breakpoint->length == 2)
1248 byte_addr_select = (3 << (breakpoint->address & 0x02));
1249 control = ((matchmode & 0x7) << 20)
1250 | (byte_addr_select << 5)
1251 | (3 << 1) | 1;
1252 brp_list[brp_i].used = 1;
1253 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1254 brp_list[brp_i].control = control;
1255 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1256 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1257 brp_list[brp_i].value);
1258 if (retval != ERROR_OK)
1259 return retval;
1260 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1261 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1262 brp_list[brp_i].control);
1263 if (retval != ERROR_OK)
1264 return retval;
1265 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1266 brp_list[brp_i].control,
1267 brp_list[brp_i].value);
1268 } else if (breakpoint->type == BKPT_SOFT) {
1269 uint8_t code[4];
1270 /* length == 2: Thumb breakpoint */
1271 if (breakpoint->length == 2)
1272 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1273 else
1274 /* length == 3: Thumb-2 breakpoint, actual encoding is
1275 * a regular Thumb BKPT instruction but we replace a
1276 * 32bit Thumb-2 instruction, so fix-up the breakpoint
1277 * length
1278 */
1279 if (breakpoint->length == 3) {
1280 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1281 breakpoint->length = 4;
1282 } else
1283 /* length == 4, normal ARM breakpoint */
1284 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1285
1286 retval = target_read_memory(target,
1287 breakpoint->address & 0xFFFFFFFE,
1288 breakpoint->length, 1,
1289 breakpoint->orig_instr);
1290 if (retval != ERROR_OK)
1291 return retval;
1292
1293 /* make sure data cache is cleaned & invalidated down to PoC */
1294 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1295 armv7a_cache_flush_virt(target, breakpoint->address,
1296 breakpoint->length);
1297 }
1298
1299 retval = target_write_memory(target,
1300 breakpoint->address & 0xFFFFFFFE,
1301 breakpoint->length, 1, code);
1302 if (retval != ERROR_OK)
1303 return retval;
1304
1305 /* update i-cache at breakpoint location */
1306 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1307 breakpoint->length);
1308 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1309 breakpoint->length);
1310
1311 breakpoint->set = 0x11; /* Any nice value but 0 */
1312 }
1313
1314 return ERROR_OK;
1315 }
1316
1317 static int cortex_a_set_context_breakpoint(struct target *target,
1318 struct breakpoint *breakpoint, uint8_t matchmode)
1319 {
1320 int retval = ERROR_FAIL;
1321 int brp_i = 0;
1322 uint32_t control;
1323 uint8_t byte_addr_select = 0x0F;
1324 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1325 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1326 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1327
1328 if (breakpoint->set) {
1329 LOG_WARNING("breakpoint already set");
1330 return retval;
1331 }
1332 /*check available context BRPs*/
1333 while ((brp_list[brp_i].used ||
1334 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1335 brp_i++;
1336
1337 if (brp_i >= cortex_a->brp_num) {
1338 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1339 return ERROR_FAIL;
1340 }
1341
1342 breakpoint->set = brp_i + 1;
1343 control = ((matchmode & 0x7) << 20)
1344 | (byte_addr_select << 5)
1345 | (3 << 1) | 1;
1346 brp_list[brp_i].used = 1;
1347 brp_list[brp_i].value = (breakpoint->asid);
1348 brp_list[brp_i].control = control;
1349 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1350 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1351 brp_list[brp_i].value);
1352 if (retval != ERROR_OK)
1353 return retval;
1354 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1355 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1356 brp_list[brp_i].control);
1357 if (retval != ERROR_OK)
1358 return retval;
1359 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1360 brp_list[brp_i].control,
1361 brp_list[brp_i].value);
1362 return ERROR_OK;
1363
1364 }
1365
1366 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1367 {
1368 int retval = ERROR_FAIL;
1369 int brp_1 = 0; /* holds the contextID pair */
1370 int brp_2 = 0; /* holds the IVA pair */
1371 uint32_t control_CTX, control_IVA;
1372 uint8_t CTX_byte_addr_select = 0x0F;
1373 uint8_t IVA_byte_addr_select = 0x0F;
1374 uint8_t CTX_machmode = 0x03;
1375 uint8_t IVA_machmode = 0x01;
1376 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1377 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1378 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1379
1380 if (breakpoint->set) {
1381 LOG_WARNING("breakpoint already set");
1382 return retval;
1383 }
1384 /*check available context BRPs*/
1385 while ((brp_list[brp_1].used ||
1386 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1387 brp_1++;
1388
1389 printf("brp(CTX) found num: %d\n", brp_1);
1390 if (brp_1 >= cortex_a->brp_num) {
1391 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1392 return ERROR_FAIL;
1393 }
1394
1395 while ((brp_list[brp_2].used ||
1396 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1397 brp_2++;
1398
1399 printf("brp(IVA) found num: %d\n", brp_2);
1400 if (brp_2 >= cortex_a->brp_num) {
1401 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1402 return ERROR_FAIL;
1403 }
1404
1405 breakpoint->set = brp_1 + 1;
1406 breakpoint->linked_BRP = brp_2;
1407 control_CTX = ((CTX_machmode & 0x7) << 20)
1408 | (brp_2 << 16)
1409 | (0 << 14)
1410 | (CTX_byte_addr_select << 5)
1411 | (3 << 1) | 1;
1412 brp_list[brp_1].used = 1;
1413 brp_list[brp_1].value = (breakpoint->asid);
1414 brp_list[brp_1].control = control_CTX;
1415 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1416 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1417 brp_list[brp_1].value);
1418 if (retval != ERROR_OK)
1419 return retval;
1420 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1421 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1422 brp_list[brp_1].control);
1423 if (retval != ERROR_OK)
1424 return retval;
1425
1426 control_IVA = ((IVA_machmode & 0x7) << 20)
1427 | (brp_1 << 16)
1428 | (IVA_byte_addr_select << 5)
1429 | (3 << 1) | 1;
1430 brp_list[brp_2].used = 1;
1431 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1432 brp_list[brp_2].control = control_IVA;
1433 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1434 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1435 brp_list[brp_2].value);
1436 if (retval != ERROR_OK)
1437 return retval;
1438 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1439 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1440 brp_list[brp_2].control);
1441 if (retval != ERROR_OK)
1442 return retval;
1443
1444 return ERROR_OK;
1445 }
1446
/* Undo a breakpoint installed by one of the cortex_a_set_*_breakpoint()
 * helpers: for hardware breakpoints, free and clear the BRP(s); for
 * software breakpoints, restore the saved original instruction and keep
 * the caches coherent. Returns ERROR_OK even for bookkeeping anomalies
 * (already-unset breakpoint, invalid BRP index) so removal never wedges
 * higher layers; only actual debug-AP write failures are propagated.
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* Both address and ASID non-zero marks a hybrid breakpoint,
		 * which occupies two linked BRPs (see set_hybrid_breakpoint). */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;	/* "set" stores BRP index + 1 */
			int brp_j = breakpoint->linked_BRP;	/* the linked IVA BRP */
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* Release the context BRP and clear it on the core
			 * (BCR first, so the breakpoint is disabled before BVR changes). */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* Release and clear the linked IVA BRP the same way. */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* Plain hardware breakpoint: a single BRP to release. */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* Software breakpoint: write back the saved instruction. */

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
				breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
			breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
			breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1562
1563 static int cortex_a_add_breakpoint(struct target *target,
1564 struct breakpoint *breakpoint)
1565 {
1566 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1567
1568 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1569 LOG_INFO("no hardware breakpoint available");
1570 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1571 }
1572
1573 if (breakpoint->type == BKPT_HARD)
1574 cortex_a->brp_num_available--;
1575
1576 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1577 }
1578
1579 static int cortex_a_add_context_breakpoint(struct target *target,
1580 struct breakpoint *breakpoint)
1581 {
1582 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1583
1584 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1585 LOG_INFO("no hardware breakpoint available");
1586 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1587 }
1588
1589 if (breakpoint->type == BKPT_HARD)
1590 cortex_a->brp_num_available--;
1591
1592 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1593 }
1594
1595 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1596 struct breakpoint *breakpoint)
1597 {
1598 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1599
1600 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1601 LOG_INFO("no hardware breakpoint available");
1602 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1603 }
1604
1605 if (breakpoint->type == BKPT_HARD)
1606 cortex_a->brp_num_available--;
1607
1608 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1609 }
1610
1611
1612 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1613 {
1614 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1615
1616 #if 0
1617 /* It is perfectly possible to remove breakpoints while the target is running */
1618 if (target->state != TARGET_HALTED) {
1619 LOG_WARNING("target not halted");
1620 return ERROR_TARGET_NOT_HALTED;
1621 }
1622 #endif
1623
1624 if (breakpoint->set) {
1625 cortex_a_unset_breakpoint(target, breakpoint);
1626 if (breakpoint->type == BKPT_HARD)
1627 cortex_a->brp_num_available++;
1628 }
1629
1630
1631 return ERROR_OK;
1632 }
1633
1634 /*
1635 * Cortex-A Reset functions
1636 */
1637
/* Assert a warm reset on the target, preferring a user-supplied
 * RESET_ASSERT event handler, falling back to SRST via JTAG. May be
 * called before the target has been examined. On success the target
 * state is forced to TARGET_RESET and the register cache invalidated.
 */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */

		/*
		 * FIXME: fix reset when transport is SWD. This is a temporary
		 * work-around for release v0.10 that is not intended to stay!
		 */
		/* SRST is only pulsed here for SWD transports, or when a
		 * reset-halt is requested and SRST does not gate JTAG. */
		if (transport_is_swd() ||
				(target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
			jtag_add_reset(0, 1);

	} else {
		/* No event handler and no SRST line: nothing we can do. */
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1677
1678 static int cortex_a_deassert_reset(struct target *target)
1679 {
1680 int retval;
1681
1682 LOG_DEBUG(" ");
1683
1684 /* be certain SRST is off */
1685 jtag_add_reset(0, 0);
1686
1687 if (target_was_examined(target)) {
1688 retval = cortex_a_poll(target);
1689 if (retval != ERROR_OK)
1690 return retval;
1691 }
1692
1693 if (target->reset_halt) {
1694 if (target->state != TARGET_HALTED) {
1695 LOG_WARNING("%s: ran after reset and before halt ...",
1696 target_name(target));
1697 if (target_was_examined(target)) {
1698 retval = target_halt(target);
1699 if (retval != ERROR_OK)
1700 return retval;
1701 } else
1702 target->state = TARGET_UNKNOWN;
1703 }
1704 }
1705
1706 return ERROR_OK;
1707 }
1708
1709 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1710 {
1711 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1712 * New desired mode must be in mode. Current value of DSCR must be in
1713 * *dscr, which is updated with new value.
1714 *
1715 * This function elides actually sending the mode-change over the debug
1716 * interface if the mode is already set as desired.
1717 */
1718 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1719 if (new_dscr != *dscr) {
1720 struct armv7a_common *armv7a = target_to_armv7a(target);
1721 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1722 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1723 if (retval == ERROR_OK)
1724 *dscr = new_dscr;
1725 return retval;
1726 } else {
1727 return ERROR_OK;
1728 }
1729 }
1730
1731 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1732 uint32_t value, uint32_t *dscr)
1733 {
1734 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1735 struct armv7a_common *armv7a = target_to_armv7a(target);
1736 int64_t then;
1737 int retval;
1738
1739 if ((*dscr & mask) == value)
1740 return ERROR_OK;
1741
1742 then = timeval_ms();
1743 while (1) {
1744 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1745 armv7a->debug_base + CPUDBG_DSCR, dscr);
1746 if (retval != ERROR_OK) {
1747 LOG_ERROR("Could not read DSCR register");
1748 return retval;
1749 }
1750 if ((*dscr & mask) == value)
1751 break;
1752 if (timeval_ms() > then + 1000) {
1753 LOG_ERROR("timeout waiting for DSCR bit change");
1754 return ERROR_FAIL;
1755 }
1756 }
1757 return ERROR_OK;
1758 }
1759
/* Read a coprocessor register into *data: execute "opcode" on the
 * halted core (expected to be an MRC that lands the value in R0 — see
 * callers), push R0 into DTRTX, wait for it to latch, then read DTRTX
 * over the debug AP. *dscr is the cached DSCR value and is updated as
 * a side effect. Clobbers the core's R0.
 */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1793
1794 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1795 uint32_t *dfsr, uint32_t *dscr)
1796 {
1797 int retval;
1798
1799 if (dfar) {
1800 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1801 if (retval != ERROR_OK)
1802 return retval;
1803 }
1804
1805 if (dfsr) {
1806 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
1807 if (retval != ERROR_OK)
1808 return retval;
1809 }
1810
1811 return ERROR_OK;
1812 }
1813
/* Write "data" to a coprocessor register: stage the value in DTRRX over
 * the debug AP, move it into the core's R0, then execute "opcode"
 * (expected to be an MCR sourcing R0 — see callers), and wait for the
 * core to drain DTRRX. *dscr is the cached DSCR value and is updated
 * as a side effect. Clobbers the core's R0.
 */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
	uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Write the value into DTRRX. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1846
1847 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
1848 uint32_t dfsr, uint32_t *dscr)
1849 {
1850 int retval;
1851
1852 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
1853 if (retval != ERROR_OK)
1854 return retval;
1855
1856 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
1857 if (retval != ERROR_OK)
1858 return retval;
1859
1860 return ERROR_OK;
1861 }
1862
1863 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
1864 {
1865 uint32_t status, upper4;
1866
1867 if (dfsr & (1 << 9)) {
1868 /* LPAE format. */
1869 status = dfsr & 0x3f;
1870 upper4 = status >> 2;
1871 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
1872 return ERROR_TARGET_TRANSLATION_FAULT;
1873 else if (status == 33)
1874 return ERROR_TARGET_UNALIGNED_ACCESS;
1875 else
1876 return ERROR_TARGET_DATA_ABORT;
1877 } else {
1878 /* Normal format. */
1879 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
1880 if (status == 1)
1881 return ERROR_TARGET_UNALIGNED_ACCESS;
1882 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
1883 status == 9 || status == 11 || status == 13 || status == 15)
1884 return ERROR_TARGET_TRANSLATION_FAULT;
1885 else
1886 return ERROR_TARGET_DATA_ABORT;
1887 }
1888 }
1889
static int cortex_a_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX.
		 * The host-side buffer bytes are converted to target byte
		 * order via the target_buffer_get_* helpers. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory.
		 * The STR*_IP forms store R1 through R0; presumably they
		 * post-increment R0 by the access size so the next iteration
		 * targets the following address — TODO confirm the macro
		 * encoding. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
1964
static int cortex_a_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is fast but only works for
	 * word-sized objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* Switch to fast mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Latch STC instruction. In DCC fast mode the latched instruction is
	 * presumably re-executed by the core for each DTRRX write below —
	 * TODO confirm against the ARMv7 debug spec (fast mode, C8). */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
	if (retval != ERROR_OK)
		return retval;

	/* Transfer all the data and issue all the instructions: stream the
	 * words to DTRRX with a non-incrementing buffer write. */
	return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
1993
static int cortex_a_write_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* Write memory through the CPU: load the start address into R0 via the
	 * DCC, run the fast (word-aligned) or slow transfer helper, then do
	 * cleanup — restore DCC mode, drain the channel, and translate any
	 * sticky abort into an error code while restoring DFAR/DFSR.
	 * Everything after the "out" label runs on both success and failure;
	 * final_retval keeps the first error encountered. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty (it is clobbered below; the register cache will
	 * restore it on resume). */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0: write it to DTRRX, then execute
	 * MRC p14, 0, R0, c0, c5, 0 to move DTRRX into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. However, don't do this
	 * if there is fault, because then the instruction might not have completed
	 * successfully. */
	if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
		if (retval != ERROR_OK)
			/* NOTE(review): returning here skips the abort handling,
			 * DFAR/DFSR restore and DCC drain below — presumably
			 * acceptable since this only fails on a transport/timeout
			 * error, but confirm. */
			return retval;
	}

	/* If there were any sticky abort flags, clear them. fault_dscr keeps
	 * the pre-clear snapshot so the fault can be classified below. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it: read out a stale DTRTX word, and
	 * consume a stale DTRRX word by executing MRC p14, 0, R1, c0, c5, 0. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2132
static int cortex_a_read_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_read_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Issue a load of the appropriate size to R1. The *_IP opcode
		 * variants presumably post-increment R0 by the access size so
		 * each iteration reads the next object — confirm against the
		 * opcode macros. */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX (MCR p14, 0, R1, c0, c5, 0). */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Sub-word
		 * loads leave the value in the low bits of the word; store it in
		 * target byte order. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2208
static int cortex_a_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 * The first LDC is issued in non-blocking mode; the remaining count-1
	 * words are pipelined through fast mode, and the last word is collected
	 * separately after switching back.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. It loads a word from
	 * the address in R0 into DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* One word is already in flight. */
	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance past everything collected so far; one word remains. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2295
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through the CPU: load the start address into R0 via the
	 * DCC, run the fast (word-aligned) or slow transfer helper, then do
	 * cleanup — restore DCC mode, translate any sticky abort into an error
	 * code while restoring DFAR/DFSR, and drain the channel. Everything
	 * after the "out" label runs on both success and failure; final_retval
	 * keeps the first error encountered. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty (it is clobbered below; the register cache will
	 * restore it on resume). */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0: write it to DTRRX, then execute
	 * MRC p14, 0, R0, c0, c5, 0 to move DTRRX into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. fault_dscr keeps
	 * the pre-clear snapshot so the fault can be classified below. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it: read out a stale DTRTX word, and
	 * consume a stale DTRRX word by executing MRC p14, 0, R1, c0, c5, 0. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2422
2423
/*
 * Cortex-A Memory access
 *
 * This is the same as for Cortex-M3, but we must also use the
 * correct AP number for every access.
 */
2430
2431 static int cortex_a_read_phys_memory(struct target *target,
2432 target_addr_t address, uint32_t size,
2433 uint32_t count, uint8_t *buffer)
2434 {
2435 int retval;
2436
2437 if (!count || !buffer)
2438 return ERROR_COMMAND_SYNTAX_ERROR;
2439
2440 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2441 address, size, count);
2442
2443 /* read memory through the CPU */
2444 cortex_a_prep_memaccess(target, 1);
2445 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2446 cortex_a_post_memaccess(target, 1);
2447
2448 return retval;
2449 }
2450
2451 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2452 uint32_t size, uint32_t count, uint8_t *buffer)
2453 {
2454 int retval;
2455
2456 /* cortex_a handles unaligned memory access */
2457 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2458 address, size, count);
2459
2460 cortex_a_prep_memaccess(target, 0);
2461 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2462 cortex_a_post_memaccess(target, 0);
2463
2464 return retval;
2465 }
2466
2467 static int cortex_a_write_phys_memory(struct target *target,
2468 target_addr_t address, uint32_t size,
2469 uint32_t count, const uint8_t *buffer)
2470 {
2471 int retval;
2472
2473 if (!count || !buffer)
2474 return ERROR_COMMAND_SYNTAX_ERROR;
2475
2476 LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2477 address, size, count);
2478
2479 /* write memory through the CPU */
2480 cortex_a_prep_memaccess(target, 1);
2481 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2482 cortex_a_post_memaccess(target, 1);
2483
2484 return retval;
2485 }
2486
2487 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2488 uint32_t size, uint32_t count, const uint8_t *buffer)
2489 {
2490 int retval;
2491
2492 /* cortex_a handles unaligned memory access */
2493 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2494 address, size, count);
2495
2496 /* memory writes bypass the caches, must flush before writing */
2497 armv7a_cache_auto_flush_on_write(target, address, size * count);
2498
2499 cortex_a_prep_memaccess(target, 0);
2500 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2501 cortex_a_post_memaccess(target, 0);
2502 return retval;
2503 }
2504
2505 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2506 uint32_t count, uint8_t *buffer)
2507 {
2508 uint32_t size;
2509
2510 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2511 * will have something to do with the size we leave to it. */
2512 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2513 if (address & size) {
2514 int retval = target_read_memory(target, address, size, 1, buffer);
2515 if (retval != ERROR_OK)
2516 return retval;
2517 address += size;
2518 count -= size;
2519 buffer += size;
2520 }
2521 }
2522
2523 /* Read the data with as large access size as possible. */
2524 for (; size > 0; size /= 2) {
2525 uint32_t aligned = count - count % size;
2526 if (aligned > 0) {
2527 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2528 if (retval != ERROR_OK)
2529 return retval;
2530 address += aligned;
2531 count -= aligned;
2532 buffer += aligned;
2533 }
2534 }
2535
2536 return ERROR_OK;
2537 }
2538
2539 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2540 uint32_t count, const uint8_t *buffer)
2541 {
2542 uint32_t size;
2543
2544 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2545 * will have something to do with the size we leave to it. */
2546 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2547 if (address & size) {
2548 int retval = target_write_memory(target, address, size, 1, buffer);
2549 if (retval != ERROR_OK)
2550 return retval;
2551 address += size;
2552 count -= size;
2553 buffer += size;
2554 }
2555 }
2556
2557 /* Write the data with as large access size as possible. */
2558 for (; size > 0; size /= 2) {
2559 uint32_t aligned = count - count % size;
2560 if (aligned > 0) {
2561 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2562 if (retval != ERROR_OK)
2563 return retval;
2564 address += aligned;
2565 count -= aligned;
2566 buffer += aligned;
2567 }
2568 }
2569
2570 return ERROR_OK;
2571 }
2572
static int cortex_a_handle_target_request(void *priv)
{
	/* Periodic timer callback (registered in cortex_a_init_arch_info):
	 * while the target runs, drain debug messages it pushes through the
	 * DCC transmit register (DTRTX) and hand them to the target request
	 * layer. */
	struct target *target = priv;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint32_t request;
		uint32_t dscr;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);

		/* check if we have data */
		/* NOTE(review): if the DSCR read above failed, the loop body is
		 * skipped and ERROR_OK is returned — the error is silently
		 * dropped (best-effort polling); confirm this is intended. */
		int64_t then = timeval_ms();
		while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
			retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_DTRTX, &request);
			if (retval == ERROR_OK) {
				target_request(target, request);
				retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
						armv7a->debug_base + CPUDBG_DSCR, &dscr);
			}
			/* Guard against a target that keeps TXfull asserted. */
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for dtr tx full");
				return ERROR_FAIL;
			}
		}
	}

	return ERROR_OK;
}
2609
2610 /*
2611 * Cortex-A target information and configuration
2612 */
2613
2614 static int cortex_a_examine_first(struct target *target)
2615 {
2616 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2617 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2618 struct adiv5_dap *swjdp = armv7a->arm.dap;
2619
2620 int i;
2621 int retval = ERROR_OK;
2622 uint32_t didr, cpuid, dbg_osreg;
2623
2624 /* Search for the APB-AP - it is needed for access to debug registers */
2625 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2626 if (retval != ERROR_OK) {
2627 LOG_ERROR("Could not find APB-AP for debug access");
2628 return retval;
2629 }
2630
2631 retval = mem_ap_init(armv7a->debug_ap);
2632 if (retval != ERROR_OK) {
2633 LOG_ERROR("Could not initialize the APB-AP");
2634 return retval;
2635 }
2636
2637 armv7a->debug_ap->memaccess_tck = 80;
2638
2639 if (!target->dbgbase_set) {
2640 uint32_t dbgbase;
2641 /* Get ROM Table base */
2642 uint32_t apid;
2643 int32_t coreidx = target->coreid;
2644 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2645 target->cmd_name);
2646 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2647 if (retval != ERROR_OK)
2648 return retval;
2649 /* Lookup 0x15 -- Processor DAP */
2650 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2651 &armv7a->debug_base, &coreidx);
2652 if (retval != ERROR_OK) {
2653 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2654 target->cmd_name);
2655 return retval;
2656 }
2657 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2658 target->coreid, armv7a->debug_base);
2659 } else
2660 armv7a->debug_base = target->dbgbase;
2661
2662 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2663 armv7a->debug_base + CPUDBG_DIDR, &didr);
2664 if (retval != ERROR_OK) {
2665 LOG_DEBUG("Examine %s failed", "DIDR");
2666 return retval;
2667 }
2668
2669 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2670 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2671 if (retval != ERROR_OK) {
2672 LOG_DEBUG("Examine %s failed", "CPUID");
2673 return retval;
2674 }
2675
2676 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2677 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2678
2679 cortex_a->didr = didr;
2680 cortex_a->cpuid = cpuid;
2681
2682 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2683 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2684 if (retval != ERROR_OK)
2685 return retval;
2686 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2687
2688 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2689 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2690 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2691 return ERROR_TARGET_INIT_FAILED;
2692 }
2693
2694 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2695 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2696
2697 /* Read DBGOSLSR and check if OSLK is implemented */
2698 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2699 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2700 if (retval != ERROR_OK)
2701 return retval;
2702 LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2703
2704 /* check if OS Lock is implemented */
2705 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2706 /* check if OS Lock is set */
2707 if (dbg_osreg & OSLSR_OSLK) {
2708 LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2709
2710 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2711 armv7a->debug_base + CPUDBG_OSLAR,
2712 0);
2713 if (retval == ERROR_OK)
2714 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2715 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2716
2717 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2718 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2719 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2720 target->coreid);
2721 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2722 return ERROR_TARGET_INIT_FAILED;
2723 }
2724 }
2725 }
2726
2727 armv7a->arm.core_type = ARM_MODE_MON;
2728
2729 /* Avoid recreating the registers cache */
2730 if (!target_was_examined(target)) {
2731 retval = cortex_a_dpm_setup(cortex_a, didr);
2732 if (retval != ERROR_OK)
2733 return retval;
2734 }
2735
2736 /* Setup Breakpoint Register Pairs */
2737 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2738 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2739 cortex_a->brp_num_available = cortex_a->brp_num;
2740 free(cortex_a->brp_list);
2741 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2742 /* cortex_a->brb_enabled = ????; */
2743 for (i = 0; i < cortex_a->brp_num; i++) {
2744 cortex_a->brp_list[i].used = 0;
2745 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2746 cortex_a->brp_list[i].type = BRP_NORMAL;
2747 else
2748 cortex_a->brp_list[i].type = BRP_CONTEXT;
2749 cortex_a->brp_list[i].value = 0;
2750 cortex_a->brp_list[i].control = 0;
2751 cortex_a->brp_list[i].BRPn = i;
2752 }
2753
2754 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2755
2756 /* select debug_ap as default */
2757 swjdp->apsel = armv7a->debug_ap->ap_num;
2758
2759 target_set_examined(target);
2760 return ERROR_OK;
2761 }
2762
2763 static int cortex_a_examine(struct target *target)
2764 {
2765 int retval = ERROR_OK;
2766
2767 /* Reestablish communication after target reset */
2768 retval = cortex_a_examine_first(target);
2769
2770 /* Configure core debug access */
2771 if (retval == ERROR_OK)
2772 retval = cortex_a_init_debug_access(target);
2773
2774 return retval;
2775 }
2776
2777 /*
2778 * Cortex-A target creation and initialization
2779 */
2780
2781 static int cortex_a_init_target(struct command_context *cmd_ctx,
2782 struct target *target)
2783 {
2784 /* examine_first() does a bunch of this */
2785 arm_semihosting_init(target);
2786 return ERROR_OK;
2787 }
2788
2789 static int cortex_a_init_arch_info(struct target *target,
2790 struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
2791 {
2792 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2793
2794 /* Setup struct cortex_a_common */
2795 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2796 armv7a->arm.dap = dap;
2797
2798 /* register arch-specific functions */
2799 armv7a->examine_debug_reason = NULL;
2800
2801 armv7a->post_debug_entry = cortex_a_post_debug_entry;
2802
2803 armv7a->pre_restore_context = NULL;
2804
2805 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
2806
2807
2808 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
2809
2810 /* REVISIT v7a setup should be in a v7a-specific routine */
2811 armv7a_init_arch_info(target, armv7a);
2812 target_register_timer_callback(cortex_a_handle_target_request, 1,
2813 TARGET_TIMER_TYPE_PERIODIC, target);
2814
2815 return ERROR_OK;
2816 }
2817
2818 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
2819 {
2820 struct cortex_a_common *cortex_a;
2821 struct adiv5_private_config *pc;
2822
2823 if (target->private_config == NULL)
2824 return ERROR_FAIL;
2825
2826 pc = (struct adiv5_private_config *)target->private_config;
2827
2828 cortex_a = calloc(1, sizeof(struct cortex_a_common));
2829 if (cortex_a == NULL) {
2830 LOG_ERROR("Out of memory");
2831 return ERROR_FAIL;
2832 }
2833 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2834 cortex_a->armv7a_common.is_armv7r = false;
2835 cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
2836
2837 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2838 }
2839
2840 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2841 {
2842 struct cortex_a_common *cortex_a;
2843 struct adiv5_private_config *pc;
2844
2845 pc = (struct adiv5_private_config *)target->private_config;
2846 if (adiv5_verify_config(pc) != ERROR_OK)
2847 return ERROR_FAIL;
2848
2849 cortex_a = calloc(1, sizeof(struct cortex_a_common));
2850 if (cortex_a == NULL) {
2851 LOG_ERROR("Out of memory");
2852 return ERROR_FAIL;
2853 }
2854 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2855 cortex_a->armv7a_common.is_armv7r = true;
2856
2857 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2858 }
2859
2860 static void cortex_a_deinit_target(struct target *target)
2861 {
2862 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2863 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2864 struct arm_dpm *dpm = &armv7a->dpm;
2865 uint32_t dscr;
2866 int retval;
2867
2868 if (target_was_examined(target)) {
2869 /* Disable halt for breakpoint, watchpoint and vector catch */
2870 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2871 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2872 if (retval == ERROR_OK)
2873 mem_ap_write_atomic_u32(armv7a->debug_ap,
2874 armv7a->debug_base + CPUDBG_DSCR,
2875 dscr & ~DSCR_HALT_DBG_MODE);
2876 }
2877
2878 free(cortex_a->brp_list);
2879 free(dpm->dbp);
2880 free(dpm->dwp);
2881 free(target->private_config);
2882 free(cortex_a);
2883 }
2884
2885 static int cortex_a_mmu(struct target *target, int *enabled)
2886 {
2887 struct armv7a_common *armv7a = target_to_armv7a(target);
2888
2889 if (target->state != TARGET_HALTED) {
2890 LOG_ERROR("%s: target not halted", __func__);
2891 return ERROR_TARGET_INVALID;
2892 }
2893
2894 if (armv7a->is_armv7r)
2895 *enabled = 0;
2896 else
2897 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2898
2899 return ERROR_OK;
2900 }
2901
2902 static int cortex_a_virt2phys(struct target *target,
2903 target_addr_t virt, target_addr_t *phys)
2904 {
2905 int retval;
2906 int mmu_enabled = 0;
2907
2908 /*
2909 * If the MMU was not enabled at debug entry, there is no
2910 * way of knowing if there was ever a valid configuration
2911 * for it and thus it's not safe to enable it. In this case,
2912 * just return the virtual address as physical.
2913 */
2914 cortex_a_mmu(target, &mmu_enabled);
2915 if (!mmu_enabled) {
2916 *phys = virt;
2917 return ERROR_OK;
2918 }
2919
2920 /* mmu must be enable in order to get a correct translation */
2921 retval = cortex_a_mmu_modify(target, 1);
2922 if (retval != ERROR_OK)
2923 return retval;
2924 return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
2925 (uint32_t *)phys, 1);
2926 }
2927
2928 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
2929 {
2930 struct target *target = get_current_target(CMD_CTX);
2931 struct armv7a_common *armv7a = target_to_armv7a(target);
2932
2933 return armv7a_handle_cache_info_command(CMD,
2934 &armv7a->armv7a_mmu.armv7a_cache);
2935 }
2936
2937
2938 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
2939 {
2940 struct target *target = get_current_target(CMD_CTX);
2941 if (!target_was_examined(target)) {
2942 LOG_ERROR("target not examined yet");
2943 return ERROR_FAIL;
2944 }
2945
2946 return cortex_a_init_debug_access(target);
2947 }
2948
2949 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
2950 {
2951 struct target *target = get_current_target(CMD_CTX);
2952 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2953
2954 static const Jim_Nvp nvp_maskisr_modes[] = {
2955 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
2956 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
2957 { .name = NULL, .value = -1 },
2958 };
2959 const Jim_Nvp *n;
2960
2961 if (CMD_ARGC > 0) {
2962 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2963 if (n->name == NULL) {
2964 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2965 return ERROR_COMMAND_SYNTAX_ERROR;
2966 }
2967
2968 cortex_a->isrmasking_mode = n->value;
2969 }
2970
2971 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
2972 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
2973
2974 return ERROR_OK;
2975 }
2976
2977 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
2978 {
2979 struct target *target = get_current_target(CMD_CTX);
2980 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2981
2982 static const Jim_Nvp nvp_dacrfixup_modes[] = {
2983 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
2984 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
2985 { .name = NULL, .value = -1 },
2986 };
2987 const Jim_Nvp *n;
2988
2989 if (CMD_ARGC > 0) {
2990 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
2991 if (n->name == NULL)
2992 return ERROR_COMMAND_SYNTAX_ERROR;
2993 cortex_a->dacrfixup_mode = n->value;
2994
2995 }
2996
2997 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
2998 command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
2999
3000 return ERROR_OK;
3001 }
3002
/* Sub-commands of the "cortex_a" command group (see registration below). */
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask cortex_a interrupts",
		.usage = "['on'|'off']",
	},
	{
		.name = "dacrfixup",
		.handler = handle_cortex_a_dacrfixup_command,
		.mode = COMMAND_ANY,
		.help = "set domain access control (DACR) to all-manager "
			"on memory access",
		.usage = "['on'|'off']",
	},
	{
		/* Shared ARMv7-A MMU inspection commands. */
		.chain = armv7a_mmu_command_handlers,
	},
	{
		/* Generic SMP target commands. */
		.chain = smp_command_handlers,
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command set for Cortex-A targets: generic ARM and ARMv7-A
 * commands plus the "cortex_a" group defined above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3058
/* Target driver entry points for ARMv7-A (Cortex-A) cores. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* NOTE(review): watchpoints are not implemented for this target. */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	/* Physical-memory access and address translation (MMU-aware). */
	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3107
/* Sub-commands of the "cortex_r4" command group; the handlers are shared
 * with the Cortex-A driver. */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		/* NOTE(review): the Cortex-A variant registers this same handler
		 * with COMMAND_ANY — confirm whether EXEC-only is intentional. */
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command set for Cortex-R4 targets: generic ARM commands plus
 * the "cortex_r4" group defined above. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3139
/* Target driver entry points for ARMv7-R (Cortex-R4) cores. Most handlers
 * are shared with the Cortex-A driver; since ARMv7-R has no MMU, plain
 * memory accesses go straight to the physical-access routines and no
 * mmu/virt2phys hooks are installed. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* No MMU: "virtual" accesses are physical accesses. */
	.read_memory = cortex_a_read_phys_memory,
	.write_memory = cortex_a_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* NOTE(review): watchpoints are not implemented for this target. */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)