cortex_a9: trivial fixes
[openocd.git] / src / target / cortex_a9.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A9(tm) TRM, ARM DDI 0407F *
33 * *
34 ***************************************************************************/
35 #ifdef HAVE_CONFIG_H
36 #include "config.h"
37 #endif
38
39 #include "breakpoints.h"
40 #include "cortex_a9.h"
41 #include "register.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
46
47 static int cortex_a9_poll(struct target *target);
48 static int cortex_a9_debug_entry(struct target *target);
49 static int cortex_a9_restore_context(struct target *target, bool bpwp);
50 static int cortex_a9_set_breakpoint(struct target *target,
51 struct breakpoint *breakpoint, uint8_t matchmode);
52 static int cortex_a9_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int cortex_a9_dap_read_coreregister_u32(struct target *target,
55 uint32_t *value, int regnum);
56 static int cortex_a9_dap_write_coreregister_u32(struct target *target,
57 uint32_t value, int regnum);
58 static int cortex_a9_mmu(struct target *target, int *enabled);
59 static int cortex_a9_virt2phys(struct target *target,
60 uint32_t virt, uint32_t *phys);
61 static int cortex_a9_disable_mmu_caches(struct target *target, int mmu,
62 int d_u_cache, int i_cache);
63 static int cortex_a9_enable_mmu_caches(struct target *target, int mmu,
64 int d_u_cache, int i_cache);
65 static int cortex_a9_get_ttb(struct target *target, uint32_t *result);
66
67
68 /*
69 * FIXME do topology discovery using the ROM; don't
70 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
71 * cores, with different AP numbering ... don't use a #define
72 * for these numbers, use per-core armv7a state.
73 */
74 #define swjdp_memoryap 0
75 #define swjdp_debugap 1
76
77 /*
78 * Cortex-A9 Basic debug access, very low level assumes state is saved
79 */
80 static int cortex_a9_init_debug_access(struct target *target)
81 {
82 struct armv7a_common *armv7a = target_to_armv7a(target);
83 struct adiv5_dap *swjdp = &armv7a->dap;
84 uint8_t saved_apsel = dap_ap_get_select(swjdp);
85
86 int retval;
87 uint32_t dummy;
88
89 dap_ap_select(swjdp, swjdp_debugap);
90
91 LOG_DEBUG(" ");
92
93 /* Unlocking the debug registers for modification */
94 /* The debugport might be uninitialised so try twice */
95 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
96 if (retval != ERROR_OK)
97 {
98 /* try again */
99 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
100 if (retval == ERROR_OK)
101 {
102 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
103 }
104 }
105 if (retval != ERROR_OK)
106 goto out;
107 /* Clear Sticky Power Down status Bit in PRSR to enable access to
108 the registers in the Core Power Domain */
109 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
110 if (retval != ERROR_OK)
111 goto out;
112
113 /* Enabling of instruction execution in debug mode is done in debug_entry code */
114
115 /* Resync breakpoint registers */
116
117 /* Since this is likely called from init or reset, update target state information*/
118 retval = cortex_a9_poll(target);
119
120 out:
121 dap_ap_select(swjdp, saved_apsel);
122 return retval;
123 }
124
/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value. Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 *
 * Executes one ARM opcode on the halted core via the ITR register.
 * Waits (up to 1 s each) for any pending instruction to complete, writes
 * the opcode to ITR, then waits for it to finish.  On success the
 * invariant DSCR_INSTR_COMP is set, and *dscr_p (if given) holds the
 * final DSCR value.  NOTE(review): assumes DSCR.ITRen was enabled by
 * the debug-entry code -- confirm when calling from new paths.
 */
static int cortex_a9_exec_opcode(struct target *target,
		uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;

	/* Caller-supplied DSCR avoids an extra read when it is current */
	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_INSTR_COMP) == 0)
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* Writing ITR triggers execution of the opcode on the core */
	retval = mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	then = timeval_ms();
	do
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
			return ERROR_FAIL;
		}
	}
	while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
187
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.
**************************************************************************/
static int cortex_a9_read_regs_through_mem(struct target *target, uint32_t address,
		uint32_t * regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;

	/* r0 is read separately first: it is clobbered below as the STM base */
	retval = cortex_a9_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	/* point r0 at the caller-provided work area */
	retval = cortex_a9_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* STMIA r0, {r1-r15}: dump r1..r15 to the work area in one shot */
	retval = cortex_a9_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* fetch the dumped registers back over the memory AP,
	 * then return to the debug AP for the caller */
	dap_ap_select(swjdp, swjdp_memoryap);
	retval = mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
	if (retval != ERROR_OK)
		return retval;
	dap_ap_select(swjdp, swjdp_debugap);

	return retval;
}
217
218 static int cortex_a9_dap_read_coreregister_u32(struct target *target,
219 uint32_t *value, int regnum)
220 {
221 int retval = ERROR_OK;
222 uint8_t reg = regnum&0xFF;
223 uint32_t dscr = 0;
224 struct armv7a_common *armv7a = target_to_armv7a(target);
225 struct adiv5_dap *swjdp = &armv7a->dap;
226
227 if (reg > 17)
228 return retval;
229
230 if (reg < 15)
231 {
232 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
233 retval = cortex_a9_exec_opcode(target,
234 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
235 &dscr);
236 if (retval != ERROR_OK)
237 return retval;
238 }
239 else if (reg == 15)
240 {
241 /* "MOV r0, r15"; then move r0 to DCCTX */
242 retval = cortex_a9_exec_opcode(target, 0xE1A0000F, &dscr);
243 if (retval != ERROR_OK)
244 return retval;
245 retval = cortex_a9_exec_opcode(target,
246 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
247 &dscr);
248 if (retval != ERROR_OK)
249 return retval;
250 }
251 else
252 {
253 /* "MRS r0, CPSR" or "MRS r0, SPSR"
254 * then move r0 to DCCTX
255 */
256 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
257 if (retval != ERROR_OK)
258 return retval;
259 retval = cortex_a9_exec_opcode(target,
260 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
261 &dscr);
262 if (retval != ERROR_OK)
263 return retval;
264 }
265
266 /* Wait for DTRRXfull then read DTRRTX */
267 long long then = timeval_ms();
268 while ((dscr & DSCR_DTR_TX_FULL) == 0)
269 {
270 retval = mem_ap_read_atomic_u32(swjdp,
271 armv7a->debug_base + CPUDBG_DSCR, &dscr);
272 if (retval != ERROR_OK)
273 return retval;
274 if (timeval_ms() > then + 1000)
275 {
276 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
277 return ERROR_FAIL;
278 }
279 }
280
281 retval = mem_ap_read_atomic_u32(swjdp,
282 armv7a->debug_base + CPUDBG_DTRTX, value);
283 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
284
285 return retval;
286 }
287
288 static int cortex_a9_dap_write_coreregister_u32(struct target *target,
289 uint32_t value, int regnum)
290 {
291 int retval = ERROR_OK;
292 uint8_t Rd = regnum&0xFF;
293 uint32_t dscr;
294 struct armv7a_common *armv7a = target_to_armv7a(target);
295 struct adiv5_dap *swjdp = &armv7a->dap;
296
297 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
298
299 /* Check that DCCRX is not full */
300 retval = mem_ap_read_atomic_u32(swjdp,
301 armv7a->debug_base + CPUDBG_DSCR, &dscr);
302 if (retval != ERROR_OK)
303 return retval;
304 if (dscr & DSCR_DTR_RX_FULL)
305 {
306 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
307 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
308 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
309 &dscr);
310 if (retval != ERROR_OK)
311 return retval;
312 }
313
314 if (Rd > 17)
315 return retval;
316
317 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
318 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
319 retval = mem_ap_write_u32(swjdp,
320 armv7a->debug_base + CPUDBG_DTRRX, value);
321 if (retval != ERROR_OK)
322 return retval;
323
324 if (Rd < 15)
325 {
326 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
327 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
328 &dscr);
329 if (retval != ERROR_OK)
330 return retval;
331 }
332 else if (Rd == 15)
333 {
334 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
335 * then "mov r15, r0"
336 */
337 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
338 &dscr);
339 if (retval != ERROR_OK)
340 return retval;
341 retval = cortex_a9_exec_opcode(target, 0xE1A0F000, &dscr);
342 if (retval != ERROR_OK)
343 return retval;
344 }
345 else
346 {
347 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
348 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
349 */
350 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
351 &dscr);
352 if (retval != ERROR_OK)
353 return retval;
354 retval = cortex_a9_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
355 &dscr);
356 if (retval != ERROR_OK)
357 return retval;
358
359 /* "Prefetch flush" after modifying execution status in CPSR */
360 if (Rd == 16)
361 {
362 retval = cortex_a9_exec_opcode(target,
363 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
364 &dscr);
365 if (retval != ERROR_OK)
366 return retval;
367 }
368 }
369
370 return retval;
371 }
372
373 /* Write to memory mapped registers directly with no cache or mmu handling */
374 static int cortex_a9_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
375 {
376 int retval;
377 struct armv7a_common *armv7a = target_to_armv7a(target);
378 struct adiv5_dap *swjdp = &armv7a->dap;
379
380 retval = mem_ap_write_atomic_u32(swjdp, address, value);
381
382 return retval;
383 }
384
385 /*
386 * Cortex-A9 implementation of Debug Programmer's Model
387 *
388 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
389 * so there's no need to poll for it before executing an instruction.
390 *
391 * NOTE that in several of these cases the "stall" mode might be useful.
392 * It'd let us queue a few operations together... prepare/finish might
393 * be the places to enable/disable that mode.
394 */
395
/* Recover the enclosing cortex_a9_common from its embedded arm_dpm. */
static inline struct cortex_a9_common *dpm_to_a9(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a9_common, armv7a_common.dpm);
}
400
401 static int cortex_a9_write_dcc(struct cortex_a9_common *a9, uint32_t data)
402 {
403 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
404 return mem_ap_write_u32(&a9->armv7a_common.dap,
405 a9->armv7a_common.debug_base + CPUDBG_DTRRX, data);
406 }
407
/* Read one word from the DCC transmit channel (DTRTX).
 *
 * Polls DSCR (up to 1 s) until DTRTXfull is set, then reads the data.
 * *dscr_p, if given, supplies the last-known DSCR on entry (saving one
 * read) and receives the final DSCR on exit.
 */
static int cortex_a9_read_dcc(struct cortex_a9_common *a9, uint32_t *data,
		uint32_t *dscr_p)
{
	struct adiv5_dap *swjdp = &a9->armv7a_common.dap;
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(swjdp,
				a9->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(swjdp,
			a9->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	//LOG_DEBUG("read DCC 0x%08" PRIx32, *data);

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
444
/* DPM prepare hook: establish the preconditions for instruction
 * execution through the DPM -- wait until no instruction is pending
 * (DSCR_INSTR_COMP set, with 1 s timeout) and drain a stale DCCRX
 * word if one is unexpectedly present.
 */
static int cortex_a9_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a9_common *a9 = dpm_to_a9(dpm);
	struct adiv5_dap *swjdp = &a9->armv7a_common.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	long long then = timeval_ms();
	for (;;)
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				a9->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX */
		retval = cortex_a9_exec_opcode(
				a9->armv7a_common.armv4_5_common.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
484
485 static int cortex_a9_dpm_finish(struct arm_dpm *dpm)
486 {
487 /* REVISIT what could be done here? */
488 return ERROR_OK;
489 }
490
491 static int cortex_a9_instr_write_data_dcc(struct arm_dpm *dpm,
492 uint32_t opcode, uint32_t data)
493 {
494 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
495 int retval;
496 uint32_t dscr = DSCR_INSTR_COMP;
497
498 retval = cortex_a9_write_dcc(a9, data);
499 if (retval != ERROR_OK)
500 return retval;
501
502 return cortex_a9_exec_opcode(
503 a9->armv7a_common.armv4_5_common.target,
504 opcode,
505 &dscr);
506 }
507
/* DPM hook: put `data` in the DCC, move it into R0, then run `opcode`,
 * which is expected to take its operand from R0. */
static int cortex_a9_instr_write_data_r0(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t data)
{
	struct cortex_a9_common *a9 = dpm_to_a9(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a9_write_dcc(a9, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
	 * (old comment said MCR / 0xEE000E15, which is the opposite
	 * transfer direction) */
	retval = cortex_a9_exec_opcode(
			a9->armv7a_common.armv4_5_common.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a9_exec_opcode(
			a9->armv7a_common.armv4_5_common.target,
			opcode,
			&dscr);

	return retval;
}
535
536 static int cortex_a9_instr_cpsr_sync(struct arm_dpm *dpm)
537 {
538 struct target *target = dpm->arm->target;
539 uint32_t dscr = DSCR_INSTR_COMP;
540
541 /* "Prefetch flush" after modifying execution status in CPSR */
542 return cortex_a9_exec_opcode(target,
543 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
544 &dscr);
545 }
546
547 static int cortex_a9_instr_read_data_dcc(struct arm_dpm *dpm,
548 uint32_t opcode, uint32_t *data)
549 {
550 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
551 int retval;
552 uint32_t dscr = DSCR_INSTR_COMP;
553
554 /* the opcode, writing data to DCC */
555 retval = cortex_a9_exec_opcode(
556 a9->armv7a_common.armv4_5_common.target,
557 opcode,
558 &dscr);
559 if (retval != ERROR_OK)
560 return retval;
561
562 return cortex_a9_read_dcc(a9, data, &dscr);
563 }
564
565
/* DPM hook: run `opcode`, which is expected to leave its result in R0,
 * then move R0 through the DCC into *data. */
static int cortex_a9_instr_read_data_r0(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t *data)
{
	struct cortex_a9_common *a9 = dpm_to_a9(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a9_exec_opcode(
			a9->armv7a_common.armv4_5_common.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC: "MCR p14, 0, R0, c0, c5, 0" */
	retval = cortex_a9_exec_opcode(
			a9->armv7a_common.armv4_5_common.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a9_read_dcc(a9, data, &dscr);
}
591
/* DPM hook: program one breakpoint (index 0..15) or watchpoint
 * (index 16..31) comparator by writing its value register (address)
 * and control register.  Uses GCC case ranges. */
static int cortex_a9_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
		uint32_t addr, uint32_t control)
{
	struct cortex_a9_common *a9 = dpm_to_a9(dpm);
	uint32_t vr = a9->armv7a_common.debug_base;
	uint32_t cr = a9->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
	case 0 ... 15:		/* breakpoints */
		vr += CPUDBG_BVR_BASE;
		cr += CPUDBG_BCR_BASE;
		break;
	case 16 ... 31:		/* watchpoints */
		vr += CPUDBG_WVR_BASE;
		cr += CPUDBG_WCR_BASE;
		index_t -= 16;	/* watchpoint registers index from 0 */
		break;
	default:
		return ERROR_FAIL;
	}
	/* registers are word-sized and consecutive */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A9: bpwp enable, vr %08x cr %08x",
			(unsigned) vr, (unsigned) cr);

	retval = cortex_a9_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a9_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
627
628 static int cortex_a9_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
629 {
630 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
631 uint32_t cr;
632
633 switch (index_t) {
634 case 0 ... 15:
635 cr = a9->armv7a_common.debug_base + CPUDBG_BCR_BASE;
636 break;
637 case 16 ... 31:
638 cr = a9->armv7a_common.debug_base + CPUDBG_WCR_BASE;
639 index_t -= 16;
640 break;
641 default:
642 return ERROR_FAIL;
643 }
644 cr += 4 * index_t;
645
646 LOG_DEBUG("A9: bpwp disable, cr %08x", (unsigned) cr);
647
648 /* clear control register */
649 return cortex_a9_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
650 }
651
/* Wire up the generic ARM Debug Programmer's Model (arm_dpm) callbacks
 * to the Cortex-A9 implementations above, record the DIDR, then run the
 * shared setup and initialization.  Called once during target examine.
 */
static int cortex_a9_dpm_setup(struct cortex_a9_common *a9, uint32_t didr)
{
	struct arm_dpm *dpm = &a9->armv7a_common.dpm;
	int retval;

	dpm->arm = &a9->armv7a_common.armv4_5_common;
	dpm->didr = didr;

	dpm->prepare = cortex_a9_dpm_prepare;
	dpm->finish = cortex_a9_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a9_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a9_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a9_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a9_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a9_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a9_bpwp_enable;
	dpm->bpwp_disable = cortex_a9_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
679
680
681 /*
682 * Cortex-A9 Run control
683 */
684
685 static int cortex_a9_poll(struct target *target)
686 {
687 int retval = ERROR_OK;
688 uint32_t dscr;
689 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
690 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
691 struct adiv5_dap *swjdp = &armv7a->dap;
692 enum target_state prev_target_state = target->state;
693 uint8_t saved_apsel = dap_ap_get_select(swjdp);
694
695 dap_ap_select(swjdp, swjdp_debugap);
696 retval = mem_ap_read_atomic_u32(swjdp,
697 armv7a->debug_base + CPUDBG_DSCR, &dscr);
698 if (retval != ERROR_OK)
699 {
700 dap_ap_select(swjdp, saved_apsel);
701 return retval;
702 }
703 cortex_a9->cpudbg_dscr = dscr;
704
705 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED))
706 {
707 if (prev_target_state != TARGET_HALTED)
708 {
709 /* We have a halting debug event */
710 LOG_DEBUG("Target halted");
711 target->state = TARGET_HALTED;
712 if ((prev_target_state == TARGET_RUNNING)
713 || (prev_target_state == TARGET_RESET))
714 {
715 retval = cortex_a9_debug_entry(target);
716 if (retval != ERROR_OK)
717 return retval;
718
719 target_call_event_callbacks(target,
720 TARGET_EVENT_HALTED);
721 }
722 if (prev_target_state == TARGET_DEBUG_RUNNING)
723 {
724 LOG_DEBUG(" ");
725
726 retval = cortex_a9_debug_entry(target);
727 if (retval != ERROR_OK)
728 return retval;
729
730 target_call_event_callbacks(target,
731 TARGET_EVENT_DEBUG_HALTED);
732 }
733 }
734 }
735 else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
736 {
737 target->state = TARGET_RUNNING;
738 }
739 else
740 {
741 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
742 target->state = TARGET_UNKNOWN;
743 }
744
745 dap_ap_select(swjdp, saved_apsel);
746
747 return retval;
748 }
749
750 static int cortex_a9_halt(struct target *target)
751 {
752 int retval = ERROR_OK;
753 uint32_t dscr;
754 struct armv7a_common *armv7a = target_to_armv7a(target);
755 struct adiv5_dap *swjdp = &armv7a->dap;
756 uint8_t saved_apsel = dap_ap_get_select(swjdp);
757 dap_ap_select(swjdp, swjdp_debugap);
758
759 /*
760 * Tell the core to be halted by writing DRCR with 0x1
761 * and then wait for the core to be halted.
762 */
763 retval = mem_ap_write_atomic_u32(swjdp,
764 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
765 if (retval != ERROR_OK)
766 goto out;
767
768 /*
769 * enter halting debug mode
770 */
771 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
772 if (retval != ERROR_OK)
773 goto out;
774
775 retval = mem_ap_write_atomic_u32(swjdp,
776 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
777 if (retval != ERROR_OK)
778 goto out;
779
780 long long then = timeval_ms();
781 for (;;)
782 {
783 retval = mem_ap_read_atomic_u32(swjdp,
784 armv7a->debug_base + CPUDBG_DSCR, &dscr);
785 if (retval != ERROR_OK)
786 goto out;
787 if ((dscr & DSCR_CORE_HALTED) != 0)
788 {
789 break;
790 }
791 if (timeval_ms() > then + 1000)
792 {
793 LOG_ERROR("Timeout waiting for halt");
794 return ERROR_FAIL;
795 }
796 }
797
798 target->debug_reason = DBG_REASON_DBGRQ;
799
800 out:
801 dap_ap_select(swjdp, saved_apsel);
802 return retval;
803 }
804
805 static int cortex_a9_resume(struct target *target, int current,
806 uint32_t address, int handle_breakpoints, int debug_execution)
807 {
808 struct armv7a_common *armv7a = target_to_armv7a(target);
809 struct arm *armv4_5 = &armv7a->armv4_5_common;
810 struct adiv5_dap *swjdp = &armv7a->dap;
811 int retval;
812
813 // struct breakpoint *breakpoint = NULL;
814 uint32_t resume_pc, dscr;
815
816 uint8_t saved_apsel = dap_ap_get_select(swjdp);
817 dap_ap_select(swjdp, swjdp_debugap);
818
819 if (!debug_execution)
820 target_free_all_working_areas(target);
821
822 #if 0
823 if (debug_execution)
824 {
825 /* Disable interrupts */
826 /* We disable interrupts in the PRIMASK register instead of
827 * masking with C_MASKINTS,
828 * This is probably the same issue as Cortex-M3 Errata 377493:
829 * C_MASKINTS in parallel with disabled interrupts can cause
830 * local faults to not be taken. */
831 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
832 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
833 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
834
835 /* Make sure we are in Thumb mode */
836 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
837 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
838 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
839 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
840 }
841 #endif
842
843 /* current = 1: continue on current pc, otherwise continue at <address> */
844 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
845 if (!current)
846 resume_pc = address;
847
848 /* Make sure that the Armv7 gdb thumb fixups does not
849 * kill the return address
850 */
851 switch (armv4_5->core_state)
852 {
853 case ARM_STATE_ARM:
854 resume_pc &= 0xFFFFFFFC;
855 break;
856 case ARM_STATE_THUMB:
857 case ARM_STATE_THUMB_EE:
858 /* When the return address is loaded into PC
859 * bit 0 must be 1 to stay in Thumb state
860 */
861 resume_pc |= 0x1;
862 break;
863 case ARM_STATE_JAZELLE:
864 LOG_ERROR("How do I resume into Jazelle state??");
865 return ERROR_FAIL;
866 }
867 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
868 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
869 armv4_5->pc->dirty = 1;
870 armv4_5->pc->valid = 1;
871
872 retval = cortex_a9_restore_context(target, handle_breakpoints);
873 if (retval != ERROR_OK)
874 return retval;
875
876 #if 0
877 /* the front-end may request us not to handle breakpoints */
878 if (handle_breakpoints)
879 {
880 /* Single step past breakpoint at current address */
881 if ((breakpoint = breakpoint_find(target, resume_pc)))
882 {
883 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
884 cortex_m3_unset_breakpoint(target, breakpoint);
885 cortex_m3_single_step_core(target);
886 cortex_m3_set_breakpoint(target, breakpoint);
887 }
888 }
889
890 #endif
891
892 /*
893 * Restart core and wait for it to be started. Clear ITRen and sticky
894 * exception flags: see ARMv7 ARM, C5.9.
895 *
896 * REVISIT: for single stepping, we probably want to
897 * disable IRQs by default, with optional override...
898 */
899
900 retval = mem_ap_read_atomic_u32(swjdp,
901 armv7a->debug_base + CPUDBG_DSCR, &dscr);
902 if (retval != ERROR_OK)
903 return retval;
904
905 if ((dscr & DSCR_INSTR_COMP) == 0)
906 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
907
908 retval = mem_ap_write_atomic_u32(swjdp,
909 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
910 if (retval != ERROR_OK)
911 return retval;
912
913 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR,
914 DRCR_RESTART | DRCR_CLEAR_EXCEPTIONS);
915 if (retval != ERROR_OK)
916 return retval;
917
918 long long then = timeval_ms();
919 for (;;)
920 {
921 retval = mem_ap_read_atomic_u32(swjdp,
922 armv7a->debug_base + CPUDBG_DSCR, &dscr);
923 if (retval != ERROR_OK)
924 return retval;
925 if ((dscr & DSCR_CORE_RESTARTED) != 0)
926 break;
927 if (timeval_ms() > then + 1000)
928 {
929 LOG_ERROR("Timeout waiting for resume");
930 return ERROR_FAIL;
931 }
932 }
933
934 target->debug_reason = DBG_REASON_NOTHALTED;
935 target->state = TARGET_RUNNING;
936
937 /* registers are now invalid */
938 register_cache_invalidate(armv4_5->core_cache);
939
940 if (!debug_execution)
941 {
942 target->state = TARGET_RUNNING;
943 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
944 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
945 }
946 else
947 {
948 target->state = TARGET_DEBUG_RUNNING;
949 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
950 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
951 }
952
953 dap_ap_select(swjdp, saved_apsel);
954
955 return ERROR_OK;
956 }
957
/* Bring OpenOCD's view of the core up to date after a halt:
 * enable ITR execution, classify the debug reason from the cached DSCR,
 * record WFAR on watchpoint hits, read all core registers (normally via
 * the DPM; optionally via a work-area fast path), fix up the PC for the
 * pipeline offset, and finally run the post_debug_entry hook.
 */
static int cortex_a9_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *swjdp = &armv7a->dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a9->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A9 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a9->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a9->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
	{
		/* normal (slow) path: registers via the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	}
	else
	{
		dap_ap_select(swjdp, swjdp_memoryap);
		retval = cortex_a9_read_regs_through_mem(target,
				regfile_working_area->address, regfile);
		/* NOTE(review): this re-selects the *memory* AP, yet the
		 * cortex_a9_dap_read_coreregister_u32() below accesses
		 * debug registers; swjdp_debugap looks intended here --
		 * confirm before relying on this (dead) fast path. */
		dap_ap_select(swjdp, swjdp_memoryap);
		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
		{
			return retval;
		}

		/* read Current PSR */
		retval = cortex_a9_dap_read_coreregister_u32(target, &cpsr, 16);
		if (retval != ERROR_OK)
			return retval;
		dap_ap_select(swjdp, swjdp_debugap);
		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(armv4_5, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++)
		{
			reg = arm_reg_current(armv4_5, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: undo the pipeline offset */
		if (cpsr & (1 << 5))
		{
			// T bit set for Thumb or ThumbEE state
			regfile[ARM_PC] -= 4;
		}
		else
		{
			// ARM state
			regfile[ARM_PC] -= 8;
		}

		reg = armv4_5->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a9_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a9_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a9_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
//	armv4_5->exception_number = 0;
	if (armv7a->post_debug_entry)
	{
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1088
1089 static int cortex_a9_post_debug_entry(struct target *target)
1090 {
1091 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1092 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1093 int retval;
1094
1095 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1096 retval = armv7a->armv4_5_common.mrc(target, 15,
1097 0, 0, /* op1, op2 */
1098 1, 0, /* CRn, CRm */
1099 &cortex_a9->cp15_control_reg);
1100 if (retval != ERROR_OK)
1101 return retval;
1102 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a9->cp15_control_reg);
1103
1104 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
1105 {
1106 uint32_t cache_type_reg;
1107
1108 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1109 retval = armv7a->armv4_5_common.mrc(target, 15,
1110 0, 1, /* op1, op2 */
1111 0, 0, /* CRn, CRm */
1112 &cache_type_reg);
1113 if (retval != ERROR_OK)
1114 return retval;
1115 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
1116
1117 /* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A9 */
1118 armv4_5_identify_cache(cache_type_reg,
1119 &armv7a->armv4_5_mmu.armv4_5_cache);
1120 }
1121
1122 armv7a->armv4_5_mmu.mmu_enabled =
1123 (cortex_a9->cp15_control_reg & 0x1U) ? 1 : 0;
1124 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1125 (cortex_a9->cp15_control_reg & 0x4U) ? 1 : 0;
1126 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1127 (cortex_a9->cp15_control_reg & 0x1000U) ? 1 : 0;
1128
1129 return ERROR_OK;
1130 }
1131
1132 static int cortex_a9_step(struct target *target, int current, uint32_t address,
1133 int handle_breakpoints)
1134 {
1135 struct armv7a_common *armv7a = target_to_armv7a(target);
1136 struct arm *armv4_5 = &armv7a->armv4_5_common;
1137 struct adiv5_dap *swjdp = &armv7a->dap;
1138 struct breakpoint *breakpoint = NULL;
1139 struct breakpoint stepbreakpoint;
1140 struct reg *r;
1141 int retval;
1142 uint8_t saved_apsel = dap_ap_get_select(swjdp);
1143
1144 if (target->state != TARGET_HALTED)
1145 {
1146 LOG_WARNING("target not halted");
1147 return ERROR_TARGET_NOT_HALTED;
1148 }
1149
1150 dap_ap_select(swjdp, swjdp_debugap);
1151
1152 /* current = 1: continue on current pc, otherwise continue at <address> */
1153 r = armv4_5->pc;
1154 if (!current)
1155 {
1156 buf_set_u32(r->value, 0, 32, address);
1157 }
1158 else
1159 {
1160 address = buf_get_u32(r->value, 0, 32);
1161 }
1162
1163 /* The front-end may request us not to handle breakpoints.
1164 * But since Cortex-A9 uses breakpoint for single step,
1165 * we MUST handle breakpoints.
1166 */
1167 handle_breakpoints = 1;
1168 if (handle_breakpoints) {
1169 breakpoint = breakpoint_find(target, address);
1170 if (breakpoint)
1171 cortex_a9_unset_breakpoint(target, breakpoint);
1172 }
1173
1174 /* Setup single step breakpoint */
1175 stepbreakpoint.address = address;
1176 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1177 ? 2 : 4;
1178 stepbreakpoint.type = BKPT_HARD;
1179 stepbreakpoint.set = 0;
1180
1181 /* Break on IVA mismatch */
1182 cortex_a9_set_breakpoint(target, &stepbreakpoint, 0x04);
1183
1184 target->debug_reason = DBG_REASON_SINGLESTEP;
1185
1186 retval = cortex_a9_resume(target, 1, address, 0, 0);
1187 if (retval != ERROR_OK)
1188 goto out;
1189
1190 long long then = timeval_ms();
1191 while (target->state != TARGET_HALTED)
1192 {
1193 retval = cortex_a9_poll(target);
1194 if (retval != ERROR_OK)
1195 goto out;
1196 if (timeval_ms() > then + 1000)
1197 {
1198 LOG_ERROR("timeout waiting for target halt");
1199 retval = ERROR_FAIL;
1200 goto out;
1201 }
1202 }
1203
1204 cortex_a9_unset_breakpoint(target, &stepbreakpoint);
1205
1206 target->debug_reason = DBG_REASON_BREAKPOINT;
1207
1208 if (breakpoint)
1209 cortex_a9_set_breakpoint(target, breakpoint, 0);
1210
1211 if (target->state != TARGET_HALTED)
1212 LOG_DEBUG("target stepped");
1213
1214 retval = ERROR_OK;
1215
1216 out:
1217 dap_ap_select(swjdp, saved_apsel);
1218 return retval;
1219 }
1220
1221 static int cortex_a9_restore_context(struct target *target, bool bpwp)
1222 {
1223 struct armv7a_common *armv7a = target_to_armv7a(target);
1224
1225 LOG_DEBUG(" ");
1226
1227 if (armv7a->pre_restore_context)
1228 armv7a->pre_restore_context(target);
1229
1230 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1231 }
1232
1233
1234 /*
1235 * Cortex-A9 Breakpoint and watchpoint functions
1236 */
1237
1238 /* Setup hardware Breakpoint Register Pair */
1239 static int cortex_a9_set_breakpoint(struct target *target,
1240 struct breakpoint *breakpoint, uint8_t matchmode)
1241 {
1242 int retval;
1243 int brp_i=0;
1244 uint32_t control;
1245 uint8_t byte_addr_select = 0x0F;
1246 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1247 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1248 struct cortex_a9_brp * brp_list = cortex_a9->brp_list;
1249
1250 if (breakpoint->set)
1251 {
1252 LOG_WARNING("breakpoint already set");
1253 return ERROR_OK;
1254 }
1255
1256 if (breakpoint->type == BKPT_HARD)
1257 {
1258 while (brp_list[brp_i].used && (brp_i < cortex_a9->brp_num))
1259 brp_i++ ;
1260 if (brp_i >= cortex_a9->brp_num)
1261 {
1262 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1263 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1264 }
1265 breakpoint->set = brp_i + 1;
1266 if (breakpoint->length == 2)
1267 {
1268 byte_addr_select = (3 << (breakpoint->address & 0x02));
1269 }
1270 control = ((matchmode & 0x7) << 20)
1271 | (byte_addr_select << 5)
1272 | (3 << 1) | 1;
1273 brp_list[brp_i].used = 1;
1274 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1275 brp_list[brp_i].control = control;
1276 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1277 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1278 brp_list[brp_i].value);
1279 if (retval != ERROR_OK)
1280 return retval;
1281 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1282 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1283 brp_list[brp_i].control);
1284 if (retval != ERROR_OK)
1285 return retval;
1286 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1287 brp_list[brp_i].control,
1288 brp_list[brp_i].value);
1289 }
1290 else if (breakpoint->type == BKPT_SOFT)
1291 {
1292 uint8_t code[4];
1293 if (breakpoint->length == 2)
1294 {
1295 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1296 }
1297 else
1298 {
1299 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1300 }
1301 retval = target->type->read_memory(target,
1302 breakpoint->address & 0xFFFFFFFE,
1303 breakpoint->length, 1,
1304 breakpoint->orig_instr);
1305 if (retval != ERROR_OK)
1306 return retval;
1307 retval = target->type->write_memory(target,
1308 breakpoint->address & 0xFFFFFFFE,
1309 breakpoint->length, 1, code);
1310 if (retval != ERROR_OK)
1311 return retval;
1312 breakpoint->set = 0x11; /* Any nice value but 0 */
1313 }
1314
1315 return ERROR_OK;
1316 }
1317
1318 static int cortex_a9_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1319 {
1320 int retval;
1321 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1322 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1323 struct cortex_a9_brp * brp_list = cortex_a9->brp_list;
1324
1325 if (!breakpoint->set)
1326 {
1327 LOG_WARNING("breakpoint not set");
1328 return ERROR_OK;
1329 }
1330
1331 if (breakpoint->type == BKPT_HARD)
1332 {
1333 int brp_i = breakpoint->set - 1;
1334 if ((brp_i < 0) || (brp_i >= cortex_a9->brp_num))
1335 {
1336 LOG_DEBUG("Invalid BRP number in breakpoint");
1337 return ERROR_OK;
1338 }
1339 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1340 brp_list[brp_i].control, brp_list[brp_i].value);
1341 brp_list[brp_i].used = 0;
1342 brp_list[brp_i].value = 0;
1343 brp_list[brp_i].control = 0;
1344 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1345 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1346 brp_list[brp_i].control);
1347 if (retval != ERROR_OK)
1348 return retval;
1349 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1350 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1351 brp_list[brp_i].value);
1352 if (retval != ERROR_OK)
1353 return retval;
1354 }
1355 else
1356 {
1357 /* restore original instruction (kept in target endianness) */
1358 if (breakpoint->length == 4)
1359 {
1360 retval = target->type->write_memory(target,
1361 breakpoint->address & 0xFFFFFFFE,
1362 4, 1, breakpoint->orig_instr);
1363 if (retval != ERROR_OK)
1364 return retval;
1365 }
1366 else
1367 {
1368 retval = target->type->write_memory(target,
1369 breakpoint->address & 0xFFFFFFFE,
1370 2, 1, breakpoint->orig_instr);
1371 if (retval != ERROR_OK)
1372 return retval;
1373 }
1374 }
1375 breakpoint->set = 0;
1376
1377 return ERROR_OK;
1378 }
1379
1380 static int cortex_a9_add_breakpoint(struct target *target,
1381 struct breakpoint *breakpoint)
1382 {
1383 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1384
1385 if ((breakpoint->type == BKPT_HARD) && (cortex_a9->brp_num_available < 1))
1386 {
1387 LOG_INFO("no hardware breakpoint available");
1388 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1389 }
1390
1391 if (breakpoint->type == BKPT_HARD)
1392 cortex_a9->brp_num_available--;
1393
1394 return cortex_a9_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1395 }
1396
1397 static int cortex_a9_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1398 {
1399 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1400
1401 #if 0
1402 /* It is perfectly possible to remove breakpoints while the target is running */
1403 if (target->state != TARGET_HALTED)
1404 {
1405 LOG_WARNING("target not halted");
1406 return ERROR_TARGET_NOT_HALTED;
1407 }
1408 #endif
1409
1410 if (breakpoint->set)
1411 {
1412 cortex_a9_unset_breakpoint(target, breakpoint);
1413 if (breakpoint->type == BKPT_HARD)
1414 cortex_a9->brp_num_available++ ;
1415 }
1416
1417
1418 return ERROR_OK;
1419 }
1420
1421
1422
1423 /*
1424 * Cortex-A9 Reset functions
1425 */
1426
1427 static int cortex_a9_assert_reset(struct target *target)
1428 {
1429 struct armv7a_common *armv7a = target_to_armv7a(target);
1430
1431 LOG_DEBUG(" ");
1432
1433 /* FIXME when halt is requested, make it work somehow... */
1434
1435 /* Issue some kind of warm reset. */
1436 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1437 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1438 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1439 /* REVISIT handle "pulls" cases, if there's
1440 * hardware that needs them to work.
1441 */
1442 jtag_add_reset(0, 1);
1443 } else {
1444 LOG_ERROR("%s: how to reset?", target_name(target));
1445 return ERROR_FAIL;
1446 }
1447
1448 /* registers are now invalid */
1449 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1450
1451 target->state = TARGET_RESET;
1452
1453 return ERROR_OK;
1454 }
1455
1456 static int cortex_a9_deassert_reset(struct target *target)
1457 {
1458 int retval;
1459
1460 LOG_DEBUG(" ");
1461
1462 /* be certain SRST is off */
1463 jtag_add_reset(0, 0);
1464
1465 retval = cortex_a9_poll(target);
1466 if (retval != ERROR_OK)
1467 return retval;
1468
1469 if (target->reset_halt) {
1470 if (target->state != TARGET_HALTED) {
1471 LOG_WARNING("%s: ran after reset and before halt ...",
1472 target_name(target));
1473 if ((retval = target_halt(target)) != ERROR_OK)
1474 return retval;
1475 }
1476 }
1477
1478 return ERROR_OK;
1479 }
1480
1481 /*
1482 * Cortex-A9 Memory access
1483 *
1484 * This is same Cortex M3 but we must also use the correct
1485 * ap number for every access.
1486 */
1487
/* Read count elements of `size` bytes from physical memory.
 *
 * Two paths depending on which AP is currently selected:
 *  - memory AP (AHB-AP): fast buffered MEM-AP reads;
 *  - otherwise (APB-AP): single-step an LDRB opcode through the core,
 *    one byte per iteration (slow, clobbers and restores r0/r1).
 *
 * Returns ERROR_INVALID_ARGUMENTS unchanged when count == 0,
 * buffer == NULL, or size is not 1/2/4 on the AHB path.
 */
static int cortex_a9_read_phys_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;
	int retval = ERROR_INVALID_ARGUMENTS;
	uint8_t apsel = dap_ap_get_select(swjdp);

	LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);

	if (count && buffer) {

		if ( apsel == swjdp_memoryap ) {

			/* read memory through AHB-AP */

			switch (size) {
			case 4:
				retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
				break;
			case 2:
				retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
				break;
			case 1:
				retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
				break;
			}

		} else {

			/* read memory through APB-AP */

			uint32_t saved_r0, saved_r1;
			int nbytes = count * size;
			uint32_t data;

			/* save registers r0 and r1, we are going to corrupt them */
			retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r0, 0);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r1, 1);
			if (retval != ERROR_OK)
				return retval;

			/* r0 holds the running read address */
			retval = cortex_a9_dap_write_coreregister_u32(target, address, 0);
			if (retval != ERROR_OK)
				return retval;

			/* NOTE(review): the early returns below leave r0/r1
			 * clobbered on error — TODO confirm that is acceptable */
			while (nbytes > 0) {

				/* execute instruction LDRB r1, [r0], 1 (0xe4d01001) */
				retval = cortex_a9_exec_opcode(target, ARMV4_5_LDRB_IP(1, 0) , NULL);
				if (retval != ERROR_OK)
					return retval;

				retval = cortex_a9_dap_read_coreregister_u32(target, &data, 1);
				if (retval != ERROR_OK)
					return retval;

				/* LDRB loaded only the low byte of r1 */
				*buffer++ = data;
				--nbytes;

			}

			/* restore corrupted registers r0 and r1 */
			retval = cortex_a9_dap_write_coreregister_u32(target, saved_r0, 0);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a9_dap_write_coreregister_u32(target, saved_r1, 1);
			if (retval != ERROR_OK)
				return retval;

		}
	}

	return retval;
}
1568
1569 static int cortex_a9_read_memory(struct target *target, uint32_t address,
1570 uint32_t size, uint32_t count, uint8_t *buffer)
1571 {
1572 int enabled = 0;
1573 uint32_t virt, phys;
1574 int retval;
1575
1576 /* cortex_a9 handles unaligned memory access */
1577
1578 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1579 retval = cortex_a9_mmu(target, &enabled);
1580 if (retval != ERROR_OK)
1581 return retval;
1582
1583 if (enabled)
1584 {
1585 virt = address;
1586 retval = cortex_a9_virt2phys(target, virt, &phys);
1587 if (retval != ERROR_OK)
1588 return retval;
1589
1590 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1591 address = phys;
1592 }
1593
1594 return cortex_a9_read_phys_memory(target, address, size, count, buffer);
1595 }
1596
/* Write count elements of `size` bytes to physical memory.
 *
 * Like the read path, uses buffered MEM-AP writes when the memory AP
 * (AHB-AP) is selected, or an STRB-per-byte loop through the APB-AP
 * otherwise.  After an AHB write to a halted target, walks the
 * affected 64-byte cache lines and invalidates I- and D-cache entries
 * via CP15 maintenance ops; the APB path returns early because it
 * maintains coherency by itself (see comment below).
 *
 * Returns ERROR_INVALID_ARGUMENTS unchanged when count == 0,
 * buffer == NULL, or size is not 1/2/4 on the AHB path.
 */
static int cortex_a9_write_phys_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;
	int retval = ERROR_INVALID_ARGUMENTS;

	LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);

	if (count && buffer) {
		uint8_t apsel = dap_ap_get_select(swjdp);

		if ( apsel == swjdp_memoryap ) {

			/* write memory through AHB-AP */
			switch (size) {
			case 4:
				retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
				break;
			case 2:
				retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
				break;
			case 1:
				retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
				break;
			}

		} else {

			/* write memory through APB-AP */

			uint32_t saved_r0, saved_r1;
			int nbytes = count * size;
			uint32_t data;

			/* save registers r0 and r1, we are going to corrupt them */
			retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r0, 0);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r1, 1);
			if (retval != ERROR_OK)
				return retval;

			/* r0 holds the running write address */
			retval = cortex_a9_dap_write_coreregister_u32(target, address, 0);
			if (retval != ERROR_OK)
				return retval;

			while (nbytes > 0) {

				data = *buffer++;

				/* place the byte in r1, then store it */
				retval = cortex_a9_dap_write_coreregister_u32(target, data, 1);
				if (retval != ERROR_OK)
					return retval;

				/* execute instruction STRB r1, [r0], 1 (0xe4c01001) */
				retval = cortex_a9_exec_opcode(target, ARMV4_5_STRB_IP(1, 0) , NULL);
				if (retval != ERROR_OK)
					return retval;

				--nbytes;
			}

			/* restore corrupted registers r0 and r1 */
			retval = cortex_a9_dap_write_coreregister_u32(target, saved_r0, 0);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a9_dap_write_coreregister_u32(target, saved_r1, 1);
			if (retval != ERROR_OK)
				return retval;

			/* we can return here without invalidating D/I-cache because */
			/* access through APB maintains cache coherency */
			return retval;
		}
	}


	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED)
	{
		struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A9 has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
		{
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 *      MCR p15, 0, r0, c7, c5, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* invalidate D-Cache */
		if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
		{
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 *      MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* finish() result intentionally ignored (best-effort teardown) */
		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}
1737
1738 static int cortex_a9_write_memory(struct target *target, uint32_t address,
1739 uint32_t size, uint32_t count, uint8_t *buffer)
1740 {
1741 int enabled = 0;
1742 uint32_t virt, phys;
1743 int retval;
1744
1745 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1746 retval = cortex_a9_mmu(target, &enabled);
1747 if (retval != ERROR_OK)
1748 return retval;
1749
1750 if (enabled)
1751 {
1752 virt = address;
1753 retval = cortex_a9_virt2phys(target, virt, &phys);
1754 if (retval != ERROR_OK)
1755 return retval;
1756 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1757 address = phys;
1758 }
1759
1760 return cortex_a9_write_phys_memory(target, address, size,
1761 count, buffer);
1762 }
1763
/* Bulk write is simply a 32-bit-wide write on Cortex-A9. */
static int cortex_a9_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	const uint32_t word_width = 4;

	return cortex_a9_write_memory(target, address, word_width, count, buffer);
}
1769
/* Read one data byte and the control flags from the target's DCC
 * channel.
 *
 * Currently a stub: the Cortex-M3-style implementation below is
 * compiled out, so *value and *ctrl are left untouched (callers must
 * pre-initialize them) and the function always returns ERROR_OK.
 */
static int cortex_a9_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
{
#if 0
	u16 dcrdr;

	mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	*ctrl = (uint8_t)dcrdr;
	*value = (uint8_t)(dcrdr >> 8);

	LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);

	/* write ack back to software dcc register
	 * signify we have read data */
	if (dcrdr & (1 << 0))
	{
		dcrdr = 0;
		mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	}
#endif
	return ERROR_OK;
}
1791
1792
1793 static int cortex_a9_handle_target_request(void *priv)
1794 {
1795 struct target *target = priv;
1796 struct armv7a_common *armv7a = target_to_armv7a(target);
1797 struct adiv5_dap *swjdp = &armv7a->dap;
1798 int retval;
1799
1800 if (!target_was_examined(target))
1801 return ERROR_OK;
1802 if (!target->dbg_msg_enabled)
1803 return ERROR_OK;
1804
1805 if (target->state == TARGET_RUNNING)
1806 {
1807 uint8_t data = 0;
1808 uint8_t ctrl = 0;
1809
1810 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1811 if (retval != ERROR_OK)
1812 return retval;
1813
1814 /* check if we have data */
1815 if (ctrl & (1 << 0))
1816 {
1817 uint32_t request;
1818
1819 /* we assume target is quick enough */
1820 request = data;
1821 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1822 if (retval != ERROR_OK)
1823 return retval;
1824 request |= (data << 8);
1825 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1826 if (retval != ERROR_OK)
1827 return retval;
1828 request |= (data << 16);
1829 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1830 if (retval != ERROR_OK)
1831 return retval;
1832 request |= (data << 24);
1833 target_request(target, request);
1834 }
1835 }
1836
1837 return ERROR_OK;
1838 }
1839
1840 /*
1841 * Cortex-A9 target information and configuration
1842 */
1843
1844 static int cortex_a9_examine_first(struct target *target)
1845 {
1846 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1847 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1848 struct adiv5_dap *swjdp = &armv7a->dap;
1849 int i;
1850 int retval = ERROR_OK;
1851 uint32_t didr, ctypr, ttypr, cpuid;
1852
1853 /* We do one extra read to ensure DAP is configured,
1854 * we call ahbap_debugport_init(swjdp) instead
1855 */
1856 retval = ahbap_debugport_init(swjdp);
1857 if (retval != ERROR_OK)
1858 return retval;
1859
1860 dap_ap_select(swjdp, swjdp_debugap);
1861
1862 /*
1863 * FIXME: assuming omap4430
1864 *
1865 * APB DBGBASE reads 0x80040000, but this points to an empty ROM table.
1866 * 0x80000000 is cpu0 coresight region
1867 */
1868 if (target->coreid > 3) {
1869 LOG_ERROR("cortex_a9 supports up to 4 cores");
1870 return ERROR_INVALID_ARGUMENTS;
1871 }
1872 armv7a->debug_base = 0x80000000 |
1873 ((target->coreid & 0x3) << CORTEX_A9_PADDRDBG_CPU_SHIFT);
1874
1875 retval = mem_ap_read_atomic_u32(swjdp,
1876 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1877 if (retval != ERROR_OK)
1878 return retval;
1879
1880 if ((retval = mem_ap_read_atomic_u32(swjdp,
1881 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1882 {
1883 LOG_DEBUG("Examine %s failed", "CPUID");
1884 return retval;
1885 }
1886
1887 if ((retval = mem_ap_read_atomic_u32(swjdp,
1888 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1889 {
1890 LOG_DEBUG("Examine %s failed", "CTYPR");
1891 return retval;
1892 }
1893
1894 if ((retval = mem_ap_read_atomic_u32(swjdp,
1895 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1896 {
1897 LOG_DEBUG("Examine %s failed", "TTYPR");
1898 return retval;
1899 }
1900
1901 if ((retval = mem_ap_read_atomic_u32(swjdp,
1902 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1903 {
1904 LOG_DEBUG("Examine %s failed", "DIDR");
1905 return retval;
1906 }
1907
1908 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1909 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1910 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1911 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1912
1913 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1914 retval = cortex_a9_dpm_setup(cortex_a9, didr);
1915 if (retval != ERROR_OK)
1916 return retval;
1917
1918 /* Setup Breakpoint Register Pairs */
1919 cortex_a9->brp_num = ((didr >> 24) & 0x0F) + 1;
1920 cortex_a9->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1921 cortex_a9->brp_num_available = cortex_a9->brp_num;
1922 cortex_a9->brp_list = calloc(cortex_a9->brp_num, sizeof(struct cortex_a9_brp));
1923 // cortex_a9->brb_enabled = ????;
1924 for (i = 0; i < cortex_a9->brp_num; i++)
1925 {
1926 cortex_a9->brp_list[i].used = 0;
1927 if (i < (cortex_a9->brp_num-cortex_a9->brp_num_context))
1928 cortex_a9->brp_list[i].type = BRP_NORMAL;
1929 else
1930 cortex_a9->brp_list[i].type = BRP_CONTEXT;
1931 cortex_a9->brp_list[i].value = 0;
1932 cortex_a9->brp_list[i].control = 0;
1933 cortex_a9->brp_list[i].BRPn = i;
1934 }
1935
1936 LOG_DEBUG("Configured %i hw breakpoints", cortex_a9->brp_num);
1937
1938 target_set_examined(target);
1939 return ERROR_OK;
1940 }
1941
1942 static int cortex_a9_examine(struct target *target)
1943 {
1944 int retval = ERROR_OK;
1945
1946 /* don't re-probe hardware after each reset */
1947 if (!target_was_examined(target))
1948 retval = cortex_a9_examine_first(target);
1949
1950 /* Configure core debug access */
1951 if (retval == ERROR_OK)
1952 retval = cortex_a9_init_debug_access(target);
1953
1954 return retval;
1955 }
1956
1957 /*
1958 * Cortex-A9 target creation and initialization
1959 */
1960
/* init_target hook: intentionally empty — all real setup happens in
 * cortex_a9_examine_first() and cortex_a9_init_arch_info(). */
static int cortex_a9_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
1967
/* One-time wiring of a freshly allocated cortex_a9_common: links the
 * ARM/ARMv7-A/DAP structures together, fills in JTAG and DAP
 * parameters, installs the Cortex-A9 MMU/cache callbacks, and
 * registers the DCC polling timer.  Always returns ERROR_OK. */
static int cortex_a9_init_arch_info(struct target *target,
		struct cortex_a9_common *cortex_a9, struct jtag_tap *tap)
{
	struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *dap = &armv7a->dap;

	armv7a->armv4_5_common.dap = dap;

	/* Setup struct cortex_a9_common */
	cortex_a9->common_magic = CORTEX_A9_COMMON_MAGIC;
	armv4_5->arch_info = armv7a;

	/* prepare JTAG information for the new target */
	cortex_a9->jtag_info.tap = tap;
	cortex_a9->jtag_info.scann_size = 4;

	/* Leave (only) generic DAP stuff for debugport_init() */
	dap->jtag_info = &cortex_a9->jtag_info;
	dap->memaccess_tck = 80;

	/* Number of bits for tar autoincrement, impl. dep. at least 10 */
	dap->tar_autoincr_block = (1 << 10);

	cortex_a9->fast_reg_read = 0;

	/* Set default value: no virt2phys translation in flight */
	cortex_a9->current_address_mode = ARM_MODE_ANY;

	/* register arch-specific functions */
	armv7a->examine_debug_reason = NULL;

	armv7a->post_debug_entry = cortex_a9_post_debug_entry;

	armv7a->pre_restore_context = NULL;
	/* ctype == -1 means "cache geometry not probed yet" */
	armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
	armv7a->armv4_5_mmu.get_ttb = cortex_a9_get_ttb;
	armv7a->armv4_5_mmu.read_memory = cortex_a9_read_phys_memory;
	armv7a->armv4_5_mmu.write_memory = cortex_a9_write_phys_memory;
	armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a9_disable_mmu_caches;
	armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a9_enable_mmu_caches;
	armv7a->armv4_5_mmu.has_tiny_pages = 1;
	armv7a->armv4_5_mmu.mmu_enabled = 0;


//	arm7_9->handle_target_request = cortex_a9_handle_target_request;

	/* REVISIT v7a setup should be in a v7a-specific routine */
	arm_init_arch_info(target, armv4_5);
	armv7a->common_magic = ARMV7_COMMON_MAGIC;

	/* poll the DCC channel every 1 ms while the target runs */
	target_register_timer_callback(cortex_a9_handle_target_request, 1, 1, target);

	return ERROR_OK;
}
2023
2024 static int cortex_a9_target_create(struct target *target, Jim_Interp *interp)
2025 {
2026 struct cortex_a9_common *cortex_a9 = calloc(1, sizeof(struct cortex_a9_common));
2027
2028 return cortex_a9_init_arch_info(target, cortex_a9, target->tap);
2029 }
2030
2031 static int cortex_a9_get_ttb(struct target *target, uint32_t *result)
2032 {
2033 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
2034 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2035 uint32_t ttb = 0, retval = ERROR_OK;
2036
2037 /* current_address_mode is set inside cortex_a9_virt2phys()
2038 where we can determine if address belongs to user or kernel */
2039 if(cortex_a9->current_address_mode == ARM_MODE_SVC)
2040 {
2041 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2042 retval = armv7a->armv4_5_common.mrc(target, 15,
2043 0, 1, /* op1, op2 */
2044 2, 0, /* CRn, CRm */
2045 &ttb);
2046 if (retval != ERROR_OK)
2047 return retval;
2048 }
2049 else if(cortex_a9->current_address_mode == ARM_MODE_USR)
2050 {
2051 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2052 retval = armv7a->armv4_5_common.mrc(target, 15,
2053 0, 0, /* op1, op2 */
2054 2, 0, /* CRn, CRm */
2055 &ttb);
2056 if (retval != ERROR_OK)
2057 return retval;
2058 }
2059 /* we don't know whose address is: user or kernel
2060 we assume that if we are in kernel mode then
2061 address belongs to kernel else if in user mode
2062 - to user */
2063 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
2064 {
2065 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2066 retval = armv7a->armv4_5_common.mrc(target, 15,
2067 0, 1, /* op1, op2 */
2068 2, 0, /* CRn, CRm */
2069 &ttb);
2070 if (retval != ERROR_OK)
2071 return retval;
2072 }
2073 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
2074 {
2075 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2076 retval = armv7a->armv4_5_common.mrc(target, 15,
2077 0, 0, /* op1, op2 */
2078 2, 0, /* CRn, CRm */
2079 &ttb);
2080 if (retval != ERROR_OK)
2081 return retval;
2082 }
2083 /* finally we don't know whose ttb to use: user or kernel */
2084 else
2085 LOG_ERROR("Don't know how to get ttb for current mode!!!");
2086
2087 ttb &= 0xffffc000;
2088
2089 *result = ttb;
2090
2091 return ERROR_OK;
2092 }
2093
2094 static int cortex_a9_disable_mmu_caches(struct target *target, int mmu,
2095 int d_u_cache, int i_cache)
2096 {
2097 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
2098 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2099 uint32_t cp15_control;
2100 int retval;
2101
2102 /* read cp15 control register */
2103 retval = armv7a->armv4_5_common.mrc(target, 15,
2104 0, 0, /* op1, op2 */
2105 1, 0, /* CRn, CRm */
2106 &cp15_control);
2107 if (retval != ERROR_OK)
2108 return retval;
2109
2110
2111 if (mmu)
2112 cp15_control &= ~0x1U;
2113
2114 if (d_u_cache)
2115 cp15_control &= ~0x4U;
2116
2117 if (i_cache)
2118 cp15_control &= ~0x1000U;
2119
2120 retval = armv7a->armv4_5_common.mcr(target, 15,
2121 0, 0, /* op1, op2 */
2122 1, 0, /* CRn, CRm */
2123 cp15_control);
2124 return retval;
2125 }
2126
2127 static int cortex_a9_enable_mmu_caches(struct target *target, int mmu,
2128 int d_u_cache, int i_cache)
2129 {
2130 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
2131 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2132 uint32_t cp15_control;
2133 int retval;
2134
2135 /* read cp15 control register */
2136 retval = armv7a->armv4_5_common.mrc(target, 15,
2137 0, 0, /* op1, op2 */
2138 1, 0, /* CRn, CRm */
2139 &cp15_control);
2140 if (retval != ERROR_OK)
2141 return retval;
2142
2143 if (mmu)
2144 cp15_control |= 0x1U;
2145
2146 if (d_u_cache)
2147 cp15_control |= 0x4U;
2148
2149 if (i_cache)
2150 cp15_control |= 0x1000U;
2151
2152 retval = armv7a->armv4_5_common.mcr(target, 15,
2153 0, 0, /* op1, op2 */
2154 1, 0, /* CRn, CRm */
2155 cp15_control);
2156 return retval;
2157 }
2158
2159
2160 static int cortex_a9_mmu(struct target *target, int *enabled)
2161 {
2162 if (target->state != TARGET_HALTED) {
2163 LOG_ERROR("%s: target not halted", __func__);
2164 return ERROR_TARGET_INVALID;
2165 }
2166
2167 *enabled = target_to_cortex_a9(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
2168 return ERROR_OK;
2169 }
2170
2171 static int cortex_a9_virt2phys(struct target *target,
2172 uint32_t virt, uint32_t *phys)
2173 {
2174 uint32_t cb;
2175 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
2176 // struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2177 struct armv7a_common *armv7a = target_to_armv7a(target);
2178
2179 /* We assume that virtual address is separated
2180 between user and kernel in Linux style:
2181 0x00000000-0xbfffffff - User space
2182 0xc0000000-0xffffffff - Kernel space */
2183 if( virt < 0xc0000000 ) /* Linux user space */
2184 cortex_a9->current_address_mode = ARM_MODE_USR;
2185 else /* Linux kernel */
2186 cortex_a9->current_address_mode = ARM_MODE_SVC;
2187 uint32_t ret;
2188 int retval = armv4_5_mmu_translate_va(target,
2189 &armv7a->armv4_5_mmu, virt, &cb, &ret);
2190 if (retval != ERROR_OK)
2191 return retval;
2192 /* Reset the flag. We don't want someone else to use it by error */
2193 cortex_a9->current_address_mode = ARM_MODE_ANY;
2194
2195 *phys = ret;
2196 return ERROR_OK;
2197 }
2198
2199 COMMAND_HANDLER(cortex_a9_handle_cache_info_command)
2200 {
2201 struct target *target = get_current_target(CMD_CTX);
2202 struct armv7a_common *armv7a = target_to_armv7a(target);
2203
2204 return armv4_5_handle_cache_info_command(CMD_CTX,
2205 &armv7a->armv4_5_mmu.armv4_5_cache);
2206 }
2207
2208
2209 COMMAND_HANDLER(cortex_a9_handle_dbginit_command)
2210 {
2211 struct target *target = get_current_target(CMD_CTX);
2212 if (!target_was_examined(target))
2213 {
2214 LOG_ERROR("target not examined yet");
2215 return ERROR_FAIL;
2216 }
2217
2218 return cortex_a9_init_debug_access(target);
2219 }
2220
/* Commands registered under the "cortex_a9" command group (see
 * cortex_a9_command_handlers below). */
static const struct command_registration cortex_a9_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a9_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
	},
	{
		.name = "dbginit",
		.handler = cortex_a9_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for the Cortex-A9 target: pulls in the
 * generic ARM and ARMv7-A command chains, plus the "cortex_a9" group of
 * target-specific commands defined above. */
static const struct command_registration cortex_a9_command_handlers[] = {
	{
		/* generic ARM commands (reg, mcr/mrc, ...) */
		.chain = arm_command_handlers,
	},
	{
		/* ARMv7-A architecture commands */
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a9",
		.mode = COMMAND_ANY,
		.help = "Cortex-A9 command group",
		.chain = cortex_a9_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2251
/* Target vtable binding the Cortex-A9 implementation into the OpenOCD
 * target framework.  Entries left NULL are features this target does
 * not implement (watchpoints, soft reset halt, target request data). */
struct target_type cortexa9_target = {
	.name = "cortex_a9",

	.poll = cortex_a9_poll,
	.arch_state = armv7a_arch_state,

	.target_request_data = NULL,

	.halt = cortex_a9_halt,
	.resume = cortex_a9_resume,
	.step = cortex_a9_step,

	.assert_reset = cortex_a9_assert_reset,
	.deassert_reset = cortex_a9_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a9_read_memory,
	.write_memory = cortex_a9_write_memory,
	.bulk_write_memory = cortex_a9_bulk_write_memory,

	/* checksum/blank-check run algorithms on the target */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a9_add_breakpoint,
	.remove_breakpoint = cortex_a9_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a9_command_handlers,
	.target_create = cortex_a9_target_create,
	.init_target = cortex_a9_init_target,
	.examine = cortex_a9_examine,

	/* physical accessors bypass the MMU; virt2phys walks its tables */
	.read_phys_memory = cortex_a9_read_phys_memory,
	.write_phys_memory = cortex_a9_write_phys_memory,
	.mmu = cortex_a9_mmu,
	.virt2phys = cortex_a9_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account and then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)