- add parameter flush to the once api to signal whether the jtag queue needs to be flushed...
[openocd.git] / src / target / cortex_a9.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A9(tm) TRM, ARM DDI 0407F *
33 * *
34 ***************************************************************************/
35 #ifdef HAVE_CONFIG_H
36 #include "config.h"
37 #endif
38
39 #include "breakpoints.h"
40 #include "cortex_a9.h"
41 #include "register.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
46
47 static int cortex_a9_poll(struct target *target);
48 static int cortex_a9_debug_entry(struct target *target);
49 static int cortex_a9_restore_context(struct target *target, bool bpwp);
50 static int cortex_a9_set_breakpoint(struct target *target,
51 struct breakpoint *breakpoint, uint8_t matchmode);
52 static int cortex_a9_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int cortex_a9_dap_read_coreregister_u32(struct target *target,
55 uint32_t *value, int regnum);
56 static int cortex_a9_dap_write_coreregister_u32(struct target *target,
57 uint32_t value, int regnum);
58 static int cortex_a9_mmu(struct target *target, int *enabled);
59 static int cortex_a9_virt2phys(struct target *target,
60 uint32_t virt, uint32_t *phys);
61 static int cortex_a9_disable_mmu_caches(struct target *target, int mmu,
62 int d_u_cache, int i_cache);
63 static int cortex_a9_enable_mmu_caches(struct target *target, int mmu,
64 int d_u_cache, int i_cache);
65 static int cortex_a9_get_ttb(struct target *target, uint32_t *result);
66
67
68 /*
69 * FIXME do topology discovery using the ROM; don't
70 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
71 * cores, with different AP numbering ... don't use a #define
72 * for these numbers, use per-core armv7a state.
73 */
74 #define swjdp_memoryap 0
75 #define swjdp_debugap 1
76
77 /*
78 * Cortex-A9 Basic debug access, very low level assumes state is saved
79 */
80 static int cortex_a9_init_debug_access(struct target *target)
81 {
82 struct armv7a_common *armv7a = target_to_armv7a(target);
83 struct adiv5_dap *swjdp = &armv7a->dap;
84 int retval;
85 uint32_t dummy;
86
87 LOG_DEBUG(" ");
88
89 /* Unlocking the debug registers for modification */
90 /* The debugport might be uninitialised so try twice */
91 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
92 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
93 if (retval != ERROR_OK)
94 {
95 /* try again */
96 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
97 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
98 if (retval == ERROR_OK)
99 {
100 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
101 }
102 }
103 if (retval != ERROR_OK)
104 return retval;
105 /* Clear Sticky Power Down status Bit in PRSR to enable access to
106 the registers in the Core Power Domain */
107 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
108 armv7a->debug_base + CPUDBG_PRSR, &dummy);
109 if (retval != ERROR_OK)
110 return retval;
111
112 /* Enabling of instruction execution in debug mode is done in debug_entry code */
113
114 /* Resync breakpoint registers */
115
116 /* Since this is likely called from init or reset, update target state information*/
117 return cortex_a9_poll(target);
118 }
119
120 /* To reduce needless round-trips, pass in a pointer to the current
121 * DSCR value. Initialize it to zero if you just need to know the
122 * value on return from this function; or DSCR_INSTR_COMP if you
123 * happen to know that no instruction is pending.
124 */
125 static int cortex_a9_exec_opcode(struct target *target,
126 uint32_t opcode, uint32_t *dscr_p)
127 {
128 uint32_t dscr;
129 int retval;
130 struct armv7a_common *armv7a = target_to_armv7a(target);
131 struct adiv5_dap *swjdp = &armv7a->dap;
132
133 dscr = dscr_p ? *dscr_p : 0;
134
135 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
136
137 /* Wait for InstrCompl bit to be set */
138 long long then = timeval_ms();
139 while ((dscr & DSCR_INSTR_COMP) == 0)
140 {
141 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
142 armv7a->debug_base + CPUDBG_DSCR, &dscr);
143 if (retval != ERROR_OK)
144 {
145 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
146 return retval;
147 }
148 if (timeval_ms() > then + 1000)
149 {
150 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
151 return ERROR_FAIL;
152 }
153 }
154
155 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
156 armv7a->debug_base + CPUDBG_ITR, opcode);
157 if (retval != ERROR_OK)
158 return retval;
159
160 then = timeval_ms();
161 do
162 {
163 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
164 armv7a->debug_base + CPUDBG_DSCR, &dscr);
165 if (retval != ERROR_OK)
166 {
167 LOG_ERROR("Could not read DSCR register");
168 return retval;
169 }
170 if (timeval_ms() > then + 1000)
171 {
172 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
173 return ERROR_FAIL;
174 }
175 }
176 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
177
178 if (dscr_p)
179 *dscr_p = dscr;
180
181 return retval;
182 }
183
184 /**************************************************************************
185 Read core register with very few exec_opcode, fast but needs work_area.
186 This can cause problems with MMU active.
187 **************************************************************************/
188 static int cortex_a9_read_regs_through_mem(struct target *target, uint32_t address,
189 uint32_t * regfile)
190 {
191 int retval = ERROR_OK;
192 struct armv7a_common *armv7a = target_to_armv7a(target);
193 struct adiv5_dap *swjdp = &armv7a->dap;
194
195 retval = cortex_a9_dap_read_coreregister_u32(target, regfile, 0);
196 if (retval != ERROR_OK)
197 return retval;
198 retval = cortex_a9_dap_write_coreregister_u32(target, address, 0);
199 if (retval != ERROR_OK)
200 return retval;
201 retval = cortex_a9_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
202 if (retval != ERROR_OK)
203 return retval;
204
205 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
206 (uint8_t *)(&regfile[1]), 4*15, address);
207
208 return retval;
209 }
210
211 static int cortex_a9_dap_read_coreregister_u32(struct target *target,
212 uint32_t *value, int regnum)
213 {
214 int retval = ERROR_OK;
215 uint8_t reg = regnum&0xFF;
216 uint32_t dscr = 0;
217 struct armv7a_common *armv7a = target_to_armv7a(target);
218 struct adiv5_dap *swjdp = &armv7a->dap;
219
220 if (reg > 17)
221 return retval;
222
223 if (reg < 15)
224 {
225 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
226 retval = cortex_a9_exec_opcode(target,
227 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
228 &dscr);
229 if (retval != ERROR_OK)
230 return retval;
231 }
232 else if (reg == 15)
233 {
234 /* "MOV r0, r15"; then move r0 to DCCTX */
235 retval = cortex_a9_exec_opcode(target, 0xE1A0000F, &dscr);
236 if (retval != ERROR_OK)
237 return retval;
238 retval = cortex_a9_exec_opcode(target,
239 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
240 &dscr);
241 if (retval != ERROR_OK)
242 return retval;
243 }
244 else
245 {
246 /* "MRS r0, CPSR" or "MRS r0, SPSR"
247 * then move r0 to DCCTX
248 */
249 retval = cortex_a9_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
250 if (retval != ERROR_OK)
251 return retval;
252 retval = cortex_a9_exec_opcode(target,
253 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
254 &dscr);
255 if (retval != ERROR_OK)
256 return retval;
257 }
258
259 /* Wait for DTRRXfull then read DTRRTX */
260 long long then = timeval_ms();
261 while ((dscr & DSCR_DTR_TX_FULL) == 0)
262 {
263 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
264 armv7a->debug_base + CPUDBG_DSCR, &dscr);
265 if (retval != ERROR_OK)
266 return retval;
267 if (timeval_ms() > then + 1000)
268 {
269 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
270 return ERROR_FAIL;
271 }
272 }
273
274 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
275 armv7a->debug_base + CPUDBG_DTRTX, value);
276 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
277
278 return retval;
279 }
280
/*
 * Write one core register through the DCC: r0..r14 via MRC from cp14,
 * PC (15) via r0 + "MOV r15, r0", CPSR (16) / SPSR (17) via r0 + MSR.
 * The value travels through DTRRX; the sequence below is order-critical.
 */
static int cortex_a9_dap_write_coreregister_u32(struct target *target,
		uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL)
	{
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* NOTE(review): out-of-range regnum returns ERROR_OK silently,
	 * after the drain above; kept for interface compatibility. */
	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15)
	{
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else if (Rd == 15)
	{
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a9_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else
	{
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a9_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16)
		{
			retval = cortex_a9_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
365
366 /* Write to memory mapped registers directly with no cache or mmu handling */
367 static int cortex_a9_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
368 {
369 int retval;
370 struct armv7a_common *armv7a = target_to_armv7a(target);
371 struct adiv5_dap *swjdp = &armv7a->dap;
372
373 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap, address, value);
374
375 return retval;
376 }
377
378 /*
379 * Cortex-A9 implementation of Debug Programmer's Model
380 *
381 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
382 * so there's no need to poll for it before executing an instruction.
383 *
384 * NOTE that in several of these cases the "stall" mode might be useful.
385 * It'd let us queue a few operations together... prepare/finish might
386 * be the places to enable/disable that mode.
387 */
388
/* Recover the enclosing Cortex-A9 instance from its embedded DPM member. */
static inline struct cortex_a9_common *dpm_to_a9(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a9_common, armv7a_common.dpm);
}
393
394 static int cortex_a9_write_dcc(struct cortex_a9_common *a9, uint32_t data)
395 {
396 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
397 return mem_ap_sel_write_u32(&a9->armv7a_common.dap, swjdp_debugap,
398 a9->armv7a_common.debug_base + CPUDBG_DTRRX, data);
399 }
400
401 static int cortex_a9_read_dcc(struct cortex_a9_common *a9, uint32_t *data,
402 uint32_t *dscr_p)
403 {
404 struct adiv5_dap *swjdp = &a9->armv7a_common.dap;
405 uint32_t dscr = DSCR_INSTR_COMP;
406 int retval;
407
408 if (dscr_p)
409 dscr = *dscr_p;
410
411 /* Wait for DTRRXfull */
412 long long then = timeval_ms();
413 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
414 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
415 a9->armv7a_common.debug_base + CPUDBG_DSCR,
416 &dscr);
417 if (retval != ERROR_OK)
418 return retval;
419 if (timeval_ms() > then + 1000)
420 {
421 LOG_ERROR("Timeout waiting for read dcc");
422 return ERROR_FAIL;
423 }
424 }
425
426 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
427 a9->armv7a_common.debug_base + CPUDBG_DTRTX, data);
428 if (retval != ERROR_OK)
429 return retval;
430 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
431
432 if (dscr_p)
433 *dscr_p = dscr;
434
435 return retval;
436 }
437
438 static int cortex_a9_dpm_prepare(struct arm_dpm *dpm)
439 {
440 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
441 struct adiv5_dap *swjdp = &a9->armv7a_common.dap;
442 uint32_t dscr;
443 int retval;
444
445 /* set up invariant: INSTR_COMP is set after ever DPM operation */
446 long long then = timeval_ms();
447 for (;;)
448 {
449 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
450 a9->armv7a_common.debug_base + CPUDBG_DSCR,
451 &dscr);
452 if (retval != ERROR_OK)
453 return retval;
454 if ((dscr & DSCR_INSTR_COMP) != 0)
455 break;
456 if (timeval_ms() > then + 1000)
457 {
458 LOG_ERROR("Timeout waiting for dpm prepare");
459 return ERROR_FAIL;
460 }
461 }
462
463 /* this "should never happen" ... */
464 if (dscr & DSCR_DTR_RX_FULL) {
465 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
466 /* Clear DCCRX */
467 retval = cortex_a9_exec_opcode(
468 a9->armv7a_common.armv4_5_common.target,
469 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
470 &dscr);
471 if (retval != ERROR_OK)
472 return retval;
473 }
474
475 return retval;
476 }
477
478 static int cortex_a9_dpm_finish(struct arm_dpm *dpm)
479 {
480 /* REVISIT what could be done here? */
481 return ERROR_OK;
482 }
483
484 static int cortex_a9_instr_write_data_dcc(struct arm_dpm *dpm,
485 uint32_t opcode, uint32_t data)
486 {
487 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
488 int retval;
489 uint32_t dscr = DSCR_INSTR_COMP;
490
491 retval = cortex_a9_write_dcc(a9, data);
492 if (retval != ERROR_OK)
493 return retval;
494
495 return cortex_a9_exec_opcode(
496 a9->armv7a_common.armv4_5_common.target,
497 opcode,
498 &dscr);
499 }
500
501 static int cortex_a9_instr_write_data_r0(struct arm_dpm *dpm,
502 uint32_t opcode, uint32_t data)
503 {
504 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
505 uint32_t dscr = DSCR_INSTR_COMP;
506 int retval;
507
508 retval = cortex_a9_write_dcc(a9, data);
509 if (retval != ERROR_OK)
510 return retval;
511
512 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
513 retval = cortex_a9_exec_opcode(
514 a9->armv7a_common.armv4_5_common.target,
515 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
516 &dscr);
517 if (retval != ERROR_OK)
518 return retval;
519
520 /* then the opcode, taking data from R0 */
521 retval = cortex_a9_exec_opcode(
522 a9->armv7a_common.armv4_5_common.target,
523 opcode,
524 &dscr);
525
526 return retval;
527 }
528
529 static int cortex_a9_instr_cpsr_sync(struct arm_dpm *dpm)
530 {
531 struct target *target = dpm->arm->target;
532 uint32_t dscr = DSCR_INSTR_COMP;
533
534 /* "Prefetch flush" after modifying execution status in CPSR */
535 return cortex_a9_exec_opcode(target,
536 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
537 &dscr);
538 }
539
540 static int cortex_a9_instr_read_data_dcc(struct arm_dpm *dpm,
541 uint32_t opcode, uint32_t *data)
542 {
543 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
544 int retval;
545 uint32_t dscr = DSCR_INSTR_COMP;
546
547 /* the opcode, writing data to DCC */
548 retval = cortex_a9_exec_opcode(
549 a9->armv7a_common.armv4_5_common.target,
550 opcode,
551 &dscr);
552 if (retval != ERROR_OK)
553 return retval;
554
555 return cortex_a9_read_dcc(a9, data, &dscr);
556 }
557
558
559 static int cortex_a9_instr_read_data_r0(struct arm_dpm *dpm,
560 uint32_t opcode, uint32_t *data)
561 {
562 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
563 uint32_t dscr = DSCR_INSTR_COMP;
564 int retval;
565
566 /* the opcode, writing data to R0 */
567 retval = cortex_a9_exec_opcode(
568 a9->armv7a_common.armv4_5_common.target,
569 opcode,
570 &dscr);
571 if (retval != ERROR_OK)
572 return retval;
573
574 /* write R0 to DCC */
575 retval = cortex_a9_exec_opcode(
576 a9->armv7a_common.armv4_5_common.target,
577 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
578 &dscr);
579 if (retval != ERROR_OK)
580 return retval;
581
582 return cortex_a9_read_dcc(a9, data, &dscr);
583 }
584
585 static int cortex_a9_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
586 uint32_t addr, uint32_t control)
587 {
588 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
589 uint32_t vr = a9->armv7a_common.debug_base;
590 uint32_t cr = a9->armv7a_common.debug_base;
591 int retval;
592
593 switch (index_t) {
594 case 0 ... 15: /* breakpoints */
595 vr += CPUDBG_BVR_BASE;
596 cr += CPUDBG_BCR_BASE;
597 break;
598 case 16 ... 31: /* watchpoints */
599 vr += CPUDBG_WVR_BASE;
600 cr += CPUDBG_WCR_BASE;
601 index_t -= 16;
602 break;
603 default:
604 return ERROR_FAIL;
605 }
606 vr += 4 * index_t;
607 cr += 4 * index_t;
608
609 LOG_DEBUG("A9: bpwp enable, vr %08x cr %08x",
610 (unsigned) vr, (unsigned) cr);
611
612 retval = cortex_a9_dap_write_memap_register_u32(dpm->arm->target,
613 vr, addr);
614 if (retval != ERROR_OK)
615 return retval;
616 retval = cortex_a9_dap_write_memap_register_u32(dpm->arm->target,
617 cr, control);
618 return retval;
619 }
620
621 static int cortex_a9_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
622 {
623 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
624 uint32_t cr;
625
626 switch (index_t) {
627 case 0 ... 15:
628 cr = a9->armv7a_common.debug_base + CPUDBG_BCR_BASE;
629 break;
630 case 16 ... 31:
631 cr = a9->armv7a_common.debug_base + CPUDBG_WCR_BASE;
632 index_t -= 16;
633 break;
634 default:
635 return ERROR_FAIL;
636 }
637 cr += 4 * index_t;
638
639 LOG_DEBUG("A9: bpwp disable, cr %08x", (unsigned) cr);
640
641 /* clear control register */
642 return cortex_a9_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
643 }
644
645 static int cortex_a9_dpm_setup(struct cortex_a9_common *a9, uint32_t didr)
646 {
647 struct arm_dpm *dpm = &a9->armv7a_common.dpm;
648 int retval;
649
650 dpm->arm = &a9->armv7a_common.armv4_5_common;
651 dpm->didr = didr;
652
653 dpm->prepare = cortex_a9_dpm_prepare;
654 dpm->finish = cortex_a9_dpm_finish;
655
656 dpm->instr_write_data_dcc = cortex_a9_instr_write_data_dcc;
657 dpm->instr_write_data_r0 = cortex_a9_instr_write_data_r0;
658 dpm->instr_cpsr_sync = cortex_a9_instr_cpsr_sync;
659
660 dpm->instr_read_data_dcc = cortex_a9_instr_read_data_dcc;
661 dpm->instr_read_data_r0 = cortex_a9_instr_read_data_r0;
662
663 dpm->bpwp_enable = cortex_a9_bpwp_enable;
664 dpm->bpwp_disable = cortex_a9_bpwp_disable;
665
666 retval = arm_dpm_setup(dpm);
667 if (retval == ERROR_OK)
668 retval = arm_dpm_initialize(dpm);
669
670 return retval;
671 }
672
673
674 /*
675 * Cortex-A9 Run control
676 */
677
/* Sample DSCR and update target->state.  On a newly observed halt this
 * performs debug entry and fires the matching HALTED / DEBUG_HALTED
 * event callbacks; otherwise it just tracks running/unknown state. */
static int cortex_a9_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
	struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
	struct adiv5_dap *swjdp = &armv7a->dap;
	enum target_state prev_target_state = target->state;

	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
	{
		return retval;
	}
	/* cache the raw DSCR for later consumers (e.g. debug_entry) */
	cortex_a9->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED))
	{
		if (prev_target_state != TARGET_HALTED)
		{
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
					|| (prev_target_state == TARGET_RESET))
			{
				retval = cortex_a9_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;

				target_call_event_callbacks(target,
						TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING)
			{
				LOG_DEBUG(" ");

				retval = cortex_a9_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;

				target_call_event_callbacks(target,
						TARGET_EVENT_DEBUG_HALTED);
			}
		}
	}
	else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
	{
		target->state = TARGET_RUNNING;
	}
	else
	{
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
737
738 static int cortex_a9_halt(struct target *target)
739 {
740 int retval = ERROR_OK;
741 uint32_t dscr;
742 struct armv7a_common *armv7a = target_to_armv7a(target);
743 struct adiv5_dap *swjdp = &armv7a->dap;
744
745 /*
746 * Tell the core to be halted by writing DRCR with 0x1
747 * and then wait for the core to be halted.
748 */
749 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
750 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
751 if (retval != ERROR_OK)
752 return retval;
753
754 /*
755 * enter halting debug mode
756 */
757 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
758 armv7a->debug_base + CPUDBG_DSCR, &dscr);
759 if (retval != ERROR_OK)
760 return retval;
761
762 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
763 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
764 if (retval != ERROR_OK)
765 return retval;
766
767 long long then = timeval_ms();
768 for (;;)
769 {
770 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
771 armv7a->debug_base + CPUDBG_DSCR, &dscr);
772 if (retval != ERROR_OK)
773 return retval;
774 if ((dscr & DSCR_CORE_HALTED) != 0)
775 {
776 break;
777 }
778 if (timeval_ms() > then + 1000)
779 {
780 LOG_ERROR("Timeout waiting for halt");
781 return ERROR_FAIL;
782 }
783 }
784
785 target->debug_reason = DBG_REASON_DBGRQ;
786
787 return ERROR_OK;
788 }
789
/* Resume execution.  current=1: continue at the cached PC, otherwise at
 * <address>.  Restores dirty registers, clears ITRen and sticky
 * exceptions via DRCR, waits for restart, and fires the RESUMED or
 * DEBUG_RESUMED event depending on debug_execution. */
static int cortex_a9_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *swjdp = &armv7a->dap;
	int retval;

//	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc, dscr;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution)
	{
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
	if (!current)
		resume_pc = address;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (armv4_5->core_state)
	{
		case ARM_STATE_ARM:
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	/* Mark the adjusted PC dirty so restore_context writes it back. */
	buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
	armv4_5->pc->dirty = 1;
	armv4_5->pc->valid = 1;

	retval = cortex_a9_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		/* Single step past breakpoint at current address */
		if ((breakpoint = breakpoint_find(target, resume_pc)))
		{
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif

	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART | DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Wait (1 s timeout) for DSCR to report the restart. */
	long long then = timeval_ms();
	for (;;)
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	if (!debug_execution)
	{
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
	}

	return ERROR_OK;
}
937
/* Bring OpenOCD's state in sync with a freshly halted core: enable ITR
 * execution, report the debug reason (and WFAR for watchpoints), read
 * the register file either through the DPM or through a work area, fix
 * up the PC for the pipeline offset, then run post_debug_entry. */
static int cortex_a9_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *swjdp = &armv7a->dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a9->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A9 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a9->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a9->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
	{
		/* slow path: read registers one at a time through the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	}
	else
	{
		/* fast path: dump r0..r15 to the work area in one go */
		retval = cortex_a9_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
		{
			return retval;
		}

		/* read Current PSR */
		retval = cortex_a9_dap_read_coreregister_u32(target, &cpsr, 16);
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(armv4_5, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++)
		{
			reg = arm_reg_current(armv4_5, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: undo the pipeline offset in the
		 * captured PC (CPSR bit 5 is the T bit). */
		if (cpsr & (1 << 5))
		{
			// T bit set for Thumb or ThumbEE state
			regfile[ARM_PC] -= 4;
		}
		else
		{
			// ARM state
			regfile[ARM_PC] -= 8;
		}

		reg = armv4_5->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a9_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a9_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a9_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
//	armv4_5->exception_number = 0;
	if (armv7a->post_debug_entry)
	{
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1067
/* Post-halt hook: snapshot the CP15 system control state so the rest of
 * the driver knows whether the MMU and the caches are currently enabled.
 * Called from cortex_a9_debug_entry() via armv7a->post_debug_entry.
 * Returns ERROR_OK or the first failing CP15 access's error code. */
static int cortex_a9_post_debug_entry(struct target *target)
{
	struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
	struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	retval = armv7a->armv4_5_common.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cortex_a9->cp15_control_reg);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a9->cp15_control_reg);

	/* ctype == -1 means the cache geometry has not been probed yet
	 * (set that way in cortex_a9_init_arch_info); probe it once. */
	if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 1,	/* op1, op2 */
				0, 0,	/* CRn, CRm */
				&cache_type_reg);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);

		/* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A9 */
		armv4_5_identify_cache(cache_type_reg,
				&armv7a->armv4_5_mmu.armv4_5_cache);
	}

	/* Decode SCTLR enable bits: M (bit 0) = MMU, C (bit 2) = data/unified
	 * cache, I (bit 12) = instruction cache. */
	armv7a->armv4_5_mmu.mmu_enabled =
			(cortex_a9->cp15_control_reg & 0x1U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
			(cortex_a9->cp15_control_reg & 0x4U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
			(cortex_a9->cp15_control_reg & 0x1000U) ? 1 : 0;

	return ERROR_OK;
}
1110
1111 static int cortex_a9_step(struct target *target, int current, uint32_t address,
1112 int handle_breakpoints)
1113 {
1114 struct armv7a_common *armv7a = target_to_armv7a(target);
1115 struct arm *armv4_5 = &armv7a->armv4_5_common;
1116 struct breakpoint *breakpoint = NULL;
1117 struct breakpoint stepbreakpoint;
1118 struct reg *r;
1119 int retval;
1120
1121 if (target->state != TARGET_HALTED)
1122 {
1123 LOG_WARNING("target not halted");
1124 return ERROR_TARGET_NOT_HALTED;
1125 }
1126
1127 /* current = 1: continue on current pc, otherwise continue at <address> */
1128 r = armv4_5->pc;
1129 if (!current)
1130 {
1131 buf_set_u32(r->value, 0, 32, address);
1132 }
1133 else
1134 {
1135 address = buf_get_u32(r->value, 0, 32);
1136 }
1137
1138 /* The front-end may request us not to handle breakpoints.
1139 * But since Cortex-A9 uses breakpoint for single step,
1140 * we MUST handle breakpoints.
1141 */
1142 handle_breakpoints = 1;
1143 if (handle_breakpoints) {
1144 breakpoint = breakpoint_find(target, address);
1145 if (breakpoint)
1146 cortex_a9_unset_breakpoint(target, breakpoint);
1147 }
1148
1149 /* Setup single step breakpoint */
1150 stepbreakpoint.address = address;
1151 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1152 ? 2 : 4;
1153 stepbreakpoint.type = BKPT_HARD;
1154 stepbreakpoint.set = 0;
1155
1156 /* Break on IVA mismatch */
1157 cortex_a9_set_breakpoint(target, &stepbreakpoint, 0x04);
1158
1159 target->debug_reason = DBG_REASON_SINGLESTEP;
1160
1161 retval = cortex_a9_resume(target, 1, address, 0, 0);
1162 if (retval != ERROR_OK)
1163 return retval;
1164
1165 long long then = timeval_ms();
1166 while (target->state != TARGET_HALTED)
1167 {
1168 retval = cortex_a9_poll(target);
1169 if (retval != ERROR_OK)
1170 return retval;
1171 if (timeval_ms() > then + 1000)
1172 {
1173 LOG_ERROR("timeout waiting for target halt");
1174 return ERROR_FAIL;
1175 }
1176 }
1177
1178 cortex_a9_unset_breakpoint(target, &stepbreakpoint);
1179
1180 target->debug_reason = DBG_REASON_BREAKPOINT;
1181
1182 if (breakpoint)
1183 cortex_a9_set_breakpoint(target, breakpoint, 0);
1184
1185 if (target->state != TARGET_HALTED)
1186 LOG_DEBUG("target stepped");
1187
1188 return ERROR_OK;
1189 }
1190
1191 static int cortex_a9_restore_context(struct target *target, bool bpwp)
1192 {
1193 struct armv7a_common *armv7a = target_to_armv7a(target);
1194
1195 LOG_DEBUG(" ");
1196
1197 if (armv7a->pre_restore_context)
1198 armv7a->pre_restore_context(target);
1199
1200 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1201 }
1202
1203
1204 /*
1205 * Cortex-A9 Breakpoint and watchpoint functions
1206 */
1207
1208 /* Setup hardware Breakpoint Register Pair */
1209 static int cortex_a9_set_breakpoint(struct target *target,
1210 struct breakpoint *breakpoint, uint8_t matchmode)
1211 {
1212 int retval;
1213 int brp_i=0;
1214 uint32_t control;
1215 uint8_t byte_addr_select = 0x0F;
1216 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1217 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1218 struct cortex_a9_brp * brp_list = cortex_a9->brp_list;
1219
1220 if (breakpoint->set)
1221 {
1222 LOG_WARNING("breakpoint already set");
1223 return ERROR_OK;
1224 }
1225
1226 if (breakpoint->type == BKPT_HARD)
1227 {
1228 while (brp_list[brp_i].used && (brp_i < cortex_a9->brp_num))
1229 brp_i++ ;
1230 if (brp_i >= cortex_a9->brp_num)
1231 {
1232 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1233 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1234 }
1235 breakpoint->set = brp_i + 1;
1236 if (breakpoint->length == 2)
1237 {
1238 byte_addr_select = (3 << (breakpoint->address & 0x02));
1239 }
1240 control = ((matchmode & 0x7) << 20)
1241 | (byte_addr_select << 5)
1242 | (3 << 1) | 1;
1243 brp_list[brp_i].used = 1;
1244 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1245 brp_list[brp_i].control = control;
1246 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1247 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1248 brp_list[brp_i].value);
1249 if (retval != ERROR_OK)
1250 return retval;
1251 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1252 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1253 brp_list[brp_i].control);
1254 if (retval != ERROR_OK)
1255 return retval;
1256 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1257 brp_list[brp_i].control,
1258 brp_list[brp_i].value);
1259 }
1260 else if (breakpoint->type == BKPT_SOFT)
1261 {
1262 uint8_t code[4];
1263 if (breakpoint->length == 2)
1264 {
1265 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1266 }
1267 else
1268 {
1269 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1270 }
1271 retval = target->type->read_memory(target,
1272 breakpoint->address & 0xFFFFFFFE,
1273 breakpoint->length, 1,
1274 breakpoint->orig_instr);
1275 if (retval != ERROR_OK)
1276 return retval;
1277 retval = target->type->write_memory(target,
1278 breakpoint->address & 0xFFFFFFFE,
1279 breakpoint->length, 1, code);
1280 if (retval != ERROR_OK)
1281 return retval;
1282 breakpoint->set = 0x11; /* Any nice value but 0 */
1283 }
1284
1285 return ERROR_OK;
1286 }
1287
/* Remove a previously-set breakpoint from the target.
 * Hard breakpoints: zero and write back the BRP's DBGBCR/DBGBVR and mark
 * the pair free.  Soft breakpoints: restore the saved original instruction.
 * Clears breakpoint->set on success.  Returns ERROR_OK or the error of a
 * failing debug/memory write. */
static int cortex_a9_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
	struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
	struct cortex_a9_brp * brp_list = cortex_a9->brp_list;

	if (!breakpoint->set)
	{
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		/* breakpoint->set stores BRP index + 1 (see set_breakpoint) */
		int brp_i = breakpoint->set - 1;
		if ((brp_i < 0) || (brp_i >= cortex_a9->brp_num))
		{
			LOG_DEBUG("Invalid BRP number in breakpoint");
			return ERROR_OK;
		}
		LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
		brp_list[brp_i].used = 0;
		brp_list[brp_i].value = 0;
		brp_list[brp_i].control = 0;
		/* Write BCR first: clearing its enable bit disables the
		 * breakpoint before the value register is cleared. */
		retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
				brp_list[brp_i].control);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
				brp_list[brp_i].value);
		if (retval != ERROR_OK)
			return retval;
	}
	else
	{
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4)
		{
			retval = target->type->write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
		else
		{
			retval = target->type->write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1349
1350 static int cortex_a9_add_breakpoint(struct target *target,
1351 struct breakpoint *breakpoint)
1352 {
1353 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1354
1355 if ((breakpoint->type == BKPT_HARD) && (cortex_a9->brp_num_available < 1))
1356 {
1357 LOG_INFO("no hardware breakpoint available");
1358 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1359 }
1360
1361 if (breakpoint->type == BKPT_HARD)
1362 cortex_a9->brp_num_available--;
1363
1364 return cortex_a9_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1365 }
1366
1367 static int cortex_a9_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1368 {
1369 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1370
1371 #if 0
1372 /* It is perfectly possible to remove breakpoints while the target is running */
1373 if (target->state != TARGET_HALTED)
1374 {
1375 LOG_WARNING("target not halted");
1376 return ERROR_TARGET_NOT_HALTED;
1377 }
1378 #endif
1379
1380 if (breakpoint->set)
1381 {
1382 cortex_a9_unset_breakpoint(target, breakpoint);
1383 if (breakpoint->type == BKPT_HARD)
1384 cortex_a9->brp_num_available++ ;
1385 }
1386
1387
1388 return ERROR_OK;
1389 }
1390
1391
1392
1393 /*
1394 * Cortex-A9 Reset functions
1395 */
1396
/* Assert reset on the target: prefer a user-supplied RESET_ASSERT event
 * handler, fall back to driving SRST if the adapter supports it, and
 * fail loudly otherwise.  Invalidates the register cache and marks the
 * target as being in reset. */
static int cortex_a9_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	} else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	register_cache_invalidate(armv7a->armv4_5_common.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1425
1426 static int cortex_a9_deassert_reset(struct target *target)
1427 {
1428 int retval;
1429
1430 LOG_DEBUG(" ");
1431
1432 /* be certain SRST is off */
1433 jtag_add_reset(0, 0);
1434
1435 retval = cortex_a9_poll(target);
1436 if (retval != ERROR_OK)
1437 return retval;
1438
1439 if (target->reset_halt) {
1440 if (target->state != TARGET_HALTED) {
1441 LOG_WARNING("%s: ran after reset and before halt ...",
1442 target_name(target));
1443 if ((retval = target_halt(target)) != ERROR_OK)
1444 return retval;
1445 }
1446 }
1447
1448 return ERROR_OK;
1449 }
1450
1451 /*
1452 * Cortex-A9 Memory access
1453 *
1454 * This is same Cortex M3 but we must also use the correct
1455 * ap number for every access.
1456 */
1457
/* Read physical memory.
 *
 * If the currently selected AP is the memory AP (AHB-AP), the read goes
 * straight through the bus with the natural access width.  Otherwise the
 * read is performed through the APB-AP by executing LDRB instructions on
 * the (halted) core, one byte at a time — slow, and only valid while the
 * MMU is disabled.  r0/r1 are clobbered by that path and restored before
 * returning.
 *
 * Returns ERROR_INVALID_ARGUMENTS for count == 0 or buffer == NULL. */
static int cortex_a9_read_phys_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;
	int retval = ERROR_INVALID_ARGUMENTS;
	uint8_t apsel = dap_ap_get_select(swjdp);

	LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);

	if (count && buffer) {

		if ( apsel == swjdp_memoryap ) {

			/* read memory through AHB-AP */

			switch (size) {
			case 4:
				retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
						buffer, 4 * count, address);
				break;
			case 2:
				retval = mem_ap_sel_read_buf_u16(swjdp, swjdp_memoryap,
						buffer, 2 * count, address);
				break;
			case 1:
				retval = mem_ap_sel_read_buf_u8(swjdp, swjdp_memoryap,
						buffer, count, address);
				break;
			}

		} else {

			/* read memory through APB-AP */

			uint32_t saved_r0, saved_r1;
			int nbytes = count * size;
			uint32_t data;
			int enabled = 0;

			/* The APB path executes opcodes on the core, so it
			 * must be halted. */
			if (target->state != TARGET_HALTED)
			{
				LOG_WARNING("target not halted");
				return ERROR_TARGET_NOT_HALTED;
			}

			retval = cortex_a9_mmu(target, &enabled);
			if (retval != ERROR_OK)
				return retval;

			if (enabled)
			{
				LOG_WARNING("Reading physical memory through APB with MMU enabled is not yet implemented");
				return ERROR_TARGET_FAILURE;
			}

			/* save registers r0 and r1, we are going to corrupt them */
			retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r0, 0);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r1, 1);
			if (retval != ERROR_OK)
				return retval;

			/* r0 = read pointer, post-incremented by each LDRB */
			retval = cortex_a9_dap_write_coreregister_u32(target, address, 0);
			if (retval != ERROR_OK)
				return retval;

			while (nbytes > 0) {

				/* execute instruction LDRB r1, [r0], 1 (0xe4d01001) */
				retval = cortex_a9_exec_opcode(target, ARMV4_5_LDRB_IP(1, 0) , NULL);
				if (retval != ERROR_OK)
					return retval;

				retval = cortex_a9_dap_read_coreregister_u32(target, &data, 1);
				if (retval != ERROR_OK)
					return retval;

				/* only the low byte of r1 is meaningful (LDRB) */
				*buffer++ = data;
				--nbytes;

			}

			/* restore corrupted registers r0 and r1 */
			retval = cortex_a9_dap_write_coreregister_u32(target, saved_r0, 0);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a9_dap_write_coreregister_u32(target, saved_r1, 1);
			if (retval != ERROR_OK)
				return retval;

		}
	}

	return retval;
}
1558
1559 static int cortex_a9_read_memory(struct target *target, uint32_t address,
1560 uint32_t size, uint32_t count, uint8_t *buffer)
1561 {
1562 int enabled = 0;
1563 uint32_t virt, phys;
1564 int retval;
1565
1566 /* cortex_a9 handles unaligned memory access */
1567
1568 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1569 retval = cortex_a9_mmu(target, &enabled);
1570 if (retval != ERROR_OK)
1571 return retval;
1572
1573 if (enabled)
1574 {
1575 virt = address;
1576 retval = cortex_a9_virt2phys(target, virt, &phys);
1577 if (retval != ERROR_OK)
1578 return retval;
1579
1580 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1581 address = phys;
1582 }
1583
1584 return cortex_a9_read_phys_memory(target, address, size, count, buffer);
1585 }
1586
/* Write physical memory.
 *
 * Memory-AP (AHB-AP) selected: write directly over the bus, then — since
 * that path bypasses the core's caches — invalidate the I- and D-cache
 * lines covering the written range so the core sees the new contents.
 * Any other AP: write through the APB-AP by executing STRB on the halted
 * core one byte at a time (MMU must be off); this path is cache-coherent
 * and returns early without cache maintenance.
 *
 * Returns ERROR_INVALID_ARGUMENTS for count == 0 or buffer == NULL. */
static int cortex_a9_write_phys_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;
	int retval = ERROR_INVALID_ARGUMENTS;

	LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);

	if (count && buffer) {
		uint8_t apsel = dap_ap_get_select(swjdp);

		if ( apsel == swjdp_memoryap ) {

			/* write memory through AHB-AP */
			switch (size) {
			case 4:
				retval = mem_ap_sel_write_buf_u32(swjdp, swjdp_memoryap,
						buffer, 4 * count, address);
				break;
			case 2:
				retval = mem_ap_sel_write_buf_u16(swjdp, swjdp_memoryap,
						buffer, 2 * count, address);
				break;
			case 1:
				retval = mem_ap_sel_write_buf_u8(swjdp, swjdp_memoryap,
						buffer, count, address);
				break;
			}

		} else {

			/* write memory through APB-AP */

			uint32_t saved_r0, saved_r1;
			int nbytes = count * size;
			uint32_t data;
			int enabled = 0;

			/* The APB path executes opcodes on the core, so it
			 * must be halted. */
			if (target->state != TARGET_HALTED)
			{
				LOG_WARNING("target not halted");
				return ERROR_TARGET_NOT_HALTED;
			}

			retval = cortex_a9_mmu(target, &enabled);
			if (retval != ERROR_OK)
				return retval;

			if (enabled)
			{
				LOG_WARNING("Writing physical memory through APB with MMU enabled is not yet implemented");
				return ERROR_TARGET_FAILURE;
			}

			/* save registers r0 and r1, we are going to corrupt them */
			retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r0, 0);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r1, 1);
			if (retval != ERROR_OK)
				return retval;

			/* r0 = write pointer, post-incremented by each STRB */
			retval = cortex_a9_dap_write_coreregister_u32(target, address, 0);
			if (retval != ERROR_OK)
				return retval;

			while (nbytes > 0) {

				data = *buffer++;

				retval = cortex_a9_dap_write_coreregister_u32(target, data, 1);
				if (retval != ERROR_OK)
					return retval;

				/* execute instruction STRB r1, [r0], 1 (0xe4c01001) */
				retval = cortex_a9_exec_opcode(target, ARMV4_5_STRB_IP(1, 0) , NULL);
				if (retval != ERROR_OK)
					return retval;

				--nbytes;
			}

			/* restore corrupted registers r0 and r1 */
			retval = cortex_a9_dap_write_coreregister_u32(target, saved_r0, 0);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a9_dap_write_coreregister_u32(target, saved_r1, 1);
			if (retval != ERROR_OK)
				return retval;

			/* we can return here without invalidating D/I-cache because */
			/* access through APB maintains cache coherency */
			return retval;
		}
	}


	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED)
	{
		struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A9 has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
		{
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 *	MCR p15, 0, r0, c7, c5, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* invalidate D-Cache */
		if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
		{
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 *	MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}
1747
1748 static int cortex_a9_write_memory(struct target *target, uint32_t address,
1749 uint32_t size, uint32_t count, uint8_t *buffer)
1750 {
1751 int enabled = 0;
1752 uint32_t virt, phys;
1753 int retval;
1754
1755 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1756 retval = cortex_a9_mmu(target, &enabled);
1757 if (retval != ERROR_OK)
1758 return retval;
1759
1760 if (enabled)
1761 {
1762 virt = address;
1763 retval = cortex_a9_virt2phys(target, virt, &phys);
1764 if (retval != ERROR_OK)
1765 return retval;
1766 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1767 address = phys;
1768 }
1769
1770 return cortex_a9_write_phys_memory(target, address, size,
1771 count, buffer);
1772 }
1773
1774 static int cortex_a9_bulk_write_memory(struct target *target, uint32_t address,
1775 uint32_t count, uint8_t *buffer)
1776 {
1777 return cortex_a9_write_memory(target, address, 4, count, buffer);
1778 }
1779
/* Read one byte of debug-comms-channel data plus its control byte.
 * NOTE(review): the real implementation is disabled (#if 0), so this is
 * currently a stub: it always returns ERROR_OK and leaves *value and
 * *ctrl untouched (callers pre-initialize them to 0). */
static int cortex_a9_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
{
#if 0
	u16 dcrdr;

	mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	*ctrl = (uint8_t)dcrdr;
	*value = (uint8_t)(dcrdr >> 8);

	LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);

	/* write ack back to software dcc register
	 * signify we have read data */
	if (dcrdr & (1 << 0))
	{
		dcrdr = 0;
		mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	}
#endif
	return ERROR_OK;
}
1801
1802
/* Periodic timer callback (registered in cortex_a9_init_arch_info):
 * while the target runs, poll the DCC for a 32-bit target request,
 * assembled little-endian from four consecutive byte reads, and hand it
 * to the generic target_request() dispatcher.
 * NOTE(review): cortex_a9_dcc_read is currently a stub that leaves ctrl
 * at 0, so the data-available branch never fires in this revision. */
static int cortex_a9_handle_target_request(void *priv)
{
	struct target *target = priv;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;
	int retval;

	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING)
	{
		uint8_t data = 0;
		uint8_t ctrl = 0;

		retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
		if (retval != ERROR_OK)
			return retval;

		/* check if we have data */
		if (ctrl & (1 << 0))
		{
			uint32_t request;

			/* we assume target is quick enough */
			request = data;
			retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
			if (retval != ERROR_OK)
				return retval;
			request |= (data << 8);
			retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
			if (retval != ERROR_OK)
				return retval;
			request |= (data << 16);
			retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
			if (retval != ERROR_OK)
				return retval;
			request |= (data << 24);
			target_request(target, request);
		}
	}

	return ERROR_OK;
}
1849
1850 /*
1851 * Cortex-A9 target information and configuration
1852 */
1853
/* First-time probe of the target: initialize the debug port, locate the
 * per-core CoreSight debug base, read and log the identification
 * registers (CPUID/CTYPR/TTYPR/DIDR), set up the DPM, and build the
 * bookkeeping list of hardware Breakpoint Register Pairs advertised in
 * DIDR.  Marks the target examined on success. */
static int cortex_a9_examine_first(struct target *target)
{
	struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
	struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
	struct adiv5_dap *swjdp = &armv7a->dap;
	int i;
	int retval = ERROR_OK;
	uint32_t didr, ctypr, ttypr, cpuid;

	/* We do one extra read to ensure DAP is configured,
	 * we call ahbap_debugport_init(swjdp) instead
	 */
	retval = ahbap_debugport_init(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * FIXME: assuming omap4430
	 *
	 * APB DBGBASE reads 0x80040000, but this points to an empty ROM table.
	 * 0x80000000 is cpu0 coresight region
	 */
	if (target->coreid > 3) {
		LOG_ERROR("cortex_a9 supports up to 4 cores");
		return ERROR_INVALID_ARGUMENTS;
	}
	/* each core's debug region is selected by PADDRDBG[CPU] bits */
	armv7a->debug_base = 0x80000000 |
			((target->coreid & 0x3) << CORTEX_A9_PADDRDBG_CPU_SHIFT);

	/* NOTE(review): CPUID is read twice, here and immediately below;
	 * presumably the first read is the "extra read" mentioned above —
	 * confirm before removing. */
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_CPUID, &cpuid);
	if (retval != ERROR_OK)
		return retval;

	if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
	{
		LOG_DEBUG("Examine %s failed", "CPUID");
		return retval;
	}

	if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
	{
		LOG_DEBUG("Examine %s failed", "CTYPR");
		return retval;
	}

	if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
	{
		LOG_DEBUG("Examine %s failed", "TTYPR");
		return retval;
	}

	if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
	{
		LOG_DEBUG("Examine %s failed", "DIDR");
		return retval;
	}

	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
	LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
	LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
	LOG_DEBUG("didr = 0x%08" PRIx32, didr);

	armv7a->armv4_5_common.core_type = ARM_MODE_MON;
	retval = cortex_a9_dpm_setup(cortex_a9, didr);
	if (retval != ERROR_OK)
		return retval;

	/* Setup Breakpoint Register Pairs: DIDR[27:24] = BRPs - 1,
	 * DIDR[23:20] = context-matching BRPs - 1 */
	cortex_a9->brp_num = ((didr >> 24) & 0x0F) + 1;
	cortex_a9->brp_num_context = ((didr >> 20) & 0x0F) + 1;
	cortex_a9->brp_num_available = cortex_a9->brp_num;
	/* NOTE(review): calloc result is not checked; a failure here would
	 * crash later when the list is indexed. */
	cortex_a9->brp_list = calloc(cortex_a9->brp_num, sizeof(struct cortex_a9_brp));
//	cortex_a9->brb_enabled = ????;
	/* address-match BRPs come first, context-match BRPs last */
	for (i = 0; i < cortex_a9->brp_num; i++)
	{
		cortex_a9->brp_list[i].used = 0;
		if (i < (cortex_a9->brp_num-cortex_a9->brp_num_context))
			cortex_a9->brp_list[i].type = BRP_NORMAL;
		else
			cortex_a9->brp_list[i].type = BRP_CONTEXT;
		cortex_a9->brp_list[i].value = 0;
		cortex_a9->brp_list[i].control = 0;
		cortex_a9->brp_list[i].BRPn = i;
	}

	LOG_DEBUG("Configured %i hw breakpoints", cortex_a9->brp_num);

	target_set_examined(target);
	return ERROR_OK;
}
1949
1950 static int cortex_a9_examine(struct target *target)
1951 {
1952 int retval = ERROR_OK;
1953
1954 /* don't re-probe hardware after each reset */
1955 if (!target_was_examined(target))
1956 retval = cortex_a9_examine_first(target);
1957
1958 /* Configure core debug access */
1959 if (retval == ERROR_OK)
1960 retval = cortex_a9_init_debug_access(target);
1961
1962 return retval;
1963 }
1964
1965 /*
1966 * Cortex-A9 target creation and initialization
1967 */
1968
1969 static int cortex_a9_init_target(struct command_context *cmd_ctx,
1970 struct target *target)
1971 {
1972 /* examine_first() does a bunch of this */
1973 return ERROR_OK;
1974 }
1975
/* Wire up the freshly allocated cortex_a9_common: magic numbers, JTAG/DAP
 * parameters, the ARMv7-A MMU/cache callback table, and the periodic
 * target-request timer.  Called once from cortex_a9_target_create(). */
static int cortex_a9_init_arch_info(struct target *target,
		struct cortex_a9_common *cortex_a9, struct jtag_tap *tap)
{
	struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *dap = &armv7a->dap;

	armv7a->armv4_5_common.dap = dap;

	/* Setup struct cortex_a9_common */
	cortex_a9->common_magic = CORTEX_A9_COMMON_MAGIC;
	armv4_5->arch_info = armv7a;

	/* prepare JTAG information for the new target */
	cortex_a9->jtag_info.tap = tap;
	cortex_a9->jtag_info.scann_size = 4;

	/* Leave (only) generic DAP stuff for debugport_init() */
	dap->jtag_info = &cortex_a9->jtag_info;
	dap->memaccess_tck = 80;

	/* Number of bits for tar autoincrement, impl. dep. at least 10 */
	dap->tar_autoincr_block = (1 << 10);

	cortex_a9->fast_reg_read = 0;

	/* Set default value: ARM_MODE_ANY means "no translation context",
	 * see cortex_a9_virt2phys()/cortex_a9_get_ttb() */
	cortex_a9->current_address_mode = ARM_MODE_ANY;

	/* register arch-specific functions */
	armv7a->examine_debug_reason = NULL;

	armv7a->post_debug_entry = cortex_a9_post_debug_entry;

	armv7a->pre_restore_context = NULL;
	/* ctype == -1 marks the cache geometry as not-yet-probed */
	armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
	armv7a->armv4_5_mmu.get_ttb = cortex_a9_get_ttb;
	armv7a->armv4_5_mmu.read_memory = cortex_a9_read_phys_memory;
	armv7a->armv4_5_mmu.write_memory = cortex_a9_write_phys_memory;
	armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a9_disable_mmu_caches;
	armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a9_enable_mmu_caches;
	armv7a->armv4_5_mmu.has_tiny_pages = 1;
	armv7a->armv4_5_mmu.mmu_enabled = 0;


//	arm7_9->handle_target_request = cortex_a9_handle_target_request;

	/* REVISIT v7a setup should be in a v7a-specific routine */
	arm_init_arch_info(target, armv4_5);
	armv7a->common_magic = ARMV7_COMMON_MAGIC;

	/* poll for DCC target requests every ms while running */
	target_register_timer_callback(cortex_a9_handle_target_request, 1, 1, target);

	return ERROR_OK;
}
2031
2032 static int cortex_a9_target_create(struct target *target, Jim_Interp *interp)
2033 {
2034 struct cortex_a9_common *cortex_a9 = calloc(1, sizeof(struct cortex_a9_common));
2035
2036 return cortex_a9_init_arch_info(target, cortex_a9, target->tap);
2037 }
2038
/* Fetch the Translation Table Base for the address space being examined:
 * TTBR1 (kernel) or TTBR0 (user), chosen from current_address_mode when
 * cortex_a9_virt2phys() set it, else from the core's current mode.
 * The low 14 bits (table alignment) are masked off.  If no mode can be
 * determined, an error is logged and *result is 0 (still ERROR_OK).
 * NOTE: the original comments here wrongly claimed c1,c0 "System Control
 * Register"; these MRCs read c2,c0 (TTBR0/TTBR1) — corrected below. */
static int cortex_a9_get_ttb(struct target *target, uint32_t *result)
{
	struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
	struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
	uint32_t ttb = 0, retval = ERROR_OK;

	/* current_address_mode is set inside cortex_a9_virt2phys()
	   where we can determine if address belongs to user or kernel */
	if(cortex_a9->current_address_mode == ARM_MODE_SVC)
	{
		/* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 TTBR1 (kernel) */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 1,	/* op1, op2 */
				2, 0,	/* CRn, CRm */
				&ttb);
		if (retval != ERROR_OK)
			return retval;
	}
	else if(cortex_a9->current_address_mode == ARM_MODE_USR)
	{
		/* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 TTBR0 (user) */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 0,	/* op1, op2 */
				2, 0,	/* CRn, CRm */
				&ttb);
		if (retval != ERROR_OK)
			return retval;
	}
	/* we don't know whose address is: user or kernel
	   we assume that if we are in kernel mode then
	   address belongs to kernel else if in user mode
	   - to user */
	else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
	{
		/* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 TTBR1 (kernel) */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 1,	/* op1, op2 */
				2, 0,	/* CRn, CRm */
				&ttb);
		if (retval != ERROR_OK)
			return retval;
	}
	else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
	{
		/* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 TTBR0 (user) */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 0,	/* op1, op2 */
				2, 0,	/* CRn, CRm */
				&ttb);
		if (retval != ERROR_OK)
			return retval;
	}
	/* finally we don't know whose ttb to use: user or kernel */
	else
		LOG_ERROR("Don't know how to get ttb for current mode!!!");

	/* mask off the table's 16 KB alignment bits */
	ttb &= 0xffffc000;

	*result = ttb;

	return ERROR_OK;
}
2101
2102 static int cortex_a9_disable_mmu_caches(struct target *target, int mmu,
2103 int d_u_cache, int i_cache)
2104 {
2105 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
2106 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2107 uint32_t cp15_control;
2108 int retval;
2109
2110 /* read cp15 control register */
2111 retval = armv7a->armv4_5_common.mrc(target, 15,
2112 0, 0, /* op1, op2 */
2113 1, 0, /* CRn, CRm */
2114 &cp15_control);
2115 if (retval != ERROR_OK)
2116 return retval;
2117
2118
2119 if (mmu)
2120 cp15_control &= ~0x1U;
2121
2122 if (d_u_cache)
2123 cp15_control &= ~0x4U;
2124
2125 if (i_cache)
2126 cp15_control &= ~0x1000U;
2127
2128 retval = armv7a->armv4_5_common.mcr(target, 15,
2129 0, 0, /* op1, op2 */
2130 1, 0, /* CRn, CRm */
2131 cp15_control);
2132 return retval;
2133 }
2134
2135 static int cortex_a9_enable_mmu_caches(struct target *target, int mmu,
2136 int d_u_cache, int i_cache)
2137 {
2138 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
2139 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2140 uint32_t cp15_control;
2141 int retval;
2142
2143 /* read cp15 control register */
2144 retval = armv7a->armv4_5_common.mrc(target, 15,
2145 0, 0, /* op1, op2 */
2146 1, 0, /* CRn, CRm */
2147 &cp15_control);
2148 if (retval != ERROR_OK)
2149 return retval;
2150
2151 if (mmu)
2152 cp15_control |= 0x1U;
2153
2154 if (d_u_cache)
2155 cp15_control |= 0x4U;
2156
2157 if (i_cache)
2158 cp15_control |= 0x1000U;
2159
2160 retval = armv7a->armv4_5_common.mcr(target, 15,
2161 0, 0, /* op1, op2 */
2162 1, 0, /* CRn, CRm */
2163 cp15_control);
2164 return retval;
2165 }
2166
2167
2168 static int cortex_a9_mmu(struct target *target, int *enabled)
2169 {
2170 if (target->state != TARGET_HALTED) {
2171 LOG_ERROR("%s: target not halted", __func__);
2172 return ERROR_TARGET_INVALID;
2173 }
2174
2175 *enabled = target_to_cortex_a9(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
2176 return ERROR_OK;
2177 }
2178
2179 static int cortex_a9_virt2phys(struct target *target,
2180 uint32_t virt, uint32_t *phys)
2181 {
2182 uint32_t cb;
2183 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
2184 // struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2185 struct armv7a_common *armv7a = target_to_armv7a(target);
2186
2187 /* We assume that virtual address is separated
2188 between user and kernel in Linux style:
2189 0x00000000-0xbfffffff - User space
2190 0xc0000000-0xffffffff - Kernel space */
2191 if( virt < 0xc0000000 ) /* Linux user space */
2192 cortex_a9->current_address_mode = ARM_MODE_USR;
2193 else /* Linux kernel */
2194 cortex_a9->current_address_mode = ARM_MODE_SVC;
2195 uint32_t ret;
2196 int retval = armv4_5_mmu_translate_va(target,
2197 &armv7a->armv4_5_mmu, virt, &cb, &ret);
2198 if (retval != ERROR_OK)
2199 return retval;
2200 /* Reset the flag. We don't want someone else to use it by error */
2201 cortex_a9->current_address_mode = ARM_MODE_ANY;
2202
2203 *phys = ret;
2204 return ERROR_OK;
2205 }
2206
2207 COMMAND_HANDLER(cortex_a9_handle_cache_info_command)
2208 {
2209 struct target *target = get_current_target(CMD_CTX);
2210 struct armv7a_common *armv7a = target_to_armv7a(target);
2211
2212 return armv4_5_handle_cache_info_command(CMD_CTX,
2213 &armv7a->armv4_5_mmu.armv4_5_cache);
2214 }
2215
2216
2217 COMMAND_HANDLER(cortex_a9_handle_dbginit_command)
2218 {
2219 struct target *target = get_current_target(CMD_CTX);
2220 if (!target_was_examined(target))
2221 {
2222 LOG_ERROR("target not examined yet");
2223 return ERROR_FAIL;
2224 }
2225
2226 return cortex_a9_init_debug_access(target);
2227 }
2228
/* Subcommands registered under the "cortex_a9" command group. */
static const struct command_registration cortex_a9_exec_command_handlers[] = {
	{
		/* "cortex_a9 cache_info" — dump cache geometry/details */
		.name = "cache_info",
		.handler = cortex_a9_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
	},
	{
		/* "cortex_a9 dbginit" — (re-)initialize core debug access */
		.name = "dbginit",
		.handler = cortex_a9_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
	},
	COMMAND_REGISTRATION_DONE
};
/*
 * Top-level command registration for the Cortex-A9 target: chains in the
 * generic ARM and ARMv7-A command groups, then adds the "cortex_a9" group.
 */
static const struct command_registration cortex_a9_command_handlers[] = {
	{
		/* generic ARM commands (registers, disassembly, ...) */
		.chain = arm_command_handlers,
	},
	{
		/* ARMv7-A architecture commands */
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a9",
		.mode = COMMAND_ANY,
		.help = "Cortex-A9 command group",
		.chain = cortex_a9_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2259
/*
 * Target type descriptor for the Cortex-A9: wires the generic OpenOCD
 * target operations to the cortex_a9_* / armv7a / armv4_5 implementations.
 */
struct target_type cortexa9_target = {
	.name = "cortex_a9",

	.poll = cortex_a9_poll,
	.arch_state = armv7a_arch_state,

	/* no target-initiated debug message channel */
	.target_request_data = NULL,

	.halt = cortex_a9_halt,
	.resume = cortex_a9_resume,
	.step = cortex_a9_step,

	.assert_reset = cortex_a9_assert_reset,
	.deassert_reset = cortex_a9_deassert_reset,
	/* soft reset halt not implemented for this core */
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* virtual-address memory accessors (MMU-aware) */
	.read_memory = cortex_a9_read_memory,
	.write_memory = cortex_a9_write_memory,
	.bulk_write_memory = cortex_a9_bulk_write_memory,

	/* algorithm-based helpers running on the target itself */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a9_add_breakpoint,
	.remove_breakpoint = cortex_a9_remove_breakpoint,
	/* watchpoints not yet supported on this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a9_command_handlers,
	.target_create = cortex_a9_target_create,
	.init_target = cortex_a9_init_target,
	.examine = cortex_a9_examine,

	/* physical-address accessors and address translation */
	.read_phys_memory = cortex_a9_read_phys_memory,
	.write_phys_memory = cortex_a9_write_phys_memory,
	.mmu = cortex_a9_mmu,
	.virt2phys = cortex_a9_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)