76c3d37b18547350c69566db9b37ad61243a115c
[openocd.git] / src / target / cortex_a8.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A8(tm) TRM, ARM DDI 0344H *
33 * *
34 ***************************************************************************/
35 #ifdef HAVE_CONFIG_H
36 #include "config.h"
37 #endif
38
39 #include "breakpoints.h"
40 #include "cortex_a8.h"
41 #include "register.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
46
47 static int cortex_a8_poll(struct target *target);
48 static int cortex_a8_debug_entry(struct target *target);
49 static int cortex_a8_restore_context(struct target *target, bool bpwp);
50 static int cortex_a8_set_breakpoint(struct target *target,
51 struct breakpoint *breakpoint, uint8_t matchmode);
52 static int cortex_a8_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
55 uint32_t *value, int regnum);
56 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
57 uint32_t value, int regnum);
58 static int cortex_a8_mmu(struct target *target, int *enabled);
59 static int cortex_a8_virt2phys(struct target *target,
60 uint32_t virt, uint32_t *phys);
61 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
62 int d_u_cache, int i_cache);
63 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
64 int d_u_cache, int i_cache);
65 static int cortex_a8_get_ttb(struct target *target, uint32_t *result);
66
67
68 /*
69 * FIXME do topology discovery using the ROM; don't
70 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
71 * cores, with different AP numbering ... don't use a #define
72 * for these numbers, use per-core armv7a state.
73 */
74 #define swjdp_memoryap 0
75 #define swjdp_debugap 1
76 #define OMAP3530_DEBUG_BASE 0x54011000
77
78 /*
79 * Cortex-A8 Basic debug access, very low level assumes state is saved
80 */
81 static int cortex_a8_init_debug_access(struct target *target)
82 {
83 struct armv7a_common *armv7a = target_to_armv7a(target);
84 struct adiv5_dap *swjdp = &armv7a->dap;
85
86 int retval;
87 uint32_t dummy;
88
89 LOG_DEBUG(" ");
90
91 /* Unlocking the debug registers for modification */
92 /* The debugport might be uninitialised so try twice */
93 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
94 if (retval != ERROR_OK)
95 {
96 /* try again */
97 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
98 if (retval == ERROR_OK)
99 {
100 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
101 }
102 }
103 if (retval != ERROR_OK)
104 return retval;
105 /* Clear Sticky Power Down status Bit in PRSR to enable access to
106 the registers in the Core Power Domain */
107 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
108 if (retval != ERROR_OK)
109 return retval;
110
111 /* Enabling of instruction execution in debug mode is done in debug_entry code */
112
113 /* Resync breakpoint registers */
114
115 /* Since this is likely called from init or reset, update target state information*/
116 retval = cortex_a8_poll(target);
117
118 return retval;
119 }
120
121 /* To reduce needless round-trips, pass in a pointer to the current
122 * DSCR value. Initialize it to zero if you just need to know the
123 * value on return from this function; or DSCR_INSTR_COMP if you
124 * happen to know that no instruction is pending.
125 */
126 static int cortex_a8_exec_opcode(struct target *target,
127 uint32_t opcode, uint32_t *dscr_p)
128 {
129 uint32_t dscr;
130 int retval;
131 struct armv7a_common *armv7a = target_to_armv7a(target);
132 struct adiv5_dap *swjdp = &armv7a->dap;
133
134 dscr = dscr_p ? *dscr_p : 0;
135
136 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
137
138 /* Wait for InstrCompl bit to be set */
139 long long then = timeval_ms();
140 while ((dscr & DSCR_INSTR_COMP) == 0)
141 {
142 retval = mem_ap_read_atomic_u32(swjdp,
143 armv7a->debug_base + CPUDBG_DSCR, &dscr);
144 if (retval != ERROR_OK)
145 {
146 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
147 return retval;
148 }
149 if (timeval_ms() > then + 1000)
150 {
151 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
152 return ERROR_FAIL;
153 }
154 }
155
156 retval = mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
157 if (retval != ERROR_OK)
158 return retval;
159
160 then = timeval_ms();
161 do
162 {
163 retval = mem_ap_read_atomic_u32(swjdp,
164 armv7a->debug_base + CPUDBG_DSCR, &dscr);
165 if (retval != ERROR_OK)
166 {
167 LOG_ERROR("Could not read DSCR register");
168 return retval;
169 }
170 if (timeval_ms() > then + 1000)
171 {
172 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
173 return ERROR_FAIL;
174 }
175 }
176 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
177
178 if (dscr_p)
179 *dscr_p = dscr;
180
181 return retval;
182 }
183
/**************************************************************************
Read core registers with very few exec_opcode calls, fast but needs a
work_area.  This can cause problems with the MMU active.
**************************************************************************/
static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
		uint32_t * regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;

	/* r0 is fetched individually since it is clobbered below. */
	retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	/* Point r0 at the scratch area ... */
	retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* ... and store r1..r15 there with one STMIA (register mask 0xFFFE). */
	retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* Read the 15 stored words back over the memory AP, then reselect
	 * the debug AP for subsequent debug-register accesses. */
	dap_ap_select(swjdp, swjdp_memoryap);
	retval = mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
	if (retval != ERROR_OK)
		return retval;
	dap_ap_select(swjdp, swjdp_debugap);

	return retval;
}
213
/* Read one core register through the DCC.
 *
 * regnum 0..14 selects Rn, 15 the PC, 16 the CPSR, 17 the SPSR; any
 * larger value is silently ignored (returns ERROR_OK without touching
 * *value).  The register is moved into the DCC transmit register by
 * executing instructions through the ITR, then read out over the DAP.
 */
static int cortex_a8_dap_read_coreregister_u32(struct target *target,
		uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;

	if (reg > 17)
		return retval;

	if (reg < 15)
	{
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else if (reg == 15)
	{
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else
	{
		/* "MRS r0, CPSR" or "MRS r0, SPSR" (reg & 1 picks SPSR),
		 * then move r0 to DCCTX
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRTXfull, then collect the word from DTRTX */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0)
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
283
/* Write one core register through the DCC.
 *
 * regnum 0..14 selects Rn, 15 the PC, 16 the CPSR, 17 the SPSR; any
 * larger value is silently ignored.  The value is staged in the DCC
 * receive register, then pulled into the core register by executing
 * instructions through the ITR.
 */
static int cortex_a8_dap_write_coreregister_u32(struct target *target,
		uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL)
	{
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Drain the stale word: "MRC p14, 0, r0, c0, c5, 0" */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* NOTE: out-of-range register numbers are only rejected here,
	 * after the DCCRX drain above has already run. */
	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_write_u32(swjdp,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15)
	{
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0" */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else if (Rd == 15)
	{
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0"
		 * then "mov r15, r0"
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else
	{
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0"
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16)
		{
			retval = cortex_a8_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
368
369 /* Write to memory mapped registers directly with no cache or mmu handling */
370 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
371 {
372 int retval;
373 struct armv7a_common *armv7a = target_to_armv7a(target);
374 struct adiv5_dap *swjdp = &armv7a->dap;
375
376 retval = mem_ap_write_atomic_u32(swjdp, address, value);
377
378 return retval;
379 }
380
381 /*
382 * Cortex-A8 implementation of Debug Programmer's Model
383 *
384 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
385 * so there's no need to poll for it before executing an instruction.
386 *
387 * NOTE that in several of these cases the "stall" mode might be useful.
388 * It'd let us queue a few operations together... prepare/finish might
389 * be the places to enable/disable that mode.
390 */
391
/* Map an arm_dpm pointer back to its enclosing Cortex-A8 state. */
static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
}
396
397 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
398 {
399 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
400 return mem_ap_write_u32(&a8->armv7a_common.dap,
401 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
402 }
403
404 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
405 uint32_t *dscr_p)
406 {
407 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
408 uint32_t dscr = DSCR_INSTR_COMP;
409 int retval;
410
411 if (dscr_p)
412 dscr = *dscr_p;
413
414 /* Wait for DTRRXfull */
415 long long then = timeval_ms();
416 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
417 retval = mem_ap_read_atomic_u32(swjdp,
418 a8->armv7a_common.debug_base + CPUDBG_DSCR,
419 &dscr);
420 if (retval != ERROR_OK)
421 return retval;
422 if (timeval_ms() > then + 1000)
423 {
424 LOG_ERROR("Timeout waiting for read dcc");
425 return ERROR_FAIL;
426 }
427 }
428
429 retval = mem_ap_read_atomic_u32(swjdp,
430 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
431 if (retval != ERROR_OK)
432 return retval;
433 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
434
435 if (dscr_p)
436 *dscr_p = dscr;
437
438 return retval;
439 }
440
441 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
442 {
443 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
444 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
445 uint32_t dscr;
446 int retval;
447
448 /* set up invariant: INSTR_COMP is set after ever DPM operation */
449 long long then = timeval_ms();
450 for (;;)
451 {
452 retval = mem_ap_read_atomic_u32(swjdp,
453 a8->armv7a_common.debug_base + CPUDBG_DSCR,
454 &dscr);
455 if (retval != ERROR_OK)
456 return retval;
457 if ((dscr & DSCR_INSTR_COMP) != 0)
458 break;
459 if (timeval_ms() > then + 1000)
460 {
461 LOG_ERROR("Timeout waiting for dpm prepare");
462 return ERROR_FAIL;
463 }
464 }
465
466 /* this "should never happen" ... */
467 if (dscr & DSCR_DTR_RX_FULL) {
468 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
469 /* Clear DCCRX */
470 retval = cortex_a8_exec_opcode(
471 a8->armv7a_common.armv4_5_common.target,
472 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
473 &dscr);
474 if (retval != ERROR_OK)
475 return retval;
476 }
477
478 return retval;
479 }
480
/* End a DPM transaction; currently there is nothing to undo or flush. */
static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
486
487 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
488 uint32_t opcode, uint32_t data)
489 {
490 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
491 int retval;
492 uint32_t dscr = DSCR_INSTR_COMP;
493
494 retval = cortex_a8_write_dcc(a8, data);
495 if (retval != ERROR_OK)
496 return retval;
497
498 return cortex_a8_exec_opcode(
499 a8->armv7a_common.armv4_5_common.target,
500 opcode,
501 &dscr);
502 }
503
504 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
505 uint32_t opcode, uint32_t data)
506 {
507 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
508 uint32_t dscr = DSCR_INSTR_COMP;
509 int retval;
510
511 retval = cortex_a8_write_dcc(a8, data);
512 if (retval != ERROR_OK)
513 return retval;
514
515 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
516 retval = cortex_a8_exec_opcode(
517 a8->armv7a_common.armv4_5_common.target,
518 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
519 &dscr);
520 if (retval != ERROR_OK)
521 return retval;
522
523 /* then the opcode, taking data from R0 */
524 retval = cortex_a8_exec_opcode(
525 a8->armv7a_common.armv4_5_common.target,
526 opcode,
527 &dscr);
528
529 return retval;
530 }
531
/* Issue a prefetch flush (CP15 c7, c5, 4) so that a preceding CPSR
 * modification takes effect before further instructions execute. */
static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return cortex_a8_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}
542
543 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
544 uint32_t opcode, uint32_t *data)
545 {
546 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
547 int retval;
548 uint32_t dscr = DSCR_INSTR_COMP;
549
550 /* the opcode, writing data to DCC */
551 retval = cortex_a8_exec_opcode(
552 a8->armv7a_common.armv4_5_common.target,
553 opcode,
554 &dscr);
555 if (retval != ERROR_OK)
556 return retval;
557
558 return cortex_a8_read_dcc(a8, data, &dscr);
559 }
560
561
562 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
563 uint32_t opcode, uint32_t *data)
564 {
565 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
566 uint32_t dscr = DSCR_INSTR_COMP;
567 int retval;
568
569 /* the opcode, writing data to R0 */
570 retval = cortex_a8_exec_opcode(
571 a8->armv7a_common.armv4_5_common.target,
572 opcode,
573 &dscr);
574 if (retval != ERROR_OK)
575 return retval;
576
577 /* write R0 to DCC */
578 retval = cortex_a8_exec_opcode(
579 a8->armv7a_common.armv4_5_common.target,
580 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
581 &dscr);
582 if (retval != ERROR_OK)
583 return retval;
584
585 return cortex_a8_read_dcc(a8, data, &dscr);
586 }
587
588 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
589 uint32_t addr, uint32_t control)
590 {
591 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
592 uint32_t vr = a8->armv7a_common.debug_base;
593 uint32_t cr = a8->armv7a_common.debug_base;
594 int retval;
595
596 switch (index_t) {
597 case 0 ... 15: /* breakpoints */
598 vr += CPUDBG_BVR_BASE;
599 cr += CPUDBG_BCR_BASE;
600 break;
601 case 16 ... 31: /* watchpoints */
602 vr += CPUDBG_WVR_BASE;
603 cr += CPUDBG_WCR_BASE;
604 index_t -= 16;
605 break;
606 default:
607 return ERROR_FAIL;
608 }
609 vr += 4 * index_t;
610 cr += 4 * index_t;
611
612 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
613 (unsigned) vr, (unsigned) cr);
614
615 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
616 vr, addr);
617 if (retval != ERROR_OK)
618 return retval;
619 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
620 cr, control);
621 return retval;
622 }
623
624 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
625 {
626 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
627 uint32_t cr;
628
629 switch (index_t) {
630 case 0 ... 15:
631 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
632 break;
633 case 16 ... 31:
634 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
635 index_t -= 16;
636 break;
637 default:
638 return ERROR_FAIL;
639 }
640 cr += 4 * index_t;
641
642 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
643
644 /* clear control register */
645 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
646 }
647
/* Wire the generic ARM Debug Programmer's Model vtable up to the
 * Cortex-A8 implementations above, then run the common DPM setup and
 * initialization.  Returns the result of the last step that ran. */
static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
{
	struct arm_dpm *dpm = &a8->armv7a_common.dpm;
	int retval;

	dpm->arm = &a8->armv7a_common.armv4_5_common;
	dpm->didr = didr;

	dpm->prepare = cortex_a8_dpm_prepare;
	dpm->finish = cortex_a8_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a8_bpwp_enable;
	dpm->bpwp_disable = cortex_a8_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
675
676
677 /*
678 * Cortex-A8 Run control
679 */
680
681 static int cortex_a8_poll(struct target *target)
682 {
683 int retval = ERROR_OK;
684 uint32_t dscr;
685 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
686 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
687 struct adiv5_dap *swjdp = &armv7a->dap;
688 enum target_state prev_target_state = target->state;
689 uint8_t saved_apsel = dap_ap_get_select(swjdp);
690
691 dap_ap_select(swjdp, swjdp_debugap);
692 retval = mem_ap_read_atomic_u32(swjdp,
693 armv7a->debug_base + CPUDBG_DSCR, &dscr);
694 if (retval != ERROR_OK)
695 {
696 dap_ap_select(swjdp, saved_apsel);
697 return retval;
698 }
699 cortex_a8->cpudbg_dscr = dscr;
700
701 if ((dscr & 0x3) == 0x3)
702 {
703 if (prev_target_state != TARGET_HALTED)
704 {
705 /* We have a halting debug event */
706 LOG_DEBUG("Target halted");
707 target->state = TARGET_HALTED;
708 if ((prev_target_state == TARGET_RUNNING)
709 || (prev_target_state == TARGET_RESET))
710 {
711 retval = cortex_a8_debug_entry(target);
712 if (retval != ERROR_OK)
713 return retval;
714
715 target_call_event_callbacks(target,
716 TARGET_EVENT_HALTED);
717 }
718 if (prev_target_state == TARGET_DEBUG_RUNNING)
719 {
720 LOG_DEBUG(" ");
721
722 retval = cortex_a8_debug_entry(target);
723 if (retval != ERROR_OK)
724 return retval;
725
726 target_call_event_callbacks(target,
727 TARGET_EVENT_DEBUG_HALTED);
728 }
729 }
730 }
731 else if ((dscr & 0x3) == 0x2)
732 {
733 target->state = TARGET_RUNNING;
734 }
735 else
736 {
737 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
738 target->state = TARGET_UNKNOWN;
739 }
740
741 dap_ap_select(swjdp, saved_apsel);
742
743 return retval;
744 }
745
746 static int cortex_a8_halt(struct target *target)
747 {
748 int retval = ERROR_OK;
749 uint32_t dscr;
750 struct armv7a_common *armv7a = target_to_armv7a(target);
751 struct adiv5_dap *swjdp = &armv7a->dap;
752 uint8_t saved_apsel = dap_ap_get_select(swjdp);
753 dap_ap_select(swjdp, swjdp_debugap);
754
755 /*
756 * Tell the core to be halted by writing DRCR with 0x1
757 * and then wait for the core to be halted.
758 */
759 retval = mem_ap_write_atomic_u32(swjdp,
760 armv7a->debug_base + CPUDBG_DRCR, 0x1);
761 if (retval != ERROR_OK)
762 goto out;
763
764 /*
765 * enter halting debug mode
766 */
767 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
768 if (retval != ERROR_OK)
769 goto out;
770
771 retval = mem_ap_write_atomic_u32(swjdp,
772 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
773 if (retval != ERROR_OK)
774 goto out;
775
776 long long then = timeval_ms();
777 for (;;)
778 {
779 retval = mem_ap_read_atomic_u32(swjdp,
780 armv7a->debug_base + CPUDBG_DSCR, &dscr);
781 if (retval != ERROR_OK)
782 goto out;
783 if ((dscr & DSCR_CORE_HALTED) != 0)
784 {
785 break;
786 }
787 if (timeval_ms() > then + 1000)
788 {
789 LOG_ERROR("Timeout waiting for halt");
790 return ERROR_FAIL;
791 }
792 }
793
794 target->debug_reason = DBG_REASON_DBGRQ;
795
796 out:
797 dap_ap_select(swjdp, saved_apsel);
798 return retval;
799 }
800
801 static int cortex_a8_resume(struct target *target, int current,
802 uint32_t address, int handle_breakpoints, int debug_execution)
803 {
804 struct armv7a_common *armv7a = target_to_armv7a(target);
805 struct arm *armv4_5 = &armv7a->armv4_5_common;
806 struct adiv5_dap *swjdp = &armv7a->dap;
807 int retval;
808
809 // struct breakpoint *breakpoint = NULL;
810 uint32_t resume_pc, dscr;
811
812 uint8_t saved_apsel = dap_ap_get_select(swjdp);
813 dap_ap_select(swjdp, swjdp_debugap);
814
815 if (!debug_execution)
816 target_free_all_working_areas(target);
817
818 #if 0
819 if (debug_execution)
820 {
821 /* Disable interrupts */
822 /* We disable interrupts in the PRIMASK register instead of
823 * masking with C_MASKINTS,
824 * This is probably the same issue as Cortex-M3 Errata 377493:
825 * C_MASKINTS in parallel with disabled interrupts can cause
826 * local faults to not be taken. */
827 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
828 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
829 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
830
831 /* Make sure we are in Thumb mode */
832 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
833 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
834 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
835 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
836 }
837 #endif
838
839 /* current = 1: continue on current pc, otherwise continue at <address> */
840 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
841 if (!current)
842 resume_pc = address;
843
844 /* Make sure that the Armv7 gdb thumb fixups does not
845 * kill the return address
846 */
847 switch (armv4_5->core_state)
848 {
849 case ARM_STATE_ARM:
850 resume_pc &= 0xFFFFFFFC;
851 break;
852 case ARM_STATE_THUMB:
853 case ARM_STATE_THUMB_EE:
854 /* When the return address is loaded into PC
855 * bit 0 must be 1 to stay in Thumb state
856 */
857 resume_pc |= 0x1;
858 break;
859 case ARM_STATE_JAZELLE:
860 LOG_ERROR("How do I resume into Jazelle state??");
861 return ERROR_FAIL;
862 }
863 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
864 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
865 armv4_5->pc->dirty = 1;
866 armv4_5->pc->valid = 1;
867
868 retval = cortex_a8_restore_context(target, handle_breakpoints);
869 if (retval != ERROR_OK)
870 return retval;
871
872 #if 0
873 /* the front-end may request us not to handle breakpoints */
874 if (handle_breakpoints)
875 {
876 /* Single step past breakpoint at current address */
877 if ((breakpoint = breakpoint_find(target, resume_pc)))
878 {
879 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
880 cortex_m3_unset_breakpoint(target, breakpoint);
881 cortex_m3_single_step_core(target);
882 cortex_m3_set_breakpoint(target, breakpoint);
883 }
884 }
885
886 #endif
887 /* Restart core and wait for it to be started
888 * NOTE: this clears DSCR_ITR_EN and other bits.
889 *
890 * REVISIT: for single stepping, we probably want to
891 * disable IRQs by default, with optional override...
892 */
893 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
894 if (retval != ERROR_OK)
895 return retval;
896
897 long long then = timeval_ms();
898 for (;;)
899 {
900 retval = mem_ap_read_atomic_u32(swjdp,
901 armv7a->debug_base + CPUDBG_DSCR, &dscr);
902 if (retval != ERROR_OK)
903 return retval;
904 if ((dscr & DSCR_CORE_RESTARTED) != 0)
905 break;
906 if (timeval_ms() > then + 1000)
907 {
908 LOG_ERROR("Timeout waiting for resume");
909 return ERROR_FAIL;
910 }
911 }
912
913 target->debug_reason = DBG_REASON_NOTHALTED;
914 target->state = TARGET_RUNNING;
915
916 /* registers are now invalid */
917 register_cache_invalidate(armv4_5->core_cache);
918
919 if (!debug_execution)
920 {
921 target->state = TARGET_RUNNING;
922 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
923 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
924 }
925 else
926 {
927 target->state = TARGET_DEBUG_RUNNING;
928 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
929 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
930 }
931
932 dap_ap_select(swjdp, saved_apsel);
933
934 return ERROR_OK;
935 }
936
/* Called once the core has halted: re-enables ITR execution, records
 * the debug reason (and WFAR for watchpoints), and loads the register
 * file into the cache via either the DPM (slow path) or a work-area
 * memory dump (fast path).  Finishes by running post_debug_entry. */
static int cortex_a8_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *swjdp = &armv7a->dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a8->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
	{
		/* Slow path: one DPM transaction per register. */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	}
	else
	{
		/* Fast path: dump r0..r15 to the work area in one STMIA. */
		dap_ap_select(swjdp, swjdp_memoryap);
		retval = cortex_a8_read_regs_through_mem(target,
				regfile_working_area->address, regfile);
		/* NOTE(review): this reselects swjdp_memoryap even though
		 * read_regs_through_mem already left swjdp_debugap selected
		 * and the coreregister read below accesses debug registers;
		 * looks like it should be swjdp_debugap -- confirm before
		 * changing. */
		dap_ap_select(swjdp, swjdp_memoryap);
		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
		{
			return retval;
		}

		/* read Current PSR */
		retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
		if (retval != ERROR_OK)
			return retval;
		dap_ap_select(swjdp, swjdp_debugap);
		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(armv4_5, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++)
		{
			reg = arm_reg_current(armv4_5, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: undo the pipeline offset added
		 * to the PC sampled at debug entry. */
		if (cpsr & (1 << 5))
		{
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		}
		else
		{
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = armv4_5->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry)
		armv7a->post_debug_entry(target);

	return retval;
}
1063
/* Runs after debug entry: caches the CP15 system control register,
 * identifies the cache geometry once, and derives the MMU / D-cache /
 * I-cache enable flags from the control register. */
static void cortex_a8_post_debug_entry(struct target *target)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	/* NOTE(review): retval is assigned but never checked anywhere in
	 * this function; on a DAP error the cached values below would be
	 * stale -- confirm whether errors should propagate. */
	retval = armv7a->armv4_5_common.mrc(target, 15,
			0, 0,   /* op1, op2 */
			1, 0,   /* CRn, CRm */
			&cortex_a8->cp15_control_reg);
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);

	/* Identify the cache only once (ctype == -1 means "unknown"). */
	if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 1,   /* op1, op2 */
				0, 0,   /* CRn, CRm */
				&cache_type_reg);
		LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);

		/* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A8 */
		armv4_5_identify_cache(cache_type_reg,
				&armv7a->armv4_5_mmu.armv4_5_cache);
	}

	/* Control register bit 0: MMU enable, bit 2: D/unified cache
	 * enable, bit 12: I-cache enable. */
	armv7a->armv4_5_mmu.mmu_enabled =
		(cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
		(cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
		(cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;


}
1102
1103 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1104 int handle_breakpoints)
1105 {
1106 struct armv7a_common *armv7a = target_to_armv7a(target);
1107 struct arm *armv4_5 = &armv7a->armv4_5_common;
1108 struct breakpoint *breakpoint = NULL;
1109 struct breakpoint stepbreakpoint;
1110 struct reg *r;
1111 int retval;
1112
1113 if (target->state != TARGET_HALTED)
1114 {
1115 LOG_WARNING("target not halted");
1116 return ERROR_TARGET_NOT_HALTED;
1117 }
1118
1119 /* current = 1: continue on current pc, otherwise continue at <address> */
1120 r = armv4_5->pc;
1121 if (!current)
1122 {
1123 buf_set_u32(r->value, 0, 32, address);
1124 }
1125 else
1126 {
1127 address = buf_get_u32(r->value, 0, 32);
1128 }
1129
1130 /* The front-end may request us not to handle breakpoints.
1131 * But since Cortex-A8 uses breakpoint for single step,
1132 * we MUST handle breakpoints.
1133 */
1134 handle_breakpoints = 1;
1135 if (handle_breakpoints) {
1136 breakpoint = breakpoint_find(target, address);
1137 if (breakpoint)
1138 cortex_a8_unset_breakpoint(target, breakpoint);
1139 }
1140
1141 /* Setup single step breakpoint */
1142 stepbreakpoint.address = address;
1143 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1144 ? 2 : 4;
1145 stepbreakpoint.type = BKPT_HARD;
1146 stepbreakpoint.set = 0;
1147
1148 /* Break on IVA mismatch */
1149 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1150
1151 target->debug_reason = DBG_REASON_SINGLESTEP;
1152
1153 retval = cortex_a8_resume(target, 1, address, 0, 0);
1154 if (retval != ERROR_OK)
1155 return retval;
1156
1157 long long then = timeval_ms();
1158 while (target->state != TARGET_HALTED)
1159 {
1160 retval = cortex_a8_poll(target);
1161 if (retval != ERROR_OK)
1162 return retval;
1163 if (timeval_ms() > then + 1000)
1164 {
1165 LOG_ERROR("timeout waiting for target halt");
1166 return ERROR_FAIL;
1167 }
1168 }
1169
1170 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1171
1172 target->debug_reason = DBG_REASON_BREAKPOINT;
1173
1174 if (breakpoint)
1175 cortex_a8_set_breakpoint(target, breakpoint, 0);
1176
1177 if (target->state != TARGET_HALTED)
1178 LOG_DEBUG("target stepped");
1179
1180 return ERROR_OK;
1181 }
1182
1183 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1184 {
1185 struct armv7a_common *armv7a = target_to_armv7a(target);
1186
1187 LOG_DEBUG(" ");
1188
1189 if (armv7a->pre_restore_context)
1190 armv7a->pre_restore_context(target);
1191
1192 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1193 }
1194
1195
1196 /*
1197 * Cortex-A8 Breakpoint and watchpoint functions
1198 */
1199
1200 /* Setup hardware Breakpoint Register Pair */
1201 static int cortex_a8_set_breakpoint(struct target *target,
1202 struct breakpoint *breakpoint, uint8_t matchmode)
1203 {
1204 int retval;
1205 int brp_i=0;
1206 uint32_t control;
1207 uint8_t byte_addr_select = 0x0F;
1208 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1209 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1210 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1211
1212 if (breakpoint->set)
1213 {
1214 LOG_WARNING("breakpoint already set");
1215 return ERROR_OK;
1216 }
1217
1218 if (breakpoint->type == BKPT_HARD)
1219 {
1220 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1221 brp_i++ ;
1222 if (brp_i >= cortex_a8->brp_num)
1223 {
1224 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1225 return ERROR_FAIL;
1226 }
1227 breakpoint->set = brp_i + 1;
1228 if (breakpoint->length == 2)
1229 {
1230 byte_addr_select = (3 << (breakpoint->address & 0x02));
1231 }
1232 control = ((matchmode & 0x7) << 20)
1233 | (byte_addr_select << 5)
1234 | (3 << 1) | 1;
1235 brp_list[brp_i].used = 1;
1236 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1237 brp_list[brp_i].control = control;
1238 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1239 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1240 brp_list[brp_i].value);
1241 if (retval != ERROR_OK)
1242 return retval;
1243 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1244 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1245 brp_list[brp_i].control);
1246 if (retval != ERROR_OK)
1247 return retval;
1248 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1249 brp_list[brp_i].control,
1250 brp_list[brp_i].value);
1251 }
1252 else if (breakpoint->type == BKPT_SOFT)
1253 {
1254 uint8_t code[4];
1255 if (breakpoint->length == 2)
1256 {
1257 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1258 }
1259 else
1260 {
1261 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1262 }
1263 retval = target->type->read_memory(target,
1264 breakpoint->address & 0xFFFFFFFE,
1265 breakpoint->length, 1,
1266 breakpoint->orig_instr);
1267 if (retval != ERROR_OK)
1268 return retval;
1269 retval = target->type->write_memory(target,
1270 breakpoint->address & 0xFFFFFFFE,
1271 breakpoint->length, 1, code);
1272 if (retval != ERROR_OK)
1273 return retval;
1274 breakpoint->set = 0x11; /* Any nice value but 0 */
1275 }
1276
1277 return ERROR_OK;
1278 }
1279
1280 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1281 {
1282 int retval;
1283 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1284 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1285 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1286
1287 if (!breakpoint->set)
1288 {
1289 LOG_WARNING("breakpoint not set");
1290 return ERROR_OK;
1291 }
1292
1293 if (breakpoint->type == BKPT_HARD)
1294 {
1295 int brp_i = breakpoint->set - 1;
1296 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1297 {
1298 LOG_DEBUG("Invalid BRP number in breakpoint");
1299 return ERROR_OK;
1300 }
1301 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1302 brp_list[brp_i].control, brp_list[brp_i].value);
1303 brp_list[brp_i].used = 0;
1304 brp_list[brp_i].value = 0;
1305 brp_list[brp_i].control = 0;
1306 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1307 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1308 brp_list[brp_i].control);
1309 if (retval != ERROR_OK)
1310 return retval;
1311 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1312 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1313 brp_list[brp_i].value);
1314 if (retval != ERROR_OK)
1315 return retval;
1316 }
1317 else
1318 {
1319 /* restore original instruction (kept in target endianness) */
1320 if (breakpoint->length == 4)
1321 {
1322 retval = target->type->write_memory(target,
1323 breakpoint->address & 0xFFFFFFFE,
1324 4, 1, breakpoint->orig_instr);
1325 if (retval != ERROR_OK)
1326 return retval;
1327 }
1328 else
1329 {
1330 retval = target->type->write_memory(target,
1331 breakpoint->address & 0xFFFFFFFE,
1332 2, 1, breakpoint->orig_instr);
1333 if (retval != ERROR_OK)
1334 return retval;
1335 }
1336 }
1337 breakpoint->set = 0;
1338
1339 return ERROR_OK;
1340 }
1341
1342 static int cortex_a8_add_breakpoint(struct target *target,
1343 struct breakpoint *breakpoint)
1344 {
1345 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1346
1347 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1348 {
1349 LOG_INFO("no hardware breakpoint available");
1350 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1351 }
1352
1353 if (breakpoint->type == BKPT_HARD)
1354 cortex_a8->brp_num_available--;
1355 cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1356
1357 return ERROR_OK;
1358 }
1359
1360 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1361 {
1362 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1363
1364 #if 0
1365 /* It is perfectly possible to remove breakpoints while the target is running */
1366 if (target->state != TARGET_HALTED)
1367 {
1368 LOG_WARNING("target not halted");
1369 return ERROR_TARGET_NOT_HALTED;
1370 }
1371 #endif
1372
1373 if (breakpoint->set)
1374 {
1375 cortex_a8_unset_breakpoint(target, breakpoint);
1376 if (breakpoint->type == BKPT_HARD)
1377 cortex_a8->brp_num_available++ ;
1378 }
1379
1380
1381 return ERROR_OK;
1382 }
1383
1384
1385
1386 /*
1387 * Cortex-A8 Reset functions
1388 */
1389
1390 static int cortex_a8_assert_reset(struct target *target)
1391 {
1392 struct armv7a_common *armv7a = target_to_armv7a(target);
1393
1394 LOG_DEBUG(" ");
1395
1396 /* FIXME when halt is requested, make it work somehow... */
1397
1398 /* Issue some kind of warm reset. */
1399 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1400 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1401 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1402 /* REVISIT handle "pulls" cases, if there's
1403 * hardware that needs them to work.
1404 */
1405 jtag_add_reset(0, 1);
1406 } else {
1407 LOG_ERROR("%s: how to reset?", target_name(target));
1408 return ERROR_FAIL;
1409 }
1410
1411 /* registers are now invalid */
1412 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1413
1414 target->state = TARGET_RESET;
1415
1416 return ERROR_OK;
1417 }
1418
1419 static int cortex_a8_deassert_reset(struct target *target)
1420 {
1421 int retval;
1422
1423 LOG_DEBUG(" ");
1424
1425 /* be certain SRST is off */
1426 jtag_add_reset(0, 0);
1427
1428 retval = cortex_a8_poll(target);
1429 if (retval != ERROR_OK)
1430 return retval;
1431
1432 if (target->reset_halt) {
1433 if (target->state != TARGET_HALTED) {
1434 LOG_WARNING("%s: ran after reset and before halt ...",
1435 target_name(target));
1436 if ((retval = target_halt(target)) != ERROR_OK)
1437 return retval;
1438 }
1439 }
1440
1441 return ERROR_OK;
1442 }
1443
1444 /*
1445 * Cortex-A8 Memory access
1446 *
1447 * This is same Cortex M3 but we must also use the correct
1448 * ap number for every access.
1449 */
1450
1451 static int cortex_a8_read_phys_memory(struct target *target,
1452 uint32_t address, uint32_t size,
1453 uint32_t count, uint8_t *buffer)
1454 {
1455 struct armv7a_common *armv7a = target_to_armv7a(target);
1456 struct adiv5_dap *swjdp = &armv7a->dap;
1457 int retval = ERROR_INVALID_ARGUMENTS;
1458
1459 /* cortex_a8 handles unaligned memory access */
1460
1461 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1462 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1463 if (count && buffer) {
1464 switch (size) {
1465 case 4:
1466 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1467 break;
1468 case 2:
1469 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1470 break;
1471 case 1:
1472 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1473 break;
1474 }
1475 }
1476
1477 return retval;
1478 }
1479
1480 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1481 uint32_t size, uint32_t count, uint8_t *buffer)
1482 {
1483 int enabled = 0;
1484 uint32_t virt, phys;
1485 int retval;
1486
1487 /* cortex_a8 handles unaligned memory access */
1488
1489 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1490 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1491 retval = cortex_a8_mmu(target, &enabled);
1492 if (retval != ERROR_OK)
1493 return retval;
1494
1495 if(enabled)
1496 {
1497 virt = address;
1498 retval = cortex_a8_virt2phys(target, virt, &phys);
1499 if (retval != ERROR_OK)
1500 return retval;
1501
1502 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1503 address = phys;
1504 }
1505
1506 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1507 }
1508
/* Write to a physical address through the memory AP, bypassing MMU
 * translation, then invalidate any enabled caches over the written
 * range so the core sees the new contents.  Returns
 * ERROR_INVALID_ARGUMENTS for an unsupported size or an empty request.
 */
static int cortex_a8_write_phys_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;
	int retval = ERROR_INVALID_ARGUMENTS;

	// ??? dap_ap_select(swjdp, swjdp_memoryap);

	LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
	if (count && buffer) {
		/* dispatch on access width; retval keeps its
		 * ERROR_INVALID_ARGUMENTS default for any other size */
		switch (size) {
		case 4:
			retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
			break;
		case 2:
			retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
			break;
		case 1:
			retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
			break;
		}
	}

	/* REVISIT this op is generic ARMv7-A/R stuff */
	/* cache maintenance needs the DPM, hence only while halted */
	if (retval == ERROR_OK && target->state == TARGET_HALTED)
	{
		struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A8 has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
		{
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 * MCR p15, 0, r0, c7, c5, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* invalidate D-Cache */
		if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
		{
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 * MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* NOTE(review): the early returns above skip dpm->finish(),
		 * leaving the DPM "prepared" on cache-maintenance failure —
		 * confirm whether finish() should run on those paths too */
		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}
1593
1594 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1595 uint32_t size, uint32_t count, uint8_t *buffer)
1596 {
1597 int enabled = 0;
1598 uint32_t virt, phys;
1599 int retval;
1600
1601 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1602
1603 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1604 retval = cortex_a8_mmu(target, &enabled);
1605 if (retval != ERROR_OK)
1606 return retval;
1607 if(enabled)
1608 {
1609 virt = address;
1610 retval = cortex_a8_virt2phys(target, virt, &phys);
1611 if (retval != ERROR_OK)
1612 return retval;
1613 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1614 address = phys;
1615 }
1616
1617 return cortex_a8_write_phys_memory(target, address, size,
1618 count, buffer);
1619 }
1620
/* target_type bulk_write_memory hook.  There is no fast download path
 * on this target, so simply forward to the normal word-sized write.
 */
static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return cortex_a8_write_memory(target, address, 4, count, buffer);
}
1626
1627
/* Read one data byte plus the control byte from the target's debug
 * communications channel (DCC).
 * The real implementation is stubbed out with #if 0, so this currently
 * leaves *value and *ctrl untouched and always reports ERROR_OK —
 * callers must pre-initialize their data/ctrl variables (as
 * cortex_a8_handle_target_request() does).
 */
static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
{
#if 0
	u16 dcrdr;

	mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	*ctrl = (uint8_t)dcrdr;
	*value = (uint8_t)(dcrdr >> 8);

	LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);

	/* write ack back to software dcc register
	 * signify we have read data */
	if (dcrdr & (1 << 0))
	{
		dcrdr = 0;
		mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	}
#endif
	return ERROR_OK;
}
1649
1650
1651 static int cortex_a8_handle_target_request(void *priv)
1652 {
1653 struct target *target = priv;
1654 struct armv7a_common *armv7a = target_to_armv7a(target);
1655 struct adiv5_dap *swjdp = &armv7a->dap;
1656 int retval;
1657
1658 if (!target_was_examined(target))
1659 return ERROR_OK;
1660 if (!target->dbg_msg_enabled)
1661 return ERROR_OK;
1662
1663 if (target->state == TARGET_RUNNING)
1664 {
1665 uint8_t data = 0;
1666 uint8_t ctrl = 0;
1667
1668 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1669 if (retval != ERROR_OK)
1670 return retval;
1671
1672 /* check if we have data */
1673 if (ctrl & (1 << 0))
1674 {
1675 uint32_t request;
1676
1677 /* we assume target is quick enough */
1678 request = data;
1679 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1680 if (retval != ERROR_OK)
1681 return retval;
1682 request |= (data << 8);
1683 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1684 if (retval != ERROR_OK)
1685 return retval;
1686 request |= (data << 16);
1687 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1688 if (retval != ERROR_OK)
1689 return retval;
1690 request |= (data << 24);
1691 target_request(target, request);
1692 }
1693 }
1694
1695 return ERROR_OK;
1696 }
1697
1698 /*
1699 * Cortex-A8 target information and configuration
1700 */
1701
1702 static int cortex_a8_examine_first(struct target *target)
1703 {
1704 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1705 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1706 struct adiv5_dap *swjdp = &armv7a->dap;
1707 int i;
1708 int retval = ERROR_OK;
1709 uint32_t didr, ctypr, ttypr, cpuid;
1710
1711 /* stop assuming this is an OMAP! */
1712 LOG_DEBUG("TODO - autoconfigure");
1713
1714 /* Here we shall insert a proper ROM Table scan */
1715 armv7a->debug_base = OMAP3530_DEBUG_BASE;
1716
1717 /* We do one extra read to ensure DAP is configured,
1718 * we call ahbap_debugport_init(swjdp) instead
1719 */
1720 retval = ahbap_debugport_init(swjdp);
1721 if (retval != ERROR_OK)
1722 return retval;
1723
1724 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1725 if (retval != ERROR_OK)
1726 return retval;
1727
1728 if ((retval = mem_ap_read_atomic_u32(swjdp,
1729 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1730 {
1731 LOG_DEBUG("Examine %s failed", "CPUID");
1732 return retval;
1733 }
1734
1735 if ((retval = mem_ap_read_atomic_u32(swjdp,
1736 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1737 {
1738 LOG_DEBUG("Examine %s failed", "CTYPR");
1739 return retval;
1740 }
1741
1742 if ((retval = mem_ap_read_atomic_u32(swjdp,
1743 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1744 {
1745 LOG_DEBUG("Examine %s failed", "TTYPR");
1746 return retval;
1747 }
1748
1749 if ((retval = mem_ap_read_atomic_u32(swjdp,
1750 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1751 {
1752 LOG_DEBUG("Examine %s failed", "DIDR");
1753 return retval;
1754 }
1755
1756 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1757 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1758 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1759 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1760
1761 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1762 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1763 if (retval != ERROR_OK)
1764 return retval;
1765
1766 /* Setup Breakpoint Register Pairs */
1767 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1768 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1769 cortex_a8->brp_num_available = cortex_a8->brp_num;
1770 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1771 // cortex_a8->brb_enabled = ????;
1772 for (i = 0; i < cortex_a8->brp_num; i++)
1773 {
1774 cortex_a8->brp_list[i].used = 0;
1775 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1776 cortex_a8->brp_list[i].type = BRP_NORMAL;
1777 else
1778 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1779 cortex_a8->brp_list[i].value = 0;
1780 cortex_a8->brp_list[i].control = 0;
1781 cortex_a8->brp_list[i].BRPn = i;
1782 }
1783
1784 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1785
1786 target_set_examined(target);
1787 return ERROR_OK;
1788 }
1789
1790 static int cortex_a8_examine(struct target *target)
1791 {
1792 int retval = ERROR_OK;
1793
1794 /* don't re-probe hardware after each reset */
1795 if (!target_was_examined(target))
1796 retval = cortex_a8_examine_first(target);
1797
1798 /* Configure core debug access */
1799 if (retval == ERROR_OK)
1800 retval = cortex_a8_init_debug_access(target);
1801
1802 return retval;
1803 }
1804
1805 /*
1806 * Cortex-A8 target creation and initialization
1807 */
1808
1809 static int cortex_a8_init_target(struct command_context *cmd_ctx,
1810 struct target *target)
1811 {
1812 /* examine_first() does a bunch of this */
1813 return ERROR_OK;
1814 }
1815
1816 static int cortex_a8_init_arch_info(struct target *target,
1817 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
1818 {
1819 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1820 struct arm *armv4_5 = &armv7a->armv4_5_common;
1821 struct adiv5_dap *dap = &armv7a->dap;
1822
1823 armv7a->armv4_5_common.dap = dap;
1824
1825 /* Setup struct cortex_a8_common */
1826 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
1827 armv4_5->arch_info = armv7a;
1828
1829 /* prepare JTAG information for the new target */
1830 cortex_a8->jtag_info.tap = tap;
1831 cortex_a8->jtag_info.scann_size = 4;
1832
1833 /* Leave (only) generic DAP stuff for debugport_init() */
1834 dap->jtag_info = &cortex_a8->jtag_info;
1835 dap->memaccess_tck = 80;
1836
1837 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1838 dap->tar_autoincr_block = (1 << 10);
1839
1840 cortex_a8->fast_reg_read = 0;
1841
1842 /* Set default value */
1843 cortex_a8->current_address_mode = ARM_MODE_ANY;
1844
1845 /* register arch-specific functions */
1846 armv7a->examine_debug_reason = NULL;
1847
1848 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
1849
1850 armv7a->pre_restore_context = NULL;
1851 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
1852 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
1853 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
1854 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
1855 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
1856 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
1857 armv7a->armv4_5_mmu.has_tiny_pages = 1;
1858 armv7a->armv4_5_mmu.mmu_enabled = 0;
1859
1860
1861 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
1862
1863 /* REVISIT v7a setup should be in a v7a-specific routine */
1864 arm_init_arch_info(target, armv4_5);
1865 armv7a->common_magic = ARMV7_COMMON_MAGIC;
1866
1867 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
1868
1869 return ERROR_OK;
1870 }
1871
1872 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1873 {
1874 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1875
1876 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1877 }
1878
1879 static int cortex_a8_get_ttb(struct target *target, uint32_t *result)
1880 {
1881 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1882 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1883 uint32_t ttb = 0, retval = ERROR_OK;
1884
1885 /* current_address_mode is set inside cortex_a8_virt2phys()
1886 where we can determine if address belongs to user or kernel */
1887 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1888 {
1889 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1890 retval = armv7a->armv4_5_common.mrc(target, 15,
1891 0, 1, /* op1, op2 */
1892 2, 0, /* CRn, CRm */
1893 &ttb);
1894 if (retval != ERROR_OK)
1895 return retval;
1896 }
1897 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1898 {
1899 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1900 retval = armv7a->armv4_5_common.mrc(target, 15,
1901 0, 0, /* op1, op2 */
1902 2, 0, /* CRn, CRm */
1903 &ttb);
1904 if (retval != ERROR_OK)
1905 return retval;
1906 }
1907 /* we don't know whose address is: user or kernel
1908 we assume that if we are in kernel mode then
1909 address belongs to kernel else if in user mode
1910 - to user */
1911 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1912 {
1913 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1914 retval = armv7a->armv4_5_common.mrc(target, 15,
1915 0, 1, /* op1, op2 */
1916 2, 0, /* CRn, CRm */
1917 &ttb);
1918 if (retval != ERROR_OK)
1919 return retval;
1920 }
1921 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1922 {
1923 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1924 retval = armv7a->armv4_5_common.mrc(target, 15,
1925 0, 0, /* op1, op2 */
1926 2, 0, /* CRn, CRm */
1927 &ttb);
1928 if (retval != ERROR_OK)
1929 return retval;
1930 }
1931 /* finally we don't know whose ttb to use: user or kernel */
1932 else
1933 LOG_ERROR("Don't know how to get ttb for current mode!!!");
1934
1935 ttb &= 0xffffc000;
1936
1937 *result = ttb;
1938
1939 return ERROR_OK;
1940 }
1941
1942 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
1943 int d_u_cache, int i_cache)
1944 {
1945 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1946 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1947 uint32_t cp15_control;
1948 int retval;
1949
1950 /* read cp15 control register */
1951 retval = armv7a->armv4_5_common.mrc(target, 15,
1952 0, 0, /* op1, op2 */
1953 1, 0, /* CRn, CRm */
1954 &cp15_control);
1955 if (retval != ERROR_OK)
1956 return retval;
1957
1958
1959 if (mmu)
1960 cp15_control &= ~0x1U;
1961
1962 if (d_u_cache)
1963 cp15_control &= ~0x4U;
1964
1965 if (i_cache)
1966 cp15_control &= ~0x1000U;
1967
1968 retval = armv7a->armv4_5_common.mcr(target, 15,
1969 0, 0, /* op1, op2 */
1970 1, 0, /* CRn, CRm */
1971 cp15_control);
1972 return retval;
1973 }
1974
1975 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
1976 int d_u_cache, int i_cache)
1977 {
1978 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1979 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1980 uint32_t cp15_control;
1981 int retval;
1982
1983 /* read cp15 control register */
1984 retval = armv7a->armv4_5_common.mrc(target, 15,
1985 0, 0, /* op1, op2 */
1986 1, 0, /* CRn, CRm */
1987 &cp15_control);
1988 if (retval != ERROR_OK)
1989 return retval;
1990
1991 if (mmu)
1992 cp15_control |= 0x1U;
1993
1994 if (d_u_cache)
1995 cp15_control |= 0x4U;
1996
1997 if (i_cache)
1998 cp15_control |= 0x1000U;
1999
2000 retval = armv7a->armv4_5_common.mcr(target, 15,
2001 0, 0, /* op1, op2 */
2002 1, 0, /* CRn, CRm */
2003 cp15_control);
2004 return retval;
2005 }
2006
2007
2008 static int cortex_a8_mmu(struct target *target, int *enabled)
2009 {
2010 if (target->state != TARGET_HALTED) {
2011 LOG_ERROR("%s: target not halted", __func__);
2012 return ERROR_TARGET_INVALID;
2013 }
2014
2015 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
2016 return ERROR_OK;
2017 }
2018
2019 static int cortex_a8_virt2phys(struct target *target,
2020 uint32_t virt, uint32_t *phys)
2021 {
2022 uint32_t cb;
2023 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2024 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2025 struct armv7a_common *armv7a = target_to_armv7a(target);
2026
2027 /* We assume that virtual address is separated
2028 between user and kernel in Linux style:
2029 0x00000000-0xbfffffff - User space
2030 0xc0000000-0xffffffff - Kernel space */
2031 if( virt < 0xc0000000 ) /* Linux user space */
2032 cortex_a8->current_address_mode = ARM_MODE_USR;
2033 else /* Linux kernel */
2034 cortex_a8->current_address_mode = ARM_MODE_SVC;
2035 uint32_t ret;
2036 int retval = armv4_5_mmu_translate_va(target,
2037 &armv7a->armv4_5_mmu, virt, &cb, &ret);
2038 if (retval != ERROR_OK)
2039 return retval;
2040 /* Reset the flag. We don't want someone else to use it by error */
2041 cortex_a8->current_address_mode = ARM_MODE_ANY;
2042
2043 *phys = ret;
2044 return ERROR_OK;
2045 }
2046
2047 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2048 {
2049 struct target *target = get_current_target(CMD_CTX);
2050 struct armv7a_common *armv7a = target_to_armv7a(target);
2051
2052 return armv4_5_handle_cache_info_command(CMD_CTX,
2053 &armv7a->armv4_5_mmu.armv4_5_cache);
2054 }
2055
2056
2057 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2058 {
2059 struct target *target = get_current_target(CMD_CTX);
2060 if (!target_was_examined(target))
2061 {
2062 LOG_ERROR("target not examined yet");
2063 return ERROR_FAIL;
2064 }
2065
2066 return cortex_a8_init_debug_access(target);
2067 }
2068
/* Subcommands registered under the "cortex_a8" command group. */
static const struct command_registration cortex_a8_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a8_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
	},
	{
		.name = "dbginit",
		.handler = cortex_a8_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: chains the generic ARM and ARMv7-A
 * command groups plus the Cortex-A8 specific "cortex_a8" group.
 */
static const struct command_registration cortex_a8_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a8",
		.mode = COMMAND_ANY,
		.help = "Cortex-A8 command group",
		.chain = cortex_a8_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2099
/* Method table for the "cortex_a8" target type.  Unsupported
 * capabilities are left NULL.
 */
struct target_type cortexa8_target = {
	.name = "cortex_a8",

	/* state polling and run control */
	.poll = cortex_a8_poll,
	.arch_state = armv7a_arch_state,

	.target_request_data = NULL,

	.halt = cortex_a8_halt,
	.resume = cortex_a8_resume,
	.step = cortex_a8_step,

	.assert_reset = cortex_a8_assert_reset,
	.deassert_reset = cortex_a8_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* virtual-address memory access (MMU-translated when enabled) */
	.read_memory = cortex_a8_read_memory,
	.write_memory = cortex_a8_write_memory,
	.bulk_write_memory = cortex_a8_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* hardware/software breakpoints; watchpoints not implemented */
	.add_breakpoint = cortex_a8_add_breakpoint,
	.remove_breakpoint = cortex_a8_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a8_command_handlers,
	.target_create = cortex_a8_target_create,
	.init_target = cortex_a8_init_target,
	.examine = cortex_a8_examine,

	/* physical-address access and address translation */
	.read_phys_memory = cortex_a8_read_phys_memory,
	.write_phys_memory = cortex_a8_write_phys_memory,
	.mmu = cortex_a8_mmu,
	.virt2phys = cortex_a8_virt2phys,

};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)