cortex_a8: remove dap_ap_sel calls
[openocd.git] / src / target / cortex_a8.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A8(tm) TRM, ARM DDI 0344H *
33 * *
34 ***************************************************************************/
35 #ifdef HAVE_CONFIG_H
36 #include "config.h"
37 #endif
38
39 #include "breakpoints.h"
40 #include "cortex_a8.h"
41 #include "register.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
46
47 static int cortex_a8_poll(struct target *target);
48 static int cortex_a8_debug_entry(struct target *target);
49 static int cortex_a8_restore_context(struct target *target, bool bpwp);
50 static int cortex_a8_set_breakpoint(struct target *target,
51 struct breakpoint *breakpoint, uint8_t matchmode);
52 static int cortex_a8_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
55 uint32_t *value, int regnum);
56 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
57 uint32_t value, int regnum);
58 static int cortex_a8_mmu(struct target *target, int *enabled);
59 static int cortex_a8_virt2phys(struct target *target,
60 uint32_t virt, uint32_t *phys);
61 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
62 int d_u_cache, int i_cache);
63 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
64 int d_u_cache, int i_cache);
65 static int cortex_a8_get_ttb(struct target *target, uint32_t *result);
66
67
68 /*
69 * FIXME do topology discovery using the ROM; don't
70 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
71 * cores, with different AP numbering ... don't use a #define
72 * for these numbers, use per-core armv7a state.
73 */
74 #define swjdp_memoryap 0
75 #define swjdp_debugap 1
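/*
 * With the assumption above: AP #0 (swjdp_memoryap) is the AHB-AP used for
 * bulk memory access, and AP #1 (swjdp_debugap) is the APB-AP through which
 * the memory-mapped CPU debug registers (CPUDBG_*) are reached.  Other
 * ARMv7-A designs may number and route their APs differently.
 */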
76
77 /*
78  * Cortex-A8 basic debug access; very low level, assumes state is saved
79 */
80 static int cortex_a8_init_debug_access(struct target *target)
81 {
82 struct armv7a_common *armv7a = target_to_armv7a(target);
83 struct adiv5_dap *swjdp = &armv7a->dap;
84 int retval;
85 uint32_t dummy;
86
87 LOG_DEBUG(" ");
88
89 /* Unlocking the debug registers for modification */
90 /* The debugport might be uninitialised so try twice */
91 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
92 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
93 if (retval != ERROR_OK)
94 {
95 /* try again */
96 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
97 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
98 if (retval == ERROR_OK)
99 {
100 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
101 }
102 }
103 if (retval != ERROR_OK)
104 return retval;
105 /* Clear Sticky Power Down status Bit in PRSR to enable access to
106 the registers in the Core Power Domain */
107 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
108 armv7a->debug_base + CPUDBG_PRSR, &dummy);
109 if (retval != ERROR_OK)
110 return retval;
111
112 /* Enabling of instruction execution in debug mode is done in debug_entry code */
113
114 /* Resync breakpoint registers */
115
116     /* Since this is likely called from init or reset, update target state information */
117 return cortex_a8_poll(target);
118 }
119
120 /* To reduce needless round-trips, pass in a pointer to the current
121 * DSCR value. Initialize it to zero if you just need to know the
122 * value on return from this function; or DSCR_INSTR_COMP if you
123 * happen to know that no instruction is pending.
124 */
125 static int cortex_a8_exec_opcode(struct target *target,
126 uint32_t opcode, uint32_t *dscr_p)
127 {
128 uint32_t dscr;
129 int retval;
130 struct armv7a_common *armv7a = target_to_armv7a(target);
131 struct adiv5_dap *swjdp = &armv7a->dap;
132
133 dscr = dscr_p ? *dscr_p : 0;
134
135 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
136
137 /* Wait for InstrCompl bit to be set */
138 long long then = timeval_ms();
139 while ((dscr & DSCR_INSTR_COMP) == 0)
140 {
141 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
142 armv7a->debug_base + CPUDBG_DSCR, &dscr);
143 if (retval != ERROR_OK)
144 {
145 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
146 return retval;
147 }
148 if (timeval_ms() > then + 1000)
149 {
150 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
151 return ERROR_FAIL;
152 }
153 }
154
155 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
156 armv7a->debug_base + CPUDBG_ITR, opcode);
157 if (retval != ERROR_OK)
158 return retval;
159
160 then = timeval_ms();
161 do
162 {
163 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
164 armv7a->debug_base + CPUDBG_DSCR, &dscr);
165 if (retval != ERROR_OK)
166 {
167 LOG_ERROR("Could not read DSCR register");
168 return retval;
169 }
170 if (timeval_ms() > then + 1000)
171 {
172 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
173 return ERROR_FAIL;
174 }
175 }
176 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
177
178 if (dscr_p)
179 *dscr_p = dscr;
180
181 return retval;
182 }
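/*
 * Illustrative call pattern (not part of the driver): consecutive DPM
 * operations reuse the cached DSCR value so the initial poll can be skipped,
 * e.g.
 *
 *	uint32_t dscr = DSCR_INSTR_COMP;	// known: nothing pending
 *	retval = cortex_a8_exec_opcode(target,
 *			ARMV4_5_MCR(14, 0, 0, 0, 5, 0), &dscr);
 *	// dscr now holds the value sampled after the ITR write completed
 */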
183
184 /**************************************************************************
185 Read the core registers with very few exec_opcode calls; fast, but needs a work area.
186 This can cause problems with the MMU active.
187 **************************************************************************/
188 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
189 uint32_t * regfile)
190 {
191 int retval = ERROR_OK;
192 struct armv7a_common *armv7a = target_to_armv7a(target);
193 struct adiv5_dap *swjdp = &armv7a->dap;
194
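	/* Sequence: save r0 via the DCC (it is about to be clobbered), load r0
	 * with the work-area address, then "STMIA r0, {r1-r15}" (register list
	 * 0xFFFE) dumps r1..r15 there so they can be read back in one burst
	 * over the memory AP below. */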
195 retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
196 if (retval != ERROR_OK)
197 return retval;
198 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
199 if (retval != ERROR_OK)
200 return retval;
201 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
202 if (retval != ERROR_OK)
203 return retval;
204
205 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
206 (uint8_t *)(&regfile[1]), 4*15, address);
207
208 return retval;
209 }
210
211 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
212 uint32_t *value, int regnum)
213 {
214 int retval = ERROR_OK;
215 uint8_t reg = regnum&0xFF;
216 uint32_t dscr = 0;
217 struct armv7a_common *armv7a = target_to_armv7a(target);
218 struct adiv5_dap *swjdp = &armv7a->dap;
219
220 if (reg > 17)
221 return retval;
222
223 if (reg < 15)
224 {
225 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
226 retval = cortex_a8_exec_opcode(target,
227 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
228 &dscr);
229 if (retval != ERROR_OK)
230 return retval;
231 }
232 else if (reg == 15)
233 {
234 /* "MOV r0, r15"; then move r0 to DCCTX */
235 retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
236 if (retval != ERROR_OK)
237 return retval;
238 retval = cortex_a8_exec_opcode(target,
239 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
240 &dscr);
241 if (retval != ERROR_OK)
242 return retval;
243 }
244 else
245 {
246 /* "MRS r0, CPSR" or "MRS r0, SPSR"
247 * then move r0 to DCCTX
248 */
249 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
250 if (retval != ERROR_OK)
251 return retval;
252 retval = cortex_a8_exec_opcode(target,
253 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
254 &dscr);
255 if (retval != ERROR_OK)
256 return retval;
257 }
258
259     /* Wait for DTRTXfull, then read DTRTX */
260 long long then = timeval_ms();
261 while ((dscr & DSCR_DTR_TX_FULL) == 0)
262 {
263 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
264 armv7a->debug_base + CPUDBG_DSCR, &dscr);
265 if (retval != ERROR_OK)
266 return retval;
267 if (timeval_ms() > then + 1000)
268 {
269 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
270 return ERROR_FAIL;
271 }
272 }
273
274 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
275 armv7a->debug_base + CPUDBG_DTRTX, value);
276 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
277
278 return retval;
279 }
280
281 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
282 uint32_t value, int regnum)
283 {
284 int retval = ERROR_OK;
285 uint8_t Rd = regnum&0xFF;
286 uint32_t dscr;
287 struct armv7a_common *armv7a = target_to_armv7a(target);
288 struct adiv5_dap *swjdp = &armv7a->dap;
289
290 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
291
292 /* Check that DCCRX is not full */
293 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
294 armv7a->debug_base + CPUDBG_DSCR, &dscr);
295 if (retval != ERROR_OK)
296 return retval;
297 if (dscr & DSCR_DTR_RX_FULL)
298 {
299 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
300             /* Clear DCCRX by reading it into r0: MRC p14, 0, r0, c0, c5, 0, opcode 0xEE100E15 */
301 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
302 &dscr);
303 if (retval != ERROR_OK)
304 return retval;
305 }
306
307 if (Rd > 17)
308 return retval;
309
310 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
311 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
312 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
313 armv7a->debug_base + CPUDBG_DTRRX, value);
314 if (retval != ERROR_OK)
315 return retval;
316
317 if (Rd < 15)
318 {
319 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
320 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
321 &dscr);
322 if (retval != ERROR_OK)
323 return retval;
324 }
325 else if (Rd == 15)
326 {
327 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
328 * then "mov r15, r0"
329 */
330 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
331 &dscr);
332 if (retval != ERROR_OK)
333 return retval;
334 retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
335 if (retval != ERROR_OK)
336 return retval;
337 }
338 else
339 {
340 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
341 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
342 */
343 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
344 &dscr);
345 if (retval != ERROR_OK)
346 return retval;
347 retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
348 &dscr);
349 if (retval != ERROR_OK)
350 return retval;
351
352 /* "Prefetch flush" after modifying execution status in CPSR */
353 if (Rd == 16)
354 {
355 retval = cortex_a8_exec_opcode(target,
356 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
357 &dscr);
358 if (retval != ERROR_OK)
359 return retval;
360 }
361 }
362
363 return retval;
364 }
365
366 /* Write to memory mapped registers directly with no cache or mmu handling */
367 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
368 {
369 int retval;
370 struct armv7a_common *armv7a = target_to_armv7a(target);
371 struct adiv5_dap *swjdp = &armv7a->dap;
372
373 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap, address, value);
374
375 return retval;
376 }
377
378 /*
379 * Cortex-A8 implementation of Debug Programmer's Model
380 *
381 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
382 * so there's no need to poll for it before executing an instruction.
383 *
384 * NOTE that in several of these cases the "stall" mode might be useful.
385 * It'd let us queue a few operations together... prepare/finish might
386 * be the places to enable/disable that mode.
387 */
388
389 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
390 {
391 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
392 }
393
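/* Naming note: DTRRX is the "receive" register from the core's point of
 * view -- the debugger writes it and the core reads it with MRC p14 -- while
 * DTRTX is written by the core (MCR p14) and read by the debugger.
 * DSCR.DTRRXfull and DSCR.DTRTXfull flag pending data in each direction. */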
394 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
395 {
396 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
397 return mem_ap_sel_write_u32(&a8->armv7a_common.dap, swjdp_debugap,
398 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
399 }
400
401 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
402 uint32_t *dscr_p)
403 {
404 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
405 uint32_t dscr = DSCR_INSTR_COMP;
406 int retval;
407
408 if (dscr_p)
409 dscr = *dscr_p;
410
411     /* Wait for DTRTXfull */
412 long long then = timeval_ms();
413 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
414 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
415 a8->armv7a_common.debug_base + CPUDBG_DSCR,
416 &dscr);
417 if (retval != ERROR_OK)
418 return retval;
419 if (timeval_ms() > then + 1000)
420 {
421 LOG_ERROR("Timeout waiting for read dcc");
422 return ERROR_FAIL;
423 }
424 }
425
426 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
427 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
428 if (retval != ERROR_OK)
429 return retval;
430 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
431
432 if (dscr_p)
433 *dscr_p = dscr;
434
435 return retval;
436 }
437
438 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
439 {
440 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
441 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
442 uint32_t dscr;
443 int retval;
444
445     /* set up invariant: INSTR_COMP is set after every DPM operation */
446 long long then = timeval_ms();
447 for (;;)
448 {
449 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
450 a8->armv7a_common.debug_base + CPUDBG_DSCR,
451 &dscr);
452 if (retval != ERROR_OK)
453 return retval;
454 if ((dscr & DSCR_INSTR_COMP) != 0)
455 break;
456 if (timeval_ms() > then + 1000)
457 {
458 LOG_ERROR("Timeout waiting for dpm prepare");
459 return ERROR_FAIL;
460 }
461 }
462
463 /* this "should never happen" ... */
464 if (dscr & DSCR_DTR_RX_FULL) {
465 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
466 /* Clear DCCRX */
467 retval = cortex_a8_exec_opcode(
468 a8->armv7a_common.armv4_5_common.target,
469 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
470 &dscr);
471 if (retval != ERROR_OK)
472 return retval;
473 }
474
475 return retval;
476 }
477
478 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
479 {
480 /* REVISIT what could be done here? */
481 return ERROR_OK;
482 }
483
484 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
485 uint32_t opcode, uint32_t data)
486 {
487 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
488 int retval;
489 uint32_t dscr = DSCR_INSTR_COMP;
490
491 retval = cortex_a8_write_dcc(a8, data);
492 if (retval != ERROR_OK)
493 return retval;
494
495 return cortex_a8_exec_opcode(
496 a8->armv7a_common.armv4_5_common.target,
497 opcode,
498 &dscr);
499 }
500
501 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
502 uint32_t opcode, uint32_t data)
503 {
504 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
505 uint32_t dscr = DSCR_INSTR_COMP;
506 int retval;
507
508 retval = cortex_a8_write_dcc(a8, data);
509 if (retval != ERROR_OK)
510 return retval;
511
512 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
513 retval = cortex_a8_exec_opcode(
514 a8->armv7a_common.armv4_5_common.target,
515 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
516 &dscr);
517 if (retval != ERROR_OK)
518 return retval;
519
520 /* then the opcode, taking data from R0 */
521 retval = cortex_a8_exec_opcode(
522 a8->armv7a_common.armv4_5_common.target,
523 opcode,
524 &dscr);
525
526 return retval;
527 }
528
529 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
530 {
531 struct target *target = dpm->arm->target;
532 uint32_t dscr = DSCR_INSTR_COMP;
533
534 /* "Prefetch flush" after modifying execution status in CPSR */
535 return cortex_a8_exec_opcode(target,
536 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
537 &dscr);
538 }
539
540 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
541 uint32_t opcode, uint32_t *data)
542 {
543 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
544 int retval;
545 uint32_t dscr = DSCR_INSTR_COMP;
546
547 /* the opcode, writing data to DCC */
548 retval = cortex_a8_exec_opcode(
549 a8->armv7a_common.armv4_5_common.target,
550 opcode,
551 &dscr);
552 if (retval != ERROR_OK)
553 return retval;
554
555 return cortex_a8_read_dcc(a8, data, &dscr);
556 }
557
558
559 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
560 uint32_t opcode, uint32_t *data)
561 {
562 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
563 uint32_t dscr = DSCR_INSTR_COMP;
564 int retval;
565
566 /* the opcode, writing data to R0 */
567 retval = cortex_a8_exec_opcode(
568 a8->armv7a_common.armv4_5_common.target,
569 opcode,
570 &dscr);
571 if (retval != ERROR_OK)
572 return retval;
573
574 /* write R0 to DCC */
575 retval = cortex_a8_exec_opcode(
576 a8->armv7a_common.armv4_5_common.target,
577 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
578 &dscr);
579 if (retval != ERROR_OK)
580 return retval;
581
582 return cortex_a8_read_dcc(a8, data, &dscr);
583 }
584
585 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
586 uint32_t addr, uint32_t control)
587 {
588 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
589 uint32_t vr = a8->armv7a_common.debug_base;
590 uint32_t cr = a8->armv7a_common.debug_base;
591 int retval;
592
593 switch (index_t) {
594 case 0 ... 15: /* breakpoints */
595 vr += CPUDBG_BVR_BASE;
596 cr += CPUDBG_BCR_BASE;
597 break;
598 case 16 ... 31: /* watchpoints */
599 vr += CPUDBG_WVR_BASE;
600 cr += CPUDBG_WCR_BASE;
601 index_t -= 16;
602 break;
603 default:
604 return ERROR_FAIL;
605 }
606 vr += 4 * index_t;
607 cr += 4 * index_t;
608
609 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
610 (unsigned) vr, (unsigned) cr);
611
612 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
613 vr, addr);
614 if (retval != ERROR_OK)
615 return retval;
616 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
617 cr, control);
618 return retval;
619 }
620
621 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
622 {
623 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
624 uint32_t cr;
625
626 switch (index_t) {
627 case 0 ... 15:
628 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
629 break;
630 case 16 ... 31:
631 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
632 index_t -= 16;
633 break;
634 default:
635 return ERROR_FAIL;
636 }
637 cr += 4 * index_t;
638
639 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
640
641 /* clear control register */
642 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
643 }
644
645 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
646 {
647 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
648 int retval;
649
650 dpm->arm = &a8->armv7a_common.armv4_5_common;
651 dpm->didr = didr;
652
653 dpm->prepare = cortex_a8_dpm_prepare;
654 dpm->finish = cortex_a8_dpm_finish;
655
656 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
657 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
658 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
659
660 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
661 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
662
663 dpm->bpwp_enable = cortex_a8_bpwp_enable;
664 dpm->bpwp_disable = cortex_a8_bpwp_disable;
665
666 retval = arm_dpm_setup(dpm);
667 if (retval == ERROR_OK)
668 retval = arm_dpm_initialize(dpm);
669
670 return retval;
671 }
672
673
674 /*
675 * Cortex-A8 Run control
676 */
677
678 static int cortex_a8_poll(struct target *target)
679 {
680 int retval = ERROR_OK;
681 uint32_t dscr;
682 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
683 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
684 struct adiv5_dap *swjdp = &armv7a->dap;
685 enum target_state prev_target_state = target->state;
686
687 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
688 armv7a->debug_base + CPUDBG_DSCR, &dscr);
689 if (retval != ERROR_OK)
690 {
691 return retval;
692 }
693 cortex_a8->cpudbg_dscr = dscr;
694
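	/* DSCR[1:0] encode the core's run state (see the A8 TRM): bit 0 is
	 * "core halted", bit 1 is "core restarted", so 0x3 means halted in
	 * debug state and 0x2 means running. */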
695 if ((dscr & 0x3) == 0x3)
696 {
697 if (prev_target_state != TARGET_HALTED)
698 {
699 /* We have a halting debug event */
700 LOG_DEBUG("Target halted");
701 target->state = TARGET_HALTED;
702 if ((prev_target_state == TARGET_RUNNING)
703 || (prev_target_state == TARGET_RESET))
704 {
705 retval = cortex_a8_debug_entry(target);
706 if (retval != ERROR_OK)
707 return retval;
708
709 target_call_event_callbacks(target,
710 TARGET_EVENT_HALTED);
711 }
712 if (prev_target_state == TARGET_DEBUG_RUNNING)
713 {
714 LOG_DEBUG(" ");
715
716 retval = cortex_a8_debug_entry(target);
717 if (retval != ERROR_OK)
718 return retval;
719
720 target_call_event_callbacks(target,
721 TARGET_EVENT_DEBUG_HALTED);
722 }
723 }
724 }
725 else if ((dscr & 0x3) == 0x2)
726 {
727 target->state = TARGET_RUNNING;
728 }
729 else
730 {
731 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
732 target->state = TARGET_UNKNOWN;
733 }
734
735 return retval;
736 }
737
738 static int cortex_a8_halt(struct target *target)
739 {
740 int retval = ERROR_OK;
741 uint32_t dscr;
742 struct armv7a_common *armv7a = target_to_armv7a(target);
743 struct adiv5_dap *swjdp = &armv7a->dap;
744
745 /*
746 * Tell the core to be halted by writing DRCR with 0x1
747 * and then wait for the core to be halted.
748 */
749 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
750 armv7a->debug_base + CPUDBG_DRCR, 0x1);
751 if (retval != ERROR_OK)
752 return retval;
753
754 /*
755 * enter halting debug mode
756 */
757 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
758 armv7a->debug_base + CPUDBG_DSCR, &dscr);
759 if (retval != ERROR_OK)
760 return retval;
761
762 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
763 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
764 if (retval != ERROR_OK)
765 return retval;
766
767 long long then = timeval_ms();
768 for (;;)
769 {
770 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
771 armv7a->debug_base + CPUDBG_DSCR, &dscr);
772 if (retval != ERROR_OK)
773 return retval;
774 if ((dscr & DSCR_CORE_HALTED) != 0)
775 {
776 break;
777 }
778 if (timeval_ms() > then + 1000)
779 {
780 LOG_ERROR("Timeout waiting for halt");
781 return ERROR_FAIL;
782 }
783 }
784
785 target->debug_reason = DBG_REASON_DBGRQ;
786
787 return ERROR_OK;
788 }
789
790 static int cortex_a8_resume(struct target *target, int current,
791 uint32_t address, int handle_breakpoints, int debug_execution)
792 {
793 struct armv7a_common *armv7a = target_to_armv7a(target);
794 struct arm *armv4_5 = &armv7a->armv4_5_common;
795 struct adiv5_dap *swjdp = &armv7a->dap;
796 int retval;
797
798 // struct breakpoint *breakpoint = NULL;
799 uint32_t resume_pc, dscr;
800
801 if (!debug_execution)
802 target_free_all_working_areas(target);
803
804 #if 0
805 if (debug_execution)
806 {
807 /* Disable interrupts */
808 /* We disable interrupts in the PRIMASK register instead of
809 * masking with C_MASKINTS,
810 * This is probably the same issue as Cortex-M3 Errata 377493:
811 * C_MASKINTS in parallel with disabled interrupts can cause
812 * local faults to not be taken. */
813 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
814 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
815 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
816
817 /* Make sure we are in Thumb mode */
818 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
819 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
820 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
821 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
822 }
823 #endif
824
825 /* current = 1: continue on current pc, otherwise continue at <address> */
826 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
827 if (!current)
828 resume_pc = address;
829
830     /* Make sure that the ARMv7 GDB Thumb fixups do not
831 * kill the return address
832 */
833 switch (armv4_5->core_state)
834 {
835 case ARM_STATE_ARM:
836 resume_pc &= 0xFFFFFFFC;
837 break;
838 case ARM_STATE_THUMB:
839 case ARM_STATE_THUMB_EE:
840 /* When the return address is loaded into PC
841 * bit 0 must be 1 to stay in Thumb state
842 */
843 resume_pc |= 0x1;
844 break;
845 case ARM_STATE_JAZELLE:
846 LOG_ERROR("How do I resume into Jazelle state??");
847 return ERROR_FAIL;
848 }
849 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
850 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
851 armv4_5->pc->dirty = 1;
852 armv4_5->pc->valid = 1;
853
854 retval = cortex_a8_restore_context(target, handle_breakpoints);
855 if (retval != ERROR_OK)
856 return retval;
857
858 #if 0
859 /* the front-end may request us not to handle breakpoints */
860 if (handle_breakpoints)
861 {
862 /* Single step past breakpoint at current address */
863 if ((breakpoint = breakpoint_find(target, resume_pc)))
864 {
865 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
866 cortex_m3_unset_breakpoint(target, breakpoint);
867 cortex_m3_single_step_core(target);
868 cortex_m3_set_breakpoint(target, breakpoint);
869 }
870 }
871
872 #endif
873 /* Restart core and wait for it to be started
874 * NOTE: this clears DSCR_ITR_EN and other bits.
875 *
876 * REVISIT: for single stepping, we probably want to
877 * disable IRQs by default, with optional override...
878 */
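	/* DRCR bit 1 is the restart request; bit 0 is the halt request used
	 * by cortex_a8_halt() above. */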
879 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
880 armv7a->debug_base + CPUDBG_DRCR, 0x2);
881 if (retval != ERROR_OK)
882 return retval;
883
884 long long then = timeval_ms();
885 for (;;)
886 {
887 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
888 armv7a->debug_base + CPUDBG_DSCR, &dscr);
889 if (retval != ERROR_OK)
890 return retval;
891 if ((dscr & DSCR_CORE_RESTARTED) != 0)
892 break;
893 if (timeval_ms() > then + 1000)
894 {
895 LOG_ERROR("Timeout waiting for resume");
896 return ERROR_FAIL;
897 }
898 }
899
900 target->debug_reason = DBG_REASON_NOTHALTED;
901 target->state = TARGET_RUNNING;
902
903 /* registers are now invalid */
904 register_cache_invalidate(armv4_5->core_cache);
905
906 if (!debug_execution)
907 {
908 target->state = TARGET_RUNNING;
909 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
910 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
911 }
912 else
913 {
914 target->state = TARGET_DEBUG_RUNNING;
915 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
916 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
917 }
918
919 return ERROR_OK;
920 }
921
922 static int cortex_a8_debug_entry(struct target *target)
923 {
924 int i;
925 uint32_t regfile[16], cpsr, dscr;
926 int retval = ERROR_OK;
927 struct working_area *regfile_working_area = NULL;
928 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
929 struct armv7a_common *armv7a = target_to_armv7a(target);
930 struct arm *armv4_5 = &armv7a->armv4_5_common;
931 struct adiv5_dap *swjdp = &armv7a->dap;
932 struct reg *reg;
933
934 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
935
936 /* REVISIT surely we should not re-read DSCR !! */
937 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
938 armv7a->debug_base + CPUDBG_DSCR, &dscr);
939 if (retval != ERROR_OK)
940 return retval;
941
942 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
943 * imprecise data aborts get discarded by issuing a Data
944 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
945 */
946
947 /* Enable the ITR execution once we are in debug mode */
948 dscr |= DSCR_ITR_EN;
949 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
950 armv7a->debug_base + CPUDBG_DSCR, dscr);
951 if (retval != ERROR_OK)
952 return retval;
953
954 /* Examine debug reason */
955 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
956
957 /* save address of instruction that triggered the watchpoint? */
958 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
959 uint32_t wfar;
960
961 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
962 armv7a->debug_base + CPUDBG_WFAR,
963 &wfar);
964 if (retval != ERROR_OK)
965 return retval;
966 arm_dpm_report_wfar(&armv7a->dpm, wfar);
967 }
968
969 /* REVISIT fast_reg_read is never set ... */
970
971 /* Examine target state and mode */
972 if (cortex_a8->fast_reg_read)
973 target_alloc_working_area(target, 64, &regfile_working_area);
974
975     /* First load the registers accessible through the core debug port */
976 if (!regfile_working_area)
977 {
978 retval = arm_dpm_read_current_registers(&armv7a->dpm);
979 }
980 else
981 {
982 retval = cortex_a8_read_regs_through_mem(target,
983 regfile_working_area->address, regfile);
984
985 target_free_working_area(target, regfile_working_area);
986 if (retval != ERROR_OK)
987 {
988 return retval;
989 }
990
991 /* read Current PSR */
992 retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
993 if (retval != ERROR_OK)
994 return retval;
995
996 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
997
998 arm_set_cpsr(armv4_5, cpsr);
999
1000 /* update cache */
1001 for (i = 0; i <= ARM_PC; i++)
1002 {
1003 reg = arm_reg_current(armv4_5, i);
1004
1005 buf_set_u32(reg->value, 0, 32, regfile[i]);
1006 reg->valid = 1;
1007 reg->dirty = 0;
1008 }
1009
1010 /* Fixup PC Resume Address */
1011 if (cpsr & (1 << 5))
1012 {
1013 // T bit set for Thumb or ThumbEE state
1014 regfile[ARM_PC] -= 4;
1015 }
1016 else
1017 {
1018 // ARM state
1019 regfile[ARM_PC] -= 8;
1020 }
1021
1022 reg = armv4_5->pc;
1023 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1024 reg->dirty = reg->valid;
1025 }
1026
1027 #if 0
1028 /* TODO, Move this */
1029 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1030 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1031 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1032
1033 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1034 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1035
1036 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1037 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1038 #endif
1039
1040 /* Are we in an exception handler */
1041 // armv4_5->exception_number = 0;
1042 if (armv7a->post_debug_entry)
1043 {
1044 retval = armv7a->post_debug_entry(target);
1045 if (retval != ERROR_OK)
1046 return retval;
1047 }
1048
1049 return retval;
1050 }
1051
1052 static int cortex_a8_post_debug_entry(struct target *target)
1053 {
1054 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1055 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1056 int retval;
1057
1058 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1059 retval = armv7a->armv4_5_common.mrc(target, 15,
1060 0, 0, /* op1, op2 */
1061 1, 0, /* CRn, CRm */
1062 &cortex_a8->cp15_control_reg);
1063 if (retval != ERROR_OK)
1064 return retval;
1065 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1066
1067 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
1068 {
1069 uint32_t cache_type_reg;
1070
1071 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1072 retval = armv7a->armv4_5_common.mrc(target, 15,
1073 0, 1, /* op1, op2 */
1074 0, 0, /* CRn, CRm */
1075 &cache_type_reg);
1076 if (retval != ERROR_OK)
1077 return retval;
1078 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
1079
1080                 /* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
1081 armv4_5_identify_cache(cache_type_reg,
1082 &armv7a->armv4_5_mmu.armv4_5_cache);
1083 }
1084
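	/* SCTLR bit 0 (M) enables the MMU, bit 2 (C) the data/unified caches,
	 * and bit 12 (I) the instruction cache; mirror those into the generic
	 * armv4_5_mmu state below. */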
1085 armv7a->armv4_5_mmu.mmu_enabled =
1086 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1087 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1088 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1089 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1090 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1091
1092 return ERROR_OK;
1093 }
1094
1095 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1096 int handle_breakpoints)
1097 {
1098 struct armv7a_common *armv7a = target_to_armv7a(target);
1099 struct arm *armv4_5 = &armv7a->armv4_5_common;
1100 struct breakpoint *breakpoint = NULL;
1101 struct breakpoint stepbreakpoint;
1102 struct reg *r;
1103 int retval;
1104
1105 if (target->state != TARGET_HALTED)
1106 {
1107 LOG_WARNING("target not halted");
1108 return ERROR_TARGET_NOT_HALTED;
1109 }
1110
1111 /* current = 1: continue on current pc, otherwise continue at <address> */
1112 r = armv4_5->pc;
1113 if (!current)
1114 {
1115 buf_set_u32(r->value, 0, 32, address);
1116 }
1117 else
1118 {
1119 address = buf_get_u32(r->value, 0, 32);
1120 }
1121
1122 /* The front-end may request us not to handle breakpoints.
1123      * But since Cortex-A8 uses a breakpoint for single stepping,
1124 * we MUST handle breakpoints.
1125 */
1126 handle_breakpoints = 1;
1127 if (handle_breakpoints) {
1128 breakpoint = breakpoint_find(target, address);
1129 if (breakpoint)
1130 cortex_a8_unset_breakpoint(target, breakpoint);
1131 }
1132
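	/* Single step is implemented with a hardware breakpoint programmed
	 * for address *mismatch* (matchmode 0x4 below): the core halts as
	 * soon as it executes any instruction other than the one at
	 * 'address', i.e. right after stepping over it. */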
1133 /* Setup single step breakpoint */
1134 stepbreakpoint.address = address;
1135 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1136 ? 2 : 4;
1137 stepbreakpoint.type = BKPT_HARD;
1138 stepbreakpoint.set = 0;
1139
1140 /* Break on IVA mismatch */
1141 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1142
1143 target->debug_reason = DBG_REASON_SINGLESTEP;
1144
1145 retval = cortex_a8_resume(target, 1, address, 0, 0);
1146 if (retval != ERROR_OK)
1147 return retval;
1148
1149 long long then = timeval_ms();
1150 while (target->state != TARGET_HALTED)
1151 {
1152 retval = cortex_a8_poll(target);
1153 if (retval != ERROR_OK)
1154 return retval;
1155 if (timeval_ms() > then + 1000)
1156 {
1157 LOG_ERROR("timeout waiting for target halt");
1158 return ERROR_FAIL;
1159 }
1160 }
1161
1162 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1163
1164 target->debug_reason = DBG_REASON_BREAKPOINT;
1165
1166 if (breakpoint)
1167 cortex_a8_set_breakpoint(target, breakpoint, 0);
1168
1169     if (target->state == TARGET_HALTED)
1170 LOG_DEBUG("target stepped");
1171
1172 return ERROR_OK;
1173 }
1174
1175 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1176 {
1177 struct armv7a_common *armv7a = target_to_armv7a(target);
1178
1179 LOG_DEBUG(" ");
1180
1181 if (armv7a->pre_restore_context)
1182 armv7a->pre_restore_context(target);
1183
1184 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1185 }
1186
1187
1188 /*
1189 * Cortex-A8 Breakpoint and watchpoint functions
1190 */
1191
1192 /* Setup hardware Breakpoint Register Pair */
1193 static int cortex_a8_set_breakpoint(struct target *target,
1194 struct breakpoint *breakpoint, uint8_t matchmode)
1195 {
1196 int retval;
1197 int brp_i=0;
1198 uint32_t control;
1199 uint8_t byte_addr_select = 0x0F;
1200 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1201 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1202 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1203
1204 if (breakpoint->set)
1205 {
1206 LOG_WARNING("breakpoint already set");
1207 return ERROR_OK;
1208 }
1209
1210 if (breakpoint->type == BKPT_HARD)
1211 {
1212         while ((brp_i < cortex_a8->brp_num) && brp_list[brp_i].used)
1213 brp_i++ ;
1214 if (brp_i >= cortex_a8->brp_num)
1215 {
1216 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1217 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1218 }
1219 breakpoint->set = brp_i + 1;
1220 if (breakpoint->length == 2)
1221 {
1222 byte_addr_select = (3 << (breakpoint->address & 0x02));
1223 }
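		/* BCR layout used here (see DDI 0344): bits [22:20] select the
		 * match type (0x0 = IVA match, 0x4 = IVA mismatch, used for
		 * single step), bits [8:5] are the byte address select,
		 * bits [2:1] enable matching in both privileged and user
		 * modes, and bit [0] enables the breakpoint register pair. */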
1224 control = ((matchmode & 0x7) << 20)
1225 | (byte_addr_select << 5)
1226 | (3 << 1) | 1;
1227 brp_list[brp_i].used = 1;
1228 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1229 brp_list[brp_i].control = control;
1230 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1231 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1232 brp_list[brp_i].value);
1233 if (retval != ERROR_OK)
1234 return retval;
1235 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1236 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1237 brp_list[brp_i].control);
1238 if (retval != ERROR_OK)
1239 return retval;
1240 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1241 brp_list[brp_i].control,
1242 brp_list[brp_i].value);
1243 }
1244 else if (breakpoint->type == BKPT_SOFT)
1245 {
1246 uint8_t code[4];
1247 if (breakpoint->length == 2)
1248 {
1249 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1250 }
1251 else
1252 {
1253 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1254 }
1255 retval = target->type->read_memory(target,
1256 breakpoint->address & 0xFFFFFFFE,
1257 breakpoint->length, 1,
1258 breakpoint->orig_instr);
1259 if (retval != ERROR_OK)
1260 return retval;
1261 retval = target->type->write_memory(target,
1262 breakpoint->address & 0xFFFFFFFE,
1263 breakpoint->length, 1, code);
1264 if (retval != ERROR_OK)
1265 return retval;
1266 breakpoint->set = 0x11; /* Any nice value but 0 */
1267 }
1268
1269 return ERROR_OK;
1270 }
1271
1272 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1273 {
1274 int retval;
1275 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1276 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1277 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1278
1279 if (!breakpoint->set)
1280 {
1281 LOG_WARNING("breakpoint not set");
1282 return ERROR_OK;
1283 }
1284
1285 if (breakpoint->type == BKPT_HARD)
1286 {
1287 int brp_i = breakpoint->set - 1;
1288 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1289 {
1290 LOG_DEBUG("Invalid BRP number in breakpoint");
1291 return ERROR_OK;
1292 }
1293 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1294 brp_list[brp_i].control, brp_list[brp_i].value);
1295 brp_list[brp_i].used = 0;
1296 brp_list[brp_i].value = 0;
1297 brp_list[brp_i].control = 0;
1298 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1299 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1300 brp_list[brp_i].control);
1301 if (retval != ERROR_OK)
1302 return retval;
1303 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1304 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1305 brp_list[brp_i].value);
1306 if (retval != ERROR_OK)
1307 return retval;
1308 }
1309 else
1310 {
1311 /* restore original instruction (kept in target endianness) */
1312 if (breakpoint->length == 4)
1313 {
1314 retval = target->type->write_memory(target,
1315 breakpoint->address & 0xFFFFFFFE,
1316 4, 1, breakpoint->orig_instr);
1317 if (retval != ERROR_OK)
1318 return retval;
1319 }
1320 else
1321 {
1322 retval = target->type->write_memory(target,
1323 breakpoint->address & 0xFFFFFFFE,
1324 2, 1, breakpoint->orig_instr);
1325 if (retval != ERROR_OK)
1326 return retval;
1327 }
1328 }
1329 breakpoint->set = 0;
1330
1331 return ERROR_OK;
1332 }
1333
1334 static int cortex_a8_add_breakpoint(struct target *target,
1335 struct breakpoint *breakpoint)
1336 {
1337 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1338
1339 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1340 {
1341 LOG_INFO("no hardware breakpoint available");
1342 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1343 }
1344
1345 if (breakpoint->type == BKPT_HARD)
1346 cortex_a8->brp_num_available--;
1347
1348 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1349 }
1350
1351 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1352 {
1353 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1354
1355 #if 0
1356 /* It is perfectly possible to remove breakpoints while the target is running */
1357 if (target->state != TARGET_HALTED)
1358 {
1359 LOG_WARNING("target not halted");
1360 return ERROR_TARGET_NOT_HALTED;
1361 }
1362 #endif
1363
1364 if (breakpoint->set)
1365 {
1366 cortex_a8_unset_breakpoint(target, breakpoint);
1367 if (breakpoint->type == BKPT_HARD)
1368 cortex_a8->brp_num_available++ ;
1369 }
1370
1371
1372 return ERROR_OK;
1373 }
1374
1375
1376
1377 /*
1378 * Cortex-A8 Reset functions
1379 */
1380
1381 static int cortex_a8_assert_reset(struct target *target)
1382 {
1383 struct armv7a_common *armv7a = target_to_armv7a(target);
1384
1385 LOG_DEBUG(" ");
1386
1387 /* FIXME when halt is requested, make it work somehow... */
1388
1389 /* Issue some kind of warm reset. */
1390 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1391 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1392 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1393 /* REVISIT handle "pulls" cases, if there's
1394 * hardware that needs them to work.
1395 */
1396 jtag_add_reset(0, 1);
1397 } else {
1398 LOG_ERROR("%s: how to reset?", target_name(target));
1399 return ERROR_FAIL;
1400 }
1401
1402 /* registers are now invalid */
1403 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1404
1405 target->state = TARGET_RESET;
1406
1407 return ERROR_OK;
1408 }
1409
1410 static int cortex_a8_deassert_reset(struct target *target)
1411 {
1412 int retval;
1413
1414 LOG_DEBUG(" ");
1415
1416 /* be certain SRST is off */
1417 jtag_add_reset(0, 0);
1418
1419 retval = cortex_a8_poll(target);
1420 if (retval != ERROR_OK)
1421 return retval;
1422
1423 if (target->reset_halt) {
1424 if (target->state != TARGET_HALTED) {
1425 LOG_WARNING("%s: ran after reset and before halt ...",
1426 target_name(target));
1427 if ((retval = target_halt(target)) != ERROR_OK)
1428 return retval;
1429 }
1430 }
1431
1432 return ERROR_OK;
1433 }
1434
1435 /*
1436 * Cortex-A8 Memory access
1437 *
1438  * This is the same as for Cortex-M3, but we must also use the
1439  * correct AP number for every access.
1440 */
1441
1442 static int cortex_a8_read_phys_memory(struct target *target,
1443 uint32_t address, uint32_t size,
1444 uint32_t count, uint8_t *buffer)
1445 {
1446 struct armv7a_common *armv7a = target_to_armv7a(target);
1447 struct adiv5_dap *swjdp = &armv7a->dap;
1448 int retval = ERROR_INVALID_ARGUMENTS;
1449
1450 /* cortex_a8 handles unaligned memory access */
1451
1452 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1453 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1454 if (count && buffer) {
1455 switch (size) {
1456 case 4:
1457 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
1458 buffer, 4 * count, address);
1459 break;
1460 case 2:
1461 retval = mem_ap_sel_read_buf_u16(swjdp, swjdp_memoryap,
1462 buffer, 2 * count, address);
1463 break;
1464 case 1:
1465 retval = mem_ap_sel_read_buf_u8(swjdp, swjdp_memoryap,
1466 buffer, count, address);
1467 break;
1468 }
1469 }
1470
1471 return retval;
1472 }
1473
1474 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1475 uint32_t size, uint32_t count, uint8_t *buffer)
1476 {
1477 int enabled = 0;
1478 uint32_t virt, phys;
1479 int retval;
1480
1481 /* cortex_a8 handles unaligned memory access */
1482
1483 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1484 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1485 retval = cortex_a8_mmu(target, &enabled);
1486 if (retval != ERROR_OK)
1487 return retval;
1488
1489 if(enabled)
1490 {
1491 virt = address;
1492 retval = cortex_a8_virt2phys(target, virt, &phys);
1493 if (retval != ERROR_OK)
1494 return retval;
1495
1496 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1497 address = phys;
1498 }
1499
1500 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1501 }
1502
1503 static int cortex_a8_write_phys_memory(struct target *target,
1504 uint32_t address, uint32_t size,
1505 uint32_t count, uint8_t *buffer)
1506 {
1507 struct armv7a_common *armv7a = target_to_armv7a(target);
1508 struct adiv5_dap *swjdp = &armv7a->dap;
1509 int retval = ERROR_INVALID_ARGUMENTS;
1510
1511 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1512
1513 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1514 if (count && buffer) {
1515 switch (size) {
1516 case 4:
1517 retval = mem_ap_sel_write_buf_u32(swjdp, swjdp_memoryap,
1518 buffer, 4 * count, address);
1519 break;
1520 case 2:
1521 retval = mem_ap_sel_write_buf_u16(swjdp, swjdp_memoryap,
1522 buffer, 2 * count, address);
1523 break;
1524 case 1:
1525 retval = mem_ap_sel_write_buf_u8(swjdp, swjdp_memoryap,
1526 buffer, count, address);
1527 break;
1528 }
1529 }
1530
1531 /* REVISIT this op is generic ARMv7-A/R stuff */
1532 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1533 {
1534 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1535
1536 retval = dpm->prepare(dpm);
1537 if (retval != ERROR_OK)
1538 return retval;
1539
1540 /* The Cache handling will NOT work with MMU active, the
1541 * wrong addresses will be invalidated!
1542 *
1543 * For both ICache and DCache, walk all cache lines in the
1544 * address range. Cortex-A8 has fixed 64 byte line length.
1545 *
1546 * REVISIT per ARMv7, these may trigger watchpoints ...
1547 */
1548
1549 /* invalidate I-Cache */
1550 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1551 {
1552 /* ICIMVAU - Invalidate Cache single entry
1553 * with MVA to PoU
1554 * MCR p15, 0, r0, c7, c5, 1
1555 */
1556 for (uint32_t cacheline = address;
1557 cacheline < address + size * count;
1558 cacheline += 64) {
1559 retval = dpm->instr_write_data_r0(dpm,
1560 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1561 cacheline);
1562 if (retval != ERROR_OK)
1563 return retval;
1564 }
1565 }
1566
1567 /* invalidate D-Cache */
1568 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1569 {
1570 /* DCIMVAC - Invalidate data Cache line
1571 * with MVA to PoC
1572 * MCR p15, 0, r0, c7, c6, 1
1573 */
1574 for (uint32_t cacheline = address;
1575 cacheline < address + size * count;
1576 cacheline += 64) {
1577 retval = dpm->instr_write_data_r0(dpm,
1578 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1579 cacheline);
1580 if (retval != ERROR_OK)
1581 return retval;
1582 }
1583 }
1584
1585 /* (void) */ dpm->finish(dpm);
1586 }
1587
1588 return retval;
1589 }
1590
1591 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1592 uint32_t size, uint32_t count, uint8_t *buffer)
1593 {
1594 int enabled = 0;
1595 uint32_t virt, phys;
1596 int retval;
1597
1598 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1599
1600 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1601 retval = cortex_a8_mmu(target, &enabled);
1602 if (retval != ERROR_OK)
1603 return retval;
1604
1605 if(enabled)
1606 {
1607 virt = address;
1608 retval = cortex_a8_virt2phys(target, virt, &phys);
1609 if (retval != ERROR_OK)
1610 return retval;
1611 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1612 address = phys;
1613 }
1614
1615 return cortex_a8_write_phys_memory(target, address, size,
1616 count, buffer);
1617 }
1618
1619 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1620 uint32_t count, uint8_t *buffer)
1621 {
1622 return cortex_a8_write_memory(target, address, 4, count, buffer);
1623 }
1624
1625 static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1626 {
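	/* NOTE: the DCC-based target_request channel is not implemented for
	 * Cortex-A8 yet; the Cortex-M3 style DCRDR code below is compiled
	 * out, so this stub effectively reports "no data" (the caller's ctrl
	 * byte stays 0). */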
1627 #if 0
1628 u16 dcrdr;
1629
1630 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1631 *ctrl = (uint8_t)dcrdr;
1632 *value = (uint8_t)(dcrdr >> 8);
1633
1634 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1635
1636 /* write ack back to software dcc register
1637 * signify we have read data */
1638 if (dcrdr & (1 << 0))
1639 {
1640 dcrdr = 0;
1641 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1642 }
1643 #endif
1644 return ERROR_OK;
1645 }
1646
1647
1648 static int cortex_a8_handle_target_request(void *priv)
1649 {
1650 struct target *target = priv;
1651 struct armv7a_common *armv7a = target_to_armv7a(target);
1652 struct adiv5_dap *swjdp = &armv7a->dap;
1653 int retval;
1654
1655 if (!target_was_examined(target))
1656 return ERROR_OK;
1657 if (!target->dbg_msg_enabled)
1658 return ERROR_OK;
1659
1660 if (target->state == TARGET_RUNNING)
1661 {
1662 uint8_t data = 0;
1663 uint8_t ctrl = 0;
1664
1665 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1666 if (retval != ERROR_OK)
1667 return retval;
1668
1669 /* check if we have data */
1670 if (ctrl & (1 << 0))
1671 {
1672 uint32_t request;
1673
1674 /* we assume target is quick enough */
1675 request = data;
1676 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1677 if (retval != ERROR_OK)
1678 return retval;
1679 request |= (data << 8);
1680 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1681 if (retval != ERROR_OK)
1682 return retval;
1683 request |= (data << 16);
1684 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1685 if (retval != ERROR_OK)
1686 return retval;
1687 request |= (data << 24);
1688 target_request(target, request);
1689 }
1690 }
1691
1692 return ERROR_OK;
1693 }
1694
1695 /*
1696 * Cortex-A8 target information and configuration
1697 */
1698
1699 static int cortex_a8_examine_first(struct target *target)
1700 {
1701 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1702 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1703 struct adiv5_dap *swjdp = &armv7a->dap;
1704 int i;
1705 int retval = ERROR_OK;
1706 uint32_t didr, ctypr, ttypr, cpuid;
1707 uint32_t dbgbase, apid;
1708
1709 /* We do one extra read to ensure DAP is configured,
1710 * we call ahbap_debugport_init(swjdp) instead
1711 */
1712 retval = ahbap_debugport_init(swjdp);
1713 if (retval != ERROR_OK)
1714 return retval;
1715
1716 /* Get ROM Table base */
1717 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
1718 if (retval != ERROR_OK)
1719 return retval;
1720
1721     /* Lookup 0x15 -- processor debug unit */
1722 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
1723 &armv7a->debug_base);
1724 if (retval != ERROR_OK)
1725 return retval;
1726
1727 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1728 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1729 if (retval != ERROR_OK)
1730 return retval;
1731
1732 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1733 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1734 {
1735 LOG_DEBUG("Examine %s failed", "CPUID");
1736 return retval;
1737 }
1738
1739 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1740 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1741 {
1742 LOG_DEBUG("Examine %s failed", "CTYPR");
1743 return retval;
1744 }
1745
1746 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1747 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1748 {
1749 LOG_DEBUG("Examine %s failed", "TTYPR");
1750 return retval;
1751 }
1752
1753 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1754 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1755 {
1756 LOG_DEBUG("Examine %s failed", "DIDR");
1757 return retval;
1758 }
1759
1760 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1761 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1762 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1763 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1764
1765 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1766 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1767 if (retval != ERROR_OK)
1768 return retval;
1769
1770 /* Setup Breakpoint Register Pairs */
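	/* DIDR[27:24] holds the number of breakpoint register pairs minus
	 * one; DIDR[23:20] the number of those that support context-ID
	 * comparison, also minus one. */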
1771 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1772 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1773 cortex_a8->brp_num_available = cortex_a8->brp_num;
1774 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1775 // cortex_a8->brb_enabled = ????;
1776 for (i = 0; i < cortex_a8->brp_num; i++)
1777 {
1778 cortex_a8->brp_list[i].used = 0;
1779 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1780 cortex_a8->brp_list[i].type = BRP_NORMAL;
1781 else
1782 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1783 cortex_a8->brp_list[i].value = 0;
1784 cortex_a8->brp_list[i].control = 0;
1785 cortex_a8->brp_list[i].BRPn = i;
1786 }
1787
1788 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1789
1790 target_set_examined(target);
1791 return ERROR_OK;
1792 }
1793
1794 static int cortex_a8_examine(struct target *target)
1795 {
1796 int retval = ERROR_OK;
1797
1798 /* don't re-probe hardware after each reset */
1799 if (!target_was_examined(target))
1800 retval = cortex_a8_examine_first(target);
1801
1802 /* Configure core debug access */
1803 if (retval == ERROR_OK)
1804 retval = cortex_a8_init_debug_access(target);
1805
1806 return retval;
1807 }
1808
1809 /*
1810 * Cortex-A8 target creation and initialization
1811 */
1812
1813 static int cortex_a8_init_target(struct command_context *cmd_ctx,
1814 struct target *target)
1815 {
1816 /* examine_first() does a bunch of this */
1817 return ERROR_OK;
1818 }
1819
1820 static int cortex_a8_init_arch_info(struct target *target,
1821 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
1822 {
1823 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1824 struct arm *armv4_5 = &armv7a->armv4_5_common;
1825 struct adiv5_dap *dap = &armv7a->dap;
1826
1827 armv7a->armv4_5_common.dap = dap;
1828
1829 /* Setup struct cortex_a8_common */
1830 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
1831 armv4_5->arch_info = armv7a;
1832
1833 /* prepare JTAG information for the new target */
1834 cortex_a8->jtag_info.tap = tap;
1835 cortex_a8->jtag_info.scann_size = 4;
1836
1837 /* Leave (only) generic DAP stuff for debugport_init() */
1838 dap->jtag_info = &cortex_a8->jtag_info;
1839 dap->memaccess_tck = 80;
1840
1841 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1842 dap->tar_autoincr_block = (1 << 10);
1843
1844 cortex_a8->fast_reg_read = 0;
1845
1846 /* Set default value */
1847 cortex_a8->current_address_mode = ARM_MODE_ANY;
1848
1849 /* register arch-specific functions */
1850 armv7a->examine_debug_reason = NULL;
1851
1852 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
1853
1854 armv7a->pre_restore_context = NULL;
1855 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
1856 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
1857 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
1858 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
1859 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
1860 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
1861 armv7a->armv4_5_mmu.has_tiny_pages = 1;
1862 armv7a->armv4_5_mmu.mmu_enabled = 0;
1863
1864
1865 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
1866
1867 /* REVISIT v7a setup should be in a v7a-specific routine */
1868 arm_init_arch_info(target, armv4_5);
1869 armv7a->common_magic = ARMV7_COMMON_MAGIC;
1870
1871 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
1872
1873 return ERROR_OK;
1874 }
1875
1876 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1877 {
1878 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1879
1880 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1881 }
1882
1883 static int cortex_a8_get_ttb(struct target *target, uint32_t *result)
1884 {
1885 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1886 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1887 uint32_t ttb = 0, retval = ERROR_OK;
1888
1889 /* current_address_mode is set inside cortex_a8_virt2phys()
1890 where we can determine if address belongs to user or kernel */
1891 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1892 {
1893         /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1894 retval = armv7a->armv4_5_common.mrc(target, 15,
1895 0, 1, /* op1, op2 */
1896 2, 0, /* CRn, CRm */
1897 &ttb);
1898 if (retval != ERROR_OK)
1899 return retval;
1900 }
1901 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1902 {
1903         /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1904 retval = armv7a->armv4_5_common.mrc(target, 15,
1905 0, 0, /* op1, op2 */
1906 2, 0, /* CRn, CRm */
1907 &ttb);
1908 if (retval != ERROR_OK)
1909 return retval;
1910 }
1911 /* we don't know whose address this is (user or kernel), so
1912 assume that if the core is currently in kernel mode the
1913 address belongs to the kernel, and if it is in user mode
1914 the address belongs to user space */
1915 else if (armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1916 {
1917 /* MRC p15, 0, <Rt>, c2, c0, 1 ; Read TTBR1 */
1918 retval = armv7a->armv4_5_common.mrc(target, 15,
1919 0, 1, /* op1, op2 */
1920 2, 0, /* CRn, CRm */
1921 &ttb);
1922 if (retval != ERROR_OK)
1923 return retval;
1924 }
1925 else if (armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1926 {
1927 /* MRC p15, 0, <Rt>, c2, c0, 0 ; Read TTBR0 */
1928 retval = armv7a->armv4_5_common.mrc(target, 15,
1929 0, 0, /* op1, op2 */
1930 2, 0, /* CRn, CRm */
1931 &ttb);
1932 if (retval != ERROR_OK)
1933 return retval;
1934 }
1935 /* otherwise we have no way of knowing whose ttb to use: user or kernel */
1936 else
1937 LOG_ERROR("Don't know how to get ttb for current mode");
1938
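/*
 * Keep only the translation table base: bits [31:14] of TTBRn.  This
 * matches the 16 KiB-aligned tables used with TTBCR.N == 0 (and TTBR1
 * in general); the low TTBR bits hold memory attributes, not address.
 */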
1939 ttb &= 0xffffc000;
1940
1941 *result = ttb;
1942
1943 return ERROR_OK;
1944 }
1945
1946 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
1947 int d_u_cache, int i_cache)
1948 {
1949 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1950 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1951 uint32_t cp15_control;
1952 int retval;
1953
1954 /* read cp15 control register */
1955 retval = armv7a->armv4_5_common.mrc(target, 15,
1956 0, 0, /* op1, op2 */
1957 1, 0, /* CRn, CRm */
1958 &cp15_control);
1959 if (retval != ERROR_OK)
1960 return retval;
1961
1962
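/* SCTLR bit positions: M (MMU enable) = bit 0, C (data/unified cache
 * enable) = bit 2, I (instruction cache enable) = bit 12 */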
1963 if (mmu)
1964 cp15_control &= ~0x1U;
1965
1966 if (d_u_cache)
1967 cp15_control &= ~0x4U;
1968
1969 if (i_cache)
1970 cp15_control &= ~0x1000U;
1971
1972 retval = armv7a->armv4_5_common.mcr(target, 15,
1973 0, 0, /* op1, op2 */
1974 1, 0, /* CRn, CRm */
1975 cp15_control);
1976 return retval;
1977 }
1978
1979 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
1980 int d_u_cache, int i_cache)
1981 {
1982 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1983 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1984 uint32_t cp15_control;
1985 int retval;
1986
1987 /* read cp15 control register */
1988 retval = armv7a->armv4_5_common.mrc(target, 15,
1989 0, 0, /* op1, op2 */
1990 1, 0, /* CRn, CRm */
1991 &cp15_control);
1992 if (retval != ERROR_OK)
1993 return retval;
1994
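/* same SCTLR bits as above: M = bit 0, C = bit 2, I = bit 12 */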
1995 if (mmu)
1996 cp15_control |= 0x1U;
1997
1998 if (d_u_cache)
1999 cp15_control |= 0x4U;
2000
2001 if (i_cache)
2002 cp15_control |= 0x1000U;
2003
2004 retval = armv7a->armv4_5_common.mcr(target, 15,
2005 0, 0, /* op1, op2 */
2006 1, 0, /* CRn, CRm */
2007 cp15_control);
2008 return retval;
2009 }
2010
2011
2012 static int cortex_a8_mmu(struct target *target, int *enabled)
2013 {
2014 if (target->state != TARGET_HALTED) {
2015 LOG_ERROR("%s: target not halted", __func__);
2016 return ERROR_TARGET_INVALID;
2017 }
2018
2019 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
2020 return ERROR_OK;
2021 }
2022
2023 static int cortex_a8_virt2phys(struct target *target,
2024 uint32_t virt, uint32_t *phys)
2025 {
2026 uint32_t cb;
2027 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2028 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2029 struct armv7a_common *armv7a = target_to_armv7a(target);
2030
2031 /* We assume the virtual address space is split between user
2032 and kernel in the usual Linux 3G/1G style:
2033 0x00000000-0xbfffffff - user space
2034 0xc0000000-0xffffffff - kernel space */
2035 if (virt < 0xc0000000) /* Linux user space */
2036 cortex_a8->current_address_mode = ARM_MODE_USR;
2037 else /* Linux kernel */
2038 cortex_a8->current_address_mode = ARM_MODE_SVC;
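/* cortex_a8_get_ttb() uses current_address_mode during the table walk
 * below to pick TTBR1 (kernel) or TTBR0 (user); the flag is cleared
 * again once the translation has finished */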
2039 uint32_t ret;
2040 int retval = armv4_5_mmu_translate_va(target,
2041 &armv7a->armv4_5_mmu, virt, &cb, &ret);
2042 if (retval != ERROR_OK)
2043 return retval;
2044 /* Reset the flag. We don't want someone else to use it by mistake */
2045 cortex_a8->current_address_mode = ARM_MODE_ANY;
2046
2047 *phys = ret;
2048 return ERROR_OK;
2049 }
2050
2051 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2052 {
2053 struct target *target = get_current_target(CMD_CTX);
2054 struct armv7a_common *armv7a = target_to_armv7a(target);
2055
2056 return armv4_5_handle_cache_info_command(CMD_CTX,
2057 &armv7a->armv4_5_mmu.armv4_5_cache);
2058 }
2059
2060
2061 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2062 {
2063 struct target *target = get_current_target(CMD_CTX);
2064 if (!target_was_examined(target))
2065 {
2066 LOG_ERROR("target not examined yet");
2067 return ERROR_FAIL;
2068 }
2069
2070 return cortex_a8_init_debug_access(target);
2071 }
2072
2073 static const struct command_registration cortex_a8_exec_command_handlers[] = {
2074 {
2075 .name = "cache_info",
2076 .handler = cortex_a8_handle_cache_info_command,
2077 .mode = COMMAND_EXEC,
2078 .help = "display information about target caches",
2079 },
2080 {
2081 .name = "dbginit",
2082 .handler = cortex_a8_handle_dbginit_command,
2083 .mode = COMMAND_EXEC,
2084 .help = "Initialize core debug",
2085 },
2086 COMMAND_REGISTRATION_DONE
2087 };
2088 static const struct command_registration cortex_a8_command_handlers[] = {
2089 {
2090 .chain = arm_command_handlers,
2091 },
2092 {
2093 .chain = armv7a_command_handlers,
2094 },
2095 {
2096 .name = "cortex_a8",
2097 .mode = COMMAND_ANY,
2098 .help = "Cortex-A8 command group",
2099 .chain = cortex_a8_exec_command_handlers,
2100 },
2101 COMMAND_REGISTRATION_DONE
2102 };
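/*
 * These registrations surface as a "cortex_a8" command group; a usage
 * sketch from the OpenOCD telnet/TCL console:
 *
 *   > cortex_a8 dbginit
 *   > cortex_a8 cache_info
 */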
2103
2104 struct target_type cortexa8_target = {
2105 .name = "cortex_a8",
2106
2107 .poll = cortex_a8_poll,
2108 .arch_state = armv7a_arch_state,
2109
2110 .target_request_data = NULL,
2111
2112 .halt = cortex_a8_halt,
2113 .resume = cortex_a8_resume,
2114 .step = cortex_a8_step,
2115
2116 .assert_reset = cortex_a8_assert_reset,
2117 .deassert_reset = cortex_a8_deassert_reset,
2118 .soft_reset_halt = NULL,
2119
2120 /* REVISIT allow exporting VFP3 registers ... */
2121 .get_gdb_reg_list = arm_get_gdb_reg_list,
2122
2123 .read_memory = cortex_a8_read_memory,
2124 .write_memory = cortex_a8_write_memory,
2125 .bulk_write_memory = cortex_a8_bulk_write_memory,
2126
2127 .checksum_memory = arm_checksum_memory,
2128 .blank_check_memory = arm_blank_check_memory,
2129
2130 .run_algorithm = armv4_5_run_algorithm,
2131
2132 .add_breakpoint = cortex_a8_add_breakpoint,
2133 .remove_breakpoint = cortex_a8_remove_breakpoint,
2134 .add_watchpoint = NULL,
2135 .remove_watchpoint = NULL,
2136
2137 .commands = cortex_a8_command_handlers,
2138 .target_create = cortex_a8_target_create,
2139 .init_target = cortex_a8_init_target,
2140 .examine = cortex_a8_examine,
2141
2142 .read_phys_memory = cortex_a8_read_phys_memory,
2143 .write_phys_memory = cortex_a8_write_phys_memory,
2144 .mmu = cortex_a8_mmu,
2145 .virt2phys = cortex_a8_virt2phys,
2146
2147 };
