cortex_a: apb mem read/write working with mmu_on
src/target/cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A8(tm) TRM, ARM DDI 0344H *
33 * Cortex-A9(tm) TRM, ARM DDI 0407F *
34 * *
35 ***************************************************************************/
36 #ifdef HAVE_CONFIG_H
37 #include "config.h"
38 #endif
39
40 #include "breakpoints.h"
41 #include "cortex_a.h"
42 #include "register.h"
43 #include "target_request.h"
44 #include "target_type.h"
45 #include "arm_opcodes.h"
46 #include <helper/time_support.h>
47
48 static int cortex_a8_poll(struct target *target);
49 static int cortex_a8_debug_entry(struct target *target);
50 static int cortex_a8_restore_context(struct target *target, bool bpwp);
51 static int cortex_a8_set_breakpoint(struct target *target,
52 struct breakpoint *breakpoint, uint8_t matchmode);
53 static int cortex_a8_unset_breakpoint(struct target *target,
54 struct breakpoint *breakpoint);
55 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
56 uint32_t *value, int regnum);
57 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
58 uint32_t value, int regnum);
59 static int cortex_a8_mmu(struct target *target, int *enabled);
60 static int cortex_a8_virt2phys(struct target *target,
61 uint32_t virt, uint32_t *phys);
62 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
63 int d_u_cache, int i_cache);
64 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
65 int d_u_cache, int i_cache);
66 static int cortex_a8_get_ttb(struct target *target, uint32_t *result);
67
68
69 /*
70 * FIXME do topology discovery using the ROM; don't
71 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
72 * cores, with different AP numbering ... don't use a #define
73 * for these numbers, use per-core armv7a state.
74 */
75 #define swjdp_memoryap 0
76 #define swjdp_debugap 1
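/* With this (OMAP3-style) AP numbering, AP 0 is the AHB-AP used for
 * direct bus access to memory, and AP 1 is the APB-AP through which
 * the core's debug registers are reached. */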
77
78 /*
79 * Cortex-A8 basic debug access: very low level, assumes state is saved
80 */
81 static int cortex_a8_init_debug_access(struct target *target)
82 {
83 struct armv7a_common *armv7a = target_to_armv7a(target);
84 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
85 int retval;
86 uint32_t dummy;
87
88 LOG_DEBUG(" ");
89
90 /* Unlocking the debug registers for modification */
91 /* The debugport might be uninitialised so try twice */
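/* 0xC5ACCE55 is the standard CoreSight lock access key: writing it
 * to the lock access register removes the software lock on the
 * debug registers. */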
92 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
93 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
94 if (retval != ERROR_OK)
95 {
96 /* try again */
97 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
98 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
99 if (retval == ERROR_OK)
100 {
101 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
102 }
103 }
104 if (retval != ERROR_OK)
105 return retval;
106 /* Clear Sticky Power Down status Bit in PRSR to enable access to
107 the registers in the Core Power Domain */
108 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
109 armv7a->debug_base + CPUDBG_PRSR, &dummy);
110 if (retval != ERROR_OK)
111 return retval;
112
113 /* Enabling of instruction execution in debug mode is done in debug_entry code */
114
115 /* Resync breakpoint registers */
116
117 /* Since this is likely called from init or reset, update target state information */
118 return cortex_a8_poll(target);
119 }
120
121 /* To reduce needless round-trips, pass in a pointer to the current
122 * DSCR value. Initialize it to zero if you just need to know the
123 * value on return from this function; or DSCR_INSTR_COMP if you
124 * happen to know that no instruction is pending.
125 */
126 static int cortex_a8_exec_opcode(struct target *target,
127 uint32_t opcode, uint32_t *dscr_p)
128 {
129 uint32_t dscr;
130 int retval;
131 struct armv7a_common *armv7a = target_to_armv7a(target);
132 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
133
134 dscr = dscr_p ? *dscr_p : 0;
135
136 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
137
138 /* Wait for InstrCompl bit to be set */
139 long long then = timeval_ms();
140 while ((dscr & DSCR_INSTR_COMP) == 0)
141 {
142 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
143 armv7a->debug_base + CPUDBG_DSCR, &dscr);
144 if (retval != ERROR_OK)
145 {
146 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
147 return retval;
148 }
149 if (timeval_ms() > then + 1000)
150 {
151 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
152 return ERROR_FAIL;
153 }
154 }
155
156 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
157 armv7a->debug_base + CPUDBG_ITR, opcode);
158 if (retval != ERROR_OK)
159 return retval;
160
161 then = timeval_ms();
162 do
163 {
164 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
165 armv7a->debug_base + CPUDBG_DSCR, &dscr);
166 if (retval != ERROR_OK)
167 {
168 LOG_ERROR("Could not read DSCR register");
169 return retval;
170 }
171 if (timeval_ms() > then + 1000)
172 {
173 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
174 return ERROR_FAIL;
175 }
176 }
177 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
178
179 if (dscr_p)
180 *dscr_p = dscr;
181
182 return retval;
183 }
184
185 /**************************************************************************
186 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
187 This can cause problems with MMU active.
188 **************************************************************************/
189 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
190 uint32_t * regfile)
191 {
192 int retval = ERROR_OK;
193 struct armv7a_common *armv7a = target_to_armv7a(target);
194 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
195
196 retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
197 if (retval != ERROR_OK)
198 return retval;
199 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
200 if (retval != ERROR_OK)
201 return retval;
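/* STMIA r0, {r1-r15}: register list 0xFFFE selects r1..r15, storing
 * them to the work area whose address was just loaded into r0 (r0
 * itself was read out over the DCC above). */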
202 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
203 if (retval != ERROR_OK)
204 return retval;
205
206 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
207 (uint8_t *)(&regfile[1]), 4*15, address);
208
209 return retval;
210 }
211
212 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
213 uint32_t *value, int regnum)
214 {
215 int retval = ERROR_OK;
216 uint8_t reg = regnum&0xFF;
217 uint32_t dscr = 0;
218 struct armv7a_common *armv7a = target_to_armv7a(target);
219 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
220
221 if (reg > 17)
222 return retval;
223
224 if (reg < 15)
225 {
226 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
227 retval = cortex_a8_exec_opcode(target,
228 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
229 &dscr);
230 if (retval != ERROR_OK)
231 return retval;
232 }
233 else if (reg == 15)
234 {
235 /* "MOV r0, r15"; then move r0 to DCCTX */
236 retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
237 if (retval != ERROR_OK)
238 return retval;
239 retval = cortex_a8_exec_opcode(target,
240 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
241 &dscr);
242 if (retval != ERROR_OK)
243 return retval;
244 }
245 else
246 {
247 /* "MRS r0, CPSR" or "MRS r0, SPSR"
248 * then move r0 to DCCTX
249 */
250 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
251 if (retval != ERROR_OK)
252 return retval;
253 retval = cortex_a8_exec_opcode(target,
254 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
255 &dscr);
256 if (retval != ERROR_OK)
257 return retval;
258 }
259
260 /* Wait for DTRTXfull, then read DTRTX */
261 long long then = timeval_ms();
262 while ((dscr & DSCR_DTR_TX_FULL) == 0)
263 {
264 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
265 armv7a->debug_base + CPUDBG_DSCR, &dscr);
266 if (retval != ERROR_OK)
267 return retval;
268 if (timeval_ms() > then + 1000)
269 {
270 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
271 return ERROR_FAIL;
272 }
273 }
274
275 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
276 armv7a->debug_base + CPUDBG_DTRTX, value);
277 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
278
279 return retval;
280 }
281
282 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
283 uint32_t value, int regnum)
284 {
285 int retval = ERROR_OK;
286 uint8_t Rd = regnum&0xFF;
287 uint32_t dscr;
288 struct armv7a_common *armv7a = target_to_armv7a(target);
289 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
290
291 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
292
293 /* Check that DCCRX is not full */
294 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
295 armv7a->debug_base + CPUDBG_DSCR, &dscr);
296 if (retval != ERROR_OK)
297 return retval;
298 if (dscr & DSCR_DTR_RX_FULL)
299 {
300 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
301 /* Clear DCCRX with MRC(p14, 0, R0, c0, c5, 0), opcode 0xEE100E15 */
302 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
303 &dscr);
304 if (retval != ERROR_OK)
305 return retval;
306 }
307
308 if (Rd > 17)
309 return retval;
310
311 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
312 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
313 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
314 armv7a->debug_base + CPUDBG_DTRRX, value);
315 if (retval != ERROR_OK)
316 return retval;
317
318 if (Rd < 15)
319 {
320 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
321 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
322 &dscr);
323
324 if (retval != ERROR_OK)
325 return retval;
326 }
327 else if (Rd == 15)
328 {
329 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
330 * then "mov r15, r0"
331 */
332 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
333 &dscr);
334 if (retval != ERROR_OK)
335 return retval;
336 retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
337 if (retval != ERROR_OK)
338 return retval;
339 }
340 else
341 {
342 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
343 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
344 */
345 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
346 &dscr);
347 if (retval != ERROR_OK)
348 return retval;
349 retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
350 &dscr);
351 if (retval != ERROR_OK)
352 return retval;
353
354 /* "Prefetch flush" after modifying execution status in CPSR */
355 if (Rd == 16)
356 {
357 retval = cortex_a8_exec_opcode(target,
358 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
359 &dscr);
360 if (retval != ERROR_OK)
361 return retval;
362 }
363 }
364
365 return retval;
366 }
367
368 /* Write to memory mapped registers directly with no cache or mmu handling */
369 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
370 {
371 int retval;
372 struct armv7a_common *armv7a = target_to_armv7a(target);
373 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
374
375 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap, address, value);
376
377 return retval;
378 }
379
380 /*
381 * Cortex-A8 implementation of Debug Programmer's Model
382 *
383 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
384 * so there's no need to poll for it before executing an instruction.
385 *
386 * NOTE that in several of these cases the "stall" mode might be useful.
387 * It'd let us queue a few operations together... prepare/finish might
388 * be the places to enable/disable that mode.
389 */
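/* The helpers below implement the usual DCC handshake: to pass a
 * value to the core, the debugger writes DTRRX and the core executes
 * "MRC p14, 0, Rt, c0, c5, 0" to fetch it; to read a value back, the
 * core executes "MCR p14, 0, Rt, c0, c5, 0" and the debugger drains
 * DTRTX once DSCR reports it full. */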
390
391 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
392 {
393 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
394 }
395
396 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
397 {
398 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
399 return mem_ap_sel_write_u32(a8->armv7a_common.armv4_5_common.dap,
400 swjdp_debugap, a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
401 }
402
403 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
404 uint32_t *dscr_p)
405 {
406 struct adiv5_dap *swjdp = a8->armv7a_common.armv4_5_common.dap;
407 uint32_t dscr = DSCR_INSTR_COMP;
408 int retval;
409
410 if (dscr_p)
411 dscr = *dscr_p;
412
413 /* Wait for DTRTXfull */
414 long long then = timeval_ms();
415 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
416 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
417 a8->armv7a_common.debug_base + CPUDBG_DSCR,
418 &dscr);
419 if (retval != ERROR_OK)
420 return retval;
421 if (timeval_ms() > then + 1000)
422 {
423 LOG_ERROR("Timeout waiting for read dcc");
424 return ERROR_FAIL;
425 }
426 }
427
428 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
429 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
430 if (retval != ERROR_OK)
431 return retval;
432 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
433
434 if (dscr_p)
435 *dscr_p = dscr;
436
437 return retval;
438 }
439
440 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
441 {
442 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
443 struct adiv5_dap *swjdp = a8->armv7a_common.armv4_5_common.dap;
444 uint32_t dscr;
445 int retval;
446
447 /* set up invariant: INSTR_COMP is set after every DPM operation */
448 long long then = timeval_ms();
449 for (;;)
450 {
451 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
452 a8->armv7a_common.debug_base + CPUDBG_DSCR,
453 &dscr);
454 if (retval != ERROR_OK)
455 return retval;
456 if ((dscr & DSCR_INSTR_COMP) != 0)
457 break;
458 if (timeval_ms() > then + 1000)
459 {
460 LOG_ERROR("Timeout waiting for dpm prepare");
461 return ERROR_FAIL;
462 }
463 }
464
465 /* this "should never happen" ... */
466 if (dscr & DSCR_DTR_RX_FULL) {
467 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
468 /* Clear DCCRX */
469 retval = cortex_a8_exec_opcode(
470 a8->armv7a_common.armv4_5_common.target,
471 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
472 &dscr);
473 if (retval != ERROR_OK)
474 return retval;
475 }
476
477 return retval;
478 }
479
480 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
481 {
482 /* REVISIT what could be done here? */
483 return ERROR_OK;
484 }
485
486 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
487 uint32_t opcode, uint32_t data)
488 {
489 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
490 int retval;
491 uint32_t dscr = DSCR_INSTR_COMP;
492
493 retval = cortex_a8_write_dcc(a8, data);
494 if (retval != ERROR_OK)
495 return retval;
496
497 return cortex_a8_exec_opcode(
498 a8->armv7a_common.armv4_5_common.target,
499 opcode,
500 &dscr);
501 }
502
503 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
504 uint32_t opcode, uint32_t data)
505 {
506 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
507 uint32_t dscr = DSCR_INSTR_COMP;
508 int retval;
509
510 retval = cortex_a8_write_dcc(a8, data);
511 if (retval != ERROR_OK)
512 return retval;
513
514 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
515 retval = cortex_a8_exec_opcode(
516 a8->armv7a_common.armv4_5_common.target,
517 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
518 &dscr);
519 if (retval != ERROR_OK)
520 return retval;
521
522 /* then the opcode, taking data from R0 */
523 retval = cortex_a8_exec_opcode(
524 a8->armv7a_common.armv4_5_common.target,
525 opcode,
526 &dscr);
527
528 return retval;
529 }
530
531 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
532 {
533 struct target *target = dpm->arm->target;
534 uint32_t dscr = DSCR_INSTR_COMP;
535
536 /* "Prefetch flush" after modifying execution status in CPSR */
537 return cortex_a8_exec_opcode(target,
538 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
539 &dscr);
540 }
541
542 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
543 uint32_t opcode, uint32_t *data)
544 {
545 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
546 int retval;
547 uint32_t dscr = DSCR_INSTR_COMP;
548
549 /* the opcode, writing data to DCC */
550 retval = cortex_a8_exec_opcode(
551 a8->armv7a_common.armv4_5_common.target,
552 opcode,
553 &dscr);
554 if (retval != ERROR_OK)
555 return retval;
556
557 return cortex_a8_read_dcc(a8, data, &dscr);
558 }
559
560
561 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
562 uint32_t opcode, uint32_t *data)
563 {
564 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
565 uint32_t dscr = DSCR_INSTR_COMP;
566 int retval;
567
568 /* the opcode, writing data to R0 */
569 retval = cortex_a8_exec_opcode(
570 a8->armv7a_common.armv4_5_common.target,
571 opcode,
572 &dscr);
573 if (retval != ERROR_OK)
574 return retval;
575
576 /* write R0 to DCC */
577 retval = cortex_a8_exec_opcode(
578 a8->armv7a_common.armv4_5_common.target,
579 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
580 &dscr);
581 if (retval != ERROR_OK)
582 return retval;
583
584 return cortex_a8_read_dcc(a8, data, &dscr);
585 }
586
587 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
588 uint32_t addr, uint32_t control)
589 {
590 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
591 uint32_t vr = a8->armv7a_common.debug_base;
592 uint32_t cr = a8->armv7a_common.debug_base;
593 int retval;
594
595 switch (index_t) {
596 case 0 ... 15: /* breakpoints */
597 vr += CPUDBG_BVR_BASE;
598 cr += CPUDBG_BCR_BASE;
599 break;
600 case 16 ... 31: /* watchpoints */
601 vr += CPUDBG_WVR_BASE;
602 cr += CPUDBG_WCR_BASE;
603 index_t -= 16;
604 break;
605 default:
606 return ERROR_FAIL;
607 }
608 vr += 4 * index_t;
609 cr += 4 * index_t;
610
611 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
612 (unsigned) vr, (unsigned) cr);
613
614 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
615 vr, addr);
616 if (retval != ERROR_OK)
617 return retval;
618 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
619 cr, control);
620 return retval;
621 }
622
623 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
624 {
625 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
626 uint32_t cr;
627
628 switch (index_t) {
629 case 0 ... 15:
630 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
631 break;
632 case 16 ... 31:
633 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
634 index_t -= 16;
635 break;
636 default:
637 return ERROR_FAIL;
638 }
639 cr += 4 * index_t;
640
641 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
642
643 /* clear control register */
644 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
645 }
646
647 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
648 {
649 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
650 int retval;
651
652 dpm->arm = &a8->armv7a_common.armv4_5_common;
653 dpm->didr = didr;
654
655 dpm->prepare = cortex_a8_dpm_prepare;
656 dpm->finish = cortex_a8_dpm_finish;
657
658 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
659 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
660 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
661
662 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
663 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
664
665 dpm->bpwp_enable = cortex_a8_bpwp_enable;
666 dpm->bpwp_disable = cortex_a8_bpwp_disable;
667
668 retval = arm_dpm_setup(dpm);
669 if (retval == ERROR_OK)
670 retval = arm_dpm_initialize(dpm);
671
672 return retval;
673 }
674
675
676 /*
677 * Cortex-A8 Run control
678 */
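/* cortex_a8_poll() maps the DSCR run-mode bits onto target states:
 * HALTED and RESTARTED both set means the core is sitting halted in
 * debug state, RESTARTED alone means it is running, and anything
 * else is reported as unknown. */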
679
680 static int cortex_a8_poll(struct target *target)
681 {
682 int retval = ERROR_OK;
683 uint32_t dscr;
684 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
685 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
686 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
687 enum target_state prev_target_state = target->state;
688
689 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
690 armv7a->debug_base + CPUDBG_DSCR, &dscr);
691 if (retval != ERROR_OK)
692 {
693 return retval;
694 }
695 cortex_a8->cpudbg_dscr = dscr;
696
697 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED))
698 {
699 if (prev_target_state != TARGET_HALTED)
700 {
701 /* We have a halting debug event */
702 LOG_DEBUG("Target halted");
703 target->state = TARGET_HALTED;
704 if ((prev_target_state == TARGET_RUNNING)
705 || (prev_target_state == TARGET_RESET))
706 {
707 retval = cortex_a8_debug_entry(target);
708 if (retval != ERROR_OK)
709 return retval;
710
711 target_call_event_callbacks(target,
712 TARGET_EVENT_HALTED);
713 }
714 if (prev_target_state == TARGET_DEBUG_RUNNING)
715 {
716 LOG_DEBUG(" ");
717
718 retval = cortex_a8_debug_entry(target);
719 if (retval != ERROR_OK)
720 return retval;
721
722 target_call_event_callbacks(target,
723 TARGET_EVENT_DEBUG_HALTED);
724 }
725 }
726 }
727 else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
728 {
729 target->state = TARGET_RUNNING;
730 }
731 else
732 {
733 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
734 target->state = TARGET_UNKNOWN;
735 }
736
737 return retval;
738 }
739
740 static int cortex_a8_halt(struct target *target)
741 {
742 int retval = ERROR_OK;
743 uint32_t dscr;
744 struct armv7a_common *armv7a = target_to_armv7a(target);
745 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
746
747 /*
748 * Tell the core to halt by writing DRCR with 0x1
749 * and then wait for the core to enter the halted state.
750 */
751 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
752 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
753 if (retval != ERROR_OK)
754 return retval;
755
756 /*
757 * enter halting debug mode
758 */
759 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
760 armv7a->debug_base + CPUDBG_DSCR, &dscr);
761 if (retval != ERROR_OK)
762 return retval;
763
764 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
765 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
766 if (retval != ERROR_OK)
767 return retval;
768
769 long long then = timeval_ms();
770 for (;;)
771 {
772 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
773 armv7a->debug_base + CPUDBG_DSCR, &dscr);
774 if (retval != ERROR_OK)
775 return retval;
776 if ((dscr & DSCR_CORE_HALTED) != 0)
777 {
778 break;
779 }
780 if (timeval_ms() > then + 1000)
781 {
782 LOG_ERROR("Timeout waiting for halt");
783 return ERROR_FAIL;
784 }
785 }
786
787 target->debug_reason = DBG_REASON_DBGRQ;
788
789 return ERROR_OK;
790 }
791
792 static int cortex_a8_resume(struct target *target, int current,
793 uint32_t address, int handle_breakpoints, int debug_execution)
794 {
795 struct armv7a_common *armv7a = target_to_armv7a(target);
796 struct arm *armv4_5 = &armv7a->armv4_5_common;
797 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
798 int retval;
799
800 // struct breakpoint *breakpoint = NULL;
801 uint32_t resume_pc, dscr;
802
803 if (!debug_execution)
804 target_free_all_working_areas(target);
805
806 #if 0
807 if (debug_execution)
808 {
809 /* Disable interrupts */
810 /* We disable interrupts in the PRIMASK register instead of
811 * masking with C_MASKINTS,
812 * This is probably the same issue as Cortex-M3 Errata 377493:
813 * C_MASKINTS in parallel with disabled interrupts can cause
814 * local faults to not be taken. */
815 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
816 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
817 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
818
819 /* Make sure we are in Thumb mode */
820 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
821 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
822 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
823 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
824 }
825 #endif
826
827 /* current = 1: continue on current pc, otherwise continue at <address> */
828 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
829 if (!current)
830 resume_pc = address;
831
832 /* Make sure that the ARMv7 gdb Thumb fixups do not
833 * kill the return address
834 */
835 switch (armv4_5->core_state)
836 {
837 case ARM_STATE_ARM:
838 resume_pc &= 0xFFFFFFFC;
839 break;
840 case ARM_STATE_THUMB:
841 case ARM_STATE_THUMB_EE:
842 /* When the return address is loaded into PC
843 * bit 0 must be 1 to stay in Thumb state
844 */
845 resume_pc |= 0x1;
846 break;
847 case ARM_STATE_JAZELLE:
848 LOG_ERROR("How do I resume into Jazelle state??");
849 return ERROR_FAIL;
850 }
851 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
852 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
853 armv4_5->pc->dirty = 1;
854 armv4_5->pc->valid = 1;
855
856 retval = cortex_a8_restore_context(target, handle_breakpoints);
857 if (retval != ERROR_OK)
858 return retval;
859
860 #if 0
861 /* the front-end may request us not to handle breakpoints */
862 if (handle_breakpoints)
863 {
864 /* Single step past breakpoint at current address */
865 if ((breakpoint = breakpoint_find(target, resume_pc)))
866 {
867 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
868 cortex_m3_unset_breakpoint(target, breakpoint);
869 cortex_m3_single_step_core(target);
870 cortex_m3_set_breakpoint(target, breakpoint);
871 }
872 }
873
874 #endif
875
876 /*
877 * Restart core and wait for it to be started. Clear ITRen and sticky
878 * exception flags: see ARMv7 ARM, C5.9.
879 *
880 * REVISIT: for single stepping, we probably want to
881 * disable IRQs by default, with optional override...
882 */
883
884 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
885 armv7a->debug_base + CPUDBG_DSCR, &dscr);
886 if (retval != ERROR_OK)
887 return retval;
888
889 if ((dscr & DSCR_INSTR_COMP) == 0)
890 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
891
892 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
893 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
894 if (retval != ERROR_OK)
895 return retval;
896
897 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
898 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART | DRCR_CLEAR_EXCEPTIONS);
899 if (retval != ERROR_OK)
900 return retval;
901
902 long long then = timeval_ms();
903 for (;;)
904 {
905 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
906 armv7a->debug_base + CPUDBG_DSCR, &dscr);
907 if (retval != ERROR_OK)
908 return retval;
909 if ((dscr & DSCR_CORE_RESTARTED) != 0)
910 break;
911 if (timeval_ms() > then + 1000)
912 {
913 LOG_ERROR("Timeout waiting for resume");
914 return ERROR_FAIL;
915 }
916 }
917
918 target->debug_reason = DBG_REASON_NOTHALTED;
919 target->state = TARGET_RUNNING;
920
921 /* registers are now invalid */
922 register_cache_invalidate(armv4_5->core_cache);
923
924 if (!debug_execution)
925 {
926 target->state = TARGET_RUNNING;
927 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
928 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
929 }
930 else
931 {
932 target->state = TARGET_DEBUG_RUNNING;
933 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
934 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
935 }
936
937 return ERROR_OK;
938 }
939
940 static int cortex_a8_debug_entry(struct target *target)
941 {
942 int i;
943 uint32_t regfile[16], cpsr, dscr;
944 int retval = ERROR_OK;
945 struct working_area *regfile_working_area = NULL;
946 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
947 struct armv7a_common *armv7a = target_to_armv7a(target);
948 struct arm *armv4_5 = &armv7a->armv4_5_common;
949 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
950 struct reg *reg;
951
952 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
953
954 /* REVISIT surely we should not re-read DSCR !! */
955 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
956 armv7a->debug_base + CPUDBG_DSCR, &dscr);
957 if (retval != ERROR_OK)
958 return retval;
959
960 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
961 * imprecise data aborts get discarded by issuing a Data
962 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
963 */
964
965 /* Enable the ITR execution once we are in debug mode */
966 dscr |= DSCR_ITR_EN;
967 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
968 armv7a->debug_base + CPUDBG_DSCR, dscr);
969 if (retval != ERROR_OK)
970 return retval;
971
972 /* Examine debug reason */
973 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
974
975 /* save address of instruction that triggered the watchpoint? */
976 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
977 uint32_t wfar;
978
979 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
980 armv7a->debug_base + CPUDBG_WFAR,
981 &wfar);
982 if (retval != ERROR_OK)
983 return retval;
984 arm_dpm_report_wfar(&armv7a->dpm, wfar);
985 }
986
987 /* REVISIT fast_reg_read is never set ... */
988
989 /* Examine target state and mode */
990 if (cortex_a8->fast_reg_read)
991 target_alloc_working_area(target, 64, &regfile_working_area);
992
993 /* First load the registers accessible through the core debug port */
994 if (!regfile_working_area)
995 {
996 retval = arm_dpm_read_current_registers(&armv7a->dpm);
997 }
998 else
999 {
1000 retval = cortex_a8_read_regs_through_mem(target,
1001 regfile_working_area->address, regfile);
1002
1003 target_free_working_area(target, regfile_working_area);
1004 if (retval != ERROR_OK)
1005 {
1006 return retval;
1007 }
1008
1009 /* read Current PSR */
1010 retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
1011 if (retval != ERROR_OK)
1012 return retval;
1013
1014 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1015
1016 arm_set_cpsr(armv4_5, cpsr);
1017
1018 /* update cache */
1019 for (i = 0; i <= ARM_PC; i++)
1020 {
1021 reg = arm_reg_current(armv4_5, i);
1022
1023 buf_set_u32(reg->value, 0, 32, regfile[i]);
1024 reg->valid = 1;
1025 reg->dirty = 0;
1026 }
1027
1028 /* Fixup PC Resume Address */
1029 if (cpsr & (1 << 5))
1030 {
1031 // T bit set for Thumb or ThumbEE state
1032 regfile[ARM_PC] -= 4;
1033 }
1034 else
1035 {
1036 // ARM state
1037 regfile[ARM_PC] -= 8;
1038 }
1039
1040 reg = armv4_5->pc;
1041 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1042 reg->dirty = reg->valid;
1043 }
1044
1045 #if 0
1046 /* TODO, Move this */
1047 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1048 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1049 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1050
1051 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1052 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1053
1054 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1055 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1056 #endif
1057
1058 /* Are we in an exception handler */
1059 // armv4_5->exception_number = 0;
1060 if (armv7a->post_debug_entry)
1061 {
1062 retval = armv7a->post_debug_entry(target);
1063 if (retval != ERROR_OK)
1064 return retval;
1065 }
1066
1067 return retval;
1068 }
1069
1070 static int cortex_a8_post_debug_entry(struct target *target)
1071 {
1072 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1073 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1074 int retval;
1075
1076 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1077 retval = armv7a->armv4_5_common.mrc(target, 15,
1078 0, 0, /* op1, op2 */
1079 1, 0, /* CRn, CRm */
1080 &cortex_a8->cp15_control_reg);
1081 if (retval != ERROR_OK)
1082 return retval;
1083 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1084
1085 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
1086 {
1087 uint32_t cache_type_reg;
1088
1089 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1090 retval = armv7a->armv4_5_common.mrc(target, 15,
1091 0, 1, /* op1, op2 */
1092 0, 0, /* CRn, CRm */
1093 &cache_type_reg);
1094 if (retval != ERROR_OK)
1095 return retval;
1096 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
1097
1098 /* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
1099 armv4_5_identify_cache(cache_type_reg,
1100 &armv7a->armv4_5_mmu.armv4_5_cache);
1101 }
1102
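/* SCTLR.M (bit 0), SCTLR.C (bit 2) and SCTLR.I (bit 12) report
 * whether the MMU, the data/unified cache and the instruction
 * cache are enabled. */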
1103 armv7a->armv4_5_mmu.mmu_enabled =
1104 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1105 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1106 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1107 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1108 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1109
1110 return ERROR_OK;
1111 }
1112
1113 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1114 int handle_breakpoints)
1115 {
1116 struct armv7a_common *armv7a = target_to_armv7a(target);
1117 struct arm *armv4_5 = &armv7a->armv4_5_common;
1118 struct breakpoint *breakpoint = NULL;
1119 struct breakpoint stepbreakpoint;
1120 struct reg *r;
1121 int retval;
1122
1123 if (target->state != TARGET_HALTED)
1124 {
1125 LOG_WARNING("target not halted");
1126 return ERROR_TARGET_NOT_HALTED;
1127 }
1128
1129 /* current = 1: continue on current pc, otherwise continue at <address> */
1130 r = armv4_5->pc;
1131 if (!current)
1132 {
1133 buf_set_u32(r->value, 0, 32, address);
1134 }
1135 else
1136 {
1137 address = buf_get_u32(r->value, 0, 32);
1138 }
1139
1140 /* The front-end may request us not to handle breakpoints.
1141 * But since Cortex-A8 uses a breakpoint for single-stepping,
1142 * we MUST handle breakpoints.
1143 */
1144 handle_breakpoints = 1;
1145 if (handle_breakpoints) {
1146 breakpoint = breakpoint_find(target, address);
1147 if (breakpoint)
1148 cortex_a8_unset_breakpoint(target, breakpoint);
1149 }
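/* Single stepping uses a hardware breakpoint programmed for IVA
 * *mismatch* (matchmode 0x4 below): the core halts on the first
 * instruction fetched from any address other than the stepped one,
 * i.e. right after the instruction at <address> completes. */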
1150
1151 /* Setup single step breakpoint */
1152 stepbreakpoint.address = address;
1153 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1154 ? 2 : 4;
1155 stepbreakpoint.type = BKPT_HARD;
1156 stepbreakpoint.set = 0;
1157
1158 /* Break on IVA mismatch */
1159 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1160
1161 target->debug_reason = DBG_REASON_SINGLESTEP;
1162
1163 retval = cortex_a8_resume(target, 1, address, 0, 0);
1164 if (retval != ERROR_OK)
1165 return retval;
1166
1167 long long then = timeval_ms();
1168 while (target->state != TARGET_HALTED)
1169 {
1170 retval = cortex_a8_poll(target);
1171 if (retval != ERROR_OK)
1172 return retval;
1173 if (timeval_ms() > then + 1000)
1174 {
1175 LOG_ERROR("timeout waiting for target halt");
1176 return ERROR_FAIL;
1177 }
1178 }
1179
1180 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1181
1182 target->debug_reason = DBG_REASON_BREAKPOINT;
1183
1184 if (breakpoint)
1185 cortex_a8_set_breakpoint(target, breakpoint, 0);
1186
1187 if (target->state == TARGET_HALTED)
1188 LOG_DEBUG("target stepped");
1189
1190 return ERROR_OK;
1191 }
1192
1193 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1194 {
1195 struct armv7a_common *armv7a = target_to_armv7a(target);
1196
1197 LOG_DEBUG(" ");
1198
1199 if (armv7a->pre_restore_context)
1200 armv7a->pre_restore_context(target);
1201
1202 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1203 }
1204
1205
1206 /*
1207 * Cortex-A8 Breakpoint and watchpoint functions
1208 */
1209
1210 /* Setup hardware Breakpoint Register Pair */
1211 static int cortex_a8_set_breakpoint(struct target *target,
1212 struct breakpoint *breakpoint, uint8_t matchmode)
1213 {
1214 int retval;
1215 int brp_i=0;
1216 uint32_t control;
1217 uint8_t byte_addr_select = 0x0F;
1218 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1219 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1220 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1221
1222 if (breakpoint->set)
1223 {
1224 LOG_WARNING("breakpoint already set");
1225 return ERROR_OK;
1226 }
1227
1228 if (breakpoint->type == BKPT_HARD)
1229 {
1230 while ((brp_i < cortex_a8->brp_num) && brp_list[brp_i].used)
1231 brp_i++;
1232 if (brp_i >= cortex_a8->brp_num)
1233 {
1234 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1235 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1236 }
1237 breakpoint->set = brp_i + 1;
1238 if (breakpoint->length == 2)
1239 {
1240 byte_addr_select = (3 << (breakpoint->address & 0x02));
1241 }
1242 control = ((matchmode & 0x7) << 20)
1243 | (byte_addr_select << 5)
1244 | (3 << 1) | 1;
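/* BCR layout (see the Cortex-A8/A9 TRMs): matchmode in bits [22:20]
 * (0 = IVA match, 4 = IVA mismatch), byte address select in bits
 * [8:5], privileged/user access control in bits [2:1], and the
 * enable bit in bit 0. */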
1245 brp_list[brp_i].used = 1;
1246 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1247 brp_list[brp_i].control = control;
1248 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1249 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1250 brp_list[brp_i].value);
1251 if (retval != ERROR_OK)
1252 return retval;
1253 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1254 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1255 brp_list[brp_i].control);
1256 if (retval != ERROR_OK)
1257 return retval;
1258 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1259 brp_list[brp_i].control,
1260 brp_list[brp_i].value);
1261 }
1262 else if (breakpoint->type == BKPT_SOFT)
1263 {
1264 uint8_t code[4];
1265 if (breakpoint->length == 2)
1266 {
1267 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1268 }
1269 else
1270 {
1271 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1272 }
1273 retval = target->type->read_memory(target,
1274 breakpoint->address & 0xFFFFFFFE,
1275 breakpoint->length, 1,
1276 breakpoint->orig_instr);
1277 if (retval != ERROR_OK)
1278 return retval;
1279 retval = target->type->write_memory(target,
1280 breakpoint->address & 0xFFFFFFFE,
1281 breakpoint->length, 1, code);
1282 if (retval != ERROR_OK)
1283 return retval;
1284 breakpoint->set = 0x11; /* Any nice value but 0 */
1285 }
1286
1287 return ERROR_OK;
1288 }
1289
1290 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1291 {
1292 int retval;
1293 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1294 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1295 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1296
1297 if (!breakpoint->set)
1298 {
1299 LOG_WARNING("breakpoint not set");
1300 return ERROR_OK;
1301 }
1302
1303 if (breakpoint->type == BKPT_HARD)
1304 {
1305 int brp_i = breakpoint->set - 1;
1306 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1307 {
1308 LOG_DEBUG("Invalid BRP number in breakpoint");
1309 return ERROR_OK;
1310 }
1311 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1312 brp_list[brp_i].control, brp_list[brp_i].value);
1313 brp_list[brp_i].used = 0;
1314 brp_list[brp_i].value = 0;
1315 brp_list[brp_i].control = 0;
1316 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1317 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1318 brp_list[brp_i].control);
1319 if (retval != ERROR_OK)
1320 return retval;
1321 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1322 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1323 brp_list[brp_i].value);
1324 if (retval != ERROR_OK)
1325 return retval;
1326 }
1327 else
1328 {
1329 /* restore original instruction (kept in target endianness) */
1330 if (breakpoint->length == 4)
1331 {
1332 retval = target->type->write_memory(target,
1333 breakpoint->address & 0xFFFFFFFE,
1334 4, 1, breakpoint->orig_instr);
1335 if (retval != ERROR_OK)
1336 return retval;
1337 }
1338 else
1339 {
1340 retval = target->type->write_memory(target,
1341 breakpoint->address & 0xFFFFFFFE,
1342 2, 1, breakpoint->orig_instr);
1343 if (retval != ERROR_OK)
1344 return retval;
1345 }
1346 }
1347 breakpoint->set = 0;
1348
1349 return ERROR_OK;
1350 }
1351
1352 static int cortex_a8_add_breakpoint(struct target *target,
1353 struct breakpoint *breakpoint)
1354 {
1355 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1356
1357 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1358 {
1359 LOG_INFO("no hardware breakpoint available");
1360 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1361 }
1362
1363 if (breakpoint->type == BKPT_HARD)
1364 cortex_a8->brp_num_available--;
1365
1366 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1367 }
1368
1369 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1370 {
1371 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1372
1373 #if 0
1374 /* It is perfectly possible to remove breakpoints while the target is running */
1375 if (target->state != TARGET_HALTED)
1376 {
1377 LOG_WARNING("target not halted");
1378 return ERROR_TARGET_NOT_HALTED;
1379 }
1380 #endif
1381
1382 if (breakpoint->set)
1383 {
1384 cortex_a8_unset_breakpoint(target, breakpoint);
1385 if (breakpoint->type == BKPT_HARD)
1386 cortex_a8->brp_num_available++;
1387 }
1388
1389
1390 return ERROR_OK;
1391 }
1392
1393
1394
1395 /*
1396 * Cortex-A8 Reset functions
1397 */
1398
1399 static int cortex_a8_assert_reset(struct target *target)
1400 {
1401 struct armv7a_common *armv7a = target_to_armv7a(target);
1402
1403 LOG_DEBUG(" ");
1404
1405 /* FIXME when halt is requested, make it work somehow... */
1406
1407 /* Issue some kind of warm reset. */
1408 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1409 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1410 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1411 /* REVISIT handle "pulls" cases, if there's
1412 * hardware that needs them to work.
1413 */
1414 jtag_add_reset(0, 1);
1415 } else {
1416 LOG_ERROR("%s: how to reset?", target_name(target));
1417 return ERROR_FAIL;
1418 }
1419
1420 /* registers are now invalid */
1421 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1422
1423 target->state = TARGET_RESET;
1424
1425 return ERROR_OK;
1426 }
1427
1428 static int cortex_a8_deassert_reset(struct target *target)
1429 {
1430 int retval;
1431
1432 LOG_DEBUG(" ");
1433
1434 /* be certain SRST is off */
1435 jtag_add_reset(0, 0);
1436
1437 retval = cortex_a8_poll(target);
1438 if (retval != ERROR_OK)
1439 return retval;
1440
1441 if (target->reset_halt) {
1442 if (target->state != TARGET_HALTED) {
1443 LOG_WARNING("%s: ran after reset and before halt ...",
1444 target_name(target));
1445 if ((retval = target_halt(target)) != ERROR_OK)
1446 return retval;
1447 }
1448 }
1449
1450 return ERROR_OK;
1451 }
1452
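/* APB-AP memory access works by having the halted core execute
 * LDRB/STRB itself, one byte per DCC round trip. This is slow, but
 * because the loads and stores are issued by the core they use the
 * core's own MMU translation -- which is what makes these accesses
 * work with the MMU on. */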
1453 static int cortex_a8_write_apb_ab_memory(struct target *target,
1454 uint32_t address, uint32_t size,
1455 uint32_t count, const uint8_t *buffer)
1456 {
1457 int retval = ERROR_INVALID_ARGUMENTS;
1458 struct armv7a_common *armv7a = target_to_armv7a(target);
1459 struct arm *armv4_5 = &armv7a->armv4_5_common;
1460 int nbytes = count * size;
1461 uint32_t data;
1462 struct reg *reg;
1463
1464 if (target->state != TARGET_HALTED)
1465 {
1466 LOG_WARNING("target not halted");
1467 return ERROR_TARGET_NOT_HALTED;
1468 }
1469 reg = arm_reg_current(armv4_5, 0);
1470 reg->dirty = 1;
1471 reg = arm_reg_current(armv4_5, 1);
1472 reg->dirty = 1;
1473 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
1474 if (retval != ERROR_OK)
1475 return retval;
1476
1477 while (nbytes > 0) {
1478 data = *buffer++;
1479 retval = cortex_a8_dap_write_coreregister_u32(target, data, 1);
1480 if (retval != ERROR_OK)
1481 return retval;
1482
1483 /* execute instruction STRB r1, [r0], 1 (0xe4c01001) */
1484 retval = cortex_a8_exec_opcode(target, ARMV4_5_STRB_IP(1, 0) , NULL);
1485 if (retval != ERROR_OK)
1486 return retval;
1487 --nbytes;
1488 }
1489 return retval;
1490 }
1491
1492
1493 static int cortex_a8_read_apb_ab_memory(struct target *target,
1494 uint32_t address, uint32_t size,
1495 uint32_t count, uint8_t *buffer)
1496 {
1497 int retval = ERROR_INVALID_ARGUMENTS;
1498 struct armv7a_common *armv7a = target_to_armv7a(target);
1499 struct arm *armv4_5 = &armv7a->armv4_5_common;
1500 /* read memory through APB-AP */
1501 int nbytes = count * size;
1502 uint32_t data;
1503 struct reg *reg;
1504
1505 if (target->state != TARGET_HALTED)
1506 {
1507 LOG_WARNING("target not halted");
1508 return ERROR_TARGET_NOT_HALTED;
1509 }
1510
1511 reg = arm_reg_current(armv4_5, 0);
1512 reg->dirty = 1;
1513 reg = arm_reg_current(armv4_5, 1);
1514 reg->dirty = 1;
1515
1516 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
1517 if (retval != ERROR_OK)
1518 return retval;
1519
1520 while (nbytes > 0) {
1521
1522
1523 /* execute instruction LDRB r1, [r0], 1 (0xe4d01001) */
1524 retval = cortex_a8_exec_opcode(target, ARMV4_5_LDRB_IP(1, 0) , NULL);
1525 if (retval != ERROR_OK)
1526 return retval;
1527
1528 retval = cortex_a8_dap_read_coreregister_u32(target, &data, 1);
1529 if (retval != ERROR_OK)
1530 return retval;
1531
1532 *buffer++ = data;
1533 --nbytes;
1534
1535 }
1536 return retval;
1537 }
1538
1539
1540
1541 /*
1542 * Cortex-A8 Memory access
1543 *
1544 * This is the same as for Cortex-M3, but we must also use the
1545 * correct AP number for every access.
1546 */
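/* With the MMU enabled, the AHB-AP paths below first translate the
 * virtual address via cortex_a8_virt2phys() and then perform the
 * physical access; APB-AP accesses are simply routed through the
 * core as above. */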
1547
1548 static int cortex_a8_read_phys_memory(struct target *target,
1549 uint32_t address, uint32_t size,
1550 uint32_t count, uint8_t *buffer)
1551 {
1552 struct armv7a_common *armv7a = target_to_armv7a(target);
1553 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1554 int retval = ERROR_INVALID_ARGUMENTS;
1555 uint8_t apsel = swjdp->apsel;
1556 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d",
1557 address, size, count);
1558
1559 if (count && buffer) {
1560
1561 if (apsel == swjdp_memoryap) {
1562
1563 /* read memory through AHB-AP */
1564
1565 switch (size) {
1566 case 4:
1567 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
1568 buffer, 4 * count, address);
1569 break;
1570 case 2:
1571 retval = mem_ap_sel_read_buf_u16(swjdp, swjdp_memoryap,
1572 buffer, 2 * count, address);
1573 break;
1574 case 1:
1575 retval = mem_ap_sel_read_buf_u8(swjdp, swjdp_memoryap,
1576 buffer, count, address);
1577 break;
1578 }
1579
1580 } else {
1581
1582 /* read memory through APB-AP */
1583 int enabled = 0;
1584
1585 retval = cortex_a8_mmu(target, &enabled);
1586 if (retval != ERROR_OK)
1587 return retval;
1588
1589 if (enabled)
1590 {
1591 LOG_WARNING("Reading physical memory through \
1592 APB with MMU enabled is not yet implemented");
1593 return ERROR_TARGET_FAILURE;
1594 }
1595 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
1596 }
1597 }
1598 return retval;
1599 }
1600
1601 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1602 uint32_t size, uint32_t count, uint8_t *buffer)
1603 {
1604 int enabled = 0;
1605 uint32_t virt, phys;
1606 int retval;
1607 struct armv7a_common *armv7a = target_to_armv7a(target);
1608 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1609 uint8_t apsel = swjdp->apsel;
1610
1611 /* cortex_a8 handles unaligned memory access */
1612 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
1613 size, count);
1614 if (apsel == swjdp_memoryap) {
1615 retval = cortex_a8_mmu(target, &enabled);
1616 if (retval != ERROR_OK)
1617 return retval;
1618
1619 if (enabled)
1620 {
1621 virt = address;
1622 retval = cortex_a8_virt2phys(target, virt, &phys);
1623 if (retval != ERROR_OK)
1624 return retval;
1625
1626 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x",
1627 virt, phys);
1628 address = phys;
1629 }
1630 retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
1631 } else {
1632 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
1633 }
1634 return retval;
1635 }
1636
1637 static int cortex_a8_write_phys_memory(struct target *target,
1638 uint32_t address, uint32_t size,
1639 uint32_t count, const uint8_t *buffer)
1640 {
1641 struct armv7a_common *armv7a = target_to_armv7a(target);
1642 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1643 int retval = ERROR_INVALID_ARGUMENTS;
1644 uint8_t apsel = swjdp->apsel;
1645
1646 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address,
1647 size, count);
1648
1649 if (count && buffer) {
1650
1651 if (apsel == swjdp_memoryap) {
1652
1653 /* write memory through AHB-AP */
1654
1655 switch (size) {
1656 case 4:
1657 retval = mem_ap_sel_write_buf_u32(swjdp, swjdp_memoryap,
1658 buffer, 4 * count, address);
1659 break;
1660 case 2:
1661 retval = mem_ap_sel_write_buf_u16(swjdp, swjdp_memoryap,
1662 buffer, 2 * count, address);
1663 break;
1664 case 1:
1665 retval = mem_ap_sel_write_buf_u8(swjdp, swjdp_memoryap,
1666 buffer, count, address);
1667 break;
1668 }
1669
1670 } else {
1671
1672 /* write memory through APB-AP */
1673 int enabled = 0;
1674
1675 retval = cortex_a8_mmu(target, &enabled);
1676 if (retval != ERROR_OK)
1677 return retval;
1678
1679 if (enabled)
1680 {
1681 LOG_WARNING("Writing physical memory through APB with MMU" \
1682 "enabled is not yet implemented");
1683 return ERROR_TARGET_FAILURE;
1684 }
1685 return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
1686 }
1687 }
1688
1689
1690 /* REVISIT this op is generic ARMv7-A/R stuff */
1691 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1692 {
1693 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1694
1695 retval = dpm->prepare(dpm);
1696 if (retval != ERROR_OK)
1697 return retval;
1698
1699 /* The Cache handling will NOT work with MMU active, the
1700 * wrong addresses will be invalidated!
1701 *
1702 * For both ICache and DCache, walk all cache lines in the
1703 * address range. Cortex-A8 has fixed 64 byte line length.
1704 *
1705 * REVISIT per ARMv7, these may trigger watchpoints ...
1706 */
1707
1708 /* invalidate I-Cache */
1709 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1710 {
1711 /* ICIMVAU - Invalidate Cache single entry
1712 * with MVA to PoU
1713 * MCR p15, 0, r0, c7, c5, 1
1714 */
1715 for (uint32_t cacheline = address;
1716 cacheline < address + size * count;
1717 cacheline += 64) {
1718 retval = dpm->instr_write_data_r0(dpm,
1719 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1720 cacheline);
1721 if (retval != ERROR_OK)
1722 return retval;
1723 }
1724 }
1725
1726 /* invalidate D-Cache */
1727 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1728 {
1729 /* DCIMVAC - Invalidate data Cache line
1730 * with MVA to PoC
1731 * MCR p15, 0, r0, c7, c6, 1
1732 */
1733 for (uint32_t cacheline = address;
1734 cacheline < address + size * count;
1735 cacheline += 64) {
1736 retval = dpm->instr_write_data_r0(dpm,
1737 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1738 cacheline);
1739 if (retval != ERROR_OK)
1740 return retval;
1741 }
1742 }
1743
1744 /* (void) */ dpm->finish(dpm);
1745 }
1746
1747 return retval;
1748 }
1749
1750 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1751 uint32_t size, uint32_t count, const uint8_t *buffer)
1752 {
1753 int enabled = 0;
1754 uint32_t virt, phys;
1755 int retval;
1756 struct armv7a_common *armv7a = target_to_armv7a(target);
1757 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1758 uint8_t apsel = swjdp->apsel;
1759 /* cortex_a8 handles unaligned memory access */
1760 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
1761 size, count);
1762 if (apsel == swjdp_memoryap) {
1763
1764 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1765 retval = cortex_a8_mmu(target, &enabled);
1766 if (retval != ERROR_OK)
1767 return retval;
1768
1769 if (enabled)
1770 {
1771 virt = address;
1772 retval = cortex_a8_virt2phys(target, virt, &phys);
1773 if (retval != ERROR_OK)
1774 return retval;
1775 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1776 address = phys;
1777 }
1778
1779 retval = cortex_a8_write_phys_memory(target, address, size,
1780 count, buffer);
1781 }
1782 else {
1783 retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
1784 }
1785 return retval;
1786 }
1787
1788 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1789 uint32_t count, const uint8_t *buffer)
1790 {
1791 return cortex_a8_write_memory(target, address, 4, count, buffer);
1792 }
1793
1794
1795 static int cortex_a8_handle_target_request(void *priv)
1796 {
1797 struct target *target = priv;
1798 struct armv7a_common *armv7a = target_to_armv7a(target);
1799 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1800 int retval;
1801
1802 if (!target_was_examined(target))
1803 return ERROR_OK;
1804 if (!target->dbg_msg_enabled)
1805 return ERROR_OK;
1806
1807 if (target->state == TARGET_RUNNING)
1808 {
1809 uint32_t request;
1810 uint32_t dscr;
1811 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1812 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1813
1814 /* check if we have data */
1815 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK))
1816 {
1817 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1818 armv7a->debug_base + CPUDBG_DTRTX, &request);
1819 if (retval == ERROR_OK)
1820 {
1821 target_request(target, request);
1822 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1823 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1824 }
1825 }
1826 }
1827
1828 return ERROR_OK;
1829 }
1830
1831 /*
1832 * Cortex-A8 target information and configuration
1833 */
1834
1835 static int cortex_a8_examine_first(struct target *target)
1836 {
1837 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1838 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1839 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1840 int i;
1841 int retval = ERROR_OK;
1842 uint32_t didr, ctypr, ttypr, cpuid;
1843
1844 /* Instead of doing one extra read to ensure the DAP is configured,
1845 * we call ahbap_debugport_init(swjdp).
1846 */
1847 retval = ahbap_debugport_init(swjdp);
1848 if (retval != ERROR_OK)
1849 return retval;
1850
1851 if (!target->dbgbase_set)
1852 {
1853 uint32_t dbgbase;
1854 /* Get ROM Table base */
1855 uint32_t apid;
1856 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
1857 if (retval != ERROR_OK)
1858 return retval;
1859 /* Lookup 0x15 -- Processor DAP */
1860 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
1861 &armv7a->debug_base);
1862 if (retval != ERROR_OK)
1863 return retval;
1864 }
1865 else
1866 {
1867 armv7a->debug_base = target->dbgbase;
1868 }
1869
1870 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1871 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1872 if (retval != ERROR_OK)
1873 return retval;
1874
1875 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1876 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1877 {
1878 LOG_DEBUG("Examine %s failed", "CPUID");
1879 return retval;
1880 }
1881
1882 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1883 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1884 {
1885 LOG_DEBUG("Examine %s failed", "CTYPR");
1886 return retval;
1887 }
1888
1889 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1890 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1891 {
1892 LOG_DEBUG("Examine %s failed", "TTYPR");
1893 return retval;
1894 }
1895
1896 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1897 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1898 {
1899 LOG_DEBUG("Examine %s failed", "DIDR");
1900 return retval;
1901 }
1902
1903 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1904 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1905 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1906 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1907
1908 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1909 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1910 if (retval != ERROR_OK)
1911 return retval;
1912
1913 /* Setup Breakpoint Register Pairs */
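/* Per the v7 debug architecture, DIDR[27:24] holds the number of
 * BRPs minus one, and DIDR[23:20] the number of those supporting
 * context-ID comparison, also minus one. */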
1914 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1915 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1916 cortex_a8->brp_num_available = cortex_a8->brp_num;
1917 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1918 // cortex_a8->brb_enabled = ????;
1919 for (i = 0; i < cortex_a8->brp_num; i++)
1920 {
1921 cortex_a8->brp_list[i].used = 0;
1922 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1923 cortex_a8->brp_list[i].type = BRP_NORMAL;
1924 else
1925 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1926 cortex_a8->brp_list[i].value = 0;
1927 cortex_a8->brp_list[i].control = 0;
1928 cortex_a8->brp_list[i].BRPn = i;
1929 }
1930
1931 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1932
1933 target_set_examined(target);
1934 return ERROR_OK;
1935 }
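
/* Worked example of the DIDR decode above, using an illustrative
 * (made-up) value rather than one read from real silicon:
 * didr = 0x15141022 gives
 *   brp_num         = ((didr >> 24) & 0xF) + 1 = 6
 *   brp_num_context = ((didr >> 20) & 0xF) + 1 = 2
 * so list entries 0..3 become BRP_NORMAL (address match) and
 * entries 4..5 become BRP_CONTEXT (context-ID match). */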

static int cortex_a8_examine(struct target *target)
{
	int retval = ERROR_OK;

	/* don't re-probe hardware after each reset */
	if (!target_was_examined(target))
		retval = cortex_a8_examine_first(target);

	/* Configure core debug access */
	if (retval == ERROR_OK)
		retval = cortex_a8_init_debug_access(target);

	return retval;
}

/*
 * Cortex-A8 target creation and initialization
 */

static int cortex_a8_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}

static int cortex_a8_init_arch_info(struct target *target,
		struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
{
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *dap = &armv7a->dap;

	/* Setup struct cortex_a8_common */
	cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;

	/* no DAP has been created for this TAP yet */
	if (!tap->dap)
	{
		armv7a->armv4_5_common.dap = dap;
		armv4_5->arch_info = armv7a;

		/* prepare JTAG information for the new target */
		cortex_a8->jtag_info.tap = tap;
		cortex_a8->jtag_info.scann_size = 4;

		/* Leave (only) generic DAP stuff for debugport_init() */
		dap->jtag_info = &cortex_a8->jtag_info;
		dap->memaccess_tck = 80;

		/* Number of bits for tar autoincrement, impl. dep. at least 10 */
		dap->tar_autoincr_block = (1 << 10);
		tap->dap = dap;
	}
	else
		armv7a->armv4_5_common.dap = tap->dap;

	cortex_a8->fast_reg_read = 0;

	/* Set default value */
	cortex_a8->current_address_mode = ARM_MODE_ANY;

	/* register arch-specific functions */
	armv7a->examine_debug_reason = NULL;

	armv7a->post_debug_entry = cortex_a8_post_debug_entry;

	armv7a->pre_restore_context = NULL;
	armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
	armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
	armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
	armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
	armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
	armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
	armv7a->armv4_5_mmu.has_tiny_pages = 1;
	armv7a->armv4_5_mmu.mmu_enabled = 0;

	/* REVISIT v7a setup should be in a v7a-specific routine */
	arm_init_arch_info(target, armv4_5);
	armv7a->common_magic = ARMV7_COMMON_MAGIC;

	target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);

	return ERROR_OK;
}
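
/* Hedged sketch (not used by the driver) of what the tar_autoincr_block
 * value set above implies for block transfers: the ADIv5 MEM-AP only
 * guarantees TAR auto-increment within an implementation-defined region
 * of at least 2^10 bytes, so a bulk read/write must reload TAR whenever
 * the running address crosses such a boundary. */
static inline uint32_t tar_bytes_before_wrap_sketch(uint32_t tar_autoincr_block,
		uint32_t address)
{
	/* bytes left until the end of the current auto-increment region;
	 * tar_autoincr_block is assumed to be a power of two, as above */
	return tar_autoincr_block - (address & (tar_autoincr_block - 1));
}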

static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
{
	struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));

	if (cortex_a8 == NULL)
		return ERROR_FAIL;

	return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
}

static int cortex_a8_get_ttb(struct target *target, uint32_t *result)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	uint32_t ttb = 0;
	int kernel;
	int retval;

	/* current_address_mode is set inside cortex_a8_virt2phys(),
	   where we can determine whether the address belongs to user
	   or kernel space */
	if (cortex_a8->current_address_mode == ARM_MODE_SVC)
		kernel = 1;
	else if (cortex_a8->current_address_mode == ARM_MODE_USR)
		kernel = 0;
	/* otherwise we don't know which space the address belongs to;
	   assume a kernel address when the core is in kernel (SVC) mode
	   and a user address when it is in user mode */
	else if (armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
		kernel = 1;
	else if (armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
		kernel = 0;
	else
	{
		/* no basis left for choosing between the user and kernel TTB */
		LOG_ERROR("Don't know how to get ttb for current mode");
		return ERROR_FAIL;
	}

	/* MRC p15,0,<Rt>,c2,c0,1 reads TTBR1 (kernel tables);
	   MRC p15,0,<Rt>,c2,c0,0 reads TTBR0 (user tables) */
	retval = armv7a->armv4_5_common.mrc(target, 15,
			0, kernel ? 1 : 0, /* op1, op2 */
			2, 0, /* CRn, CRm */
			&ttb);
	if (retval != ERROR_OK)
		return retval;

	/* keep only the 16 KiB-aligned translation table base */
	ttb &= 0xffffc000;

	*result = ttb;

	return ERROR_OK;
}
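
/* Hedged sketch of what the masked TTB computed above feeds into (the
 * actual table walk is done by armv4_5_mmu_translate_va(), not here):
 * in the ARMv7 short-descriptor format, and assuming TTBCR.N == 0 so
 * the table is the full 16 KiB, the first-level descriptor for a
 * virtual address sits at a word index given by VA bits [31:20]. */
static inline uint32_t first_lvl_desc_addr_sketch(uint32_t ttb, uint32_t va)
{
	return (ttb & 0xffffc000) | ((va >> 20) << 2);
}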

static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	uint32_t cp15_control;
	int retval;

	/* read CP15 system control register (SCTLR): MRC p15,0,<Rt>,c1,c0,0 */
	retval = armv7a->armv4_5_common.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cp15_control);
	if (retval != ERROR_OK)
		return retval;

	if (mmu)
		cp15_control &= ~0x1U;		/* clear M: MMU off */

	if (d_u_cache)
		cp15_control &= ~0x4U;		/* clear C: data/unified cache off */

	if (i_cache)
		cp15_control &= ~0x1000U;	/* clear I: instruction cache off */

	retval = armv7a->armv4_5_common.mcr(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			cp15_control);
	return retval;
}

static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	uint32_t cp15_control;
	int retval;

	/* read CP15 system control register (SCTLR): MRC p15,0,<Rt>,c1,c0,0 */
	retval = armv7a->armv4_5_common.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cp15_control);
	if (retval != ERROR_OK)
		return retval;

	if (mmu)
		cp15_control |= 0x1U;		/* set M: MMU on */

	if (d_u_cache)
		cp15_control |= 0x4U;		/* set C: data/unified cache on */

	if (i_cache)
		cp15_control |= 0x1000U;	/* set I: instruction cache on */

	retval = armv7a->armv4_5_common.mcr(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			cp15_control);
	return retval;
}
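
/* The masks used by the two helpers above are the standard SCTLR bit
 * positions; the named constants below are ours (not from any header)
 * and are shown purely for documentation -- the code above uses the
 * raw masks: */
#define SCTLR_MMU_EN	(1U << 0)	/* 0x1:    M, MMU enable */
#define SCTLR_DCACHE_EN	(1U << 2)	/* 0x4:    C, data/unified cache enable */
#define SCTLR_ICACHE_EN	(1U << 12)	/* 0x1000: I, instruction cache enable */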

static int cortex_a8_mmu(struct target *target, int *enabled)
{
	if (target->state != TARGET_HALTED) {
		LOG_ERROR("%s: target not halted", __func__);
		return ERROR_TARGET_INVALID;
	}

	*enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
	return ERROR_OK;
}

static int cortex_a8_virt2phys(struct target *target,
		uint32_t virt, uint32_t *phys)
{
	uint32_t cb;
	uint32_t ret;
	int retval;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* We assume that the virtual address space is split
	   between user and kernel in the Linux style:
	   0x00000000-0xbfffffff - user space
	   0xc0000000-0xffffffff - kernel space */
	if (virt < 0xc0000000)	/* Linux user space */
		cortex_a8->current_address_mode = ARM_MODE_USR;
	else			/* Linux kernel */
		cortex_a8->current_address_mode = ARM_MODE_SVC;

	retval = armv4_5_mmu_translate_va(target,
			&armv7a->armv4_5_mmu, virt, &cb, &ret);
	/* Reset the flag so that nobody uses a stale mode by mistake */
	cortex_a8->current_address_mode = ARM_MODE_ANY;
	if (retval != ERROR_OK)
		return retval;

	*phys = ret;
	return ERROR_OK;
}
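
/* Illustrative consequence of the hard-wired 3G/1G split above (the
 * addresses are made-up examples, and a Linux-style layout is assumed;
 * other OSes would need a different cutoff):
 *   virt2phys(target, 0x00010000, &pa) walks the user tables
 *       (get_ttb() then reads TTBR0);
 *   virt2phys(target, 0xc0008000, &pa) walks the kernel tables
 *       (get_ttb() then reads TTBR1). */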

COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	return armv4_5_handle_cache_info_command(CMD_CTX,
			&armv7a->armv4_5_mmu.armv4_5_cache);
}

COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
{
	struct target *target = get_current_target(CMD_CTX);
	if (!target_was_examined(target))
	{
		LOG_ERROR("target not examined yet");
		return ERROR_FAIL;
	}

	return cortex_a8_init_debug_access(target);
}

static const struct command_registration cortex_a8_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a8_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
	},
	{
		.name = "dbginit",
		.handler = cortex_a8_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
	},
	COMMAND_REGISTRATION_DONE
};

static const struct command_registration cortex_a8_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a8",
		.mode = COMMAND_ANY,
		.help = "Cortex-A8 command group",
		.chain = cortex_a8_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
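
/* Typical use from a config script or the OpenOCD telnet prompt
 * (sub-command names as registered above; the output format comes from
 * the generic armv4_5 cache code):
 *   > cortex_a8 dbginit
 *   > cortex_a8 cache_info
 */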

struct target_type cortexa8_target = {
	.name = "cortex_a8",

	.poll = cortex_a8_poll,
	.arch_state = armv7a_arch_state,

	.target_request_data = NULL,

	.halt = cortex_a8_halt,
	.resume = cortex_a8_resume,
	.step = cortex_a8_step,

	.assert_reset = cortex_a8_assert_reset,
	.deassert_reset = cortex_a8_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a8_read_memory,
	.write_memory = cortex_a8_write_memory,
	.bulk_write_memory = cortex_a8_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a8_add_breakpoint,
	.remove_breakpoint = cortex_a8_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a8_command_handlers,
	.target_create = cortex_a8_target_create,
	.init_target = cortex_a8_init_target,
	.examine = cortex_a8_examine,

	.read_phys_memory = cortex_a8_read_phys_memory,
	.write_phys_memory = cortex_a8_write_phys_memory,
	.mmu = cortex_a8_mmu,
	.virt2phys = cortex_a8_virt2phys,
};