cortex_a: rename cortex_a8.c/h to cortex_a.c/h
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A8(tm) TRM, ARM DDI 0344H *
33 * Cortex-A9(tm) TRM, ARM DDI 0407F *
34 * *
35 ***************************************************************************/
36 #ifdef HAVE_CONFIG_H
37 #include "config.h"
38 #endif
39
40 #include "breakpoints.h"
41 #include "cortex_a.h"
42 #include "register.h"
43 #include "target_request.h"
44 #include "target_type.h"
45 #include "arm_opcodes.h"
46 #include <helper/time_support.h>
47
48 static int cortex_a8_poll(struct target *target);
49 static int cortex_a8_debug_entry(struct target *target);
50 static int cortex_a8_restore_context(struct target *target, bool bpwp);
51 static int cortex_a8_set_breakpoint(struct target *target,
52 struct breakpoint *breakpoint, uint8_t matchmode);
53 static int cortex_a8_unset_breakpoint(struct target *target,
54 struct breakpoint *breakpoint);
55 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
56 uint32_t *value, int regnum);
57 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
58 uint32_t value, int regnum);
59 static int cortex_a8_mmu(struct target *target, int *enabled);
60 static int cortex_a8_virt2phys(struct target *target,
61 uint32_t virt, uint32_t *phys);
62 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
63 int d_u_cache, int i_cache);
64 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
65 int d_u_cache, int i_cache);
66 static int cortex_a8_get_ttb(struct target *target, uint32_t *result);
67
68
69 /*
70 * FIXME do topology discovery using the ROM; don't
71 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
72 * cores, with different AP numbering ... don't use a #define
73 * for these numbers, use per-core armv7a state.
74 */
75 #define swjdp_memoryap 0
76 #define swjdp_debugap 1
77
78 /*
79 * Cortex-A8 basic debug access; very low level, assumes state has been saved
80 */
81 static int cortex_a8_init_debug_access(struct target *target)
82 {
83 struct armv7a_common *armv7a = target_to_armv7a(target);
84 struct adiv5_dap *swjdp = &armv7a->dap;
85 int retval;
86 uint32_t dummy;
87
88 LOG_DEBUG(" ");
89
90 /* Unlock the debug registers for modification by writing the CoreSight lock access key */
91 /* The debug port might be uninitialised, so try twice */
92 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
93 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
94 if (retval != ERROR_OK)
95 {
96 /* try again */
97 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
98 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
99 if (retval == ERROR_OK)
100 {
101 LOG_USER("Unlocking debug access failed on the first try, but succeeded on the second.");
102 }
103 }
104 if (retval != ERROR_OK)
105 return retval;
106 /* Clear Sticky Power Down status Bit in PRSR to enable access to
107 the registers in the Core Power Domain */
108 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
109 armv7a->debug_base + CPUDBG_PRSR, &dummy);
110 if (retval != ERROR_OK)
111 return retval;
112
113 /* Enabling of instruction execution in debug mode is done in debug_entry code */
114
115 /* Resync breakpoint registers */
116
117 /* Since this is likely called from init or reset, update target state information */
118 return cortex_a8_poll(target);
119 }
120
121 /* To reduce needless round-trips, pass in a pointer to the current
122 * DSCR value. Initialize it to zero if you just need to know the
123 * value on return from this function; or DSCR_INSTR_COMP if you
124 * happen to know that no instruction is pending.
125 */
126 static int cortex_a8_exec_opcode(struct target *target,
127 uint32_t opcode, uint32_t *dscr_p)
128 {
129 uint32_t dscr;
130 int retval;
131 struct armv7a_common *armv7a = target_to_armv7a(target);
132 struct adiv5_dap *swjdp = &armv7a->dap;
133
134 dscr = dscr_p ? *dscr_p : 0;
135
136 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
137
138 /* Wait for InstrCompl bit to be set */
139 long long then = timeval_ms();
140 while ((dscr & DSCR_INSTR_COMP) == 0)
141 {
142 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
143 armv7a->debug_base + CPUDBG_DSCR, &dscr);
144 if (retval != ERROR_OK)
145 {
146 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
147 return retval;
148 }
149 if (timeval_ms() > then + 1000)
150 {
151 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
152 return ERROR_FAIL;
153 }
154 }
155
156 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
157 armv7a->debug_base + CPUDBG_ITR, opcode);
158 if (retval != ERROR_OK)
159 return retval;
160
161 then = timeval_ms();
162 do
163 {
164 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
165 armv7a->debug_base + CPUDBG_DSCR, &dscr);
166 if (retval != ERROR_OK)
167 {
168 LOG_ERROR("Could not read DSCR register");
169 return retval;
170 }
171 if (timeval_ms() > then + 1000)
172 {
173 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
174 return ERROR_FAIL;
175 }
176 }
177 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
178
179 if (dscr_p)
180 *dscr_p = dscr;
181
182 return retval;
183 }
184
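/* Illustrative sketch (not part of the driver; names are made up): chaining
 * two cortex_a8_exec_opcode() calls through one DSCR copy, per the comment
 * above the function.  Initializing dscr to 0 makes the first call poll DSCR
 * itself; on return DSCR_INSTR_COMP is known to be set, so the second call
 * skips the initial read.  Kept inside #if 0 as documentation only.
 */
#if 0
static int example_exec_two_nops(struct target *target)
{
	uint32_t dscr = 0;	/* DSCR state unknown: first call will read it */
	int retval;

	retval = cortex_a8_exec_opcode(target, 0xE1A00000 /* MOV r0, r0 */, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* dscr holds the last DSCR value; no extra round-trip before this one */
	return cortex_a8_exec_opcode(target, 0xE1A00000 /* MOV r0, r0 */, &dscr);
}
#endif
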
185 /**************************************************************************
186 Read the core registers with very few exec_opcode calls. Fast, but needs a
187 work_area; this can cause problems when the MMU is active.
188 **************************************************************************/
189 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
190 uint32_t * regfile)
191 {
192 int retval = ERROR_OK;
193 struct armv7a_common *armv7a = target_to_armv7a(target);
194 struct adiv5_dap *swjdp = &armv7a->dap;
195
196 retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
197 if (retval != ERROR_OK)
198 return retval;
199 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
200 if (retval != ERROR_OK)
201 return retval;
202 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL); /* STMIA r0, {r1-r15} */
203 if (retval != ERROR_OK)
204 return retval;
205
206 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
207 (uint8_t *)(&regfile[1]), 4*15, address);
208
209 return retval;
210 }
211
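/* Register numbering used by the two DAP core register helpers below
 * (a summary derived from the code, added for clarity):
 *   0..14 -> r0..r14,   15 -> PC,   16 -> CPSR,   17 -> SPSR.
 * Register numbers above 17 are silently ignored.
 */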
212 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
213 uint32_t *value, int regnum)
214 {
215 int retval = ERROR_OK;
216 uint8_t reg = regnum&0xFF;
217 uint32_t dscr = 0;
218 struct armv7a_common *armv7a = target_to_armv7a(target);
219 struct adiv5_dap *swjdp = &armv7a->dap;
220
221 if (reg > 17)
222 return retval;
223
224 if (reg < 15)
225 {
226 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
227 retval = cortex_a8_exec_opcode(target,
228 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
229 &dscr);
230 if (retval != ERROR_OK)
231 return retval;
232 }
233 else if (reg == 15)
234 {
235 /* "MOV r0, r15"; then move r0 to DCCTX */
236 retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
237 if (retval != ERROR_OK)
238 return retval;
239 retval = cortex_a8_exec_opcode(target,
240 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
241 &dscr);
242 if (retval != ERROR_OK)
243 return retval;
244 }
245 else
246 {
247 /* "MRS r0, CPSR" or "MRS r0, SPSR"
248 * then move r0 to DCCTX
249 */
250 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
251 if (retval != ERROR_OK)
252 return retval;
253 retval = cortex_a8_exec_opcode(target,
254 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
255 &dscr);
256 if (retval != ERROR_OK)
257 return retval;
258 }
259
260 /* Wait for DTRTXfull, then read DTRTX */
261 long long then = timeval_ms();
262 while ((dscr & DSCR_DTR_TX_FULL) == 0)
263 {
264 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
265 armv7a->debug_base + CPUDBG_DSCR, &dscr);
266 if (retval != ERROR_OK)
267 return retval;
268 if (timeval_ms() > then + 1000)
269 {
270 LOG_ERROR("Timeout waiting for DTRTXfull");
271 return ERROR_FAIL;
272 }
273 }
274
275 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
276 armv7a->debug_base + CPUDBG_DTRTX, value);
277 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
278
279 return retval;
280 }
281
282 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
283 uint32_t value, int regnum)
284 {
285 int retval = ERROR_OK;
286 uint8_t Rd = regnum&0xFF;
287 uint32_t dscr;
288 struct armv7a_common *armv7a = target_to_armv7a(target);
289 struct adiv5_dap *swjdp = &armv7a->dap;
290
291 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
292
293 /* Check that DCCRX is not full */
294 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
295 armv7a->debug_base + CPUDBG_DSCR, &dscr);
296 if (retval != ERROR_OK)
297 return retval;
298 if (dscr & DSCR_DTR_RX_FULL)
299 {
300 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
301 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
302 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
303 &dscr);
304 if (retval != ERROR_OK)
305 return retval;
306 }
307
308 if (Rd > 17)
309 return retval;
310
311 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
312 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
313 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
314 armv7a->debug_base + CPUDBG_DTRRX, value);
315 if (retval != ERROR_OK)
316 return retval;
317
318 if (Rd < 15)
319 {
320 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
321 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
322 &dscr);
323 if (retval != ERROR_OK)
324 return retval;
325 }
326 else if (Rd == 15)
327 {
328 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
329 * then "mov r15, r0"
330 */
331 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
332 &dscr);
333 if (retval != ERROR_OK)
334 return retval;
335 retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
336 if (retval != ERROR_OK)
337 return retval;
338 }
339 else
340 {
341 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
342 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
343 */
344 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
345 &dscr);
346 if (retval != ERROR_OK)
347 return retval;
348 retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
349 &dscr);
350 if (retval != ERROR_OK)
351 return retval;
352
353 /* "Prefetch flush" after modifying execution status in CPSR */
354 if (Rd == 16)
355 {
356 retval = cortex_a8_exec_opcode(target,
357 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
358 &dscr);
359 if (retval != ERROR_OK)
360 return retval;
361 }
362 }
363
364 return retval;
365 }
366
367 /* Write to memory mapped registers directly with no cache or mmu handling */
368 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
369 {
370 int retval;
371 struct armv7a_common *armv7a = target_to_armv7a(target);
372 struct adiv5_dap *swjdp = &armv7a->dap;
373
374 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap, address, value);
375
376 return retval;
377 }
378
379 /*
380 * Cortex-A8 implementation of Debug Programmer's Model
381 *
382 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
383 * so there's no need to poll for it before executing an instruction.
384 *
385 * NOTE that in several of these cases the "stall" mode might be useful.
386 * It'd let us queue a few operations together... prepare/finish might
387 * be the places to enable/disable that mode.
388 */
389
390 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
391 {
392 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
393 }
394
395 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
396 {
397 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
398 return mem_ap_sel_write_u32(&a8->armv7a_common.dap, swjdp_debugap,
399 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
400 }
401
402 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
403 uint32_t *dscr_p)
404 {
405 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
406 uint32_t dscr = DSCR_INSTR_COMP;
407 int retval;
408
409 if (dscr_p)
410 dscr = *dscr_p;
411
412 /* Wait for DTRTXfull */
413 long long then = timeval_ms();
414 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
415 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
416 a8->armv7a_common.debug_base + CPUDBG_DSCR,
417 &dscr);
418 if (retval != ERROR_OK)
419 return retval;
420 if (timeval_ms() > then + 1000)
421 {
422 LOG_ERROR("Timeout waiting for read dcc");
423 return ERROR_FAIL;
424 }
425 }
426
427 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
428 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
429 if (retval != ERROR_OK)
430 return retval;
431 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
432
433 if (dscr_p)
434 *dscr_p = dscr;
435
436 return retval;
437 }
438
439 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
440 {
441 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
442 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
443 uint32_t dscr;
444 int retval;
445
446 /* set up invariant: INSTR_COMP is set after every DPM operation */
447 long long then = timeval_ms();
448 for (;;)
449 {
450 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
451 a8->armv7a_common.debug_base + CPUDBG_DSCR,
452 &dscr);
453 if (retval != ERROR_OK)
454 return retval;
455 if ((dscr & DSCR_INSTR_COMP) != 0)
456 break;
457 if (timeval_ms() > then + 1000)
458 {
459 LOG_ERROR("Timeout waiting for dpm prepare");
460 return ERROR_FAIL;
461 }
462 }
463
464 /* this "should never happen" ... */
465 if (dscr & DSCR_DTR_RX_FULL) {
466 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
467 /* Clear DCCRX */
468 retval = cortex_a8_exec_opcode(
469 a8->armv7a_common.armv4_5_common.target,
470 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
471 &dscr);
472 if (retval != ERROR_OK)
473 return retval;
474 }
475
476 return retval;
477 }
478
479 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
480 {
481 /* REVISIT what could be done here? */
482 return ERROR_OK;
483 }
484
485 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
486 uint32_t opcode, uint32_t data)
487 {
488 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
489 int retval;
490 uint32_t dscr = DSCR_INSTR_COMP;
491
492 retval = cortex_a8_write_dcc(a8, data);
493 if (retval != ERROR_OK)
494 return retval;
495
496 return cortex_a8_exec_opcode(
497 a8->armv7a_common.armv4_5_common.target,
498 opcode,
499 &dscr);
500 }
501
502 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
503 uint32_t opcode, uint32_t data)
504 {
505 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
506 uint32_t dscr = DSCR_INSTR_COMP;
507 int retval;
508
509 retval = cortex_a8_write_dcc(a8, data);
510 if (retval != ERROR_OK)
511 return retval;
512
513 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
514 retval = cortex_a8_exec_opcode(
515 a8->armv7a_common.armv4_5_common.target,
516 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
517 &dscr);
518 if (retval != ERROR_OK)
519 return retval;
520
521 /* then the opcode, taking data from R0 */
522 retval = cortex_a8_exec_opcode(
523 a8->armv7a_common.armv4_5_common.target,
524 opcode,
525 &dscr);
526
527 return retval;
528 }
529
530 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
531 {
532 struct target *target = dpm->arm->target;
533 uint32_t dscr = DSCR_INSTR_COMP;
534
535 /* "Prefetch flush" after modifying execution status in CPSR */
536 return cortex_a8_exec_opcode(target,
537 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
538 &dscr);
539 }
540
541 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
542 uint32_t opcode, uint32_t *data)
543 {
544 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
545 int retval;
546 uint32_t dscr = DSCR_INSTR_COMP;
547
548 /* the opcode, writing data to DCC */
549 retval = cortex_a8_exec_opcode(
550 a8->armv7a_common.armv4_5_common.target,
551 opcode,
552 &dscr);
553 if (retval != ERROR_OK)
554 return retval;
555
556 return cortex_a8_read_dcc(a8, data, &dscr);
557 }
558
559
560 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
561 uint32_t opcode, uint32_t *data)
562 {
563 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
564 uint32_t dscr = DSCR_INSTR_COMP;
565 int retval;
566
567 /* the opcode, writing data to R0 */
568 retval = cortex_a8_exec_opcode(
569 a8->armv7a_common.armv4_5_common.target,
570 opcode,
571 &dscr);
572 if (retval != ERROR_OK)
573 return retval;
574
575 /* write R0 to DCC */
576 retval = cortex_a8_exec_opcode(
577 a8->armv7a_common.armv4_5_common.target,
578 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
579 &dscr);
580 if (retval != ERROR_OK)
581 return retval;
582
583 return cortex_a8_read_dcc(a8, data, &dscr);
584 }
585
586 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
587 uint32_t addr, uint32_t control)
588 {
589 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
590 uint32_t vr = a8->armv7a_common.debug_base;
591 uint32_t cr = a8->armv7a_common.debug_base;
592 int retval;
593
594 switch (index_t) {
595 case 0 ... 15: /* breakpoints */
596 vr += CPUDBG_BVR_BASE;
597 cr += CPUDBG_BCR_BASE;
598 break;
599 case 16 ... 31: /* watchpoints */
600 vr += CPUDBG_WVR_BASE;
601 cr += CPUDBG_WCR_BASE;
602 index_t -= 16;
603 break;
604 default:
605 return ERROR_FAIL;
606 }
607 vr += 4 * index_t;
608 cr += 4 * index_t;
609
610 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
611 (unsigned) vr, (unsigned) cr);
612
613 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
614 vr, addr);
615 if (retval != ERROR_OK)
616 return retval;
617 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
618 cr, control);
619 return retval;
620 }
621
622 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
623 {
624 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
625 uint32_t cr;
626
627 switch (index_t) {
628 case 0 ... 15:
629 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
630 break;
631 case 16 ... 31:
632 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
633 index_t -= 16;
634 break;
635 default:
636 return ERROR_FAIL;
637 }
638 cr += 4 * index_t;
639
640 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
641
642 /* clear control register */
643 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
644 }
645
646 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
647 {
648 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
649 int retval;
650
651 dpm->arm = &a8->armv7a_common.armv4_5_common;
652 dpm->didr = didr;
653
654 dpm->prepare = cortex_a8_dpm_prepare;
655 dpm->finish = cortex_a8_dpm_finish;
656
657 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
658 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
659 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
660
661 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
662 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
663
664 dpm->bpwp_enable = cortex_a8_bpwp_enable;
665 dpm->bpwp_disable = cortex_a8_bpwp_disable;
666
667 retval = arm_dpm_setup(dpm);
668 if (retval == ERROR_OK)
669 retval = arm_dpm_initialize(dpm);
670
671 return retval;
672 }
673
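/* Illustrative sketch (not part of the driver; names are made up): once
 * cortex_a8_dpm_setup() has installed the hooks above, CP15 accesses can be
 * run through them, e.g. reading SCTLR via r0.  Kept inside #if 0 as
 * documentation only.
 */
#if 0
static int example_dpm_read_sctlr(struct arm_dpm *dpm, uint32_t *sctlr)
{
	int retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* MRC p15, 0, r0, c1, c0, 0 ; r0 is then moved to the DCC and read */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 1, 0, 0), sctlr);

	dpm->finish(dpm);
	return retval;
}
#endif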
674
675 /*
676 * Cortex-A8 Run control
677 */
678
679 static int cortex_a8_poll(struct target *target)
680 {
681 int retval = ERROR_OK;
682 uint32_t dscr;
683 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
684 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
685 struct adiv5_dap *swjdp = &armv7a->dap;
686 enum target_state prev_target_state = target->state;
687
688 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
689 armv7a->debug_base + CPUDBG_DSCR, &dscr);
690 if (retval != ERROR_OK)
691 {
692 return retval;
693 }
694 cortex_a8->cpudbg_dscr = dscr;
695
696 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED))
697 {
698 if (prev_target_state != TARGET_HALTED)
699 {
700 /* We have a halting debug event */
701 LOG_DEBUG("Target halted");
702 target->state = TARGET_HALTED;
703 if ((prev_target_state == TARGET_RUNNING)
704 || (prev_target_state == TARGET_RESET))
705 {
706 retval = cortex_a8_debug_entry(target);
707 if (retval != ERROR_OK)
708 return retval;
709
710 target_call_event_callbacks(target,
711 TARGET_EVENT_HALTED);
712 }
713 if (prev_target_state == TARGET_DEBUG_RUNNING)
714 {
715 LOG_DEBUG(" ");
716
717 retval = cortex_a8_debug_entry(target);
718 if (retval != ERROR_OK)
719 return retval;
720
721 target_call_event_callbacks(target,
722 TARGET_EVENT_DEBUG_HALTED);
723 }
724 }
725 }
726 else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
727 {
728 target->state = TARGET_RUNNING;
729 }
730 else
731 {
732 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
733 target->state = TARGET_UNKNOWN;
734 }
735
736 return retval;
737 }
738
739 static int cortex_a8_halt(struct target *target)
740 {
741 int retval = ERROR_OK;
742 uint32_t dscr;
743 struct armv7a_common *armv7a = target_to_armv7a(target);
744 struct adiv5_dap *swjdp = &armv7a->dap;
745
746 /*
747 * Tell the core to be halted by writing DRCR with 0x1
748 * and then wait for the core to be halted.
749 */
750 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
751 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
752 if (retval != ERROR_OK)
753 return retval;
754
755 /*
756 * enter halting debug mode
757 */
758 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
759 armv7a->debug_base + CPUDBG_DSCR, &dscr);
760 if (retval != ERROR_OK)
761 return retval;
762
763 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
764 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
765 if (retval != ERROR_OK)
766 return retval;
767
768 long long then = timeval_ms();
769 for (;;)
770 {
771 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
772 armv7a->debug_base + CPUDBG_DSCR, &dscr);
773 if (retval != ERROR_OK)
774 return retval;
775 if ((dscr & DSCR_CORE_HALTED) != 0)
776 {
777 break;
778 }
779 if (timeval_ms() > then + 1000)
780 {
781 LOG_ERROR("Timeout waiting for halt");
782 return ERROR_FAIL;
783 }
784 }
785
786 target->debug_reason = DBG_REASON_DBGRQ;
787
788 return ERROR_OK;
789 }
790
791 static int cortex_a8_resume(struct target *target, int current,
792 uint32_t address, int handle_breakpoints, int debug_execution)
793 {
794 struct armv7a_common *armv7a = target_to_armv7a(target);
795 struct arm *armv4_5 = &armv7a->armv4_5_common;
796 struct adiv5_dap *swjdp = &armv7a->dap;
797 int retval;
798
799 // struct breakpoint *breakpoint = NULL;
800 uint32_t resume_pc, dscr;
801
802 if (!debug_execution)
803 target_free_all_working_areas(target);
804
805 #if 0
806 if (debug_execution)
807 {
808 /* Disable interrupts */
809 /* We disable interrupts in the PRIMASK register instead of
810 * masking with C_MASKINTS,
811 * This is probably the same issue as Cortex-M3 Errata 377493:
812 * C_MASKINTS in parallel with disabled interrupts can cause
813 * local faults to not be taken. */
814 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
815 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
816 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
817
818 /* Make sure we are in Thumb mode */
819 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
820 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
821 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
822 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
823 }
824 #endif
825
826 /* current = 1: continue on current pc, otherwise continue at <address> */
827 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
828 if (!current)
829 resume_pc = address;
830
831 /* Make sure that the ARMv7 gdb Thumb fixups do not
832 * kill the return address
833 */
834 switch (armv4_5->core_state)
835 {
836 case ARM_STATE_ARM:
837 resume_pc &= 0xFFFFFFFC;
838 break;
839 case ARM_STATE_THUMB:
840 case ARM_STATE_THUMB_EE:
841 /* When the return address is loaded into PC
842 * bit 0 must be 1 to stay in Thumb state
843 */
844 resume_pc |= 0x1;
845 break;
846 case ARM_STATE_JAZELLE:
847 LOG_ERROR("How do I resume into Jazelle state??");
848 return ERROR_FAIL;
849 }
850 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
851 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
852 armv4_5->pc->dirty = 1;
853 armv4_5->pc->valid = 1;
854
855 retval = cortex_a8_restore_context(target, handle_breakpoints);
856 if (retval != ERROR_OK)
857 return retval;
858
859 #if 0
860 /* the front-end may request us not to handle breakpoints */
861 if (handle_breakpoints)
862 {
863 /* Single step past breakpoint at current address */
864 if ((breakpoint = breakpoint_find(target, resume_pc)))
865 {
866 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
867 cortex_m3_unset_breakpoint(target, breakpoint);
868 cortex_m3_single_step_core(target);
869 cortex_m3_set_breakpoint(target, breakpoint);
870 }
871 }
872
873 #endif
874
875 /*
876 * Restart core and wait for it to be started. Clear ITRen and sticky
877 * exception flags: see ARMv7 ARM, C5.9.
878 *
879 * REVISIT: for single stepping, we probably want to
880 * disable IRQs by default, with optional override...
881 */
882
883 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
884 armv7a->debug_base + CPUDBG_DSCR, &dscr);
885 if (retval != ERROR_OK)
886 return retval;
887
888 if ((dscr & DSCR_INSTR_COMP) == 0)
889 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
890
891 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
892 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
893 if (retval != ERROR_OK)
894 return retval;
895
896 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
897 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART | DRCR_CLEAR_EXCEPTIONS);
898 if (retval != ERROR_OK)
899 return retval;
900
901 long long then = timeval_ms();
902 for (;;)
903 {
904 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
905 armv7a->debug_base + CPUDBG_DSCR, &dscr);
906 if (retval != ERROR_OK)
907 return retval;
908 if ((dscr & DSCR_CORE_RESTARTED) != 0)
909 break;
910 if (timeval_ms() > then + 1000)
911 {
912 LOG_ERROR("Timeout waiting for resume");
913 return ERROR_FAIL;
914 }
915 }
916
917 target->debug_reason = DBG_REASON_NOTHALTED;
918 target->state = TARGET_RUNNING;
919
920 /* registers are now invalid */
921 register_cache_invalidate(armv4_5->core_cache);
922
923 if (!debug_execution)
924 {
925 target->state = TARGET_RUNNING;
926 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
927 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
928 }
929 else
930 {
931 target->state = TARGET_DEBUG_RUNNING;
932 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
933 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
934 }
935
936 return ERROR_OK;
937 }
938
939 static int cortex_a8_debug_entry(struct target *target)
940 {
941 int i;
942 uint32_t regfile[16], cpsr, dscr;
943 int retval = ERROR_OK;
944 struct working_area *regfile_working_area = NULL;
945 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
946 struct armv7a_common *armv7a = target_to_armv7a(target);
947 struct arm *armv4_5 = &armv7a->armv4_5_common;
948 struct adiv5_dap *swjdp = &armv7a->dap;
949 struct reg *reg;
950
951 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
952
953 /* REVISIT surely we should not re-read DSCR !! */
954 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
955 armv7a->debug_base + CPUDBG_DSCR, &dscr);
956 if (retval != ERROR_OK)
957 return retval;
958
959 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
960 * imprecise data aborts get discarded by issuing a Data
961 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
962 */
963
964 /* Enable the ITR execution once we are in debug mode */
965 dscr |= DSCR_ITR_EN;
966 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
967 armv7a->debug_base + CPUDBG_DSCR, dscr);
968 if (retval != ERROR_OK)
969 return retval;
970
971 /* Examine debug reason */
972 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
973
974 /* save address of instruction that triggered the watchpoint? */
975 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
976 uint32_t wfar;
977
978 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
979 armv7a->debug_base + CPUDBG_WFAR,
980 &wfar);
981 if (retval != ERROR_OK)
982 return retval;
983 arm_dpm_report_wfar(&armv7a->dpm, wfar);
984 }
985
986 /* REVISIT fast_reg_read is never set ... */
987
988 /* Examine target state and mode */
989 if (cortex_a8->fast_reg_read)
990 target_alloc_working_area(target, 64, &regfile_working_area);
991
992 /* First load the registers accessible through the core debug port */
993 if (!regfile_working_area)
994 {
995 retval = arm_dpm_read_current_registers(&armv7a->dpm);
996 }
997 else
998 {
999 retval = cortex_a8_read_regs_through_mem(target,
1000 regfile_working_area->address, regfile);
1001
1002 target_free_working_area(target, regfile_working_area);
1003 if (retval != ERROR_OK)
1004 {
1005 return retval;
1006 }
1007
1008 /* read Current PSR */
1009 retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
1010 if (retval != ERROR_OK)
1011 return retval;
1012
1013 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1014
1015 arm_set_cpsr(armv4_5, cpsr);
1016
1017 /* update cache */
1018 for (i = 0; i <= ARM_PC; i++)
1019 {
1020 reg = arm_reg_current(armv4_5, i);
1021
1022 buf_set_u32(reg->value, 0, 32, regfile[i]);
1023 reg->valid = 1;
1024 reg->dirty = 0;
1025 }
1026
1027 /* Fixup PC Resume Address */
1028 if (cpsr & (1 << 5))
1029 {
1030 // T bit set for Thumb or ThumbEE state
1031 regfile[ARM_PC] -= 4;
1032 }
1033 else
1034 {
1035 // ARM state
1036 regfile[ARM_PC] -= 8;
1037 }
1038
1039 reg = armv4_5->pc;
1040 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1041 reg->dirty = reg->valid;
1042 }
1043
1044 #if 0
1045 /* TODO, Move this */
1046 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1047 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1048 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1049
1050 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1051 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1052
1053 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1054 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1055 #endif
1056
1057 /* Are we in an exception handler */
1058 // armv4_5->exception_number = 0;
1059 if (armv7a->post_debug_entry)
1060 {
1061 retval = armv7a->post_debug_entry(target);
1062 if (retval != ERROR_OK)
1063 return retval;
1064 }
1065
1066 return retval;
1067 }
1068
1069 static int cortex_a8_post_debug_entry(struct target *target)
1070 {
1071 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1072 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1073 int retval;
1074
1075 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1076 retval = armv7a->armv4_5_common.mrc(target, 15,
1077 0, 0, /* op1, op2 */
1078 1, 0, /* CRn, CRm */
1079 &cortex_a8->cp15_control_reg);
1080 if (retval != ERROR_OK)
1081 return retval;
1082 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1083
1084 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
1085 {
1086 uint32_t cache_type_reg;
1087
1088 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1089 retval = armv7a->armv4_5_common.mrc(target, 15,
1090 0, 1, /* op1, op2 */
1091 0, 0, /* CRn, CRm */
1092 &cache_type_reg);
1093 if (retval != ERROR_OK)
1094 return retval;
1095 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
1096
1097 /* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
1098 armv4_5_identify_cache(cache_type_reg,
1099 &armv7a->armv4_5_mmu.armv4_5_cache);
1100 }
1101
1102 armv7a->armv4_5_mmu.mmu_enabled =
1103 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0; /* SCTLR.M: MMU enable */
1104 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1105 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0; /* SCTLR.C: D/unified cache enable */
1106 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1107 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0; /* SCTLR.I: I-cache enable */
1108
1109 return ERROR_OK;
1110 }
1111
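/* Single stepping on the Cortex-A8 (summary of the code below): program a
 * hardware breakpoint in "IVA mismatch" mode at the current PC, resume so
 * the core halts again as soon as it fetches from any other address, poll
 * until it halts, then remove the temporary breakpoint and restore any real
 * breakpoint that was parked at that address.
 */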
1112 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1113 int handle_breakpoints)
1114 {
1115 struct armv7a_common *armv7a = target_to_armv7a(target);
1116 struct arm *armv4_5 = &armv7a->armv4_5_common;
1117 struct breakpoint *breakpoint = NULL;
1118 struct breakpoint stepbreakpoint;
1119 struct reg *r;
1120 int retval;
1121
1122 if (target->state != TARGET_HALTED)
1123 {
1124 LOG_WARNING("target not halted");
1125 return ERROR_TARGET_NOT_HALTED;
1126 }
1127
1128 /* current = 1: continue on current pc, otherwise continue at <address> */
1129 r = armv4_5->pc;
1130 if (!current)
1131 {
1132 buf_set_u32(r->value, 0, 32, address);
1133 }
1134 else
1135 {
1136 address = buf_get_u32(r->value, 0, 32);
1137 }
1138
1139 /* The front-end may request us not to handle breakpoints.
1140 * But since the Cortex-A8 uses a hardware breakpoint for single stepping,
1141 * we MUST handle breakpoints.
1142 */
1143 handle_breakpoints = 1;
1144 if (handle_breakpoints) {
1145 breakpoint = breakpoint_find(target, address);
1146 if (breakpoint)
1147 cortex_a8_unset_breakpoint(target, breakpoint);
1148 }
1149
1150 /* Setup single step breakpoint */
1151 stepbreakpoint.address = address;
1152 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1153 ? 2 : 4;
1154 stepbreakpoint.type = BKPT_HARD;
1155 stepbreakpoint.set = 0;
1156
1157 /* Break on IVA mismatch */
1158 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1159
1160 target->debug_reason = DBG_REASON_SINGLESTEP;
1161
1162 retval = cortex_a8_resume(target, 1, address, 0, 0);
1163 if (retval != ERROR_OK)
1164 return retval;
1165
1166 long long then = timeval_ms();
1167 while (target->state != TARGET_HALTED)
1168 {
1169 retval = cortex_a8_poll(target);
1170 if (retval != ERROR_OK)
1171 return retval;
1172 if (timeval_ms() > then + 1000)
1173 {
1174 LOG_ERROR("timeout waiting for target halt");
1175 return ERROR_FAIL;
1176 }
1177 }
1178
1179 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1180
1181 target->debug_reason = DBG_REASON_BREAKPOINT;
1182
1183 if (breakpoint)
1184 cortex_a8_set_breakpoint(target, breakpoint, 0);
1185
1186 if (target->state != TARGET_HALTED)
1187 LOG_DEBUG("target stepped");
1188
1189 return ERROR_OK;
1190 }
1191
1192 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1193 {
1194 struct armv7a_common *armv7a = target_to_armv7a(target);
1195
1196 LOG_DEBUG(" ");
1197
1198 if (armv7a->pre_restore_context)
1199 armv7a->pre_restore_context(target);
1200
1201 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1202 }
1203
1204
1205 /*
1206 * Cortex-A8 Breakpoint and watchpoint functions
1207 */
1208
1209 /* Setup hardware Breakpoint Register Pair */
1210 static int cortex_a8_set_breakpoint(struct target *target,
1211 struct breakpoint *breakpoint, uint8_t matchmode)
1212 {
1213 int retval;
1214 int brp_i=0;
1215 uint32_t control;
1216 uint8_t byte_addr_select = 0x0F;
1217 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1218 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1219 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1220
1221 if (breakpoint->set)
1222 {
1223 LOG_WARNING("breakpoint already set");
1224 return ERROR_OK;
1225 }
1226
1227 if (breakpoint->type == BKPT_HARD)
1228 {
1229 while ((brp_i < cortex_a8->brp_num) && brp_list[brp_i].used)
1230 brp_i++;
1231 if (brp_i >= cortex_a8->brp_num)
1232 {
1233 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1234 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1235 }
1236 breakpoint->set = brp_i + 1;
1237 if (breakpoint->length == 2)
1238 {
1239 byte_addr_select = (3 << (breakpoint->address & 0x02));
1240 }
1241 control = ((matchmode & 0x7) << 20) /* DBGBCR.BT: match vs. mismatch mode */
1242 | (byte_addr_select << 5) /* DBGBCR.BAS: byte address select */
1243 | (3 << 1) | 1; /* DBGBCR.PMC: all modes; DBGBCR.E: enable */
1244 brp_list[brp_i].used = 1;
1245 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1246 brp_list[brp_i].control = control;
1247 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1248 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1249 brp_list[brp_i].value);
1250 if (retval != ERROR_OK)
1251 return retval;
1252 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1253 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1254 brp_list[brp_i].control);
1255 if (retval != ERROR_OK)
1256 return retval;
1257 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1258 brp_list[brp_i].control,
1259 brp_list[brp_i].value);
1260 }
1261 else if (breakpoint->type == BKPT_SOFT)
1262 {
1263 uint8_t code[4];
1264 if (breakpoint->length == 2)
1265 {
1266 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1267 }
1268 else
1269 {
1270 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1271 }
1272 retval = target->type->read_memory(target,
1273 breakpoint->address & 0xFFFFFFFE,
1274 breakpoint->length, 1,
1275 breakpoint->orig_instr);
1276 if (retval != ERROR_OK)
1277 return retval;
1278 retval = target->type->write_memory(target,
1279 breakpoint->address & 0xFFFFFFFE,
1280 breakpoint->length, 1, code);
1281 if (retval != ERROR_OK)
1282 return retval;
1283 breakpoint->set = 0x11; /* Any nice value but 0 */
1284 }
1285
1286 return ERROR_OK;
1287 }
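
/* Note on the matchmode parameter above, as used in this file: 0x00 requests
 * an exact instruction address match (normal breakpoints, see
 * cortex_a8_add_breakpoint()), while 0x04 requests an address *mismatch*,
 * which cortex_a8_step() uses to implement single stepping.  The value ends
 * up in the type field of the breakpoint control register.
 */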
1288
1289 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1290 {
1291 int retval;
1292 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1293 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1294 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1295
1296 if (!breakpoint->set)
1297 {
1298 LOG_WARNING("breakpoint not set");
1299 return ERROR_OK;
1300 }
1301
1302 if (breakpoint->type == BKPT_HARD)
1303 {
1304 int brp_i = breakpoint->set - 1;
1305 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1306 {
1307 LOG_DEBUG("Invalid BRP number in breakpoint");
1308 return ERROR_OK;
1309 }
1310 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1311 brp_list[brp_i].control, brp_list[brp_i].value);
1312 brp_list[brp_i].used = 0;
1313 brp_list[brp_i].value = 0;
1314 brp_list[brp_i].control = 0;
1315 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1316 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1317 brp_list[brp_i].control);
1318 if (retval != ERROR_OK)
1319 return retval;
1320 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1321 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1322 brp_list[brp_i].value);
1323 if (retval != ERROR_OK)
1324 return retval;
1325 }
1326 else
1327 {
1328 /* restore original instruction (kept in target endianness) */
1329 if (breakpoint->length == 4)
1330 {
1331 retval = target->type->write_memory(target,
1332 breakpoint->address & 0xFFFFFFFE,
1333 4, 1, breakpoint->orig_instr);
1334 if (retval != ERROR_OK)
1335 return retval;
1336 }
1337 else
1338 {
1339 retval = target->type->write_memory(target,
1340 breakpoint->address & 0xFFFFFFFE,
1341 2, 1, breakpoint->orig_instr);
1342 if (retval != ERROR_OK)
1343 return retval;
1344 }
1345 }
1346 breakpoint->set = 0;
1347
1348 return ERROR_OK;
1349 }
1350
1351 static int cortex_a8_add_breakpoint(struct target *target,
1352 struct breakpoint *breakpoint)
1353 {
1354 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1355
1356 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1357 {
1358 LOG_INFO("no hardware breakpoint available");
1359 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1360 }
1361
1362 if (breakpoint->type == BKPT_HARD)
1363 cortex_a8->brp_num_available--;
1364
1365 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1366 }
1367
1368 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1369 {
1370 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1371
1372 #if 0
1373 /* It is perfectly possible to remove breakpoints while the target is running */
1374 if (target->state != TARGET_HALTED)
1375 {
1376 LOG_WARNING("target not halted");
1377 return ERROR_TARGET_NOT_HALTED;
1378 }
1379 #endif
1380
1381 if (breakpoint->set)
1382 {
1383 cortex_a8_unset_breakpoint(target, breakpoint);
1384 if (breakpoint->type == BKPT_HARD)
1385 cortex_a8->brp_num_available++;
1386 }
1387
1388
1389 return ERROR_OK;
1390 }
1391
1392
1393
1394 /*
1395 * Cortex-A8 Reset functions
1396 */
1397
1398 static int cortex_a8_assert_reset(struct target *target)
1399 {
1400 struct armv7a_common *armv7a = target_to_armv7a(target);
1401
1402 LOG_DEBUG(" ");
1403
1404 /* FIXME when halt is requested, make it work somehow... */
1405
1406 /* Issue some kind of warm reset. */
1407 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1408 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1409 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1410 /* REVISIT handle "pulls" cases, if there's
1411 * hardware that needs them to work.
1412 */
1413 jtag_add_reset(0, 1);
1414 } else {
1415 LOG_ERROR("%s: how to reset?", target_name(target));
1416 return ERROR_FAIL;
1417 }
1418
1419 /* registers are now invalid */
1420 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1421
1422 target->state = TARGET_RESET;
1423
1424 return ERROR_OK;
1425 }
1426
1427 static int cortex_a8_deassert_reset(struct target *target)
1428 {
1429 int retval;
1430
1431 LOG_DEBUG(" ");
1432
1433 /* be certain SRST is off */
1434 jtag_add_reset(0, 0);
1435
1436 retval = cortex_a8_poll(target);
1437 if (retval != ERROR_OK)
1438 return retval;
1439
1440 if (target->reset_halt) {
1441 if (target->state != TARGET_HALTED) {
1442 LOG_WARNING("%s: ran after reset and before halt ...",
1443 target_name(target));
1444 if ((retval = target_halt(target)) != ERROR_OK)
1445 return retval;
1446 }
1447 }
1448
1449 return ERROR_OK;
1450 }
1451
1452 /*
1453 * Cortex-A8 Memory access
1454 *
1455 * This is the same as for the Cortex-M3, but we must also use the
1456 * correct AP number for every access.
1457 */
1458
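/* Routing summary for the functions below: when the DAP's currently selected
 * AP is the AHB-AP (swjdp_memoryap, AP #0), the fast memory-mapped path is
 * used; any other selection falls back to the slow APB-AP path that executes
 * LDRB/STRB instructions one at a time through the ITR.  The active AP is
 * whatever swjdp->apsel holds, typically set with the "dap apsel" command.
 */
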
1459 static int cortex_a8_read_phys_memory(struct target *target,
1460 uint32_t address, uint32_t size,
1461 uint32_t count, uint8_t *buffer)
1462 {
1463 struct armv7a_common *armv7a = target_to_armv7a(target);
1464 struct adiv5_dap *swjdp = &armv7a->dap;
1465 int retval = ERROR_INVALID_ARGUMENTS;
1466 uint8_t apsel = swjdp->apsel;
1467
1468 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1469
1470 if (count && buffer) {
1471
1472 if ( apsel == swjdp_memoryap ) {
1473
1474 /* read memory through AHB-AP */
1475
1476 switch (size) {
1477 case 4:
1478 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
1479 buffer, 4 * count, address);
1480 break;
1481 case 2:
1482 retval = mem_ap_sel_read_buf_u16(swjdp, swjdp_memoryap,
1483 buffer, 2 * count, address);
1484 break;
1485 case 1:
1486 retval = mem_ap_sel_read_buf_u8(swjdp, swjdp_memoryap,
1487 buffer, count, address);
1488 break;
1489 }
1490
1491 } else {
1492
1493 /* read memory through APB-AP */
1494
1495 uint32_t saved_r0, saved_r1;
1496 int nbytes = count * size;
1497 uint32_t data;
1498 int enabled = 0;
1499
1500 if (target->state != TARGET_HALTED)
1501 {
1502 LOG_WARNING("target not halted");
1503 return ERROR_TARGET_NOT_HALTED;
1504 }
1505
1506 retval = cortex_a8_mmu(target, &enabled);
1507 if (retval != ERROR_OK)
1508 return retval;
1509
1510 if (enabled)
1511 {
1512 LOG_WARNING("Reading physical memory through APB with MMU enabled is not yet implemented");
1513 return ERROR_TARGET_FAILURE;
1514 }
1515
1516 /* save registers r0 and r1, we are going to corrupt them */
1517 retval = cortex_a8_dap_read_coreregister_u32(target, &saved_r0, 0);
1518 if (retval != ERROR_OK)
1519 return retval;
1520
1521 retval = cortex_a8_dap_read_coreregister_u32(target, &saved_r1, 1);
1522 if (retval != ERROR_OK)
1523 return retval;
1524
1525 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
1526 if (retval != ERROR_OK)
1527 return retval;
1528
1529 while (nbytes > 0) {
1530
1531 /* execute instruction LDRB r1, [r0], 1 (0xe4d01001) */
1532 retval = cortex_a8_exec_opcode(target, ARMV4_5_LDRB_IP(1, 0) , NULL);
1533 if (retval != ERROR_OK)
1534 return retval;
1535
1536 retval = cortex_a8_dap_read_coreregister_u32(target, &data, 1);
1537 if (retval != ERROR_OK)
1538 return retval;
1539
1540 *buffer++ = data;
1541 --nbytes;
1542
1543 }
1544
1545 /* restore corrupted registers r0 and r1 */
1546 retval = cortex_a8_dap_write_coreregister_u32(target, saved_r0, 0);
1547 if (retval != ERROR_OK)
1548 return retval;
1549
1550 retval = cortex_a8_dap_write_coreregister_u32(target, saved_r1, 1);
1551 if (retval != ERROR_OK)
1552 return retval;
1553
1554 }
1555 }
1556
1557 return retval;
1558 }
1559
1560 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1561 uint32_t size, uint32_t count, uint8_t *buffer)
1562 {
1563 int enabled = 0;
1564 uint32_t virt, phys;
1565 int retval;
1566
1567 /* cortex_a8 handles unaligned memory access */
1568
1569 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1570 retval = cortex_a8_mmu(target, &enabled);
1571 if (retval != ERROR_OK)
1572 return retval;
1573
1574 if(enabled)
1575 {
1576 virt = address;
1577 retval = cortex_a8_virt2phys(target, virt, &phys);
1578 if (retval != ERROR_OK)
1579 return retval;
1580
1581 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1582 address = phys;
1583 }
1584
1585 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1586 }
1587
1588 static int cortex_a8_write_phys_memory(struct target *target,
1589 uint32_t address, uint32_t size,
1590 uint32_t count, uint8_t *buffer)
1591 {
1592 struct armv7a_common *armv7a = target_to_armv7a(target);
1593 struct adiv5_dap *swjdp = &armv7a->dap;
1594 int retval = ERROR_INVALID_ARGUMENTS;
1595 uint8_t apsel = swjdp->apsel;
1596
1597 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1598
1599 if (count && buffer) {
1600
1601 if ( apsel == swjdp_memoryap ) {
1602
1603 /* write memory through AHB-AP */
1604
1605 switch (size) {
1606 case 4:
1607 retval = mem_ap_sel_write_buf_u32(swjdp, swjdp_memoryap,
1608 buffer, 4 * count, address);
1609 break;
1610 case 2:
1611 retval = mem_ap_sel_write_buf_u16(swjdp, swjdp_memoryap,
1612 buffer, 2 * count, address);
1613 break;
1614 case 1:
1615 retval = mem_ap_sel_write_buf_u8(swjdp, swjdp_memoryap,
1616 buffer, count, address);
1617 break;
1618 }
1619
1620 } else {
1621
1622 /* write memory through APB-AP */
1623
1624 uint32_t saved_r0, saved_r1;
1625 int nbytes = count * size;
1626 uint32_t data;
1627 int enabled = 0;
1628
1629 if (target->state != TARGET_HALTED)
1630 {
1631 LOG_WARNING("target not halted");
1632 return ERROR_TARGET_NOT_HALTED;
1633 }
1634
1635 retval = cortex_a8_mmu(target, &enabled);
1636 if (retval != ERROR_OK)
1637 return retval;
1638
1639 if (enabled)
1640 {
1641 LOG_WARNING("Writing physical memory through APB with MMU enabled is not yet implemented");
1642 return ERROR_TARGET_FAILURE;
1643 }
1644
1645 /* save registers r0 and r1, we are going to corrupt them */
1646 retval = cortex_a8_dap_read_coreregister_u32(target, &saved_r0, 0);
1647 if (retval != ERROR_OK)
1648 return retval;
1649
1650 retval = cortex_a8_dap_read_coreregister_u32(target, &saved_r1, 1);
1651 if (retval != ERROR_OK)
1652 return retval;
1653
1654 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
1655 if (retval != ERROR_OK)
1656 return retval;
1657
1658 while (nbytes > 0) {
1659
1660 data = *buffer++;
1661
1662 retval = cortex_a8_dap_write_coreregister_u32(target, data, 1);
1663 if (retval != ERROR_OK)
1664 return retval;
1665
1666 /* execute instruction STRB r1, [r0], 1 (0xe4c01001) */
1667 retval = cortex_a8_exec_opcode(target, ARMV4_5_STRB_IP(1, 0) , NULL);
1668 if (retval != ERROR_OK)
1669 return retval;
1670
1671 --nbytes;
1672 }
1673
1674 /* restore corrupted registers r0 and r1 */
1675 retval = cortex_a8_dap_write_coreregister_u32(target, saved_r0, 0);
1676 if (retval != ERROR_OK)
1677 return retval;
1678
1679 retval = cortex_a8_dap_write_coreregister_u32(target, saved_r1, 1);
1680 if (retval != ERROR_OK)
1681 return retval;
1682
1683 /* we can return here without invalidating D/I-cache because */
1684 /* access through APB maintains cache coherency */
1685 return retval;
1686 }
1687 }
1688
1689
1690 /* REVISIT this op is generic ARMv7-A/R stuff */
1691 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1692 {
1693 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1694
1695 retval = dpm->prepare(dpm);
1696 if (retval != ERROR_OK)
1697 return retval;
1698
1699 /* The Cache handling will NOT work with MMU active, the
1700 * wrong addresses will be invalidated!
1701 *
1702 * For both ICache and DCache, walk all cache lines in the
1703 * address range. Cortex-A8 has fixed 64 byte line length.
1704 *
1705 * REVISIT per ARMv7, these may trigger watchpoints ...
1706 */
1707
1708 /* invalidate I-Cache */
1709 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1710 {
1711 /* ICIMVAU - Invalidate Cache single entry
1712 * with MVA to PoU
1713 * MCR p15, 0, r0, c7, c5, 1
1714 */
1715 for (uint32_t cacheline = address;
1716 cacheline < address + size * count;
1717 cacheline += 64) {
1718 retval = dpm->instr_write_data_r0(dpm,
1719 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1720 cacheline);
1721 if (retval != ERROR_OK)
1722 return retval;
1723 }
1724 }
1725
1726 /* invalidate D-Cache */
1727 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1728 {
1729 /* DCIMVAC - Invalidate data Cache line
1730 * with MVA to PoC
1731 * MCR p15, 0, r0, c7, c6, 1
1732 */
1733 for (uint32_t cacheline = address;
1734 cacheline < address + size * count;
1735 cacheline += 64) {
1736 retval = dpm->instr_write_data_r0(dpm,
1737 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1738 cacheline);
1739 if (retval != ERROR_OK)
1740 return retval;
1741 }
1742 }
1743
1744 /* (void) */ dpm->finish(dpm);
1745 }
1746
1747 return retval;
1748 }
1749
1750 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1751 uint32_t size, uint32_t count, uint8_t *buffer)
1752 {
1753 int enabled = 0;
1754 uint32_t virt, phys;
1755 int retval;
1756
1757 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1758 retval = cortex_a8_mmu(target, &enabled);
1759 if (retval != ERROR_OK)
1760 return retval;
1761
1762 if(enabled)
1763 {
1764 virt = address;
1765 retval = cortex_a8_virt2phys(target, virt, &phys);
1766 if (retval != ERROR_OK)
1767 return retval;
1768 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1769 address = phys;
1770 }
1771
1772 return cortex_a8_write_phys_memory(target, address, size,
1773 count, buffer);
1774 }
1775
1776 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1777 uint32_t count, uint8_t *buffer)
1778 {
1779 return cortex_a8_write_memory(target, address, 4, count, buffer);
1780 }
1781
1782 static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1783 {
1784 #if 0
1785 u16 dcrdr;
1786
1787 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1788 *ctrl = (uint8_t)dcrdr;
1789 *value = (uint8_t)(dcrdr >> 8);
1790
1791 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1792
1793 /* write ack back to software dcc register
1794 * signify we have read data */
1795 if (dcrdr & (1 << 0))
1796 {
1797 dcrdr = 0;
1798 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1799 }
1800 #endif
1801 return ERROR_OK;
1802 }
1803
1804
1805 static int cortex_a8_handle_target_request(void *priv)
1806 {
1807 struct target *target = priv;
1808 struct armv7a_common *armv7a = target_to_armv7a(target);
1809 struct adiv5_dap *swjdp = &armv7a->dap;
1810 int retval;
1811
1812 if (!target_was_examined(target))
1813 return ERROR_OK;
1814 if (!target->dbg_msg_enabled)
1815 return ERROR_OK;
1816
1817 if (target->state == TARGET_RUNNING)
1818 {
1819 uint8_t data = 0;
1820 uint8_t ctrl = 0;
1821
1822 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1823 if (retval != ERROR_OK)
1824 return retval;
1825
1826 /* check if we have data */
1827 if (ctrl & (1 << 0))
1828 {
1829 uint32_t request;
1830
1831 /* we assume the target is quick enough; assemble the 32-bit request from four byte-wide DCC reads, LSB first */
1832 request = data;
1833 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1834 if (retval != ERROR_OK)
1835 return retval;
1836 request |= (data << 8);
1837 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1838 if (retval != ERROR_OK)
1839 return retval;
1840 request |= (data << 16);
1841 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1842 if (retval != ERROR_OK)
1843 return retval;
1844 request |= (data << 24);
1845 target_request(target, request);
1846 }
1847 }
1848
1849 return ERROR_OK;
1850 }
1851
1852 /*
1853 * Cortex-A8 target information and configuration
1854 */
1855
1856 static int cortex_a8_examine_first(struct target *target)
1857 {
1858 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1859 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1860 struct adiv5_dap *swjdp = &armv7a->dap;
1861 int i;
1862 int retval = ERROR_OK;
1863 uint32_t didr, ctypr, ttypr, cpuid;
1864 uint32_t dbgbase, apid;
1865
1866 /* Rather than doing one extra read to ensure the DAP is configured,
1867 * we call ahbap_debugport_init(swjdp).
1868 */
1869 retval = ahbap_debugport_init(swjdp);
1870 if (retval != ERROR_OK)
1871 return retval;
1872
1873 /* Get ROM Table base */
1874 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
1875 if (retval != ERROR_OK)
1876 return retval;
1877
1878 /* Lookup 0x15 -- Processor DAP */
1879 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
1880 &armv7a->debug_base);
1881 if (retval != ERROR_OK)
1882 return retval;
1883
1884 #if 0
1885 /*
1886 * FIXME: assuming omap4430
1887 *
1888 * APB DBGBASE reads 0x80040000, but this points to an empty ROM table.
1889 * 0x80000000 is cpu0 coresight region
1890 */
1891 if (target->coreid > 3) {
1892 LOG_ERROR("cortex_a8 supports up to 4 cores");
1893 return ERROR_INVALID_ARGUMENTS;
1894 }
1895 armv7a->debug_base = 0x80000000 |
1896 ((target->coreid & 0x3) << CORTEX_A8_PADDRDBG_CPU_SHIFT);
1897 #endif
1898
1899 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1900 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1901 if (retval != ERROR_OK)
1902 return retval;
1903
1904 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1905 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1906 {
1907 LOG_DEBUG("Examine %s failed", "CPUID");
1908 return retval;
1909 }
1910
1911 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1912 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1913 {
1914 LOG_DEBUG("Examine %s failed", "CTYPR");
1915 return retval;
1916 }
1917
1918 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1919 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1920 {
1921 LOG_DEBUG("Examine %s failed", "TTYPR");
1922 return retval;
1923 }
1924
1925 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1926 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1927 {
1928 LOG_DEBUG("Examine %s failed", "DIDR");
1929 return retval;
1930 }
1931
1932 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1933 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1934 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1935 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1936
1937 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1938 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1939 if (retval != ERROR_OK)
1940 return retval;
1941
1942 /* Setup Breakpoint Register Pairs */
1943 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1944 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1945 cortex_a8->brp_num_available = cortex_a8->brp_num;
1946 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1947 // cortex_a8->brb_enabled = ????;
1948 for (i = 0; i < cortex_a8->brp_num; i++)
1949 {
1950 cortex_a8->brp_list[i].used = 0;
1951 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1952 cortex_a8->brp_list[i].type = BRP_NORMAL;
1953 else
1954 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1955 cortex_a8->brp_list[i].value = 0;
1956 cortex_a8->brp_list[i].control = 0;
1957 cortex_a8->brp_list[i].BRPn = i;
1958 }
1959
1960 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1961
1962 target_set_examined(target);
1963 return ERROR_OK;
1964 }
1965
1966 static int cortex_a8_examine(struct target *target)
1967 {
1968 int retval = ERROR_OK;
1969
1970 /* don't re-probe hardware after each reset */
1971 if (!target_was_examined(target))
1972 retval = cortex_a8_examine_first(target);
1973
1974 /* Configure core debug access */
1975 if (retval == ERROR_OK)
1976 retval = cortex_a8_init_debug_access(target);
1977
1978 return retval;
1979 }
1980
1981 /*
1982 * Cortex-A8 target creation and initialization
1983 */
1984
1985 static int cortex_a8_init_target(struct command_context *cmd_ctx,
1986 struct target *target)
1987 {
1988 /* examine_first() does a bunch of this */
1989 return ERROR_OK;
1990 }
1991
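/* Glue the generic ARM/ARMv7-A state to this Cortex-A8 instance: set up
 * the DAP and JTAG info, install the MMU/cache callbacks used by
 * virt2phys, and register the DCC polling timer callback.
 */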
1992 static int cortex_a8_init_arch_info(struct target *target,
1993 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
1994 {
1995 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1996 struct arm *armv4_5 = &armv7a->armv4_5_common;
1997 struct adiv5_dap *dap = &armv7a->dap;
1998
1999 armv7a->armv4_5_common.dap = dap;
2000
2001 /* Setup struct cortex_a8_common */
2002 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
2003 armv4_5->arch_info = armv7a;
2004
2005 /* prepare JTAG information for the new target */
2006 cortex_a8->jtag_info.tap = tap;
2007 cortex_a8->jtag_info.scann_size = 4;
2008
2009 /* Leave (only) generic DAP stuff for debugport_init() */
2010 dap->jtag_info = &cortex_a8->jtag_info;
2011 dap->memaccess_tck = 80;
2012
2013 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2014 dap->tar_autoincr_block = (1 << 10);
2015
2016 cortex_a8->fast_reg_read = 0;
2017
2018 /* Set default value */
2019 cortex_a8->current_address_mode = ARM_MODE_ANY;
2020
2021 /* register arch-specific functions */
2022 armv7a->examine_debug_reason = NULL;
2023
2024 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
2025
2026 armv7a->pre_restore_context = NULL;
2027 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
2028 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
2029 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
2030 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
2031 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
2032 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
2033 armv7a->armv4_5_mmu.has_tiny_pages = 1;
2034 armv7a->armv4_5_mmu.mmu_enabled = 0;
2035
2036
2037 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
2038
2039 /* REVISIT v7a setup should be in a v7a-specific routine */
2040 arm_init_arch_info(target, armv4_5);
2041 armv7a->common_magic = ARMV7_COMMON_MAGIC;
2042
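	/* Poll the DCC for debugmon requests roughly once per millisecond. */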
2043 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
2044
2045 return ERROR_OK;
2046 }
2047
2048 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
2049 {
2050 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2051
2052 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2053 }
2054
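/* Return the translation table base for the address space selected by
 * cortex_a8_virt2phys() or, failing that, by the current core mode:
 *   kernel (SVC): TTBR1 -- MRC p15, 0, <Rt>, c2, c0, 1
 *   user (USR):   TTBR0 -- MRC p15, 0, <Rt>, c2, c0, 0
 * The low-order bits are masked off, which assumes a 16 KB first-level
 * table (short-descriptor format with TTBCR.N = 0).
 */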
2055 static int cortex_a8_get_ttb(struct target *target, uint32_t *result)
2056 {
2057 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2058 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2059 	uint32_t ttb = 0;
	int retval = ERROR_OK;
2060
2061 	/* current_address_mode is set inside cortex_a8_virt2phys(),
2062 	   where we can determine whether the address belongs to user or kernel space */
2063 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
2064 {
2065 		/* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 TTBR1 */
2066 retval = armv7a->armv4_5_common.mrc(target, 15,
2067 0, 1, /* op1, op2 */
2068 2, 0, /* CRn, CRm */
2069 &ttb);
2070 if (retval != ERROR_OK)
2071 return retval;
2072 }
2073 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
2074 {
2075 		/* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 TTBR0 */
2076 retval = armv7a->armv4_5_common.mrc(target, 15,
2077 0, 0, /* op1, op2 */
2078 2, 0, /* CRn, CRm */
2079 &ttb);
2080 if (retval != ERROR_OK)
2081 return retval;
2082 }
2083 	/* current_address_mode gives us no hint whether the address is a
2084 	   user or a kernel one, so fall back on the current core mode:
2085 	   in a privileged (SVC) mode assume a kernel address, in user
2086 	   mode assume a user address */
2087 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
2088 {
2089 		/* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 TTBR1 */
2090 retval = armv7a->armv4_5_common.mrc(target, 15,
2091 0, 1, /* op1, op2 */
2092 2, 0, /* CRn, CRm */
2093 &ttb);
2094 if (retval != ERROR_OK)
2095 return retval;
2096 }
2097 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
2098 {
2099 		/* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 TTBR0 */
2100 retval = armv7a->armv4_5_common.mrc(target, 15,
2101 0, 0, /* op1, op2 */
2102 2, 0, /* CRn, CRm */
2103 &ttb);
2104 if (retval != ERROR_OK)
2105 return retval;
2106 }
2107 	/* otherwise we cannot tell whether to use the user or the kernel TTB */
2108 else
2109 LOG_ERROR("Don't know how to get ttb for current mode!!!");
2110
2111 ttb &= 0xffffc000;
2112
2113 *result = ttb;
2114
2115 return ERROR_OK;
2116 }
2117
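/* The MMU and cache enables live in the CP15 c1 System Control Register
 * (SCTLR): bit 0 = M (MMU), bit 2 = C (data/unified caches) and
 * bit 12 = I (instruction cache), hence the 0x1/0x4/0x1000 masks used by
 * the two helpers below.
 */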
2118 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
2119 int d_u_cache, int i_cache)
2120 {
2121 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2122 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2123 uint32_t cp15_control;
2124 int retval;
2125
2126 /* read cp15 control register */
2127 retval = armv7a->armv4_5_common.mrc(target, 15,
2128 0, 0, /* op1, op2 */
2129 1, 0, /* CRn, CRm */
2130 &cp15_control);
2131 if (retval != ERROR_OK)
2132 return retval;
2133
2134
2135 if (mmu)
2136 cp15_control &= ~0x1U;
2137
2138 if (d_u_cache)
2139 cp15_control &= ~0x4U;
2140
2141 if (i_cache)
2142 cp15_control &= ~0x1000U;
2143
2144 retval = armv7a->armv4_5_common.mcr(target, 15,
2145 0, 0, /* op1, op2 */
2146 1, 0, /* CRn, CRm */
2147 cp15_control);
2148 return retval;
2149 }
2150
2151 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
2152 int d_u_cache, int i_cache)
2153 {
2154 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2155 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2156 uint32_t cp15_control;
2157 int retval;
2158
2159 /* read cp15 control register */
2160 retval = armv7a->armv4_5_common.mrc(target, 15,
2161 0, 0, /* op1, op2 */
2162 1, 0, /* CRn, CRm */
2163 &cp15_control);
2164 if (retval != ERROR_OK)
2165 return retval;
2166
2167 if (mmu)
2168 cp15_control |= 0x1U;
2169
2170 if (d_u_cache)
2171 cp15_control |= 0x4U;
2172
2173 if (i_cache)
2174 cp15_control |= 0x1000U;
2175
2176 retval = armv7a->armv4_5_common.mcr(target, 15,
2177 0, 0, /* op1, op2 */
2178 1, 0, /* CRn, CRm */
2179 cp15_control);
2180 return retval;
2181 }
2182
2183
2184 static int cortex_a8_mmu(struct target *target, int *enabled)
2185 {
2186 if (target->state != TARGET_HALTED) {
2187 LOG_ERROR("%s: target not halted", __func__);
2188 return ERROR_TARGET_INVALID;
2189 }
2190
2191 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
2192 return ERROR_OK;
2193 }
2194
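/* Translate a virtual address by walking the page tables in software
 * (armv4_5_mmu_translate_va()); current_address_mode tells
 * cortex_a8_get_ttb() whether to use the user or the kernel table,
 * based on the assumed Linux 3 GB / 1 GB split described below.
 */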
2195 static int cortex_a8_virt2phys(struct target *target,
2196 uint32_t virt, uint32_t *phys)
2197 {
2198 uint32_t cb;
2199 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2200 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2201 struct armv7a_common *armv7a = target_to_armv7a(target);
2202
2203 	/* We assume the virtual address space is split between user
2204 	   and kernel in the Linux style:
2205 	   0x00000000-0xbfffffff - User space
2206 	   0xc0000000-0xffffffff - Kernel space */
2207 if( virt < 0xc0000000 ) /* Linux user space */
2208 cortex_a8->current_address_mode = ARM_MODE_USR;
2209 else /* Linux kernel */
2210 cortex_a8->current_address_mode = ARM_MODE_SVC;
2211 uint32_t ret;
2212 int retval = armv4_5_mmu_translate_va(target,
2213 &armv7a->armv4_5_mmu, virt, &cb, &ret);
2214 if (retval != ERROR_OK)
2215 return retval;
2216 	/* Reset the flag so that a later caller does not use it by mistake */
2217 cortex_a8->current_address_mode = ARM_MODE_ANY;
2218
2219 *phys = ret;
2220 return ERROR_OK;
2221 }
2222
2223 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2224 {
2225 struct target *target = get_current_target(CMD_CTX);
2226 struct armv7a_common *armv7a = target_to_armv7a(target);
2227
2228 return armv4_5_handle_cache_info_command(CMD_CTX,
2229 &armv7a->armv4_5_mmu.armv4_5_cache);
2230 }
2231
2232
2233 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2234 {
2235 struct target *target = get_current_target(CMD_CTX);
2236 if (!target_was_examined(target))
2237 {
2238 LOG_ERROR("target not examined yet");
2239 return ERROR_FAIL;
2240 }
2241
2242 return cortex_a8_init_debug_access(target);
2243 }
2244
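/* The handlers below are reachable from the OpenOCD command line under
 * the "cortex_a8" prefix registered further down, e.g. (illustrative):
 *
 *     cortex_a8 dbginit
 *     cortex_a8 cache_info
 *
 * "dbginit" is typically issued from a board's reset-init event handler.
 */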
2245 static const struct command_registration cortex_a8_exec_command_handlers[] = {
2246 {
2247 .name = "cache_info",
2248 .handler = cortex_a8_handle_cache_info_command,
2249 .mode = COMMAND_EXEC,
2250 .help = "display information about target caches",
2251 },
2252 {
2253 .name = "dbginit",
2254 .handler = cortex_a8_handle_dbginit_command,
2255 .mode = COMMAND_EXEC,
2256 .help = "Initialize core debug",
2257 },
2258 COMMAND_REGISTRATION_DONE
2259 };
2260 static const struct command_registration cortex_a8_command_handlers[] = {
2261 {
2262 .chain = arm_command_handlers,
2263 },
2264 {
2265 .chain = armv7a_command_handlers,
2266 },
2267 {
2268 .name = "cortex_a8",
2269 .mode = COMMAND_ANY,
2270 .help = "Cortex-A8 command group",
2271 .chain = cortex_a8_exec_command_handlers,
2272 },
2273 COMMAND_REGISTRATION_DONE
2274 };
2275
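/* Entry points exported to the generic OpenOCD target layer; used when a
 * configuration script creates a target of type "cortex_a8".
 */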
2276 struct target_type cortexa8_target = {
2277 .name = "cortex_a8",
2278
2279 .poll = cortex_a8_poll,
2280 .arch_state = armv7a_arch_state,
2281
2282 .target_request_data = NULL,
2283
2284 .halt = cortex_a8_halt,
2285 .resume = cortex_a8_resume,
2286 .step = cortex_a8_step,
2287
2288 .assert_reset = cortex_a8_assert_reset,
2289 .deassert_reset = cortex_a8_deassert_reset,
2290 .soft_reset_halt = NULL,
2291
2292 /* REVISIT allow exporting VFP3 registers ... */
2293 .get_gdb_reg_list = arm_get_gdb_reg_list,
2294
2295 .read_memory = cortex_a8_read_memory,
2296 .write_memory = cortex_a8_write_memory,
2297 .bulk_write_memory = cortex_a8_bulk_write_memory,
2298
2299 .checksum_memory = arm_checksum_memory,
2300 .blank_check_memory = arm_blank_check_memory,
2301
2302 .run_algorithm = armv4_5_run_algorithm,
2303
2304 .add_breakpoint = cortex_a8_add_breakpoint,
2305 .remove_breakpoint = cortex_a8_remove_breakpoint,
2306 .add_watchpoint = NULL,
2307 .remove_watchpoint = NULL,
2308
2309 .commands = cortex_a8_command_handlers,
2310 .target_create = cortex_a8_target_create,
2311 .init_target = cortex_a8_init_target,
2312 .examine = cortex_a8_examine,
2313
2314 .read_phys_memory = cortex_a8_read_phys_memory,
2315 .write_phys_memory = cortex_a8_write_phys_memory,
2316 .mmu = cortex_a8_mmu,
2317 .virt2phys = cortex_a8_virt2phys,
2318
2319 };
