cortex_a hybrid & context breakpoints
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * This program is free software; you can redistribute it and/or modify *
21 * it under the terms of the GNU General Public License as published by *
22 * the Free Software Foundation; either version 2 of the License, or *
23 * (at your option) any later version. *
24 * *
25 * This program is distributed in the hope that it will be useful, *
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
28 * GNU General Public License for more details. *
29 * *
30 * You should have received a copy of the GNU General Public License *
31 * along with this program; if not, write to the *
32 * Free Software Foundation, Inc., *
33 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
34 * *
35 * Cortex-A8(tm) TRM, ARM DDI 0344H *
36 * Cortex-A9(tm) TRM, ARM DDI 0407F *
37 * *
38 ***************************************************************************/
39 #ifdef HAVE_CONFIG_H
40 #include "config.h"
41 #endif
42
43 #include "breakpoints.h"
44 #include "cortex_a.h"
45 #include "register.h"
46 #include "target_request.h"
47 #include "target_type.h"
48 #include "arm_opcodes.h"
49 #include <helper/time_support.h>
50
51 static int cortex_a8_poll(struct target *target);
52 static int cortex_a8_debug_entry(struct target *target);
53 static int cortex_a8_restore_context(struct target *target, bool bpwp);
54 static int cortex_a8_set_breakpoint(struct target *target,
55 struct breakpoint *breakpoint, uint8_t matchmode);
56 static int cortex_a8_set_context_breakpoint(struct target *target,
57 struct breakpoint *breakpoint, uint8_t matchmode);
58 static int cortex_a8_set_hybrid_breakpoint(struct target *target,
59 struct breakpoint *breakpoint);
60 static int cortex_a8_unset_breakpoint(struct target *target,
61 struct breakpoint *breakpoint);
62 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
63 uint32_t *value, int regnum);
64 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
65 uint32_t value, int regnum);
66 static int cortex_a8_mmu(struct target *target, int *enabled);
67 static int cortex_a8_virt2phys(struct target *target,
68 uint32_t virt, uint32_t *phys);
69 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
70 int d_u_cache, int i_cache);
71 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
72 int d_u_cache, int i_cache);
73 static int cortex_a8_get_ttb(struct target *target, uint32_t *result);
74
75
76 /*
77 * FIXME do topology discovery using the ROM; don't
78 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
79 * cores, with different AP numbering ... don't use a #define
80 * for these numbers, use per-core armv7a state.
81 */
82 #define swjdp_memoryap 0
83 #define swjdp_debugap 1
84
85 /*
86 * Cortex-A8 Basic debug access, very low level assumes state is saved
87 */
88 static int cortex_a8_init_debug_access(struct target *target)
89 {
90 struct armv7a_common *armv7a = target_to_armv7a(target);
91 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
92 int retval;
93 uint32_t dummy;
94
95 LOG_DEBUG(" ");
96
97 /* Unlocking the debug registers for modification */
98 /* The debugport might be uninitialised so try twice */
99 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
100 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
101 if (retval != ERROR_OK)
102 {
103 /* try again */
104 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
105 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
106 if (retval == ERROR_OK)
107 {
108 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
109 }
110 }
111 if (retval != ERROR_OK)
112 return retval;
113 /* Clear Sticky Power Down status Bit in PRSR to enable access to
114 the registers in the Core Power Domain */
115 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
116 armv7a->debug_base + CPUDBG_PRSR, &dummy);
117 if (retval != ERROR_OK)
118 return retval;
119
120 /* Enabling of instruction execution in debug mode is done in debug_entry code */
121
122 /* Resync breakpoint registers */
123
124 /* Since this is likely called from init or reset, update target state information*/
125 return cortex_a8_poll(target);
126 }
127
128 /* To reduce needless round-trips, pass in a pointer to the current
129 * DSCR value. Initialize it to zero if you just need to know the
130 * value on return from this function; or DSCR_INSTR_COMP if you
131 * happen to know that no instruction is pending.
132 */
/* Execute a single ARM opcode on the halted core by pushing it through
 * the ITR, honouring the DSCR-caching contract described above. */
static int cortex_a8_exec_opcode(struct target *target,
		uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;

	/* Start from the caller's cached DSCR when available; zero forces
	 * the first loop below to fetch the live value from the target. */
	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_INSTR_COMP) == 0)
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		/* give up after one second of polling */
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* Write the opcode into the ITR; the core executes it from there. */
	retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	/* Wait (up to 1 s) for this instruction to finish executing. */
	then = timeval_ms();
	do
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}
	while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */

	/* Hand the final DSCR back so the caller can skip a re-read. */
	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
191
192 /**************************************************************************
193 Read core register with very few exec_opcode, fast but needs work_area.
194 This can cause problems with MMU active.
195 **************************************************************************/
196 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
197 uint32_t * regfile)
198 {
199 int retval = ERROR_OK;
200 struct armv7a_common *armv7a = target_to_armv7a(target);
201 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
202
203 retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
204 if (retval != ERROR_OK)
205 return retval;
206 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
207 if (retval != ERROR_OK)
208 return retval;
209 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
210 if (retval != ERROR_OK)
211 return retval;
212
213 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
214 (uint8_t *)(&regfile[1]), 4*15, address);
215
216 return retval;
217 }
218
/* Read one core register (r0..r15, CPSR=16, SPSR=17) via the DCC:
 * the core executes an instruction that writes the register into DTRTX,
 * which we then read over the debug AP. Register numbers above 17 are
 * silently ignored (returns ERROR_OK without touching *value). */
static int cortex_a8_dap_read_coreregister_u32(struct target *target,
		uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;

	if (reg > 17)
		return retval;

	if (reg < 15)
	{
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else if (reg == 15)
	{
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else
	{
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRTX (the "TX full" flag is named
	 * from the core's point of view) */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0)
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
288
/* Write one core register (r0..r15, CPSR=16, SPSR=17) via the DCC:
 * the value is placed in DTRRX and the core executes an instruction
 * that moves it into the destination register. Register numbers above
 * 17 are silently ignored. */
static int cortex_a8_dap_write_coreregister_u32(struct target *target,
		uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL)
	{
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Drain the stale word: clear DCCRX with
		 * MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* out-of-range register: silently succeed (nothing written) */
	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15)
	{
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	}
	else if (Rd == 15)
	{
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else
	{
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16)
		{
			retval = cortex_a8_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
374
375 /* Write to memory mapped registers directly with no cache or mmu handling */
376 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
377 {
378 int retval;
379 struct armv7a_common *armv7a = target_to_armv7a(target);
380 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
381
382 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap, address, value);
383
384 return retval;
385 }
386
387 /*
388 * Cortex-A8 implementation of Debug Programmer's Model
389 *
390 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
391 * so there's no need to poll for it before executing an instruction.
392 *
393 * NOTE that in several of these cases the "stall" mode might be useful.
394 * It'd let us queue a few operations together... prepare/finish might
395 * be the places to enable/disable that mode.
396 */
397
/* Recover the Cortex-A8 container from the generic arm_dpm pointer the
 * shared DPM framework hands back to our hooks. */
static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
}
402
403 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
404 {
405 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
406 return mem_ap_sel_write_u32(a8->armv7a_common.armv4_5_common.dap,
407 swjdp_debugap,a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
408 }
409
410 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
411 uint32_t *dscr_p)
412 {
413 struct adiv5_dap *swjdp = a8->armv7a_common.armv4_5_common.dap;
414 uint32_t dscr = DSCR_INSTR_COMP;
415 int retval;
416
417 if (dscr_p)
418 dscr = *dscr_p;
419
420 /* Wait for DTRRXfull */
421 long long then = timeval_ms();
422 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
423 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
424 a8->armv7a_common.debug_base + CPUDBG_DSCR,
425 &dscr);
426 if (retval != ERROR_OK)
427 return retval;
428 if (timeval_ms() > then + 1000)
429 {
430 LOG_ERROR("Timeout waiting for read dcc");
431 return ERROR_FAIL;
432 }
433 }
434
435 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
436 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
437 if (retval != ERROR_OK)
438 return retval;
439 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
440
441 if (dscr_p)
442 *dscr_p = dscr;
443
444 return retval;
445 }
446
/* DPM "prepare" hook: establish the invariant that DSCR_INSTR_COMP is
 * set (no instruction pending) before a DPM sequence begins, and drain
 * a stale DCC-receive word if one is unexpectedly present. */
static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	struct adiv5_dap *swjdp = a8->armv7a_common.armv4_5_common.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after ever DPM operation */
	long long then = timeval_ms();
	for (;;)
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		/* bail out after one second of polling */
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by reading it into r0:
		 * "MRC p14, 0, r0, c0, c5, 0" */
		retval = cortex_a8_exec_opcode(
				a8->armv7a_common.armv4_5_common.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
486
487 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
488 {
489 /* REVISIT what could be done here? */
490 return ERROR_OK;
491 }
492
493 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
494 uint32_t opcode, uint32_t data)
495 {
496 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
497 int retval;
498 uint32_t dscr = DSCR_INSTR_COMP;
499
500 retval = cortex_a8_write_dcc(a8, data);
501 if (retval != ERROR_OK)
502 return retval;
503
504 return cortex_a8_exec_opcode(
505 a8->armv7a_common.armv4_5_common.target,
506 opcode,
507 &dscr);
508 }
509
/* DPM hook: load "data" into core register r0 via the DCC, then execute
 * "opcode", which is expected to consume r0. */
static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t data)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a8_write_dcc(a8, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			opcode,
			&dscr);

	return retval;
}
537
538 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
539 {
540 struct target *target = dpm->arm->target;
541 uint32_t dscr = DSCR_INSTR_COMP;
542
543 /* "Prefetch flush" after modifying execution status in CPSR */
544 return cortex_a8_exec_opcode(target,
545 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
546 &dscr);
547 }
548
549 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
550 uint32_t opcode, uint32_t *data)
551 {
552 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
553 int retval;
554 uint32_t dscr = DSCR_INSTR_COMP;
555
556 /* the opcode, writing data to DCC */
557 retval = cortex_a8_exec_opcode(
558 a8->armv7a_common.armv4_5_common.target,
559 opcode,
560 &dscr);
561 if (retval != ERROR_OK)
562 return retval;
563
564 return cortex_a8_read_dcc(a8, data, &dscr);
565 }
566
567
568 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
569 uint32_t opcode, uint32_t *data)
570 {
571 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
572 uint32_t dscr = DSCR_INSTR_COMP;
573 int retval;
574
575 /* the opcode, writing data to R0 */
576 retval = cortex_a8_exec_opcode(
577 a8->armv7a_common.armv4_5_common.target,
578 opcode,
579 &dscr);
580 if (retval != ERROR_OK)
581 return retval;
582
583 /* write R0 to DCC */
584 retval = cortex_a8_exec_opcode(
585 a8->armv7a_common.armv4_5_common.target,
586 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
587 &dscr);
588 if (retval != ERROR_OK)
589 return retval;
590
591 return cortex_a8_read_dcc(a8, data, &dscr);
592 }
593
/* DPM hook: program one breakpoint (index 0..15) or watchpoint
 * (index 16..31) unit by writing its value register then its control
 * register. Returns ERROR_FAIL for indices outside that range. */
static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
		uint32_t addr, uint32_t control)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv7a_common.debug_base;
	uint32_t cr = a8->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
	case 0 ... 15:		/* breakpoints */
		vr += CPUDBG_BVR_BASE;
		cr += CPUDBG_BCR_BASE;
		break;
	case 16 ... 31:	/* watchpoints */
		vr += CPUDBG_WVR_BASE;
		cr += CPUDBG_WCR_BASE;
		/* watchpoint registers are indexed from zero again */
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
			(unsigned) vr, (unsigned) cr);

	/* value first, then control (which arms the unit) */
	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
629
630 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
631 {
632 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
633 uint32_t cr;
634
635 switch (index_t) {
636 case 0 ... 15:
637 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
638 break;
639 case 16 ... 31:
640 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
641 index_t -= 16;
642 break;
643 default:
644 return ERROR_FAIL;
645 }
646 cr += 4 * index_t;
647
648 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
649
650 /* clear control register */
651 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
652 }
653
654 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
655 {
656 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
657 int retval;
658
659 dpm->arm = &a8->armv7a_common.armv4_5_common;
660 dpm->didr = didr;
661
662 dpm->prepare = cortex_a8_dpm_prepare;
663 dpm->finish = cortex_a8_dpm_finish;
664
665 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
666 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
667 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
668
669 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
670 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
671
672 dpm->bpwp_enable = cortex_a8_bpwp_enable;
673 dpm->bpwp_disable = cortex_a8_bpwp_disable;
674
675 retval = arm_dpm_setup(dpm);
676 if (retval == ERROR_OK)
677 retval = arm_dpm_initialize(dpm);
678
679 return retval;
680 }
681 static struct target *get_cortex_a8(struct target *target, int32_t coreid)
682 {
683 struct target_list *head;
684 struct target *curr;
685
686 head = target->head;
687 while(head != (struct target_list*)NULL)
688 {
689 curr = head->target;
690 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
691 {
692 return curr;
693 }
694 head = head->next;
695 }
696 return target;
697 }
698 static int cortex_a8_halt(struct target *target);
699
700 static int cortex_a8_halt_smp(struct target *target)
701 {
702 int retval = 0;
703 struct target_list *head;
704 struct target *curr;
705 head = target->head;
706 while(head != (struct target_list*)NULL)
707 {
708 curr = head->target;
709 if ((curr != target) && (curr->state!= TARGET_HALTED))
710 {
711 retval += cortex_a8_halt(curr);
712 }
713 head = head->next;
714 }
715 return retval;
716 }
717
718 static int update_halt_gdb(struct target *target)
719 {
720 int retval = 0;
721 if (target->gdb_service->core[0]==-1)
722 {
723 target->gdb_service->target = target;
724 target->gdb_service->core[0] = target->coreid;
725 retval += cortex_a8_halt_smp(target);
726 }
727 return retval;
728 }
729
730 /*
731 * Cortex-A8 Run control
732 */
733
/* Poll DSCR and track target-state transitions, running debug entry and
 * generating halt events on running->halted edges. Also implements the
 * SMP gdb core-switch trick: after gdb sends "maint packet J core_id"
 * and continues, the next poll fakes a halt on the selected core. */
static int cortex_a8_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
			(target->gdb_service) &&
			(target->gdb_service->target==NULL) )
	{
		target->gdb_service->target =
			get_cortex_a8(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target,
				TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
	{
		return retval;
	}
	/* cache the raw DSCR for later debug-reason analysis */
	cortex_a8->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED))
	{
		if (prev_target_state != TARGET_HALTED)
		{
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
					|| (prev_target_state == TARGET_RESET))
			{
				retval = cortex_a8_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp)
				{
					/* first halted core halts its siblings */
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
						TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING)
			{
				LOG_DEBUG(" ");

				retval = cortex_a8_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp)
				{
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
						TARGET_EVENT_DEBUG_HALTED);
			}
		}
	}
	else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
	{
		target->state = TARGET_RUNNING;
	}
	else
	{
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
817
818 static int cortex_a8_halt(struct target *target)
819 {
820 int retval = ERROR_OK;
821 uint32_t dscr;
822 struct armv7a_common *armv7a = target_to_armv7a(target);
823 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
824
825 /*
826 * Tell the core to be halted by writing DRCR with 0x1
827 * and then wait for the core to be halted.
828 */
829 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
830 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
831 if (retval != ERROR_OK)
832 return retval;
833
834 /*
835 * enter halting debug mode
836 */
837 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
838 armv7a->debug_base + CPUDBG_DSCR, &dscr);
839 if (retval != ERROR_OK)
840 return retval;
841
842 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
843 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
844 if (retval != ERROR_OK)
845 return retval;
846
847 long long then = timeval_ms();
848 for (;;)
849 {
850 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
851 armv7a->debug_base + CPUDBG_DSCR, &dscr);
852 if (retval != ERROR_OK)
853 return retval;
854 if ((dscr & DSCR_CORE_HALTED) != 0)
855 {
856 break;
857 }
858 if (timeval_ms() > then + 1000)
859 {
860 LOG_ERROR("Timeout waiting for halt");
861 return ERROR_FAIL;
862 }
863 }
864
865 target->debug_reason = DBG_REASON_DBGRQ;
866
867 return ERROR_OK;
868 }
869
/* Prepare the core for resume without actually restarting it: fix up
 * the resume PC for the current core state, write back dirty registers,
 * and flip the cached target state to RUNNING. The companion
 * cortex_a8_internal_restart() performs the actual restart. On return
 * *address holds the PC that execution will resume at. */
static int cortex_a8_internal_restore(struct target *target, int current,
		uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	int retval;
	uint32_t resume_pc;

	/* working areas may alias target memory we are about to run over */
	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution)
	{
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (armv4_5->core_state)
	{
	case ARM_STATE_ARM:
		/* ARM instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
	armv4_5->pc->dirty = 1;
	armv4_5->pc->valid = 1;

	/* flush dirty registers (incl. the PC set above) back to the core */
	retval = cortex_a8_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		/* Single step past breakpoint at current address */
		if ((breakpoint = breakpoint_find(target, resume_pc)))
		{
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
959
/* Restart the core after cortex_a8_internal_restore(): disable ITR,
 * request the restart via DRCR (clearing sticky exception flags), and
 * wait (up to 1 s) for the core to report itself restarted. */
static int cortex_a8_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *swjdp = armv4_5->dap;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* any pending ITR instruction should have drained by now */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* turn off ITR-based instruction execution before restarting */
	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;;)
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	return ERROR_OK;
}
1018
1019 static int cortex_a8_restore_smp(struct target *target,int handle_breakpoints)
1020 {
1021 int retval = 0;
1022 struct target_list *head;
1023 struct target *curr;
1024 uint32_t address;
1025 head = target->head;
1026 while(head != (struct target_list*)NULL)
1027 {
1028 curr = head->target;
1029 if ((curr != target) && (curr->state != TARGET_RUNNING))
1030 {
1031 /* resume current address , not in step mode */
1032 retval += cortex_a8_internal_restore(curr, 1, &address,
1033 handle_breakpoints, 0);
1034 retval += cortex_a8_internal_restart(curr);
1035 }
1036 head = head->next;
1037
1038 }
1039 return retval;
1040 }
1041
1042 static int cortex_a8_resume(struct target *target, int current,
1043 uint32_t address, int handle_breakpoints, int debug_execution)
1044 {
1045 int retval = 0;
1046 /* dummy resume for smp toggle in order to reduce gdb impact */
1047 if ((target->smp) && (target->gdb_service->core[1]!=-1))
1048 {
1049 /* simulate a start and halt of target */
1050 target->gdb_service->target = NULL;
1051 target->gdb_service->core[0] = target->gdb_service->core[1];
1052 /* fake resume at next poll we play the target core[1], see poll*/
1053 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1054 return 0;
1055 }
1056 cortex_a8_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1057 if (target->smp)
1058 { target->gdb_service->core[0] = -1;
1059 retval += cortex_a8_restore_smp(target, handle_breakpoints);
1060 }
1061 cortex_a8_internal_restart(target);
1062
1063 if (!debug_execution)
1064 {
1065 target->state = TARGET_RUNNING;
1066 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1067 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1068 }
1069 else
1070 {
1071 target->state = TARGET_DEBUG_RUNNING;
1072 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1073 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1074 }
1075
1076 return ERROR_OK;
1077 }
1078
/* Called right after the core has entered debug state: enables ITR so
 * opcodes can be fed to the halted core, determines the halt reason from
 * the cached DSCR, and snapshots the register file into the register
 * cache (either via the DPM, or through a working-area fast path). */
static int cortex_a8_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason: decodes the *cached* DSCR value captured
	 * at halt time, not the one re-read above */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a8->fast_reg_read)
		/* allocation failure falls through to the DPM path below,
		 * since regfile_working_area stays NULL on failure */
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
	{
		/* normal (slow) path: read registers one by one via the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	}
	else
	{
		/* fast path: have the core dump r0..r15 to the working area,
		 * then read them back in one memory transfer */
		retval = cortex_a8_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
		{
			return retval;
		}

		/* read Current PSR */
		retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(armv4_5, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++)
		{
			reg = arm_reg_current(armv4_5, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the dumped PC is ahead of the
		 * halt address by the pipeline offset of the current state */
		if (cpsr & (1 << 5))
		{
			// T bit set for Thumb or ThumbEE state
			regfile[ARM_PC] -= 4;
		}
		else
		{
			// ARM state
			regfile[ARM_PC] -= 8;
		}

		/* store the corrected PC and mark it dirty so the adjusted
		 * value is written back to the core on resume */
		reg = armv4_5->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
//	armv4_5->exception_number = 0;
	if (armv7a->post_debug_entry)
	{
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1208
/* Post-debug-entry hook: reads the CP15 system control register (SCTLR)
 * and derives the MMU / D-cache / I-cache enable flags from it; on the
 * first call it also identifies the cache geometry. */
static int cortex_a8_post_debug_entry(struct target *target)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	retval = armv7a->armv4_5_common.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cortex_a8->cp15_control_reg);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);

	/* ctype == -1 means the cache has not been identified yet; do it
	 * once and cache the result */
	if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 1,	/* op1, op2 */
				0, 0,	/* CRn, CRm */
				&cache_type_reg);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);

		/* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A8 */
		armv4_5_identify_cache(cache_type_reg,
				&armv7a->armv4_5_mmu.armv4_5_cache);
	}

	/* SCTLR.M (bit 0), SCTLR.C (bit 2), SCTLR.I (bit 12) */
	armv7a->armv4_5_mmu.mmu_enabled =
			(cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
			(cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
			(cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;

	return ERROR_OK;
}
1251
1252 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1253 int handle_breakpoints)
1254 {
1255 struct armv7a_common *armv7a = target_to_armv7a(target);
1256 struct arm *armv4_5 = &armv7a->armv4_5_common;
1257 struct breakpoint *breakpoint = NULL;
1258 struct breakpoint stepbreakpoint;
1259 struct reg *r;
1260 int retval;
1261
1262 if (target->state != TARGET_HALTED)
1263 {
1264 LOG_WARNING("target not halted");
1265 return ERROR_TARGET_NOT_HALTED;
1266 }
1267
1268 /* current = 1: continue on current pc, otherwise continue at <address> */
1269 r = armv4_5->pc;
1270 if (!current)
1271 {
1272 buf_set_u32(r->value, 0, 32, address);
1273 }
1274 else
1275 {
1276 address = buf_get_u32(r->value, 0, 32);
1277 }
1278
1279 /* The front-end may request us not to handle breakpoints.
1280 * But since Cortex-A8 uses breakpoint for single step,
1281 * we MUST handle breakpoints.
1282 */
1283 handle_breakpoints = 1;
1284 if (handle_breakpoints) {
1285 breakpoint = breakpoint_find(target, address);
1286 if (breakpoint)
1287 cortex_a8_unset_breakpoint(target, breakpoint);
1288 }
1289
1290 /* Setup single step breakpoint */
1291 stepbreakpoint.address = address;
1292 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1293 ? 2 : 4;
1294 stepbreakpoint.type = BKPT_HARD;
1295 stepbreakpoint.set = 0;
1296
1297 /* Break on IVA mismatch */
1298 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1299
1300 target->debug_reason = DBG_REASON_SINGLESTEP;
1301
1302 retval = cortex_a8_resume(target, 1, address, 0, 0);
1303 if (retval != ERROR_OK)
1304 return retval;
1305
1306 long long then = timeval_ms();
1307 while (target->state != TARGET_HALTED)
1308 {
1309 retval = cortex_a8_poll(target);
1310 if (retval != ERROR_OK)
1311 return retval;
1312 if (timeval_ms() > then + 1000)
1313 {
1314 LOG_ERROR("timeout waiting for target halt");
1315 return ERROR_FAIL;
1316 }
1317 }
1318
1319 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1320
1321 target->debug_reason = DBG_REASON_BREAKPOINT;
1322
1323 if (breakpoint)
1324 cortex_a8_set_breakpoint(target, breakpoint, 0);
1325
1326 if (target->state != TARGET_HALTED)
1327 LOG_DEBUG("target stepped");
1328
1329 return ERROR_OK;
1330 }
1331
1332 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1333 {
1334 struct armv7a_common *armv7a = target_to_armv7a(target);
1335
1336 LOG_DEBUG(" ");
1337
1338 if (armv7a->pre_restore_context)
1339 armv7a->pre_restore_context(target);
1340
1341 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1342 }
1343
1344
1345 /*
1346 * Cortex-A8 Breakpoint and watchpoint functions
1347 */
1348
1349 /* Setup hardware Breakpoint Register Pair */
1350 static int cortex_a8_set_breakpoint(struct target *target,
1351 struct breakpoint *breakpoint, uint8_t matchmode)
1352 {
1353 int retval;
1354 int brp_i=0;
1355 uint32_t control;
1356 uint8_t byte_addr_select = 0x0F;
1357 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1358 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1359 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1360
1361 if (breakpoint->set)
1362 {
1363 LOG_WARNING("breakpoint already set");
1364 return ERROR_OK;
1365 }
1366
1367 if (breakpoint->type == BKPT_HARD)
1368 {
1369 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1370 brp_i++ ;
1371 if (brp_i >= cortex_a8->brp_num)
1372 {
1373 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1374 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1375 }
1376 breakpoint->set = brp_i + 1;
1377 if (breakpoint->length == 2)
1378 {
1379 byte_addr_select = (3 << (breakpoint->address & 0x02));
1380 }
1381 control = ((matchmode & 0x7) << 20)
1382 | (byte_addr_select << 5)
1383 | (3 << 1) | 1;
1384 brp_list[brp_i].used = 1;
1385 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1386 brp_list[brp_i].control = control;
1387 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1388 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1389 brp_list[brp_i].value);
1390 if (retval != ERROR_OK)
1391 return retval;
1392 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1393 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1394 brp_list[brp_i].control);
1395 if (retval != ERROR_OK)
1396 return retval;
1397 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1398 brp_list[brp_i].control,
1399 brp_list[brp_i].value);
1400 }
1401 else if (breakpoint->type == BKPT_SOFT)
1402 {
1403 uint8_t code[4];
1404 if (breakpoint->length == 2)
1405 {
1406 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1407 }
1408 else
1409 {
1410 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1411 }
1412 retval = target->type->read_memory(target,
1413 breakpoint->address & 0xFFFFFFFE,
1414 breakpoint->length, 1,
1415 breakpoint->orig_instr);
1416 if (retval != ERROR_OK)
1417 return retval;
1418 retval = target->type->write_memory(target,
1419 breakpoint->address & 0xFFFFFFFE,
1420 breakpoint->length, 1, code);
1421 if (retval != ERROR_OK)
1422 return retval;
1423 breakpoint->set = 0x11; /* Any nice value but 0 */
1424 }
1425
1426 return ERROR_OK;
1427 }
1428
1429 static int cortex_a8_set_context_breakpoint(struct target *target,
1430 struct breakpoint *breakpoint, uint8_t matchmode)
1431 {
1432 int retval = ERROR_FAIL;
1433 int brp_i=0;
1434 uint32_t control;
1435 uint8_t byte_addr_select = 0x0F;
1436 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1437 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1438 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1439
1440 if (breakpoint->set)
1441 {
1442 LOG_WARNING("breakpoint already set");
1443 return retval ;
1444 }
1445 /*check available context BRPs*/
1446 while ((brp_list[brp_i].used || (brp_list[brp_i].type!=BRP_CONTEXT)) && (brp_i < cortex_a8->brp_num))
1447 brp_i++ ;
1448
1449 if (brp_i >= cortex_a8->brp_num)
1450 {
1451 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1452 return ERROR_FAIL;
1453 }
1454
1455 breakpoint->set = brp_i + 1;
1456 control = ((matchmode & 0x7) << 20)
1457 | (byte_addr_select << 5)
1458 | (3 << 1) | 1;
1459 brp_list[brp_i].used = 1;
1460 brp_list[brp_i].value = (breakpoint->asid);
1461 brp_list[brp_i].control = control;
1462 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1463 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1464 brp_list[brp_i].value);
1465 if(retval != ERROR_OK)
1466 return retval;
1467 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1468 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1469 brp_list[brp_i].control);
1470 if(retval != ERROR_OK)
1471 return retval;
1472 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1473 brp_list[brp_i].control,
1474 brp_list[brp_i].value);
1475 return ERROR_OK;
1476
1477 }
1478
1479 static int cortex_a8_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1480 {
1481 int retval = ERROR_FAIL;
1482 int brp_1=0; //holds the contextID pair
1483 int brp_2=0; // holds the IVA pair
1484 uint32_t control_CTX, control_IVA;
1485 uint8_t CTX_byte_addr_select = 0x0F;
1486 uint8_t IVA_byte_addr_select = 0x0F;
1487 uint8_t CTX_machmode = 0x03;
1488 uint8_t IVA_machmode = 0x01;
1489 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1490 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1491 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1492
1493
1494
1495 if (breakpoint->set)
1496 {
1497 LOG_WARNING("breakpoint already set");
1498 return retval ;
1499 }
1500 /*check available context BRPs*/
1501 while ((brp_list[brp_1].used || (brp_list[brp_1].type!=BRP_CONTEXT)) && (brp_1 < cortex_a8->brp_num))
1502 brp_1++ ;
1503
1504 printf("brp(CTX) found num: %d \n",brp_1);
1505 if (brp_1 >= cortex_a8->brp_num)
1506 {
1507 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1508 return ERROR_FAIL;
1509 }
1510
1511 while ((brp_list[brp_2].used || (brp_list[brp_2].type!=BRP_NORMAL)) && (brp_2 < cortex_a8->brp_num))
1512 brp_2++ ;
1513
1514 printf("brp(IVA) found num: %d \n",brp_2);
1515 if (brp_2 >= cortex_a8->brp_num)
1516 {
1517 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1518 return ERROR_FAIL;
1519 }
1520
1521 breakpoint->set = brp_1 + 1;
1522 breakpoint->linked_BRP= brp_2;
1523 control_CTX = ((CTX_machmode & 0x7) << 20)
1524 | (brp_2 << 16)
1525 | (0 << 14)
1526 | (CTX_byte_addr_select << 5)
1527 | (3 << 1) | 1;
1528 brp_list[brp_1].used = 1;
1529 brp_list[brp_1].value = (breakpoint->asid);
1530 brp_list[brp_1].control = control_CTX;
1531 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1532 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1533 brp_list[brp_1].value);
1534 if (retval != ERROR_OK)
1535 return retval;
1536 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1537 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1538 brp_list[brp_1].control);
1539 if( retval != ERROR_OK )
1540 return retval;
1541
1542 control_IVA = ((IVA_machmode & 0x7) << 20)
1543 | (brp_1 << 16)
1544 | (IVA_byte_addr_select << 5)
1545 | (3 << 1) | 1;
1546 brp_list[brp_2].used = 1;
1547 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1548 brp_list[brp_2].control = control_IVA;
1549 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1550 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1551 brp_list[brp_2].value);
1552 if (retval != ERROR_OK)
1553 return retval;
1554 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1555 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1556 brp_list[brp_2].control);
1557 if (retval != ERROR_OK )
1558 return retval;
1559
1560 return ERROR_OK;
1561 }
1562
1563
1564 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1565 {
1566 int retval;
1567 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1568 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1569 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1570
1571 if (!breakpoint->set)
1572 {
1573 LOG_WARNING("breakpoint not set");
1574 return ERROR_OK;
1575 }
1576
1577 if (breakpoint->type == BKPT_HARD)
1578 {
1579 if ((breakpoint->address != 0) && (breakpoint->asid != 0))
1580 {
1581 int brp_i = breakpoint->set - 1;
1582 int brp_j = breakpoint->linked_BRP;
1583 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1584 {
1585 LOG_DEBUG("Invalid BRP number in breakpoint");
1586 return ERROR_OK;
1587 }
1588 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1589 brp_list[brp_i].control, brp_list[brp_i].value);
1590 brp_list[brp_i].used = 0;
1591 brp_list[brp_i].value = 0;
1592 brp_list[brp_i].control = 0;
1593 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1594 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1595 brp_list[brp_i].control);
1596 if (retval != ERROR_OK)
1597 return retval;
1598 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1599 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1600 brp_list[brp_i].value);
1601 if (retval != ERROR_OK)
1602 return retval;
1603 if ((brp_j < 0) || (brp_j >= cortex_a8->brp_num))
1604 {
1605 LOG_DEBUG("Invalid BRP number in breakpoint");
1606 return ERROR_OK;
1607 }
1608 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1609 brp_list[brp_j].control, brp_list[brp_j].value);
1610 brp_list[brp_j].used = 0;
1611 brp_list[brp_j].value = 0;
1612 brp_list[brp_j].control = 0;
1613 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1614 + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1615 brp_list[brp_j].control);
1616 if (retval != ERROR_OK)
1617 return retval;
1618 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1619 + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1620 brp_list[brp_j].value);
1621 if (retval != ERROR_OK)
1622 return retval;
1623 breakpoint->linked_BRP = 0;
1624 breakpoint->set = 0;
1625 return ERROR_OK;
1626
1627 }
1628 else
1629 {
1630 int brp_i = breakpoint->set - 1;
1631 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1632 {
1633 LOG_DEBUG("Invalid BRP number in breakpoint");
1634 return ERROR_OK;
1635 }
1636 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1637 brp_list[brp_i].control, brp_list[brp_i].value);
1638 brp_list[brp_i].used = 0;
1639 brp_list[brp_i].value = 0;
1640 brp_list[brp_i].control = 0;
1641 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1642 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1643 brp_list[brp_i].control);
1644 if (retval != ERROR_OK)
1645 return retval;
1646 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1647 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1648 brp_list[brp_i].value);
1649 if (retval != ERROR_OK)
1650 return retval;
1651 breakpoint->set = 0;
1652 return ERROR_OK;
1653 }
1654 }
1655 else
1656 {
1657 /* restore original instruction (kept in target endianness) */
1658 if (breakpoint->length == 4)
1659 {
1660 retval = target->type->write_memory(target,
1661 breakpoint->address & 0xFFFFFFFE,
1662 4, 1, breakpoint->orig_instr);
1663 if (retval != ERROR_OK)
1664 return retval;
1665 }
1666 else
1667 {
1668 retval = target->type->write_memory(target,
1669 breakpoint->address & 0xFFFFFFFE,
1670 2, 1, breakpoint->orig_instr);
1671 if (retval != ERROR_OK)
1672 return retval;
1673 }
1674 }
1675 breakpoint->set = 0;
1676
1677 return ERROR_OK;
1678 }
1679
1680 static int cortex_a8_add_breakpoint(struct target *target,
1681 struct breakpoint *breakpoint)
1682 {
1683 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1684
1685 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1686 {
1687 LOG_INFO("no hardware breakpoint available");
1688 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1689 }
1690
1691 if (breakpoint->type == BKPT_HARD)
1692 cortex_a8->brp_num_available--;
1693
1694 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1695 }
1696
1697 static int cortex_a8_add_context_breakpoint(struct target *target,
1698 struct breakpoint *breakpoint)
1699 {
1700 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1701
1702 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1703 {
1704 LOG_INFO("no hardware breakpoint available");
1705 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1706 }
1707
1708 if (breakpoint->type == BKPT_HARD)
1709 cortex_a8->brp_num_available--;
1710
1711 return cortex_a8_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1712 }
1713
1714 static int cortex_a8_add_hybrid_breakpoint(struct target *target,
1715 struct breakpoint *breakpoint)
1716 {
1717 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1718
1719 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1720 {
1721 LOG_INFO("no hardware breakpoint available");
1722 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1723 }
1724
1725 if (breakpoint->type == BKPT_HARD)
1726 cortex_a8->brp_num_available--;
1727
1728 return cortex_a8_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1729 }
1730
1731
1732 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1733 {
1734 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1735
1736 #if 0
1737 /* It is perfectly possible to remove breakpoints while the target is running */
1738 if (target->state != TARGET_HALTED)
1739 {
1740 LOG_WARNING("target not halted");
1741 return ERROR_TARGET_NOT_HALTED;
1742 }
1743 #endif
1744
1745 if (breakpoint->set)
1746 {
1747 cortex_a8_unset_breakpoint(target, breakpoint);
1748 if (breakpoint->type == BKPT_HARD)
1749 cortex_a8->brp_num_available++ ;
1750 }
1751
1752
1753 return ERROR_OK;
1754 }
1755
1756
1757
1758 /*
1759 * Cortex-A8 Reset functions
1760 */
1761
1762 static int cortex_a8_assert_reset(struct target *target)
1763 {
1764 struct armv7a_common *armv7a = target_to_armv7a(target);
1765
1766 LOG_DEBUG(" ");
1767
1768 /* FIXME when halt is requested, make it work somehow... */
1769
1770 /* Issue some kind of warm reset. */
1771 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1772 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1773 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1774 /* REVISIT handle "pulls" cases, if there's
1775 * hardware that needs them to work.
1776 */
1777 jtag_add_reset(0, 1);
1778 } else {
1779 LOG_ERROR("%s: how to reset?", target_name(target));
1780 return ERROR_FAIL;
1781 }
1782
1783 /* registers are now invalid */
1784 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1785
1786 target->state = TARGET_RESET;
1787
1788 return ERROR_OK;
1789 }
1790
1791 static int cortex_a8_deassert_reset(struct target *target)
1792 {
1793 int retval;
1794
1795 LOG_DEBUG(" ");
1796
1797 /* be certain SRST is off */
1798 jtag_add_reset(0, 0);
1799
1800 retval = cortex_a8_poll(target);
1801 if (retval != ERROR_OK)
1802 return retval;
1803
1804 if (target->reset_halt) {
1805 if (target->state != TARGET_HALTED) {
1806 LOG_WARNING("%s: ran after reset and before halt ...",
1807 target_name(target));
1808 if ((retval = target_halt(target)) != ERROR_OK)
1809 return retval;
1810 }
1811 }
1812
1813 return ERROR_OK;
1814 }
1815
1816
/* Write target memory through the APB-AP by executing load/store opcodes
 * on the halted core: r0 holds the (word-aligned) address, r1 shuttles
 * one word at a time.  Partial leading/trailing words are handled by a
 * read-modify-write of the enclosing word.  r0/r1 are marked dirty so
 * their cached values are restored when the core resumes. */
static int cortex_a8_write_apb_ab_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, const uint8_t *buffer)
{

	/* write memory through APB-AP */

	int retval = ERROR_INVALID_ARGUMENTS;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	int total_bytes = count * size;
	int start_byte, nbytes_to_write, i;
	struct reg *reg;
	/* one-word staging buffer; the union gives per-byte access to the
	 * word moved through r1.  NOTE(review): byte placement assumes the
	 * host's byte order matches the target's memory order — confirm
	 * for big-endian hosts/targets. */
	union _data {
		uint8_t uc_a[4];
		uint32_t ui;
	} data;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* r0 and r1 are clobbered below; mark them dirty so they are
	 * written back from the cache before resume */
	reg = arm_reg_current(armv4_5, 0);
	reg->dirty = 1;
	reg = arm_reg_current(armv4_5, 1);
	reg->dirty = 1;

	/* r0 = word-aligned start address */
	retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
	if (retval != ERROR_OK)
		return retval;

	/* byte offset of the first byte within its word */
	start_byte = address & 0x3;

	while (total_bytes > 0) {

		nbytes_to_write = 4 - start_byte;
		if (total_bytes < nbytes_to_write)
			nbytes_to_write = total_bytes;

		if ( nbytes_to_write != 4 ) {
			/* partial word: fetch the existing word first so the
			 * untouched bytes are preserved (read-modify-write) */

			/* execute instruction LDR r1, [r0] */
			retval = cortex_a8_exec_opcode(target, ARMV4_5_LDR(1, 0), NULL);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
			if (retval != ERROR_OK)
				return retval;
		}

		/* merge the caller's bytes into the staged word */
		for (i = 0; i < nbytes_to_write; ++i)
			data.uc_a[i + start_byte] = *buffer++;

		retval = cortex_a8_dap_write_coreregister_u32(target, data.ui, 1);
		if (retval != ERROR_OK)
			return retval;

		/* execute instruction STRW r1, [r0], 1 (0xe4801004); the
		 * post-increment advances r0 to the next word */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_STRW_IP(1, 0) , NULL);
		if (retval != ERROR_OK)
			return retval;

		total_bytes -= nbytes_to_write;
		start_byte = 0;	/* only the first word can be misaligned */
	}

	return retval;
}
1888
1889
/* Read target memory through the APB-AP by executing load opcodes on the
 * halted core: r0 holds the (word-aligned) address, r1 receives one word
 * per iteration.  Unaligned start addresses are handled by skipping the
 * leading bytes of the first word.  r0/r1 are marked dirty so their
 * cached values are restored when the core resumes. */
static int cortex_a8_read_apb_ab_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, uint8_t *buffer)
{

	/* read memory through APB-AP */

	int retval = ERROR_INVALID_ARGUMENTS;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	int total_bytes = count * size;
	int start_byte, nbytes_to_read, i;
	struct reg *reg;
	/* one-word staging buffer; the union gives per-byte access to the
	 * word read back through r1.  NOTE(review): byte extraction assumes
	 * host byte order matches target memory order — confirm for
	 * big-endian configurations. */
	union _data {
		uint8_t uc_a[4];
		uint32_t ui;
	} data;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* r0 and r1 are clobbered below; mark them dirty so they are
	 * written back from the cache before resume */
	reg = arm_reg_current(armv4_5, 0);
	reg->dirty = 1;
	reg = arm_reg_current(armv4_5, 1);
	reg->dirty = 1;

	/* r0 = word-aligned start address */
	retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
	if (retval != ERROR_OK)
		return retval;

	/* byte offset of the first byte within its word */
	start_byte = address & 0x3;

	while (total_bytes > 0) {

		/* execute instruction LDRW r1, [r0], 4 (0xe4901004); the
		 * post-increment advances r0 to the next word */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_LDRW_IP(1, 0), NULL);
		if (retval != ERROR_OK)
			return retval;

		retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
		if (retval != ERROR_OK)
			return retval;

		nbytes_to_read = 4 - start_byte;
		if (total_bytes < nbytes_to_read)
			nbytes_to_read = total_bytes;

		/* copy out only the requested bytes of the fetched word */
		for (i = 0; i < nbytes_to_read; ++i)
			*buffer++ = data.uc_a[i + start_byte];

		total_bytes -= nbytes_to_read;
		start_byte = 0;	/* only the first word can be misaligned */
	}

	return retval;
}
1949
1950
1951
/*
 * Cortex-A8 Memory access
 *
 * This is the same as for the Cortex-M3, but we must also use the
 * correct AP number for every access.
 */
1958
1959 static int cortex_a8_read_phys_memory(struct target *target,
1960 uint32_t address, uint32_t size,
1961 uint32_t count, uint8_t *buffer)
1962 {
1963 struct armv7a_common *armv7a = target_to_armv7a(target);
1964 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1965 int retval = ERROR_INVALID_ARGUMENTS;
1966 uint8_t apsel = swjdp->apsel;
1967 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d",
1968 address, size, count);
1969
1970 if (count && buffer) {
1971
1972 if ( apsel == swjdp_memoryap ) {
1973
1974 /* read memory through AHB-AP */
1975
1976 switch (size) {
1977 case 4:
1978 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
1979 buffer, 4 * count, address);
1980 break;
1981 case 2:
1982 retval = mem_ap_sel_read_buf_u16(swjdp, swjdp_memoryap,
1983 buffer, 2 * count, address);
1984 break;
1985 case 1:
1986 retval = mem_ap_sel_read_buf_u8(swjdp, swjdp_memoryap,
1987 buffer, count, address);
1988 break;
1989 }
1990 } else {
1991
1992 /* read memory through APB-AP */
1993 int enabled = 0;
1994
1995 retval = cortex_a8_mmu(target, &enabled);
1996 if (retval != ERROR_OK)
1997 return retval;
1998
1999 if (enabled)
2000 {
2001 LOG_WARNING("Reading physical memory through \
2002 APB with MMU enabled is not yet implemented");
2003 return ERROR_TARGET_FAILURE;
2004 }
2005 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2006 }
2007 }
2008 return retval;
2009 }
2010
2011 static int cortex_a8_read_memory(struct target *target, uint32_t address,
2012 uint32_t size, uint32_t count, uint8_t *buffer)
2013 {
2014 int enabled = 0;
2015 uint32_t virt, phys;
2016 int retval;
2017 struct armv7a_common *armv7a = target_to_armv7a(target);
2018 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2019 uint8_t apsel = swjdp->apsel;
2020
2021 /* cortex_a8 handles unaligned memory access */
2022 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2023 size, count);
2024 if (apsel == swjdp_memoryap) {
2025 retval = cortex_a8_mmu(target, &enabled);
2026 if (retval != ERROR_OK)
2027 return retval;
2028
2029
2030 if(enabled)
2031 {
2032 virt = address;
2033 retval = cortex_a8_virt2phys(target, virt, &phys);
2034 if (retval != ERROR_OK)
2035 return retval;
2036
2037 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x",
2038 virt, phys);
2039 address = phys;
2040 }
2041 retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
2042 } else {
2043 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2044 }
2045 return retval;
2046 }
2047
2048 static int cortex_a8_write_phys_memory(struct target *target,
2049 uint32_t address, uint32_t size,
2050 uint32_t count, const uint8_t *buffer)
2051 {
2052 struct armv7a_common *armv7a = target_to_armv7a(target);
2053 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2054 int retval = ERROR_INVALID_ARGUMENTS;
2055 uint8_t apsel = swjdp->apsel;
2056
2057 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address,
2058 size, count);
2059
2060 if (count && buffer) {
2061
2062 if ( apsel == swjdp_memoryap ) {
2063
2064 /* write memory through AHB-AP */
2065
2066 switch (size) {
2067 case 4:
2068 retval = mem_ap_sel_write_buf_u32(swjdp, swjdp_memoryap,
2069 buffer, 4 * count, address);
2070 break;
2071 case 2:
2072 retval = mem_ap_sel_write_buf_u16(swjdp, swjdp_memoryap,
2073 buffer, 2 * count, address);
2074 break;
2075 case 1:
2076 retval = mem_ap_sel_write_buf_u8(swjdp, swjdp_memoryap,
2077 buffer, count, address);
2078 break;
2079 }
2080
2081 } else {
2082
2083 /* write memory through APB-AP */
2084 int enabled = 0;
2085
2086 retval = cortex_a8_mmu(target, &enabled);
2087 if (retval != ERROR_OK)
2088 return retval;
2089
2090 if (enabled)
2091 {
2092 LOG_WARNING("Writing physical memory through APB with MMU" \
2093 "enabled is not yet implemented");
2094 return ERROR_TARGET_FAILURE;
2095 }
2096 return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2097 }
2098 }
2099
2100
2101 /* REVISIT this op is generic ARMv7-A/R stuff */
2102 if (retval == ERROR_OK && target->state == TARGET_HALTED)
2103 {
2104 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
2105
2106 retval = dpm->prepare(dpm);
2107 if (retval != ERROR_OK)
2108 return retval;
2109
2110 /* The Cache handling will NOT work with MMU active, the
2111 * wrong addresses will be invalidated!
2112 *
2113 * For both ICache and DCache, walk all cache lines in the
2114 * address range. Cortex-A8 has fixed 64 byte line length.
2115 *
2116 * REVISIT per ARMv7, these may trigger watchpoints ...
2117 */
2118
2119 /* invalidate I-Cache */
2120 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
2121 {
2122 /* ICIMVAU - Invalidate Cache single entry
2123 * with MVA to PoU
2124 * MCR p15, 0, r0, c7, c5, 1
2125 */
2126 for (uint32_t cacheline = address;
2127 cacheline < address + size * count;
2128 cacheline += 64) {
2129 retval = dpm->instr_write_data_r0(dpm,
2130 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2131 cacheline);
2132 if (retval != ERROR_OK)
2133 return retval;
2134 }
2135 }
2136
2137 /* invalidate D-Cache */
2138 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
2139 {
2140 /* DCIMVAC - Invalidate data Cache line
2141 * with MVA to PoC
2142 * MCR p15, 0, r0, c7, c6, 1
2143 */
2144 for (uint32_t cacheline = address;
2145 cacheline < address + size * count;
2146 cacheline += 64) {
2147 retval = dpm->instr_write_data_r0(dpm,
2148 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2149 cacheline);
2150 if (retval != ERROR_OK)
2151 return retval;
2152 }
2153 }
2154
2155 /* (void) */ dpm->finish(dpm);
2156 }
2157
2158 return retval;
2159 }
2160
2161 static int cortex_a8_write_memory(struct target *target, uint32_t address,
2162 uint32_t size, uint32_t count, const uint8_t *buffer)
2163 {
2164 int enabled = 0;
2165 uint32_t virt, phys;
2166 int retval;
2167 struct armv7a_common *armv7a = target_to_armv7a(target);
2168 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2169 uint8_t apsel = swjdp->apsel;
2170 /* cortex_a8 handles unaligned memory access */
2171 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2172 size, count);
2173 if (apsel == swjdp_memoryap) {
2174
2175 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
2176 retval = cortex_a8_mmu(target, &enabled);
2177 if (retval != ERROR_OK)
2178 return retval;
2179
2180 if(enabled)
2181 {
2182 virt = address;
2183 retval = cortex_a8_virt2phys(target, virt, &phys);
2184 if (retval != ERROR_OK)
2185 return retval;
2186 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
2187 address = phys;
2188 }
2189
2190 retval = cortex_a8_write_phys_memory(target, address, size,
2191 count, buffer);
2192 }
2193 else {
2194 retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2195 }
2196 return retval;
2197 }
2198
static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, const uint8_t *buffer)
{
	/* Bulk writes are simply word (32-bit) writes; delegate to the
	 * generic write path. */
	return cortex_a8_write_memory(target, address, 4, count, buffer);
}
2204
2205
2206 static int cortex_a8_handle_target_request(void *priv)
2207 {
2208 struct target *target = priv;
2209 struct armv7a_common *armv7a = target_to_armv7a(target);
2210 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2211 int retval;
2212
2213 if (!target_was_examined(target))
2214 return ERROR_OK;
2215 if (!target->dbg_msg_enabled)
2216 return ERROR_OK;
2217
2218 if (target->state == TARGET_RUNNING)
2219 {
2220 uint32_t request;
2221 uint32_t dscr;
2222 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2223 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2224
2225 /* check if we have data */
2226 while ((dscr & DSCR_DTR_TX_FULL) && (retval==ERROR_OK))
2227 {
2228 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2229 armv7a->debug_base+ CPUDBG_DTRTX, &request);
2230 if (retval == ERROR_OK)
2231 {
2232 target_request(target, request);
2233 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2234 armv7a->debug_base+ CPUDBG_DSCR, &dscr);
2235 }
2236 }
2237 }
2238
2239 return ERROR_OK;
2240 }
2241
2242 /*
2243 * Cortex-A8 target information and configuration
2244 */
2245
2246 static int cortex_a8_examine_first(struct target *target)
2247 {
2248 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2249 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2250 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2251 int i;
2252 int retval = ERROR_OK;
2253 uint32_t didr, ctypr, ttypr, cpuid;
2254
2255 /* We do one extra read to ensure DAP is configured,
2256 * we call ahbap_debugport_init(swjdp) instead
2257 */
2258 retval = ahbap_debugport_init(swjdp);
2259 if (retval != ERROR_OK)
2260 return retval;
2261
2262 if (!target->dbgbase_set)
2263 {
2264 uint32_t dbgbase;
2265 /* Get ROM Table base */
2266 uint32_t apid;
2267 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2268 if (retval != ERROR_OK)
2269 return retval;
2270 /* Lookup 0x15 -- Processor DAP */
2271 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2272 &armv7a->debug_base);
2273 if (retval != ERROR_OK)
2274 return retval;
2275 }
2276 else
2277 {
2278 armv7a->debug_base = target->dbgbase;
2279 }
2280
2281 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2282 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2283 if (retval != ERROR_OK)
2284 return retval;
2285
2286 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2287 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
2288 {
2289 LOG_DEBUG("Examine %s failed", "CPUID");
2290 return retval;
2291 }
2292
2293 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2294 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
2295 {
2296 LOG_DEBUG("Examine %s failed", "CTYPR");
2297 return retval;
2298 }
2299
2300 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2301 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
2302 {
2303 LOG_DEBUG("Examine %s failed", "TTYPR");
2304 return retval;
2305 }
2306
2307 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2308 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
2309 {
2310 LOG_DEBUG("Examine %s failed", "DIDR");
2311 return retval;
2312 }
2313
2314 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2315 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2316 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2317 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2318
2319 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
2320 retval = cortex_a8_dpm_setup(cortex_a8, didr);
2321 if (retval != ERROR_OK)
2322 return retval;
2323
2324 /* Setup Breakpoint Register Pairs */
2325 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
2326 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2327 cortex_a8->brp_num_available = cortex_a8->brp_num;
2328 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
2329 // cortex_a8->brb_enabled = ????;
2330 for (i = 0; i < cortex_a8->brp_num; i++)
2331 {
2332 cortex_a8->brp_list[i].used = 0;
2333 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
2334 cortex_a8->brp_list[i].type = BRP_NORMAL;
2335 else
2336 cortex_a8->brp_list[i].type = BRP_CONTEXT;
2337 cortex_a8->brp_list[i].value = 0;
2338 cortex_a8->brp_list[i].control = 0;
2339 cortex_a8->brp_list[i].BRPn = i;
2340 }
2341
2342 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
2343
2344 target_set_examined(target);
2345 return ERROR_OK;
2346 }
2347
2348 static int cortex_a8_examine(struct target *target)
2349 {
2350 int retval = ERROR_OK;
2351
2352 /* don't re-probe hardware after each reset */
2353 if (!target_was_examined(target))
2354 retval = cortex_a8_examine_first(target);
2355
2356 /* Configure core debug access */
2357 if (retval == ERROR_OK)
2358 retval = cortex_a8_init_debug_access(target);
2359
2360 return retval;
2361 }
2362
2363 /*
2364 * Cortex-A8 target creation and initialization
2365 */
2366
2367 static int cortex_a8_init_target(struct command_context *cmd_ctx,
2368 struct target *target)
2369 {
2370 /* examine_first() does a bunch of this */
2371 return ERROR_OK;
2372 }
2373
2374 static int cortex_a8_init_arch_info(struct target *target,
2375 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
2376 {
2377 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2378 struct arm *armv4_5 = &armv7a->armv4_5_common;
2379 struct adiv5_dap *dap = &armv7a->dap;
2380
2381 armv7a->armv4_5_common.dap = dap;
2382
2383 /* Setup struct cortex_a8_common */
2384 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
2385 /* tap has no dap initialized */
2386 if (!tap->dap)
2387 {
2388 armv7a->armv4_5_common.dap = dap;
2389 /* Setup struct cortex_a8_common */
2390 armv4_5->arch_info = armv7a;
2391
2392 /* prepare JTAG information for the new target */
2393 cortex_a8->jtag_info.tap = tap;
2394 cortex_a8->jtag_info.scann_size = 4;
2395
2396 /* Leave (only) generic DAP stuff for debugport_init() */
2397 dap->jtag_info = &cortex_a8->jtag_info;
2398
2399 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2400 dap->tar_autoincr_block = (1 << 10);
2401 dap->memaccess_tck = 80;
2402 tap->dap = dap;
2403 }
2404 else
2405 armv7a->armv4_5_common.dap = tap->dap;
2406
2407 cortex_a8->fast_reg_read = 0;
2408
2409 /* Set default value */
2410 cortex_a8->current_address_mode = ARM_MODE_ANY;
2411
2412 /* register arch-specific functions */
2413 armv7a->examine_debug_reason = NULL;
2414
2415 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
2416
2417 armv7a->pre_restore_context = NULL;
2418 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
2419 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
2420 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
2421 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
2422 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
2423 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
2424 armv7a->armv4_5_mmu.has_tiny_pages = 1;
2425 armv7a->armv4_5_mmu.mmu_enabled = 0;
2426
2427
2428 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
2429
2430 /* REVISIT v7a setup should be in a v7a-specific routine */
2431 arm_init_arch_info(target, armv4_5);
2432 armv7a->common_magic = ARMV7_COMMON_MAGIC;
2433
2434 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
2435
2436 return ERROR_OK;
2437 }
2438
2439 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
2440 {
2441 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2442
2443 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2444 }
2445
static int cortex_a8_get_ttb(struct target *target, uint32_t *result)
{
	/* Return the active translation table base (TTBR0 for user space,
	 * TTBR1 for kernel space), masked to its 16 KiB-aligned base.
	 *
	 * NOTE(review): the inline comments previously claimed these MRCs
	 * read the CP15 System Control Register (c1,c0,0); the encodings
	 * used here (CRn=2, CRm=0, op2=0/1) actually select TTBR0/TTBR1.
	 * Comments corrected; code unchanged.
	 */
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	uint32_t ttb = 0, retval = ERROR_OK;

	/* current_address_mode is set inside cortex_a8_virt2phys()
	   where we can determine if address belongs to user or kernel */
	if(cortex_a8->current_address_mode == ARM_MODE_SVC)
	{
		/* MRC p15, 0, <Rt>, c2, c0, 1 ; Read CP15 TTBR1 (kernel) */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 1,   /* op1, op2 */
				2, 0,   /* CRn, CRm */
				&ttb);
		if (retval != ERROR_OK)
			return retval;
	}
	else if(cortex_a8->current_address_mode == ARM_MODE_USR)
	{
		/* MRC p15, 0, <Rt>, c2, c0, 0 ; Read CP15 TTBR0 (user) */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 0,   /* op1, op2 */
				2, 0,   /* CRn, CRm */
				&ttb);
		if (retval != ERROR_OK)
			return retval;
	}
	/* we don't know whose address is: user or kernel
	   we assume that if we are in kernel mode then
	   address belongs to kernel else if in user mode
	   - to user */
	else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
	{
		/* MRC p15, 0, <Rt>, c2, c0, 1 ; Read CP15 TTBR1 (kernel) */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 1,   /* op1, op2 */
				2, 0,   /* CRn, CRm */
				&ttb);
		if (retval != ERROR_OK)
			return retval;
	}
	else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
	{
		/* MRC p15, 0, <Rt>, c2, c0, 0 ; Read CP15 TTBR0 (user) */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 0,   /* op1, op2 */
				2, 0,   /* CRn, CRm */
				&ttb);
		if (retval != ERROR_OK)
			return retval;
	}
	/* finally we don't know whose ttb to use: user or kernel */
	else
		LOG_ERROR("Don't know how to get ttb for current mode!!!");

	/* translation table base is 16 KiB aligned; mask off the low bits */
	ttb &= 0xffffc000;

	*result = ttb;

	return ERROR_OK;
}
2508
2509 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
2510 int d_u_cache, int i_cache)
2511 {
2512 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2513 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2514 uint32_t cp15_control;
2515 int retval;
2516
2517 /* read cp15 control register */
2518 retval = armv7a->armv4_5_common.mrc(target, 15,
2519 0, 0, /* op1, op2 */
2520 1, 0, /* CRn, CRm */
2521 &cp15_control);
2522 if (retval != ERROR_OK)
2523 return retval;
2524
2525
2526 if (mmu)
2527 cp15_control &= ~0x1U;
2528
2529 if (d_u_cache)
2530 cp15_control &= ~0x4U;
2531
2532 if (i_cache)
2533 cp15_control &= ~0x1000U;
2534
2535 retval = armv7a->armv4_5_common.mcr(target, 15,
2536 0, 0, /* op1, op2 */
2537 1, 0, /* CRn, CRm */
2538 cp15_control);
2539 return retval;
2540 }
2541
2542 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
2543 int d_u_cache, int i_cache)
2544 {
2545 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2546 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2547 uint32_t cp15_control;
2548 int retval;
2549
2550 /* read cp15 control register */
2551 retval = armv7a->armv4_5_common.mrc(target, 15,
2552 0, 0, /* op1, op2 */
2553 1, 0, /* CRn, CRm */
2554 &cp15_control);
2555 if (retval != ERROR_OK)
2556 return retval;
2557
2558 if (mmu)
2559 cp15_control |= 0x1U;
2560
2561 if (d_u_cache)
2562 cp15_control |= 0x4U;
2563
2564 if (i_cache)
2565 cp15_control |= 0x1000U;
2566
2567 retval = armv7a->armv4_5_common.mcr(target, 15,
2568 0, 0, /* op1, op2 */
2569 1, 0, /* CRn, CRm */
2570 cp15_control);
2571 return retval;
2572 }
2573
2574
2575 static int cortex_a8_mmu(struct target *target, int *enabled)
2576 {
2577 if (target->state != TARGET_HALTED) {
2578 LOG_ERROR("%s: target not halted", __func__);
2579 return ERROR_TARGET_INVALID;
2580 }
2581
2582 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
2583 return ERROR_OK;
2584 }
2585
2586 static int cortex_a8_virt2phys(struct target *target,
2587 uint32_t virt, uint32_t *phys)
2588 {
2589 uint32_t cb;
2590 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2591 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2592 struct armv7a_common *armv7a = target_to_armv7a(target);
2593
2594 /* We assume that virtual address is separated
2595 between user and kernel in Linux style:
2596 0x00000000-0xbfffffff - User space
2597 0xc0000000-0xffffffff - Kernel space */
2598 if( virt < 0xc0000000 ) /* Linux user space */
2599 cortex_a8->current_address_mode = ARM_MODE_USR;
2600 else /* Linux kernel */
2601 cortex_a8->current_address_mode = ARM_MODE_SVC;
2602 uint32_t ret;
2603 int retval = armv4_5_mmu_translate_va(target,
2604 &armv7a->armv4_5_mmu, virt, &cb, &ret);
2605 if (retval != ERROR_OK)
2606 return retval;
2607 /* Reset the flag. We don't want someone else to use it by error */
2608 cortex_a8->current_address_mode = ARM_MODE_ANY;
2609
2610 *phys = ret;
2611 return ERROR_OK;
2612 }
2613
2614 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2615 {
2616 struct target *target = get_current_target(CMD_CTX);
2617 struct armv7a_common *armv7a = target_to_armv7a(target);
2618
2619 return armv4_5_handle_cache_info_command(CMD_CTX,
2620 &armv7a->armv4_5_mmu.armv4_5_cache);
2621 }
2622
2623
2624 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2625 {
2626 struct target *target = get_current_target(CMD_CTX);
2627 if (!target_was_examined(target))
2628 {
2629 LOG_ERROR("target not examined yet");
2630 return ERROR_FAIL;
2631 }
2632
2633 return cortex_a8_init_debug_access(target);
2634 }
2635 COMMAND_HANDLER(cortex_a8_handle_smp_off_command)
2636 {
2637 struct target *target = get_current_target(CMD_CTX);
2638 /* check target is an smp target */
2639 struct target_list *head;
2640 struct target *curr;
2641 head = target->head;
2642 target->smp = 0;
2643 if (head != (struct target_list*)NULL)
2644 {
2645 while (head != (struct target_list*)NULL)
2646 {
2647 curr = head->target;
2648 curr->smp = 0;
2649 head = head->next;
2650 }
2651 /* fixes the target display to the debugger */
2652 target->gdb_service->target = target;
2653 }
2654 return ERROR_OK;
2655 }
2656
2657 COMMAND_HANDLER(cortex_a8_handle_smp_on_command)
2658 {
2659 struct target *target = get_current_target(CMD_CTX);
2660 struct target_list *head;
2661 struct target *curr;
2662 head = target->head;
2663 if (head != (struct target_list*)NULL)
2664 { target->smp=1;
2665 while (head != (struct target_list*)NULL)
2666 {
2667 curr = head->target;
2668 curr->smp = 1;
2669 head = head->next;
2670 }
2671 }
2672 return ERROR_OK;
2673 }
2674
COMMAND_HANDLER(cortex_a8_handle_smp_gdb_command)
{
	/* Display — and, with one argument, set — the core id gdb is
	 * attached to in an SMP group. Only meaningful when this target
	 * has an SMP list (target->head). */
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != (struct target_list*)NULL)
	{
		if (CMD_ARGC == 1)
		{
			int coreid = 0;
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			/* NOTE(review): retval is never assigned here, so this
			 * check appears dead — presumably COMMAND_PARSE_NUMBER
			 * returns from the handler itself on parse failure;
			 * verify against the macro definition. */
			if (ERROR_OK != retval)
				return retval;
			/* core[1] holds the requested/next core id */
			target->gdb_service->core[1]=coreid;

		}
		/* print current -> requested core mapping */
		command_print(CMD_CTX, "gdb coreid %d -> %d", target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
2697
2698 static const struct command_registration cortex_a8_exec_command_handlers[] = {
2699 {
2700 .name = "cache_info",
2701 .handler = cortex_a8_handle_cache_info_command,
2702 .mode = COMMAND_EXEC,
2703 .help = "display information about target caches",
2704 },
2705 {
2706 .name = "dbginit",
2707 .handler = cortex_a8_handle_dbginit_command,
2708 .mode = COMMAND_EXEC,
2709 .help = "Initialize core debug",
2710 },
2711 { .name ="smp_off",
2712 .handler = cortex_a8_handle_smp_off_command,
2713 .mode = COMMAND_EXEC,
2714 .help = "Stop smp handling",
2715 },
2716 {
2717 .name ="smp_on",
2718 .handler = cortex_a8_handle_smp_on_command,
2719 .mode = COMMAND_EXEC,
2720 .help = "Restart smp handling",
2721 },
2722 {
2723 .name ="smp_gdb",
2724 .handler = cortex_a8_handle_smp_gdb_command,
2725 .mode = COMMAND_EXEC,
2726 .help = "display/fix current core played to gdb",
2727 },
2728
2729
2730 COMMAND_REGISTRATION_DONE
2731 };
/* Top-level command registration: chains the generic ARM and ARMv7-A
 * command groups, plus the cortex_a8-specific group above. */
static const struct command_registration cortex_a8_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a8",
		.mode = COMMAND_ANY,
		.help = "Cortex-A8 command group",
		.chain = cortex_a8_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2747
/* OpenOCD target driver vtable for the Cortex-A8. */
struct target_type cortexa8_target = {
	.name = "cortex_a8",

	/* state inspection */
	.poll = cortex_a8_poll,
	.arch_state = armv7a_arch_state,

	.target_request_data = NULL,

	/* run control */
	.halt = cortex_a8_halt,
	.resume = cortex_a8_resume,
	.step = cortex_a8_step,

	.assert_reset = cortex_a8_assert_reset,
	.deassert_reset = cortex_a8_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* memory access (virtual addresses) */
	.read_memory = cortex_a8_read_memory,
	.write_memory = cortex_a8_write_memory,
	.bulk_write_memory = cortex_a8_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* breakpoints; watchpoints are not implemented for this core */
	.add_breakpoint = cortex_a8_add_breakpoint,
	.add_context_breakpoint = cortex_a8_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a8_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a8_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	/* lifecycle */
	.commands = cortex_a8_command_handlers,
	.target_create = cortex_a8_target_create,
	.init_target = cortex_a8_init_target,
	.examine = cortex_a8_examine,

	/* memory access (physical addresses) and address translation */
	.read_phys_memory = cortex_a8_read_phys_memory,
	.write_phys_memory = cortex_a8_write_phys_memory,
	.mmu = cortex_a8_mmu,
	.virt2phys = cortex_a8_virt2phys,

};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)