jtag: linuxgpiod: drop extra parenthesis
[openocd.git] / src / target / xscale.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2006, 2007 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2007,2008 Øyvind Harboe *
8 * oyvind.harboe@zylin.com *
9 * *
10 * Copyright (C) 2009 Michael Schwingen *
11 * michael@schwingen.org *
12 ***************************************************************************/
13
14 #ifdef HAVE_CONFIG_H
15 #include "config.h"
16 #endif
17
18 #include "breakpoints.h"
19 #include "xscale.h"
20 #include "target_type.h"
21 #include "arm_jtag.h"
22 #include "arm_simulator.h"
23 #include "arm_disassembler.h"
24 #include <helper/time_support.h>
25 #include "register.h"
26 #include "image.h"
27 #include "arm_opcodes.h"
28 #include "armv4_5.h"
29
30 /*
31 * Important XScale documents available as of October 2009 include:
32 *
33 * Intel XScale® Core Developer’s Manual, January 2004
34 * Order Number: 273473-002
35 * This has a chapter detailing debug facilities, and punts some
36 * details to chip-specific microarchitecture documents.
37 *
38 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
39 * Document Number: 273539-005
40 * Less detailed than the developer's manual, but summarizes those
41 * missing details (for most XScales) and gives LOTS of notes about
42 * debugger/handler interaction issues. Presents a simpler reset
43 * and load-handler sequence than the arch doc. (Note, OpenOCD
44 * doesn't currently support "Hot-Debug" as defined there.)
45 *
46 * Chip-specific microarchitecture documents may also be useful.
47 */
48
49 /* forward declarations */
50 static int xscale_resume(struct target *, int current,
51 target_addr_t address, int handle_breakpoints, int debug_execution);
52 static int xscale_debug_entry(struct target *);
53 static int xscale_restore_banked(struct target *);
54 static int xscale_get_reg(struct reg *reg);
55 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
56 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
57 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
58 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
59 static int xscale_read_trace(struct target *);
60
61 /* This XScale "debug handler" is loaded into the processor's
62 * mini-ICache, which is 2K of code writable only via JTAG.
63 */
64 static const uint8_t xscale_debug_handler[] = {
65 #include "../../contrib/loaders/debug/xscale/debug_handler.inc"
66 };
67
/* Names of the XScale-specific (mostly cp15 debug) registers.
 * Order must stay in sync with xscale_reg_arch_info[] below and with the
 * XSCALE_* register indices used throughout this file (e.g. XSCALE_DCSR,
 * XSCALE_TX); the decade markers below aid manual index counting. */
static const char *const xscale_reg_list[] = {
	"XSCALE_MAINID",	/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",	/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",	/* 20 */
	"XSCALE_TXRXCTRL",
};
92
/* Per-register architecture info, parallel to xscale_reg_list[] above.
 * The first member is the cp15 access opcode for the register; entries
 * with -1 have no cp15 encoding and are reached via JTAG instead (see
 * trailing comments). The second member (target back-pointer) is filled
 * in when the register cache is built. */
static const struct xscale_reg xscale_reg_arch_info[] = {
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL},	/* DCSR accessed via JTAG or SW */
	{-1, NULL},	/* TX accessed via JTAG */
	{-1, NULL},	/* RX accessed via JTAG */
	{-1, NULL},	/* TXRXCTRL implicit access via JTAG */
};
117
/* Convenience wrapper: write a host-order 32-bit value to an XScale
 * register through xscale_set_reg(), which takes raw register bytes. */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t raw[4] = { 0 };

	buf_set_u32(raw, 0, 32, value);
	return xscale_set_reg(reg, raw);
}
127
128 static const char xscale_not[] = "target is not an XScale";
129
130 static int xscale_verify_pointer(struct command_invocation *cmd,
131 struct xscale_common *xscale)
132 {
133 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
134 command_print(cmd, xscale_not);
135 return ERROR_TARGET_INVALID;
136 }
137 return ERROR_OK;
138 }
139
/* Queue a JTAG IR scan to load @new_instr into the TAP's instruction
 * register, ending in @end_state. The scan is skipped entirely when the
 * TAP already holds the requested instruction, so this is cheap to call
 * before every data-register access. Only queues; the caller is
 * responsible for jtag_execute_queue(). */
static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
{
	assert(tap);

	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
		struct scan_field field;
		uint8_t scratch[4] = { 0 };

		memset(&field, 0, sizeof(field));
		field.num_bits = tap->ir_length;
		field.out_value = scratch;
		buf_set_u32(scratch, 0, field.num_bits, new_instr);

		jtag_add_ir_scan(tap, &field, end_state);
	}

	return ERROR_OK;
}
158
/* Read the Debug Control and Status Register (DCSR) through the SELDCSR
 * JTAG data register and refresh the cached copy. Two DR scans are done:
 * the first captures the current DCSR contents, the second writes those
 * contents straight back together with the hold_rst / external_debug_break
 * control bits, so a read does not disturb the debug state. */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_DRPAUSE);

	/* control bits travel in the low 3-bit field of the scan chain */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof(fields));

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	/* 32-bit DCSR value lands directly in the register cache buffer */
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* verify the captured handshake bits match expectations */
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = false;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = true;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
220
221
222 static void xscale_getbuf(jtag_callback_data_t arg)
223 {
224 uint8_t *in = (uint8_t *)arg;
225 *((uint32_t *)arg) = buf_get_u32(in, 0, 32);
226 }
227
228 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
229 {
230 if (num_words == 0)
231 return ERROR_COMMAND_SYNTAX_ERROR;
232
233 struct xscale_common *xscale = target_to_xscale(target);
234 int retval = ERROR_OK;
235 tap_state_t path[3];
236 struct scan_field fields[3];
237 uint8_t *field0 = malloc(num_words * 1);
238 uint8_t field0_check_value = 0x2;
239 uint8_t field0_check_mask = 0x6;
240 uint32_t *field1 = malloc(num_words * 4);
241 uint8_t field2_check_value = 0x0;
242 uint8_t field2_check_mask = 0x1;
243 int words_done = 0;
244 int words_scheduled = 0;
245 int i;
246
247 path[0] = TAP_DRSELECT;
248 path[1] = TAP_DRCAPTURE;
249 path[2] = TAP_DRSHIFT;
250
251 memset(&fields, 0, sizeof(fields));
252
253 fields[0].num_bits = 3;
254 uint8_t tmp;
255 fields[0].in_value = &tmp;
256 fields[0].check_value = &field0_check_value;
257 fields[0].check_mask = &field0_check_mask;
258
259 fields[1].num_bits = 32;
260
261 fields[2].num_bits = 1;
262 uint8_t tmp2;
263 fields[2].in_value = &tmp2;
264 fields[2].check_value = &field2_check_value;
265 fields[2].check_mask = &field2_check_mask;
266
267 xscale_jtag_set_instr(target->tap,
268 XSCALE_DBGTX << xscale->xscale_variant,
269 TAP_IDLE);
270 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above
271 *could be a no-op */
272
273 /* repeat until all words have been collected */
274 int attempts = 0;
275 while (words_done < num_words) {
276 /* schedule reads */
277 words_scheduled = 0;
278 for (i = words_done; i < num_words; i++) {
279 fields[0].in_value = &field0[i];
280
281 jtag_add_pathmove(3, path);
282
283 fields[1].in_value = (uint8_t *)(field1 + i);
284
285 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
286
287 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
288
289 words_scheduled++;
290 }
291
292 retval = jtag_execute_queue();
293 if (retval != ERROR_OK) {
294 LOG_ERROR("JTAG error while receiving data from debug handler");
295 break;
296 }
297
298 /* examine results */
299 for (i = words_done; i < num_words; i++) {
300 if (!(field0[i] & 1)) {
301 /* move backwards if necessary */
302 int j;
303 for (j = i; j < num_words - 1; j++) {
304 field0[j] = field0[j + 1];
305 field1[j] = field1[j + 1];
306 }
307 words_scheduled--;
308 }
309 }
310 if (words_scheduled == 0) {
311 if (attempts++ == 1000) {
312 LOG_ERROR(
313 "Failed to receiving data from debug handler after 1000 attempts");
314 retval = ERROR_TARGET_TIMEOUT;
315 break;
316 }
317 }
318
319 words_done += words_scheduled;
320 }
321
322 for (i = 0; i < num_words; i++)
323 *(buffer++) = buf_get_u32((uint8_t *)&field1[i], 0, 32);
324
325 free(field1);
326
327 return retval;
328 }
329
/* Read the debug handler's TX register via the DBGTX JTAG data register
 * into the cached XSCALE_TX value, polling for up to one second.
 *
 * @consume: non-zero to clear TX_READY (Capture-DR -> Shift-DR), zero to
 * peek without consuming (detour through Pause-DR so the capture is not
 * committed).
 *
 * Returns ERROR_OK when valid data was read,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when TX held no data (only
 * possible in the non-consume case), or ERROR_TARGET_TIMEOUT. */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGTX << xscale->xscale_variant,
		TAP_IDLE);

	/* consuming path: commit the capture by entering Shift-DR directly */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: pause first so TX_READY stays set */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof(fields));

	/* bit 0 of field0 is the TX-valid flag checked below */
	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);	/* 1 second polling budget */

	for (;; ) {
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if (timeval_compare(&now, &timeout) > 0) {
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* leave the loop unless we are consuming and no data was valid yet */
		if (!((!(field0_in & 1)) && consume))
			goto done;
		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
415
416 static int xscale_write_rx(struct target *target)
417 {
418 struct xscale_common *xscale = target_to_xscale(target);
419 int retval;
420 struct timeval timeout, now;
421 struct scan_field fields[3];
422 uint8_t field0_out = 0x0;
423 uint8_t field0_in = 0x0;
424 uint8_t field0_check_value = 0x2;
425 uint8_t field0_check_mask = 0x6;
426 uint8_t field2 = 0x0;
427 uint8_t field2_check_value = 0x0;
428 uint8_t field2_check_mask = 0x1;
429
430 xscale_jtag_set_instr(target->tap,
431 XSCALE_DBGRX << xscale->xscale_variant,
432 TAP_IDLE);
433
434 memset(&fields, 0, sizeof(fields));
435
436 fields[0].num_bits = 3;
437 fields[0].out_value = &field0_out;
438 fields[0].in_value = &field0_in;
439
440 fields[1].num_bits = 32;
441 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
442
443 fields[2].num_bits = 1;
444 fields[2].out_value = &field2;
445 uint8_t tmp;
446 fields[2].in_value = &tmp;
447
448 gettimeofday(&timeout, NULL);
449 timeval_add_time(&timeout, 1, 0);
450
451 /* poll until rx_read is low */
452 LOG_DEBUG("polling RX");
453 for (;;) {
454 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
455
456 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
457 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
458
459 retval = jtag_execute_queue();
460 if (retval != ERROR_OK) {
461 LOG_ERROR("JTAG error while writing RX");
462 return retval;
463 }
464
465 gettimeofday(&now, NULL);
466 if ((now.tv_sec > timeout.tv_sec) ||
467 ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
468 LOG_ERROR("time out writing RX register");
469 return ERROR_TARGET_TIMEOUT;
470 }
471 if (!(field0_in & 1))
472 goto done;
473 if (debug_level >= 3) {
474 LOG_DEBUG("waiting 100ms");
475 alive_sleep(100); /* avoid flooding the logs */
476 } else
477 keep_alive();
478 }
479 done:
480
481 /* set rx_valid */
482 field2 = 0x1;
483 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
484
485 retval = jtag_execute_queue();
486 if (retval != ERROR_OK) {
487 LOG_ERROR("JTAG error while writing RX");
488 return retval;
489 }
490
491 return ERROR_OK;
492 }
493
/* Send @count elements of @size bytes each from @buffer to the debug
 * handler via the DBGRX JTAG data register.
 *
 * Unlike xscale_send_u32()/xscale_write_rx(), this does NOT poll the
 * "RX full" handshake per word; all scans are queued and executed in one
 * batch for throughput. Elements are converted from target endianness
 * (size 4 and 2) to host order before transmission; @size must be 4, 2
 * or 1. */
static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	int done_count = 0;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	/* the fields are queued across many scans, so the out_value
	 * storage must stay alive until jtag_execute_queue(): t0/t2 are
	 * static, t1 is reused (its final contents are scanned each time
	 * the queue runs, which is once, after the loop) */
	static const uint8_t t0;
	uint8_t t1[4] = { 0 };
	static const uint8_t t2 = 1;	/* rx_valid trailer bit */
	struct scan_field fields[3] = {
		{ .num_bits = 3, .out_value = &t0 },
		{ .num_bits = 32, .out_value = t1 },
		{ .num_bits = 1, .out_value = &t2 },
	};

	int endianness = target->endianness;
	while (done_count++ < count) {
		uint32_t t;

		switch (size) {
			case 4:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t = le_to_h_u32(buffer);
				else
					t = be_to_h_u32(buffer);
				break;
			case 2:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t = le_to_h_u16(buffer);
				else
					t = be_to_h_u16(buffer);
				break;
			case 1:
				t = buffer[0];
				break;
			default:
				LOG_ERROR("BUG: size neither 4, 2 nor 1");
				return ERROR_COMMAND_SYNTAX_ERROR;
		}

		buf_set_u32(t1, 0, 32, t);

		jtag_add_dr_scan(target->tap,
			3,
			fields,
			TAP_IDLE);
		buffer += size;
	}

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
556
557 static int xscale_send_u32(struct target *target, uint32_t value)
558 {
559 struct xscale_common *xscale = target_to_xscale(target);
560
561 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
562 return xscale_write_rx(target);
563 }
564
/* Write the cached DCSR value to the target via the SELDCSR JTAG data
 * register, together with the hold_rst / external_debug_break control
 * bits carried in the 3-bit header field.
 *
 * @hold_rst, @ext_dbg_brk: pass -1 to keep the current setting,
 * otherwise 0/1 to update the corresponding sticky flag first. */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof(fields));

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	/* payload comes straight from the cached DCSR value */
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* cache now matches the hardware */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = false;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = true;

	return ERROR_OK;
}
621
/* Parity of a 32-bit word: 0 if the number of set bits is even, 1 if odd.
 * Folds the word down by successive XOR halvings until a single bit
 * carries the cumulative parity. */
static unsigned int parity(unsigned int v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v ^= v >> 2;
	v ^= v >> 1;
	return v & 1;
}
633
/* Load one 8-word cache line at virtual address @va (must be 32-byte
 * aligned; only va >> 5 is transmitted) into the mini instruction cache
 * through the LDIC JTAG data register.
 *
 * First scan: 6-bit command + 27-bit line address. The scan fields are
 * then re-purposed for eight more scans, each carrying one 32-bit
 * instruction word plus its parity bit. */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4] = { 0 };
	uint8_t cmd = 0;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
		TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof(fields));

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;	/* cmd now holds the parity bit */

	for (word = 0; word < 8; word++) {
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* read packet back via memcpy to avoid aliasing issues */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
687
688 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
689 {
690 struct xscale_common *xscale = target_to_xscale(target);
691 uint8_t packet[4] = { 0 };
692 uint8_t cmd = 0;
693 struct scan_field fields[2];
694
695 xscale_jtag_set_instr(target->tap,
696 XSCALE_LDIC << xscale->xscale_variant,
697 TAP_IDLE);
698
699 /* CMD for invalidate IC line b000, bits [6:4] b000 */
700 buf_set_u32(&cmd, 0, 6, 0x0);
701
702 /* virtual address of desired cache line */
703 buf_set_u32(packet, 0, 27, va >> 5);
704
705 memset(&fields, 0, sizeof(fields));
706
707 fields[0].num_bits = 6;
708 fields[0].out_value = &cmd;
709
710 fields[1].num_bits = 27;
711 fields[1].out_value = packet;
712
713 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
714
715 return ERROR_OK;
716 }
717
/* Refresh the exception vector tables (low at 0x0, high at 0xffff0000)
 * in the mini i-cache so the reset vectors branch into the debug
 * handler.
 *
 * Vectors 1..7 come either from user-configured static overrides or are
 * read back from target memory; reads are expected to fail during normal
 * execution (MMU off, page unmapped), in which case a branch-to-self is
 * substituted. Vector 0 (reset) is always a computed branch to the debug
 * handler entry at handler_address + 0x20. */
static int xscale_update_vectors(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int i;
	int retval;

	uint32_t low_reset_branch, high_reset_branch;

	for (i = 1; i < 8; i++) {
		/* if there's a static vector specified for this exception, override */
		if (xscale->static_high_vectors_set & (1 << i))
			xscale->high_vectors[i] = xscale->static_high_vectors[i];
		else {
			retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK) {
				/* Some of these reads will fail as part of normal execution */
				xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	for (i = 1; i < 8; i++) {
		if (xscale->static_low_vectors_set & (1 << i))
			xscale->low_vectors[i] = xscale->static_low_vectors[i];
		else {
			retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK) {
				/* Some of these reads will fail as part of normal execution */
				xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	/* calculate branches to debug handler (B-offset is (target - pc - 8) >> 2) */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* invalidate and load exception vectors in mini i-cache */
	/* NOTE(review): return codes of the invalidate/load calls below are
	 * ignored — confirm whether failures here should be propagated */
	xscale_invalidate_ic_line(target, 0x0);
	xscale_invalidate_ic_line(target, 0xffff0000);

	xscale_load_ic(target, 0x0, xscale->low_vectors);
	xscale_load_ic(target, 0xffff0000, xscale->high_vectors);

	return ERROR_OK;
}
771
/* Report architecture state to the user: generic ARM state plus the
 * XScale MMU/cache enables and, when relevant, the XScale-specific
 * debug-entry reason (reset or trace-buffer-full). */
static int xscale_arch_state(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;

	/* indexed by the 0/1 *_enabled flags */
	static const char *state[] = {
		"disabled", "enabled"
	};

	/* indexed by enum xscale_debug_reason */
	static const char *arch_dbg_reason[] = {
		"", "\n(processor reset)", "\n(trace buffer full)"
	};

	if (arm->common_magic != ARM_COMMON_MAGIC) {
		LOG_ERROR("BUG: called for a non-ARMv4/5 target");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	arm_arch_state(target);
	LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
		state[xscale->armv4_5_mmu.mmu_enabled],
		state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
		state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
		arch_dbg_reason[xscale->arch_debug_reason]);

	return ERROR_OK;
}
799
/* Periodic poll hook: detect debug-state entry by peeking (non-consuming)
 * at the debug handler's TX register. Data present means the core has
 * halted into the handler; we then run the full debug-entry sequence and
 * fire the appropriate halt event. */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING)) {
		enum target_state previous_state = target->state;
		retval = xscale_read_tx(target, 0);
		if (retval == ERROR_OK) {

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		} else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE) {
			/* a real error, not merely "no data yet" */
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be performed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
836
/* Handle entry into debug state: unload the register dump the debug
 * handler sends over TX (r0, pc, r1-r7, cpsr, then banked regs), refresh
 * the caches, classify the method-of-entry from DCSR, apply the
 * entry-specific PC fixup, and read MMU/cache/trace status.
 *
 * The receive order below mirrors the word order the debug handler
 * transmits; do not reorder. */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t pc;
	uint32_t buffer[10];
	unsigned i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	retval = xscale_read_dcsr(target);
	if (retval != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	retval = xscale_receive(target, buffer, 10);
	if (retval != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	arm->core_cache->reg_list[0].dirty = true;
	arm->core_cache->reg_list[0].valid = true;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(arm->pc->value, 0, 32, buffer[1]);
	arm->pc->dirty = true;
	arm->pc->valid = true;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache (r1..r7 are at buffer[2..8]) */
	for (i = 1; i <= 7; i++) {
		buf_set_u32(arm->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		arm->core_cache->reg_list[i].dirty = true;
		arm->core_cache->reg_list[i].valid = true;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(arm, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	if (!is_arm_mode(arm->core_mode)) {
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
		arm_mode_name(arm->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (arm->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(arm->spsr->value, 0, 32, buffer[7]);
		arm->spsr->dirty = false;
		arm->spsr->valid = true;
	} else {
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++) {
		struct reg *r = arm_reg_current(arm, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* mark xscale regs invalid to ensure they are retrieved from the
	 * debug handler if requested */
	for (i = 0; i < xscale->reg_cache->num_regs; i++)
		xscale->reg_cache->reg_list[i].valid = false;

	/* examine debug reason: MOE is the 3-bit field at DCSR bits [4:2] */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(arm->pc->value, 0, 32);

	switch (moe) {
		case 0x0:	/* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1:	/* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2:	/* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3:	/* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4:	/* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5:	/* Vector trap occurred */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6:	/* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7:	/* Reserved (may flag Hot-Debug support) */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			/* NOTE(review): exit() from deep inside a target driver kills
			 * the whole OpenOCD process — consider returning an error */
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(arm->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1) {
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value,
				0,
				32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings
	 * read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg =
		buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
		(xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
		(xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		xscale_read_trace(target);

		/* Resume if entered debug due to buffer fill and we're still collecting
		 * trace data. Note that a debug exception due to trace buffer full
		 * can only happen in fill mode. */
		if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL) {
			if (--xscale->trace.fill_counter > 0)
				xscale_resume(target, 1, 0x0, 1, 0);
		} else	/* entered debug for other reason; reset counter */
			xscale->trace.fill_counter = 0;
	}

	return ERROR_OK;
}
1007
1008 static int xscale_halt(struct target *target)
1009 {
1010 struct xscale_common *xscale = target_to_xscale(target);
1011
1012 LOG_DEBUG("target->state: %s",
1013 target_state_name(target));
1014
1015 if (target->state == TARGET_HALTED) {
1016 LOG_DEBUG("target was already halted");
1017 return ERROR_OK;
1018 } else if (target->state == TARGET_UNKNOWN) {
1019 /* this must not happen for a xscale target */
1020 LOG_ERROR("target was in unknown state when halt was requested");
1021 return ERROR_TARGET_INVALID;
1022 } else if (target->state == TARGET_RESET)
1023 LOG_DEBUG("target->state == TARGET_RESET");
1024 else {
1025 /* assert external dbg break */
1026 xscale->external_debug_break = 1;
1027 xscale_read_dcsr(target);
1028
1029 target->debug_reason = DBG_REASON_DBGRQ;
1030 }
1031
1032 return ERROR_OK;
1033 }
1034
/* Arm hardware single-step by programming IBCR0 with a breakpoint at
 * @next_pc (bit 0 set = enabled). If IBCR0 is currently in use by a
 * regular breakpoint, that breakpoint is unset first (it is re-armed by
 * the resume path); an IBCR0-in-use flag without a matching breakpoint
 * indicates internal state corruption and aborts. */
static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
	int retval;

	if (xscale->ibcr0_used) {
		/* mask off the enable bit to recover the breakpoint address */
		struct breakpoint *ibcr0_bp =
			breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);

		if (ibcr0_bp)
			xscale_unset_breakpoint(target, ibcr0_bp);
		else {
			LOG_ERROR(
				"BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
			exit(-1);
		}
	}

	retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1060
1061 static int xscale_disable_single_step(struct target *target)
1062 {
1063 struct xscale_common *xscale = target_to_xscale(target);
1064 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1065 int retval;
1066
1067 retval = xscale_set_reg_u32(ibcr0, 0x0);
1068 if (retval != ERROR_OK)
1069 return retval;
1070
1071 return ERROR_OK;
1072 }
1073
1074 static void xscale_enable_watchpoints(struct target *target)
1075 {
1076 struct watchpoint *watchpoint = target->watchpoints;
1077
1078 while (watchpoint) {
1079 if (!watchpoint->is_set)
1080 xscale_set_watchpoint(target, watchpoint);
1081 watchpoint = watchpoint->next;
1082 }
1083 }
1084
1085 static void xscale_enable_breakpoints(struct target *target)
1086 {
1087 struct breakpoint *breakpoint = target->breakpoints;
1088
1089 /* set any pending breakpoints */
1090 while (breakpoint) {
1091 if (!breakpoint->is_set)
1092 xscale_set_breakpoint(target, breakpoint);
1093 breakpoint = breakpoint->next;
1094 }
1095 }
1096
1097 static void xscale_free_trace_data(struct xscale_common *xscale)
1098 {
1099 struct xscale_trace_data *td = xscale->trace.data;
1100 while (td) {
1101 struct xscale_trace_data *next_td = td->next;
1102 free(td->entries);
1103 free(td);
1104 td = next_td;
1105 }
1106 xscale->trace.data = NULL;
1107 }
1108
1109 static int xscale_resume(struct target *target, int current,
1110 target_addr_t address, int handle_breakpoints, int debug_execution)
1111 {
1112 struct xscale_common *xscale = target_to_xscale(target);
1113 struct arm *arm = &xscale->arm;
1114 uint32_t current_pc;
1115 int retval;
1116 int i;
1117
1118 LOG_DEBUG("-");
1119
1120 if (target->state != TARGET_HALTED) {
1121 LOG_TARGET_ERROR(target, "not halted");
1122 return ERROR_TARGET_NOT_HALTED;
1123 }
1124
1125 if (!debug_execution)
1126 target_free_all_working_areas(target);
1127
1128 /* update vector tables */
1129 retval = xscale_update_vectors(target);
1130 if (retval != ERROR_OK)
1131 return retval;
1132
1133 /* current = 1: continue on current pc, otherwise continue at <address> */
1134 if (!current)
1135 buf_set_u32(arm->pc->value, 0, 32, address);
1136
1137 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1138
1139 /* if we're at the reset vector, we have to simulate the branch */
1140 if (current_pc == 0x0) {
1141 arm_simulate_step(target, NULL);
1142 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1143 }
1144
1145 /* the front-end may request us not to handle breakpoints */
1146 if (handle_breakpoints) {
1147 struct breakpoint *breakpoint;
1148 breakpoint = breakpoint_find(target,
1149 buf_get_u32(arm->pc->value, 0, 32));
1150 if (breakpoint) {
1151 uint32_t next_pc;
1152 enum trace_mode saved_trace_mode;
1153
1154 /* there's a breakpoint at the current PC, we have to step over it */
1155 LOG_DEBUG("unset breakpoint at " TARGET_ADDR_FMT "",
1156 breakpoint->address);
1157 xscale_unset_breakpoint(target, breakpoint);
1158
1159 /* calculate PC of next instruction */
1160 retval = arm_simulate_step(target, &next_pc);
1161 if (retval != ERROR_OK) {
1162 uint32_t current_opcode;
1163 target_read_u32(target, current_pc, &current_opcode);
1164 LOG_ERROR(
1165 "BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
1166 current_opcode);
1167 }
1168
1169 LOG_DEBUG("enable single-step");
1170 xscale_enable_single_step(target, next_pc);
1171
1172 /* restore banked registers */
1173 retval = xscale_restore_banked(target);
1174 if (retval != ERROR_OK)
1175 return retval;
1176
1177 /* send resume request */
1178 xscale_send_u32(target, 0x30);
1179
1180 /* send CPSR */
1181 xscale_send_u32(target,
1182 buf_get_u32(arm->cpsr->value, 0, 32));
1183 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1184 buf_get_u32(arm->cpsr->value, 0, 32));
1185
1186 for (i = 7; i >= 0; i--) {
1187 /* send register */
1188 xscale_send_u32(target,
1189 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1190 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1191 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1192 }
1193
1194 /* send PC */
1195 xscale_send_u32(target,
1196 buf_get_u32(arm->pc->value, 0, 32));
1197 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1198 buf_get_u32(arm->pc->value, 0, 32));
1199
1200 /* disable trace data collection in xscale_debug_entry() */
1201 saved_trace_mode = xscale->trace.mode;
1202 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1203
1204 /* wait for and process debug entry */
1205 xscale_debug_entry(target);
1206
1207 /* re-enable trace buffer, if enabled previously */
1208 xscale->trace.mode = saved_trace_mode;
1209
1210 LOG_DEBUG("disable single-step");
1211 xscale_disable_single_step(target);
1212
1213 LOG_DEBUG("set breakpoint at " TARGET_ADDR_FMT "",
1214 breakpoint->address);
1215 xscale_set_breakpoint(target, breakpoint);
1216 }
1217 }
1218
1219 /* enable any pending breakpoints and watchpoints */
1220 xscale_enable_breakpoints(target);
1221 xscale_enable_watchpoints(target);
1222
1223 /* restore banked registers */
1224 retval = xscale_restore_banked(target);
1225 if (retval != ERROR_OK)
1226 return retval;
1227
1228 /* send resume request (command 0x30 or 0x31)
1229 * clean the trace buffer if it is to be enabled (0x62) */
1230 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
1231 if (xscale->trace.mode == XSCALE_TRACE_FILL) {
1232 /* If trace enabled in fill mode and starting collection of new set
1233 * of buffers, initialize buffer counter and free previous buffers */
1234 if (xscale->trace.fill_counter == 0) {
1235 xscale->trace.fill_counter = xscale->trace.buffer_fill;
1236 xscale_free_trace_data(xscale);
1237 }
1238 } else /* wrap mode; free previous buffer */
1239 xscale_free_trace_data(xscale);
1240
1241 xscale_send_u32(target, 0x62);
1242 xscale_send_u32(target, 0x31);
1243 } else
1244 xscale_send_u32(target, 0x30);
1245
1246 /* send CPSR */
1247 xscale_send_u32(target, buf_get_u32(arm->cpsr->value, 0, 32));
1248 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1249 buf_get_u32(arm->cpsr->value, 0, 32));
1250
1251 for (i = 7; i >= 0; i--) {
1252 /* send register */
1253 xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1254 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1255 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1256 }
1257
1258 /* send PC */
1259 xscale_send_u32(target, buf_get_u32(arm->pc->value, 0, 32));
1260 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1261 buf_get_u32(arm->pc->value, 0, 32));
1262
1263 target->debug_reason = DBG_REASON_NOTHALTED;
1264
1265 if (!debug_execution) {
1266 /* registers are now invalid */
1267 register_cache_invalidate(arm->core_cache);
1268 target->state = TARGET_RUNNING;
1269 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1270 } else {
1271 target->state = TARGET_DEBUG_RUNNING;
1272 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1273 }
1274
1275 LOG_DEBUG("target resumed");
1276
1277 return ERROR_OK;
1278 }
1279
/* Perform the actual single step: predict the next PC via the host-side
 * simulator, arm IBCR0 on that address, hand the full register context
 * (CPSR, r0..r7, PC) to the debug handler, resume, then wait for debug
 * re-entry and disarm single-step.  Callers must have removed any
 * breakpoint at the current PC beforehand (see xscale_step()). */
static int xscale_step_inner(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	retval = arm_simulate_step(target, &next_pc);
	if (retval != ERROR_OK) {
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(arm->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR(
			"BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
			current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	retval = xscale_enable_single_step(target, next_pc);
	if (retval != ERROR_OK)
		return retval;

	/* restore banked registers */
	retval = xscale_restore_banked(target);
	if (retval != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		retval = xscale_send_u32(target, 0x62);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_send_u32(target, 0x31);
		if (retval != ERROR_OK)
			return retval;
	} else {
		retval = xscale_send_u32(target, 0x30);
		if (retval != ERROR_OK)
			return retval;
	}

	/* send CPSR */
	retval = xscale_send_u32(target,
		buf_get_u32(arm->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(arm->cpsr->value, 0, 32));

	/* send r7..r0 in descending order, as the handler protocol expects */
	for (i = 7; i >= 0; i--) {
		/* send register */
		retval = xscale_send_u32(target,
			buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i,
			buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	retval = xscale_send_u32(target,
		buf_get_u32(arm->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
		buf_get_u32(arm->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	/* wait for and process debug entry */
	retval = xscale_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	retval = xscale_disable_single_step(target);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1374
1375 static int xscale_step(struct target *target, int current,
1376 target_addr_t address, int handle_breakpoints)
1377 {
1378 struct arm *arm = target_to_arm(target);
1379 struct breakpoint *breakpoint = NULL;
1380
1381 uint32_t current_pc;
1382 int retval;
1383
1384 if (target->state != TARGET_HALTED) {
1385 LOG_TARGET_ERROR(target, "not halted");
1386 return ERROR_TARGET_NOT_HALTED;
1387 }
1388
1389 /* current = 1: continue on current pc, otherwise continue at <address> */
1390 if (!current)
1391 buf_set_u32(arm->pc->value, 0, 32, address);
1392
1393 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1394
1395 /* if we're at the reset vector, we have to simulate the step */
1396 if (current_pc == 0x0) {
1397 retval = arm_simulate_step(target, NULL);
1398 if (retval != ERROR_OK)
1399 return retval;
1400 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1401 LOG_DEBUG("current pc %" PRIx32, current_pc);
1402
1403 target->debug_reason = DBG_REASON_SINGLESTEP;
1404 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1405
1406 return ERROR_OK;
1407 }
1408
1409 /* the front-end may request us not to handle breakpoints */
1410 if (handle_breakpoints)
1411 breakpoint = breakpoint_find(target,
1412 buf_get_u32(arm->pc->value, 0, 32));
1413 if (breakpoint) {
1414 retval = xscale_unset_breakpoint(target, breakpoint);
1415 if (retval != ERROR_OK)
1416 return retval;
1417 }
1418
1419 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1420 if (retval != ERROR_OK)
1421 return retval;
1422
1423 if (breakpoint)
1424 xscale_set_breakpoint(target, breakpoint);
1425
1426 LOG_DEBUG("target stepped");
1427
1428 return ERROR_OK;
1429
1430 }
1431
/* Assert SRST while programming the DCSR so that the core will hold in
 * reset, use halt-mode debug, and trap the reset vector — the sequence
 * needed for the debug handler to gain control after deassert. */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	/* TODO: apply hw reset signal in not examined state */
	if (!(target_was_examined(target))) {
		LOG_WARNING("Reset is not asserted because the target is not examined.");
		LOG_WARNING("Use a reset button or power cycle the target.");
		return ERROR_TARGET_NOT_EXAMINED;
	}

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt) {
		int retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1479
/* Deassert SRST, reload the debug handler and exception vectors into the
 * mini-icache, then release the core so it runs into the handler.  All
 * per-reset debug state (comparators, trace, register cache) is reset. */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* reset invalidates all hardware comparator bookkeeping */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint) {
		if (breakpoint->type == BKPT_HARD)
			breakpoint->is_set = false;
		breakpoint = breakpoint->next;
	}

	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale_free_trace_data(xscale);

	register_cache_invalidate(xscale->arm.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof(xscale_debug_handler);
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt) {
			uint32_t cache_line[8];
			unsigned i;

			/* transfer at most one 8-word (32-byte) cache line per pass */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4) {
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad a partial final line with 0xe1a08008
			 * (ARM "mov r8, r8", effectively a no-op) */
			for (; i < 32; i += 4)
				cache_line[i / 4] = 0xe1a08008;

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0) {
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		}

		/* install the low and high exception vector tables */
		retval = xscale_load_ic(target, 0x0,
				xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
				xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt) {
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1607
/* Stub: per-register reads through the debug handler are not implemented
 * (always returns ERROR_OK after logging). */
static int xscale_read_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1615
/* Stub: per-register writes through the debug handler are not implemented
 * (always returns ERROR_OK after logging). */
static int xscale_write_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode, uint8_t *value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1623
1624 static int xscale_full_context(struct target *target)
1625 {
1626 struct arm *arm = target_to_arm(target);
1627
1628 uint32_t *buffer;
1629
1630 int i, j;
1631
1632 LOG_DEBUG("-");
1633
1634 if (target->state != TARGET_HALTED) {
1635 LOG_TARGET_ERROR(target, "not halted");
1636 return ERROR_TARGET_NOT_HALTED;
1637 }
1638
1639 buffer = malloc(4 * 8);
1640
1641 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1642 * we can't enter User mode on an XScale (unpredictable),
1643 * but User shares registers with SYS
1644 */
1645 for (i = 1; i < 7; i++) {
1646 enum arm_mode mode = armv4_5_number_to_mode(i);
1647 bool valid = true;
1648 struct reg *r;
1649
1650 if (mode == ARM_MODE_USR)
1651 continue;
1652
1653 /* check if there are invalid registers in the current mode
1654 */
1655 for (j = 0; valid && j <= 16; j++) {
1656 if (!ARMV4_5_CORE_REG_MODE(arm->core_cache,
1657 mode, j).valid)
1658 valid = false;
1659 }
1660 if (valid)
1661 continue;
1662
1663 /* request banked registers */
1664 xscale_send_u32(target, 0x0);
1665
1666 /* send CPSR for desired bank mode */
1667 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1668
1669 /* get banked registers: r8 to r14; and SPSR
1670 * except in USR/SYS mode
1671 */
1672 if (mode != ARM_MODE_SYS) {
1673 /* SPSR */
1674 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1675 mode, 16);
1676
1677 xscale_receive(target, buffer, 8);
1678
1679 buf_set_u32(r->value, 0, 32, buffer[7]);
1680 r->dirty = false;
1681 r->valid = true;
1682 } else
1683 xscale_receive(target, buffer, 7);
1684
1685 /* move data from buffer to register cache */
1686 for (j = 8; j <= 14; j++) {
1687 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1688 mode, j);
1689
1690 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1691 r->dirty = false;
1692 r->valid = true;
1693 }
1694 }
1695
1696 free(buffer);
1697
1698 return ERROR_OK;
1699 }
1700
/* Write any dirty banked registers (r8..r14, plus SPSR for non-SYS modes)
 * back to the target via the debug handler's "send banked registers"
 * command, clearing the dirty flags afterwards.  Modes with no dirty
 * registers are skipped entirely. */
static int xscale_restore_banked(struct target *target)
{
	struct arm *arm = target_to_arm(target);

	int i, j;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written. Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++) {
		enum arm_mode mode = armv4_5_number_to_mode(i);
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++) {
			if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j).dirty)
				goto dirty;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS) {
			if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, 16).dirty)
				goto dirty;
		}

		/* there's nothing to flush for this mode */
		continue;

		/* at least one register of this mode must be flushed */
dirty:
		/* command 0x1: "send banked registers" */
		xscale_send_u32(target, 0x1);

		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}

		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, 16);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}
	}

	return ERROR_OK;
}
1769
1770 static int xscale_read_memory(struct target *target, target_addr_t address,
1771 uint32_t size, uint32_t count, uint8_t *buffer)
1772 {
1773 struct xscale_common *xscale = target_to_xscale(target);
1774 uint32_t *buf32;
1775 uint32_t i;
1776 int retval;
1777
1778 LOG_DEBUG("address: " TARGET_ADDR_FMT ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1779 address,
1780 size,
1781 count);
1782
1783 if (target->state != TARGET_HALTED) {
1784 LOG_TARGET_ERROR(target, "not halted");
1785 return ERROR_TARGET_NOT_HALTED;
1786 }
1787
1788 /* sanitize arguments */
1789 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1790 return ERROR_COMMAND_SYNTAX_ERROR;
1791
1792 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1793 return ERROR_TARGET_UNALIGNED_ACCESS;
1794
1795 /* send memory read request (command 0x1n, n: access size) */
1796 retval = xscale_send_u32(target, 0x10 | size);
1797 if (retval != ERROR_OK)
1798 return retval;
1799
1800 /* send base address for read request */
1801 retval = xscale_send_u32(target, address);
1802 if (retval != ERROR_OK)
1803 return retval;
1804
1805 /* send number of requested data words */
1806 retval = xscale_send_u32(target, count);
1807 if (retval != ERROR_OK)
1808 return retval;
1809
1810 /* receive data from target (count times 32-bit words in host endianness) */
1811 buf32 = malloc(4 * count);
1812 retval = xscale_receive(target, buf32, count);
1813 if (retval != ERROR_OK) {
1814 free(buf32);
1815 return retval;
1816 }
1817
1818 /* extract data from host-endian buffer into byte stream */
1819 for (i = 0; i < count; i++) {
1820 switch (size) {
1821 case 4:
1822 target_buffer_set_u32(target, buffer, buf32[i]);
1823 buffer += 4;
1824 break;
1825 case 2:
1826 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1827 buffer += 2;
1828 break;
1829 case 1:
1830 *buffer++ = buf32[i] & 0xff;
1831 break;
1832 default:
1833 LOG_ERROR("invalid read size");
1834 return ERROR_COMMAND_SYNTAX_ERROR;
1835 }
1836 }
1837
1838 free(buf32);
1839
1840 /* examine DCSR, to see if Sticky Abort (SA) got set */
1841 retval = xscale_read_dcsr(target);
1842 if (retval != ERROR_OK)
1843 return retval;
1844 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1845 /* clear SA bit */
1846 retval = xscale_send_u32(target, 0x60);
1847 if (retval != ERROR_OK)
1848 return retval;
1849
1850 return ERROR_TARGET_DATA_ABORT;
1851 }
1852
1853 return ERROR_OK;
1854 }
1855
1856 static int xscale_read_phys_memory(struct target *target, target_addr_t address,
1857 uint32_t size, uint32_t count, uint8_t *buffer)
1858 {
1859 struct xscale_common *xscale = target_to_xscale(target);
1860
1861 /* with MMU inactive, there are only physical addresses */
1862 if (!xscale->armv4_5_mmu.mmu_enabled)
1863 return xscale_read_memory(target, address, size, count, buffer);
1864
1865 /** \todo: provide a non-stub implementation of this routine. */
1866 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1867 target_name(target), __func__);
1868 return ERROR_FAIL;
1869 }
1870
1871 static int xscale_write_memory(struct target *target, target_addr_t address,
1872 uint32_t size, uint32_t count, const uint8_t *buffer)
1873 {
1874 struct xscale_common *xscale = target_to_xscale(target);
1875 int retval;
1876
1877 LOG_DEBUG("address: " TARGET_ADDR_FMT ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1878 address,
1879 size,
1880 count);
1881
1882 if (target->state != TARGET_HALTED) {
1883 LOG_TARGET_ERROR(target, "not halted");
1884 return ERROR_TARGET_NOT_HALTED;
1885 }
1886
1887 /* sanitize arguments */
1888 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1889 return ERROR_COMMAND_SYNTAX_ERROR;
1890
1891 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1892 return ERROR_TARGET_UNALIGNED_ACCESS;
1893
1894 /* send memory write request (command 0x2n, n: access size) */
1895 retval = xscale_send_u32(target, 0x20 | size);
1896 if (retval != ERROR_OK)
1897 return retval;
1898
1899 /* send base address for read request */
1900 retval = xscale_send_u32(target, address);
1901 if (retval != ERROR_OK)
1902 return retval;
1903
1904 /* send number of requested data words to be written*/
1905 retval = xscale_send_u32(target, count);
1906 if (retval != ERROR_OK)
1907 return retval;
1908
1909 /* extract data from host-endian buffer into byte stream */
1910 #if 0
1911 for (i = 0; i < count; i++) {
1912 switch (size) {
1913 case 4:
1914 value = target_buffer_get_u32(target, buffer);
1915 xscale_send_u32(target, value);
1916 buffer += 4;
1917 break;
1918 case 2:
1919 value = target_buffer_get_u16(target, buffer);
1920 xscale_send_u32(target, value);
1921 buffer += 2;
1922 break;
1923 case 1:
1924 value = *buffer;
1925 xscale_send_u32(target, value);
1926 buffer += 1;
1927 break;
1928 default:
1929 LOG_ERROR("should never get here");
1930 exit(-1);
1931 }
1932 }
1933 #endif
1934 retval = xscale_send(target, buffer, count, size);
1935 if (retval != ERROR_OK)
1936 return retval;
1937
1938 /* examine DCSR, to see if Sticky Abort (SA) got set */
1939 retval = xscale_read_dcsr(target);
1940 if (retval != ERROR_OK)
1941 return retval;
1942 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1943 /* clear SA bit */
1944 retval = xscale_send_u32(target, 0x60);
1945 if (retval != ERROR_OK)
1946 return retval;
1947
1948 LOG_ERROR("data abort writing memory");
1949 return ERROR_TARGET_DATA_ABORT;
1950 }
1951
1952 return ERROR_OK;
1953 }
1954
1955 static int xscale_write_phys_memory(struct target *target, target_addr_t address,
1956 uint32_t size, uint32_t count, const uint8_t *buffer)
1957 {
1958 struct xscale_common *xscale = target_to_xscale(target);
1959
1960 /* with MMU inactive, there are only physical addresses */
1961 if (!xscale->armv4_5_mmu.mmu_enabled)
1962 return xscale_write_memory(target, address, size, count, buffer);
1963
1964 /** \todo: provide a non-stub implementation of this routine. */
1965 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1966 target_name(target), __func__);
1967 return ERROR_FAIL;
1968 }
1969
1970 static int xscale_get_ttb(struct target *target, uint32_t *result)
1971 {
1972 struct xscale_common *xscale = target_to_xscale(target);
1973 uint32_t ttb;
1974 int retval;
1975
1976 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1977 if (retval != ERROR_OK)
1978 return retval;
1979 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1980
1981 *result = ttb;
1982
1983 return ERROR_OK;
1984 }
1985
/* Clear the requested enable bits in the cp15 control register — MMU
 * (bit 0), data/unified cache (bit 2), instruction cache (bit 12) —
 * cleaning/invalidating the affected caches via the debug handler
 * before the bits are cleared, then issue cpwait. */
static int xscale_disable_mmu_caches(struct target *target, int mmu,
	int d_u_cache, int i_cache)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;
	int retval;

	/* read cp15 control register */
	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	if (retval != ERROR_OK)
		return retval;
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

	if (mmu)
		cp15_control &= ~0x1U;

	if (d_u_cache) {
		/* clean DCache (command 0x50 + clean-area address) */
		retval = xscale_send_u32(target, 0x50);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_send_u32(target, xscale->cache_clean_address);
		if (retval != ERROR_OK)
			return retval;

		/* invalidate DCache */
		retval = xscale_send_u32(target, 0x51);
		if (retval != ERROR_OK)
			return retval;

		cp15_control &= ~0x4U;
	}

	if (i_cache) {
		/* invalidate ICache */
		retval = xscale_send_u32(target, 0x52);
		if (retval != ERROR_OK)
			return retval;
		cp15_control &= ~0x1000U;
	}

	/* write new cp15 control register */
	retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
	if (retval != ERROR_OK)
		return retval;

	/* execute cpwait to ensure outstanding operations complete */
	retval = xscale_send_u32(target, 0x53);
	return retval;
}
2036
2037 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2038 int d_u_cache, int i_cache)
2039 {
2040 struct xscale_common *xscale = target_to_xscale(target);
2041 uint32_t cp15_control;
2042 int retval;
2043
2044 /* read cp15 control register */
2045 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2046 if (retval != ERROR_OK)
2047 return retval;
2048 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2049
2050 if (mmu)
2051 cp15_control |= 0x1U;
2052
2053 if (d_u_cache)
2054 cp15_control |= 0x4U;
2055
2056 if (i_cache)
2057 cp15_control |= 0x1000U;
2058
2059 /* write new cp15 control register */
2060 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2061 if (retval != ERROR_OK)
2062 return retval;
2063
2064 /* execute cpwait to ensure outstanding operations complete */
2065 retval = xscale_send_u32(target, 0x53);
2066 return retval;
2067 }
2068
/* Program one breakpoint into the target.  Hardware breakpoints occupy
 * one of the two IBCR comparators (availability was reserved earlier in
 * xscale_add_breakpoint()); software breakpoints save the original
 * instruction and patch in a BKPT opcode, then flush the caches so the
 * patched instruction is actually fetched. */
static int xscale_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->is_set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* bit 0 enables the comparator — same pattern as
		 * xscale_enable_single_step() uses for IBCR0 */
		uint32_t value = breakpoint->address | 1;
		if (!xscale->ibcr0_used) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			/* breakpoint set on first breakpoint register */
			breakpoint_hw_set(breakpoint, 0);
		} else if (!xscale->ibcr1_used) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			/* breakpoint set on second breakpoint register */
			breakpoint_hw_set(breakpoint, 1);
		} else {/* bug: availability previously verified in xscale_add_breakpoint() */
			LOG_ERROR("BUG: no hardware comparator available");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
	} else if (breakpoint->type == BKPT_SOFT) {
		/* length 4 means an ARM breakpoint, length 2 a Thumb one */
		if (breakpoint->length == 4) {
			/* keep the original instruction in target endianness */
			retval = target_read_memory(target, breakpoint->address, 4, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness
			 *(arm7_9->arm_bkpt is host endian) */
			retval = target_write_u32(target, breakpoint->address,
					xscale->arm_bkpt);
			if (retval != ERROR_OK)
				return retval;
		} else {
			/* keep the original instruction in target endianness */
			retval = target_read_memory(target, breakpoint->address, 2, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness
			 *(arm7_9->arm_bkpt is host endian) */
			retval = target_write_u16(target, breakpoint->address,
					xscale->thumb_bkpt);
			if (retval != ERROR_OK)
				return retval;
		}
		breakpoint->is_set = true;

		/* flush so the patched opcode reaches the instruction stream */
		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}
2137
2138 static int xscale_add_breakpoint(struct target *target,
2139 struct breakpoint *breakpoint)
2140 {
2141 struct xscale_common *xscale = target_to_xscale(target);
2142
2143 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1)) {
2144 LOG_ERROR("no breakpoint unit available for hardware breakpoint");
2145 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2146 }
2147
2148 if ((breakpoint->length != 2) && (breakpoint->length != 4)) {
2149 LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2150 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2151 }
2152
2153 if (breakpoint->type == BKPT_HARD)
2154 xscale->ibcr_available--;
2155
2156 return xscale_set_breakpoint(target, breakpoint);
2157 }
2158
2159 static int xscale_unset_breakpoint(struct target *target,
2160 struct breakpoint *breakpoint)
2161 {
2162 int retval;
2163 struct xscale_common *xscale = target_to_xscale(target);
2164
2165 if (target->state != TARGET_HALTED) {
2166 LOG_TARGET_ERROR(target, "not halted");
2167 return ERROR_TARGET_NOT_HALTED;
2168 }
2169
2170 if (!breakpoint->is_set) {
2171 LOG_WARNING("breakpoint not set");
2172 return ERROR_OK;
2173 }
2174
2175 if (breakpoint->type == BKPT_HARD) {
2176 if (breakpoint->number == 0) {
2177 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2178 xscale->ibcr0_used = 0;
2179 } else if (breakpoint->number == 1) {
2180 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2181 xscale->ibcr1_used = 0;
2182 }
2183 breakpoint->is_set = false;
2184 } else {
2185 /* restore original instruction (kept in target endianness) */
2186 if (breakpoint->length == 4) {
2187 retval = target_write_memory(target, breakpoint->address, 4, 1,
2188 breakpoint->orig_instr);
2189 if (retval != ERROR_OK)
2190 return retval;
2191 } else {
2192 retval = target_write_memory(target, breakpoint->address, 2, 1,
2193 breakpoint->orig_instr);
2194 if (retval != ERROR_OK)
2195 return retval;
2196 }
2197 breakpoint->is_set = false;
2198
2199 xscale_send_u32(target, 0x50); /* clean dcache */
2200 xscale_send_u32(target, xscale->cache_clean_address);
2201 xscale_send_u32(target, 0x51); /* invalidate dcache */
2202 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2203 }
2204
2205 return ERROR_OK;
2206 }
2207
/* Unregister a breakpoint: deactivate it on the target if it is currently
 * programmed, then return a reserved hardware comparator (if any) to the
 * pool.  The target must be halted. */
static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->is_set)
		xscale_unset_breakpoint(target, breakpoint);

	/* hardware breakpoints reserved a comparator in xscale_add_breakpoint() */
	if (breakpoint->type == BKPT_HARD)
		xscale->ibcr_available++;

	return ERROR_OK;
}
2225
2226 static int xscale_set_watchpoint(struct target *target,
2227 struct watchpoint *watchpoint)
2228 {
2229 struct xscale_common *xscale = target_to_xscale(target);
2230 uint32_t enable = 0;
2231 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2232 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2233
2234 if (target->state != TARGET_HALTED) {
2235 LOG_TARGET_ERROR(target, "not halted");
2236 return ERROR_TARGET_NOT_HALTED;
2237 }
2238
2239 switch (watchpoint->rw) {
2240 case WPT_READ:
2241 enable = 0x3;
2242 break;
2243 case WPT_ACCESS:
2244 enable = 0x2;
2245 break;
2246 case WPT_WRITE:
2247 enable = 0x1;
2248 break;
2249 default:
2250 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2251 }
2252
2253 /* For watchpoint across more than one word, both DBR registers must
2254 be enlisted, with the second used as a mask. */
2255 if (watchpoint->length > 4) {
2256 if (xscale->dbr0_used || xscale->dbr1_used) {
2257 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2258 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2259 }
2260
2261 /* Write mask value to DBR1, based on the length argument.
2262 * Address bits ignored by the comparator are those set in mask. */
2263 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2264 watchpoint->length - 1);
2265 xscale->dbr1_used = 1;
2266 enable |= 0x100; /* DBCON[M] */
2267 }
2268
2269 if (!xscale->dbr0_used) {
2270 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2271 dbcon_value |= enable;
2272 xscale_set_reg_u32(dbcon, dbcon_value);
2273 watchpoint_set(watchpoint, 0);
2274 xscale->dbr0_used = 1;
2275 } else if (!xscale->dbr1_used) {
2276 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2277 dbcon_value |= enable << 2;
2278 xscale_set_reg_u32(dbcon, dbcon_value);
2279 watchpoint_set(watchpoint, 1);
2280 xscale->dbr1_used = 1;
2281 } else {
2282 LOG_ERROR("BUG: no hardware comparator available");
2283 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2284 }
2285
2286 return ERROR_OK;
2287 }
2288
2289 static int xscale_add_watchpoint(struct target *target,
2290 struct watchpoint *watchpoint)
2291 {
2292 struct xscale_common *xscale = target_to_xscale(target);
2293
2294 if (xscale->dbr_available < 1) {
2295 LOG_ERROR("no more watchpoint registers available");
2296 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2297 }
2298
2299 if (watchpoint->mask != WATCHPOINT_IGNORE_DATA_VALUE_MASK)
2300 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2301
2302 /* check that length is a power of two */
2303 for (uint32_t len = watchpoint->length; len != 1; len /= 2) {
2304 if (len % 2) {
2305 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2306 return ERROR_COMMAND_ARGUMENT_INVALID;
2307 }
2308 }
2309
2310 if (watchpoint->length == 4) { /* single word watchpoint */
2311 xscale->dbr_available--;/* one DBR reg used */
2312 return ERROR_OK;
2313 }
2314
2315 /* watchpoints across multiple words require both DBR registers */
2316 if (xscale->dbr_available < 2) {
2317 LOG_ERROR("insufficient watchpoint registers available");
2318 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2319 }
2320
2321 if (watchpoint->length > watchpoint->address) {
2322 LOG_ERROR("xscale does not support watchpoints with length "
2323 "greater than address");
2324 return ERROR_COMMAND_ARGUMENT_INVALID;
2325 }
2326
2327 xscale->dbr_available = 0;
2328 return ERROR_OK;
2329 }
2330
2331 static int xscale_unset_watchpoint(struct target *target,
2332 struct watchpoint *watchpoint)
2333 {
2334 struct xscale_common *xscale = target_to_xscale(target);
2335 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2336 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2337
2338 if (target->state != TARGET_HALTED) {
2339 LOG_TARGET_ERROR(target, "not halted");
2340 return ERROR_TARGET_NOT_HALTED;
2341 }
2342
2343 if (!watchpoint->is_set) {
2344 LOG_WARNING("breakpoint not set");
2345 return ERROR_OK;
2346 }
2347
2348 if (watchpoint->number == 0) {
2349 if (watchpoint->length > 4) {
2350 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2351 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2352 } else
2353 dbcon_value &= ~0x3;
2354
2355 xscale_set_reg_u32(dbcon, dbcon_value);
2356 xscale->dbr0_used = 0;
2357 } else if (watchpoint->number == 1) {
2358 dbcon_value &= ~0xc;
2359 xscale_set_reg_u32(dbcon, dbcon_value);
2360 xscale->dbr1_used = 0;
2361 }
2362 watchpoint->is_set = false;
2363
2364 return ERROR_OK;
2365 }
2366
/* Unregister a watchpoint: deactivate it on the target if currently
 * programmed, then return its DBR comparator(s) to the pool.  The target
 * must be halted. */
static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (watchpoint->is_set)
		xscale_unset_watchpoint(target, watchpoint);

	/* multi-word watchpoints consumed both DBR registers (second as mask) */
	if (watchpoint->length > 4)
		xscale->dbr_available++;/* both DBR regs now available */

	xscale->dbr_available++;

	return ERROR_OK;
}
2386
2387 static int xscale_get_reg(struct reg *reg)
2388 {
2389 struct xscale_reg *arch_info = reg->arch_info;
2390 struct target *target = arch_info->target;
2391 struct xscale_common *xscale = target_to_xscale(target);
2392
2393 /* DCSR, TX and RX are accessible via JTAG */
2394 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2395 return xscale_read_dcsr(arch_info->target);
2396 else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2397 /* 1 = consume register content */
2398 return xscale_read_tx(arch_info->target, 1);
2399 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2400 /* can't read from RX register (host -> debug handler) */
2401 return ERROR_OK;
2402 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2403 /* can't (explicitly) read from TXRXCTRL register */
2404 return ERROR_OK;
2405 } else {/* Other DBG registers have to be transferred by the debug handler
2406 * send CP read request (command 0x40) */
2407 xscale_send_u32(target, 0x40);
2408
2409 /* send CP register number */
2410 xscale_send_u32(target, arch_info->dbg_handler_number);
2411
2412 /* read register value */
2413 xscale_read_tx(target, 1);
2414 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2415
2416 reg->dirty = false;
2417 reg->valid = true;
2418 }
2419
2420 return ERROR_OK;
2421 }
2422
2423 static int xscale_set_reg(struct reg *reg, uint8_t *buf)
2424 {
2425 struct xscale_reg *arch_info = reg->arch_info;
2426 struct target *target = arch_info->target;
2427 struct xscale_common *xscale = target_to_xscale(target);
2428 uint32_t value = buf_get_u32(buf, 0, 32);
2429
2430 /* DCSR, TX and RX are accessible via JTAG */
2431 if (strcmp(reg->name, "XSCALE_DCSR") == 0) {
2432 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2433 return xscale_write_dcsr(arch_info->target, -1, -1);
2434 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2435 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2436 return xscale_write_rx(arch_info->target);
2437 } else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2438 /* can't write to TX register (debug-handler -> host) */
2439 return ERROR_OK;
2440 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2441 /* can't (explicitly) write to TXRXCTRL register */
2442 return ERROR_OK;
2443 } else {/* Other DBG registers have to be transferred by the debug handler
2444 * send CP write request (command 0x41) */
2445 xscale_send_u32(target, 0x41);
2446
2447 /* send CP register number */
2448 xscale_send_u32(target, arch_info->dbg_handler_number);
2449
2450 /* send CP register value */
2451 xscale_send_u32(target, value);
2452 buf_set_u32(reg->value, 0, 32, value);
2453 }
2454
2455 return ERROR_OK;
2456 }
2457
2458 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2459 {
2460 struct xscale_common *xscale = target_to_xscale(target);
2461 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2462 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2463
2464 /* send CP write request (command 0x41) */
2465 xscale_send_u32(target, 0x41);
2466
2467 /* send CP register number */
2468 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2469
2470 /* send CP register value */
2471 xscale_send_u32(target, value);
2472 buf_set_u32(dcsr->value, 0, 32, value);
2473
2474 return ERROR_OK;
2475 }
2476
2477 static int xscale_read_trace(struct target *target)
2478 {
2479 struct xscale_common *xscale = target_to_xscale(target);
2480 struct arm *arm = &xscale->arm;
2481 struct xscale_trace_data **trace_data_p;
2482
2483 /* 258 words from debug handler
2484 * 256 trace buffer entries
2485 * 2 checkpoint addresses
2486 */
2487 uint32_t trace_buffer[258];
2488 int is_address[256];
2489 int i, j;
2490 unsigned int num_checkpoints = 0;
2491
2492 if (target->state != TARGET_HALTED) {
2493 LOG_TARGET_ERROR(target, "must be stopped to read trace data");
2494 return ERROR_TARGET_NOT_HALTED;
2495 }
2496
2497 /* send read trace buffer command (command 0x61) */
2498 xscale_send_u32(target, 0x61);
2499
2500 /* receive trace buffer content */
2501 xscale_receive(target, trace_buffer, 258);
2502
2503 /* parse buffer backwards to identify address entries */
2504 for (i = 255; i >= 0; i--) {
2505 /* also count number of checkpointed entries */
2506 if ((trace_buffer[i] & 0xe0) == 0xc0)
2507 num_checkpoints++;
2508
2509 is_address[i] = 0;
2510 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2511 ((trace_buffer[i] & 0xf0) == 0xd0)) {
2512 if (i > 0)
2513 is_address[--i] = 1;
2514 if (i > 0)
2515 is_address[--i] = 1;
2516 if (i > 0)
2517 is_address[--i] = 1;
2518 if (i > 0)
2519 is_address[--i] = 1;
2520 }
2521 }
2522
2523
2524 /* search first non-zero entry that is not part of an address */
2525 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2526 ;
2527
2528 if (j == 256) {
2529 LOG_DEBUG("no trace data collected");
2530 return ERROR_XSCALE_NO_TRACE_DATA;
2531 }
2532
2533 /* account for possible partial address at buffer start (wrap mode only) */
2534 if (is_address[0]) { /* first entry is address; complete set of 4? */
2535 i = 1;
2536 while (i < 4)
2537 if (!is_address[i++])
2538 break;
2539 if (i < 4)
2540 j += i; /* partial address; can't use it */
2541 }
2542
2543 /* if first valid entry is indirect branch, can't use that either (no address) */
2544 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2545 j++;
2546
2547 /* walk linked list to terminating entry */
2548 for (trace_data_p = &xscale->trace.data; *trace_data_p;
2549 trace_data_p = &(*trace_data_p)->next)
2550 ;
2551
2552 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2553 (*trace_data_p)->next = NULL;
2554 (*trace_data_p)->chkpt0 = trace_buffer[256];
2555 (*trace_data_p)->chkpt1 = trace_buffer[257];
2556 (*trace_data_p)->last_instruction = buf_get_u32(arm->pc->value, 0, 32);
2557 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2558 (*trace_data_p)->depth = 256 - j;
2559 (*trace_data_p)->num_checkpoints = num_checkpoints;
2560
2561 for (i = j; i < 256; i++) {
2562 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2563 if (is_address[i])
2564 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2565 else
2566 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2567 }
2568
2569 return ERROR_OK;
2570 }
2571
2572 static int xscale_read_instruction(struct target *target, uint32_t pc,
2573 struct arm_instruction *instruction)
2574 {
2575 struct xscale_common *const xscale = target_to_xscale(target);
2576 int section = -1;
2577 size_t size_read;
2578 uint32_t opcode;
2579 int retval;
2580
2581 if (!xscale->trace.image)
2582 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2583
2584 /* search for the section the current instruction belongs to */
2585 for (unsigned int i = 0; i < xscale->trace.image->num_sections; i++) {
2586 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2587 (xscale->trace.image->sections[i].base_address +
2588 xscale->trace.image->sections[i].size > pc)) {
2589 section = i;
2590 break;
2591 }
2592 }
2593
2594 if (section == -1) {
2595 /* current instruction couldn't be found in the image */
2596 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2597 }
2598
2599 if (xscale->trace.core_state == ARM_STATE_ARM) {
2600 uint8_t buf[4];
2601 retval = image_read_section(xscale->trace.image, section,
2602 pc - xscale->trace.image->sections[section].base_address,
2603 4, buf, &size_read);
2604 if (retval != ERROR_OK) {
2605 LOG_ERROR("error while reading instruction");
2606 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2607 }
2608 opcode = target_buffer_get_u32(target, buf);
2609 arm_evaluate_opcode(opcode, pc, instruction);
2610 } else if (xscale->trace.core_state == ARM_STATE_THUMB) {
2611 uint8_t buf[2];
2612 retval = image_read_section(xscale->trace.image, section,
2613 pc - xscale->trace.image->sections[section].base_address,
2614 2, buf, &size_read);
2615 if (retval != ERROR_OK) {
2616 LOG_ERROR("error while reading instruction");
2617 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2618 }
2619 opcode = target_buffer_get_u16(target, buf);
2620 thumb_evaluate_opcode(opcode, pc, instruction);
2621 } else {
2622 LOG_ERROR("BUG: unknown core state encountered");
2623 exit(-1);
2624 }
2625
2626 return ERROR_OK;
2627 }
2628
2629 /* Extract address encoded into trace data.
2630 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2631 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2632 int i, uint32_t *target)
2633 {
2634 /* if there are less than four entries prior to the indirect branch message
2635 * we can't extract the address */
2636 if (i < 4)
2637 *target = 0;
2638 else {
2639 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2640 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2641 }
2642 }
2643
2644 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2645 struct arm_instruction *instruction,
2646 struct command_invocation *cmd)
2647 {
2648 int retval = xscale_read_instruction(target, pc, instruction);
2649 if (retval == ERROR_OK)
2650 command_print(cmd, "%s", instruction->text);
2651 else
2652 command_print(cmd, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2653 }
2654
/* Decode the collected trace buffers and print the reconstructed
 * instruction stream on the command console.
 *
 * Each trace entry is a 'message byte': the upper nybble encodes the
 * event (exception, direct/indirect branch, checkpointed branch,
 * roll-over) and the lower nybble a count of instructions executed before
 * it.  Indirect-branch targets come from address bytes stored in the
 * buffer; direct-branch targets are recovered by disassembling the branch
 * from the loaded trace image; checkpoint registers provide a fallback
 * and a consistency check.  Decoding cannot start until a first absolute
 * address (branch target or checkpoint) is seen.
 */
static int xscale_analyze_trace(struct target *target, struct command_invocation *cmd)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct xscale_trace_data *trace_data = xscale->trace.data;
	int i, retval;
	uint32_t breakpoint_pc = 0;
	struct arm_instruction instruction;
	uint32_t current_pc = 0;/* initialized when address determined */

	if (!xscale->trace.image)
		LOG_WARNING("No trace image loaded; use 'xscale trace_image'");

	/* loop for each trace buffer that was loaded from target */
	while (trace_data) {
		int chkpt = 0;	/* incremented as checkpointed entries found */
		int j;

		/* FIXME: set this to correct mode when trace buffer is first enabled */
		xscale->trace.core_state = ARM_STATE_ARM;

		/* loop for each entry in this trace buffer */
		for (i = 0; i < trace_data->depth; i++) {
			int exception = 0;
			uint32_t chkpt_reg = 0x0;
			uint32_t branch_target = 0;
			int count;

			/* trace entry type is upper nybble of 'message byte' */
			int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;

			/* Target addresses of indirect branches are written into buffer
			 * before the message byte representing the branch. Skip past it */
			if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
				continue;

			switch (trace_msg_type) {
				case 0:	/* Exceptions */
				case 1:
				case 2:
				case 3:
				case 4:
				case 5:
				case 6:
				case 7:
					/* exception vector number is in bits [6:4] */
					exception = (trace_data->entries[i].data & 0x70) >> 4;

					/* FIXME: vector table may be at ffff0000 */
					branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
					break;

				case 8:	/* Direct Branch */
					break;

				case 9:	/* Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					break;

				case 13:	/* Checkpointed Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1;	/* 2 chkpts: this is the oldest */
					else
						chkpt_reg = trace_data->chkpt0;	/* 1 chkpt, or 2 and this is the newest */

					chkpt++;
					break;

				case 12:	/* Checkpointed Direct Branch */
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1;	/* 2 chkpts: this is the oldest */
					else
						chkpt_reg = trace_data->chkpt0;	/* 1 chkpt, or 2 and this is the newest */

					/* if no current_pc, checkpoint will be starting point */
					if (current_pc == 0)
						branch_target = chkpt_reg;

					chkpt++;
					break;

				case 15:/* Roll-over */
					break;

				default:/* Reserved */
					LOG_WARNING("trace is suspect: invalid trace message byte");
					continue;

			}

			/* If we don't have the current_pc yet, but we did get the branch target
			 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
			 * then we can start displaying instructions at the next iteration, with
			 * branch_target as the starting point.
			 */
			if (current_pc == 0) {
				current_pc = branch_target;	/* remains 0 unless branch_target obtained */
				continue;
			}

			/* We have current_pc. Read and display the instructions from the image.
			 * First, display count instructions (lower nybble of message byte). */
			count = trace_data->entries[i].data & 0x0f;
			for (j = 0; j < count; j++) {
				xscale_display_instruction(target, current_pc, &instruction,
					cmd);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			/* An additional instruction is implicitly added to count for
			 * rollover and some exceptions: undef, swi, prefetch abort. */
			if ((trace_msg_type == 15) || (exception > 0 && exception < 4)) {
				xscale_display_instruction(target, current_pc, &instruction,
					cmd);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			if (trace_msg_type == 15)	/* rollover */
				continue;

			if (exception) {
				command_print(cmd, "--- exception %i ---", exception);
				continue;
			}

			/* not exception or rollover; next instruction is a branch and is
			 * not included in the count */
			xscale_display_instruction(target, current_pc, &instruction, cmd);

			/* for direct branches, extract branch destination from instruction */
			if ((trace_msg_type == 8) || (trace_msg_type == 12)) {
				retval = xscale_read_instruction(target, current_pc, &instruction);
				if (retval == ERROR_OK)
					current_pc = instruction.info.b_bl_bx_blx.target_address;
				else
					current_pc = 0;	/* branch destination unknown */

				/* direct branch w/ checkpoint; can also get from checkpoint reg */
				if (trace_msg_type == 12) {
					if (current_pc == 0)
						current_pc = chkpt_reg;
					else if (current_pc != chkpt_reg)	/* sanity check */
						LOG_WARNING("trace is suspect: checkpoint register "
							"inconsistent with address from image");
				}

				if (current_pc == 0)
					command_print(cmd, "address unknown");

				continue;
			}

			/* indirect branch; the branch destination was read from trace buffer */
			if ((trace_msg_type == 9) || (trace_msg_type == 13)) {
				current_pc = branch_target;

				/* sanity check (checkpoint reg is redundant) */
				if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
					LOG_WARNING("trace is suspect: checkpoint register "
						"inconsistent with address from trace buffer");
			}

		}	/* END: for (i = 0; i < trace_data->depth; i++) */

		breakpoint_pc = trace_data->last_instruction;	/* used below */
		trace_data = trace_data->next;

	}	/* END: while (trace_data) */

	/* Finally... display all instructions up to the value of the pc when the
	 * debug break occurred (saved when trace data was collected from target).
	 * This is necessary because the trace only records execution branches and 16
	 * consecutive instructions (rollovers), so last few typically missed.
	 */
	if (current_pc == 0)
		return ERROR_OK;/* current_pc was never found */

	/* how many instructions remaining? */
	int gap_count = (breakpoint_pc - current_pc) /
		(xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);

	/* should never be negative or over 16, but verify */
	if (gap_count < 0 || gap_count > 16) {
		LOG_WARNING("trace is suspect: excessive gap at end of trace");
		return ERROR_OK;/* bail; large number or negative value no good */
	}

	/* display remaining instructions */
	for (i = 0; i < gap_count; i++) {
		xscale_display_instruction(target, current_pc, &instruction, cmd);
		current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
	}

	return ERROR_OK;
}
2853
/* Accessors for the XScale-specific debug registers created in
 * xscale_build_reg_cache(); ARM core registers use the generic ARM
 * register cache instead. */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2858
/* Build the target's register caches: the generic ARM core registers
 * first, then a second cache holding the XScale debug registers, each
 * backed by xscale_reg_type (xscale_get_reg/xscale_set_reg). */
static void xscale_build_reg_cache(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	/* one arch_info element per XScale register, allocated as a single
	 * array; freed via reg_list[0].arch_info in xscale_free_reg_cache() */
	struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
	int i;
	int num_regs = ARRAY_SIZE(xscale_reg_arch_info);

	(*cache_p) = arm_build_reg_cache(target, arm);

	/* NOTE(review): the malloc/calloc results in this function are not
	 * checked and would be dereferenced immediately on failure — confirm
	 * whether an abort-on-OOM policy applies here */
	(*cache_p)->next = malloc(sizeof(struct reg_cache));
	cache_p = &(*cache_p)->next;

	/* fill in values for the xscale reg cache */
	(*cache_p)->name = "XScale registers";
	(*cache_p)->next = NULL;
	(*cache_p)->reg_list = calloc(num_regs, sizeof(struct reg));
	(*cache_p)->num_regs = num_regs;

	for (i = 0; i < num_regs; i++) {
		(*cache_p)->reg_list[i].name = xscale_reg_list[i];
		(*cache_p)->reg_list[i].value = calloc(4, 1);
		(*cache_p)->reg_list[i].dirty = false;
		(*cache_p)->reg_list[i].valid = false;
		(*cache_p)->reg_list[i].size = 32;
		(*cache_p)->reg_list[i].arch_info = &arch_info[i];
		(*cache_p)->reg_list[i].type = &xscale_reg_type;
		(*cache_p)->reg_list[i].exist = true;
		arch_info[i] = xscale_reg_arch_info[i];
		arch_info[i].target = target;
	}

	xscale->reg_cache = (*cache_p);
}
2894
2895 static void xscale_free_reg_cache(struct target *target)
2896 {
2897 struct xscale_common *xscale = target_to_xscale(target);
2898 struct reg_cache *cache = xscale->reg_cache;
2899
2900 for (unsigned int i = 0; i < ARRAY_SIZE(xscale_reg_arch_info); i++)
2901 free(cache->reg_list[i].value);
2902
2903 free(cache->reg_list[0].arch_info);
2904 free(cache->reg_list);
2905 free(cache);
2906
2907 arm_free_reg_cache(&xscale->arm);
2908 }
2909
/* target_type init_target() hook: builds the register caches for this
 * target at startup. */
static int xscale_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2916
/* target_type deinit_target() hook: releases the register caches and the
 * per-target state allocated in xscale_target_create(). */
static void xscale_deinit_target(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	xscale_free_reg_cache(target);
	free(xscale);
}
2924
/* Initialize the per-target XScale state and hook this target into the
 * generic ARM and ARMv4/5 MMU layers.  Called once from
 * xscale_target_create(); no JTAG traffic happens here. */
static int xscale_init_arch_info(struct target *target,
	struct xscale_common *xscale, struct jtag_tap *tap)
{
	struct arm *arm;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	arm = &xscale->arm;

	/* store architecture specific data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* PXA3xx with 11 bit IR shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler: the reset vector (slot 0) at
	 * either vector base must branch to handler_address + 0x20 */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* all other vector slots default to a branch-to-self (B .) */
	for (i = 1; i <= 7; i++) {
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two IBCR instruction comparators and two DBR data comparators */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
		target_name(target));

	/* opcodes patched in for software breakpoints */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale->trace.buffer_fill = 0;
	xscale->trace.fill_counter = 0;

	/* prepare ARMv4/5 specific information */
	arm->arch_info = xscale;
	arm->core_type = ARM_CORE_TYPE_STD;
	arm->read_core_reg = xscale_read_core_reg;
	arm->write_core_reg = xscale_write_core_reg;
	arm->full_context = xscale_full_context;

	arm_init_arch_info(target, arm);

	/* hook the ARMv4/5 MMU/cache layer to the XScale accessors */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
3014
3015 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3016 {
3017 struct xscale_common *xscale;
3018
3019 if (sizeof(xscale_debug_handler) > 0x800) {
3020 LOG_ERROR("debug_handler.bin: larger than 2kb");
3021 return ERROR_FAIL;
3022 }
3023
3024 xscale = calloc(1, sizeof(*xscale));
3025 if (!xscale)
3026 return ERROR_FAIL;
3027
3028 return xscale_init_arch_info(target, xscale, target->tap);
3029 }
3030
3031 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3032 {
3033 struct target *target = NULL;
3034 struct xscale_common *xscale;
3035 int retval;
3036 uint32_t handler_address;
3037
3038 if (CMD_ARGC < 2)
3039 return ERROR_COMMAND_SYNTAX_ERROR;
3040
3041 target = get_target(CMD_ARGV[0]);
3042 if (!target) {
3043 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3044 return ERROR_FAIL;
3045 }
3046
3047 xscale = target_to_xscale(target);
3048 retval = xscale_verify_pointer(CMD, xscale);
3049 if (retval != ERROR_OK)
3050 return retval;
3051
3052 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3053
3054 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3055 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3056 xscale->handler_address = handler_address;
3057 else {
3058 LOG_ERROR(
3059 "xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3060 return ERROR_FAIL;
3061 }
3062
3063 return ERROR_OK;
3064 }
3065
3066 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3067 {
3068 struct target *target = NULL;
3069 struct xscale_common *xscale;
3070 int retval;
3071 uint32_t cache_clean_address;
3072
3073 if (CMD_ARGC < 2)
3074 return ERROR_COMMAND_SYNTAX_ERROR;
3075
3076 target = get_target(CMD_ARGV[0]);
3077 if (!target) {
3078 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3079 return ERROR_FAIL;
3080 }
3081 xscale = target_to_xscale(target);
3082 retval = xscale_verify_pointer(CMD, xscale);
3083 if (retval != ERROR_OK)
3084 return retval;
3085
3086 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3087
3088 if (cache_clean_address & 0xffff)
3089 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3090 else
3091 xscale->cache_clean_address = cache_clean_address;
3092
3093 return ERROR_OK;
3094 }
3095
3096 COMMAND_HANDLER(xscale_handle_cache_info_command)
3097 {
3098 struct target *target = get_current_target(CMD_CTX);
3099 struct xscale_common *xscale = target_to_xscale(target);
3100 int retval;
3101
3102 retval = xscale_verify_pointer(CMD, xscale);
3103 if (retval != ERROR_OK)
3104 return retval;
3105
3106 return armv4_5_handle_cache_info_command(CMD, &xscale->armv4_5_mmu.armv4_5_cache);
3107 }
3108
3109 static int xscale_virt2phys(struct target *target,
3110 target_addr_t virtual, target_addr_t *physical)
3111 {
3112 struct xscale_common *xscale = target_to_xscale(target);
3113 uint32_t cb;
3114
3115 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3116 LOG_ERROR(xscale_not);
3117 return ERROR_TARGET_INVALID;
3118 }
3119
3120 uint32_t ret;
3121 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3122 virtual, &cb, &ret);
3123 if (retval != ERROR_OK)
3124 return retval;
3125 *physical = ret;
3126 return ERROR_OK;
3127 }
3128
3129 static int xscale_mmu(struct target *target, int *enabled)
3130 {
3131 struct xscale_common *xscale = target_to_xscale(target);
3132
3133 if (target->state != TARGET_HALTED) {
3134 LOG_TARGET_ERROR(target, "not halted");
3135 return ERROR_TARGET_NOT_HALTED;
3136 }
3137 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3138 return ERROR_OK;
3139 }
3140
3141 COMMAND_HANDLER(xscale_handle_mmu_command)
3142 {
3143 struct target *target = get_current_target(CMD_CTX);
3144 struct xscale_common *xscale = target_to_xscale(target);
3145 int retval;
3146
3147 retval = xscale_verify_pointer(CMD, xscale);
3148 if (retval != ERROR_OK)
3149 return retval;
3150
3151 if (target->state != TARGET_HALTED) {
3152 command_print(CMD, "Error: target must be stopped for \"%s\" command", CMD_NAME);
3153 return ERROR_TARGET_NOT_HALTED;
3154 }
3155
3156 if (CMD_ARGC >= 1) {
3157 bool enable;
3158 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3159 if (enable)
3160 xscale_enable_mmu_caches(target, 1, 0, 0);
3161 else
3162 xscale_disable_mmu_caches(target, 1, 0, 0);
3163 xscale->armv4_5_mmu.mmu_enabled = enable;
3164 }
3165
3166 command_print(CMD, "mmu %s",
3167 (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3168
3169 return ERROR_OK;
3170 }
3171
3172 COMMAND_HANDLER(xscale_handle_idcache_command)
3173 {
3174 struct target *target = get_current_target(CMD_CTX);
3175 struct xscale_common *xscale = target_to_xscale(target);
3176
3177 int retval = xscale_verify_pointer(CMD, xscale);
3178 if (retval != ERROR_OK)
3179 return retval;
3180
3181 if (target->state != TARGET_HALTED) {
3182 command_print(CMD, "Error: target must be stopped for \"%s\" command", CMD_NAME);
3183 return ERROR_TARGET_NOT_HALTED;
3184 }
3185
3186 bool icache = false;
3187 if (strcmp(CMD_NAME, "icache") == 0)
3188 icache = true;
3189 if (CMD_ARGC >= 1) {
3190 bool enable;
3191 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3192 if (icache) {
3193 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3194 if (enable)
3195 xscale_enable_mmu_caches(target, 0, 0, 1);
3196 else
3197 xscale_disable_mmu_caches(target, 0, 0, 1);
3198 } else {
3199 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3200 if (enable)
3201 xscale_enable_mmu_caches(target, 0, 1, 0);
3202 else
3203 xscale_disable_mmu_caches(target, 0, 1, 0);
3204 }
3205 }
3206
3207 bool enabled = icache ?
3208 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3209 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3210 const char *msg = enabled ? "enabled" : "disabled";
3211 command_print(CMD, "%s %s", CMD_NAME, msg);
3212
3213 return ERROR_OK;
3214 }
3215
/* Mapping from the vector names accepted by the "xscale vector_catch"
 * command to the corresponding trap-enable bits in the DCSR. */
static const struct {
	char name[15];
	unsigned mask;
} vec_ids[] = {
	{ "fiq", DCSR_TF, },
	{ "irq", DCSR_TI, },
	{ "dabt", DCSR_TD, },
	{ "pabt", DCSR_TA, },
	{ "swi", DCSR_TS, },
	{ "undef", DCSR_TU, },
	{ "reset", DCSR_TR, },
};
3228
3229 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3230 {
3231 struct target *target = get_current_target(CMD_CTX);
3232 struct xscale_common *xscale = target_to_xscale(target);
3233 int retval;
3234 uint32_t dcsr_value;
3235 uint32_t catch = 0;
3236 struct reg *dcsr_reg = &xscale->reg_cache->reg_list[XSCALE_DCSR];
3237
3238 retval = xscale_verify_pointer(CMD, xscale);
3239 if (retval != ERROR_OK)
3240 return retval;
3241
3242 if (CMD_ARGC > 0) {
3243 if (CMD_ARGC == 1) {
3244 if (strcmp(CMD_ARGV[0], "all") == 0) {
3245 catch = DCSR_TRAP_MASK;
3246 CMD_ARGC--;
3247 } else if (strcmp(CMD_ARGV[0], "none") == 0) {
3248 catch = 0;
3249 CMD_ARGC--;
3250 }
3251 }
3252 while (CMD_ARGC-- > 0) {
3253 unsigned i;
3254 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
3255 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name))
3256 continue;
3257 catch |= vec_ids[i].mask;
3258 break;
3259 }
3260 if (i == ARRAY_SIZE(vec_ids)) {
3261 LOG_ERROR("No vector '%s'", CMD_ARGV[CMD_ARGC]);
3262 return ERROR_COMMAND_SYNTAX_ERROR;
3263 }
3264 }
3265 buf_set_u32(dcsr_reg->value, 0, 32,
3266 (buf_get_u32(dcsr_reg->value, 0, 32) & ~DCSR_TRAP_MASK) | catch);
3267 xscale_write_dcsr(target, -1, -1);
3268 }
3269
3270 dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
3271 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
3272 command_print(CMD, "%15s: %s", vec_ids[i].name,
3273 (dcsr_value & vec_ids[i].mask) ? "catch" : "ignore");
3274 }
3275
3276 return ERROR_OK;
3277 }
3278
3279
3280 COMMAND_HANDLER(xscale_handle_vector_table_command)
3281 {
3282 struct target *target = get_current_target(CMD_CTX);
3283 struct xscale_common *xscale = target_to_xscale(target);
3284 int err = 0;
3285 int retval;
3286
3287 retval = xscale_verify_pointer(CMD, xscale);
3288 if (retval != ERROR_OK)
3289 return retval;
3290
3291 if (CMD_ARGC == 0) { /* print current settings */
3292 int idx;
3293
3294 command_print(CMD, "active user-set static vectors:");
3295 for (idx = 1; idx < 8; idx++)
3296 if (xscale->static_low_vectors_set & (1 << idx))
3297 command_print(CMD,
3298 "low %d: 0x%" PRIx32,
3299 idx,
3300 xscale->static_low_vectors[idx]);
3301 for (idx = 1; idx < 8; idx++)
3302 if (xscale->static_high_vectors_set & (1 << idx))
3303 command_print(CMD,
3304 "high %d: 0x%" PRIx32,
3305 idx,
3306 xscale->static_high_vectors[idx]);
3307 return ERROR_OK;
3308 }
3309
3310 if (CMD_ARGC != 3)
3311 err = 1;
3312 else {
3313 int idx;
3314 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3315 uint32_t vec;
3316 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3317
3318 if (idx < 1 || idx >= 8)
3319 err = 1;
3320
3321 if (!err && strcmp(CMD_ARGV[0], "low") == 0) {
3322 xscale->static_low_vectors_set |= (1<<idx);
3323 xscale->static_low_vectors[idx] = vec;
3324 } else if (!err && (strcmp(CMD_ARGV[0], "high") == 0)) {
3325 xscale->static_high_vectors_set |= (1<<idx);
3326 xscale->static_high_vectors[idx] = vec;
3327 } else
3328 err = 1;
3329 }
3330
3331 if (err)
3332 return ERROR_COMMAND_SYNTAX_ERROR;
3333
3334 return ERROR_OK;
3335 }
3336
3337
3338 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3339 {
3340 struct target *target = get_current_target(CMD_CTX);
3341 struct xscale_common *xscale = target_to_xscale(target);
3342 uint32_t dcsr_value;
3343 int retval;
3344
3345 retval = xscale_verify_pointer(CMD, xscale);
3346 if (retval != ERROR_OK)
3347 return retval;
3348
3349 if (target->state != TARGET_HALTED) {
3350 command_print(CMD, "Error: target must be stopped for \"%s\" command", CMD_NAME);
3351 return ERROR_TARGET_NOT_HALTED;
3352 }
3353
3354 if (CMD_ARGC >= 1) {
3355 if (strcmp("enable", CMD_ARGV[0]) == 0)
3356 xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
3357 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3358 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3359 else
3360 return ERROR_COMMAND_SYNTAX_ERROR;
3361 }
3362
3363 if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3364 if (strcmp("fill", CMD_ARGV[1]) == 0) {
3365 int buffcount = 1; /* default */
3366 if (CMD_ARGC >= 3)
3367 COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
3368 if (buffcount < 1) { /* invalid */
3369 command_print(CMD, "fill buffer count must be > 0");
3370 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3371 return ERROR_COMMAND_SYNTAX_ERROR;
3372 }
3373 xscale->trace.buffer_fill = buffcount;
3374 xscale->trace.mode = XSCALE_TRACE_FILL;
3375 } else if (strcmp("wrap", CMD_ARGV[1]) == 0)
3376 xscale->trace.mode = XSCALE_TRACE_WRAP;
3377 else {
3378 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3379 return ERROR_COMMAND_SYNTAX_ERROR;
3380 }
3381 }
3382
3383 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3384 char fill_string[12];
3385 sprintf(fill_string, "fill %d", xscale->trace.buffer_fill);
3386 command_print(CMD, "trace buffer enabled (%s)",
3387 (xscale->trace.mode == XSCALE_TRACE_FILL)
3388 ? fill_string : "wrap");
3389 } else
3390 command_print(CMD, "trace buffer disabled");
3391
3392 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3393 if (xscale->trace.mode == XSCALE_TRACE_FILL)
3394 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3395 else
3396 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3397
3398 return ERROR_OK;
3399 }
3400
3401 COMMAND_HANDLER(xscale_handle_trace_image_command)
3402 {
3403 struct target *target = get_current_target(CMD_CTX);
3404 struct xscale_common *xscale = target_to_xscale(target);
3405 int retval;
3406
3407 if (CMD_ARGC < 1)
3408 return ERROR_COMMAND_SYNTAX_ERROR;
3409
3410 retval = xscale_verify_pointer(CMD, xscale);
3411 if (retval != ERROR_OK)
3412 return retval;
3413
3414 if (xscale->trace.image) {
3415 image_close(xscale->trace.image);
3416 free(xscale->trace.image);
3417 command_print(CMD, "previously loaded image found and closed");
3418 }
3419
3420 xscale->trace.image = malloc(sizeof(struct image));
3421 xscale->trace.image->base_address_set = false;
3422 xscale->trace.image->start_address_set = false;
3423
3424 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3425 if (CMD_ARGC >= 2) {
3426 xscale->trace.image->base_address_set = true;
3427 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3428 } else
3429 xscale->trace.image->base_address_set = false;
3430
3431 if (image_open(xscale->trace.image, CMD_ARGV[0],
3432 (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK) {
3433 free(xscale->trace.image);
3434 xscale->trace.image = NULL;
3435 return ERROR_OK;
3436 }
3437
3438 return ERROR_OK;
3439 }
3440
3441 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3442 {
3443 struct target *target = get_current_target(CMD_CTX);
3444 struct xscale_common *xscale = target_to_xscale(target);
3445 struct xscale_trace_data *trace_data;
3446 struct fileio *file;
3447 int retval;
3448
3449 retval = xscale_verify_pointer(CMD, xscale);
3450 if (retval != ERROR_OK)
3451 return retval;
3452
3453 if (target->state != TARGET_HALTED) {
3454 command_print(CMD, "Error: target must be stopped for \"%s\" command", CMD_NAME);
3455 return ERROR_TARGET_NOT_HALTED;
3456 }
3457
3458 if (CMD_ARGC < 1)
3459 return ERROR_COMMAND_SYNTAX_ERROR;
3460
3461 trace_data = xscale->trace.data;
3462
3463 if (!trace_data) {
3464 command_print(CMD, "no trace data collected");
3465 return ERROR_OK;
3466 }
3467
3468 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3469 return ERROR_OK;
3470
3471 while (trace_data) {
3472 int i;
3473
3474 fileio_write_u32(file, trace_data->chkpt0);
3475 fileio_write_u32(file, trace_data->chkpt1);
3476 fileio_write_u32(file, trace_data->last_instruction);
3477 fileio_write_u32(file, trace_data->depth);
3478
3479 for (i = 0; i < trace_data->depth; i++)
3480 fileio_write_u32(file, trace_data->entries[i].data |
3481 ((trace_data->entries[i].type & 0xffff) << 16));
3482
3483 trace_data = trace_data->next;
3484 }
3485
3486 fileio_close(file);
3487
3488 return ERROR_OK;
3489 }
3490
3491 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3492 {
3493 struct target *target = get_current_target(CMD_CTX);
3494 struct xscale_common *xscale = target_to_xscale(target);
3495 int retval;
3496
3497 retval = xscale_verify_pointer(CMD, xscale);
3498 if (retval != ERROR_OK)
3499 return retval;
3500
3501 xscale_analyze_trace(target, CMD);
3502
3503 return ERROR_OK;
3504 }
3505
3506 COMMAND_HANDLER(xscale_handle_cp15)
3507 {
3508 struct target *target = get_current_target(CMD_CTX);
3509 struct xscale_common *xscale = target_to_xscale(target);
3510 int retval;
3511
3512 retval = xscale_verify_pointer(CMD, xscale);
3513 if (retval != ERROR_OK)
3514 return retval;
3515
3516 if (target->state != TARGET_HALTED) {
3517 command_print(CMD, "Error: target must be stopped for \"%s\" command", CMD_NAME);
3518 return ERROR_TARGET_NOT_HALTED;
3519 }
3520 uint32_t reg_no = 0;
3521 struct reg *reg = NULL;
3522 if (CMD_ARGC > 0) {
3523 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3524 /*translate from xscale cp15 register no to openocd register*/
3525 switch (reg_no) {
3526 case 0:
3527 reg_no = XSCALE_MAINID;
3528 break;
3529 case 1:
3530 reg_no = XSCALE_CTRL;
3531 break;
3532 case 2:
3533 reg_no = XSCALE_TTB;
3534 break;
3535 case 3:
3536 reg_no = XSCALE_DAC;
3537 break;
3538 case 5:
3539 reg_no = XSCALE_FSR;
3540 break;
3541 case 6:
3542 reg_no = XSCALE_FAR;
3543 break;
3544 case 13:
3545 reg_no = XSCALE_PID;
3546 break;
3547 case 15:
3548 reg_no = XSCALE_CPACCESS;
3549 break;
3550 default:
3551 command_print(CMD, "invalid register number");
3552 return ERROR_COMMAND_SYNTAX_ERROR;
3553 }
3554 reg = &xscale->reg_cache->reg_list[reg_no];
3555
3556 }
3557 if (CMD_ARGC == 1) {
3558 uint32_t value;
3559
3560 /* read cp15 control register */
3561 xscale_get_reg(reg);
3562 value = buf_get_u32(reg->value, 0, 32);
3563 command_print(CMD, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size),
3564 value);
3565 } else if (CMD_ARGC == 2) {
3566 uint32_t value;
3567 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3568
3569 /* send CP write request (command 0x41) */
3570 xscale_send_u32(target, 0x41);
3571
3572 /* send CP register number */
3573 xscale_send_u32(target, reg_no);
3574
3575 /* send CP register value */
3576 xscale_send_u32(target, value);
3577
3578 /* execute cpwait to ensure outstanding operations complete */
3579 xscale_send_u32(target, 0x53);
3580 } else
3581 return ERROR_COMMAND_SYNTAX_ERROR;
3582
3583 return ERROR_OK;
3584 }
3585
/* Commands available only while a target exists (COMMAND_EXEC mode):
 * cache/MMU control, vector management, and trace-buffer handling. */
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
		.usage = "",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display mask of vectors "
			"that should trigger debug entry",
		.usage = "['all'|'none'|'fiq'|'irq'|'dabt'|'pabt'|'swi'|'undef'|'reset']",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
		.usage = "",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};
/* Commands usable in any mode (COMMAND_ANY), including before targets
 * are initialized; also chains in the exec-mode commands above. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "<target> <address>",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: generic ARM commands plus the
 * "xscale" command group defined above. */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.usage = "",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3704
/* OpenOCD target driver vtable for Intel XScale cores, wiring the
 * functions defined in this file into the generic target framework. */
struct target_type xscale_target = {
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,

	/* checksum/blank-check use the generic ARM algorithm runners */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,
	.deinit_target = xscale_deinit_target,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)