Cleanup: removal of obsolete semicolons
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
25 ***************************************************************************/
26
27 #ifdef HAVE_CONFIG_H
28 #include "config.h"
29 #endif
30
31 #include "breakpoints.h"
32 #include "xscale.h"
33 #include "target_type.h"
34 #include "arm_jtag.h"
35 #include "arm_simulator.h"
36 #include "arm_disassembler.h"
37 #include <helper/time_support.h>
38 #include "register.h"
39 #include "image.h"
40 #include "arm_opcodes.h"
41 #include "armv4_5.h"
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62 /* forward declarations */
63 static int xscale_resume(struct target *, int current,
64 uint32_t address, int handle_breakpoints, int debug_execution);
65 static int xscale_debug_entry(struct target *);
66 static int xscale_restore_banked(struct target *);
67 static int xscale_get_reg(struct reg *reg);
68 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
69 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
70 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
71 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
72 static int xscale_read_trace(struct target *);
73
/* This XScale "debug handler" is loaded into the processor's
 * mini-ICache, which is 2K of code writable only via JTAG.
 *
 * xscale_debug.inc is a generated file containing the handler's
 * binary image as a C byte-array initializer.
 */
static const uint8_t xscale_debug_handler[] = {
#include "xscale_debug.inc"
};
80
/* Names of the XScale-specific registers exposed through the register
 * cache.  Order must stay in sync with xscale_reg_arch_info[] below
 * and the XSCALE_* register index constants. */
static const char *const xscale_reg_list[] = {
	"XSCALE_MAINID",	/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",	/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",	/* 20 */
	"XSCALE_TXRXCTRL",
};
105
/* Architecture info for each cached register, parallel to
 * xscale_reg_list[].  Entries with -1 are not readable through the
 * debug handler and are accessed only via JTAG (see trailing comments).
 * NOTE(review): the second member is NULL here and presumably filled
 * in when the register cache is built — confirm against xscale.h. */
static const struct xscale_reg xscale_reg_arch_info[] = {
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL},	/* DCSR accessed via JTAG or SW */
	{-1, NULL},	/* TX accessed via JTAG */
	{-1, NULL},	/* RX accessed via JTAG */
	{-1, NULL},	/* TXRXCTRL implicit access via JTAG */
};
130
/* Convenience wrapper: marshal a host uint32_t into a little buffer
 * and hand it to xscale_set_reg(). */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t value_buf[4];

	buf_set_u32(value_buf, 0, 32, value);
	return xscale_set_reg(reg, value_buf);
}
140
141 static const char xscale_not[] = "target is not an XScale";
142
143 static int xscale_verify_pointer(struct command_context *cmd_ctx,
144 struct xscale_common *xscale)
145 {
146 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
147 command_print(cmd_ctx, xscale_not);
148 return ERROR_TARGET_INVALID;
149 }
150 return ERROR_OK;
151 }
152
153 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
154 {
155 assert(tap != NULL);
156
157 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
158 struct scan_field field;
159 uint8_t scratch[4];
160
161 memset(&field, 0, sizeof field);
162 field.num_bits = tap->ir_length;
163 field.out_value = scratch;
164 buf_set_u32(scratch, 0, field.num_bits, new_instr);
165
166 jtag_add_ir_scan(tap, &field, end_state);
167 }
168
169 return ERROR_OK;
170 }
171
/* Read the XScale DCSR (Debug Control and Status Register) over JTAG
 * into the register cache, then scan the same value back out so the
 * cached hold_rst / external_debug_break bits take effect on target.
 * Returns ERROR_OK or a JTAG error code.
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	/* select the DCSR data register path in the TAP */
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_DRPAUSE);

	/* command field: bit 1 = hold_rst, bit 2 = external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* verify the captured status bits match expectations */
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* cache now mirrors the hardware value */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
233
234
235 static void xscale_getbuf(jtag_callback_data_t arg)
236 {
237 uint8_t *in = (uint8_t *)arg;
238 *((uint32_t *)arg) = buf_get_u32(in, 0, 32);
239 }
240
241 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
242 {
243 if (num_words == 0)
244 return ERROR_COMMAND_SYNTAX_ERROR;
245
246 struct xscale_common *xscale = target_to_xscale(target);
247 int retval = ERROR_OK;
248 tap_state_t path[3];
249 struct scan_field fields[3];
250 uint8_t *field0 = malloc(num_words * 1);
251 uint8_t field0_check_value = 0x2;
252 uint8_t field0_check_mask = 0x6;
253 uint32_t *field1 = malloc(num_words * 4);
254 uint8_t field2_check_value = 0x0;
255 uint8_t field2_check_mask = 0x1;
256 int words_done = 0;
257 int words_scheduled = 0;
258 int i;
259
260 path[0] = TAP_DRSELECT;
261 path[1] = TAP_DRCAPTURE;
262 path[2] = TAP_DRSHIFT;
263
264 memset(&fields, 0, sizeof fields);
265
266 fields[0].num_bits = 3;
267 uint8_t tmp;
268 fields[0].in_value = &tmp;
269 fields[0].check_value = &field0_check_value;
270 fields[0].check_mask = &field0_check_mask;
271
272 fields[1].num_bits = 32;
273
274 fields[2].num_bits = 1;
275 uint8_t tmp2;
276 fields[2].in_value = &tmp2;
277 fields[2].check_value = &field2_check_value;
278 fields[2].check_mask = &field2_check_mask;
279
280 xscale_jtag_set_instr(target->tap,
281 XSCALE_DBGTX << xscale->xscale_variant,
282 TAP_IDLE);
283 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above
284 *could be a no-op */
285
286 /* repeat until all words have been collected */
287 int attempts = 0;
288 while (words_done < num_words) {
289 /* schedule reads */
290 words_scheduled = 0;
291 for (i = words_done; i < num_words; i++) {
292 fields[0].in_value = &field0[i];
293
294 jtag_add_pathmove(3, path);
295
296 fields[1].in_value = (uint8_t *)(field1 + i);
297
298 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
299
300 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
301
302 words_scheduled++;
303 }
304
305 retval = jtag_execute_queue();
306 if (retval != ERROR_OK) {
307 LOG_ERROR("JTAG error while receiving data from debug handler");
308 break;
309 }
310
311 /* examine results */
312 for (i = words_done; i < num_words; i++) {
313 if (!(field0[i] & 1)) {
314 /* move backwards if necessary */
315 int j;
316 for (j = i; j < num_words - 1; j++) {
317 field0[j] = field0[j + 1];
318 field1[j] = field1[j + 1];
319 }
320 words_scheduled--;
321 }
322 }
323 if (words_scheduled == 0) {
324 if (attempts++ == 1000) {
325 LOG_ERROR(
326 "Failed to receiving data from debug handler after 1000 attempts");
327 retval = ERROR_TARGET_TIMEOUT;
328 break;
329 }
330 }
331
332 words_done += words_scheduled;
333 }
334
335 for (i = 0; i < num_words; i++)
336 *(buffer++) = buf_get_u32((uint8_t *)&field1[i], 0, 32);
337
338 free(field1);
339
340 return retval;
341 }
342
/* Read the debug handler's TX register into the XSCALE_TX cache entry,
 * polling until its valid bit (bit 0 of the status field) is set or a
 * one second timeout expires.
 *
 * consume: non-zero clears TX as a side effect of the read (straight
 * Capture-DR -> Shift-DR); zero peeks without consuming (detour via
 * Exit1-DR/Pause-DR).
 *
 * Returns ERROR_OK when data was read,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when nothing was ready (only
 * reachable in the non-consuming case), or ERROR_TARGET_TIMEOUT.
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGTX << xscale->xscale_variant,
		TAP_IDLE);

	/* consuming path: go directly to Shift-DR */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: pause before shifting so TX stays valid */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;; ) {
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* leave the loop unless we are consuming and the valid bit
		 * was clear, i.e. there was nothing to consume yet */
		if (!((!(field0_in & 1)) && consume))
			goto done;
		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}
done:

	/* valid bit clear means no data was available */
	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
429
/* Write the cached XSCALE_RX value to the debug handler's RX register.
 * First polls (with a one second timeout) until bit 0 of the status
 * field reads 0, i.e. the handler has consumed the previous word, then
 * scans the data once more with the rx_valid flag (field2) set to hand
 * it over.  Returns ERROR_OK, ERROR_TARGET_TIMEOUT, or a JTAG error.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;) {
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* bit 0 clear: the handler is ready for new data */
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
507
/* send count elements of size byte to the debug handler */
/* All DR scans are queued back to back and executed in one batch;
 * multi-byte elements are converted from the target's endianness to
 * host order first.  size must be 4, 2 or 1.
 * Returns ERROR_OK, ERROR_COMMAND_SYNTAX_ERROR for a bad size, or a
 * JTAG error code. */
static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	int done_count = 0;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	/* scan layout: 3 command bits (zero), 32 data bits (t1, rewritten
	 * each iteration), 1 rx_valid bit (constant 1) */
	static const uint8_t t0;
	uint8_t t1[4];
	static const uint8_t t2 = 1;
	struct scan_field fields[3] = {
		{ .num_bits = 3, .out_value = &t0 },
		{ .num_bits = 32, .out_value = t1 },
		{ .num_bits = 1, .out_value = &t2 },
	};

	int endianness = target->endianness;
	while (done_count++ < count) {
		uint32_t t;

		switch (size) {
		case 4:
			if (endianness == TARGET_LITTLE_ENDIAN)
				t = le_to_h_u32(buffer);
			else
				t = be_to_h_u32(buffer);
			break;
		case 2:
			if (endianness == TARGET_LITTLE_ENDIAN)
				t = le_to_h_u16(buffer);
			else
				t = be_to_h_u16(buffer);
			break;
		case 1:
			t = buffer[0];
			break;
		default:
			LOG_ERROR("BUG: size neither 4, 2 nor 1");
			return ERROR_COMMAND_SYNTAX_ERROR;
		}

		buf_set_u32(t1, 0, 32, t);

		jtag_add_dr_scan(target->tap,
			3,
			fields,
			TAP_IDLE);
		buffer += size;
	}

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
570
571 static int xscale_send_u32(struct target *target, uint32_t value)
572 {
573 struct xscale_common *xscale = target_to_xscale(target);
574
575 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
576 return xscale_write_rx(target);
577 }
578
/* Write the cached XSCALE_DCSR value to the target over JTAG.
 * hold_rst / ext_dbg_brk update the corresponding sticky flags unless
 * passed as -1 (leave unchanged); the flags are also encoded into the
 * 3-bit command field of the scan.  On success the cache entry is
 * marked clean and valid.  Returns ERROR_OK or a JTAG error code.
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* command field: bit 1 = hold_rst, bit 2 = external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
635
/* Parity of a 32-bit word: 0 when the number of set bits is even,
 * 1 when odd.  The word is XOR-folded down to its low nibble, which
 * then indexes the 16-bit constant 0x6996 used as a parity lookup
 * table. */
static unsigned int parity(unsigned int v)
{
	unsigned int folded = v;

	folded ^= folded >> 16;
	folded ^= folded >> 8;
	folded ^= folded >> 4;
	return (0x6996 >> (folded & 0xf)) & 1;
}
647
/* Load one 8-word cache line into the mini ICache at virtual address
 * va (the low 5 bits are discarded, so va should be 32-byte aligned).
 * buffer holds the 8 instruction words; each is scanned together with
 * its computed parity bit.  Returns the jtag_execute_queue() result.
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
		TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++) {
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* memcpy instead of a pointer cast avoids strict-aliasing
		 * and alignment problems when reading packet as a word */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
701
/* Queue (without executing) the LDIC "invalidate IC line" command for
 * the cache line containing virtual address va.  The caller is
 * responsible for eventually flushing the JTAG queue.  Always returns
 * ERROR_OK.
 */
static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	struct scan_field fields[2];

	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
		TAP_IDLE);

	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	return ERROR_OK;
}
731
732 static int xscale_update_vectors(struct target *target)
733 {
734 struct xscale_common *xscale = target_to_xscale(target);
735 int i;
736 int retval;
737
738 uint32_t low_reset_branch, high_reset_branch;
739
740 for (i = 1; i < 8; i++) {
741 /* if there's a static vector specified for this exception, override */
742 if (xscale->static_high_vectors_set & (1 << i))
743 xscale->high_vectors[i] = xscale->static_high_vectors[i];
744 else {
745 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
746 if (retval == ERROR_TARGET_TIMEOUT)
747 return retval;
748 if (retval != ERROR_OK) {
749 /* Some of these reads will fail as part of normal execution */
750 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
751 }
752 }
753 }
754
755 for (i = 1; i < 8; i++) {
756 if (xscale->static_low_vectors_set & (1 << i))
757 xscale->low_vectors[i] = xscale->static_low_vectors[i];
758 else {
759 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
760 if (retval == ERROR_TARGET_TIMEOUT)
761 return retval;
762 if (retval != ERROR_OK) {
763 /* Some of these reads will fail as part of normal execution */
764 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
765 }
766 }
767 }
768
769 /* calculate branches to debug handler */
770 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
771 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
772
773 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
774 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
775
776 /* invalidate and load exception vectors in mini i-cache */
777 xscale_invalidate_ic_line(target, 0x0);
778 xscale_invalidate_ic_line(target, 0xffff0000);
779
780 xscale_load_ic(target, 0x0, xscale->low_vectors);
781 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
782
783 return ERROR_OK;
784 }
785
786 static int xscale_arch_state(struct target *target)
787 {
788 struct xscale_common *xscale = target_to_xscale(target);
789 struct arm *arm = &xscale->arm;
790
791 static const char *state[] = {
792 "disabled", "enabled"
793 };
794
795 static const char *arch_dbg_reason[] = {
796 "", "\n(processor reset)", "\n(trace buffer full)"
797 };
798
799 if (arm->common_magic != ARM_COMMON_MAGIC) {
800 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
801 return ERROR_COMMAND_SYNTAX_ERROR;
802 }
803
804 arm_arch_state(target);
805 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
806 state[xscale->armv4_5_mmu.mmu_enabled],
807 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
808 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
809 arch_dbg_reason[xscale->arch_debug_reason]);
810
811 return ERROR_OK;
812 }
813
/* Periodic poll hook.  While the target is (debug-)running, a
 * successful non-consuming TX read means the debug handler has
 * signalled entry into debug state: process the entry and fire the
 * appropriate halted event callbacks.
 */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING)) {
		enum target_state previous_state = target->state;
		/* peek at TX without consuming it; debug entry consumes later */
		retval = xscale_read_tx(target, 0);
		if (retval == ERROR_OK) {

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		} else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE) {
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be performed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
850
/* Handle entry into debug state: drain the register dump the debug
 * handler sends (r0, pc, r1-r7, cpsr, then banked r8-r14 and possibly
 * spsr) into the register caches, decode the DCSR method-of-entry
 * field to set the debug reason, apply the PC fixup, and collect trace
 * data if tracing is active.
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t pc;
	uint32_t buffer[10];
	unsigned i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	retval = xscale_read_dcsr(target);
	if (retval != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	retval = xscale_receive(target, buffer, 10);
	if (retval != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	arm->core_cache->reg_list[0].dirty = 1;
	arm->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(arm->pc->value, 0, 32, buffer[1]);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++) {
		buf_set_u32(arm->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		arm->core_cache->reg_list[i].dirty = 1;
		arm->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(arm, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* an invalid mode value means the dump stream is corrupt */
	if (!is_arm_mode(arm->core_mode)) {
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
		arm_mode_name(arm->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (arm->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(arm->spsr->value, 0, 32, buffer[7]);
		arm->spsr->dirty = false;
		arm->spsr->valid = true;
	} else {
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++) {
		struct reg *r = arm_reg_current(arm, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* mark xscale regs invalid to ensure they are retrieved from the
	 * debug handler if requested */
	for (i = 0; i < xscale->reg_cache->num_regs; i++)
		xscale->reg_cache->reg_list[i].valid = 0;

	/* examine debug reason: 3 bits of DCSR starting at bit 2 */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(arm->pc->value, 0, 32);

	switch (moe) {
		case 0x0:	/* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1:	/* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2:	/* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3:	/* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4:	/* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5:	/* Vector trap occurred */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6:	/* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7:	/* Reserved (may flag Hot-Debug support) */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(arm->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1) {
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value,
				0,
				32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings
	 * read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg =
		buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
		(xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
		(xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		xscale_read_trace(target);

		/* Resume if entered debug due to buffer fill and we're still collecting
		 * trace data. Note that a debug exception due to trace buffer full
		 * can only happen in fill mode. */
		if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL) {
			if (--xscale->trace.fill_counter > 0)
				xscale_resume(target, 1, 0x0, 1, 0);
		} else	/* entered debug for other reason; reset counter */
			xscale->trace.fill_counter = 0;
	}

	return ERROR_OK;
}
1021
1022 static int xscale_halt(struct target *target)
1023 {
1024 struct xscale_common *xscale = target_to_xscale(target);
1025
1026 LOG_DEBUG("target->state: %s",
1027 target_state_name(target));
1028
1029 if (target->state == TARGET_HALTED) {
1030 LOG_DEBUG("target was already halted");
1031 return ERROR_OK;
1032 } else if (target->state == TARGET_UNKNOWN) {
1033 /* this must not happen for a xscale target */
1034 LOG_ERROR("target was in unknown state when halt was requested");
1035 return ERROR_TARGET_INVALID;
1036 } else if (target->state == TARGET_RESET)
1037 LOG_DEBUG("target->state == TARGET_RESET");
1038 else {
1039 /* assert external dbg break */
1040 xscale->external_debug_break = 1;
1041 xscale_read_dcsr(target);
1042
1043 target->debug_reason = DBG_REASON_DBGRQ;
1044 }
1045
1046 return ERROR_OK;
1047 }
1048
1049 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1050 {
1051 struct xscale_common *xscale = target_to_xscale(target);
1052 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1053 int retval;
1054
1055 if (xscale->ibcr0_used) {
1056 struct breakpoint *ibcr0_bp =
1057 breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1058
1059 if (ibcr0_bp)
1060 xscale_unset_breakpoint(target, ibcr0_bp);
1061 else {
1062 LOG_ERROR(
1063 "BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1064 exit(-1);
1065 }
1066 }
1067
1068 retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1069 if (retval != ERROR_OK)
1070 return retval;
1071
1072 return ERROR_OK;
1073 }
1074
1075 static int xscale_disable_single_step(struct target *target)
1076 {
1077 struct xscale_common *xscale = target_to_xscale(target);
1078 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1079 int retval;
1080
1081 retval = xscale_set_reg_u32(ibcr0, 0x0);
1082 if (retval != ERROR_OK)
1083 return retval;
1084
1085 return ERROR_OK;
1086 }
1087
1088 static void xscale_enable_watchpoints(struct target *target)
1089 {
1090 struct watchpoint *watchpoint = target->watchpoints;
1091
1092 while (watchpoint) {
1093 if (watchpoint->set == 0)
1094 xscale_set_watchpoint(target, watchpoint);
1095 watchpoint = watchpoint->next;
1096 }
1097 }
1098
1099 static void xscale_enable_breakpoints(struct target *target)
1100 {
1101 struct breakpoint *breakpoint = target->breakpoints;
1102
1103 /* set any pending breakpoints */
1104 while (breakpoint) {
1105 if (breakpoint->set == 0)
1106 xscale_set_breakpoint(target, breakpoint);
1107 breakpoint = breakpoint->next;
1108 }
1109 }
1110
1111 static void xscale_free_trace_data(struct xscale_common *xscale)
1112 {
1113 struct xscale_trace_data *td = xscale->trace.data;
1114 while (td) {
1115 struct xscale_trace_data *next_td = td->next;
1116 if (td->entries)
1117 free(td->entries);
1118 free(td);
1119 td = next_td;
1120 }
1121 xscale->trace.data = NULL;
1122 }
1123
1124 static int xscale_resume(struct target *target, int current,
1125 uint32_t address, int handle_breakpoints, int debug_execution)
1126 {
1127 struct xscale_common *xscale = target_to_xscale(target);
1128 struct arm *arm = &xscale->arm;
1129 uint32_t current_pc;
1130 int retval;
1131 int i;
1132
1133 LOG_DEBUG("-");
1134
1135 if (target->state != TARGET_HALTED) {
1136 LOG_WARNING("target not halted");
1137 return ERROR_TARGET_NOT_HALTED;
1138 }
1139
1140 if (!debug_execution)
1141 target_free_all_working_areas(target);
1142
1143 /* update vector tables */
1144 retval = xscale_update_vectors(target);
1145 if (retval != ERROR_OK)
1146 return retval;
1147
1148 /* current = 1: continue on current pc, otherwise continue at <address> */
1149 if (!current)
1150 buf_set_u32(arm->pc->value, 0, 32, address);
1151
1152 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1153
1154 /* if we're at the reset vector, we have to simulate the branch */
1155 if (current_pc == 0x0) {
1156 arm_simulate_step(target, NULL);
1157 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1158 }
1159
1160 /* the front-end may request us not to handle breakpoints */
1161 if (handle_breakpoints) {
1162 struct breakpoint *breakpoint;
1163 breakpoint = breakpoint_find(target,
1164 buf_get_u32(arm->pc->value, 0, 32));
1165 if (breakpoint != NULL) {
1166 uint32_t next_pc;
1167 enum trace_mode saved_trace_mode;
1168
1169 /* there's a breakpoint at the current PC, we have to step over it */
1170 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1171 xscale_unset_breakpoint(target, breakpoint);
1172
1173 /* calculate PC of next instruction */
1174 retval = arm_simulate_step(target, &next_pc);
1175 if (retval != ERROR_OK) {
1176 uint32_t current_opcode;
1177 target_read_u32(target, current_pc, &current_opcode);
1178 LOG_ERROR(
1179 "BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
1180 current_opcode);
1181 }
1182
1183 LOG_DEBUG("enable single-step");
1184 xscale_enable_single_step(target, next_pc);
1185
1186 /* restore banked registers */
1187 retval = xscale_restore_banked(target);
1188 if (retval != ERROR_OK)
1189 return retval;
1190
1191 /* send resume request */
1192 xscale_send_u32(target, 0x30);
1193
1194 /* send CPSR */
1195 xscale_send_u32(target,
1196 buf_get_u32(arm->cpsr->value, 0, 32));
1197 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1198 buf_get_u32(arm->cpsr->value, 0, 32));
1199
1200 for (i = 7; i >= 0; i--) {
1201 /* send register */
1202 xscale_send_u32(target,
1203 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1204 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1205 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1206 }
1207
1208 /* send PC */
1209 xscale_send_u32(target,
1210 buf_get_u32(arm->pc->value, 0, 32));
1211 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1212 buf_get_u32(arm->pc->value, 0, 32));
1213
1214 /* disable trace data collection in xscale_debug_entry() */
1215 saved_trace_mode = xscale->trace.mode;
1216 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1217
1218 /* wait for and process debug entry */
1219 xscale_debug_entry(target);
1220
1221 /* re-enable trace buffer, if enabled previously */
1222 xscale->trace.mode = saved_trace_mode;
1223
1224 LOG_DEBUG("disable single-step");
1225 xscale_disable_single_step(target);
1226
1227 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1228 xscale_set_breakpoint(target, breakpoint);
1229 }
1230 }
1231
1232 /* enable any pending breakpoints and watchpoints */
1233 xscale_enable_breakpoints(target);
1234 xscale_enable_watchpoints(target);
1235
1236 /* restore banked registers */
1237 retval = xscale_restore_banked(target);
1238 if (retval != ERROR_OK)
1239 return retval;
1240
1241 /* send resume request (command 0x30 or 0x31)
1242 * clean the trace buffer if it is to be enabled (0x62) */
1243 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
1244 if (xscale->trace.mode == XSCALE_TRACE_FILL) {
1245 /* If trace enabled in fill mode and starting collection of new set
1246 * of buffers, initialize buffer counter and free previous buffers */
1247 if (xscale->trace.fill_counter == 0) {
1248 xscale->trace.fill_counter = xscale->trace.buffer_fill;
1249 xscale_free_trace_data(xscale);
1250 }
1251 } else /* wrap mode; free previous buffer */
1252 xscale_free_trace_data(xscale);
1253
1254 xscale_send_u32(target, 0x62);
1255 xscale_send_u32(target, 0x31);
1256 } else
1257 xscale_send_u32(target, 0x30);
1258
1259 /* send CPSR */
1260 xscale_send_u32(target, buf_get_u32(arm->cpsr->value, 0, 32));
1261 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1262 buf_get_u32(arm->cpsr->value, 0, 32));
1263
1264 for (i = 7; i >= 0; i--) {
1265 /* send register */
1266 xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1267 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1268 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1269 }
1270
1271 /* send PC */
1272 xscale_send_u32(target, buf_get_u32(arm->pc->value, 0, 32));
1273 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1274 buf_get_u32(arm->pc->value, 0, 32));
1275
1276 target->debug_reason = DBG_REASON_NOTHALTED;
1277
1278 if (!debug_execution) {
1279 /* registers are now invalid */
1280 register_cache_invalidate(arm->core_cache);
1281 target->state = TARGET_RUNNING;
1282 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1283 } else {
1284 target->state = TARGET_DEBUG_RUNNING;
1285 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1286 }
1287
1288 LOG_DEBUG("target resumed");
1289
1290 return ERROR_OK;
1291 }
1292
/* Execute one instruction on the target via the debug handler.
 *
 * Computes the next PC on the host with arm_simulate_step(), arms a
 * hardware comparator on it, resumes the core, then waits for the
 * resulting debug re-entry.  The `current`, `address` and
 * `handle_breakpoints` parameters exist to match the caller's
 * (xscale_step) signature and are not used in this body.
 *
 * Returns ERROR_OK or the first failing step of the protocol.
 */
static int xscale_step_inner(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	retval = arm_simulate_step(target, &next_pc);
	if (retval != ERROR_OK) {
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(arm->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR(
			"BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
			current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	/* a hardware breakpoint on the computed next PC acts as the step */
	retval = xscale_enable_single_step(target, next_pc);
	if (retval != ERROR_OK)
		return retval;

	/* restore banked registers */
	retval = xscale_restore_banked(target);
	if (retval != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		retval = xscale_send_u32(target, 0x62);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_send_u32(target, 0x31);
		if (retval != ERROR_OK)
			return retval;
	} else {
		retval = xscale_send_u32(target, 0x30);
		if (retval != ERROR_OK)
			return retval;
	}

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(arm->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(arm->cpsr->value, 0, 32));

	/* the debug handler consumes r7 down to r0, then the PC */
	for (i = 7; i >= 0; i--) {
		/* send register */
		retval = xscale_send_u32(target,
				buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i,
			buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	retval = xscale_send_u32(target,
			buf_get_u32(arm->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
		buf_get_u32(arm->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	/* wait for and process debug entry */
	retval = xscale_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	retval = xscale_disable_single_step(target);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1387
1388 static int xscale_step(struct target *target, int current,
1389 uint32_t address, int handle_breakpoints)
1390 {
1391 struct arm *arm = target_to_arm(target);
1392 struct breakpoint *breakpoint = NULL;
1393
1394 uint32_t current_pc;
1395 int retval;
1396
1397 if (target->state != TARGET_HALTED) {
1398 LOG_WARNING("target not halted");
1399 return ERROR_TARGET_NOT_HALTED;
1400 }
1401
1402 /* current = 1: continue on current pc, otherwise continue at <address> */
1403 if (!current)
1404 buf_set_u32(arm->pc->value, 0, 32, address);
1405
1406 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1407
1408 /* if we're at the reset vector, we have to simulate the step */
1409 if (current_pc == 0x0) {
1410 retval = arm_simulate_step(target, NULL);
1411 if (retval != ERROR_OK)
1412 return retval;
1413 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1414 LOG_DEBUG("current pc %" PRIx32, current_pc);
1415
1416 target->debug_reason = DBG_REASON_SINGLESTEP;
1417 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1418
1419 return ERROR_OK;
1420 }
1421
1422 /* the front-end may request us not to handle breakpoints */
1423 if (handle_breakpoints)
1424 breakpoint = breakpoint_find(target,
1425 buf_get_u32(arm->pc->value, 0, 32));
1426 if (breakpoint != NULL) {
1427 retval = xscale_unset_breakpoint(target, breakpoint);
1428 if (retval != ERROR_OK)
1429 return retval;
1430 }
1431
1432 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1433 if (retval != ERROR_OK)
1434 return retval;
1435
1436 if (breakpoint)
1437 xscale_set_breakpoint(target, breakpoint);
1438
1439 LOG_DEBUG("target stepped");
1440
1441 return ERROR_OK;
1442
1443 }
1444
/* Put the target into reset.
 *
 * Asserts SRST, then programs the DCSR over JTAG (which stays usable
 * during SRST) so the core holds reset, runs in halt mode, and traps
 * the reset vector once SRST is released by xscale_deassert_reset().
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	/* honor "reset halt": request the halt while still in reset */
	if (target->reset_halt) {
		int retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1485
/* Release the target from reset.
 *
 * Clears all debug-resource bookkeeping, releases SRST, loads the
 * on-chip debug handler plus the low/high exception vectors into the
 * mini-icache, and finally lets the core run (it should immediately
 * enter the debug handler).  If reset_halt is not requested, the core
 * is resumed after the debug entry has been processed.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* reset invalidated all hardware comparators */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint) {
		if (breakpoint->type == BKPT_HARD)
			breakpoint->set = 0;
		breakpoint = breakpoint->next;
	}

	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale_free_trace_data(xscale);

	register_cache_invalidate(xscale->arm.core_cache);

	/* FIXME mark hardware watchpoints got unset too.  Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT:  *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated.  Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache.  Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt) {
			uint32_t cache_line[8];
			unsigned i;

			/* the mini-icache is filled one 32-byte line at a time */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4) {
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad the rest of the final line ("mov r8, r8" no-ops) */
			for (; i < 32; i += 4)
				cache_line[i / 4] = 0xe1a08008;

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0) {
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		}

		/* install the exception vector tables at 0x0 and 0xffff0000 */
		retval = xscale_load_ic(target, 0x0,
				xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
				xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt) {
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1613
/* Stub: per-register reads are not supported by the debug handler yet.
 * NOTE(review): returns ERROR_OK despite doing nothing — callers see
 * success; verify that is intentional before relying on it. */
static int xscale_read_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1621
/* Stub: per-register writes are not supported by the debug handler yet.
 * NOTE(review): returns ERROR_OK despite doing nothing — callers see
 * success; verify that is intentional before relying on it. */
static int xscale_write_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode, uint8_t *value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1629
1630 static int xscale_full_context(struct target *target)
1631 {
1632 struct arm *arm = target_to_arm(target);
1633
1634 uint32_t *buffer;
1635
1636 int i, j;
1637
1638 LOG_DEBUG("-");
1639
1640 if (target->state != TARGET_HALTED) {
1641 LOG_WARNING("target not halted");
1642 return ERROR_TARGET_NOT_HALTED;
1643 }
1644
1645 buffer = malloc(4 * 8);
1646
1647 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1648 * we can't enter User mode on an XScale (unpredictable),
1649 * but User shares registers with SYS
1650 */
1651 for (i = 1; i < 7; i++) {
1652 enum arm_mode mode = armv4_5_number_to_mode(i);
1653 bool valid = true;
1654 struct reg *r;
1655
1656 if (mode == ARM_MODE_USR)
1657 continue;
1658
1659 /* check if there are invalid registers in the current mode
1660 */
1661 for (j = 0; valid && j <= 16; j++) {
1662 if (!ARMV4_5_CORE_REG_MODE(arm->core_cache,
1663 mode, j).valid)
1664 valid = false;
1665 }
1666 if (valid)
1667 continue;
1668
1669 /* request banked registers */
1670 xscale_send_u32(target, 0x0);
1671
1672 /* send CPSR for desired bank mode */
1673 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1674
1675 /* get banked registers: r8 to r14; and SPSR
1676 * except in USR/SYS mode
1677 */
1678 if (mode != ARM_MODE_SYS) {
1679 /* SPSR */
1680 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1681 mode, 16);
1682
1683 xscale_receive(target, buffer, 8);
1684
1685 buf_set_u32(r->value, 0, 32, buffer[7]);
1686 r->dirty = false;
1687 r->valid = true;
1688 } else
1689 xscale_receive(target, buffer, 7);
1690
1691 /* move data from buffer to register cache */
1692 for (j = 8; j <= 14; j++) {
1693 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1694 mode, j);
1695
1696 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1697 r->dirty = false;
1698 r->valid = true;
1699 }
1700 }
1701
1702 free(buffer);
1703
1704 return ERROR_OK;
1705 }
1706
/* Flush dirty banked registers (r8-r14 and SPSR per privileged mode)
 * from the register cache back to the target.
 *
 * The protocol transfers a whole mode bank at once (command 0x1), so
 * any single dirty register in a mode triggers sending all of r8-r14
 * (and the SPSR outside SYS) for that mode.
 */
static int xscale_restore_banked(struct target *target)
{
	struct arm *arm = target_to_arm(target);

	int i, j;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written.  Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++) {
		enum arm_mode mode = armv4_5_number_to_mode(i);
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++) {
			if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j).dirty)
				goto dirty;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS) {
			if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, 16).dirty)
				goto dirty;
		}

		/* there's nothing to flush for this mode */
		continue;

dirty:
		/* command 0x1:  "send banked registers" */
		xscale_send_u32(target, 0x1);

		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}

		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, 16);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}
	}

	return ERROR_OK;
}
1775
1776 static int xscale_read_memory(struct target *target, uint32_t address,
1777 uint32_t size, uint32_t count, uint8_t *buffer)
1778 {
1779 struct xscale_common *xscale = target_to_xscale(target);
1780 uint32_t *buf32;
1781 uint32_t i;
1782 int retval;
1783
1784 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1785 address,
1786 size,
1787 count);
1788
1789 if (target->state != TARGET_HALTED) {
1790 LOG_WARNING("target not halted");
1791 return ERROR_TARGET_NOT_HALTED;
1792 }
1793
1794 /* sanitize arguments */
1795 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1796 return ERROR_COMMAND_SYNTAX_ERROR;
1797
1798 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1799 return ERROR_TARGET_UNALIGNED_ACCESS;
1800
1801 /* send memory read request (command 0x1n, n: access size) */
1802 retval = xscale_send_u32(target, 0x10 | size);
1803 if (retval != ERROR_OK)
1804 return retval;
1805
1806 /* send base address for read request */
1807 retval = xscale_send_u32(target, address);
1808 if (retval != ERROR_OK)
1809 return retval;
1810
1811 /* send number of requested data words */
1812 retval = xscale_send_u32(target, count);
1813 if (retval != ERROR_OK)
1814 return retval;
1815
1816 /* receive data from target (count times 32-bit words in host endianness) */
1817 buf32 = malloc(4 * count);
1818 retval = xscale_receive(target, buf32, count);
1819 if (retval != ERROR_OK) {
1820 free(buf32);
1821 return retval;
1822 }
1823
1824 /* extract data from host-endian buffer into byte stream */
1825 for (i = 0; i < count; i++) {
1826 switch (size) {
1827 case 4:
1828 target_buffer_set_u32(target, buffer, buf32[i]);
1829 buffer += 4;
1830 break;
1831 case 2:
1832 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1833 buffer += 2;
1834 break;
1835 case 1:
1836 *buffer++ = buf32[i] & 0xff;
1837 break;
1838 default:
1839 LOG_ERROR("invalid read size");
1840 return ERROR_COMMAND_SYNTAX_ERROR;
1841 }
1842 }
1843
1844 free(buf32);
1845
1846 /* examine DCSR, to see if Sticky Abort (SA) got set */
1847 retval = xscale_read_dcsr(target);
1848 if (retval != ERROR_OK)
1849 return retval;
1850 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1851 /* clear SA bit */
1852 retval = xscale_send_u32(target, 0x60);
1853 if (retval != ERROR_OK)
1854 return retval;
1855
1856 return ERROR_TARGET_DATA_ABORT;
1857 }
1858
1859 return ERROR_OK;
1860 }
1861
1862 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1863 uint32_t size, uint32_t count, uint8_t *buffer)
1864 {
1865 struct xscale_common *xscale = target_to_xscale(target);
1866
1867 /* with MMU inactive, there are only physical addresses */
1868 if (!xscale->armv4_5_mmu.mmu_enabled)
1869 return xscale_read_memory(target, address, size, count, buffer);
1870
1871 /** \todo: provide a non-stub implementation of this routine. */
1872 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1873 target_name(target), __func__);
1874 return ERROR_FAIL;
1875 }
1876
1877 static int xscale_write_memory(struct target *target, uint32_t address,
1878 uint32_t size, uint32_t count, const uint8_t *buffer)
1879 {
1880 struct xscale_common *xscale = target_to_xscale(target);
1881 int retval;
1882
1883 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1884 address,
1885 size,
1886 count);
1887
1888 if (target->state != TARGET_HALTED) {
1889 LOG_WARNING("target not halted");
1890 return ERROR_TARGET_NOT_HALTED;
1891 }
1892
1893 /* sanitize arguments */
1894 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1895 return ERROR_COMMAND_SYNTAX_ERROR;
1896
1897 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1898 return ERROR_TARGET_UNALIGNED_ACCESS;
1899
1900 /* send memory write request (command 0x2n, n: access size) */
1901 retval = xscale_send_u32(target, 0x20 | size);
1902 if (retval != ERROR_OK)
1903 return retval;
1904
1905 /* send base address for read request */
1906 retval = xscale_send_u32(target, address);
1907 if (retval != ERROR_OK)
1908 return retval;
1909
1910 /* send number of requested data words to be written*/
1911 retval = xscale_send_u32(target, count);
1912 if (retval != ERROR_OK)
1913 return retval;
1914
1915 /* extract data from host-endian buffer into byte stream */
1916 #if 0
1917 for (i = 0; i < count; i++) {
1918 switch (size) {
1919 case 4:
1920 value = target_buffer_get_u32(target, buffer);
1921 xscale_send_u32(target, value);
1922 buffer += 4;
1923 break;
1924 case 2:
1925 value = target_buffer_get_u16(target, buffer);
1926 xscale_send_u32(target, value);
1927 buffer += 2;
1928 break;
1929 case 1:
1930 value = *buffer;
1931 xscale_send_u32(target, value);
1932 buffer += 1;
1933 break;
1934 default:
1935 LOG_ERROR("should never get here");
1936 exit(-1);
1937 }
1938 }
1939 #endif
1940 retval = xscale_send(target, buffer, count, size);
1941 if (retval != ERROR_OK)
1942 return retval;
1943
1944 /* examine DCSR, to see if Sticky Abort (SA) got set */
1945 retval = xscale_read_dcsr(target);
1946 if (retval != ERROR_OK)
1947 return retval;
1948 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1949 /* clear SA bit */
1950 retval = xscale_send_u32(target, 0x60);
1951 if (retval != ERROR_OK)
1952 return retval;
1953
1954 LOG_ERROR("data abort writing memory");
1955 return ERROR_TARGET_DATA_ABORT;
1956 }
1957
1958 return ERROR_OK;
1959 }
1960
1961 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1962 uint32_t size, uint32_t count, const uint8_t *buffer)
1963 {
1964 struct xscale_common *xscale = target_to_xscale(target);
1965
1966 /* with MMU inactive, there are only physical addresses */
1967 if (!xscale->armv4_5_mmu.mmu_enabled)
1968 return xscale_write_memory(target, address, size, count, buffer);
1969
1970 /** \todo: provide a non-stub implementation of this routine. */
1971 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1972 target_name(target), __func__);
1973 return ERROR_FAIL;
1974 }
1975
1976 static int xscale_get_ttb(struct target *target, uint32_t *result)
1977 {
1978 struct xscale_common *xscale = target_to_xscale(target);
1979 uint32_t ttb;
1980 int retval;
1981
1982 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1983 if (retval != ERROR_OK)
1984 return retval;
1985 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1986
1987 *result = ttb;
1988
1989 return ERROR_OK;
1990 }
1991
/* Disable the MMU and/or caches, as selected by the flag arguments.
 *
 * The DCache must be cleaned (command 0x50, using the reserved
 * cache_clean_address region) and invalidated (0x51) before its enable
 * bit is cleared; the ICache only needs invalidation (0x52).  Ends
 * with cpwait (0x53) so the cp15 write has taken effect.
 */
static int xscale_disable_mmu_caches(struct target *target, int mmu,
	int d_u_cache, int i_cache)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;
	int retval;

	/* read cp15 control register */
	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	if (retval != ERROR_OK)
		return retval;
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

	if (mmu)
		cp15_control &= ~0x1U;

	if (d_u_cache) {
		/* clean DCache */
		retval = xscale_send_u32(target, 0x50);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_send_u32(target, xscale->cache_clean_address);
		if (retval != ERROR_OK)
			return retval;

		/* invalidate DCache */
		retval = xscale_send_u32(target, 0x51);
		if (retval != ERROR_OK)
			return retval;

		cp15_control &= ~0x4U;
	}

	if (i_cache) {
		/* invalidate ICache */
		retval = xscale_send_u32(target, 0x52);
		if (retval != ERROR_OK)
			return retval;
		cp15_control &= ~0x1000U;
	}

	/* write new cp15 control register */
	retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
	if (retval != ERROR_OK)
		return retval;

	/* execute cpwait to ensure outstanding operations complete */
	retval = xscale_send_u32(target, 0x53);
	return retval;
}
2042
2043 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2044 int d_u_cache, int i_cache)
2045 {
2046 struct xscale_common *xscale = target_to_xscale(target);
2047 uint32_t cp15_control;
2048 int retval;
2049
2050 /* read cp15 control register */
2051 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2052 if (retval != ERROR_OK)
2053 return retval;
2054 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2055
2056 if (mmu)
2057 cp15_control |= 0x1U;
2058
2059 if (d_u_cache)
2060 cp15_control |= 0x4U;
2061
2062 if (i_cache)
2063 cp15_control |= 0x1000U;
2064
2065 /* write new cp15 control register */
2066 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2067 if (retval != ERROR_OK)
2068 return retval;
2069
2070 /* execute cpwait to ensure outstanding operations complete */
2071 retval = xscale_send_u32(target, 0x53);
2072 return retval;
2073 }
2074
/* Activate a breakpoint on the halted target.
 *
 * BKPT_HARD uses one of the two IBCR comparators; breakpoint->set
 * records which one (1 = IBCR0, 2 = IBCR1).  BKPT_SOFT saves the
 * original instruction and patches in a BKPT opcode (32-bit ARM or
 * 16-bit Thumb, chosen by breakpoint->length), then cleans/invalidates
 * the caches so the core fetches the patched instruction.
 */
static int xscale_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* bit 0 of the IBCR value is the comparator enable */
		uint32_t value = breakpoint->address | 1;
		if (!xscale->ibcr0_used) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			breakpoint->set = 1;	/* breakpoint set on first breakpoint register */
		} else if (!xscale->ibcr1_used) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			breakpoint->set = 2;	/* breakpoint set on second breakpoint register */
		} else {/* bug: availability previously verified in xscale_add_breakpoint() */
			LOG_ERROR("BUG: no hardware comparator available");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
	} else if (breakpoint->type == BKPT_SOFT) {
		if (breakpoint->length == 4) {
			/* keep the original instruction in target endianness */
			retval = target_read_memory(target, breakpoint->address, 4, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness
			 *(arm7_9->arm_bkpt is host endian) */
			retval = target_write_u32(target, breakpoint->address,
					xscale->arm_bkpt);
			if (retval != ERROR_OK)
				return retval;
		} else {
			/* keep the original instruction in target endianness */
			retval = target_read_memory(target, breakpoint->address, 2, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness
			 *(arm7_9->arm_bkpt is host endian) */
			retval = target_write_u16(target, breakpoint->address,
					xscale->thumb_bkpt);
			if (retval != ERROR_OK)
				return retval;
		}
		breakpoint->set = 1;

		/* flush the patched address out of DCache and refetch */
		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}
2141
2142 static int xscale_add_breakpoint(struct target *target,
2143 struct breakpoint *breakpoint)
2144 {
2145 struct xscale_common *xscale = target_to_xscale(target);
2146
2147 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1)) {
2148 LOG_ERROR("no breakpoint unit available for hardware breakpoint");
2149 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2150 }
2151
2152 if ((breakpoint->length != 2) && (breakpoint->length != 4)) {
2153 LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2154 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2155 }
2156
2157 if (breakpoint->type == BKPT_HARD)
2158 xscale->ibcr_available--;
2159
2160 return xscale_set_breakpoint(target, breakpoint);
2161 }
2162
2163 static int xscale_unset_breakpoint(struct target *target,
2164 struct breakpoint *breakpoint)
2165 {
2166 int retval;
2167 struct xscale_common *xscale = target_to_xscale(target);
2168
2169 if (target->state != TARGET_HALTED) {
2170 LOG_WARNING("target not halted");
2171 return ERROR_TARGET_NOT_HALTED;
2172 }
2173
2174 if (!breakpoint->set) {
2175 LOG_WARNING("breakpoint not set");
2176 return ERROR_OK;
2177 }
2178
2179 if (breakpoint->type == BKPT_HARD) {
2180 if (breakpoint->set == 1) {
2181 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2182 xscale->ibcr0_used = 0;
2183 } else if (breakpoint->set == 2) {
2184 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2185 xscale->ibcr1_used = 0;
2186 }
2187 breakpoint->set = 0;
2188 } else {
2189 /* restore original instruction (kept in target endianness) */
2190 if (breakpoint->length == 4) {
2191 retval = target_write_memory(target, breakpoint->address, 4, 1,
2192 breakpoint->orig_instr);
2193 if (retval != ERROR_OK)
2194 return retval;
2195 } else {
2196 retval = target_write_memory(target, breakpoint->address, 2, 1,
2197 breakpoint->orig_instr);
2198 if (retval != ERROR_OK)
2199 return retval;
2200 }
2201 breakpoint->set = 0;
2202
2203 xscale_send_u32(target, 0x50); /* clean dcache */
2204 xscale_send_u32(target, xscale->cache_clean_address);
2205 xscale_send_u32(target, 0x51); /* invalidate dcache */
2206 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2207 }
2208
2209 return ERROR_OK;
2210 }
2211
2212 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2213 {
2214 struct xscale_common *xscale = target_to_xscale(target);
2215
2216 if (target->state != TARGET_HALTED) {
2217 LOG_ERROR("target not halted");
2218 return ERROR_TARGET_NOT_HALTED;
2219 }
2220
2221 if (breakpoint->set)
2222 xscale_unset_breakpoint(target, breakpoint);
2223
2224 if (breakpoint->type == BKPT_HARD)
2225 xscale->ibcr_available++;
2226
2227 return ERROR_OK;
2228 }
2229
/* Program an already-added watchpoint into the debug hardware.
 * DBR0/DBR1 hold the watch address (and, for multi-word watchpoints,
 * DBR1 holds an address mask); DBCON enables the comparators and selects
 * read/write/access mode.  watchpoint->set records which DBR was claimed
 * (1 = DBR0, 2 = DBR1). */
static int xscale_set_watchpoint(struct target *target,
	struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t enable = 0;
	struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
	uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);

	if (target->state != TARGET_HALTED) {
		LOG_ERROR("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* map the access type onto the two-bit DBCON enable field */
	switch (watchpoint->rw) {
		case WPT_READ:
			enable = 0x3;
			break;
		case WPT_ACCESS:
			enable = 0x2;
			break;
		case WPT_WRITE:
			enable = 0x1;
			break;
		default:
			/* NOTE(review): execution falls through with enable == 0,
			 * so the watchpoint would be programmed disabled; confirm
			 * whether an error return is intended here instead */
			LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
	}

	/* For watchpoint across more than one word, both DBR registers must
	   be enlisted, with the second used as a mask. */
	if (watchpoint->length > 4) {
		if (xscale->dbr0_used || xscale->dbr1_used) {
			LOG_ERROR("BUG: sufficient hardware comparators unavailable");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}

		/* Write mask value to DBR1, based on the length argument.
		 * Address bits ignored by the comparator are those set in mask. */
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
			watchpoint->length - 1);
		xscale->dbr1_used = 1;
		enable |= 0x100;	/* DBCON[M] */
	}

	/* claim a free DBR, program the address, and enable it in DBCON
	 * (DBR1's enable bits sit two positions higher than DBR0's) */
	if (!xscale->dbr0_used) {
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
		dbcon_value |= enable;
		xscale_set_reg_u32(dbcon, dbcon_value);
		watchpoint->set = 1;
		xscale->dbr0_used = 1;
	} else if (!xscale->dbr1_used) {
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
		dbcon_value |= enable << 2;
		xscale_set_reg_u32(dbcon, dbcon_value);
		watchpoint->set = 2;
		xscale->dbr1_used = 1;
	} else {
		LOG_ERROR("BUG: no hardware comparator available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	return ERROR_OK;
}
2292
2293 static int xscale_add_watchpoint(struct target *target,
2294 struct watchpoint *watchpoint)
2295 {
2296 struct xscale_common *xscale = target_to_xscale(target);
2297
2298 if (xscale->dbr_available < 1) {
2299 LOG_ERROR("no more watchpoint registers available");
2300 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2301 }
2302
2303 if (watchpoint->value)
2304 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2305
2306 /* check that length is a power of two */
2307 for (uint32_t len = watchpoint->length; len != 1; len /= 2) {
2308 if (len % 2) {
2309 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2310 return ERROR_COMMAND_ARGUMENT_INVALID;
2311 }
2312 }
2313
2314 if (watchpoint->length == 4) { /* single word watchpoint */
2315 xscale->dbr_available--;/* one DBR reg used */
2316 return ERROR_OK;
2317 }
2318
2319 /* watchpoints across multiple words require both DBR registers */
2320 if (xscale->dbr_available < 2) {
2321 LOG_ERROR("insufficient watchpoint registers available");
2322 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2323 }
2324
2325 if (watchpoint->length > watchpoint->address) {
2326 LOG_ERROR("xscale does not support watchpoints with length "
2327 "greater than address");
2328 return ERROR_COMMAND_ARGUMENT_INVALID;
2329 }
2330
2331 xscale->dbr_available = 0;
2332 return ERROR_OK;
2333 }
2334
2335 static int xscale_unset_watchpoint(struct target *target,
2336 struct watchpoint *watchpoint)
2337 {
2338 struct xscale_common *xscale = target_to_xscale(target);
2339 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2340 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2341
2342 if (target->state != TARGET_HALTED) {
2343 LOG_WARNING("target not halted");
2344 return ERROR_TARGET_NOT_HALTED;
2345 }
2346
2347 if (!watchpoint->set) {
2348 LOG_WARNING("breakpoint not set");
2349 return ERROR_OK;
2350 }
2351
2352 if (watchpoint->set == 1) {
2353 if (watchpoint->length > 4) {
2354 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2355 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2356 } else
2357 dbcon_value &= ~0x3;
2358
2359 xscale_set_reg_u32(dbcon, dbcon_value);
2360 xscale->dbr0_used = 0;
2361 } else if (watchpoint->set == 2) {
2362 dbcon_value &= ~0xc;
2363 xscale_set_reg_u32(dbcon, dbcon_value);
2364 xscale->dbr1_used = 0;
2365 }
2366 watchpoint->set = 0;
2367
2368 return ERROR_OK;
2369 }
2370
2371 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2372 {
2373 struct xscale_common *xscale = target_to_xscale(target);
2374
2375 if (target->state != TARGET_HALTED) {
2376 LOG_ERROR("target not halted");
2377 return ERROR_TARGET_NOT_HALTED;
2378 }
2379
2380 if (watchpoint->set)
2381 xscale_unset_watchpoint(target, watchpoint);
2382
2383 if (watchpoint->length > 4)
2384 xscale->dbr_available++;/* both DBR regs now available */
2385
2386 xscale->dbr_available++;
2387
2388 return ERROR_OK;
2389 }
2390
2391 static int xscale_get_reg(struct reg *reg)
2392 {
2393 struct xscale_reg *arch_info = reg->arch_info;
2394 struct target *target = arch_info->target;
2395 struct xscale_common *xscale = target_to_xscale(target);
2396
2397 /* DCSR, TX and RX are accessible via JTAG */
2398 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2399 return xscale_read_dcsr(arch_info->target);
2400 else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2401 /* 1 = consume register content */
2402 return xscale_read_tx(arch_info->target, 1);
2403 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2404 /* can't read from RX register (host -> debug handler) */
2405 return ERROR_OK;
2406 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2407 /* can't (explicitly) read from TXRXCTRL register */
2408 return ERROR_OK;
2409 } else {/* Other DBG registers have to be transfered by the debug handler
2410 * send CP read request (command 0x40) */
2411 xscale_send_u32(target, 0x40);
2412
2413 /* send CP register number */
2414 xscale_send_u32(target, arch_info->dbg_handler_number);
2415
2416 /* read register value */
2417 xscale_read_tx(target, 1);
2418 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2419
2420 reg->dirty = 0;
2421 reg->valid = 1;
2422 }
2423
2424 return ERROR_OK;
2425 }
2426
2427 static int xscale_set_reg(struct reg *reg, uint8_t *buf)
2428 {
2429 struct xscale_reg *arch_info = reg->arch_info;
2430 struct target *target = arch_info->target;
2431 struct xscale_common *xscale = target_to_xscale(target);
2432 uint32_t value = buf_get_u32(buf, 0, 32);
2433
2434 /* DCSR, TX and RX are accessible via JTAG */
2435 if (strcmp(reg->name, "XSCALE_DCSR") == 0) {
2436 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2437 return xscale_write_dcsr(arch_info->target, -1, -1);
2438 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2439 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2440 return xscale_write_rx(arch_info->target);
2441 } else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2442 /* can't write to TX register (debug-handler -> host) */
2443 return ERROR_OK;
2444 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2445 /* can't (explicitly) write to TXRXCTRL register */
2446 return ERROR_OK;
2447 } else {/* Other DBG registers have to be transfered by the debug handler
2448 * send CP write request (command 0x41) */
2449 xscale_send_u32(target, 0x41);
2450
2451 /* send CP register number */
2452 xscale_send_u32(target, arch_info->dbg_handler_number);
2453
2454 /* send CP register value */
2455 xscale_send_u32(target, value);
2456 buf_set_u32(reg->value, 0, 32, value);
2457 }
2458
2459 return ERROR_OK;
2460 }
2461
2462 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2463 {
2464 struct xscale_common *xscale = target_to_xscale(target);
2465 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2466 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2467
2468 /* send CP write request (command 0x41) */
2469 xscale_send_u32(target, 0x41);
2470
2471 /* send CP register number */
2472 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2473
2474 /* send CP register value */
2475 xscale_send_u32(target, value);
2476 buf_set_u32(dcsr->value, 0, 32, value);
2477
2478 return ERROR_OK;
2479 }
2480
2481 static int xscale_read_trace(struct target *target)
2482 {
2483 struct xscale_common *xscale = target_to_xscale(target);
2484 struct arm *arm = &xscale->arm;
2485 struct xscale_trace_data **trace_data_p;
2486
2487 /* 258 words from debug handler
2488 * 256 trace buffer entries
2489 * 2 checkpoint addresses
2490 */
2491 uint32_t trace_buffer[258];
2492 int is_address[256];
2493 int i, j;
2494 unsigned int num_checkpoints = 0;
2495
2496 if (target->state != TARGET_HALTED) {
2497 LOG_WARNING("target must be stopped to read trace data");
2498 return ERROR_TARGET_NOT_HALTED;
2499 }
2500
2501 /* send read trace buffer command (command 0x61) */
2502 xscale_send_u32(target, 0x61);
2503
2504 /* receive trace buffer content */
2505 xscale_receive(target, trace_buffer, 258);
2506
2507 /* parse buffer backwards to identify address entries */
2508 for (i = 255; i >= 0; i--) {
2509 /* also count number of checkpointed entries */
2510 if ((trace_buffer[i] & 0xe0) == 0xc0)
2511 num_checkpoints++;
2512
2513 is_address[i] = 0;
2514 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2515 ((trace_buffer[i] & 0xf0) == 0xd0)) {
2516 if (i > 0)
2517 is_address[--i] = 1;
2518 if (i > 0)
2519 is_address[--i] = 1;
2520 if (i > 0)
2521 is_address[--i] = 1;
2522 if (i > 0)
2523 is_address[--i] = 1;
2524 }
2525 }
2526
2527
2528 /* search first non-zero entry that is not part of an address */
2529 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2530 ;
2531
2532 if (j == 256) {
2533 LOG_DEBUG("no trace data collected");
2534 return ERROR_XSCALE_NO_TRACE_DATA;
2535 }
2536
2537 /* account for possible partial address at buffer start (wrap mode only) */
2538 if (is_address[0]) { /* first entry is address; complete set of 4? */
2539 i = 1;
2540 while (i < 4)
2541 if (!is_address[i++])
2542 break;
2543 if (i < 4)
2544 j += i; /* partial address; can't use it */
2545 }
2546
2547 /* if first valid entry is indirect branch, can't use that either (no address) */
2548 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2549 j++;
2550
2551 /* walk linked list to terminating entry */
2552 for (trace_data_p = &xscale->trace.data; *trace_data_p;
2553 trace_data_p = &(*trace_data_p)->next)
2554 ;
2555
2556 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2557 (*trace_data_p)->next = NULL;
2558 (*trace_data_p)->chkpt0 = trace_buffer[256];
2559 (*trace_data_p)->chkpt1 = trace_buffer[257];
2560 (*trace_data_p)->last_instruction = buf_get_u32(arm->pc->value, 0, 32);
2561 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2562 (*trace_data_p)->depth = 256 - j;
2563 (*trace_data_p)->num_checkpoints = num_checkpoints;
2564
2565 for (i = j; i < 256; i++) {
2566 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2567 if (is_address[i])
2568 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2569 else
2570 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2571 }
2572
2573 return ERROR_OK;
2574 }
2575
/* Fetch and disassemble the instruction at 'pc' from the loaded trace
 * image (xscale->trace.image), honoring the current trace core state
 * (ARM: 4-byte fetch, Thumb: 2-byte fetch).
 * Returns ERROR_OK with 'instruction' filled in, or
 * ERROR_TRACE_IMAGE_UNAVAILABLE / ERROR_TRACE_INSTRUCTION_UNAVAILABLE. */
static int xscale_read_instruction(struct target *target, uint32_t pc,
	struct arm_instruction *instruction)
{
	struct xscale_common *const xscale = target_to_xscale(target);
	int i;
	int section = -1;
	size_t size_read;
	uint32_t opcode;
	int retval;

	if (!xscale->trace.image)
		return ERROR_TRACE_IMAGE_UNAVAILABLE;

	/* search for the section the current instruction belongs to */
	for (i = 0; i < xscale->trace.image->num_sections; i++) {
		if ((xscale->trace.image->sections[i].base_address <= pc) &&
			(xscale->trace.image->sections[i].base_address +
			xscale->trace.image->sections[i].size > pc)) {
			section = i;
			break;
		}
	}

	if (section == -1) {
		/* current instruction couldn't be found in the image */
		return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
	}

	if (xscale->trace.core_state == ARM_STATE_ARM) {
		uint8_t buf[4];
		/* offset within the section = pc - section base */
		retval = image_read_section(xscale->trace.image, section,
				pc - xscale->trace.image->sections[section].base_address,
				4, buf, &size_read);
		if (retval != ERROR_OK) {
			LOG_ERROR("error while reading instruction");
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u32(target, buf);
		arm_evaluate_opcode(opcode, pc, instruction);
	} else if (xscale->trace.core_state == ARM_STATE_THUMB) {
		uint8_t buf[2];
		retval = image_read_section(xscale->trace.image, section,
				pc - xscale->trace.image->sections[section].base_address,
				2, buf, &size_read);
		if (retval != ERROR_OK) {
			LOG_ERROR("error while reading instruction");
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u16(target, buf);
		thumb_evaluate_opcode(opcode, pc, instruction);
	} else {
		/* NOTE(review): exit() in a library path is drastic — callers do
		 * check the return code, so returning an error might be
		 * preferable; left unchanged here */
		LOG_ERROR("BUG: unknown core state encountered");
		exit(-1);
	}

	return ERROR_OK;
}
2633
2634 /* Extract address encoded into trace data.
2635 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2636 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2637 int i, uint32_t *target)
2638 {
2639 /* if there are less than four entries prior to the indirect branch message
2640 * we can't extract the address */
2641 if (i < 4)
2642 *target = 0;
2643 else {
2644 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2645 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2646 }
2647 }
2648
2649 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2650 struct arm_instruction *instruction,
2651 struct command_context *cmd_ctx)
2652 {
2653 int retval = xscale_read_instruction(target, pc, instruction);
2654 if (retval == ERROR_OK)
2655 command_print(cmd_ctx, "%s", instruction->text);
2656 else
2657 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2658 }
2659
/* Decode and display the collected trace buffers.
 * Walks each buffer on the xscale->trace.data list, reconstructing the
 * executed instruction stream from the loaded trace image: branch and
 * exception messages supply targets (indirect-branch targets come from
 * address entries in the buffer, checkpointed branches can fall back on
 * the checkpoint registers), and the low nybble of each message byte
 * gives the number of instructions executed since the previous message.
 * Finally prints the instructions between the last trace message and the
 * pc captured when the trace was read. */
static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct xscale_trace_data *trace_data = xscale->trace.data;
	int i, retval;
	uint32_t breakpoint_pc;
	struct arm_instruction instruction;
	uint32_t current_pc = 0;/* initialized when address determined */

	if (!xscale->trace.image)
		LOG_WARNING("No trace image loaded; use 'xscale trace_image'");

	/* loop for each trace buffer that was loaded from target */
	while (trace_data) {
		int chkpt = 0;	/* incremented as checkpointed entries found */
		int j;

		/* FIXME: set this to correct mode when trace buffer is first enabled */
		xscale->trace.core_state = ARM_STATE_ARM;

		/* loop for each entry in this trace buffer */
		for (i = 0; i < trace_data->depth; i++) {
			int exception = 0;
			uint32_t chkpt_reg = 0x0;
			uint32_t branch_target = 0;
			int count;

			/* trace entry type is upper nybble of 'message byte' */
			int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;

			/* Target addresses of indirect branches are written into buffer
			 * before the message byte representing the branch. Skip past it */
			if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
				continue;

			switch (trace_msg_type) {
				case 0:	/* Exceptions */
				case 1:
				case 2:
				case 3:
				case 4:
				case 5:
				case 6:
				case 7:
					/* exception number is encoded in bits [6:4] */
					exception = (trace_data->entries[i].data & 0x70) >> 4;

					/* FIXME: vector table may be at ffff0000 */
					branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
					break;

				case 8:	/* Direct Branch */
					break;

				case 9:	/* Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					break;

				case 13:	/* Checkpointed Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1;	/* 2 chkpts, this is oldest */
					else
						chkpt_reg = trace_data->chkpt0;	/* 1 chkpt, or 2 and newest */

					chkpt++;
					break;

				case 12:	/* Checkpointed Direct Branch */
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1;	/* 2 chkpts, this is oldest */
					else
						chkpt_reg = trace_data->chkpt0;	/* 1 chkpt, or 2 and newest */

					/* if no current_pc, checkpoint will be starting point */
					if (current_pc == 0)
						branch_target = chkpt_reg;

					chkpt++;
					break;

				case 15:/* Roll-over */
					break;

				default:/* Reserved */
					LOG_WARNING("trace is suspect: invalid trace message byte");
					continue;

			}

			/* If we don't have the current_pc yet, but we did get the branch target
			 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
			 * then we can start displaying instructions at the next iteration, with
			 * branch_target as the starting point.
			 */
			if (current_pc == 0) {
				current_pc = branch_target;	/* remains 0 unless branch_target obtained */
				continue;
			}

			/* We have current_pc. Read and display the instructions from the image.
			 * First, display count instructions (lower nybble of message byte). */
			count = trace_data->entries[i].data & 0x0f;
			for (j = 0; j < count; j++) {
				xscale_display_instruction(target, current_pc, &instruction,
					cmd_ctx);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			/* An additional instruction is implicitly added to count for
			 * rollover and some exceptions: undef, swi, prefetch abort. */
			if ((trace_msg_type == 15) || (exception > 0 && exception < 4)) {
				xscale_display_instruction(target, current_pc, &instruction,
					cmd_ctx);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			if (trace_msg_type == 15)	/* rollover */
				continue;

			if (exception) {
				command_print(cmd_ctx, "--- exception %i ---", exception);
				continue;
			}

			/* not exception or rollover; next instruction is a branch and is
			 * not included in the count */
			xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);

			/* for direct branches, extract branch destination from instruction */
			if ((trace_msg_type == 8) || (trace_msg_type == 12)) {
				retval = xscale_read_instruction(target, current_pc, &instruction);
				if (retval == ERROR_OK)
					current_pc = instruction.info.b_bl_bx_blx.target_address;
				else
					current_pc = 0;	/* branch destination unknown */

				/* direct branch w/ checkpoint; can also get from checkpoint reg */
				if (trace_msg_type == 12) {
					if (current_pc == 0)
						current_pc = chkpt_reg;
					else if (current_pc != chkpt_reg)	/* sanity check */
						LOG_WARNING("trace is suspect: checkpoint register "
							"inconsistent with adddress from image");
				}

				if (current_pc == 0)
					command_print(cmd_ctx, "address unknown");

				continue;
			}

			/* indirect branch; the branch destination was read from trace buffer */
			if ((trace_msg_type == 9) || (trace_msg_type == 13)) {
				current_pc = branch_target;

				/* sanity check (checkpoint reg is redundant) */
				if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
					LOG_WARNING("trace is suspect: checkpoint register "
						"inconsistent with address from trace buffer");
			}

		}	/* END: for (i = 0; i < trace_data->depth; i++) */

		breakpoint_pc = trace_data->last_instruction;	/* used below */
		trace_data = trace_data->next;

	}	/* END: while (trace_data) */

	/* Finally... display all instructions up to the value of the pc when the
	 * debug break occurred (saved when trace data was collected from target).
	 * This is necessary because the trace only records execution branches and 16
	 * consecutive instructions (rollovers), so last few typically missed.
	 */
	if (current_pc == 0)
		return ERROR_OK;/* current_pc was never found; also guards
				 * breakpoint_pc, which is only assigned inside
				 * the while loop above */

	/* how many instructions remaining? */
	int gap_count = (breakpoint_pc - current_pc) /
		(xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);

	/* should never be negative or over 16, but verify */
	if (gap_count < 0 || gap_count > 16) {
		LOG_WARNING("trace is suspect: excessive gap at end of trace");
		return ERROR_OK;/* bail; large number or negative value no good */
	}

	/* display remaining instructions */
	for (i = 0; i < gap_count; i++) {
		xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
		current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
	}

	return ERROR_OK;
}
2858
/* accessors wired into every entry of the XScale register cache
 * (see xscale_build_reg_cache) */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2863
2864 static void xscale_build_reg_cache(struct target *target)
2865 {
2866 struct xscale_common *xscale = target_to_xscale(target);
2867 struct arm *arm = &xscale->arm;
2868 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2869 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2870 int i;
2871 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2872
2873 (*cache_p) = arm_build_reg_cache(target, arm);
2874
2875 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2876 cache_p = &(*cache_p)->next;
2877
2878 /* fill in values for the xscale reg cache */
2879 (*cache_p)->name = "XScale registers";
2880 (*cache_p)->next = NULL;
2881 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2882 (*cache_p)->num_regs = num_regs;
2883
2884 for (i = 0; i < num_regs; i++) {
2885 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2886 (*cache_p)->reg_list[i].value = calloc(4, 1);
2887 (*cache_p)->reg_list[i].dirty = 0;
2888 (*cache_p)->reg_list[i].valid = 0;
2889 (*cache_p)->reg_list[i].size = 32;
2890 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2891 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2892 arch_info[i] = xscale_reg_arch_info[i];
2893 arch_info[i].target = target;
2894 }
2895
2896 xscale->reg_cache = (*cache_p);
2897 }
2898
/* target_type init_target hook: just populate the register cache.
 * (cmd_ctx is unused; the debug handler is installed later.) */
static int xscale_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2905
2906 static int xscale_init_arch_info(struct target *target,
2907 struct xscale_common *xscale, struct jtag_tap *tap)
2908 {
2909 struct arm *arm;
2910 uint32_t high_reset_branch, low_reset_branch;
2911 int i;
2912
2913 arm = &xscale->arm;
2914
2915 /* store architecture specfic data */
2916 xscale->common_magic = XSCALE_COMMON_MAGIC;
2917
2918 /* PXA3xx with 11 bit IR shifts the JTAG instructions */
2919 if (tap->ir_length == 11)
2920 xscale->xscale_variant = XSCALE_PXA3XX;
2921 else
2922 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
2923
2924 /* the debug handler isn't installed (and thus not running) at this time */
2925 xscale->handler_address = 0xfe000800;
2926
2927 /* clear the vectors we keep locally for reference */
2928 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2929 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2930
2931 /* no user-specified vectors have been configured yet */
2932 xscale->static_low_vectors_set = 0x0;
2933 xscale->static_high_vectors_set = 0x0;
2934
2935 /* calculate branches to debug handler */
2936 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2937 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2938
2939 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2940 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2941
2942 for (i = 1; i <= 7; i++) {
2943 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2944 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2945 }
2946
2947 /* 64kB aligned region used for DCache cleaning */
2948 xscale->cache_clean_address = 0xfffe0000;
2949
2950 xscale->hold_rst = 0;
2951 xscale->external_debug_break = 0;
2952
2953 xscale->ibcr_available = 2;
2954 xscale->ibcr0_used = 0;
2955 xscale->ibcr1_used = 0;
2956
2957 xscale->dbr_available = 2;
2958 xscale->dbr0_used = 0;
2959 xscale->dbr1_used = 0;
2960
2961 LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
2962 target_name(target));
2963
2964 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2965 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2966
2967 xscale->vector_catch = 0x1;
2968
2969 xscale->trace.data = NULL;
2970 xscale->trace.image = NULL;
2971 xscale->trace.mode = XSCALE_TRACE_DISABLED;
2972 xscale->trace.buffer_fill = 0;
2973 xscale->trace.fill_counter = 0;
2974
2975 /* prepare ARMv4/5 specific information */
2976 arm->arch_info = xscale;
2977 arm->core_type = ARM_MODE_ANY;
2978 arm->read_core_reg = xscale_read_core_reg;
2979 arm->write_core_reg = xscale_write_core_reg;
2980 arm->full_context = xscale_full_context;
2981
2982 arm_init_arch_info(target, arm);
2983
2984 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
2985 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
2986 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
2987 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
2988 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
2989 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
2990 xscale->armv4_5_mmu.has_tiny_pages = 1;
2991 xscale->armv4_5_mmu.mmu_enabled = 0;
2992
2993 return ERROR_OK;
2994 }
2995
2996 static int xscale_target_create(struct target *target, Jim_Interp *interp)
2997 {
2998 struct xscale_common *xscale;
2999
3000 if (sizeof xscale_debug_handler > 0x800) {
3001 LOG_ERROR("debug_handler.bin: larger than 2kb");
3002 return ERROR_FAIL;
3003 }
3004
3005 xscale = calloc(1, sizeof(*xscale));
3006 if (!xscale)
3007 return ERROR_FAIL;
3008
3009 return xscale_init_arch_info(target, xscale, target->tap);
3010 }
3011
3012 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3013 {
3014 struct target *target = NULL;
3015 struct xscale_common *xscale;
3016 int retval;
3017 uint32_t handler_address;
3018
3019 if (CMD_ARGC < 2)
3020 return ERROR_COMMAND_SYNTAX_ERROR;
3021
3022 target = get_target(CMD_ARGV[0]);
3023 if (target == NULL) {
3024 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3025 return ERROR_FAIL;
3026 }
3027
3028 xscale = target_to_xscale(target);
3029 retval = xscale_verify_pointer(CMD_CTX, xscale);
3030 if (retval != ERROR_OK)
3031 return retval;
3032
3033 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3034
3035 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3036 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3037 xscale->handler_address = handler_address;
3038 else {
3039 LOG_ERROR(
3040 "xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3041 return ERROR_FAIL;
3042 }
3043
3044 return ERROR_OK;
3045 }
3046
3047 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3048 {
3049 struct target *target = NULL;
3050 struct xscale_common *xscale;
3051 int retval;
3052 uint32_t cache_clean_address;
3053
3054 if (CMD_ARGC < 2)
3055 return ERROR_COMMAND_SYNTAX_ERROR;
3056
3057 target = get_target(CMD_ARGV[0]);
3058 if (target == NULL) {
3059 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3060 return ERROR_FAIL;
3061 }
3062 xscale = target_to_xscale(target);
3063 retval = xscale_verify_pointer(CMD_CTX, xscale);
3064 if (retval != ERROR_OK)
3065 return retval;
3066
3067 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3068
3069 if (cache_clean_address & 0xffff)
3070 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3071 else
3072 xscale->cache_clean_address = cache_clean_address;
3073
3074 return ERROR_OK;
3075 }
3076
3077 COMMAND_HANDLER(xscale_handle_cache_info_command)
3078 {
3079 struct target *target = get_current_target(CMD_CTX);
3080 struct xscale_common *xscale = target_to_xscale(target);
3081 int retval;
3082
3083 retval = xscale_verify_pointer(CMD_CTX, xscale);
3084 if (retval != ERROR_OK)
3085 return retval;
3086
3087 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3088 }
3089
3090 static int xscale_virt2phys(struct target *target,
3091 uint32_t virtual, uint32_t *physical)
3092 {
3093 struct xscale_common *xscale = target_to_xscale(target);
3094 uint32_t cb;
3095
3096 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3097 LOG_ERROR(xscale_not);
3098 return ERROR_TARGET_INVALID;
3099 }
3100
3101 uint32_t ret;
3102 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3103 virtual, &cb, &ret);
3104 if (retval != ERROR_OK)
3105 return retval;
3106 *physical = ret;
3107 return ERROR_OK;
3108 }
3109
3110 static int xscale_mmu(struct target *target, int *enabled)
3111 {
3112 struct xscale_common *xscale = target_to_xscale(target);
3113
3114 if (target->state != TARGET_HALTED) {
3115 LOG_ERROR("Target not halted");
3116 return ERROR_TARGET_INVALID;
3117 }
3118 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3119 return ERROR_OK;
3120 }
3121
3122 COMMAND_HANDLER(xscale_handle_mmu_command)
3123 {
3124 struct target *target = get_current_target(CMD_CTX);
3125 struct xscale_common *xscale = target_to_xscale(target);
3126 int retval;
3127
3128 retval = xscale_verify_pointer(CMD_CTX, xscale);
3129 if (retval != ERROR_OK)
3130 return retval;
3131
3132 if (target->state != TARGET_HALTED) {
3133 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3134 return ERROR_OK;
3135 }
3136
3137 if (CMD_ARGC >= 1) {
3138 bool enable;
3139 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3140 if (enable)
3141 xscale_enable_mmu_caches(target, 1, 0, 0);
3142 else
3143 xscale_disable_mmu_caches(target, 1, 0, 0);
3144 xscale->armv4_5_mmu.mmu_enabled = enable;
3145 }
3146
3147 command_print(CMD_CTX, "mmu %s",
3148 (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3149
3150 return ERROR_OK;
3151 }
3152
3153 COMMAND_HANDLER(xscale_handle_idcache_command)
3154 {
3155 struct target *target = get_current_target(CMD_CTX);
3156 struct xscale_common *xscale = target_to_xscale(target);
3157
3158 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3159 if (retval != ERROR_OK)
3160 return retval;
3161
3162 if (target->state != TARGET_HALTED) {
3163 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3164 return ERROR_OK;
3165 }
3166
3167 bool icache = false;
3168 if (strcmp(CMD_NAME, "icache") == 0)
3169 icache = true;
3170 if (CMD_ARGC >= 1) {
3171 bool enable;
3172 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3173 if (icache) {
3174 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3175 if (enable)
3176 xscale_enable_mmu_caches(target, 0, 0, 1);
3177 else
3178 xscale_disable_mmu_caches(target, 0, 0, 1);
3179 } else {
3180 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3181 if (enable)
3182 xscale_enable_mmu_caches(target, 0, 1, 0);
3183 else
3184 xscale_disable_mmu_caches(target, 0, 1, 0);
3185 }
3186 }
3187
3188 bool enabled = icache ?
3189 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3190 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3191 const char *msg = enabled ? "enabled" : "disabled";
3192 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3193
3194 return ERROR_OK;
3195 }
3196
/* Map vector-catch names (as accepted and printed by the "vector_catch"
 * command) to the corresponding trap-enable bits in the DCSR. */
static const struct {
	char name[15];
	unsigned mask;
} vec_ids[] = {
	{ "fiq", DCSR_TF, },
	{ "irq", DCSR_TI, },
	{ "dabt", DCSR_TD, },
	{ "pabt", DCSR_TA, },
	{ "swi", DCSR_TS, },
	{ "undef", DCSR_TU, },
	{ "reset", DCSR_TR, },
};
3209
3210 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3211 {
3212 struct target *target = get_current_target(CMD_CTX);
3213 struct xscale_common *xscale = target_to_xscale(target);
3214 int retval;
3215 uint32_t dcsr_value;
3216 uint32_t catch = 0;
3217 struct reg *dcsr_reg = &xscale->reg_cache->reg_list[XSCALE_DCSR];
3218
3219 retval = xscale_verify_pointer(CMD_CTX, xscale);
3220 if (retval != ERROR_OK)
3221 return retval;
3222
3223 dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
3224 if (CMD_ARGC > 0) {
3225 if (CMD_ARGC == 1) {
3226 if (strcmp(CMD_ARGV[0], "all") == 0) {
3227 catch = DCSR_TRAP_MASK;
3228 CMD_ARGC--;
3229 } else if (strcmp(CMD_ARGV[0], "none") == 0) {
3230 catch = 0;
3231 CMD_ARGC--;
3232 }
3233 }
3234 while (CMD_ARGC-- > 0) {
3235 unsigned i;
3236 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
3237 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name))
3238 continue;
3239 catch |= vec_ids[i].mask;
3240 break;
3241 }
3242 if (i == ARRAY_SIZE(vec_ids)) {
3243 LOG_ERROR("No vector '%s'", CMD_ARGV[CMD_ARGC]);
3244 return ERROR_COMMAND_SYNTAX_ERROR;
3245 }
3246 }
3247 buf_set_u32(dcsr_reg->value, 0, 32,
3248 (buf_get_u32(dcsr_reg->value, 0, 32) & ~DCSR_TRAP_MASK) | catch);
3249 xscale_write_dcsr(target, -1, -1);
3250 }
3251
3252 dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
3253 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
3254 command_print(CMD_CTX, "%15s: %s", vec_ids[i].name,
3255 (dcsr_value & vec_ids[i].mask) ? "catch" : "ignore");
3256 }
3257
3258 return ERROR_OK;
3259 }
3260
3261
3262 COMMAND_HANDLER(xscale_handle_vector_table_command)
3263 {
3264 struct target *target = get_current_target(CMD_CTX);
3265 struct xscale_common *xscale = target_to_xscale(target);
3266 int err = 0;
3267 int retval;
3268
3269 retval = xscale_verify_pointer(CMD_CTX, xscale);
3270 if (retval != ERROR_OK)
3271 return retval;
3272
3273 if (CMD_ARGC == 0) { /* print current settings */
3274 int idx;
3275
3276 command_print(CMD_CTX, "active user-set static vectors:");
3277 for (idx = 1; idx < 8; idx++)
3278 if (xscale->static_low_vectors_set & (1 << idx))
3279 command_print(CMD_CTX,
3280 "low %d: 0x%" PRIx32,
3281 idx,
3282 xscale->static_low_vectors[idx]);
3283 for (idx = 1; idx < 8; idx++)
3284 if (xscale->static_high_vectors_set & (1 << idx))
3285 command_print(CMD_CTX,
3286 "high %d: 0x%" PRIx32,
3287 idx,
3288 xscale->static_high_vectors[idx]);
3289 return ERROR_OK;
3290 }
3291
3292 if (CMD_ARGC != 3)
3293 err = 1;
3294 else {
3295 int idx;
3296 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3297 uint32_t vec;
3298 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3299
3300 if (idx < 1 || idx >= 8)
3301 err = 1;
3302
3303 if (!err && strcmp(CMD_ARGV[0], "low") == 0) {
3304 xscale->static_low_vectors_set |= (1<<idx);
3305 xscale->static_low_vectors[idx] = vec;
3306 } else if (!err && (strcmp(CMD_ARGV[0], "high") == 0)) {
3307 xscale->static_high_vectors_set |= (1<<idx);
3308 xscale->static_high_vectors[idx] = vec;
3309 } else
3310 err = 1;
3311 }
3312
3313 if (err)
3314 return ERROR_COMMAND_SYNTAX_ERROR;
3315
3316 return ERROR_OK;
3317 }
3318
3319
3320 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3321 {
3322 struct target *target = get_current_target(CMD_CTX);
3323 struct xscale_common *xscale = target_to_xscale(target);
3324 uint32_t dcsr_value;
3325 int retval;
3326
3327 retval = xscale_verify_pointer(CMD_CTX, xscale);
3328 if (retval != ERROR_OK)
3329 return retval;
3330
3331 if (target->state != TARGET_HALTED) {
3332 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3333 return ERROR_OK;
3334 }
3335
3336 if (CMD_ARGC >= 1) {
3337 if (strcmp("enable", CMD_ARGV[0]) == 0)
3338 xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
3339 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3340 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3341 else
3342 return ERROR_COMMAND_SYNTAX_ERROR;
3343 }
3344
3345 if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3346 if (strcmp("fill", CMD_ARGV[1]) == 0) {
3347 int buffcount = 1; /* default */
3348 if (CMD_ARGC >= 3)
3349 COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
3350 if (buffcount < 1) { /* invalid */
3351 command_print(CMD_CTX, "fill buffer count must be > 0");
3352 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3353 return ERROR_COMMAND_SYNTAX_ERROR;
3354 }
3355 xscale->trace.buffer_fill = buffcount;
3356 xscale->trace.mode = XSCALE_TRACE_FILL;
3357 } else if (strcmp("wrap", CMD_ARGV[1]) == 0)
3358 xscale->trace.mode = XSCALE_TRACE_WRAP;
3359 else {
3360 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3361 return ERROR_COMMAND_SYNTAX_ERROR;
3362 }
3363 }
3364
3365 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3366 char fill_string[12];
3367 sprintf(fill_string, "fill %d", xscale->trace.buffer_fill);
3368 command_print(CMD_CTX, "trace buffer enabled (%s)",
3369 (xscale->trace.mode == XSCALE_TRACE_FILL)
3370 ? fill_string : "wrap");
3371 } else
3372 command_print(CMD_CTX, "trace buffer disabled");
3373
3374 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3375 if (xscale->trace.mode == XSCALE_TRACE_FILL)
3376 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3377 else
3378 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3379
3380 return ERROR_OK;
3381 }
3382
3383 COMMAND_HANDLER(xscale_handle_trace_image_command)
3384 {
3385 struct target *target = get_current_target(CMD_CTX);
3386 struct xscale_common *xscale = target_to_xscale(target);
3387 int retval;
3388
3389 if (CMD_ARGC < 1)
3390 return ERROR_COMMAND_SYNTAX_ERROR;
3391
3392 retval = xscale_verify_pointer(CMD_CTX, xscale);
3393 if (retval != ERROR_OK)
3394 return retval;
3395
3396 if (xscale->trace.image) {
3397 image_close(xscale->trace.image);
3398 free(xscale->trace.image);
3399 command_print(CMD_CTX, "previously loaded image found and closed");
3400 }
3401
3402 xscale->trace.image = malloc(sizeof(struct image));
3403 xscale->trace.image->base_address_set = 0;
3404 xscale->trace.image->start_address_set = 0;
3405
3406 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3407 if (CMD_ARGC >= 2) {
3408 xscale->trace.image->base_address_set = 1;
3409 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3410 } else
3411 xscale->trace.image->base_address_set = 0;
3412
3413 if (image_open(xscale->trace.image, CMD_ARGV[0],
3414 (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK) {
3415 free(xscale->trace.image);
3416 xscale->trace.image = NULL;
3417 return ERROR_OK;
3418 }
3419
3420 return ERROR_OK;
3421 }
3422
3423 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3424 {
3425 struct target *target = get_current_target(CMD_CTX);
3426 struct xscale_common *xscale = target_to_xscale(target);
3427 struct xscale_trace_data *trace_data;
3428 struct fileio file;
3429 int retval;
3430
3431 retval = xscale_verify_pointer(CMD_CTX, xscale);
3432 if (retval != ERROR_OK)
3433 return retval;
3434
3435 if (target->state != TARGET_HALTED) {
3436 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3437 return ERROR_OK;
3438 }
3439
3440 if (CMD_ARGC < 1)
3441 return ERROR_COMMAND_SYNTAX_ERROR;
3442
3443 trace_data = xscale->trace.data;
3444
3445 if (!trace_data) {
3446 command_print(CMD_CTX, "no trace data collected");
3447 return ERROR_OK;
3448 }
3449
3450 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3451 return ERROR_OK;
3452
3453 while (trace_data) {
3454 int i;
3455
3456 fileio_write_u32(&file, trace_data->chkpt0);
3457 fileio_write_u32(&file, trace_data->chkpt1);
3458 fileio_write_u32(&file, trace_data->last_instruction);
3459 fileio_write_u32(&file, trace_data->depth);
3460
3461 for (i = 0; i < trace_data->depth; i++)
3462 fileio_write_u32(&file, trace_data->entries[i].data |
3463 ((trace_data->entries[i].type & 0xffff) << 16));
3464
3465 trace_data = trace_data->next;
3466 }
3467
3468 fileio_close(&file);
3469
3470 return ERROR_OK;
3471 }
3472
3473 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3474 {
3475 struct target *target = get_current_target(CMD_CTX);
3476 struct xscale_common *xscale = target_to_xscale(target);
3477 int retval;
3478
3479 retval = xscale_verify_pointer(CMD_CTX, xscale);
3480 if (retval != ERROR_OK)
3481 return retval;
3482
3483 xscale_analyze_trace(target, CMD_CTX);
3484
3485 return ERROR_OK;
3486 }
3487
3488 COMMAND_HANDLER(xscale_handle_cp15)
3489 {
3490 struct target *target = get_current_target(CMD_CTX);
3491 struct xscale_common *xscale = target_to_xscale(target);
3492 int retval;
3493
3494 retval = xscale_verify_pointer(CMD_CTX, xscale);
3495 if (retval != ERROR_OK)
3496 return retval;
3497
3498 if (target->state != TARGET_HALTED) {
3499 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3500 return ERROR_OK;
3501 }
3502 uint32_t reg_no = 0;
3503 struct reg *reg = NULL;
3504 if (CMD_ARGC > 0) {
3505 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3506 /*translate from xscale cp15 register no to openocd register*/
3507 switch (reg_no) {
3508 case 0:
3509 reg_no = XSCALE_MAINID;
3510 break;
3511 case 1:
3512 reg_no = XSCALE_CTRL;
3513 break;
3514 case 2:
3515 reg_no = XSCALE_TTB;
3516 break;
3517 case 3:
3518 reg_no = XSCALE_DAC;
3519 break;
3520 case 5:
3521 reg_no = XSCALE_FSR;
3522 break;
3523 case 6:
3524 reg_no = XSCALE_FAR;
3525 break;
3526 case 13:
3527 reg_no = XSCALE_PID;
3528 break;
3529 case 15:
3530 reg_no = XSCALE_CPACCESS;
3531 break;
3532 default:
3533 command_print(CMD_CTX, "invalid register number");
3534 return ERROR_COMMAND_SYNTAX_ERROR;
3535 }
3536 reg = &xscale->reg_cache->reg_list[reg_no];
3537
3538 }
3539 if (CMD_ARGC == 1) {
3540 uint32_t value;
3541
3542 /* read cp15 control register */
3543 xscale_get_reg(reg);
3544 value = buf_get_u32(reg->value, 0, 32);
3545 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size),
3546 value);
3547 } else if (CMD_ARGC == 2) {
3548 uint32_t value;
3549 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3550
3551 /* send CP write request (command 0x41) */
3552 xscale_send_u32(target, 0x41);
3553
3554 /* send CP register number */
3555 xscale_send_u32(target, reg_no);
3556
3557 /* send CP register value */
3558 xscale_send_u32(target, value);
3559
3560 /* execute cpwait to ensure outstanding operations complete */
3561 xscale_send_u32(target, 0x53);
3562 } else
3563 return ERROR_COMMAND_SYNTAX_ERROR;
3564
3565 return ERROR_OK;
3566 }
3567
/* Commands valid only at COMMAND_EXEC stage (require a configured
 * target): cache/MMU control, vector handling, and trace facilities. */
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		/* shares a handler with "icache"; the handler dispatches
		 * on CMD_NAME */
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display mask of vectors "
			"that should trigger debug entry",
		.usage = "['all'|'none'|'fiq'|'irq'|'dabt'|'pabt'|'swi'|'undef'|'reset']",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
		.usage = "",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};
/* Commands usable at any stage (COMMAND_ANY), including during the
 * config stage; also chains in the EXEC-only commands above. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "<target> <address>",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: generic ARM commands plus the
 * "xscale" command group defined above. */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.usage = "",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3685
/* Target-type vtable registering the XScale implementation with the
 * OpenOCD target framework (selected as "xscale" in target configs).
 * Generic ARMv4/5 helpers are reused where no XScale-specific handling
 * is needed (gdb register list, checksum/blank-check, algorithms). */
struct target_type xscale_target = {
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page — this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)