armv7m: add FPU registers support
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
25 ***************************************************************************/
26
27 #ifdef HAVE_CONFIG_H
28 #include "config.h"
29 #endif
30
31 #include "breakpoints.h"
32 #include "xscale.h"
33 #include "target_type.h"
34 #include "arm_jtag.h"
35 #include "arm_simulator.h"
36 #include "arm_disassembler.h"
37 #include <helper/time_support.h>
38 #include "register.h"
39 #include "image.h"
40 #include "arm_opcodes.h"
41 #include "armv4_5.h"
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62 /* forward declarations */
63 static int xscale_resume(struct target *, int current,
64 uint32_t address, int handle_breakpoints, int debug_execution);
65 static int xscale_debug_entry(struct target *);
66 static int xscale_restore_banked(struct target *);
67 static int xscale_get_reg(struct reg *reg);
68 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
69 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
70 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
71 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
72 static int xscale_read_trace(struct target *);
73
/* This XScale "debug handler" is loaded into the processor's
 * mini-ICache, which is 2K of code writable only via JTAG.
 *
 * The binary image is generated at build time into xscale_debug.inc
 * as a C byte-array initializer.
 */
static const uint8_t xscale_debug_handler[] = {
#include "xscale_debug.inc"
};
80
/* Names of the XScale-specific debug/cp15 registers.  Order must match
 * the XSCALE_* register index constants and xscale_reg_arch_info[]. */
static const char *const xscale_reg_list[] = {
	"XSCALE_MAINID",	/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",	/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",	/* 20 */
	"XSCALE_TXRXCTRL",
};
105
/* Per-register architecture info, indexed like xscale_reg_list[] above.
 * Registers marked -1 have no cp15 number and are reached only through
 * dedicated JTAG scan chains. */
static const struct xscale_reg xscale_reg_arch_info[] = {
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL},	/* DCSR accessed via JTAG or SW */
	{-1, NULL},	/* TX accessed via JTAG */
	{-1, NULL},	/* RX accessed via JTAG */
	{-1, NULL},	/* TXRXCTRL implicit access via JTAG */
};
130
131 /* convenience wrapper to access XScale specific registers */
132 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
133 {
134 uint8_t buf[4];
135
136 buf_set_u32(buf, 0, 32, value);
137
138 return xscale_set_reg(reg, buf);
139 }
140
/* Error message shown when an xscale command targets a non-XScale CPU. */
static const char xscale_not[] = "target is not an XScale";
142
143 static int xscale_verify_pointer(struct command_context *cmd_ctx,
144 struct xscale_common *xscale)
145 {
146 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
147 command_print(cmd_ctx, xscale_not);
148 return ERROR_TARGET_INVALID;
149 }
150 return ERROR_OK;
151 }
152
153 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
154 {
155 assert(tap != NULL);
156
157 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
158 struct scan_field field;
159 uint8_t scratch[4];
160
161 memset(&field, 0, sizeof field);
162 field.num_bits = tap->ir_length;
163 field.out_value = scratch;
164 buf_set_u32(scratch, 0, field.num_bits, new_instr);
165
166 jtag_add_ir_scan(tap, &field, end_state);
167 }
168
169 return ERROR_OK;
170 }
171
/* Read the Debug Control and Status Register (DCSR) through the SELDCSR
 * scan chain into the register cache.
 *
 * Two DR scans are performed: the first captures the current DCSR value;
 * the second writes that same value back while re-driving the hold_rst
 * and external_debug_break control bits carried in the 3-bit prefix field.
 * Returns ERROR_OK or a JTAG error code.
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_DRPAUSE);

	/* bit 1: hold processor in reset; bit 2: external debug break request */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* cache now holds the fresh hardware value */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
233
234
/* JTAG completion callback: convert one captured 32-bit scan result from
 * scan-buffer byte order to host order, in place.
 * NOTE(review): reads the word as bytes and rewrites it through a
 * uint32_t pointer; assumes the buffer is suitably aligned — confirm. */
static void xscale_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;
	*((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}
240
241 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
242 {
243 if (num_words == 0)
244 return ERROR_COMMAND_SYNTAX_ERROR;
245
246 struct xscale_common *xscale = target_to_xscale(target);
247 int retval = ERROR_OK;
248 tap_state_t path[3];
249 struct scan_field fields[3];
250 uint8_t *field0 = malloc(num_words * 1);
251 uint8_t field0_check_value = 0x2;
252 uint8_t field0_check_mask = 0x6;
253 uint32_t *field1 = malloc(num_words * 4);
254 uint8_t field2_check_value = 0x0;
255 uint8_t field2_check_mask = 0x1;
256 int words_done = 0;
257 int words_scheduled = 0;
258 int i;
259
260 path[0] = TAP_DRSELECT;
261 path[1] = TAP_DRCAPTURE;
262 path[2] = TAP_DRSHIFT;
263
264 memset(&fields, 0, sizeof fields);
265
266 fields[0].num_bits = 3;
267 uint8_t tmp;
268 fields[0].in_value = &tmp;
269 fields[0].check_value = &field0_check_value;
270 fields[0].check_mask = &field0_check_mask;
271
272 fields[1].num_bits = 32;
273
274 fields[2].num_bits = 1;
275 uint8_t tmp2;
276 fields[2].in_value = &tmp2;
277 fields[2].check_value = &field2_check_value;
278 fields[2].check_mask = &field2_check_mask;
279
280 xscale_jtag_set_instr(target->tap,
281 XSCALE_DBGTX << xscale->xscale_variant,
282 TAP_IDLE);
283 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above
284 *could be a no-op */
285
286 /* repeat until all words have been collected */
287 int attempts = 0;
288 while (words_done < num_words) {
289 /* schedule reads */
290 words_scheduled = 0;
291 for (i = words_done; i < num_words; i++) {
292 fields[0].in_value = &field0[i];
293
294 jtag_add_pathmove(3, path);
295
296 fields[1].in_value = (uint8_t *)(field1 + i);
297
298 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
299
300 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
301
302 words_scheduled++;
303 }
304
305 retval = jtag_execute_queue();
306 if (retval != ERROR_OK) {
307 LOG_ERROR("JTAG error while receiving data from debug handler");
308 break;
309 }
310
311 /* examine results */
312 for (i = words_done; i < num_words; i++) {
313 if (!(field0[i] & 1)) {
314 /* move backwards if necessary */
315 int j;
316 for (j = i; j < num_words - 1; j++) {
317 field0[j] = field0[j + 1];
318 field1[j] = field1[j + 1];
319 }
320 words_scheduled--;
321 }
322 }
323 if (words_scheduled == 0) {
324 if (attempts++ == 1000) {
325 LOG_ERROR(
326 "Failed to receiving data from debug handler after 1000 attempts");
327 retval = ERROR_TARGET_TIMEOUT;
328 break;
329 }
330 }
331
332 words_done += words_scheduled;
333 }
334
335 for (i = 0; i < num_words; i++)
336 *(buffer++) = buf_get_u32((uint8_t *)&field1[i], 0, 32);
337
338 free(field1);
339
340 return retval;
341 }
342
/* Poll the debug handler's TX register through the DBGTX scan chain.
 *
 * If @consume is set, the TAP goes Capture-DR -> Shift-DR, which clears
 * TX_READY on the target; otherwise the no-consume path leaves the value
 * in place so it can be peeked.  The captured word lands in the cached
 * XSCALE_TX register value.
 *
 * Returns ERROR_OK when data was available,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when TX_READY was clear (non-consume
 * peek), and ERROR_TARGET_TIMEOUT after one second without data or on a
 * JTAG error.
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGTX << xscale->xscale_variant,
		TAP_IDLE);

	/* consuming path: straight into Shift-DR */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: detour through Exit1/Pause/Exit2 */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* one-second deadline for TX_READY to appear */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;; ) {
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* done when TX_READY is set, or immediately on a non-consume peek */
		if (!((!(field0_in & 1)) && consume))
			goto done;
		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
429
/* Write the cached XSCALE_RX register value to the debug handler through
 * the DBGRX scan chain.
 *
 * First polls until the handler has drained the previous word (RX_READY,
 * bit 0 of the status field, reads low), then performs one more scan with
 * the rx_valid flag set to hand the new word over.
 *
 * Returns ERROR_OK, ERROR_TARGET_TIMEOUT after one second of the handler
 * not draining RX, or a JTAG error code.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* one-second deadline for the handler to drain the previous word */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;) {
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
507
/* Send count elements of size bytes (1, 2 or 4) from buffer to the debug
 * handler through the DBGRX scan chain.
 *
 * Unlike xscale_send_u32()/xscale_write_rx(), this streams all words in a
 * single JTAG queue without polling RX_READY between words, so it relies
 * on the handler keeping up.  Elements are converted from the target's
 * endianness to host order before being scanned out.
 *
 * Returns ERROR_OK, ERROR_COMMAND_SYNTAX_ERROR for a bad size, or a JTAG
 * error code.
 */
static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	int done_count = 0;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	/* t0/t2 are static so they outlive this frame while the JTAG queue
	 * still references them; t2 = 1 drives rx_valid on every scan */
	static const uint8_t t0;
	uint8_t t1[4];
	static const uint8_t t2 = 1;
	struct scan_field fields[3] = {
		{ .num_bits = 3, .out_value = &t0 },
		{ .num_bits = 32, .out_value = t1 },
		{ .num_bits = 1, .out_value = &t2 },
	};

	int endianness = target->endianness;
	while (done_count++ < count) {
		uint32_t t;

		switch (size) {
			case 4:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t = le_to_h_u32(buffer);
				else
					t = be_to_h_u32(buffer);
				break;
			case 2:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t = le_to_h_u16(buffer);
				else
					t = be_to_h_u16(buffer);
				break;
			case 1:
				t = buffer[0];
				break;
			default:
				LOG_ERROR("BUG: size neither 4, 2 nor 1");
				return ERROR_COMMAND_SYNTAX_ERROR;
		}

		buf_set_u32(t1, 0, 32, t);

		jtag_add_dr_scan(target->tap,
			3,
			fields,
			TAP_IDLE);
		buffer += size;
	}

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
570
571 static int xscale_send_u32(struct target *target, uint32_t value)
572 {
573 struct xscale_common *xscale = target_to_xscale(target);
574
575 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
576 return xscale_write_rx(target);
577 }
578
/* Write the cached DCSR value to the target through the SELDCSR scan chain.
 *
 * @param hold_rst     new hold-reset state, or -1 to keep the current one
 * @param ext_dbg_brk  new external-debug-break state, or -1 to keep it
 *
 * The two control bits are carried in the 3-bit prefix field of the scan.
 * On success the cached DCSR is marked clean and valid.
 * Returns ERROR_OK or a JTAG error code.
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* bit 1: hold processor in reset; bit 2: external debug break request */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
635
/* parity of the number of set bits: 0 if even, 1 if odd.
 * Equivalent to the classic fold-and-lookup trick for 32-bit words,
 * written as a straightforward bit-by-bit XOR reduction. */
static unsigned int parity(unsigned int v)
{
	unsigned int p = 0;

	while (v != 0) {
		p ^= v & 1u;
		v >>= 1;
	}
	return p;
}
647
/* Load one 8-word cache line into the mini-ICache at virtual address va
 * through the LDIC scan chain.
 *
 * First scans the LDIC command (b011 = load mini-ICache) plus the 27-bit
 * line address (va >> 5), then scans each of the 8 instruction words
 * followed by its computed parity bit.
 *
 * @param target  target whose mini-ICache to load
 * @param va      cache-line-aligned virtual address of the line
 * @param buffer  the 8 instruction words to load
 * @return result of jtag_execute_queue()
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
		TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;	/* cmd reused as the parity bit */

	for (word = 0; word < 8; word++) {
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* memcpy avoids strict-aliasing issues reading packet as u32 */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
701
702 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
703 {
704 struct xscale_common *xscale = target_to_xscale(target);
705 uint8_t packet[4];
706 uint8_t cmd;
707 struct scan_field fields[2];
708
709 xscale_jtag_set_instr(target->tap,
710 XSCALE_LDIC << xscale->xscale_variant,
711 TAP_IDLE);
712
713 /* CMD for invalidate IC line b000, bits [6:4] b000 */
714 buf_set_u32(&cmd, 0, 6, 0x0);
715
716 /* virtual address of desired cache line */
717 buf_set_u32(packet, 0, 27, va >> 5);
718
719 memset(&fields, 0, sizeof fields);
720
721 fields[0].num_bits = 6;
722 fields[0].out_value = &cmd;
723
724 fields[1].num_bits = 27;
725 fields[1].out_value = packet;
726
727 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
728
729 return ERROR_OK;
730 }
731
732 static int xscale_update_vectors(struct target *target)
733 {
734 struct xscale_common *xscale = target_to_xscale(target);
735 int i;
736 int retval;
737
738 uint32_t low_reset_branch, high_reset_branch;
739
740 for (i = 1; i < 8; i++) {
741 /* if there's a static vector specified for this exception, override */
742 if (xscale->static_high_vectors_set & (1 << i))
743 xscale->high_vectors[i] = xscale->static_high_vectors[i];
744 else {
745 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
746 if (retval == ERROR_TARGET_TIMEOUT)
747 return retval;
748 if (retval != ERROR_OK) {
749 /* Some of these reads will fail as part of normal execution */
750 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
751 }
752 }
753 }
754
755 for (i = 1; i < 8; i++) {
756 if (xscale->static_low_vectors_set & (1 << i))
757 xscale->low_vectors[i] = xscale->static_low_vectors[i];
758 else {
759 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
760 if (retval == ERROR_TARGET_TIMEOUT)
761 return retval;
762 if (retval != ERROR_OK) {
763 /* Some of these reads will fail as part of normal execution */
764 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
765 }
766 }
767 }
768
769 /* calculate branches to debug handler */
770 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
771 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
772
773 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
774 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
775
776 /* invalidate and load exception vectors in mini i-cache */
777 xscale_invalidate_ic_line(target, 0x0);
778 xscale_invalidate_ic_line(target, 0xffff0000);
779
780 xscale_load_ic(target, 0x0, xscale->low_vectors);
781 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
782
783 return ERROR_OK;
784 }
785
/* Report XScale-specific state (MMU/cache enables and the architectural
 * debug-entry reason) to the user after the generic ARM state line.
 * Returns ERROR_OK, or ERROR_COMMAND_SYNTAX_ERROR if the target's ARM
 * state fails the magic check. */
static int xscale_arch_state(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;

	static const char *state[] = {
		"disabled", "enabled"
	};

	/* indexed by enum xscale_debug_reason */
	static const char *arch_dbg_reason[] = {
		"", "\n(processor reset)", "\n(trace buffer full)"
	};

	if (arm->common_magic != ARM_COMMON_MAGIC) {
		LOG_ERROR("BUG: called for a non-ARMv4/5 target");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	arm_arch_state(target);
	LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
		state[xscale->armv4_5_mmu.mmu_enabled],
		state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
		state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
		arch_dbg_reason[xscale->arch_debug_reason]);

	return ERROR_OK;
}
813
/* Periodic poll hook: detect entry into debug state.
 *
 * A successful non-consuming TX read means the debug handler is active,
 * so the target has halted; xscale_debug_entry() then fetches its state.
 * Any other TX error is treated as a communication failure and the state
 * is forced to HALTED so GDB can recover with a reset.
 */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING)) {
		enum target_state previous_state = target->state;
		retval = xscale_read_tx(target, 0);
		if (retval == ERROR_OK) {

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		} else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE) {
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be perfomed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
850
/* Handle entry into debug state: drain the debug handler's register dump,
 * populate the register caches, decode the Method-of-Entry, apply the PC
 * fixup, refresh MMU/cache state, and read any pending trace data.
 *
 * Protocol (fixed by the debug handler): it first sends r0, pc, r1-r7 and
 * cpsr (10 words), then the banked registers r8-r14 plus spsr when the
 * entry mode has one (8 words), or just r8-r14 (7 words) for USR/SYS.
 *
 * Returns ERROR_OK, a JTAG error, or ERROR_TARGET_FAILURE when the
 * received cpsr holds an invalid mode (communication failure).
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t pc;
	uint32_t buffer[10];
	unsigned i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	retval = xscale_read_dcsr(target);
	if (retval != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	retval = xscale_receive(target, buffer, 10);
	if (retval != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	arm->core_cache->reg_list[0].dirty = 1;
	arm->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(arm->pc->value, 0, 32, buffer[1]);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++) {
		buf_set_u32(arm->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		arm->core_cache->reg_list[i].dirty = 1;
		arm->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(arm, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	if (!is_arm_mode(arm->core_mode)) {
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
		arm_mode_name(arm->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (arm->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(arm->spsr->value, 0, 32, buffer[7]);
		arm->spsr->dirty = false;
		arm->spsr->valid = true;
	} else {
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++) {
		struct reg *r = arm_reg_current(arm, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* mark xscale regs invalid to ensure they are retrieved from the
	 * debug handler if requested */
	for (i = 0; i < xscale->reg_cache->num_regs; i++)
		xscale->reg_cache->reg_list[i].valid = 0;

	/* examine debug reason: Method of Entry field, DCSR bits [4:2] */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(arm->pc->value, 0, 32);

	switch (moe) {
		case 0x0:	/* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1:	/* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2:	/* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3:	/* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4:	/* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5:	/* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6:	/* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7:	/* Reserved (may flag Hot-Debug support) */
		default:
			/* NOTE(review): exiting the whole process here is drastic;
			 * consider returning an error instead */
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(arm->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1) {
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value,
				0,
				32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings
	 * read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg =
		buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
		(xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
		(xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		xscale_read_trace(target);

		/* Resume if entered debug due to buffer fill and we're still collecting
		 * trace data. Note that a debug exception due to trace buffer full
		 * can only happen in fill mode. */
		if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL) {
			if (--xscale->trace.fill_counter > 0)
				xscale_resume(target, 1, 0x0, 1, 0);
		} else	/* entered debug for other reason; reset counter */
			xscale->trace.fill_counter = 0;
	}

	return ERROR_OK;
}
1021
1022 static int xscale_halt(struct target *target)
1023 {
1024 struct xscale_common *xscale = target_to_xscale(target);
1025
1026 LOG_DEBUG("target->state: %s",
1027 target_state_name(target));
1028
1029 if (target->state == TARGET_HALTED) {
1030 LOG_DEBUG("target was already halted");
1031 return ERROR_OK;
1032 } else if (target->state == TARGET_UNKNOWN) {
1033 /* this must not happen for a xscale target */
1034 LOG_ERROR("target was in unknown state when halt was requested");
1035 return ERROR_TARGET_INVALID;
1036 } else if (target->state == TARGET_RESET)
1037 LOG_DEBUG("target->state == TARGET_RESET");
1038 else {
1039 /* assert external dbg break */
1040 xscale->external_debug_break = 1;
1041 xscale_read_dcsr(target);
1042
1043 target->debug_reason = DBG_REASON_DBGRQ;
1044 }
1045
1046 return ERROR_OK;
1047 }
1048
/* Arm hardware single-step by programming IBCR0 to break at next_pc.
 *
 * If IBCR0 is currently backing a user breakpoint, that breakpoint is
 * temporarily unset first (it is re-armed later by the resume path).
 * Bit 0 of the IBCR value is the enable bit, hence next_pc | 0x1.
 */
static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
	int retval;

	if (xscale->ibcr0_used) {
		/* IBCR0 holds a breakpoint address with the enable bit set;
		 * mask it off to look the breakpoint up */
		struct breakpoint *ibcr0_bp =
			breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);

		if (ibcr0_bp)
			xscale_unset_breakpoint(target, ibcr0_bp);
		else {
			LOG_ERROR(
				"BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
			exit(-1);
		}
	}

	retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1074
1075 static int xscale_disable_single_step(struct target *target)
1076 {
1077 struct xscale_common *xscale = target_to_xscale(target);
1078 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1079 int retval;
1080
1081 retval = xscale_set_reg_u32(ibcr0, 0x0);
1082 if (retval != ERROR_OK)
1083 return retval;
1084
1085 return ERROR_OK;
1086 }
1087
1088 static void xscale_enable_watchpoints(struct target *target)
1089 {
1090 struct watchpoint *watchpoint = target->watchpoints;
1091
1092 while (watchpoint) {
1093 if (watchpoint->set == 0)
1094 xscale_set_watchpoint(target, watchpoint);
1095 watchpoint = watchpoint->next;
1096 }
1097 }
1098
1099 static void xscale_enable_breakpoints(struct target *target)
1100 {
1101 struct breakpoint *breakpoint = target->breakpoints;
1102
1103 /* set any pending breakpoints */
1104 while (breakpoint) {
1105 if (breakpoint->set == 0)
1106 xscale_set_breakpoint(target, breakpoint);
1107 breakpoint = breakpoint->next;
1108 }
1109 }
1110
1111 static void xscale_free_trace_data(struct xscale_common *xscale)
1112 {
1113 struct xscale_trace_data *td = xscale->trace.data;
1114 while (td) {
1115 struct xscale_trace_data *next_td = td->next;
1116 if (td->entries)
1117 free(td->entries);
1118 free(td);
1119 td = next_td;
1120 }
1121 xscale->trace.data = NULL;
1122 }
1123
/* Resume execution.
 *
 * current = 1: resume at the current PC, otherwise resume at 'address'.
 * handle_breakpoints: if a breakpoint sits at the resume PC, single-step
 * over it first (unset, step via IBCR0, re-set) before resuming.
 * debug_execution: keep working areas, go to TARGET_DEBUG_RUNNING and
 * emit TARGET_EVENT_DEBUG_RESUMED instead of the normal RESUMED event.
 *
 * Debug-handler resume protocol (visible below): optionally 0x62 to
 * clean the trace buffer, then resume command 0x31 (trace on) or 0x30
 * (trace off), then CPSR, r7 down to r0, and finally the PC. */
static int xscale_resume(struct target *target, int current,
    uint32_t address, int handle_breakpoints, int debug_execution)
{
    struct xscale_common *xscale = target_to_xscale(target);
    struct arm *arm = &xscale->arm;
    uint32_t current_pc;
    int retval;
    int i;

    LOG_DEBUG("-");

    if (target->state != TARGET_HALTED) {
        LOG_WARNING("target not halted");
        return ERROR_TARGET_NOT_HALTED;
    }

    if (!debug_execution)
        target_free_all_working_areas(target);

    /* update vector tables */
    retval = xscale_update_vectors(target);
    if (retval != ERROR_OK)
        return retval;

    /* current = 1: continue on current pc, otherwise continue at <address> */
    if (!current)
        buf_set_u32(arm->pc->value, 0, 32, address);

    current_pc = buf_get_u32(arm->pc->value, 0, 32);

    /* if we're at the reset vector, we have to simulate the branch */
    if (current_pc == 0x0) {
        arm_simulate_step(target, NULL);
        current_pc = buf_get_u32(arm->pc->value, 0, 32);
    }

    /* the front-end may request us not to handle breakpoints */
    if (handle_breakpoints) {
        struct breakpoint *breakpoint;
        breakpoint = breakpoint_find(target,
                buf_get_u32(arm->pc->value, 0, 32));
        if (breakpoint != NULL) {
            uint32_t next_pc;
            enum trace_mode saved_trace_mode;

            /* there's a breakpoint at the current PC, we have to step over it */
            LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
            xscale_unset_breakpoint(target, breakpoint);

            /* calculate PC of next instruction */
            retval = arm_simulate_step(target, &next_pc);
            if (retval != ERROR_OK) {
                uint32_t current_opcode;
                target_read_u32(target, current_pc, &current_opcode);
                LOG_ERROR(
                    "BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
                    current_opcode);
            }

            LOG_DEBUG("enable single-step");
            xscale_enable_single_step(target, next_pc);

            /* restore banked registers */
            retval = xscale_restore_banked(target);
            if (retval != ERROR_OK)
                return retval;

            /* send resume request */
            xscale_send_u32(target, 0x30);

            /* send CPSR */
            xscale_send_u32(target,
                buf_get_u32(arm->cpsr->value, 0, 32));
            LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
                buf_get_u32(arm->cpsr->value, 0, 32));

            /* send r7 down to r0, the order the debug handler expects */
            for (i = 7; i >= 0; i--) {
                /* send register */
                xscale_send_u32(target,
                    buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
                LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
                    i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
            }

            /* send PC */
            xscale_send_u32(target,
                buf_get_u32(arm->pc->value, 0, 32));
            LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
                buf_get_u32(arm->pc->value, 0, 32));

            /* disable trace data collection in xscale_debug_entry() */
            saved_trace_mode = xscale->trace.mode;
            xscale->trace.mode = XSCALE_TRACE_DISABLED;

            /* wait for and process debug entry */
            xscale_debug_entry(target);

            /* re-enable trace buffer, if enabled previously */
            xscale->trace.mode = saved_trace_mode;

            LOG_DEBUG("disable single-step");
            xscale_disable_single_step(target);

            /* re-install the breakpoint we just stepped over */
            LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
            xscale_set_breakpoint(target, breakpoint);
        }
    }

    /* enable any pending breakpoints and watchpoints */
    xscale_enable_breakpoints(target);
    xscale_enable_watchpoints(target);

    /* restore banked registers */
    retval = xscale_restore_banked(target);
    if (retval != ERROR_OK)
        return retval;

    /* send resume request (command 0x30 or 0x31)
     * clean the trace buffer if it is to be enabled (0x62) */
    if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
        if (xscale->trace.mode == XSCALE_TRACE_FILL) {
            /* If trace enabled in fill mode and starting collection of new set
             * of buffers, initialize buffer counter and free previous buffers */
            if (xscale->trace.fill_counter == 0) {
                xscale->trace.fill_counter = xscale->trace.buffer_fill;
                xscale_free_trace_data(xscale);
            }
        } else  /* wrap mode; free previous buffer */
            xscale_free_trace_data(xscale);

        xscale_send_u32(target, 0x62);
        xscale_send_u32(target, 0x31);
    } else
        xscale_send_u32(target, 0x30);

    /* send CPSR */
    xscale_send_u32(target, buf_get_u32(arm->cpsr->value, 0, 32));
    LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
        buf_get_u32(arm->cpsr->value, 0, 32));

    /* send r7 down to r0 */
    for (i = 7; i >= 0; i--) {
        /* send register */
        xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
        LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
            i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
    }

    /* send PC */
    xscale_send_u32(target, buf_get_u32(arm->pc->value, 0, 32));
    LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
        buf_get_u32(arm->pc->value, 0, 32));

    target->debug_reason = DBG_REASON_NOTHALTED;

    if (!debug_execution) {
        /* registers are now invalid */
        register_cache_invalidate(arm->core_cache);
        target->state = TARGET_RUNNING;
        target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
    } else {
        target->state = TARGET_DEBUG_RUNNING;
        target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
    }

    LOG_DEBUG("target resumed");

    return ERROR_OK;
}
1292
/* Core of single-step: compute the next PC by simulation, arm IBCR0 on
 * it, resume through the debug-handler protocol (0x62/0x31 or 0x30, then
 * CPSR, r7..r0, PC), wait for debug re-entry, and disarm the step
 * breakpoint.  'current', 'address' and 'handle_breakpoints' are part of
 * the signature but unused here — the caller (xscale_step) has already
 * set up the PC and removed any breakpoint at it. */
static int xscale_step_inner(struct target *target, int current,
    uint32_t address, int handle_breakpoints)
{
    struct xscale_common *xscale = target_to_xscale(target);
    struct arm *arm = &xscale->arm;
    uint32_t next_pc;
    int retval;
    int i;

    target->debug_reason = DBG_REASON_SINGLESTEP;

    /* calculate PC of next instruction */
    retval = arm_simulate_step(target, &next_pc);
    if (retval != ERROR_OK) {
        uint32_t current_opcode, current_pc;
        current_pc = buf_get_u32(arm->pc->value, 0, 32);

        target_read_u32(target, current_pc, &current_opcode);
        LOG_ERROR(
            "BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
            current_opcode);
        return retval;
    }

    LOG_DEBUG("enable single-step");
    retval = xscale_enable_single_step(target, next_pc);
    if (retval != ERROR_OK)
        return retval;

    /* restore banked registers */
    retval = xscale_restore_banked(target);
    if (retval != ERROR_OK)
        return retval;

    /* send resume request (command 0x30 or 0x31)
     * clean the trace buffer if it is to be enabled (0x62) */
    if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
        retval = xscale_send_u32(target, 0x62);
        if (retval != ERROR_OK)
            return retval;
        retval = xscale_send_u32(target, 0x31);
        if (retval != ERROR_OK)
            return retval;
    } else {
        retval = xscale_send_u32(target, 0x30);
        if (retval != ERROR_OK)
            return retval;
    }

    /* send CPSR */
    retval = xscale_send_u32(target,
            buf_get_u32(arm->cpsr->value, 0, 32));
    if (retval != ERROR_OK)
        return retval;
    LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
        buf_get_u32(arm->cpsr->value, 0, 32));

    /* send r7 down to r0, the order the debug handler expects */
    for (i = 7; i >= 0; i--) {
        /* send register */
        retval = xscale_send_u32(target,
                buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
        if (retval != ERROR_OK)
            return retval;
        LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i,
            buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
    }

    /* send PC */
    retval = xscale_send_u32(target,
            buf_get_u32(arm->pc->value, 0, 32));
    if (retval != ERROR_OK)
        return retval;
    LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
        buf_get_u32(arm->pc->value, 0, 32));

    target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

    /* registers are now invalid */
    register_cache_invalidate(arm->core_cache);

    /* wait for and process debug entry */
    retval = xscale_debug_entry(target);
    if (retval != ERROR_OK)
        return retval;

    LOG_DEBUG("disable single-step");
    retval = xscale_disable_single_step(target);
    if (retval != ERROR_OK)
        return retval;

    target_call_event_callbacks(target, TARGET_EVENT_HALTED);

    return ERROR_OK;
}
1387
/* Single-step one instruction.
 *
 * current = 1: step at the current PC, otherwise step at 'address'.
 * A step from the reset vector (PC == 0) is only simulated, not executed
 * on the target.  With 'handle_breakpoints', a breakpoint at the step PC
 * is removed for the step and re-installed afterwards. */
static int xscale_step(struct target *target, int current,
    uint32_t address, int handle_breakpoints)
{
    struct arm *arm = target_to_arm(target);
    struct breakpoint *breakpoint = NULL;

    uint32_t current_pc;
    int retval;

    if (target->state != TARGET_HALTED) {
        LOG_WARNING("target not halted");
        return ERROR_TARGET_NOT_HALTED;
    }

    /* current = 1: continue on current pc, otherwise continue at <address> */
    if (!current)
        buf_set_u32(arm->pc->value, 0, 32, address);

    current_pc = buf_get_u32(arm->pc->value, 0, 32);

    /* if we're at the reset vector, we have to simulate the step */
    if (current_pc == 0x0) {
        retval = arm_simulate_step(target, NULL);
        if (retval != ERROR_OK)
            return retval;
        current_pc = buf_get_u32(arm->pc->value, 0, 32);
        LOG_DEBUG("current pc %" PRIx32, current_pc);

        target->debug_reason = DBG_REASON_SINGLESTEP;
        target_call_event_callbacks(target, TARGET_EVENT_HALTED);

        return ERROR_OK;
    }

    /* the front-end may request us not to handle breakpoints */
    if (handle_breakpoints)
        breakpoint = breakpoint_find(target,
                buf_get_u32(arm->pc->value, 0, 32));
    if (breakpoint != NULL) {
        retval = xscale_unset_breakpoint(target, breakpoint);
        if (retval != ERROR_OK)
            return retval;
    }

    retval = xscale_step_inner(target, current, address, handle_breakpoints);
    if (retval != ERROR_OK)
        return retval;

    /* re-install the breakpoint we stepped over, if any */
    if (breakpoint)
        xscale_set_breakpoint(target, breakpoint);

    LOG_DEBUG("target stepped");

    return ERROR_OK;

}
1444
/* Assert SRST and prepare the core to trap into halt-mode debug when it
 * comes out of reset: Hold Reset (DCSR bit 30) and Trap Reset (DCSR
 * bit 16) are set while reset is asserted. */
static int xscale_assert_reset(struct target *target)
{
    struct xscale_common *xscale = target_to_xscale(target);

    LOG_DEBUG("target->state: %s",
        target_state_name(target));

    /* assert reset */
    jtag_add_reset(0, 1);

    /* sleep 1ms, to be sure we fulfill any requirements */
    jtag_add_sleep(1000);
    jtag_execute_queue();

    /* select DCSR instruction (set endstate to R-T-I to ensure we don't
     * end up in T-L-R, which would reset JTAG)
     */
    xscale_jtag_set_instr(target->tap,
        XSCALE_SELDCSR << xscale->xscale_variant,
        TAP_IDLE);

    /* set Hold reset, Halt mode and Trap Reset */
    buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
    buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
    xscale_write_dcsr(target, 1, 0);

    /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
    xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
    jtag_execute_queue();

    target->state = TARGET_RESET;

    if (target->reset_halt) {
        int retval = target_halt(target);
        if (retval != ERROR_OK)
            return retval;
    }

    return ERROR_OK;
}
1485
1486 static int xscale_deassert_reset(struct target *target)
1487 {
1488 struct xscale_common *xscale = target_to_xscale(target);
1489 struct breakpoint *breakpoint = target->breakpoints;
1490
1491 LOG_DEBUG("-");
1492
1493 xscale->ibcr_available = 2;
1494 xscale->ibcr0_used = 0;
1495 xscale->ibcr1_used = 0;
1496
1497 xscale->dbr_available = 2;
1498 xscale->dbr0_used = 0;
1499 xscale->dbr1_used = 0;
1500
1501 /* mark all hardware breakpoints as unset */
1502 while (breakpoint) {
1503 if (breakpoint->type == BKPT_HARD)
1504 breakpoint->set = 0;
1505 breakpoint = breakpoint->next;
1506 }
1507
1508 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1509 xscale_free_trace_data(xscale);
1510
1511 register_cache_invalidate(xscale->arm.core_cache);
1512
1513 /* FIXME mark hardware watchpoints got unset too. Also,
1514 * at least some of the XScale registers are invalid...
1515 */
1516
1517 /*
1518 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1519 * contents got invalidated. Safer to force that, so writing new
1520 * contents can't ever fail..
1521 */
1522 {
1523 uint32_t address;
1524 unsigned buf_cnt;
1525 const uint8_t *buffer = xscale_debug_handler;
1526 int retval;
1527
1528 /* release SRST */
1529 jtag_add_reset(0, 0);
1530
1531 /* wait 300ms; 150 and 100ms were not enough */
1532 jtag_add_sleep(300*1000);
1533
1534 jtag_add_runtest(2030, TAP_IDLE);
1535 jtag_execute_queue();
1536
1537 /* set Hold reset, Halt mode and Trap Reset */
1538 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1539 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1540 xscale_write_dcsr(target, 1, 0);
1541
1542 /* Load the debug handler into the mini-icache. Since
1543 * it's using halt mode (not monitor mode), it runs in
1544 * "Special Debug State" for access to registers, memory,
1545 * coprocessors, trace data, etc.
1546 */
1547 address = xscale->handler_address;
1548 for (unsigned binary_size = sizeof xscale_debug_handler;
1549 binary_size > 0;
1550 binary_size -= buf_cnt, buffer += buf_cnt) {
1551 uint32_t cache_line[8];
1552 unsigned i;
1553
1554 buf_cnt = binary_size;
1555 if (buf_cnt > 32)
1556 buf_cnt = 32;
1557
1558 for (i = 0; i < buf_cnt; i += 4) {
1559 /* convert LE buffer to host-endian uint32_t */
1560 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1561 }
1562
1563 for (; i < 32; i += 4)
1564 cache_line[i / 4] = 0xe1a08008;
1565
1566 /* only load addresses other than the reset vectors */
1567 if ((address % 0x400) != 0x0) {
1568 retval = xscale_load_ic(target, address,
1569 cache_line);
1570 if (retval != ERROR_OK)
1571 return retval;
1572 }
1573
1574 address += buf_cnt;
1575 }
1576 ;
1577
1578 retval = xscale_load_ic(target, 0x0,
1579 xscale->low_vectors);
1580 if (retval != ERROR_OK)
1581 return retval;
1582 retval = xscale_load_ic(target, 0xffff0000,
1583 xscale->high_vectors);
1584 if (retval != ERROR_OK)
1585 return retval;
1586
1587 jtag_add_runtest(30, TAP_IDLE);
1588
1589 jtag_add_sleep(100000);
1590
1591 /* set Hold reset, Halt mode and Trap Reset */
1592 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1593 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1594 xscale_write_dcsr(target, 1, 0);
1595
1596 /* clear Hold reset to let the target run (should enter debug handler) */
1597 xscale_write_dcsr(target, 0, 1);
1598 target->state = TARGET_RUNNING;
1599
1600 if (!target->reset_halt) {
1601 jtag_add_sleep(10000);
1602
1603 /* we should have entered debug now */
1604 xscale_debug_entry(target);
1605 target->state = TARGET_HALTED;
1606
1607 /* resume the target */
1608 xscale_resume(target, 1, 0x0, 1, 0);
1609 }
1610 }
1611
1612 return ERROR_OK;
1613 }
1614
1615 static int xscale_read_core_reg(struct target *target, struct reg *r,
1616 int num, enum arm_mode mode)
1617 {
1618 /** \todo add debug handler support for core register reads */
1619 LOG_ERROR("not implemented");
1620 return ERROR_OK;
1621 }
1622
1623 static int xscale_write_core_reg(struct target *target, struct reg *r,
1624 int num, enum arm_mode mode, uint8_t *value)
1625 {
1626 /** \todo add debug handler support for core register writes */
1627 LOG_ERROR("not implemented");
1628 return ERROR_OK;
1629 }
1630
1631 static int xscale_full_context(struct target *target)
1632 {
1633 struct arm *arm = target_to_arm(target);
1634
1635 uint32_t *buffer;
1636
1637 int i, j;
1638
1639 LOG_DEBUG("-");
1640
1641 if (target->state != TARGET_HALTED) {
1642 LOG_WARNING("target not halted");
1643 return ERROR_TARGET_NOT_HALTED;
1644 }
1645
1646 buffer = malloc(4 * 8);
1647
1648 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1649 * we can't enter User mode on an XScale (unpredictable),
1650 * but User shares registers with SYS
1651 */
1652 for (i = 1; i < 7; i++) {
1653 enum arm_mode mode = armv4_5_number_to_mode(i);
1654 bool valid = true;
1655 struct reg *r;
1656
1657 if (mode == ARM_MODE_USR)
1658 continue;
1659
1660 /* check if there are invalid registers in the current mode
1661 */
1662 for (j = 0; valid && j <= 16; j++) {
1663 if (!ARMV4_5_CORE_REG_MODE(arm->core_cache,
1664 mode, j).valid)
1665 valid = false;
1666 }
1667 if (valid)
1668 continue;
1669
1670 /* request banked registers */
1671 xscale_send_u32(target, 0x0);
1672
1673 /* send CPSR for desired bank mode */
1674 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1675
1676 /* get banked registers: r8 to r14; and SPSR
1677 * except in USR/SYS mode
1678 */
1679 if (mode != ARM_MODE_SYS) {
1680 /* SPSR */
1681 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1682 mode, 16);
1683
1684 xscale_receive(target, buffer, 8);
1685
1686 buf_set_u32(r->value, 0, 32, buffer[7]);
1687 r->dirty = false;
1688 r->valid = true;
1689 } else
1690 xscale_receive(target, buffer, 7);
1691
1692 /* move data from buffer to register cache */
1693 for (j = 8; j <= 14; j++) {
1694 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1695 mode, j);
1696
1697 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1698 r->dirty = false;
1699 r->valid = true;
1700 }
1701 }
1702
1703 free(buffer);
1704
1705 return ERROR_OK;
1706 }
1707
/* Flush dirty banked registers back to the target before resuming.
 *
 * For each privileged mode with at least one dirty register (r8-r14, or
 * the SPSR for non-SYS modes), sends command 0x1 ("send banked
 * registers") plus the mode's CPSR (I/F masked), then r8-r14 and — for
 * non-SYS modes — the SPSR. */
static int xscale_restore_banked(struct target *target)
{
    struct arm *arm = target_to_arm(target);

    int i, j;

    if (target->state != TARGET_HALTED) {
        LOG_WARNING("target not halted");
        return ERROR_TARGET_NOT_HALTED;
    }

    /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
     * and check if any banked registers need to be written. Ignore
     * USR mode (number 0) in favor of SYS; we can't enter User mode on
     * an XScale (unpredictable), but they share all registers.
     */
    for (i = 1; i < 7; i++) {
        enum arm_mode mode = armv4_5_number_to_mode(i);
        struct reg *r;

        if (mode == ARM_MODE_USR)
            continue;

        /* check if there are dirty registers in this mode */
        for (j = 8; j <= 14; j++) {
            if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
                    mode, j).dirty)
                goto dirty;
        }

        /* if not USR/SYS, check if the SPSR needs to be written */
        if (mode != ARM_MODE_SYS) {
            if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
                    mode, 16).dirty)
                goto dirty;
        }

        /* there's nothing to flush for this mode */
        continue;

dirty:
        /* command 0x1: "send banked registers" */
        xscale_send_u32(target, 0x1);

        /* send CPSR for desired mode */
        xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

        /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
         * but this protocol doesn't understand that nuance.
         */
        for (j = 8; j <= 14; j++) {
            r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
                    mode, j);
            xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
            r->dirty = false;
        }

        /* send spsr if not in USR/SYS mode */
        if (mode != ARM_MODE_SYS) {
            r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
                    mode, 16);
            xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
            r->dirty = false;
        }
    }

    return ERROR_OK;
}
1776
1777 static int xscale_read_memory(struct target *target, uint32_t address,
1778 uint32_t size, uint32_t count, uint8_t *buffer)
1779 {
1780 struct xscale_common *xscale = target_to_xscale(target);
1781 uint32_t *buf32;
1782 uint32_t i;
1783 int retval;
1784
1785 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1786 address,
1787 size,
1788 count);
1789
1790 if (target->state != TARGET_HALTED) {
1791 LOG_WARNING("target not halted");
1792 return ERROR_TARGET_NOT_HALTED;
1793 }
1794
1795 /* sanitize arguments */
1796 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1797 return ERROR_COMMAND_SYNTAX_ERROR;
1798
1799 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1800 return ERROR_TARGET_UNALIGNED_ACCESS;
1801
1802 /* send memory read request (command 0x1n, n: access size) */
1803 retval = xscale_send_u32(target, 0x10 | size);
1804 if (retval != ERROR_OK)
1805 return retval;
1806
1807 /* send base address for read request */
1808 retval = xscale_send_u32(target, address);
1809 if (retval != ERROR_OK)
1810 return retval;
1811
1812 /* send number of requested data words */
1813 retval = xscale_send_u32(target, count);
1814 if (retval != ERROR_OK)
1815 return retval;
1816
1817 /* receive data from target (count times 32-bit words in host endianness) */
1818 buf32 = malloc(4 * count);
1819 retval = xscale_receive(target, buf32, count);
1820 if (retval != ERROR_OK) {
1821 free(buf32);
1822 return retval;
1823 }
1824
1825 /* extract data from host-endian buffer into byte stream */
1826 for (i = 0; i < count; i++) {
1827 switch (size) {
1828 case 4:
1829 target_buffer_set_u32(target, buffer, buf32[i]);
1830 buffer += 4;
1831 break;
1832 case 2:
1833 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1834 buffer += 2;
1835 break;
1836 case 1:
1837 *buffer++ = buf32[i] & 0xff;
1838 break;
1839 default:
1840 LOG_ERROR("invalid read size");
1841 return ERROR_COMMAND_SYNTAX_ERROR;
1842 }
1843 }
1844
1845 free(buf32);
1846
1847 /* examine DCSR, to see if Sticky Abort (SA) got set */
1848 retval = xscale_read_dcsr(target);
1849 if (retval != ERROR_OK)
1850 return retval;
1851 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1852 /* clear SA bit */
1853 retval = xscale_send_u32(target, 0x60);
1854 if (retval != ERROR_OK)
1855 return retval;
1856
1857 return ERROR_TARGET_DATA_ABORT;
1858 }
1859
1860 return ERROR_OK;
1861 }
1862
1863 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1864 uint32_t size, uint32_t count, uint8_t *buffer)
1865 {
1866 struct xscale_common *xscale = target_to_xscale(target);
1867
1868 /* with MMU inactive, there are only physical addresses */
1869 if (!xscale->armv4_5_mmu.mmu_enabled)
1870 return xscale_read_memory(target, address, size, count, buffer);
1871
1872 /** \todo: provide a non-stub implementation of this routine. */
1873 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1874 target_name(target), __func__);
1875 return ERROR_FAIL;
1876 }
1877
1878 static int xscale_write_memory(struct target *target, uint32_t address,
1879 uint32_t size, uint32_t count, const uint8_t *buffer)
1880 {
1881 struct xscale_common *xscale = target_to_xscale(target);
1882 int retval;
1883
1884 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1885 address,
1886 size,
1887 count);
1888
1889 if (target->state != TARGET_HALTED) {
1890 LOG_WARNING("target not halted");
1891 return ERROR_TARGET_NOT_HALTED;
1892 }
1893
1894 /* sanitize arguments */
1895 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1896 return ERROR_COMMAND_SYNTAX_ERROR;
1897
1898 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1899 return ERROR_TARGET_UNALIGNED_ACCESS;
1900
1901 /* send memory write request (command 0x2n, n: access size) */
1902 retval = xscale_send_u32(target, 0x20 | size);
1903 if (retval != ERROR_OK)
1904 return retval;
1905
1906 /* send base address for read request */
1907 retval = xscale_send_u32(target, address);
1908 if (retval != ERROR_OK)
1909 return retval;
1910
1911 /* send number of requested data words to be written*/
1912 retval = xscale_send_u32(target, count);
1913 if (retval != ERROR_OK)
1914 return retval;
1915
1916 /* extract data from host-endian buffer into byte stream */
1917 #if 0
1918 for (i = 0; i < count; i++) {
1919 switch (size) {
1920 case 4:
1921 value = target_buffer_get_u32(target, buffer);
1922 xscale_send_u32(target, value);
1923 buffer += 4;
1924 break;
1925 case 2:
1926 value = target_buffer_get_u16(target, buffer);
1927 xscale_send_u32(target, value);
1928 buffer += 2;
1929 break;
1930 case 1:
1931 value = *buffer;
1932 xscale_send_u32(target, value);
1933 buffer += 1;
1934 break;
1935 default:
1936 LOG_ERROR("should never get here");
1937 exit(-1);
1938 }
1939 }
1940 #endif
1941 retval = xscale_send(target, buffer, count, size);
1942 if (retval != ERROR_OK)
1943 return retval;
1944
1945 /* examine DCSR, to see if Sticky Abort (SA) got set */
1946 retval = xscale_read_dcsr(target);
1947 if (retval != ERROR_OK)
1948 return retval;
1949 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1950 /* clear SA bit */
1951 retval = xscale_send_u32(target, 0x60);
1952 if (retval != ERROR_OK)
1953 return retval;
1954
1955 LOG_ERROR("data abort writing memory");
1956 return ERROR_TARGET_DATA_ABORT;
1957 }
1958
1959 return ERROR_OK;
1960 }
1961
1962 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1963 uint32_t size, uint32_t count, const uint8_t *buffer)
1964 {
1965 struct xscale_common *xscale = target_to_xscale(target);
1966
1967 /* with MMU inactive, there are only physical addresses */
1968 if (!xscale->armv4_5_mmu.mmu_enabled)
1969 return xscale_write_memory(target, address, size, count, buffer);
1970
1971 /** \todo: provide a non-stub implementation of this routine. */
1972 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1973 target_name(target), __func__);
1974 return ERROR_FAIL;
1975 }
1976
1977 static int xscale_get_ttb(struct target *target, uint32_t *result)
1978 {
1979 struct xscale_common *xscale = target_to_xscale(target);
1980 uint32_t ttb;
1981 int retval;
1982
1983 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1984 if (retval != ERROR_OK)
1985 return retval;
1986 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1987
1988 *result = ttb;
1989
1990 return ERROR_OK;
1991 }
1992
/* Disable the MMU and/or caches via the CP15 control register.
 *
 * Before turning off the data cache it is cleaned (command 0x50 with the
 * reserved cache-clean address) and then invalidated (0x51), so dirty
 * lines are written back first; the instruction cache is invalidated
 * (0x52) before being disabled.  Command 0x53 (cpwait) is issued last to
 * let the CP15 write take effect. */
static int xscale_disable_mmu_caches(struct target *target, int mmu,
    int d_u_cache, int i_cache)
{
    struct xscale_common *xscale = target_to_xscale(target);
    uint32_t cp15_control;
    int retval;

    /* read cp15 control register */
    retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
    if (retval != ERROR_OK)
        return retval;
    cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

    /* clear M (bit 0) to disable the MMU */
    if (mmu)
        cp15_control &= ~0x1U;

    if (d_u_cache) {
        /* clean DCache */
        retval = xscale_send_u32(target, 0x50);
        if (retval != ERROR_OK)
            return retval;
        retval = xscale_send_u32(target, xscale->cache_clean_address);
        if (retval != ERROR_OK)
            return retval;

        /* invalidate DCache */
        retval = xscale_send_u32(target, 0x51);
        if (retval != ERROR_OK)
            return retval;

        /* clear C (bit 2) to disable the data/unified cache */
        cp15_control &= ~0x4U;
    }

    if (i_cache) {
        /* invalidate ICache */
        retval = xscale_send_u32(target, 0x52);
        if (retval != ERROR_OK)
            return retval;
        /* clear I (bit 12) to disable the instruction cache */
        cp15_control &= ~0x1000U;
    }

    /* write new cp15 control register */
    retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
    if (retval != ERROR_OK)
        return retval;

    /* execute cpwait to ensure outstanding operations complete */
    retval = xscale_send_u32(target, 0x53);
    return retval;
}
2043
2044 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2045 int d_u_cache, int i_cache)
2046 {
2047 struct xscale_common *xscale = target_to_xscale(target);
2048 uint32_t cp15_control;
2049 int retval;
2050
2051 /* read cp15 control register */
2052 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2053 if (retval != ERROR_OK)
2054 return retval;
2055 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2056
2057 if (mmu)
2058 cp15_control |= 0x1U;
2059
2060 if (d_u_cache)
2061 cp15_control |= 0x4U;
2062
2063 if (i_cache)
2064 cp15_control |= 0x1000U;
2065
2066 /* write new cp15 control register */
2067 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2068 if (retval != ERROR_OK)
2069 return retval;
2070
2071 /* execute cpwait to ensure outstanding operations complete */
2072 retval = xscale_send_u32(target, 0x53);
2073 return retval;
2074 }
2075
/* Install a breakpoint on the (halted) target.
 *
 * BKPT_HARD: program IBCR0 or IBCR1 with address | 1 (bit 0 enables the
 * comparator); breakpoint->set records which comparator (1 or 2) so
 * unset can release it later.
 * BKPT_SOFT: save the original instruction, write the 4-byte ARM or
 * 2-byte Thumb BKPT opcode, then clean/invalidate the caches (commands
 * 0x50/0x51/0x52) so the new opcode is actually fetched. */
static int xscale_set_breakpoint(struct target *target,
    struct breakpoint *breakpoint)
{
    int retval;
    struct xscale_common *xscale = target_to_xscale(target);

    if (target->state != TARGET_HALTED) {
        LOG_WARNING("target not halted");
        return ERROR_TARGET_NOT_HALTED;
    }

    if (breakpoint->set) {
        LOG_WARNING("breakpoint already set");
        return ERROR_OK;
    }

    if (breakpoint->type == BKPT_HARD) {
        uint32_t value = breakpoint->address | 1;
        if (!xscale->ibcr0_used) {
            xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
            xscale->ibcr0_used = 1;
            breakpoint->set = 1;    /* breakpoint set on first breakpoint register */
        } else if (!xscale->ibcr1_used) {
            xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
            xscale->ibcr1_used = 1;
            breakpoint->set = 2;    /* breakpoint set on second breakpoint register */
        } else {/* bug: availability previously verified in xscale_add_breakpoint() */
            LOG_ERROR("BUG: no hardware comparator available");
            return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
        }
    } else if (breakpoint->type == BKPT_SOFT) {
        if (breakpoint->length == 4) {
            /* keep the original instruction in target endianness */
            retval = target_read_memory(target, breakpoint->address, 4, 1,
                    breakpoint->orig_instr);
            if (retval != ERROR_OK)
                return retval;
            /* write the bkpt instruction in target endianness
             *(arm7_9->arm_bkpt is host endian) */
            retval = target_write_u32(target, breakpoint->address,
                    xscale->arm_bkpt);
            if (retval != ERROR_OK)
                return retval;
        } else {
            /* keep the original instruction in target endianness */
            retval = target_read_memory(target, breakpoint->address, 2, 1,
                    breakpoint->orig_instr);
            if (retval != ERROR_OK)
                return retval;
            /* write the bkpt instruction in target endianness
             *(arm7_9->arm_bkpt is host endian) */
            retval = target_write_u16(target, breakpoint->address,
                    xscale->thumb_bkpt);
            if (retval != ERROR_OK)
                return retval;
        }
        breakpoint->set = 1;

        xscale_send_u32(target, 0x50);  /* clean dcache */
        xscale_send_u32(target, xscale->cache_clean_address);
        xscale_send_u32(target, 0x51);  /* invalidate dcache */
        xscale_send_u32(target, 0x52);  /* invalidate icache and flush fetch buffers */
    }

    return ERROR_OK;
}
2142
2143 static int xscale_add_breakpoint(struct target *target,
2144 struct breakpoint *breakpoint)
2145 {
2146 struct xscale_common *xscale = target_to_xscale(target);
2147
2148 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1)) {
2149 LOG_ERROR("no breakpoint unit available for hardware breakpoint");
2150 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2151 }
2152
2153 if ((breakpoint->length != 2) && (breakpoint->length != 4)) {
2154 LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2155 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2156 }
2157
2158 if (breakpoint->type == BKPT_HARD)
2159 xscale->ibcr_available--;
2160
2161 return xscale_set_breakpoint(target, breakpoint);
2162 }
2163
2164 static int xscale_unset_breakpoint(struct target *target,
2165 struct breakpoint *breakpoint)
2166 {
2167 int retval;
2168 struct xscale_common *xscale = target_to_xscale(target);
2169
2170 if (target->state != TARGET_HALTED) {
2171 LOG_WARNING("target not halted");
2172 return ERROR_TARGET_NOT_HALTED;
2173 }
2174
2175 if (!breakpoint->set) {
2176 LOG_WARNING("breakpoint not set");
2177 return ERROR_OK;
2178 }
2179
2180 if (breakpoint->type == BKPT_HARD) {
2181 if (breakpoint->set == 1) {
2182 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2183 xscale->ibcr0_used = 0;
2184 } else if (breakpoint->set == 2) {
2185 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2186 xscale->ibcr1_used = 0;
2187 }
2188 breakpoint->set = 0;
2189 } else {
2190 /* restore original instruction (kept in target endianness) */
2191 if (breakpoint->length == 4) {
2192 retval = target_write_memory(target, breakpoint->address, 4, 1,
2193 breakpoint->orig_instr);
2194 if (retval != ERROR_OK)
2195 return retval;
2196 } else {
2197 retval = target_write_memory(target, breakpoint->address, 2, 1,
2198 breakpoint->orig_instr);
2199 if (retval != ERROR_OK)
2200 return retval;
2201 }
2202 breakpoint->set = 0;
2203
2204 xscale_send_u32(target, 0x50); /* clean dcache */
2205 xscale_send_u32(target, xscale->cache_clean_address);
2206 xscale_send_u32(target, 0x51); /* invalidate dcache */
2207 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2208 }
2209
2210 return ERROR_OK;
2211 }
2212
2213 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2214 {
2215 struct xscale_common *xscale = target_to_xscale(target);
2216
2217 if (target->state != TARGET_HALTED) {
2218 LOG_ERROR("target not halted");
2219 return ERROR_TARGET_NOT_HALTED;
2220 }
2221
2222 if (breakpoint->set)
2223 xscale_unset_breakpoint(target, breakpoint);
2224
2225 if (breakpoint->type == BKPT_HARD)
2226 xscale->ibcr_available++;
2227
2228 return ERROR_OK;
2229 }
2230
/* Program a data watchpoint into the DBR0/DBR1 address comparators and
 * the DBCON control register.
 *
 * Single-word watchpoints use one DBR register each; a watchpoint
 * longer than 4 bytes claims BOTH registers, with DBR1 holding an
 * address mask (DBCON[M] mode).  watchpoint->set records which DBR
 * slot was used (1 or 2) for later teardown.
 */
static int xscale_set_watchpoint(struct target *target,
	struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t enable = 0;
	struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
	uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);

	if (target->state != TARGET_HALTED) {
		LOG_ERROR("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* DBCON enable field for DBR0; shifted left by 2 below if DBR1 is used */
	switch (watchpoint->rw) {
		case WPT_READ:
			enable = 0x3;
			break;
		case WPT_ACCESS:
			enable = 0x2;
			break;
		case WPT_WRITE:
			enable = 0x1;
			break;
		default:
			/* NOTE(review): execution falls through with enable == 0,
			 * so the slot is claimed but never triggers — confirm
			 * whether an error return would be more appropriate */
			LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
	}

	/* For watchpoint across more than one word, both DBR registers must
	   be enlisted, with the second used as a mask. */
	if (watchpoint->length > 4) {
		if (xscale->dbr0_used || xscale->dbr1_used) {
			LOG_ERROR("BUG: sufficient hardware comparators unavailable");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}

		/* Write mask value to DBR1, based on the length argument.
		 * Address bits ignored by the comparator are those set in mask. */
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
			watchpoint->length - 1);
		xscale->dbr1_used = 1;
		enable |= 0x100;	/* DBCON[M] */
	}

	if (!xscale->dbr0_used) {
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
		dbcon_value |= enable;
		xscale_set_reg_u32(dbcon, dbcon_value);
		watchpoint->set = 1;
		xscale->dbr0_used = 1;
	} else if (!xscale->dbr1_used) {
		/* DBR1 enable bits sit two positions higher in DBCON */
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
		dbcon_value |= enable << 2;
		xscale_set_reg_u32(dbcon, dbcon_value);
		watchpoint->set = 2;
		xscale->dbr1_used = 1;
	} else {
		/* availability was verified in xscale_add_watchpoint() */
		LOG_ERROR("BUG: no hardware comparator available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	return ERROR_OK;
}
2293
2294 static int xscale_add_watchpoint(struct target *target,
2295 struct watchpoint *watchpoint)
2296 {
2297 struct xscale_common *xscale = target_to_xscale(target);
2298
2299 if (xscale->dbr_available < 1) {
2300 LOG_ERROR("no more watchpoint registers available");
2301 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2302 }
2303
2304 if (watchpoint->value)
2305 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2306
2307 /* check that length is a power of two */
2308 for (uint32_t len = watchpoint->length; len != 1; len /= 2) {
2309 if (len % 2) {
2310 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2311 return ERROR_COMMAND_ARGUMENT_INVALID;
2312 }
2313 }
2314
2315 if (watchpoint->length == 4) { /* single word watchpoint */
2316 xscale->dbr_available--;/* one DBR reg used */
2317 return ERROR_OK;
2318 }
2319
2320 /* watchpoints across multiple words require both DBR registers */
2321 if (xscale->dbr_available < 2) {
2322 LOG_ERROR("insufficient watchpoint registers available");
2323 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2324 }
2325
2326 if (watchpoint->length > watchpoint->address) {
2327 LOG_ERROR("xscale does not support watchpoints with length "
2328 "greater than address");
2329 return ERROR_COMMAND_ARGUMENT_INVALID;
2330 }
2331
2332 xscale->dbr_available = 0;
2333 return ERROR_OK;
2334 }
2335
2336 static int xscale_unset_watchpoint(struct target *target,
2337 struct watchpoint *watchpoint)
2338 {
2339 struct xscale_common *xscale = target_to_xscale(target);
2340 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2341 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2342
2343 if (target->state != TARGET_HALTED) {
2344 LOG_WARNING("target not halted");
2345 return ERROR_TARGET_NOT_HALTED;
2346 }
2347
2348 if (!watchpoint->set) {
2349 LOG_WARNING("breakpoint not set");
2350 return ERROR_OK;
2351 }
2352
2353 if (watchpoint->set == 1) {
2354 if (watchpoint->length > 4) {
2355 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2356 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2357 } else
2358 dbcon_value &= ~0x3;
2359
2360 xscale_set_reg_u32(dbcon, dbcon_value);
2361 xscale->dbr0_used = 0;
2362 } else if (watchpoint->set == 2) {
2363 dbcon_value &= ~0xc;
2364 xscale_set_reg_u32(dbcon, dbcon_value);
2365 xscale->dbr1_used = 0;
2366 }
2367 watchpoint->set = 0;
2368
2369 return ERROR_OK;
2370 }
2371
2372 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2373 {
2374 struct xscale_common *xscale = target_to_xscale(target);
2375
2376 if (target->state != TARGET_HALTED) {
2377 LOG_ERROR("target not halted");
2378 return ERROR_TARGET_NOT_HALTED;
2379 }
2380
2381 if (watchpoint->set)
2382 xscale_unset_watchpoint(target, watchpoint);
2383
2384 if (watchpoint->length > 4)
2385 xscale->dbr_available++;/* both DBR regs now available */
2386
2387 xscale->dbr_available++;
2388
2389 return ERROR_OK;
2390 }
2391
2392 static int xscale_get_reg(struct reg *reg)
2393 {
2394 struct xscale_reg *arch_info = reg->arch_info;
2395 struct target *target = arch_info->target;
2396 struct xscale_common *xscale = target_to_xscale(target);
2397
2398 /* DCSR, TX and RX are accessible via JTAG */
2399 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2400 return xscale_read_dcsr(arch_info->target);
2401 else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2402 /* 1 = consume register content */
2403 return xscale_read_tx(arch_info->target, 1);
2404 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2405 /* can't read from RX register (host -> debug handler) */
2406 return ERROR_OK;
2407 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2408 /* can't (explicitly) read from TXRXCTRL register */
2409 return ERROR_OK;
2410 } else {/* Other DBG registers have to be transfered by the debug handler
2411 * send CP read request (command 0x40) */
2412 xscale_send_u32(target, 0x40);
2413
2414 /* send CP register number */
2415 xscale_send_u32(target, arch_info->dbg_handler_number);
2416
2417 /* read register value */
2418 xscale_read_tx(target, 1);
2419 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2420
2421 reg->dirty = 0;
2422 reg->valid = 1;
2423 }
2424
2425 return ERROR_OK;
2426 }
2427
2428 static int xscale_set_reg(struct reg *reg, uint8_t *buf)
2429 {
2430 struct xscale_reg *arch_info = reg->arch_info;
2431 struct target *target = arch_info->target;
2432 struct xscale_common *xscale = target_to_xscale(target);
2433 uint32_t value = buf_get_u32(buf, 0, 32);
2434
2435 /* DCSR, TX and RX are accessible via JTAG */
2436 if (strcmp(reg->name, "XSCALE_DCSR") == 0) {
2437 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2438 return xscale_write_dcsr(arch_info->target, -1, -1);
2439 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2440 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2441 return xscale_write_rx(arch_info->target);
2442 } else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2443 /* can't write to TX register (debug-handler -> host) */
2444 return ERROR_OK;
2445 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2446 /* can't (explicitly) write to TXRXCTRL register */
2447 return ERROR_OK;
2448 } else {/* Other DBG registers have to be transfered by the debug handler
2449 * send CP write request (command 0x41) */
2450 xscale_send_u32(target, 0x41);
2451
2452 /* send CP register number */
2453 xscale_send_u32(target, arch_info->dbg_handler_number);
2454
2455 /* send CP register value */
2456 xscale_send_u32(target, value);
2457 buf_set_u32(reg->value, 0, 32, value);
2458 }
2459
2460 return ERROR_OK;
2461 }
2462
2463 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2464 {
2465 struct xscale_common *xscale = target_to_xscale(target);
2466 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2467 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2468
2469 /* send CP write request (command 0x41) */
2470 xscale_send_u32(target, 0x41);
2471
2472 /* send CP register number */
2473 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2474
2475 /* send CP register value */
2476 xscale_send_u32(target, value);
2477 buf_set_u32(dcsr->value, 0, 32, value);
2478
2479 return ERROR_OK;
2480 }
2481
2482 static int xscale_read_trace(struct target *target)
2483 {
2484 struct xscale_common *xscale = target_to_xscale(target);
2485 struct arm *arm = &xscale->arm;
2486 struct xscale_trace_data **trace_data_p;
2487
2488 /* 258 words from debug handler
2489 * 256 trace buffer entries
2490 * 2 checkpoint addresses
2491 */
2492 uint32_t trace_buffer[258];
2493 int is_address[256];
2494 int i, j;
2495 unsigned int num_checkpoints = 0;
2496
2497 if (target->state != TARGET_HALTED) {
2498 LOG_WARNING("target must be stopped to read trace data");
2499 return ERROR_TARGET_NOT_HALTED;
2500 }
2501
2502 /* send read trace buffer command (command 0x61) */
2503 xscale_send_u32(target, 0x61);
2504
2505 /* receive trace buffer content */
2506 xscale_receive(target, trace_buffer, 258);
2507
2508 /* parse buffer backwards to identify address entries */
2509 for (i = 255; i >= 0; i--) {
2510 /* also count number of checkpointed entries */
2511 if ((trace_buffer[i] & 0xe0) == 0xc0)
2512 num_checkpoints++;
2513
2514 is_address[i] = 0;
2515 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2516 ((trace_buffer[i] & 0xf0) == 0xd0)) {
2517 if (i > 0)
2518 is_address[--i] = 1;
2519 if (i > 0)
2520 is_address[--i] = 1;
2521 if (i > 0)
2522 is_address[--i] = 1;
2523 if (i > 0)
2524 is_address[--i] = 1;
2525 }
2526 }
2527
2528
2529 /* search first non-zero entry that is not part of an address */
2530 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2531 ;
2532
2533 if (j == 256) {
2534 LOG_DEBUG("no trace data collected");
2535 return ERROR_XSCALE_NO_TRACE_DATA;
2536 }
2537
2538 /* account for possible partial address at buffer start (wrap mode only) */
2539 if (is_address[0]) { /* first entry is address; complete set of 4? */
2540 i = 1;
2541 while (i < 4)
2542 if (!is_address[i++])
2543 break;
2544 if (i < 4)
2545 j += i; /* partial address; can't use it */
2546 }
2547
2548 /* if first valid entry is indirect branch, can't use that either (no address) */
2549 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2550 j++;
2551
2552 /* walk linked list to terminating entry */
2553 for (trace_data_p = &xscale->trace.data; *trace_data_p;
2554 trace_data_p = &(*trace_data_p)->next)
2555 ;
2556
2557 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2558 (*trace_data_p)->next = NULL;
2559 (*trace_data_p)->chkpt0 = trace_buffer[256];
2560 (*trace_data_p)->chkpt1 = trace_buffer[257];
2561 (*trace_data_p)->last_instruction = buf_get_u32(arm->pc->value, 0, 32);
2562 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2563 (*trace_data_p)->depth = 256 - j;
2564 (*trace_data_p)->num_checkpoints = num_checkpoints;
2565
2566 for (i = j; i < 256; i++) {
2567 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2568 if (is_address[i])
2569 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2570 else
2571 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2572 }
2573
2574 return ERROR_OK;
2575 }
2576
/* Disassemble the instruction at 'pc' using the opcode stored in the
 * loaded trace image (NOT target memory).
 *
 * Returns ERROR_TRACE_IMAGE_UNAVAILABLE when no image is loaded and
 * ERROR_TRACE_INSTRUCTION_UNAVAILABLE when 'pc' is outside every image
 * section or the section read fails.  The opcode width (4 or 2 bytes)
 * follows xscale->trace.core_state (ARM vs Thumb).
 */
static int xscale_read_instruction(struct target *target, uint32_t pc,
	struct arm_instruction *instruction)
{
	struct xscale_common *const xscale = target_to_xscale(target);
	int i;
	int section = -1;
	size_t size_read;
	uint32_t opcode;
	int retval;

	if (!xscale->trace.image)
		return ERROR_TRACE_IMAGE_UNAVAILABLE;

	/* search for the section the current instruction belongs to */
	for (i = 0; i < xscale->trace.image->num_sections; i++) {
		if ((xscale->trace.image->sections[i].base_address <= pc) &&
			(xscale->trace.image->sections[i].base_address +
			xscale->trace.image->sections[i].size > pc)) {
			section = i;
			break;
		}
	}

	if (section == -1) {
		/* current instruction couldn't be found in the image */
		return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
	}

	if (xscale->trace.core_state == ARM_STATE_ARM) {
		/* 32-bit ARM opcode; offset is relative to the section base */
		uint8_t buf[4];
		retval = image_read_section(xscale->trace.image, section,
				pc - xscale->trace.image->sections[section].base_address,
				4, buf, &size_read);
		if (retval != ERROR_OK) {
			LOG_ERROR("error while reading instruction");
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u32(target, buf);
		arm_evaluate_opcode(opcode, pc, instruction);
	} else if (xscale->trace.core_state == ARM_STATE_THUMB) {
		/* 16-bit Thumb opcode */
		uint8_t buf[2];
		retval = image_read_section(xscale->trace.image, section,
				pc - xscale->trace.image->sections[section].base_address,
				2, buf, &size_read);
		if (retval != ERROR_OK) {
			LOG_ERROR("error while reading instruction");
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u16(target, buf);
		thumb_evaluate_opcode(opcode, pc, instruction);
	} else {
		/* NOTE(review): exit() in a library path is drastic; any other
		 * core_state here indicates internal state corruption */
		LOG_ERROR("BUG: unknown core state encountered");
		exit(-1);
	}

	return ERROR_OK;
}
2634
2635 /* Extract address encoded into trace data.
2636 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2637 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2638 int i, uint32_t *target)
2639 {
2640 /* if there are less than four entries prior to the indirect branch message
2641 * we can't extract the address */
2642 if (i < 4)
2643 *target = 0;
2644 else {
2645 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2646 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2647 }
2648 }
2649
2650 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2651 struct arm_instruction *instruction,
2652 struct command_context *cmd_ctx)
2653 {
2654 int retval = xscale_read_instruction(target, pc, instruction);
2655 if (retval == ERROR_OK)
2656 command_print(cmd_ctx, "%s", instruction->text);
2657 else
2658 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2659 }
2660
2661 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2662 {
2663 struct xscale_common *xscale = target_to_xscale(target);
2664 struct xscale_trace_data *trace_data = xscale->trace.data;
2665 int i, retval;
2666 uint32_t breakpoint_pc;
2667 struct arm_instruction instruction;
2668 uint32_t current_pc = 0;/* initialized when address determined */
2669
2670 if (!xscale->trace.image)
2671 LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
2672
2673 /* loop for each trace buffer that was loaded from target */
2674 while (trace_data) {
2675 int chkpt = 0; /* incremented as checkpointed entries found */
2676 int j;
2677
2678 /* FIXME: set this to correct mode when trace buffer is first enabled */
2679 xscale->trace.core_state = ARM_STATE_ARM;
2680
2681 /* loop for each entry in this trace buffer */
2682 for (i = 0; i < trace_data->depth; i++) {
2683 int exception = 0;
2684 uint32_t chkpt_reg = 0x0;
2685 uint32_t branch_target = 0;
2686 int count;
2687
2688 /* trace entry type is upper nybble of 'message byte' */
2689 int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
2690
2691 /* Target addresses of indirect branches are written into buffer
2692 * before the message byte representing the branch. Skip past it */
2693 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2694 continue;
2695
2696 switch (trace_msg_type) {
2697 case 0: /* Exceptions */
2698 case 1:
2699 case 2:
2700 case 3:
2701 case 4:
2702 case 5:
2703 case 6:
2704 case 7:
2705 exception = (trace_data->entries[i].data & 0x70) >> 4;
2706
2707 /* FIXME: vector table may be at ffff0000 */
2708 branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
2709 break;
2710
2711 case 8: /* Direct Branch */
2712 break;
2713
2714 case 9: /* Indirect Branch */
2715 xscale_branch_address(trace_data, i, &branch_target);
2716 break;
2717
2718 case 13: /* Checkpointed Indirect Branch */
2719 xscale_branch_address(trace_data, i, &branch_target);
2720 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2721 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is
2722 *oldest */
2723 else
2724 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and
2725 *newest */
2726
2727 chkpt++;
2728 break;
2729
2730 case 12: /* Checkpointed Direct Branch */
2731 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2732 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is
2733 *oldest */
2734 else
2735 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and
2736 *newest */
2737
2738 /* if no current_pc, checkpoint will be starting point */
2739 if (current_pc == 0)
2740 branch_target = chkpt_reg;
2741
2742 chkpt++;
2743 break;
2744
2745 case 15:/* Roll-over */
2746 break;
2747
2748 default:/* Reserved */
2749 LOG_WARNING("trace is suspect: invalid trace message byte");
2750 continue;
2751
2752 }
2753
2754 /* If we don't have the current_pc yet, but we did get the branch target
2755 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
2756 * then we can start displaying instructions at the next iteration, with
2757 * branch_target as the starting point.
2758 */
2759 if (current_pc == 0) {
2760 current_pc = branch_target; /* remains 0 unless branch_target *obtained */
2761 continue;
2762 }
2763
2764 /* We have current_pc. Read and display the instructions from the image.
2765 * First, display count instructions (lower nybble of message byte). */
2766 count = trace_data->entries[i].data & 0x0f;
2767 for (j = 0; j < count; j++) {
2768 xscale_display_instruction(target, current_pc, &instruction,
2769 cmd_ctx);
2770 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2771 }
2772
2773 /* An additional instruction is implicitly added to count for
2774 * rollover and some exceptions: undef, swi, prefetch abort. */
2775 if ((trace_msg_type == 15) || (exception > 0 && exception < 4)) {
2776 xscale_display_instruction(target, current_pc, &instruction,
2777 cmd_ctx);
2778 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2779 }
2780
2781 if (trace_msg_type == 15) /* rollover */
2782 continue;
2783
2784 if (exception) {
2785 command_print(cmd_ctx, "--- exception %i ---", exception);
2786 continue;
2787 }
2788
2789 /* not exception or rollover; next instruction is a branch and is
2790 * not included in the count */
2791 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2792
2793 /* for direct branches, extract branch destination from instruction */
2794 if ((trace_msg_type == 8) || (trace_msg_type == 12)) {
2795 retval = xscale_read_instruction(target, current_pc, &instruction);
2796 if (retval == ERROR_OK)
2797 current_pc = instruction.info.b_bl_bx_blx.target_address;
2798 else
2799 current_pc = 0; /* branch destination unknown */
2800
2801 /* direct branch w/ checkpoint; can also get from checkpoint reg */
2802 if (trace_msg_type == 12) {
2803 if (current_pc == 0)
2804 current_pc = chkpt_reg;
2805 else if (current_pc != chkpt_reg) /* sanity check */
2806 LOG_WARNING("trace is suspect: checkpoint register "
2807 "inconsistent with adddress from image");
2808 }
2809
2810 if (current_pc == 0)
2811 command_print(cmd_ctx, "address unknown");
2812
2813 continue;
2814 }
2815
2816 /* indirect branch; the branch destination was read from trace buffer */
2817 if ((trace_msg_type == 9) || (trace_msg_type == 13)) {
2818 current_pc = branch_target;
2819
2820 /* sanity check (checkpoint reg is redundant) */
2821 if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
2822 LOG_WARNING("trace is suspect: checkpoint register "
2823 "inconsistent with address from trace buffer");
2824 }
2825
2826 } /* END: for (i = 0; i < trace_data->depth; i++) */
2827
2828 breakpoint_pc = trace_data->last_instruction; /* used below */
2829 trace_data = trace_data->next;
2830
2831 } /* END: while (trace_data) */
2832
2833 /* Finally... display all instructions up to the value of the pc when the
2834 * debug break occurred (saved when trace data was collected from target).
2835 * This is necessary because the trace only records execution branches and 16
2836 * consecutive instructions (rollovers), so last few typically missed.
2837 */
2838 if (current_pc == 0)
2839 return ERROR_OK;/* current_pc was never found */
2840
2841 /* how many instructions remaining? */
2842 int gap_count = (breakpoint_pc - current_pc) /
2843 (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
2844
2845 /* should never be negative or over 16, but verify */
2846 if (gap_count < 0 || gap_count > 16) {
2847 LOG_WARNING("trace is suspect: excessive gap at end of trace");
2848 return ERROR_OK;/* bail; large number or negative value no good */
2849 }
2850
2851 /* display remaining instructions */
2852 for (i = 0; i < gap_count; i++) {
2853 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2854 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2855 }
2856
2857 return ERROR_OK;
2858 }
2859
/* Accessor callbacks shared by every entry in the XScale-specific
 * register cache built in xscale_build_reg_cache(). */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2864
2865 static void xscale_build_reg_cache(struct target *target)
2866 {
2867 struct xscale_common *xscale = target_to_xscale(target);
2868 struct arm *arm = &xscale->arm;
2869 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2870 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2871 int i;
2872 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2873
2874 (*cache_p) = arm_build_reg_cache(target, arm);
2875
2876 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2877 cache_p = &(*cache_p)->next;
2878
2879 /* fill in values for the xscale reg cache */
2880 (*cache_p)->name = "XScale registers";
2881 (*cache_p)->next = NULL;
2882 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2883 (*cache_p)->num_regs = num_regs;
2884
2885 for (i = 0; i < num_regs; i++) {
2886 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2887 (*cache_p)->reg_list[i].value = calloc(4, 1);
2888 (*cache_p)->reg_list[i].dirty = 0;
2889 (*cache_p)->reg_list[i].valid = 0;
2890 (*cache_p)->reg_list[i].size = 32;
2891 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2892 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2893 arch_info[i] = xscale_reg_arch_info[i];
2894 arch_info[i].target = target;
2895 }
2896
2897 xscale->reg_cache = (*cache_p);
2898 }
2899
/* target_type 'init_target' callback: build the register caches once
 * the target object and command context exist. */
static int xscale_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2906
/* One-time initialization of the XScale target state: variant
 * detection, debug handler placement, vector table stubs, comparator
 * bookkeeping, trace state, and the ARMv4/5 core + MMU callbacks. */
static int xscale_init_arch_info(struct target *target,
	struct xscale_common *xscale, struct jtag_tap *tap)
{
	struct arm *arm;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	arm = &xscale->arm;

	/* store architecture specific data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* PXA3xx with 11 bit IR shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler
	 * (PC-relative branch offsets, accounting for the 8-byte pipeline) */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* non-reset vectors default to a branch-to-self placeholder */
	for (i = 1; i <= 7; i++) {
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction-address and two data-address comparators, all free */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
		target_name(target));

	/* software breakpoint opcodes for ARM and Thumb state */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	/* trace subsystem starts out empty and disabled */
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale->trace.buffer_fill = 0;
	xscale->trace.fill_counter = 0;

	/* prepare ARMv4/5 specific information */
	arm->arch_info = xscale;
	arm->core_type = ARM_MODE_ANY;
	arm->read_core_reg = xscale_read_core_reg;
	arm->write_core_reg = xscale_write_core_reg;
	arm->full_context = xscale_full_context;

	arm_init_arch_info(target, arm);

	/* MMU/cache helper callbacks; ctype -1 = cache type not probed yet */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
2996
2997 static int xscale_target_create(struct target *target, Jim_Interp *interp)
2998 {
2999 struct xscale_common *xscale;
3000
3001 if (sizeof xscale_debug_handler > 0x800) {
3002 LOG_ERROR("debug_handler.bin: larger than 2kb");
3003 return ERROR_FAIL;
3004 }
3005
3006 xscale = calloc(1, sizeof(*xscale));
3007 if (!xscale)
3008 return ERROR_FAIL;
3009
3010 return xscale_init_arch_info(target, xscale, target->tap);
3011 }
3012
3013 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3014 {
3015 struct target *target = NULL;
3016 struct xscale_common *xscale;
3017 int retval;
3018 uint32_t handler_address;
3019
3020 if (CMD_ARGC < 2)
3021 return ERROR_COMMAND_SYNTAX_ERROR;
3022
3023 target = get_target(CMD_ARGV[0]);
3024 if (target == NULL) {
3025 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3026 return ERROR_FAIL;
3027 }
3028
3029 xscale = target_to_xscale(target);
3030 retval = xscale_verify_pointer(CMD_CTX, xscale);
3031 if (retval != ERROR_OK)
3032 return retval;
3033
3034 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3035
3036 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3037 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3038 xscale->handler_address = handler_address;
3039 else {
3040 LOG_ERROR(
3041 "xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3042 return ERROR_FAIL;
3043 }
3044
3045 return ERROR_OK;
3046 }
3047
3048 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3049 {
3050 struct target *target = NULL;
3051 struct xscale_common *xscale;
3052 int retval;
3053 uint32_t cache_clean_address;
3054
3055 if (CMD_ARGC < 2)
3056 return ERROR_COMMAND_SYNTAX_ERROR;
3057
3058 target = get_target(CMD_ARGV[0]);
3059 if (target == NULL) {
3060 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3061 return ERROR_FAIL;
3062 }
3063 xscale = target_to_xscale(target);
3064 retval = xscale_verify_pointer(CMD_CTX, xscale);
3065 if (retval != ERROR_OK)
3066 return retval;
3067
3068 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3069
3070 if (cache_clean_address & 0xffff)
3071 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3072 else
3073 xscale->cache_clean_address = cache_clean_address;
3074
3075 return ERROR_OK;
3076 }
3077
3078 COMMAND_HANDLER(xscale_handle_cache_info_command)
3079 {
3080 struct target *target = get_current_target(CMD_CTX);
3081 struct xscale_common *xscale = target_to_xscale(target);
3082 int retval;
3083
3084 retval = xscale_verify_pointer(CMD_CTX, xscale);
3085 if (retval != ERROR_OK)
3086 return retval;
3087
3088 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3089 }
3090
3091 static int xscale_virt2phys(struct target *target,
3092 uint32_t virtual, uint32_t *physical)
3093 {
3094 struct xscale_common *xscale = target_to_xscale(target);
3095 uint32_t cb;
3096
3097 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3098 LOG_ERROR(xscale_not);
3099 return ERROR_TARGET_INVALID;
3100 }
3101
3102 uint32_t ret;
3103 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3104 virtual, &cb, &ret);
3105 if (retval != ERROR_OK)
3106 return retval;
3107 *physical = ret;
3108 return ERROR_OK;
3109 }
3110
3111 static int xscale_mmu(struct target *target, int *enabled)
3112 {
3113 struct xscale_common *xscale = target_to_xscale(target);
3114
3115 if (target->state != TARGET_HALTED) {
3116 LOG_ERROR("Target not halted");
3117 return ERROR_TARGET_INVALID;
3118 }
3119 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3120 return ERROR_OK;
3121 }
3122
3123 COMMAND_HANDLER(xscale_handle_mmu_command)
3124 {
3125 struct target *target = get_current_target(CMD_CTX);
3126 struct xscale_common *xscale = target_to_xscale(target);
3127 int retval;
3128
3129 retval = xscale_verify_pointer(CMD_CTX, xscale);
3130 if (retval != ERROR_OK)
3131 return retval;
3132
3133 if (target->state != TARGET_HALTED) {
3134 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3135 return ERROR_OK;
3136 }
3137
3138 if (CMD_ARGC >= 1) {
3139 bool enable;
3140 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3141 if (enable)
3142 xscale_enable_mmu_caches(target, 1, 0, 0);
3143 else
3144 xscale_disable_mmu_caches(target, 1, 0, 0);
3145 xscale->armv4_5_mmu.mmu_enabled = enable;
3146 }
3147
3148 command_print(CMD_CTX, "mmu %s",
3149 (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3150
3151 return ERROR_OK;
3152 }
3153
3154 COMMAND_HANDLER(xscale_handle_idcache_command)
3155 {
3156 struct target *target = get_current_target(CMD_CTX);
3157 struct xscale_common *xscale = target_to_xscale(target);
3158
3159 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3160 if (retval != ERROR_OK)
3161 return retval;
3162
3163 if (target->state != TARGET_HALTED) {
3164 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3165 return ERROR_OK;
3166 }
3167
3168 bool icache = false;
3169 if (strcmp(CMD_NAME, "icache") == 0)
3170 icache = true;
3171 if (CMD_ARGC >= 1) {
3172 bool enable;
3173 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3174 if (icache) {
3175 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3176 if (enable)
3177 xscale_enable_mmu_caches(target, 0, 0, 1);
3178 else
3179 xscale_disable_mmu_caches(target, 0, 0, 1);
3180 } else {
3181 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3182 if (enable)
3183 xscale_enable_mmu_caches(target, 0, 1, 0);
3184 else
3185 xscale_disable_mmu_caches(target, 0, 1, 0);
3186 }
3187 }
3188
3189 bool enabled = icache ?
3190 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3191 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3192 const char *msg = enabled ? "enabled" : "disabled";
3193 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3194
3195 return ERROR_OK;
3196 }
3197
/* Mapping of exception-vector names to their DCSR trap-enable bits;
 * consumed by the "xscale vector_catch" command handler below. */
static const struct {
	char name[15];
	unsigned mask;
} vec_ids[] = {
	{ "fiq", DCSR_TF, },
	{ "irq", DCSR_TI, },
	{ "dabt", DCSR_TD, },
	{ "pabt", DCSR_TA, },
	{ "swi", DCSR_TS, },
	{ "undef", DCSR_TU, },
	{ "reset", DCSR_TR, },
};
3210
3211 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3212 {
3213 struct target *target = get_current_target(CMD_CTX);
3214 struct xscale_common *xscale = target_to_xscale(target);
3215 int retval;
3216 uint32_t dcsr_value;
3217 uint32_t catch = 0;
3218 struct reg *dcsr_reg = &xscale->reg_cache->reg_list[XSCALE_DCSR];
3219
3220 retval = xscale_verify_pointer(CMD_CTX, xscale);
3221 if (retval != ERROR_OK)
3222 return retval;
3223
3224 dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
3225 if (CMD_ARGC > 0) {
3226 if (CMD_ARGC == 1) {
3227 if (strcmp(CMD_ARGV[0], "all") == 0) {
3228 catch = DCSR_TRAP_MASK;
3229 CMD_ARGC--;
3230 } else if (strcmp(CMD_ARGV[0], "none") == 0) {
3231 catch = 0;
3232 CMD_ARGC--;
3233 }
3234 }
3235 while (CMD_ARGC-- > 0) {
3236 unsigned i;
3237 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
3238 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name))
3239 continue;
3240 catch |= vec_ids[i].mask;
3241 break;
3242 }
3243 if (i == ARRAY_SIZE(vec_ids)) {
3244 LOG_ERROR("No vector '%s'", CMD_ARGV[CMD_ARGC]);
3245 return ERROR_COMMAND_SYNTAX_ERROR;
3246 }
3247 }
3248 buf_set_u32(dcsr_reg->value, 0, 32,
3249 (buf_get_u32(dcsr_reg->value, 0, 32) & ~DCSR_TRAP_MASK) | catch);
3250 xscale_write_dcsr(target, -1, -1);
3251 }
3252
3253 dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
3254 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
3255 command_print(CMD_CTX, "%15s: %s", vec_ids[i].name,
3256 (dcsr_value & vec_ids[i].mask) ? "catch" : "ignore");
3257 }
3258
3259 return ERROR_OK;
3260 }
3261
3262
3263 COMMAND_HANDLER(xscale_handle_vector_table_command)
3264 {
3265 struct target *target = get_current_target(CMD_CTX);
3266 struct xscale_common *xscale = target_to_xscale(target);
3267 int err = 0;
3268 int retval;
3269
3270 retval = xscale_verify_pointer(CMD_CTX, xscale);
3271 if (retval != ERROR_OK)
3272 return retval;
3273
3274 if (CMD_ARGC == 0) { /* print current settings */
3275 int idx;
3276
3277 command_print(CMD_CTX, "active user-set static vectors:");
3278 for (idx = 1; idx < 8; idx++)
3279 if (xscale->static_low_vectors_set & (1 << idx))
3280 command_print(CMD_CTX,
3281 "low %d: 0x%" PRIx32,
3282 idx,
3283 xscale->static_low_vectors[idx]);
3284 for (idx = 1; idx < 8; idx++)
3285 if (xscale->static_high_vectors_set & (1 << idx))
3286 command_print(CMD_CTX,
3287 "high %d: 0x%" PRIx32,
3288 idx,
3289 xscale->static_high_vectors[idx]);
3290 return ERROR_OK;
3291 }
3292
3293 if (CMD_ARGC != 3)
3294 err = 1;
3295 else {
3296 int idx;
3297 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3298 uint32_t vec;
3299 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3300
3301 if (idx < 1 || idx >= 8)
3302 err = 1;
3303
3304 if (!err && strcmp(CMD_ARGV[0], "low") == 0) {
3305 xscale->static_low_vectors_set |= (1<<idx);
3306 xscale->static_low_vectors[idx] = vec;
3307 } else if (!err && (strcmp(CMD_ARGV[0], "high") == 0)) {
3308 xscale->static_high_vectors_set |= (1<<idx);
3309 xscale->static_high_vectors[idx] = vec;
3310 } else
3311 err = 1;
3312 }
3313
3314 if (err)
3315 return ERROR_COMMAND_SYNTAX_ERROR;
3316
3317 return ERROR_OK;
3318 }
3319
3320
3321 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3322 {
3323 struct target *target = get_current_target(CMD_CTX);
3324 struct xscale_common *xscale = target_to_xscale(target);
3325 uint32_t dcsr_value;
3326 int retval;
3327
3328 retval = xscale_verify_pointer(CMD_CTX, xscale);
3329 if (retval != ERROR_OK)
3330 return retval;
3331
3332 if (target->state != TARGET_HALTED) {
3333 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3334 return ERROR_OK;
3335 }
3336
3337 if (CMD_ARGC >= 1) {
3338 if (strcmp("enable", CMD_ARGV[0]) == 0)
3339 xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
3340 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3341 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3342 else
3343 return ERROR_COMMAND_SYNTAX_ERROR;
3344 }
3345
3346 if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3347 if (strcmp("fill", CMD_ARGV[1]) == 0) {
3348 int buffcount = 1; /* default */
3349 if (CMD_ARGC >= 3)
3350 COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
3351 if (buffcount < 1) { /* invalid */
3352 command_print(CMD_CTX, "fill buffer count must be > 0");
3353 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3354 return ERROR_COMMAND_SYNTAX_ERROR;
3355 }
3356 xscale->trace.buffer_fill = buffcount;
3357 xscale->trace.mode = XSCALE_TRACE_FILL;
3358 } else if (strcmp("wrap", CMD_ARGV[1]) == 0)
3359 xscale->trace.mode = XSCALE_TRACE_WRAP;
3360 else {
3361 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3362 return ERROR_COMMAND_SYNTAX_ERROR;
3363 }
3364 }
3365
3366 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3367 char fill_string[12];
3368 sprintf(fill_string, "fill %d", xscale->trace.buffer_fill);
3369 command_print(CMD_CTX, "trace buffer enabled (%s)",
3370 (xscale->trace.mode == XSCALE_TRACE_FILL)
3371 ? fill_string : "wrap");
3372 } else
3373 command_print(CMD_CTX, "trace buffer disabled");
3374
3375 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3376 if (xscale->trace.mode == XSCALE_TRACE_FILL)
3377 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3378 else
3379 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3380
3381 return ERROR_OK;
3382 }
3383
3384 COMMAND_HANDLER(xscale_handle_trace_image_command)
3385 {
3386 struct target *target = get_current_target(CMD_CTX);
3387 struct xscale_common *xscale = target_to_xscale(target);
3388 int retval;
3389
3390 if (CMD_ARGC < 1)
3391 return ERROR_COMMAND_SYNTAX_ERROR;
3392
3393 retval = xscale_verify_pointer(CMD_CTX, xscale);
3394 if (retval != ERROR_OK)
3395 return retval;
3396
3397 if (xscale->trace.image) {
3398 image_close(xscale->trace.image);
3399 free(xscale->trace.image);
3400 command_print(CMD_CTX, "previously loaded image found and closed");
3401 }
3402
3403 xscale->trace.image = malloc(sizeof(struct image));
3404 xscale->trace.image->base_address_set = 0;
3405 xscale->trace.image->start_address_set = 0;
3406
3407 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3408 if (CMD_ARGC >= 2) {
3409 xscale->trace.image->base_address_set = 1;
3410 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3411 } else
3412 xscale->trace.image->base_address_set = 0;
3413
3414 if (image_open(xscale->trace.image, CMD_ARGV[0],
3415 (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK) {
3416 free(xscale->trace.image);
3417 xscale->trace.image = NULL;
3418 return ERROR_OK;
3419 }
3420
3421 return ERROR_OK;
3422 }
3423
3424 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3425 {
3426 struct target *target = get_current_target(CMD_CTX);
3427 struct xscale_common *xscale = target_to_xscale(target);
3428 struct xscale_trace_data *trace_data;
3429 struct fileio file;
3430 int retval;
3431
3432 retval = xscale_verify_pointer(CMD_CTX, xscale);
3433 if (retval != ERROR_OK)
3434 return retval;
3435
3436 if (target->state != TARGET_HALTED) {
3437 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3438 return ERROR_OK;
3439 }
3440
3441 if (CMD_ARGC < 1)
3442 return ERROR_COMMAND_SYNTAX_ERROR;
3443
3444 trace_data = xscale->trace.data;
3445
3446 if (!trace_data) {
3447 command_print(CMD_CTX, "no trace data collected");
3448 return ERROR_OK;
3449 }
3450
3451 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3452 return ERROR_OK;
3453
3454 while (trace_data) {
3455 int i;
3456
3457 fileio_write_u32(&file, trace_data->chkpt0);
3458 fileio_write_u32(&file, trace_data->chkpt1);
3459 fileio_write_u32(&file, trace_data->last_instruction);
3460 fileio_write_u32(&file, trace_data->depth);
3461
3462 for (i = 0; i < trace_data->depth; i++)
3463 fileio_write_u32(&file, trace_data->entries[i].data |
3464 ((trace_data->entries[i].type & 0xffff) << 16));
3465
3466 trace_data = trace_data->next;
3467 }
3468
3469 fileio_close(&file);
3470
3471 return ERROR_OK;
3472 }
3473
3474 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3475 {
3476 struct target *target = get_current_target(CMD_CTX);
3477 struct xscale_common *xscale = target_to_xscale(target);
3478 int retval;
3479
3480 retval = xscale_verify_pointer(CMD_CTX, xscale);
3481 if (retval != ERROR_OK)
3482 return retval;
3483
3484 xscale_analyze_trace(target, CMD_CTX);
3485
3486 return ERROR_OK;
3487 }
3488
/* "xscale cp15 register [value]": read or write a coprocessor-15
 * register through the debug handler.  With one argument the register
 * is read via the register cache; with two, a write request is sent
 * directly to the debug handler.  Target must be halted. */
COMMAND_HANDLER(xscale_handle_cp15)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}
	uint32_t reg_no = 0;
	struct reg *reg = NULL;
	if (CMD_ARGC > 0) {
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
		/*translate from xscale cp15 register no to openocd register*/
		switch (reg_no) {
			case 0:
				reg_no = XSCALE_MAINID;
				break;
			case 1:
				reg_no = XSCALE_CTRL;
				break;
			case 2:
				reg_no = XSCALE_TTB;
				break;
			case 3:
				reg_no = XSCALE_DAC;
				break;
			case 5:
				reg_no = XSCALE_FSR;
				break;
			case 6:
				reg_no = XSCALE_FAR;
				break;
			case 13:
				reg_no = XSCALE_PID;
				break;
			case 15:
				reg_no = XSCALE_CPACCESS;
				break;
			default:
				command_print(CMD_CTX, "invalid register number");
				return ERROR_COMMAND_SYNTAX_ERROR;
		}
		reg = &xscale->reg_cache->reg_list[reg_no];

	}
	if (CMD_ARGC == 1) {
		uint32_t value;

		/* read cp15 control register */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size),
			value);
	} else if (CMD_ARGC == 2) {
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);

		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);
	} else
		/* CMD_ARGC == 0 also lands here: a register number is required */
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
3568
3569 static const struct command_registration xscale_exec_command_handlers[] = {
3570 {
3571 .name = "cache_info",
3572 .handler = xscale_handle_cache_info_command,
3573 .mode = COMMAND_EXEC,
3574 .help = "display information about CPU caches",
3575 },
3576 {
3577 .name = "mmu",
3578 .handler = xscale_handle_mmu_command,
3579 .mode = COMMAND_EXEC,
3580 .help = "enable or disable the MMU",
3581 .usage = "['enable'|'disable']",
3582 },
3583 {
3584 .name = "icache",
3585 .handler = xscale_handle_idcache_command,
3586 .mode = COMMAND_EXEC,
3587 .help = "display ICache state, optionally enabling or "
3588 "disabling it",
3589 .usage = "['enable'|'disable']",
3590 },
3591 {
3592 .name = "dcache",
3593 .handler = xscale_handle_idcache_command,
3594 .mode = COMMAND_EXEC,
3595 .help = "display DCache state, optionally enabling or "
3596 "disabling it",
3597 .usage = "['enable'|'disable']",
3598 },
3599 {
3600 .name = "vector_catch",
3601 .handler = xscale_handle_vector_catch_command,
3602 .mode = COMMAND_EXEC,
3603 .help = "set or display mask of vectors "
3604 "that should trigger debug entry",
3605 .usage = "['all'|'none'|'fiq'|'irq'|'dabt'|'pabt'|'swi'|'undef'|'reset']",
3606 },
3607 {
3608 .name = "vector_table",
3609 .handler = xscale_handle_vector_table_command,
3610 .mode = COMMAND_EXEC,
3611 .help = "set vector table entry in mini-ICache, "
3612 "or display current tables",
3613 .usage = "[('high'|'low') index code]",
3614 },
3615 {
3616 .name = "trace_buffer",
3617 .handler = xscale_handle_trace_buffer_command,
3618 .mode = COMMAND_EXEC,
3619 .help = "display trace buffer status, enable or disable "
3620 "tracing, and optionally reconfigure trace mode",
3621 .usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
3622 },
3623 {
3624 .name = "dump_trace",
3625 .handler = xscale_handle_dump_trace_command,
3626 .mode = COMMAND_EXEC,
3627 .help = "dump content of trace buffer to file",
3628 .usage = "filename",
3629 },
3630 {
3631 .name = "analyze_trace",
3632 .handler = xscale_handle_analyze_trace_buffer_command,
3633 .mode = COMMAND_EXEC,
3634 .help = "analyze content of trace buffer",
3635 .usage = "",
3636 },
3637 {
3638 .name = "trace_image",
3639 .handler = xscale_handle_trace_image_command,
3640 .mode = COMMAND_EXEC,
3641 .help = "load image from file to address (default 0)",
3642 .usage = "filename [offset [filetype]]",
3643 },
3644 {
3645 .name = "cp15",
3646 .handler = xscale_handle_cp15,
3647 .mode = COMMAND_EXEC,
3648 .help = "Read or write coprocessor 15 register.",
3649 .usage = "register [value]",
3650 },
3651 COMMAND_REGISTRATION_DONE
3652 };
/* Commands usable at any time (COMMAND_ANY), plus the EXEC-only chain. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "<target> <address>",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		/* pull in the COMMAND_EXEC subcommands defined above */
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: generic ARM commands plus the
 * "xscale" command group wrapping everything above. */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.usage = "",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3686
/* Target-type vtable registering the XScale implementation with the
 * OpenOCD target framework. */
struct target_type xscale_target = {
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,

	/* generic ARM helpers are sufficient for checksums and blank checks */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)