target: improve robustness of reset command
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
25 ***************************************************************************/
26
27 #ifdef HAVE_CONFIG_H
28 #include "config.h"
29 #endif
30
31 #include "breakpoints.h"
32 #include "xscale.h"
33 #include "target_type.h"
34 #include "arm_jtag.h"
35 #include "arm_simulator.h"
36 #include "arm_disassembler.h"
37 #include <helper/time_support.h>
38 #include "register.h"
39 #include "image.h"
40 #include "arm_opcodes.h"
41 #include "armv4_5.h"
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer's Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62 /* forward declarations */
63 static int xscale_resume(struct target *, int current,
64 uint32_t address, int handle_breakpoints, int debug_execution);
65 static int xscale_debug_entry(struct target *);
66 static int xscale_restore_banked(struct target *);
67 static int xscale_get_reg(struct reg *reg);
68 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
69 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
70 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
71 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
72 static int xscale_read_trace(struct target *);
73
/* This XScale "debug handler" is loaded into the processor's
 * mini-ICache, which is 2K of code writable only via JTAG.
 * NOTE(review): xscale_debug.inc is presumably generated at build
 * time from the debug handler assembly source — verify build rules.
 */
static const uint8_t xscale_debug_handler[] = {
#include "xscale_debug.inc"
};
80
/* Display names for the XScale-specific register cache.
 * The index of each name is its slot in the cache, so this table's
 * order must stay in sync with xscale_reg_arch_info[] below and with
 * the XSCALE_* register indices (see the decade markers).
 */
static const char *const xscale_reg_list[] = {
	"XSCALE_MAINID",	/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",	/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",	/* 20 */
	"XSCALE_TXRXCTRL",
};
105
/* Per-register architecture info, parallel to xscale_reg_list[].
 * A CP15 register number of -1 marks registers that are not accessed
 * through CP15 at all but via dedicated JTAG scan chains.
 */
static const struct xscale_reg xscale_reg_arch_info[] = {
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL},	/* DCSR accessed via JTAG or SW */
	{-1, NULL},	/* TX accessed via JTAG */
	{-1, NULL},	/* RX accessed via JTAG */
	{-1, NULL},	/* TXRXCTRL implicit access via JTAG */
};
130
131 /* convenience wrapper to access XScale specific registers */
132 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
133 {
134 uint8_t buf[4];
135
136 buf_set_u32(buf, 0, 32, value);
137
138 return xscale_set_reg(reg, buf);
139 }
140
/* Error message printed when a command targets a non-XScale core. */
static const char xscale_not[] = "target is not an XScale";

/* Sanity-check the magic number to confirm @xscale really is an
 * XScale instance; prints an error and returns ERROR_TARGET_INVALID
 * otherwise, ERROR_OK on success.
 */
static int xscale_verify_pointer(struct command_context *cmd_ctx,
	struct xscale_common *xscale)
{
	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		command_print(cmd_ctx, xscale_not);
		return ERROR_TARGET_INVALID;
	}
	return ERROR_OK;
}
152
153 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
154 {
155 assert(tap != NULL);
156
157 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
158 struct scan_field field;
159 uint8_t scratch[4];
160
161 memset(&field, 0, sizeof field);
162 field.num_bits = tap->ir_length;
163 field.out_value = scratch;
164 buf_set_u32(scratch, 0, field.num_bits, new_instr);
165
166 jtag_add_ir_scan(tap, &field, end_state);
167 }
168
169 return ERROR_OK;
170 }
171
/* Read the Debug Control and Status Register through the SELDCSR
 * JTAG data register and refresh the cached DCSR value.
 *
 * The DR scan is 3 + 32 + 1 bits: a 3-bit control/status field (into
 * which hold_rst and external_debug_break are driven), the 32-bit
 * DCSR, and one trailing bit.  A second scan immediately writes the
 * value back so the driven control bits take effect.
 *
 * Returns ERROR_OK or the error from jtag_execute_queue().
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_DRPAUSE);

	/* drive reset-hold (bit 1) and external debug break (bit 2) */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	/* capture DCSR directly into the register cache's backing store */
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* cached DCSR now mirrors the hardware */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
233
234
235 static void xscale_getbuf(jtag_callback_data_t arg)
236 {
237 uint8_t *in = (uint8_t *)arg;
238 *((uint32_t *)arg) = buf_get_u32(in, 0, 32);
239 }
240
241 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
242 {
243 if (num_words == 0)
244 return ERROR_COMMAND_SYNTAX_ERROR;
245
246 struct xscale_common *xscale = target_to_xscale(target);
247 int retval = ERROR_OK;
248 tap_state_t path[3];
249 struct scan_field fields[3];
250 uint8_t *field0 = malloc(num_words * 1);
251 uint8_t field0_check_value = 0x2;
252 uint8_t field0_check_mask = 0x6;
253 uint32_t *field1 = malloc(num_words * 4);
254 uint8_t field2_check_value = 0x0;
255 uint8_t field2_check_mask = 0x1;
256 int words_done = 0;
257 int words_scheduled = 0;
258 int i;
259
260 path[0] = TAP_DRSELECT;
261 path[1] = TAP_DRCAPTURE;
262 path[2] = TAP_DRSHIFT;
263
264 memset(&fields, 0, sizeof fields);
265
266 fields[0].num_bits = 3;
267 uint8_t tmp;
268 fields[0].in_value = &tmp;
269 fields[0].check_value = &field0_check_value;
270 fields[0].check_mask = &field0_check_mask;
271
272 fields[1].num_bits = 32;
273
274 fields[2].num_bits = 1;
275 uint8_t tmp2;
276 fields[2].in_value = &tmp2;
277 fields[2].check_value = &field2_check_value;
278 fields[2].check_mask = &field2_check_mask;
279
280 xscale_jtag_set_instr(target->tap,
281 XSCALE_DBGTX << xscale->xscale_variant,
282 TAP_IDLE);
283 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above
284 *could be a no-op */
285
286 /* repeat until all words have been collected */
287 int attempts = 0;
288 while (words_done < num_words) {
289 /* schedule reads */
290 words_scheduled = 0;
291 for (i = words_done; i < num_words; i++) {
292 fields[0].in_value = &field0[i];
293
294 jtag_add_pathmove(3, path);
295
296 fields[1].in_value = (uint8_t *)(field1 + i);
297
298 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
299
300 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
301
302 words_scheduled++;
303 }
304
305 retval = jtag_execute_queue();
306 if (retval != ERROR_OK) {
307 LOG_ERROR("JTAG error while receiving data from debug handler");
308 break;
309 }
310
311 /* examine results */
312 for (i = words_done; i < num_words; i++) {
313 if (!(field0[i] & 1)) {
314 /* move backwards if necessary */
315 int j;
316 for (j = i; j < num_words - 1; j++) {
317 field0[j] = field0[j + 1];
318 field1[j] = field1[j + 1];
319 }
320 words_scheduled--;
321 }
322 }
323 if (words_scheduled == 0) {
324 if (attempts++ == 1000) {
325 LOG_ERROR(
326 "Failed to receiving data from debug handler after 1000 attempts");
327 retval = ERROR_TARGET_TIMEOUT;
328 break;
329 }
330 }
331
332 words_done += words_scheduled;
333 }
334
335 for (i = 0; i < num_words; i++)
336 *(buffer++) = buf_get_u32((uint8_t *)&field1[i], 0, 32);
337
338 free(field1);
339
340 return retval;
341 }
342
/* Poll the debug handler's TX register (DBGTX scan chain) for up to
 * one second.
 *
 * If @consume is non-zero the TAP goes straight Capture-DR->Shift-DR,
 * which clears TX_READY on the target; otherwise a detour through
 * Pause-DR peeks at TX without consuming it.  The value lands in the
 * cached XSCALE_TX register.
 *
 * Returns ERROR_OK when valid data was read,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when TX held no data (only
 * possible in the non-consuming peek), or ERROR_TARGET_TIMEOUT.
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGTX << xscale->xscale_variant,
		TAP_IDLE);

	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	/* TX payload goes straight into the register cache backing store */
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* one-second deadline for TX_READY */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;; ) {
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* bit 0 of field0 is TX_READY; when peeking (no consume) we
		 * return immediately even if no data was present */
		if (!((!(field0_in & 1)) && consume))
			goto done;
		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
429
/* Push the cached XSCALE_RX register value to the debug handler via
 * the DBGRX scan chain.
 *
 * First polls (up to one second) until the handler has drained the
 * previous word (RX_READY, bit 0 of the status field, goes low), then
 * rescans with the rx_valid flag set to hand the word over.
 *
 * Returns ERROR_OK, a JTAG error, or ERROR_TARGET_TIMEOUT.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	/* payload comes from the cached RX register image */
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* one-second deadline for the handler to drain RX */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;) {
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
507
/* Send @count elements of @size bytes (1, 2 or 4) from @buffer to the
 * debug handler over the DBGRX scan chain.
 *
 * Elements are converted from target->endianness to host order before
 * queuing; all scans are flushed in a single jtag_execute_queue().
 * Unlike xscale_write_rx() there is no per-word RX_READY handshake —
 * NOTE(review): presumably the handler drains RX fast enough in this
 * bulk-download path; confirm against the debug handler protocol.
 *
 * Returns ERROR_OK, ERROR_COMMAND_SYNTAX_ERROR for a bad @size, or a
 * JTAG error.
 */
static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	int done_count = 0;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	static const uint8_t t0;	/* 3-bit field: all zero */
	uint8_t t1[4];			/* 32-bit payload, refilled per word */
	static const uint8_t t2 = 1;	/* rx_valid bit always set */
	struct scan_field fields[3] = {
		{ .num_bits = 3, .out_value = &t0 },
		{ .num_bits = 32, .out_value = t1 },
		{ .num_bits = 1, .out_value = &t2 },
	};

	int endianness = target->endianness;
	while (done_count++ < count) {
		uint32_t t;

		switch (size) {
			case 4:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t = le_to_h_u32(buffer);
				else
					t = be_to_h_u32(buffer);
				break;
			case 2:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t = le_to_h_u16(buffer);
				else
					t = be_to_h_u16(buffer);
				break;
			case 1:
				t = buffer[0];
				break;
			default:
				LOG_ERROR("BUG: size neither 4, 2 nor 1");
				return ERROR_COMMAND_SYNTAX_ERROR;
		}

		buf_set_u32(t1, 0, 32, t);

		jtag_add_dr_scan(target->tap,
			3,
			fields,
			TAP_IDLE);
		buffer += size;
	}

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
570
571 static int xscale_send_u32(struct target *target, uint32_t value)
572 {
573 struct xscale_common *xscale = target_to_xscale(target);
574
575 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
576 return xscale_write_rx(target);
577 }
578
/* Write the cached DCSR value to the target through the SELDCSR scan
 * chain, optionally updating the reset-hold and external-debug-break
 * flags first.
 *
 * @hold_rst / @ext_dbg_brk: 0 or 1 to set the corresponding state,
 * -1 to leave the current setting unchanged.
 *
 * Returns ERROR_OK or the error from jtag_execute_queue().
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* drive reset-hold (bit 1) and external debug break (bit 2) */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* the hardware DCSR now matches the cache */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
635
/* Parity of a 32-bit word: 0 when the number of set bits is even,
 * 1 when odd.  Folds the word down to a nibble, then indexes the
 * 16-entry parity table packed into the constant 0x6996. */
static unsigned int parity(unsigned int v)
{
	unsigned int folded = v ^ (v >> 16);

	folded ^= folded >> 8;
	folded ^= folded >> 4;
	return (0x6996 >> (folded & 0xf)) & 1;
}
647
/* Load one cache line (8 instruction words from @buffer) into the
 * mini-ICache at virtual address @va via the LDIC JTAG function.
 *
 * The transfer is a 6-bit command plus 27 address bits, followed by
 * eight 32-bit words each with a parity bit.  @va must identify a
 * cache line (the low 5 bits are discarded by the >> 5).
 *
 * Returns the result of jtag_execute_queue().
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
		TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;	/* cmd now reused for the parity bit */

	for (word = 0; word < 8; word++) {
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* read the word back through memcpy to avoid aliasing issues */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
701
702 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
703 {
704 struct xscale_common *xscale = target_to_xscale(target);
705 uint8_t packet[4];
706 uint8_t cmd;
707 struct scan_field fields[2];
708
709 xscale_jtag_set_instr(target->tap,
710 XSCALE_LDIC << xscale->xscale_variant,
711 TAP_IDLE);
712
713 /* CMD for invalidate IC line b000, bits [6:4] b000 */
714 buf_set_u32(&cmd, 0, 6, 0x0);
715
716 /* virtual address of desired cache line */
717 buf_set_u32(packet, 0, 27, va >> 5);
718
719 memset(&fields, 0, sizeof fields);
720
721 fields[0].num_bits = 6;
722 fields[0].out_value = &cmd;
723
724 fields[1].num_bits = 27;
725 fields[1].out_value = packet;
726
727 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
728
729 return ERROR_OK;
730 }
731
732 static int xscale_update_vectors(struct target *target)
733 {
734 struct xscale_common *xscale = target_to_xscale(target);
735 int i;
736 int retval;
737
738 uint32_t low_reset_branch, high_reset_branch;
739
740 for (i = 1; i < 8; i++) {
741 /* if there's a static vector specified for this exception, override */
742 if (xscale->static_high_vectors_set & (1 << i))
743 xscale->high_vectors[i] = xscale->static_high_vectors[i];
744 else {
745 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
746 if (retval == ERROR_TARGET_TIMEOUT)
747 return retval;
748 if (retval != ERROR_OK) {
749 /* Some of these reads will fail as part of normal execution */
750 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
751 }
752 }
753 }
754
755 for (i = 1; i < 8; i++) {
756 if (xscale->static_low_vectors_set & (1 << i))
757 xscale->low_vectors[i] = xscale->static_low_vectors[i];
758 else {
759 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
760 if (retval == ERROR_TARGET_TIMEOUT)
761 return retval;
762 if (retval != ERROR_OK) {
763 /* Some of these reads will fail as part of normal execution */
764 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
765 }
766 }
767 }
768
769 /* calculate branches to debug handler */
770 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
771 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
772
773 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
774 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
775
776 /* invalidate and load exception vectors in mini i-cache */
777 xscale_invalidate_ic_line(target, 0x0);
778 xscale_invalidate_ic_line(target, 0xffff0000);
779
780 xscale_load_ic(target, 0x0, xscale->low_vectors);
781 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
782
783 return ERROR_OK;
784 }
785
786 static int xscale_arch_state(struct target *target)
787 {
788 struct xscale_common *xscale = target_to_xscale(target);
789 struct arm *arm = &xscale->arm;
790
791 static const char *state[] = {
792 "disabled", "enabled"
793 };
794
795 static const char *arch_dbg_reason[] = {
796 "", "\n(processor reset)", "\n(trace buffer full)"
797 };
798
799 if (arm->common_magic != ARM_COMMON_MAGIC) {
800 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
801 return ERROR_COMMAND_SYNTAX_ERROR;
802 }
803
804 arm_arch_state(target);
805 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
806 state[xscale->armv4_5_mmu.mmu_enabled],
807 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
808 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
809 arch_dbg_reason[xscale->arch_debug_reason]);
810
811 return ERROR_OK;
812 }
813
/* Periodic poll hook: peek at the debug handler's TX register to see
 * whether the core dropped into debug state while it was (debug-)
 * running; if so, run debug entry and fire the halt event callbacks.
 *
 * Returns ERROR_OK or the error from the TX read / debug entry.
 */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING)) {
		enum target_state previous_state = target->state;
		/* non-consuming peek: leaves TX intact for debug entry */
		retval = xscale_read_tx(target, 0);
		if (retval == ERROR_OK) {

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		} else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE) {
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be performed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
850
/* Handle entry into debug state: drain the register dump the debug
 * handler sends over TX, populate the ARM and XScale register caches,
 * decode the Method of Entry from DCSR, apply the PC fixup, refresh
 * cached MMU/cache settings, and collect trace data if tracing.
 *
 * The handler's protocol (as consumed here): first 10 words are
 * r0, pc, r1..r7, cpsr; then r8..r14 plus spsr (8 words) for modes
 * that have an spsr, or just r8..r14 (7 words) for USR/SYS.
 *
 * Returns ERROR_OK, a receive error, or ERROR_TARGET_FAILURE when
 * the cpsr is garbage (communication failure).
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t pc;
	uint32_t buffer[10];
	unsigned i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	retval = xscale_read_dcsr(target);
	if (retval != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	retval = xscale_receive(target, buffer, 10);
	if (retval != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	arm->core_cache->reg_list[0].dirty = 1;
	arm->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(arm->pc->value, 0, 32, buffer[1]);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache (r1..r7 at buffer[2..8]) */
	for (i = 1; i <= 7; i++) {
		buf_set_u32(arm->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		arm->core_cache->reg_list[i].dirty = 1;
		arm->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(arm, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	if (!is_arm_mode(arm->core_mode)) {
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
		arm_mode_name(arm->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (arm->spsr) {
		xscale_receive(target, buffer, 8);
		/* spsr is the 8th word (index 7), after r8..r14 */
		buf_set_u32(arm->spsr->value, 0, 32, buffer[7]);
		arm->spsr->dirty = false;
		arm->spsr->valid = true;
	} else {
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++) {
		struct reg *r = arm_reg_current(arm, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* mark xscale regs invalid to ensure they are retrieved from the
	 * debug handler if requested */
	for (i = 0; i < xscale->reg_cache->num_regs; i++)
		xscale->reg_cache->reg_list[i].valid = 0;

	/* examine debug reason: Method of Entry is DCSR bits [4:2] */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(arm->pc->value, 0, 32);

	switch (moe) {
		case 0x0:	/* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1:	/* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2:	/* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3:	/* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4:	/* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5:	/* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6:	/* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7:	/* Reserved (may flag Hot-Debug support) */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(arm->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1) {
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value,
				0,
				32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings
	 * read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg =
		buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
		(xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
		(xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		xscale_read_trace(target);

		/* Resume if entered debug due to buffer fill and we're still collecting
		 * trace data.  Note that a debug exception due to trace buffer full
		 * can only happen in fill mode. */
		if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL) {
			if (--xscale->trace.fill_counter > 0)
				xscale_resume(target, 1, 0x0, 1, 0);
		} else	/* entered debug for other reason; reset counter */
			xscale->trace.fill_counter = 0;
	}

	return ERROR_OK;
}
1021
1022 static int xscale_halt(struct target *target)
1023 {
1024 struct xscale_common *xscale = target_to_xscale(target);
1025
1026 LOG_DEBUG("target->state: %s",
1027 target_state_name(target));
1028
1029 if (target->state == TARGET_HALTED) {
1030 LOG_DEBUG("target was already halted");
1031 return ERROR_OK;
1032 } else if (target->state == TARGET_UNKNOWN) {
1033 /* this must not happen for a xscale target */
1034 LOG_ERROR("target was in unknown state when halt was requested");
1035 return ERROR_TARGET_INVALID;
1036 } else if (target->state == TARGET_RESET)
1037 LOG_DEBUG("target->state == TARGET_RESET");
1038 else {
1039 /* assert external dbg break */
1040 xscale->external_debug_break = 1;
1041 xscale_read_dcsr(target);
1042
1043 target->debug_reason = DBG_REASON_DBGRQ;
1044 }
1045
1046 return ERROR_OK;
1047 }
1048
1049 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1050 {
1051 struct xscale_common *xscale = target_to_xscale(target);
1052 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1053 int retval;
1054
1055 if (xscale->ibcr0_used) {
1056 struct breakpoint *ibcr0_bp =
1057 breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1058
1059 if (ibcr0_bp)
1060 xscale_unset_breakpoint(target, ibcr0_bp);
1061 else {
1062 LOG_ERROR(
1063 "BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1064 exit(-1);
1065 }
1066 }
1067
1068 retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1069 if (retval != ERROR_OK)
1070 return retval;
1071
1072 return ERROR_OK;
1073 }
1074
1075 static int xscale_disable_single_step(struct target *target)
1076 {
1077 struct xscale_common *xscale = target_to_xscale(target);
1078 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1079 int retval;
1080
1081 retval = xscale_set_reg_u32(ibcr0, 0x0);
1082 if (retval != ERROR_OK)
1083 return retval;
1084
1085 return ERROR_OK;
1086 }
1087
1088 static void xscale_enable_watchpoints(struct target *target)
1089 {
1090 struct watchpoint *watchpoint = target->watchpoints;
1091
1092 while (watchpoint) {
1093 if (watchpoint->set == 0)
1094 xscale_set_watchpoint(target, watchpoint);
1095 watchpoint = watchpoint->next;
1096 }
1097 }
1098
1099 static void xscale_enable_breakpoints(struct target *target)
1100 {
1101 struct breakpoint *breakpoint = target->breakpoints;
1102
1103 /* set any pending breakpoints */
1104 while (breakpoint) {
1105 if (breakpoint->set == 0)
1106 xscale_set_breakpoint(target, breakpoint);
1107 breakpoint = breakpoint->next;
1108 }
1109 }
1110
1111 static void xscale_free_trace_data(struct xscale_common *xscale)
1112 {
1113 struct xscale_trace_data *td = xscale->trace.data;
1114 while (td) {
1115 struct xscale_trace_data *next_td = td->next;
1116 if (td->entries)
1117 free(td->entries);
1118 free(td);
1119 td = next_td;
1120 }
1121 xscale->trace.data = NULL;
1122 }
1123
1124 static int xscale_resume(struct target *target, int current,
1125 uint32_t address, int handle_breakpoints, int debug_execution)
1126 {
1127 struct xscale_common *xscale = target_to_xscale(target);
1128 struct arm *arm = &xscale->arm;
1129 uint32_t current_pc;
1130 int retval;
1131 int i;
1132
1133 LOG_DEBUG("-");
1134
1135 if (target->state != TARGET_HALTED) {
1136 LOG_WARNING("target not halted");
1137 return ERROR_TARGET_NOT_HALTED;
1138 }
1139
1140 if (!debug_execution)
1141 target_free_all_working_areas(target);
1142
1143 /* update vector tables */
1144 retval = xscale_update_vectors(target);
1145 if (retval != ERROR_OK)
1146 return retval;
1147
1148 /* current = 1: continue on current pc, otherwise continue at <address> */
1149 if (!current)
1150 buf_set_u32(arm->pc->value, 0, 32, address);
1151
1152 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1153
1154 /* if we're at the reset vector, we have to simulate the branch */
1155 if (current_pc == 0x0) {
1156 arm_simulate_step(target, NULL);
1157 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1158 }
1159
1160 /* the front-end may request us not to handle breakpoints */
1161 if (handle_breakpoints) {
1162 struct breakpoint *breakpoint;
1163 breakpoint = breakpoint_find(target,
1164 buf_get_u32(arm->pc->value, 0, 32));
1165 if (breakpoint != NULL) {
1166 uint32_t next_pc;
1167 enum trace_mode saved_trace_mode;
1168
1169 /* there's a breakpoint at the current PC, we have to step over it */
1170 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1171 xscale_unset_breakpoint(target, breakpoint);
1172
1173 /* calculate PC of next instruction */
1174 retval = arm_simulate_step(target, &next_pc);
1175 if (retval != ERROR_OK) {
1176 uint32_t current_opcode;
1177 target_read_u32(target, current_pc, &current_opcode);
1178 LOG_ERROR(
1179 "BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
1180 current_opcode);
1181 }
1182
1183 LOG_DEBUG("enable single-step");
1184 xscale_enable_single_step(target, next_pc);
1185
1186 /* restore banked registers */
1187 retval = xscale_restore_banked(target);
1188 if (retval != ERROR_OK)
1189 return retval;
1190
1191 /* send resume request */
1192 xscale_send_u32(target, 0x30);
1193
1194 /* send CPSR */
1195 xscale_send_u32(target,
1196 buf_get_u32(arm->cpsr->value, 0, 32));
1197 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1198 buf_get_u32(arm->cpsr->value, 0, 32));
1199
1200 for (i = 7; i >= 0; i--) {
1201 /* send register */
1202 xscale_send_u32(target,
1203 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1204 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1205 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1206 }
1207
1208 /* send PC */
1209 xscale_send_u32(target,
1210 buf_get_u32(arm->pc->value, 0, 32));
1211 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1212 buf_get_u32(arm->pc->value, 0, 32));
1213
1214 /* disable trace data collection in xscale_debug_entry() */
1215 saved_trace_mode = xscale->trace.mode;
1216 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1217
1218 /* wait for and process debug entry */
1219 xscale_debug_entry(target);
1220
1221 /* re-enable trace buffer, if enabled previously */
1222 xscale->trace.mode = saved_trace_mode;
1223
1224 LOG_DEBUG("disable single-step");
1225 xscale_disable_single_step(target);
1226
1227 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1228 xscale_set_breakpoint(target, breakpoint);
1229 }
1230 }
1231
1232 /* enable any pending breakpoints and watchpoints */
1233 xscale_enable_breakpoints(target);
1234 xscale_enable_watchpoints(target);
1235
1236 /* restore banked registers */
1237 retval = xscale_restore_banked(target);
1238 if (retval != ERROR_OK)
1239 return retval;
1240
1241 /* send resume request (command 0x30 or 0x31)
1242 * clean the trace buffer if it is to be enabled (0x62) */
1243 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
1244 if (xscale->trace.mode == XSCALE_TRACE_FILL) {
1245 /* If trace enabled in fill mode and starting collection of new set
1246 * of buffers, initialize buffer counter and free previous buffers */
1247 if (xscale->trace.fill_counter == 0) {
1248 xscale->trace.fill_counter = xscale->trace.buffer_fill;
1249 xscale_free_trace_data(xscale);
1250 }
1251 } else /* wrap mode; free previous buffer */
1252 xscale_free_trace_data(xscale);
1253
1254 xscale_send_u32(target, 0x62);
1255 xscale_send_u32(target, 0x31);
1256 } else
1257 xscale_send_u32(target, 0x30);
1258
1259 /* send CPSR */
1260 xscale_send_u32(target, buf_get_u32(arm->cpsr->value, 0, 32));
1261 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1262 buf_get_u32(arm->cpsr->value, 0, 32));
1263
1264 for (i = 7; i >= 0; i--) {
1265 /* send register */
1266 xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1267 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1268 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1269 }
1270
1271 /* send PC */
1272 xscale_send_u32(target, buf_get_u32(arm->pc->value, 0, 32));
1273 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1274 buf_get_u32(arm->pc->value, 0, 32));
1275
1276 target->debug_reason = DBG_REASON_NOTHALTED;
1277
1278 if (!debug_execution) {
1279 /* registers are now invalid */
1280 register_cache_invalidate(arm->core_cache);
1281 target->state = TARGET_RUNNING;
1282 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1283 } else {
1284 target->state = TARGET_DEBUG_RUNNING;
1285 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1286 }
1287
1288 LOG_DEBUG("target resumed");
1289
1290 return ERROR_OK;
1291 }
1292
/* Core of the single-step operation: compute the next PC by simulation, arm
 * the IBCR0 single-step comparator, hand the full register context back to
 * the debug handler with a resume command, and wait for the resulting debug
 * re-entry.  Callers (xscale_step) are responsible for breakpoint handling.
 *
 * NOTE(review): the `current`, `address` and `handle_breakpoints` parameters
 * are unused here -- the PC has already been adjusted by the caller.
 *
 * Returns ERROR_OK on success or the first failing step's error code.
 */
static int xscale_step_inner(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	retval = arm_simulate_step(target, &next_pc);
	if (retval != ERROR_OK) {
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(arm->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR(
			"BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
			current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	retval = xscale_enable_single_step(target, next_pc);
	if (retval != ERROR_OK)
		return retval;

	/* restore banked registers */
	retval = xscale_restore_banked(target);
	if (retval != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		retval = xscale_send_u32(target, 0x62);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_send_u32(target, 0x31);
		if (retval != ERROR_OK)
			return retval;
	} else {
		retval = xscale_send_u32(target, 0x30);
		if (retval != ERROR_OK)
			return retval;
	}

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(arm->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(arm->cpsr->value, 0, 32));

	/* send r0..r7; the debug handler expects them in descending order */
	for (i = 7; i >= 0; i--) {
		/* send register */
		retval = xscale_send_u32(target,
				buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i,
			buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	retval = xscale_send_u32(target,
			buf_get_u32(arm->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
		buf_get_u32(arm->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	/* wait for and process debug entry */
	retval = xscale_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	retval = xscale_disable_single_step(target);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1387
1388 static int xscale_step(struct target *target, int current,
1389 uint32_t address, int handle_breakpoints)
1390 {
1391 struct arm *arm = target_to_arm(target);
1392 struct breakpoint *breakpoint = NULL;
1393
1394 uint32_t current_pc;
1395 int retval;
1396
1397 if (target->state != TARGET_HALTED) {
1398 LOG_WARNING("target not halted");
1399 return ERROR_TARGET_NOT_HALTED;
1400 }
1401
1402 /* current = 1: continue on current pc, otherwise continue at <address> */
1403 if (!current)
1404 buf_set_u32(arm->pc->value, 0, 32, address);
1405
1406 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1407
1408 /* if we're at the reset vector, we have to simulate the step */
1409 if (current_pc == 0x0) {
1410 retval = arm_simulate_step(target, NULL);
1411 if (retval != ERROR_OK)
1412 return retval;
1413 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1414 LOG_DEBUG("current pc %" PRIx32, current_pc);
1415
1416 target->debug_reason = DBG_REASON_SINGLESTEP;
1417 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1418
1419 return ERROR_OK;
1420 }
1421
1422 /* the front-end may request us not to handle breakpoints */
1423 if (handle_breakpoints)
1424 breakpoint = breakpoint_find(target,
1425 buf_get_u32(arm->pc->value, 0, 32));
1426 if (breakpoint != NULL) {
1427 retval = xscale_unset_breakpoint(target, breakpoint);
1428 if (retval != ERROR_OK)
1429 return retval;
1430 }
1431
1432 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1433 if (retval != ERROR_OK)
1434 return retval;
1435
1436 if (breakpoint)
1437 xscale_set_breakpoint(target, breakpoint);
1438
1439 LOG_DEBUG("target stepped");
1440
1441 return ERROR_OK;
1442
1443 }
1444
1445 static int xscale_assert_reset(struct target *target)
1446 {
1447 struct xscale_common *xscale = target_to_xscale(target);
1448
1449 /* TODO: apply hw reset signal in not examined state */
1450 if (!(target_was_examined(target))) {
1451 LOG_WARNING("Reset is not asserted because the target is not examined.");
1452 LOG_WARNING("Use a reset button or power cycle the target.");
1453 return ERROR_TARGET_NOT_EXAMINED;
1454 }
1455
1456 LOG_DEBUG("target->state: %s",
1457 target_state_name(target));
1458
1459 /* assert reset */
1460 jtag_add_reset(0, 1);
1461
1462 /* sleep 1ms, to be sure we fulfill any requirements */
1463 jtag_add_sleep(1000);
1464 jtag_execute_queue();
1465
1466 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1467 * end up in T-L-R, which would reset JTAG
1468 */
1469 xscale_jtag_set_instr(target->tap,
1470 XSCALE_SELDCSR << xscale->xscale_variant,
1471 TAP_IDLE);
1472
1473 /* set Hold reset, Halt mode and Trap Reset */
1474 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1475 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1476 xscale_write_dcsr(target, 1, 0);
1477
1478 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1479 xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
1480 jtag_execute_queue();
1481
1482 target->state = TARGET_RESET;
1483
1484 if (target->reset_halt) {
1485 int retval = target_halt(target);
1486 if (retval != ERROR_OK)
1487 return retval;
1488 }
1489
1490 return ERROR_OK;
1491 }
1492
/* Release SRST and bring the target back under debug control: reset all
 * hardware breakpoint/watchpoint bookkeeping, reload the debug handler into
 * the mini-icache, reload the low/high exception vectors, then let the core
 * run.  Unless reset_halt is requested, the core is expected to trap into
 * the debug handler and is then resumed transparently.
 *
 * Returns ERROR_OK on success, or the error from a failed mini-icache load.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* reset wiped the comparators: both IBCRs and both DBRs are free again */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint) {
		if (breakpoint->type == BKPT_HARD)
			breakpoint->set = 0;
		breakpoint = breakpoint->next;
	}

	/* tracing state did not survive the reset either */
	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale_free_trace_data(xscale);

	register_cache_invalidate(xscale->arm.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		/* copy the handler one 32-byte cache line at a time */
		for (unsigned binary_size = sizeof xscale_debug_handler;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt) {
			uint32_t cache_line[8];
			unsigned i;

			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4) {
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad a partial final line with "mov r8, r8" no-ops */
			for (; i < 32; i += 4)
				cache_line[i / 4] = 0xe1a08008;

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0) {
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		}

		/* install the exception vector tables at 0x0 and 0xffff0000 */
		retval = xscale_load_ic(target, 0x0,
				xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
				xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt) {
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1620
/* Stub: reading a single core register via the debug handler is not
 * implemented; callers get ERROR_OK but the register value is NOT updated.
 */
static int xscale_read_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1628
/* Stub: writing a single core register via the debug handler is not
 * implemented; callers get ERROR_OK but nothing is written to the target.
 */
static int xscale_write_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode, uint8_t *value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1636
1637 static int xscale_full_context(struct target *target)
1638 {
1639 struct arm *arm = target_to_arm(target);
1640
1641 uint32_t *buffer;
1642
1643 int i, j;
1644
1645 LOG_DEBUG("-");
1646
1647 if (target->state != TARGET_HALTED) {
1648 LOG_WARNING("target not halted");
1649 return ERROR_TARGET_NOT_HALTED;
1650 }
1651
1652 buffer = malloc(4 * 8);
1653
1654 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1655 * we can't enter User mode on an XScale (unpredictable),
1656 * but User shares registers with SYS
1657 */
1658 for (i = 1; i < 7; i++) {
1659 enum arm_mode mode = armv4_5_number_to_mode(i);
1660 bool valid = true;
1661 struct reg *r;
1662
1663 if (mode == ARM_MODE_USR)
1664 continue;
1665
1666 /* check if there are invalid registers in the current mode
1667 */
1668 for (j = 0; valid && j <= 16; j++) {
1669 if (!ARMV4_5_CORE_REG_MODE(arm->core_cache,
1670 mode, j).valid)
1671 valid = false;
1672 }
1673 if (valid)
1674 continue;
1675
1676 /* request banked registers */
1677 xscale_send_u32(target, 0x0);
1678
1679 /* send CPSR for desired bank mode */
1680 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1681
1682 /* get banked registers: r8 to r14; and SPSR
1683 * except in USR/SYS mode
1684 */
1685 if (mode != ARM_MODE_SYS) {
1686 /* SPSR */
1687 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1688 mode, 16);
1689
1690 xscale_receive(target, buffer, 8);
1691
1692 buf_set_u32(r->value, 0, 32, buffer[7]);
1693 r->dirty = false;
1694 r->valid = true;
1695 } else
1696 xscale_receive(target, buffer, 7);
1697
1698 /* move data from buffer to register cache */
1699 for (j = 8; j <= 14; j++) {
1700 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1701 mode, j);
1702
1703 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1704 r->dirty = false;
1705 r->valid = true;
1706 }
1707 }
1708
1709 free(buffer);
1710
1711 return ERROR_OK;
1712 }
1713
/* Flush any dirty banked registers (r8-r14 and, outside SYS mode, the SPSR)
 * from the register cache back to the target via the debug handler's
 * "send banked registers" command (0x1).  Modes with no dirty registers are
 * skipped entirely.
 *
 * Returns ERROR_OK, or ERROR_TARGET_NOT_HALTED if the core is running.
 */
static int xscale_restore_banked(struct target *target)
{
	struct arm *arm = target_to_arm(target);

	int i, j;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written. Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++) {
		enum arm_mode mode = armv4_5_number_to_mode(i);
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++) {
			if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j).dirty)
				goto dirty;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS) {
			if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, 16).dirty)
				goto dirty;
		}

		/* there's nothing to flush for this mode */
		continue;

dirty:
		/* command 0x1: "send banked registers" */
		xscale_send_u32(target, 0x1);

		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}

		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, 16);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}
	}

	return ERROR_OK;
}
1782
1783 static int xscale_read_memory(struct target *target, uint32_t address,
1784 uint32_t size, uint32_t count, uint8_t *buffer)
1785 {
1786 struct xscale_common *xscale = target_to_xscale(target);
1787 uint32_t *buf32;
1788 uint32_t i;
1789 int retval;
1790
1791 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1792 address,
1793 size,
1794 count);
1795
1796 if (target->state != TARGET_HALTED) {
1797 LOG_WARNING("target not halted");
1798 return ERROR_TARGET_NOT_HALTED;
1799 }
1800
1801 /* sanitize arguments */
1802 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1803 return ERROR_COMMAND_SYNTAX_ERROR;
1804
1805 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1806 return ERROR_TARGET_UNALIGNED_ACCESS;
1807
1808 /* send memory read request (command 0x1n, n: access size) */
1809 retval = xscale_send_u32(target, 0x10 | size);
1810 if (retval != ERROR_OK)
1811 return retval;
1812
1813 /* send base address for read request */
1814 retval = xscale_send_u32(target, address);
1815 if (retval != ERROR_OK)
1816 return retval;
1817
1818 /* send number of requested data words */
1819 retval = xscale_send_u32(target, count);
1820 if (retval != ERROR_OK)
1821 return retval;
1822
1823 /* receive data from target (count times 32-bit words in host endianness) */
1824 buf32 = malloc(4 * count);
1825 retval = xscale_receive(target, buf32, count);
1826 if (retval != ERROR_OK) {
1827 free(buf32);
1828 return retval;
1829 }
1830
1831 /* extract data from host-endian buffer into byte stream */
1832 for (i = 0; i < count; i++) {
1833 switch (size) {
1834 case 4:
1835 target_buffer_set_u32(target, buffer, buf32[i]);
1836 buffer += 4;
1837 break;
1838 case 2:
1839 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1840 buffer += 2;
1841 break;
1842 case 1:
1843 *buffer++ = buf32[i] & 0xff;
1844 break;
1845 default:
1846 LOG_ERROR("invalid read size");
1847 return ERROR_COMMAND_SYNTAX_ERROR;
1848 }
1849 }
1850
1851 free(buf32);
1852
1853 /* examine DCSR, to see if Sticky Abort (SA) got set */
1854 retval = xscale_read_dcsr(target);
1855 if (retval != ERROR_OK)
1856 return retval;
1857 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1858 /* clear SA bit */
1859 retval = xscale_send_u32(target, 0x60);
1860 if (retval != ERROR_OK)
1861 return retval;
1862
1863 return ERROR_TARGET_DATA_ABORT;
1864 }
1865
1866 return ERROR_OK;
1867 }
1868
/* Read physical memory.  With the MMU disabled, virtual and physical
 * addresses coincide, so the normal read path is used; with the MMU enabled
 * this is an unimplemented stub and fails.
 */
static int xscale_read_phys_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct xscale_common *xscale = target_to_xscale(target);

	/* with MMU inactive, there are only physical addresses */
	if (!xscale->armv4_5_mmu.mmu_enabled)
		return xscale_read_memory(target, address, size, count, buffer);

	/** \todo: provide a non-stub implementation of this routine. */
	LOG_ERROR("%s: %s is not implemented. Disable MMU?",
		target_name(target), __func__);
	return ERROR_FAIL;
}
1883
1884 static int xscale_write_memory(struct target *target, uint32_t address,
1885 uint32_t size, uint32_t count, const uint8_t *buffer)
1886 {
1887 struct xscale_common *xscale = target_to_xscale(target);
1888 int retval;
1889
1890 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1891 address,
1892 size,
1893 count);
1894
1895 if (target->state != TARGET_HALTED) {
1896 LOG_WARNING("target not halted");
1897 return ERROR_TARGET_NOT_HALTED;
1898 }
1899
1900 /* sanitize arguments */
1901 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1902 return ERROR_COMMAND_SYNTAX_ERROR;
1903
1904 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1905 return ERROR_TARGET_UNALIGNED_ACCESS;
1906
1907 /* send memory write request (command 0x2n, n: access size) */
1908 retval = xscale_send_u32(target, 0x20 | size);
1909 if (retval != ERROR_OK)
1910 return retval;
1911
1912 /* send base address for read request */
1913 retval = xscale_send_u32(target, address);
1914 if (retval != ERROR_OK)
1915 return retval;
1916
1917 /* send number of requested data words to be written*/
1918 retval = xscale_send_u32(target, count);
1919 if (retval != ERROR_OK)
1920 return retval;
1921
1922 /* extract data from host-endian buffer into byte stream */
1923 #if 0
1924 for (i = 0; i < count; i++) {
1925 switch (size) {
1926 case 4:
1927 value = target_buffer_get_u32(target, buffer);
1928 xscale_send_u32(target, value);
1929 buffer += 4;
1930 break;
1931 case 2:
1932 value = target_buffer_get_u16(target, buffer);
1933 xscale_send_u32(target, value);
1934 buffer += 2;
1935 break;
1936 case 1:
1937 value = *buffer;
1938 xscale_send_u32(target, value);
1939 buffer += 1;
1940 break;
1941 default:
1942 LOG_ERROR("should never get here");
1943 exit(-1);
1944 }
1945 }
1946 #endif
1947 retval = xscale_send(target, buffer, count, size);
1948 if (retval != ERROR_OK)
1949 return retval;
1950
1951 /* examine DCSR, to see if Sticky Abort (SA) got set */
1952 retval = xscale_read_dcsr(target);
1953 if (retval != ERROR_OK)
1954 return retval;
1955 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1956 /* clear SA bit */
1957 retval = xscale_send_u32(target, 0x60);
1958 if (retval != ERROR_OK)
1959 return retval;
1960
1961 LOG_ERROR("data abort writing memory");
1962 return ERROR_TARGET_DATA_ABORT;
1963 }
1964
1965 return ERROR_OK;
1966 }
1967
/* Write physical memory.  With the MMU disabled, virtual and physical
 * addresses coincide, so the normal write path is used; with the MMU enabled
 * this is an unimplemented stub and fails.
 */
static int xscale_write_phys_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, const uint8_t *buffer)
{
	struct xscale_common *xscale = target_to_xscale(target);

	/* with MMU inactive, there are only physical addresses */
	if (!xscale->armv4_5_mmu.mmu_enabled)
		return xscale_write_memory(target, address, size, count, buffer);

	/** \todo: provide a non-stub implementation of this routine. */
	LOG_ERROR("%s: %s is not implemented. Disable MMU?",
		target_name(target), __func__);
	return ERROR_FAIL;
}
1982
1983 static int xscale_get_ttb(struct target *target, uint32_t *result)
1984 {
1985 struct xscale_common *xscale = target_to_xscale(target);
1986 uint32_t ttb;
1987 int retval;
1988
1989 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1990 if (retval != ERROR_OK)
1991 return retval;
1992 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1993
1994 *result = ttb;
1995
1996 return ERROR_OK;
1997 }
1998
1999 static int xscale_disable_mmu_caches(struct target *target, int mmu,
2000 int d_u_cache, int i_cache)
2001 {
2002 struct xscale_common *xscale = target_to_xscale(target);
2003 uint32_t cp15_control;
2004 int retval;
2005
2006 /* read cp15 control register */
2007 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2008 if (retval != ERROR_OK)
2009 return retval;
2010 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2011
2012 if (mmu)
2013 cp15_control &= ~0x1U;
2014
2015 if (d_u_cache) {
2016 /* clean DCache */
2017 retval = xscale_send_u32(target, 0x50);
2018 if (retval != ERROR_OK)
2019 return retval;
2020 retval = xscale_send_u32(target, xscale->cache_clean_address);
2021 if (retval != ERROR_OK)
2022 return retval;
2023
2024 /* invalidate DCache */
2025 retval = xscale_send_u32(target, 0x51);
2026 if (retval != ERROR_OK)
2027 return retval;
2028
2029 cp15_control &= ~0x4U;
2030 }
2031
2032 if (i_cache) {
2033 /* invalidate ICache */
2034 retval = xscale_send_u32(target, 0x52);
2035 if (retval != ERROR_OK)
2036 return retval;
2037 cp15_control &= ~0x1000U;
2038 }
2039
2040 /* write new cp15 control register */
2041 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2042 if (retval != ERROR_OK)
2043 return retval;
2044
2045 /* execute cpwait to ensure outstanding operations complete */
2046 retval = xscale_send_u32(target, 0x53);
2047 return retval;
2048 }
2049
2050 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2051 int d_u_cache, int i_cache)
2052 {
2053 struct xscale_common *xscale = target_to_xscale(target);
2054 uint32_t cp15_control;
2055 int retval;
2056
2057 /* read cp15 control register */
2058 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2059 if (retval != ERROR_OK)
2060 return retval;
2061 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2062
2063 if (mmu)
2064 cp15_control |= 0x1U;
2065
2066 if (d_u_cache)
2067 cp15_control |= 0x4U;
2068
2069 if (i_cache)
2070 cp15_control |= 0x1000U;
2071
2072 /* write new cp15 control register */
2073 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2074 if (retval != ERROR_OK)
2075 return retval;
2076
2077 /* execute cpwait to ensure outstanding operations complete */
2078 retval = xscale_send_u32(target, 0x53);
2079 return retval;
2080 }
2081
/* Install a breakpoint on the target.
 *
 * BKPT_HARD: program a free IBCR comparator (bit 0 of the stored value
 * enables it); breakpoint->set records which comparator (1 or 2) was used.
 * BKPT_SOFT: save the original instruction, write the ARM or Thumb BKPT
 * opcode in its place, then clean/invalidate the caches so the new opcode
 * is actually fetched.
 *
 * Availability of a hardware comparator was already verified by
 * xscale_add_breakpoint(); running out here is reported as a BUG.
 */
static int xscale_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		uint32_t value = breakpoint->address | 1;
		if (!xscale->ibcr0_used) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			breakpoint->set = 1;	/* breakpoint set on first breakpoint register */
		} else if (!xscale->ibcr1_used) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			breakpoint->set = 2;	/* breakpoint set on second breakpoint register */
		} else {/* bug: availability previously verified in xscale_add_breakpoint() */
			LOG_ERROR("BUG: no hardware comparator available");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
	} else if (breakpoint->type == BKPT_SOFT) {
		if (breakpoint->length == 4) {
			/* keep the original instruction in target endianness */
			retval = target_read_memory(target, breakpoint->address, 4, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness
			 *(arm7_9->arm_bkpt is host endian) */
			retval = target_write_u32(target, breakpoint->address,
					xscale->arm_bkpt);
			if (retval != ERROR_OK)
				return retval;
		} else {
			/* keep the original instruction in target endianness */
			retval = target_read_memory(target, breakpoint->address, 2, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness
			 *(arm7_9->arm_bkpt is host endian) */
			retval = target_write_u16(target, breakpoint->address,
					xscale->thumb_bkpt);
			if (retval != ERROR_OK)
				return retval;
		}
		breakpoint->set = 1;

		/* flush the modified instruction out to memory and refetch:
		 * clean + invalidate DCache, then invalidate ICache */
		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}
2148
2149 static int xscale_add_breakpoint(struct target *target,
2150 struct breakpoint *breakpoint)
2151 {
2152 struct xscale_common *xscale = target_to_xscale(target);
2153
2154 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1)) {
2155 LOG_ERROR("no breakpoint unit available for hardware breakpoint");
2156 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2157 }
2158
2159 if ((breakpoint->length != 2) && (breakpoint->length != 4)) {
2160 LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2161 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2162 }
2163
2164 if (breakpoint->type == BKPT_HARD)
2165 xscale->ibcr_available--;
2166
2167 return xscale_set_breakpoint(target, breakpoint);
2168 }
2169
2170 static int xscale_unset_breakpoint(struct target *target,
2171 struct breakpoint *breakpoint)
2172 {
2173 int retval;
2174 struct xscale_common *xscale = target_to_xscale(target);
2175
2176 if (target->state != TARGET_HALTED) {
2177 LOG_WARNING("target not halted");
2178 return ERROR_TARGET_NOT_HALTED;
2179 }
2180
2181 if (!breakpoint->set) {
2182 LOG_WARNING("breakpoint not set");
2183 return ERROR_OK;
2184 }
2185
2186 if (breakpoint->type == BKPT_HARD) {
2187 if (breakpoint->set == 1) {
2188 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2189 xscale->ibcr0_used = 0;
2190 } else if (breakpoint->set == 2) {
2191 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2192 xscale->ibcr1_used = 0;
2193 }
2194 breakpoint->set = 0;
2195 } else {
2196 /* restore original instruction (kept in target endianness) */
2197 if (breakpoint->length == 4) {
2198 retval = target_write_memory(target, breakpoint->address, 4, 1,
2199 breakpoint->orig_instr);
2200 if (retval != ERROR_OK)
2201 return retval;
2202 } else {
2203 retval = target_write_memory(target, breakpoint->address, 2, 1,
2204 breakpoint->orig_instr);
2205 if (retval != ERROR_OK)
2206 return retval;
2207 }
2208 breakpoint->set = 0;
2209
2210 xscale_send_u32(target, 0x50); /* clean dcache */
2211 xscale_send_u32(target, xscale->cache_clean_address);
2212 xscale_send_u32(target, 0x51); /* invalidate dcache */
2213 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2214 }
2215
2216 return ERROR_OK;
2217 }
2218
2219 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2220 {
2221 struct xscale_common *xscale = target_to_xscale(target);
2222
2223 if (target->state != TARGET_HALTED) {
2224 LOG_ERROR("target not halted");
2225 return ERROR_TARGET_NOT_HALTED;
2226 }
2227
2228 if (breakpoint->set)
2229 xscale_unset_breakpoint(target, breakpoint);
2230
2231 if (breakpoint->type == BKPT_HARD)
2232 xscale->ibcr_available++;
2233
2234 return ERROR_OK;
2235 }
2236
2237 static int xscale_set_watchpoint(struct target *target,
2238 struct watchpoint *watchpoint)
2239 {
2240 struct xscale_common *xscale = target_to_xscale(target);
2241 uint32_t enable = 0;
2242 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2243 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2244
2245 if (target->state != TARGET_HALTED) {
2246 LOG_ERROR("target not halted");
2247 return ERROR_TARGET_NOT_HALTED;
2248 }
2249
2250 switch (watchpoint->rw) {
2251 case WPT_READ:
2252 enable = 0x3;
2253 break;
2254 case WPT_ACCESS:
2255 enable = 0x2;
2256 break;
2257 case WPT_WRITE:
2258 enable = 0x1;
2259 break;
2260 default:
2261 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2262 }
2263
2264 /* For watchpoint across more than one word, both DBR registers must
2265 be enlisted, with the second used as a mask. */
2266 if (watchpoint->length > 4) {
2267 if (xscale->dbr0_used || xscale->dbr1_used) {
2268 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2269 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2270 }
2271
2272 /* Write mask value to DBR1, based on the length argument.
2273 * Address bits ignored by the comparator are those set in mask. */
2274 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2275 watchpoint->length - 1);
2276 xscale->dbr1_used = 1;
2277 enable |= 0x100; /* DBCON[M] */
2278 }
2279
2280 if (!xscale->dbr0_used) {
2281 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2282 dbcon_value |= enable;
2283 xscale_set_reg_u32(dbcon, dbcon_value);
2284 watchpoint->set = 1;
2285 xscale->dbr0_used = 1;
2286 } else if (!xscale->dbr1_used) {
2287 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2288 dbcon_value |= enable << 2;
2289 xscale_set_reg_u32(dbcon, dbcon_value);
2290 watchpoint->set = 2;
2291 xscale->dbr1_used = 1;
2292 } else {
2293 LOG_ERROR("BUG: no hardware comparator available");
2294 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2295 }
2296
2297 return ERROR_OK;
2298 }
2299
2300 static int xscale_add_watchpoint(struct target *target,
2301 struct watchpoint *watchpoint)
2302 {
2303 struct xscale_common *xscale = target_to_xscale(target);
2304
2305 if (xscale->dbr_available < 1) {
2306 LOG_ERROR("no more watchpoint registers available");
2307 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2308 }
2309
2310 if (watchpoint->value)
2311 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2312
2313 /* check that length is a power of two */
2314 for (uint32_t len = watchpoint->length; len != 1; len /= 2) {
2315 if (len % 2) {
2316 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2317 return ERROR_COMMAND_ARGUMENT_INVALID;
2318 }
2319 }
2320
2321 if (watchpoint->length == 4) { /* single word watchpoint */
2322 xscale->dbr_available--;/* one DBR reg used */
2323 return ERROR_OK;
2324 }
2325
2326 /* watchpoints across multiple words require both DBR registers */
2327 if (xscale->dbr_available < 2) {
2328 LOG_ERROR("insufficient watchpoint registers available");
2329 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2330 }
2331
2332 if (watchpoint->length > watchpoint->address) {
2333 LOG_ERROR("xscale does not support watchpoints with length "
2334 "greater than address");
2335 return ERROR_COMMAND_ARGUMENT_INVALID;
2336 }
2337
2338 xscale->dbr_available = 0;
2339 return ERROR_OK;
2340 }
2341
2342 static int xscale_unset_watchpoint(struct target *target,
2343 struct watchpoint *watchpoint)
2344 {
2345 struct xscale_common *xscale = target_to_xscale(target);
2346 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2347 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2348
2349 if (target->state != TARGET_HALTED) {
2350 LOG_WARNING("target not halted");
2351 return ERROR_TARGET_NOT_HALTED;
2352 }
2353
2354 if (!watchpoint->set) {
2355 LOG_WARNING("breakpoint not set");
2356 return ERROR_OK;
2357 }
2358
2359 if (watchpoint->set == 1) {
2360 if (watchpoint->length > 4) {
2361 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2362 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2363 } else
2364 dbcon_value &= ~0x3;
2365
2366 xscale_set_reg_u32(dbcon, dbcon_value);
2367 xscale->dbr0_used = 0;
2368 } else if (watchpoint->set == 2) {
2369 dbcon_value &= ~0xc;
2370 xscale_set_reg_u32(dbcon, dbcon_value);
2371 xscale->dbr1_used = 0;
2372 }
2373 watchpoint->set = 0;
2374
2375 return ERROR_OK;
2376 }
2377
2378 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2379 {
2380 struct xscale_common *xscale = target_to_xscale(target);
2381
2382 if (target->state != TARGET_HALTED) {
2383 LOG_ERROR("target not halted");
2384 return ERROR_TARGET_NOT_HALTED;
2385 }
2386
2387 if (watchpoint->set)
2388 xscale_unset_watchpoint(target, watchpoint);
2389
2390 if (watchpoint->length > 4)
2391 xscale->dbr_available++;/* both DBR regs now available */
2392
2393 xscale->dbr_available++;
2394
2395 return ERROR_OK;
2396 }
2397
2398 static int xscale_get_reg(struct reg *reg)
2399 {
2400 struct xscale_reg *arch_info = reg->arch_info;
2401 struct target *target = arch_info->target;
2402 struct xscale_common *xscale = target_to_xscale(target);
2403
2404 /* DCSR, TX and RX are accessible via JTAG */
2405 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2406 return xscale_read_dcsr(arch_info->target);
2407 else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2408 /* 1 = consume register content */
2409 return xscale_read_tx(arch_info->target, 1);
2410 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2411 /* can't read from RX register (host -> debug handler) */
2412 return ERROR_OK;
2413 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2414 /* can't (explicitly) read from TXRXCTRL register */
2415 return ERROR_OK;
2416 } else {/* Other DBG registers have to be transfered by the debug handler
2417 * send CP read request (command 0x40) */
2418 xscale_send_u32(target, 0x40);
2419
2420 /* send CP register number */
2421 xscale_send_u32(target, arch_info->dbg_handler_number);
2422
2423 /* read register value */
2424 xscale_read_tx(target, 1);
2425 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2426
2427 reg->dirty = 0;
2428 reg->valid = 1;
2429 }
2430
2431 return ERROR_OK;
2432 }
2433
2434 static int xscale_set_reg(struct reg *reg, uint8_t *buf)
2435 {
2436 struct xscale_reg *arch_info = reg->arch_info;
2437 struct target *target = arch_info->target;
2438 struct xscale_common *xscale = target_to_xscale(target);
2439 uint32_t value = buf_get_u32(buf, 0, 32);
2440
2441 /* DCSR, TX and RX are accessible via JTAG */
2442 if (strcmp(reg->name, "XSCALE_DCSR") == 0) {
2443 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2444 return xscale_write_dcsr(arch_info->target, -1, -1);
2445 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2446 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2447 return xscale_write_rx(arch_info->target);
2448 } else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2449 /* can't write to TX register (debug-handler -> host) */
2450 return ERROR_OK;
2451 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2452 /* can't (explicitly) write to TXRXCTRL register */
2453 return ERROR_OK;
2454 } else {/* Other DBG registers have to be transfered by the debug handler
2455 * send CP write request (command 0x41) */
2456 xscale_send_u32(target, 0x41);
2457
2458 /* send CP register number */
2459 xscale_send_u32(target, arch_info->dbg_handler_number);
2460
2461 /* send CP register value */
2462 xscale_send_u32(target, value);
2463 buf_set_u32(reg->value, 0, 32, value);
2464 }
2465
2466 return ERROR_OK;
2467 }
2468
2469 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2470 {
2471 struct xscale_common *xscale = target_to_xscale(target);
2472 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2473 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2474
2475 /* send CP write request (command 0x41) */
2476 xscale_send_u32(target, 0x41);
2477
2478 /* send CP register number */
2479 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2480
2481 /* send CP register value */
2482 xscale_send_u32(target, value);
2483 buf_set_u32(dcsr->value, 0, 32, value);
2484
2485 return ERROR_OK;
2486 }
2487
2488 static int xscale_read_trace(struct target *target)
2489 {
2490 struct xscale_common *xscale = target_to_xscale(target);
2491 struct arm *arm = &xscale->arm;
2492 struct xscale_trace_data **trace_data_p;
2493
2494 /* 258 words from debug handler
2495 * 256 trace buffer entries
2496 * 2 checkpoint addresses
2497 */
2498 uint32_t trace_buffer[258];
2499 int is_address[256];
2500 int i, j;
2501 unsigned int num_checkpoints = 0;
2502
2503 if (target->state != TARGET_HALTED) {
2504 LOG_WARNING("target must be stopped to read trace data");
2505 return ERROR_TARGET_NOT_HALTED;
2506 }
2507
2508 /* send read trace buffer command (command 0x61) */
2509 xscale_send_u32(target, 0x61);
2510
2511 /* receive trace buffer content */
2512 xscale_receive(target, trace_buffer, 258);
2513
2514 /* parse buffer backwards to identify address entries */
2515 for (i = 255; i >= 0; i--) {
2516 /* also count number of checkpointed entries */
2517 if ((trace_buffer[i] & 0xe0) == 0xc0)
2518 num_checkpoints++;
2519
2520 is_address[i] = 0;
2521 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2522 ((trace_buffer[i] & 0xf0) == 0xd0)) {
2523 if (i > 0)
2524 is_address[--i] = 1;
2525 if (i > 0)
2526 is_address[--i] = 1;
2527 if (i > 0)
2528 is_address[--i] = 1;
2529 if (i > 0)
2530 is_address[--i] = 1;
2531 }
2532 }
2533
2534
2535 /* search first non-zero entry that is not part of an address */
2536 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2537 ;
2538
2539 if (j == 256) {
2540 LOG_DEBUG("no trace data collected");
2541 return ERROR_XSCALE_NO_TRACE_DATA;
2542 }
2543
2544 /* account for possible partial address at buffer start (wrap mode only) */
2545 if (is_address[0]) { /* first entry is address; complete set of 4? */
2546 i = 1;
2547 while (i < 4)
2548 if (!is_address[i++])
2549 break;
2550 if (i < 4)
2551 j += i; /* partial address; can't use it */
2552 }
2553
2554 /* if first valid entry is indirect branch, can't use that either (no address) */
2555 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2556 j++;
2557
2558 /* walk linked list to terminating entry */
2559 for (trace_data_p = &xscale->trace.data; *trace_data_p;
2560 trace_data_p = &(*trace_data_p)->next)
2561 ;
2562
2563 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2564 (*trace_data_p)->next = NULL;
2565 (*trace_data_p)->chkpt0 = trace_buffer[256];
2566 (*trace_data_p)->chkpt1 = trace_buffer[257];
2567 (*trace_data_p)->last_instruction = buf_get_u32(arm->pc->value, 0, 32);
2568 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2569 (*trace_data_p)->depth = 256 - j;
2570 (*trace_data_p)->num_checkpoints = num_checkpoints;
2571
2572 for (i = j; i < 256; i++) {
2573 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2574 if (is_address[i])
2575 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2576 else
2577 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2578 }
2579
2580 return ERROR_OK;
2581 }
2582
/* Fetch and disassemble the instruction at 'pc' from the loaded trace
 * image (not from target memory), honoring the tracked core state
 * (ARM: 4-byte fetch, Thumb: 2-byte fetch).
 *
 * Returns ERROR_TRACE_IMAGE_UNAVAILABLE when no image is loaded, and
 * ERROR_TRACE_INSTRUCTION_UNAVAILABLE when 'pc' falls outside every
 * image section or the section read fails.
 */
static int xscale_read_instruction(struct target *target, uint32_t pc,
	struct arm_instruction *instruction)
{
	struct xscale_common *const xscale = target_to_xscale(target);
	int i;
	int section = -1;
	size_t size_read;
	uint32_t opcode;
	int retval;

	if (!xscale->trace.image)
		return ERROR_TRACE_IMAGE_UNAVAILABLE;

	/* search for the section the current instruction belongs to */
	for (i = 0; i < xscale->trace.image->num_sections; i++) {
		if ((xscale->trace.image->sections[i].base_address <= pc) &&
			(xscale->trace.image->sections[i].base_address +
			xscale->trace.image->sections[i].size > pc)) {
			section = i;
			break;
		}
	}

	if (section == -1) {
		/* current instruction couldn't be found in the image */
		return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
	}

	if (xscale->trace.core_state == ARM_STATE_ARM) {
		uint8_t buf[4];
		/* section offset = pc relative to the section base */
		retval = image_read_section(xscale->trace.image, section,
				pc - xscale->trace.image->sections[section].base_address,
				4, buf, &size_read);
		if (retval != ERROR_OK) {
			LOG_ERROR("error while reading instruction");
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u32(target, buf);
		arm_evaluate_opcode(opcode, pc, instruction);
	} else if (xscale->trace.core_state == ARM_STATE_THUMB) {
		uint8_t buf[2];
		retval = image_read_section(xscale->trace.image, section,
				pc - xscale->trace.image->sections[section].base_address,
				2, buf, &size_read);
		if (retval != ERROR_OK) {
			LOG_ERROR("error while reading instruction");
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u16(target, buf);
		thumb_evaluate_opcode(opcode, pc, instruction);
	} else {
		/* the trace state machine only ever sets ARM or Thumb */
		LOG_ERROR("BUG: unknown core state encountered");
		exit(-1);
	}

	return ERROR_OK;
}
2640
2641 /* Extract address encoded into trace data.
2642 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2643 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2644 int i, uint32_t *target)
2645 {
2646 /* if there are less than four entries prior to the indirect branch message
2647 * we can't extract the address */
2648 if (i < 4)
2649 *target = 0;
2650 else {
2651 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2652 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2653 }
2654 }
2655
2656 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2657 struct arm_instruction *instruction,
2658 struct command_context *cmd_ctx)
2659 {
2660 int retval = xscale_read_instruction(target, pc, instruction);
2661 if (retval == ERROR_OK)
2662 command_print(cmd_ctx, "%s", instruction->text);
2663 else
2664 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2665 }
2666
/* Replay the collected trace buffers as a disassembly listing.
 *
 * The XScale trace unit records only branch messages (with target
 * addresses for indirect branches and checkpoint registers for some
 * direct ones) plus rollover messages every 16 instructions; this
 * routine reconstructs the instruction stream from those messages
 * using the loaded trace image, printing each instruction via
 * command_print(). Execution state (current_pc, ARM/Thumb mode,
 * checkpoint usage) is carried across loop iterations, so statement
 * order here is significant.
 */
static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct xscale_trace_data *trace_data = xscale->trace.data;
	int i, retval;
	uint32_t breakpoint_pc;
	struct arm_instruction instruction;
	uint32_t current_pc = 0;	/* initialized when address determined */

	if (!xscale->trace.image)
		LOG_WARNING("No trace image loaded; use 'xscale trace_image'");

	/* loop for each trace buffer that was loaded from target */
	while (trace_data) {
		int chkpt = 0;	/* incremented as checkpointed entries found */
		int j;

		/* FIXME: set this to correct mode when trace buffer is first enabled */
		xscale->trace.core_state = ARM_STATE_ARM;

		/* loop for each entry in this trace buffer */
		for (i = 0; i < trace_data->depth; i++) {
			int exception = 0;
			uint32_t chkpt_reg = 0x0;
			uint32_t branch_target = 0;
			int count;

			/* trace entry type is upper nybble of 'message byte' */
			int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;

			/* Target addresses of indirect branches are written into buffer
			 * before the message byte representing the branch. Skip past it */
			if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
				continue;

			switch (trace_msg_type) {
				case 0:	/* Exceptions */
				case 1:
				case 2:
				case 3:
				case 4:
				case 5:
				case 6:
				case 7:
					/* exception number is encoded in bits 6:4 */
					exception = (trace_data->entries[i].data & 0x70) >> 4;

					/* FIXME: vector table may be at ffff0000 */
					branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
					break;

				case 8:	/* Direct Branch */
					break;

				case 9:	/* Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					break;

				case 13:	/* Checkpointed Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					/* with two checkpoints the first one seen is the
					 * oldest (chkpt1); otherwise use chkpt0 */
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1;
					else
						chkpt_reg = trace_data->chkpt0;

					chkpt++;
					break;

				case 12:	/* Checkpointed Direct Branch */
					/* same oldest/newest selection as the indirect case */
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1;
					else
						chkpt_reg = trace_data->chkpt0;

					/* if no current_pc, checkpoint will be starting point */
					if (current_pc == 0)
						branch_target = chkpt_reg;

					chkpt++;
					break;

				case 15:	/* Roll-over */
					break;

				default:	/* Reserved */
					LOG_WARNING("trace is suspect: invalid trace message byte");
					continue;

			}

			/* If we don't have the current_pc yet, but we did get the branch target
			 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
			 * then we can start displaying instructions at the next iteration, with
			 * branch_target as the starting point.
			 */
			if (current_pc == 0) {
				current_pc = branch_target;	/* remains 0 unless branch_target obtained */
				continue;
			}

			/* We have current_pc. Read and display the instructions from the image.
			 * First, display count instructions (lower nybble of message byte). */
			count = trace_data->entries[i].data & 0x0f;
			for (j = 0; j < count; j++) {
				xscale_display_instruction(target, current_pc, &instruction,
					cmd_ctx);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			/* An additional instruction is implicitly added to count for
			 * rollover and some exceptions: undef, swi, prefetch abort. */
			if ((trace_msg_type == 15) || (exception > 0 && exception < 4)) {
				xscale_display_instruction(target, current_pc, &instruction,
					cmd_ctx);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			if (trace_msg_type == 15)	/* rollover */
				continue;

			if (exception) {
				command_print(cmd_ctx, "--- exception %i ---", exception);
				continue;
			}

			/* not exception or rollover; next instruction is a branch and is
			 * not included in the count */
			xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);

			/* for direct branches, extract branch destination from instruction */
			if ((trace_msg_type == 8) || (trace_msg_type == 12)) {
				retval = xscale_read_instruction(target, current_pc, &instruction);
				if (retval == ERROR_OK)
					current_pc = instruction.info.b_bl_bx_blx.target_address;
				else
					current_pc = 0;	/* branch destination unknown */

				/* direct branch w/ checkpoint; can also get from checkpoint reg */
				if (trace_msg_type == 12) {
					if (current_pc == 0)
						current_pc = chkpt_reg;
					else if (current_pc != chkpt_reg)	/* sanity check */
						LOG_WARNING("trace is suspect: checkpoint register "
							"inconsistent with adddress from image");
				}

				if (current_pc == 0)
					command_print(cmd_ctx, "address unknown");

				continue;
			}

			/* indirect branch; the branch destination was read from trace buffer */
			if ((trace_msg_type == 9) || (trace_msg_type == 13)) {
				current_pc = branch_target;

				/* sanity check (checkpoint reg is redundant) */
				if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
					LOG_WARNING("trace is suspect: checkpoint register "
						"inconsistent with address from trace buffer");
			}

		}	/* END: for (i = 0; i < trace_data->depth; i++) */

		breakpoint_pc = trace_data->last_instruction;	/* used below */
		trace_data = trace_data->next;

	}	/* END: while (trace_data) */

	/* Finally... display all instructions up to the value of the pc when the
	 * debug break occurred (saved when trace data was collected from target).
	 * This is necessary because the trace only records execution branches and 16
	 * consecutive instructions (rollovers), so last few typically missed.
	 */
	if (current_pc == 0)
		return ERROR_OK;	/* current_pc was never found */

	/* how many instructions remaining? */
	int gap_count = (breakpoint_pc - current_pc) /
		(xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);

	/* should never be negative or over 16, but verify */
	if (gap_count < 0 || gap_count > 16) {
		LOG_WARNING("trace is suspect: excessive gap at end of trace");
		return ERROR_OK;	/* bail; large number or negative value no good */
	}

	/* display remaining instructions */
	for (i = 0; i < gap_count; i++) {
		xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
		current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
	}

	return ERROR_OK;
}
2865
/* accessor vtable used for every entry in the XScale debug reg cache */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2870
/* Build the register caches for this target: the generic ARM core
 * registers first, then a second cache holding the XScale debug
 * registers (DCSR, TX/RX, breakpoint/watchpoint comparators, ...),
 * each bound to this target through its per-register arch_info. */
static void xscale_build_reg_cache(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	/* one arch_info per template entry; sizeof of the array covers all */
	struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
	int i;
	int num_regs = ARRAY_SIZE(xscale_reg_arch_info);

	(*cache_p) = arm_build_reg_cache(target, arm);

	/* chain the XScale cache behind the ARM core cache */
	(*cache_p)->next = malloc(sizeof(struct reg_cache));
	cache_p = &(*cache_p)->next;

	/* fill in values for the xscale reg cache */
	(*cache_p)->name = "XScale registers";
	(*cache_p)->next = NULL;
	(*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
	(*cache_p)->num_regs = num_regs;

	for (i = 0; i < num_regs; i++) {
		(*cache_p)->reg_list[i].name = xscale_reg_list[i];
		/* 32-bit value storage, zero-initialized and initially invalid */
		(*cache_p)->reg_list[i].value = calloc(4, 1);
		(*cache_p)->reg_list[i].dirty = 0;
		(*cache_p)->reg_list[i].valid = 0;
		(*cache_p)->reg_list[i].size = 32;
		(*cache_p)->reg_list[i].arch_info = &arch_info[i];
		(*cache_p)->reg_list[i].type = &xscale_reg_type;
		/* copy the static template, then bind it to this target */
		arch_info[i] = xscale_reg_arch_info[i];
		arch_info[i].target = target;
	}

	xscale->reg_cache = (*cache_p);
}
2905
2906 static int xscale_init_target(struct command_context *cmd_ctx,
2907 struct target *target)
2908 {
2909 xscale_build_reg_cache(target);
2910 return ERROR_OK;
2911 }
2912
/* Initialize the XScale-specific state for a freshly created target:
 * variant detection, default debug-handler address and exception
 * vectors, comparator bookkeeping, trace state, and the ARM and MMU
 * callback wiring. Always returns ERROR_OK. */
static int xscale_init_arch_info(struct target *target,
	struct xscale_common *xscale, struct jtag_tap *tap)
{
	struct arm *arm;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	arm = &xscale->arm;

	/* store architecture specific data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* PXA3xx with 11 bit IR shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler
	 * (ARM B offset: target - vector - 8, in words) */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* remaining vectors default to a branch-to-self (0xfffffe = -2 words) */
	for (i = 1; i <= 7; i++) {
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction-breakpoint and two data-breakpoint comparators */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
		target_name(target));

	/* BKPT opcodes used for software breakpoints */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale->trace.buffer_fill = 0;
	xscale->trace.fill_counter = 0;

	/* prepare ARMv4/5 specific information */
	arm->arch_info = xscale;
	arm->core_type = ARM_MODE_ANY;
	arm->read_core_reg = xscale_read_core_reg;
	arm->write_core_reg = xscale_write_core_reg;
	arm->full_context = xscale_full_context;

	arm_init_arch_info(target, arm);

	/* MMU/cache helpers used by the generic armv4_5 MMU layer */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
3002
3003 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3004 {
3005 struct xscale_common *xscale;
3006
3007 if (sizeof xscale_debug_handler > 0x800) {
3008 LOG_ERROR("debug_handler.bin: larger than 2kb");
3009 return ERROR_FAIL;
3010 }
3011
3012 xscale = calloc(1, sizeof(*xscale));
3013 if (!xscale)
3014 return ERROR_FAIL;
3015
3016 return xscale_init_arch_info(target, xscale, target->tap);
3017 }
3018
3019 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3020 {
3021 struct target *target = NULL;
3022 struct xscale_common *xscale;
3023 int retval;
3024 uint32_t handler_address;
3025
3026 if (CMD_ARGC < 2)
3027 return ERROR_COMMAND_SYNTAX_ERROR;
3028
3029 target = get_target(CMD_ARGV[0]);
3030 if (target == NULL) {
3031 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3032 return ERROR_FAIL;
3033 }
3034
3035 xscale = target_to_xscale(target);
3036 retval = xscale_verify_pointer(CMD_CTX, xscale);
3037 if (retval != ERROR_OK)
3038 return retval;
3039
3040 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3041
3042 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3043 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3044 xscale->handler_address = handler_address;
3045 else {
3046 LOG_ERROR(
3047 "xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3048 return ERROR_FAIL;
3049 }
3050
3051 return ERROR_OK;
3052 }
3053
3054 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3055 {
3056 struct target *target = NULL;
3057 struct xscale_common *xscale;
3058 int retval;
3059 uint32_t cache_clean_address;
3060
3061 if (CMD_ARGC < 2)
3062 return ERROR_COMMAND_SYNTAX_ERROR;
3063
3064 target = get_target(CMD_ARGV[0]);
3065 if (target == NULL) {
3066 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3067 return ERROR_FAIL;
3068 }
3069 xscale = target_to_xscale(target);
3070 retval = xscale_verify_pointer(CMD_CTX, xscale);
3071 if (retval != ERROR_OK)
3072 return retval;
3073
3074 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3075
3076 if (cache_clean_address & 0xffff)
3077 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3078 else
3079 xscale->cache_clean_address = cache_clean_address;
3080
3081 return ERROR_OK;
3082 }
3083
3084 COMMAND_HANDLER(xscale_handle_cache_info_command)
3085 {
3086 struct target *target = get_current_target(CMD_CTX);
3087 struct xscale_common *xscale = target_to_xscale(target);
3088 int retval;
3089
3090 retval = xscale_verify_pointer(CMD_CTX, xscale);
3091 if (retval != ERROR_OK)
3092 return retval;
3093
3094 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3095 }
3096
3097 static int xscale_virt2phys(struct target *target,
3098 uint32_t virtual, uint32_t *physical)
3099 {
3100 struct xscale_common *xscale = target_to_xscale(target);
3101 uint32_t cb;
3102
3103 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3104 LOG_ERROR(xscale_not);
3105 return ERROR_TARGET_INVALID;
3106 }
3107
3108 uint32_t ret;
3109 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3110 virtual, &cb, &ret);
3111 if (retval != ERROR_OK)
3112 return retval;
3113 *physical = ret;
3114 return ERROR_OK;
3115 }
3116
3117 static int xscale_mmu(struct target *target, int *enabled)
3118 {
3119 struct xscale_common *xscale = target_to_xscale(target);
3120
3121 if (target->state != TARGET_HALTED) {
3122 LOG_ERROR("Target not halted");
3123 return ERROR_TARGET_INVALID;
3124 }
3125 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3126 return ERROR_OK;
3127 }
3128
3129 COMMAND_HANDLER(xscale_handle_mmu_command)
3130 {
3131 struct target *target = get_current_target(CMD_CTX);
3132 struct xscale_common *xscale = target_to_xscale(target);
3133 int retval;
3134
3135 retval = xscale_verify_pointer(CMD_CTX, xscale);
3136 if (retval != ERROR_OK)
3137 return retval;
3138
3139 if (target->state != TARGET_HALTED) {
3140 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3141 return ERROR_OK;
3142 }
3143
3144 if (CMD_ARGC >= 1) {
3145 bool enable;
3146 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3147 if (enable)
3148 xscale_enable_mmu_caches(target, 1, 0, 0);
3149 else
3150 xscale_disable_mmu_caches(target, 1, 0, 0);
3151 xscale->armv4_5_mmu.mmu_enabled = enable;
3152 }
3153
3154 command_print(CMD_CTX, "mmu %s",
3155 (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3156
3157 return ERROR_OK;
3158 }
3159
3160 COMMAND_HANDLER(xscale_handle_idcache_command)
3161 {
3162 struct target *target = get_current_target(CMD_CTX);
3163 struct xscale_common *xscale = target_to_xscale(target);
3164
3165 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3166 if (retval != ERROR_OK)
3167 return retval;
3168
3169 if (target->state != TARGET_HALTED) {
3170 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3171 return ERROR_OK;
3172 }
3173
3174 bool icache = false;
3175 if (strcmp(CMD_NAME, "icache") == 0)
3176 icache = true;
3177 if (CMD_ARGC >= 1) {
3178 bool enable;
3179 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3180 if (icache) {
3181 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3182 if (enable)
3183 xscale_enable_mmu_caches(target, 0, 0, 1);
3184 else
3185 xscale_disable_mmu_caches(target, 0, 0, 1);
3186 } else {
3187 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3188 if (enable)
3189 xscale_enable_mmu_caches(target, 0, 1, 0);
3190 else
3191 xscale_disable_mmu_caches(target, 0, 1, 0);
3192 }
3193 }
3194
3195 bool enabled = icache ?
3196 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3197 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3198 const char *msg = enabled ? "enabled" : "disabled";
3199 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3200
3201 return ERROR_OK;
3202 }
3203
/* Vector names accepted by the "vector_catch" command, each paired
 * with its trap-enable bit in the DCSR (see DCSR_T* definitions). */
static const struct {
	char name[15];
	unsigned mask;
} vec_ids[] = {
	{ "fiq", DCSR_TF, },
	{ "irq", DCSR_TI, },
	{ "dabt", DCSR_TD, },
	{ "pabt", DCSR_TA, },
	{ "swi", DCSR_TS, },
	{ "undef", DCSR_TU, },
	{ "reset", DCSR_TR, },
};
3216
/* "xscale vector_catch" command: set and/or display which exception
 * vectors trigger debug entry.
 *
 * With no arguments, only the current per-vector state is printed.
 * "all"/"none" (as the sole argument) select every/no trap bit;
 * otherwise each argument names one vector from vec_ids[]. The chosen
 * mask is merged into the cached DCSR value and written to the core. */
COMMAND_HANDLER(xscale_handle_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	uint32_t dcsr_value;
	uint32_t catch = 0;
	struct reg *dcsr_reg = &xscale->reg_cache->reg_list[XSCALE_DCSR];

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
	if (CMD_ARGC > 0) {
		/* "all"/"none" shortcuts consume the single argument, so the
		 * name-matching loop below is skipped (CMD_ARGC drops to 0). */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = DCSR_TRAP_MASK;
				CMD_ARGC--;
			} else if (strcmp(CMD_ARGV[0], "none") == 0) {
				catch = 0;
				CMD_ARGC--;
			}
		}
		/* OR in the trap bit for each named vector; unknown names abort. */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name))
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_ERROR("No vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
		/* Replace only the trap bits of DCSR, then push to hardware. */
		buf_set_u32(dcsr_reg->value, 0, 32,
			(buf_get_u32(dcsr_reg->value, 0, 32) & ~DCSR_TRAP_MASK) | catch);
		xscale_write_dcsr(target, -1, -1);
	}

	/* Report the (possibly updated) catch state for every vector. */
	dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD_CTX, "%15s: %s", vec_ids[i].name,
			(dcsr_value & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
3267
3268
3269 COMMAND_HANDLER(xscale_handle_vector_table_command)
3270 {
3271 struct target *target = get_current_target(CMD_CTX);
3272 struct xscale_common *xscale = target_to_xscale(target);
3273 int err = 0;
3274 int retval;
3275
3276 retval = xscale_verify_pointer(CMD_CTX, xscale);
3277 if (retval != ERROR_OK)
3278 return retval;
3279
3280 if (CMD_ARGC == 0) { /* print current settings */
3281 int idx;
3282
3283 command_print(CMD_CTX, "active user-set static vectors:");
3284 for (idx = 1; idx < 8; idx++)
3285 if (xscale->static_low_vectors_set & (1 << idx))
3286 command_print(CMD_CTX,
3287 "low %d: 0x%" PRIx32,
3288 idx,
3289 xscale->static_low_vectors[idx]);
3290 for (idx = 1; idx < 8; idx++)
3291 if (xscale->static_high_vectors_set & (1 << idx))
3292 command_print(CMD_CTX,
3293 "high %d: 0x%" PRIx32,
3294 idx,
3295 xscale->static_high_vectors[idx]);
3296 return ERROR_OK;
3297 }
3298
3299 if (CMD_ARGC != 3)
3300 err = 1;
3301 else {
3302 int idx;
3303 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3304 uint32_t vec;
3305 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3306
3307 if (idx < 1 || idx >= 8)
3308 err = 1;
3309
3310 if (!err && strcmp(CMD_ARGV[0], "low") == 0) {
3311 xscale->static_low_vectors_set |= (1<<idx);
3312 xscale->static_low_vectors[idx] = vec;
3313 } else if (!err && (strcmp(CMD_ARGV[0], "high") == 0)) {
3314 xscale->static_high_vectors_set |= (1<<idx);
3315 xscale->static_high_vectors[idx] = vec;
3316 } else
3317 err = 1;
3318 }
3319
3320 if (err)
3321 return ERROR_COMMAND_SYNTAX_ERROR;
3322
3323 return ERROR_OK;
3324 }
3325
3326
3327 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3328 {
3329 struct target *target = get_current_target(CMD_CTX);
3330 struct xscale_common *xscale = target_to_xscale(target);
3331 uint32_t dcsr_value;
3332 int retval;
3333
3334 retval = xscale_verify_pointer(CMD_CTX, xscale);
3335 if (retval != ERROR_OK)
3336 return retval;
3337
3338 if (target->state != TARGET_HALTED) {
3339 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3340 return ERROR_OK;
3341 }
3342
3343 if (CMD_ARGC >= 1) {
3344 if (strcmp("enable", CMD_ARGV[0]) == 0)
3345 xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
3346 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3347 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3348 else
3349 return ERROR_COMMAND_SYNTAX_ERROR;
3350 }
3351
3352 if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3353 if (strcmp("fill", CMD_ARGV[1]) == 0) {
3354 int buffcount = 1; /* default */
3355 if (CMD_ARGC >= 3)
3356 COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
3357 if (buffcount < 1) { /* invalid */
3358 command_print(CMD_CTX, "fill buffer count must be > 0");
3359 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3360 return ERROR_COMMAND_SYNTAX_ERROR;
3361 }
3362 xscale->trace.buffer_fill = buffcount;
3363 xscale->trace.mode = XSCALE_TRACE_FILL;
3364 } else if (strcmp("wrap", CMD_ARGV[1]) == 0)
3365 xscale->trace.mode = XSCALE_TRACE_WRAP;
3366 else {
3367 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3368 return ERROR_COMMAND_SYNTAX_ERROR;
3369 }
3370 }
3371
3372 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3373 char fill_string[12];
3374 sprintf(fill_string, "fill %d", xscale->trace.buffer_fill);
3375 command_print(CMD_CTX, "trace buffer enabled (%s)",
3376 (xscale->trace.mode == XSCALE_TRACE_FILL)
3377 ? fill_string : "wrap");
3378 } else
3379 command_print(CMD_CTX, "trace buffer disabled");
3380
3381 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3382 if (xscale->trace.mode == XSCALE_TRACE_FILL)
3383 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3384 else
3385 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3386
3387 return ERROR_OK;
3388 }
3389
3390 COMMAND_HANDLER(xscale_handle_trace_image_command)
3391 {
3392 struct target *target = get_current_target(CMD_CTX);
3393 struct xscale_common *xscale = target_to_xscale(target);
3394 int retval;
3395
3396 if (CMD_ARGC < 1)
3397 return ERROR_COMMAND_SYNTAX_ERROR;
3398
3399 retval = xscale_verify_pointer(CMD_CTX, xscale);
3400 if (retval != ERROR_OK)
3401 return retval;
3402
3403 if (xscale->trace.image) {
3404 image_close(xscale->trace.image);
3405 free(xscale->trace.image);
3406 command_print(CMD_CTX, "previously loaded image found and closed");
3407 }
3408
3409 xscale->trace.image = malloc(sizeof(struct image));
3410 xscale->trace.image->base_address_set = 0;
3411 xscale->trace.image->start_address_set = 0;
3412
3413 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3414 if (CMD_ARGC >= 2) {
3415 xscale->trace.image->base_address_set = 1;
3416 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3417 } else
3418 xscale->trace.image->base_address_set = 0;
3419
3420 if (image_open(xscale->trace.image, CMD_ARGV[0],
3421 (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK) {
3422 free(xscale->trace.image);
3423 xscale->trace.image = NULL;
3424 return ERROR_OK;
3425 }
3426
3427 return ERROR_OK;
3428 }
3429
3430 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3431 {
3432 struct target *target = get_current_target(CMD_CTX);
3433 struct xscale_common *xscale = target_to_xscale(target);
3434 struct xscale_trace_data *trace_data;
3435 struct fileio *file;
3436 int retval;
3437
3438 retval = xscale_verify_pointer(CMD_CTX, xscale);
3439 if (retval != ERROR_OK)
3440 return retval;
3441
3442 if (target->state != TARGET_HALTED) {
3443 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3444 return ERROR_OK;
3445 }
3446
3447 if (CMD_ARGC < 1)
3448 return ERROR_COMMAND_SYNTAX_ERROR;
3449
3450 trace_data = xscale->trace.data;
3451
3452 if (!trace_data) {
3453 command_print(CMD_CTX, "no trace data collected");
3454 return ERROR_OK;
3455 }
3456
3457 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3458 return ERROR_OK;
3459
3460 while (trace_data) {
3461 int i;
3462
3463 fileio_write_u32(file, trace_data->chkpt0);
3464 fileio_write_u32(file, trace_data->chkpt1);
3465 fileio_write_u32(file, trace_data->last_instruction);
3466 fileio_write_u32(file, trace_data->depth);
3467
3468 for (i = 0; i < trace_data->depth; i++)
3469 fileio_write_u32(file, trace_data->entries[i].data |
3470 ((trace_data->entries[i].type & 0xffff) << 16));
3471
3472 trace_data = trace_data->next;
3473 }
3474
3475 fileio_close(file);
3476
3477 return ERROR_OK;
3478 }
3479
3480 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3481 {
3482 struct target *target = get_current_target(CMD_CTX);
3483 struct xscale_common *xscale = target_to_xscale(target);
3484 int retval;
3485
3486 retval = xscale_verify_pointer(CMD_CTX, xscale);
3487 if (retval != ERROR_OK)
3488 return retval;
3489
3490 xscale_analyze_trace(target, CMD_CTX);
3491
3492 return ERROR_OK;
3493 }
3494
/* "xscale cp15 register [value]" command: read (one argument) or write
 * (two arguments) a CP15 coprocessor register by its architectural
 * number. Requires a halted target, since access goes through the
 * debug-handler protocol. */
COMMAND_HANDLER(xscale_handle_cp15)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}
	uint32_t reg_no = 0;
	struct reg *reg = NULL;
	if (CMD_ARGC > 0) {
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
		/*translate from xscale cp15 register no to openocd register*/
		switch (reg_no) {
		case 0:
			reg_no = XSCALE_MAINID;
			break;
		case 1:
			reg_no = XSCALE_CTRL;
			break;
		case 2:
			reg_no = XSCALE_TTB;
			break;
		case 3:
			reg_no = XSCALE_DAC;
			break;
		case 5:
			reg_no = XSCALE_FSR;
			break;
		case 6:
			reg_no = XSCALE_FAR;
			break;
		case 13:
			reg_no = XSCALE_PID;
			break;
		case 15:
			reg_no = XSCALE_CPACCESS;
			break;
		default:
			/* CP15 registers not listed above are not exposed
			 * through the register cache. */
			command_print(CMD_CTX, "invalid register number");
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		reg = &xscale->reg_cache->reg_list[reg_no];

	}
	if (CMD_ARGC == 1) {
		uint32_t value;

		/* read cp15 control register */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size),
			value);
	} else if (CMD_ARGC == 2) {
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);

		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);

		/* NOTE(review): the write path bypasses the register cache,
		 * so reg->value may be stale until the next read — confirm. */
	} else
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
3574
/* Per-target "xscale" subcommands available in EXEC mode; most of the
 * handlers above additionally require a halted target. */
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		/* shares its handler with "icache"; dispatch is by CMD_NAME */
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display mask of vectors "
			"that should trigger debug entry",
		.usage = "['all'|'none'|'fiq'|'irq'|'dabt'|'pabt'|'swi'|'undef'|'reset']",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
		.usage = "",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};
/* "xscale" command group: configuration-time commands plus the chained
 * EXEC-mode subcommands above. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "<target> <address>",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level registration: generic ARM commands plus the "xscale"
 * command group defined above. */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.usage = "",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3692
/* Dispatch table registering the XScale implementation with the
 * OpenOCD target framework; generic ARM/ARMv4-5 helpers are reused
 * where no XScale-specific handling is needed. */
struct target_type xscale_target = {
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)