target: Add 64-bit target address support
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
23 ***************************************************************************/
24
25 #ifdef HAVE_CONFIG_H
26 #include "config.h"
27 #endif
28
29 #include "breakpoints.h"
30 #include "xscale.h"
31 #include "target_type.h"
32 #include "arm_jtag.h"
33 #include "arm_simulator.h"
34 #include "arm_disassembler.h"
35 #include <helper/time_support.h>
36 #include "register.h"
37 #include "image.h"
38 #include "arm_opcodes.h"
39 #include "armv4_5.h"
40
41 /*
42 * Important XScale documents available as of October 2009 include:
43 *
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
48 *
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
56 *
57 * Chip-specific microarchitecture documents may also be useful.
58 */
59
60 /* forward declarations */
61 static int xscale_resume(struct target *, int current,
62 target_addr_t address, int handle_breakpoints, int debug_execution);
63 static int xscale_debug_entry(struct target *);
64 static int xscale_restore_banked(struct target *);
65 static int xscale_get_reg(struct reg *reg);
66 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
67 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
68 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
69 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
70 static int xscale_read_trace(struct target *);
71
/* This XScale "debug handler" is loaded into the processor's
 * mini-ICache, which is 2K of code writable only via JTAG.
 * The byte array is pulled in from the generated debug_handler.inc
 * (sources under contrib/loaders/debug/xscale/).
 */
static const uint8_t xscale_debug_handler[] = {
#include "../../contrib/loaders/debug/xscale/debug_handler.inc"
};
78
/* Names of the XScale-specific registers exposed through the register
 * cache.  Order is significant: the cache is indexed with the XSCALE_*
 * enumerators (e.g. reg_list[XSCALE_DCSR] below), so this table must
 * stay in step with xscale_reg_arch_info[]. */
static const char *const xscale_reg_list[] = {
	"XSCALE_MAINID",	/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",	/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",	/* 20 */
	"XSCALE_TXRXCTRL",
};
103
/* Per-register metadata, parallel (index for index) with
 * xscale_reg_list[] above.  Entries carrying -1 are not reachable
 * through the debug handler; as the trailing comments note, they are
 * accessed directly over JTAG instead. */
static const struct xscale_reg xscale_reg_arch_info[] = {
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL},	/* DCSR accessed via JTAG or SW */
	{-1, NULL},	/* TX accessed via JTAG */
	{-1, NULL},	/* RX accessed via JTAG */
	{-1, NULL},	/* TXRXCTRL implicit access via JTAG */
};
128
/* Convenience wrapper to access XScale specific registers: pack a host
 * uint32_t into a little-endian buffer and hand it to xscale_set_reg(). */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t value_buf[4];

	buf_set_u32(value_buf, 0, 32, value);
	return xscale_set_reg(reg, value_buf);
}
138
139 static const char xscale_not[] = "target is not an XScale";
140
141 static int xscale_verify_pointer(struct command_context *cmd_ctx,
142 struct xscale_common *xscale)
143 {
144 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
145 command_print(cmd_ctx, xscale_not);
146 return ERROR_TARGET_INVALID;
147 }
148 return ERROR_OK;
149 }
150
151 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
152 {
153 assert(tap != NULL);
154
155 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
156 struct scan_field field;
157 uint8_t scratch[4];
158
159 memset(&field, 0, sizeof field);
160 field.num_bits = tap->ir_length;
161 field.out_value = scratch;
162 buf_set_u32(scratch, 0, field.num_bits, new_instr);
163
164 jtag_add_ir_scan(tap, &field, end_state);
165 }
166
167 return ERROR_OK;
168 }
169
/* Read the Debug Control and Status Register via the SELDCSR JTAG data
 * register and refresh the cached XSCALE_DCSR value.
 *
 * The scan carries 3 control/status bits, the 32-bit DCSR, and one
 * trailing bit.  Since the scan also drives DCSR, the hold_rst and
 * external_debug_break flags are re-driven on the outgoing bits, and a
 * second scan writes back the value just captured so hardware state is
 * left as found.
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_DRPAUSE);

	/* bit 1: hold_rst, bit 2: external debug break request */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	/* capture the 32-bit DCSR straight into the register cache */
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* verify the captured status bits against expected values */
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* cache now matches hardware */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
231
232
/* JTAG post-execute callback: convert one captured 32-bit scan buffer
 * in place into a host-order uint32_t.  arg points at an element of the
 * uint32_t result array in xscale_receive(), whose storage doubled as
 * the 4-byte in_value buffer for the scan. */
static void xscale_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;
	*((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}
238
239 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
240 {
241 if (num_words == 0)
242 return ERROR_COMMAND_SYNTAX_ERROR;
243
244 struct xscale_common *xscale = target_to_xscale(target);
245 int retval = ERROR_OK;
246 tap_state_t path[3];
247 struct scan_field fields[3];
248 uint8_t *field0 = malloc(num_words * 1);
249 uint8_t field0_check_value = 0x2;
250 uint8_t field0_check_mask = 0x6;
251 uint32_t *field1 = malloc(num_words * 4);
252 uint8_t field2_check_value = 0x0;
253 uint8_t field2_check_mask = 0x1;
254 int words_done = 0;
255 int words_scheduled = 0;
256 int i;
257
258 path[0] = TAP_DRSELECT;
259 path[1] = TAP_DRCAPTURE;
260 path[2] = TAP_DRSHIFT;
261
262 memset(&fields, 0, sizeof fields);
263
264 fields[0].num_bits = 3;
265 uint8_t tmp;
266 fields[0].in_value = &tmp;
267 fields[0].check_value = &field0_check_value;
268 fields[0].check_mask = &field0_check_mask;
269
270 fields[1].num_bits = 32;
271
272 fields[2].num_bits = 1;
273 uint8_t tmp2;
274 fields[2].in_value = &tmp2;
275 fields[2].check_value = &field2_check_value;
276 fields[2].check_mask = &field2_check_mask;
277
278 xscale_jtag_set_instr(target->tap,
279 XSCALE_DBGTX << xscale->xscale_variant,
280 TAP_IDLE);
281 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above
282 *could be a no-op */
283
284 /* repeat until all words have been collected */
285 int attempts = 0;
286 while (words_done < num_words) {
287 /* schedule reads */
288 words_scheduled = 0;
289 for (i = words_done; i < num_words; i++) {
290 fields[0].in_value = &field0[i];
291
292 jtag_add_pathmove(3, path);
293
294 fields[1].in_value = (uint8_t *)(field1 + i);
295
296 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
297
298 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
299
300 words_scheduled++;
301 }
302
303 retval = jtag_execute_queue();
304 if (retval != ERROR_OK) {
305 LOG_ERROR("JTAG error while receiving data from debug handler");
306 break;
307 }
308
309 /* examine results */
310 for (i = words_done; i < num_words; i++) {
311 if (!(field0[i] & 1)) {
312 /* move backwards if necessary */
313 int j;
314 for (j = i; j < num_words - 1; j++) {
315 field0[j] = field0[j + 1];
316 field1[j] = field1[j + 1];
317 }
318 words_scheduled--;
319 }
320 }
321 if (words_scheduled == 0) {
322 if (attempts++ == 1000) {
323 LOG_ERROR(
324 "Failed to receiving data from debug handler after 1000 attempts");
325 retval = ERROR_TARGET_TIMEOUT;
326 break;
327 }
328 }
329
330 words_done += words_scheduled;
331 }
332
333 for (i = 0; i < num_words; i++)
334 *(buffer++) = buf_get_u32((uint8_t *)&field1[i], 0, 32);
335
336 free(field1);
337
338 return retval;
339 }
340
/* Read the debug handler's TX register (handler->debugger channel) into
 * the cached XSCALE_TX value.
 *
 * With consume != 0 the scan path goes straight from Capture-DR to
 * Shift-DR, which acknowledges (clears) TX_READY; otherwise a detour
 * through Pause-DR lets us peek at TX without consuming it.
 *
 * Returns ERROR_OK when a valid word was captured,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when TX held no data, or
 * ERROR_TARGET_TIMEOUT after polling for about one second.
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGTX << xscale->xscale_variant,
		TAP_IDLE);

	/* consuming path: straight into Shift-DR */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: detour through Exit1/Pause/Exit2 first */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	/* 3 status bits; bit 0 signals valid data */
	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	/* the 32-bit payload is captured directly into the register cache */
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;; ) {
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* loop again only when consuming and no data was ready yet;
		 * in peek mode a single scan suffices */
		if (!((!(field0_in & 1)) && consume))
			goto done;
		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
427
/* Push the cached XSCALE_RX value to the debug handler over the DBGRX
 * JTAG data register (debugger->handler channel).
 *
 * First polls, rescanning until the captured bit 0 (rx busy/unread
 * flag) is clear, i.e. the handler has taken the previous word; then a
 * final scan with the rx_valid bit set hands over the new word.  Gives
 * up after roughly one second.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	/* 32-bit payload comes straight from the cached RX register */
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;) {
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
505
/* send count elements of size byte to the debug handler */
static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	int done_count = 0;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	/* DBGRX scan layout: 3 control bits (all zero), 32 data bits, one
	 * rx_valid bit (constant 1).  t0/t2 are static so their addresses
	 * stay valid while scans sit on the JTAG queue; t1 is rewritten
	 * per word, which relies on jtag_add_dr_scan() copying out_value
	 * contents at enqueue time. */
	static const uint8_t t0;
	uint8_t t1[4];
	static const uint8_t t2 = 1;
	struct scan_field fields[3] = {
		{ .num_bits = 3, .out_value = &t0 },
		{ .num_bits = 32, .out_value = t1 },
		{ .num_bits = 1, .out_value = &t2 },
	};

	int endianness = target->endianness;
	while (done_count++ < count) {
		uint32_t t;

		/* widen each element to 32 bits, honoring target byte order */
		switch (size) {
		case 4:
			if (endianness == TARGET_LITTLE_ENDIAN)
				t = le_to_h_u32(buffer);
			else
				t = be_to_h_u32(buffer);
			break;
		case 2:
			if (endianness == TARGET_LITTLE_ENDIAN)
				t = le_to_h_u16(buffer);
			else
				t = be_to_h_u16(buffer);
			break;
		case 1:
			t = buffer[0];
			break;
		default:
			LOG_ERROR("BUG: size neither 4, 2 nor 1");
			return ERROR_COMMAND_SYNTAX_ERROR;
		}

		buf_set_u32(t1, 0, 32, t);

		jtag_add_dr_scan(target->tap,
			3,
			fields,
			TAP_IDLE);
		buffer += size;
	}

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
568
569 static int xscale_send_u32(struct target *target, uint32_t value)
570 {
571 struct xscale_common *xscale = target_to_xscale(target);
572
573 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
574 return xscale_write_rx(target);
575 }
576
/* Write the cached XSCALE_DCSR value to the target through the SELDCSR
 * JTAG data register.
 *
 * hold_rst / ext_dbg_brk first update the matching sticky flags in
 * struct xscale_common; pass -1 to leave a flag unchanged.  The flags
 * are driven on the 3 leading control bits of the scan.
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* bit 1: hold_rst, bit 2: external debug break request */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* cache now matches hardware */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
633
/* parity of the number of bits 0 if even; 1 if odd. for 32 bit words */
static unsigned int parity(unsigned int v)
{
	/* XOR-fold the word all the way down to a single bit instead of
	 * finishing with a 16-entry lookup table. */
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v ^= v >> 2;
	v ^= v >> 1;
	return v & 1;
}
645
/* Load one 8-word line into the mini instruction cache via the LDIC
 * JTAG data register.
 *
 * va     virtual address of the (32-byte) cache line
 * buffer the eight instruction words to load
 *
 * The first scan sends the LDIC command plus the line address; eight
 * further scans each send one instruction word followed by its parity
 * bit.
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
		TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++) {
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* memcpy rather than a pointer cast avoids aliasing issues */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
699
700 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
701 {
702 struct xscale_common *xscale = target_to_xscale(target);
703 uint8_t packet[4];
704 uint8_t cmd;
705 struct scan_field fields[2];
706
707 xscale_jtag_set_instr(target->tap,
708 XSCALE_LDIC << xscale->xscale_variant,
709 TAP_IDLE);
710
711 /* CMD for invalidate IC line b000, bits [6:4] b000 */
712 buf_set_u32(&cmd, 0, 6, 0x0);
713
714 /* virtual address of desired cache line */
715 buf_set_u32(packet, 0, 27, va >> 5);
716
717 memset(&fields, 0, sizeof fields);
718
719 fields[0].num_bits = 6;
720 fields[0].out_value = &cmd;
721
722 fields[1].num_bits = 27;
723 fields[1].out_value = packet;
724
725 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
726
727 return ERROR_OK;
728 }
729
730 static int xscale_update_vectors(struct target *target)
731 {
732 struct xscale_common *xscale = target_to_xscale(target);
733 int i;
734 int retval;
735
736 uint32_t low_reset_branch, high_reset_branch;
737
738 for (i = 1; i < 8; i++) {
739 /* if there's a static vector specified for this exception, override */
740 if (xscale->static_high_vectors_set & (1 << i))
741 xscale->high_vectors[i] = xscale->static_high_vectors[i];
742 else {
743 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
744 if (retval == ERROR_TARGET_TIMEOUT)
745 return retval;
746 if (retval != ERROR_OK) {
747 /* Some of these reads will fail as part of normal execution */
748 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
749 }
750 }
751 }
752
753 for (i = 1; i < 8; i++) {
754 if (xscale->static_low_vectors_set & (1 << i))
755 xscale->low_vectors[i] = xscale->static_low_vectors[i];
756 else {
757 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
758 if (retval == ERROR_TARGET_TIMEOUT)
759 return retval;
760 if (retval != ERROR_OK) {
761 /* Some of these reads will fail as part of normal execution */
762 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
763 }
764 }
765 }
766
767 /* calculate branches to debug handler */
768 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
769 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
770
771 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
772 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
773
774 /* invalidate and load exception vectors in mini i-cache */
775 xscale_invalidate_ic_line(target, 0x0);
776 xscale_invalidate_ic_line(target, 0xffff0000);
777
778 xscale_load_ic(target, 0x0, xscale->low_vectors);
779 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
780
781 return ERROR_OK;
782 }
783
784 static int xscale_arch_state(struct target *target)
785 {
786 struct xscale_common *xscale = target_to_xscale(target);
787 struct arm *arm = &xscale->arm;
788
789 static const char *state[] = {
790 "disabled", "enabled"
791 };
792
793 static const char *arch_dbg_reason[] = {
794 "", "\n(processor reset)", "\n(trace buffer full)"
795 };
796
797 if (arm->common_magic != ARM_COMMON_MAGIC) {
798 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
799 return ERROR_COMMAND_SYNTAX_ERROR;
800 }
801
802 arm_arch_state(target);
803 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
804 state[xscale->armv4_5_mmu.mmu_enabled],
805 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
806 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
807 arch_dbg_reason[xscale->arch_debug_reason]);
808
809 return ERROR_OK;
810 }
811
/* Standard poll hook: detect entry into debug state.
 *
 * While (debug-)running, peek the TX register without consuming it.
 * Data present in TX means the debug handler has started executing, so
 * the core has halted; run the full debug-entry sequence and fire the
 * appropriate HALTED / DEBUG_HALTED callbacks.
 */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING)) {
		enum target_state previous_state = target->state;
		/* non-consuming read: just check whether TX has data */
		retval = xscale_read_tx(target, 0);
		if (retval == ERROR_OK) {

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		} else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE) {
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be performed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
848
/* Handle entry into debug state: drain the register dump sent by the
 * debug handler, refresh the ARM and XScale register caches, derive the
 * debug reason from the DCSR Method-Of-Entry field, apply the PC fixup
 * for the taken debug exception, and collect trace data if tracing is
 * active.
 *
 * Called from xscale_poll() once data is seen in TX.  The handler is
 * expected to send r0, pc, r1-r7 and cpsr (10 words) first, then the
 * banked r8-r14 plus spsr where the current mode has one.
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t pc;
	uint32_t buffer[10];
	unsigned i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	retval = xscale_read_dcsr(target);
	if (retval != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	retval = xscale_receive(target, buffer, 10);
	if (retval != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	arm->core_cache->reg_list[0].dirty = 1;
	arm->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(arm->pc->value, 0, 32, buffer[1]);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++) {
		buf_set_u32(arm->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		arm->core_cache->reg_list[i].dirty = 1;
		arm->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(arm, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* an invalid mode value means the dump stream is out of sync */
	if (!is_arm_mode(arm->core_mode)) {
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
		arm_mode_name(arm->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (arm->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(arm->spsr->value, 0, 32, buffer[7]);
		arm->spsr->dirty = false;
		arm->spsr->valid = true;
	} else {
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++) {
		struct reg *r = arm_reg_current(arm, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* mark xscale regs invalid to ensure they are retrieved from the
	 * debug handler if requested */
	for (i = 0; i < xscale->reg_cache->num_regs; i++)
		xscale->reg_cache->reg_list[i].valid = 0;

	/* examine debug reason: Method Of Entry field, DCSR bits [4:2] */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(arm->pc->value, 0, 32);

	switch (moe) {
		case 0x0:	/* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1:	/* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2:	/* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3:	/* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4:	/* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5:	/* Vector trap occurred */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6:	/* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7:	/* Reserved (may flag Hot-Debug support) */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(arm->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1) {
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value,
				0,
				32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings
	 * read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg =
		buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
		(xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
		(xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		xscale_read_trace(target);

		/* Resume if entered debug due to buffer fill and we're still collecting
		 * trace data.  Note that a debug exception due to trace buffer full
		 * can only happen in fill mode. */
		if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL) {
			if (--xscale->trace.fill_counter > 0)
				xscale_resume(target, 1, 0x0, 1, 0);
		} else	/* entered debug for other reason; reset counter */
			xscale->trace.fill_counter = 0;
	}

	return ERROR_OK;
}
1019
1020 static int xscale_halt(struct target *target)
1021 {
1022 struct xscale_common *xscale = target_to_xscale(target);
1023
1024 LOG_DEBUG("target->state: %s",
1025 target_state_name(target));
1026
1027 if (target->state == TARGET_HALTED) {
1028 LOG_DEBUG("target was already halted");
1029 return ERROR_OK;
1030 } else if (target->state == TARGET_UNKNOWN) {
1031 /* this must not happen for a xscale target */
1032 LOG_ERROR("target was in unknown state when halt was requested");
1033 return ERROR_TARGET_INVALID;
1034 } else if (target->state == TARGET_RESET)
1035 LOG_DEBUG("target->state == TARGET_RESET");
1036 else {
1037 /* assert external dbg break */
1038 xscale->external_debug_break = 1;
1039 xscale_read_dcsr(target);
1040
1041 target->debug_reason = DBG_REASON_DBGRQ;
1042 }
1043
1044 return ERROR_OK;
1045 }
1046
1047 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1048 {
1049 struct xscale_common *xscale = target_to_xscale(target);
1050 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1051 int retval;
1052
1053 if (xscale->ibcr0_used) {
1054 struct breakpoint *ibcr0_bp =
1055 breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1056
1057 if (ibcr0_bp)
1058 xscale_unset_breakpoint(target, ibcr0_bp);
1059 else {
1060 LOG_ERROR(
1061 "BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1062 exit(-1);
1063 }
1064 }
1065
1066 retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1067 if (retval != ERROR_OK)
1068 return retval;
1069
1070 return ERROR_OK;
1071 }
1072
1073 static int xscale_disable_single_step(struct target *target)
1074 {
1075 struct xscale_common *xscale = target_to_xscale(target);
1076 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1077 int retval;
1078
1079 retval = xscale_set_reg_u32(ibcr0, 0x0);
1080 if (retval != ERROR_OK)
1081 return retval;
1082
1083 return ERROR_OK;
1084 }
1085
1086 static void xscale_enable_watchpoints(struct target *target)
1087 {
1088 struct watchpoint *watchpoint = target->watchpoints;
1089
1090 while (watchpoint) {
1091 if (watchpoint->set == 0)
1092 xscale_set_watchpoint(target, watchpoint);
1093 watchpoint = watchpoint->next;
1094 }
1095 }
1096
1097 static void xscale_enable_breakpoints(struct target *target)
1098 {
1099 struct breakpoint *breakpoint = target->breakpoints;
1100
1101 /* set any pending breakpoints */
1102 while (breakpoint) {
1103 if (breakpoint->set == 0)
1104 xscale_set_breakpoint(target, breakpoint);
1105 breakpoint = breakpoint->next;
1106 }
1107 }
1108
1109 static void xscale_free_trace_data(struct xscale_common *xscale)
1110 {
1111 struct xscale_trace_data *td = xscale->trace.data;
1112 while (td) {
1113 struct xscale_trace_data *next_td = td->next;
1114 if (td->entries)
1115 free(td->entries);
1116 free(td);
1117 td = next_td;
1118 }
1119 xscale->trace.data = NULL;
1120 }
1121
/**
 * Resume target execution, either at the current PC or at @a address.
 *
 * The resume protocol mirrors the debug handler's expectations: a resume
 * command (0x30, or 0x62+0x31 when trace collection is enabled), then the
 * CPSR, r7..r0 in descending order, and finally the PC. If a breakpoint
 * sits at the resume PC and @a handle_breakpoints is set, it is first
 * stepped over using the IBCR0 single-step mechanism.
 *
 * @param current            non-zero: resume at current PC; zero: resume at @a address
 * @param address            resume address (only used when @a current is zero)
 * @param handle_breakpoints non-zero: step over a breakpoint at the resume PC
 * @param debug_execution    non-zero: report DEBUG_RESUMED instead of RESUMED
 * @return ERROR_OK on success, an OpenOCD error code otherwise
 */
static int xscale_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* update vector tables */
	retval = xscale_update_vectors(target);
	if (retval != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(arm->pc->value, 0, 32, address);

	current_pc = buf_get_u32(arm->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0) {
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(arm->pc->value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		struct breakpoint *breakpoint;
		breakpoint = breakpoint_find(target,
				buf_get_u32(arm->pc->value, 0, 32));
		if (breakpoint != NULL) {
			uint32_t next_pc;
			enum trace_mode saved_trace_mode;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at " TARGET_ADDR_FMT "",
				breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			retval = arm_simulate_step(target, &next_pc);
			if (retval != ERROR_OK) {
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR(
					"BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
					current_opcode);
			}

			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			retval = xscale_restore_banked(target);
			if (retval != ERROR_OK)
				return retval;

			/* send resume request */
			xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target,
				buf_get_u32(arm->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
				buf_get_u32(arm->cpsr->value, 0, 32));

			/* r7..r0, in the order the debug handler expects them */
			for (i = 7; i >= 0; i--) {
				/* send register */
				xscale_send_u32(target,
					buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
					i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target,
				buf_get_u32(arm->pc->value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
				buf_get_u32(arm->pc->value, 0, 32));

			/* disable trace data collection in xscale_debug_entry() */
			saved_trace_mode = xscale->trace.mode;
			xscale->trace.mode = XSCALE_TRACE_DISABLED;

			/* wait for and process debug entry */
			xscale_debug_entry(target);

			/* re-enable trace buffer, if enabled previously */
			xscale->trace.mode = saved_trace_mode;

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			/* put the stepped-over breakpoint back */
			LOG_DEBUG("set breakpoint at " TARGET_ADDR_FMT "",
				breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	retval = xscale_restore_banked(target);
	if (retval != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		if (xscale->trace.mode == XSCALE_TRACE_FILL) {
			/* If trace enabled in fill mode and starting collection of new set
			 * of buffers, initialize buffer counter and free previous buffers */
			if (xscale->trace.fill_counter == 0) {
				xscale->trace.fill_counter = xscale->trace.buffer_fill;
				xscale_free_trace_data(xscale);
			}
		} else	/* wrap mode; free previous buffer */
			xscale_free_trace_data(xscale);

		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	} else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(arm->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(arm->cpsr->value, 0, 32));

	for (i = 7; i >= 0; i--) {
		/* send register */
		xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
			i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(arm->pc->value, 0, 32));
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
		buf_get_u32(arm->pc->value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		/* registers are now invalid */
		register_cache_invalidate(arm->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
1292
/**
 * Core of single stepping: compute the next PC by simulation, arm IBCR0,
 * resume the target via the debug-handler protocol (resume command, CPSR,
 * r7..r0, PC), then wait for the resulting debug re-entry and disarm.
 *
 * NOTE(review): the @a current, @a address and @a handle_breakpoints
 * parameters are not referenced in this body — the caller (xscale_step)
 * has already applied them before delegating here.
 *
 * @return ERROR_OK on success, an OpenOCD error code otherwise
 */
static int xscale_step_inner(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	retval = arm_simulate_step(target, &next_pc);
	if (retval != ERROR_OK) {
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(arm->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR(
			"BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
			current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	retval = xscale_enable_single_step(target, next_pc);
	if (retval != ERROR_OK)
		return retval;

	/* restore banked registers */
	retval = xscale_restore_banked(target);
	if (retval != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		retval = xscale_send_u32(target, 0x62);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_send_u32(target, 0x31);
		if (retval != ERROR_OK)
			return retval;
	} else {
		retval = xscale_send_u32(target, 0x30);
		if (retval != ERROR_OK)
			return retval;
	}

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(arm->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(arm->cpsr->value, 0, 32));

	/* r7..r0, in the order the debug handler expects them */
	for (i = 7; i >= 0; i--) {
		/* send register */
		retval = xscale_send_u32(target,
				buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i,
			buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	retval = xscale_send_u32(target,
			buf_get_u32(arm->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
		buf_get_u32(arm->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	/* wait for and process debug entry */
	retval = xscale_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	retval = xscale_disable_single_step(target);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1387
/**
 * Single-step one instruction.
 *
 * Handles the two cases the inner stepper cannot: stepping at the reset
 * vector (simulated entirely on the host) and temporarily removing a
 * breakpoint located at the step PC. Everything else is delegated to
 * xscale_step_inner().
 *
 * @param current            non-zero: step at current PC; zero: step at @a address
 * @param address            step address (only used when @a current is zero)
 * @param handle_breakpoints non-zero: unset/re-set a breakpoint at the step PC
 * @return ERROR_OK on success, an OpenOCD error code otherwise
 */
static int xscale_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct arm *arm = target_to_arm(target);
	struct breakpoint *breakpoint = NULL;

	uint32_t current_pc;
	int retval;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(arm->pc->value, 0, 32, address);

	current_pc = buf_get_u32(arm->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the step */
	if (current_pc == 0x0) {
		retval = arm_simulate_step(target, NULL);
		if (retval != ERROR_OK)
			return retval;
		current_pc = buf_get_u32(arm->pc->value, 0, 32);
		LOG_DEBUG("current pc %" PRIx32, current_pc);

		target->debug_reason = DBG_REASON_SINGLESTEP;
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);

		return ERROR_OK;
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
		breakpoint = breakpoint_find(target,
				buf_get_u32(arm->pc->value, 0, 32));
	if (breakpoint != NULL) {
		retval = xscale_unset_breakpoint(target, breakpoint);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = xscale_step_inner(target, current, address, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;

	/* re-install the breakpoint removed above, if any */
	if (breakpoint)
		xscale_set_breakpoint(target, breakpoint);

	LOG_DEBUG("target stepped");

	return ERROR_OK;

}
1444
/**
 * Assert SRST and prepare the core for a trapped reset.
 *
 * While reset is held, the DCSR "Hold Reset" and "Trap Reset" bits are
 * programmed so the core will stop in the debug handler once reset is
 * released (see xscale_deassert_reset()).
 *
 * @return ERROR_OK, or ERROR_TARGET_NOT_EXAMINED if the target has not
 *         been examined yet (reset cannot be applied safely then)
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	/* TODO: apply hw reset signal in not examined state */
	if (!(target_was_examined(target))) {
		LOG_WARNING("Reset is not asserted because the target is not examined.");
		LOG_WARNING("Use a reset button or power cycle the target.");
		return ERROR_TARGET_NOT_EXAMINED;
	}

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt) {
		int retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1492
/**
 * Release SRST and bring the core up through the debug handler.
 *
 * All hardware breakpoint/watchpoint bookkeeping is reset (the units lose
 * their state across reset), trace data is dropped, and the on-chip debug
 * handler plus the low/high exception vectors are reloaded into the
 * mini-icache before the core is allowed to run.
 *
 * @return ERROR_OK on success, an OpenOCD error code otherwise
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* hardware comparators are cleared by reset; mark them all free */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint) {
		if (breakpoint->type == BKPT_HARD)
			breakpoint->set = 0;
		breakpoint = breakpoint->next;
	}

	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale_free_trace_data(xscale);

	register_cache_invalidate(xscale->arm.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt) {
			uint32_t cache_line[8];
			unsigned i;

			/* the mini-icache is loaded one 32-byte line at a time */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4) {
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad a short final line with "mov r8, r8" no-ops */
			for (; i < 32; i += 4)
				cache_line[i / 4] = 0xe1a08008;

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0) {
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		}

		retval = xscale_load_ic(target, 0x0,
				xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
				xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt) {
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1620
/**
 * Stub: individual core-register reads are not implemented for XScale;
 * registers are fetched in bulk at debug entry instead.
 */
static int xscale_read_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1628
/**
 * Stub: individual core-register writes are not implemented for XScale;
 * dirty registers are flushed in bulk on resume instead.
 */
static int xscale_write_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode, uint8_t *value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1636
1637 static int xscale_full_context(struct target *target)
1638 {
1639 struct arm *arm = target_to_arm(target);
1640
1641 uint32_t *buffer;
1642
1643 int i, j;
1644
1645 LOG_DEBUG("-");
1646
1647 if (target->state != TARGET_HALTED) {
1648 LOG_WARNING("target not halted");
1649 return ERROR_TARGET_NOT_HALTED;
1650 }
1651
1652 buffer = malloc(4 * 8);
1653
1654 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1655 * we can't enter User mode on an XScale (unpredictable),
1656 * but User shares registers with SYS
1657 */
1658 for (i = 1; i < 7; i++) {
1659 enum arm_mode mode = armv4_5_number_to_mode(i);
1660 bool valid = true;
1661 struct reg *r;
1662
1663 if (mode == ARM_MODE_USR)
1664 continue;
1665
1666 /* check if there are invalid registers in the current mode
1667 */
1668 for (j = 0; valid && j <= 16; j++) {
1669 if (!ARMV4_5_CORE_REG_MODE(arm->core_cache,
1670 mode, j).valid)
1671 valid = false;
1672 }
1673 if (valid)
1674 continue;
1675
1676 /* request banked registers */
1677 xscale_send_u32(target, 0x0);
1678
1679 /* send CPSR for desired bank mode */
1680 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1681
1682 /* get banked registers: r8 to r14; and SPSR
1683 * except in USR/SYS mode
1684 */
1685 if (mode != ARM_MODE_SYS) {
1686 /* SPSR */
1687 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1688 mode, 16);
1689
1690 xscale_receive(target, buffer, 8);
1691
1692 buf_set_u32(r->value, 0, 32, buffer[7]);
1693 r->dirty = false;
1694 r->valid = true;
1695 } else
1696 xscale_receive(target, buffer, 7);
1697
1698 /* move data from buffer to register cache */
1699 for (j = 8; j <= 14; j++) {
1700 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1701 mode, j);
1702
1703 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1704 r->dirty = false;
1705 r->valid = true;
1706 }
1707 }
1708
1709 free(buffer);
1710
1711 return ERROR_OK;
1712 }
1713
/**
 * Write back any dirty banked registers (r8-r14 and, where present, the
 * SPSR) for each processor mode before the target is resumed.
 *
 * Uses debug-handler command 0x1 ("send banked registers") followed by a
 * CPSR value selecting the mode, then r8..r14 and (non-SYS modes) the SPSR.
 *
 * @return ERROR_OK on success, ERROR_TARGET_NOT_HALTED otherwise
 */
static int xscale_restore_banked(struct target *target)
{
	struct arm *arm = target_to_arm(target);

	int i, j;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written. Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++) {
		enum arm_mode mode = armv4_5_number_to_mode(i);
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++) {
			if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
				mode, j).dirty)
				goto dirty;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS) {
			if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
				mode, 16).dirty)
				goto dirty;
		}

		/* there's nothing to flush for this mode */
		continue;

dirty:
		/* command 0x1: "send banked registers" */
		xscale_send_u32(target, 0x1);

		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}

		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, 16);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}
	}

	return ERROR_OK;
}
1782
1783 static int xscale_read_memory(struct target *target, target_addr_t address,
1784 uint32_t size, uint32_t count, uint8_t *buffer)
1785 {
1786 struct xscale_common *xscale = target_to_xscale(target);
1787 uint32_t *buf32;
1788 uint32_t i;
1789 int retval;
1790
1791 LOG_DEBUG("address: " TARGET_ADDR_FMT ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1792 address,
1793 size,
1794 count);
1795
1796 if (target->state != TARGET_HALTED) {
1797 LOG_WARNING("target not halted");
1798 return ERROR_TARGET_NOT_HALTED;
1799 }
1800
1801 /* sanitize arguments */
1802 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1803 return ERROR_COMMAND_SYNTAX_ERROR;
1804
1805 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1806 return ERROR_TARGET_UNALIGNED_ACCESS;
1807
1808 /* send memory read request (command 0x1n, n: access size) */
1809 retval = xscale_send_u32(target, 0x10 | size);
1810 if (retval != ERROR_OK)
1811 return retval;
1812
1813 /* send base address for read request */
1814 retval = xscale_send_u32(target, address);
1815 if (retval != ERROR_OK)
1816 return retval;
1817
1818 /* send number of requested data words */
1819 retval = xscale_send_u32(target, count);
1820 if (retval != ERROR_OK)
1821 return retval;
1822
1823 /* receive data from target (count times 32-bit words in host endianness) */
1824 buf32 = malloc(4 * count);
1825 retval = xscale_receive(target, buf32, count);
1826 if (retval != ERROR_OK) {
1827 free(buf32);
1828 return retval;
1829 }
1830
1831 /* extract data from host-endian buffer into byte stream */
1832 for (i = 0; i < count; i++) {
1833 switch (size) {
1834 case 4:
1835 target_buffer_set_u32(target, buffer, buf32[i]);
1836 buffer += 4;
1837 break;
1838 case 2:
1839 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1840 buffer += 2;
1841 break;
1842 case 1:
1843 *buffer++ = buf32[i] & 0xff;
1844 break;
1845 default:
1846 LOG_ERROR("invalid read size");
1847 return ERROR_COMMAND_SYNTAX_ERROR;
1848 }
1849 }
1850
1851 free(buf32);
1852
1853 /* examine DCSR, to see if Sticky Abort (SA) got set */
1854 retval = xscale_read_dcsr(target);
1855 if (retval != ERROR_OK)
1856 return retval;
1857 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1858 /* clear SA bit */
1859 retval = xscale_send_u32(target, 0x60);
1860 if (retval != ERROR_OK)
1861 return retval;
1862
1863 return ERROR_TARGET_DATA_ABORT;
1864 }
1865
1866 return ERROR_OK;
1867 }
1868
1869 static int xscale_read_phys_memory(struct target *target, target_addr_t address,
1870 uint32_t size, uint32_t count, uint8_t *buffer)
1871 {
1872 struct xscale_common *xscale = target_to_xscale(target);
1873
1874 /* with MMU inactive, there are only physical addresses */
1875 if (!xscale->armv4_5_mmu.mmu_enabled)
1876 return xscale_read_memory(target, address, size, count, buffer);
1877
1878 /** \todo: provide a non-stub implementation of this routine. */
1879 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1880 target_name(target), __func__);
1881 return ERROR_FAIL;
1882 }
1883
1884 static int xscale_write_memory(struct target *target, target_addr_t address,
1885 uint32_t size, uint32_t count, const uint8_t *buffer)
1886 {
1887 struct xscale_common *xscale = target_to_xscale(target);
1888 int retval;
1889
1890 LOG_DEBUG("address: " TARGET_ADDR_FMT ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1891 address,
1892 size,
1893 count);
1894
1895 if (target->state != TARGET_HALTED) {
1896 LOG_WARNING("target not halted");
1897 return ERROR_TARGET_NOT_HALTED;
1898 }
1899
1900 /* sanitize arguments */
1901 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1902 return ERROR_COMMAND_SYNTAX_ERROR;
1903
1904 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1905 return ERROR_TARGET_UNALIGNED_ACCESS;
1906
1907 /* send memory write request (command 0x2n, n: access size) */
1908 retval = xscale_send_u32(target, 0x20 | size);
1909 if (retval != ERROR_OK)
1910 return retval;
1911
1912 /* send base address for read request */
1913 retval = xscale_send_u32(target, address);
1914 if (retval != ERROR_OK)
1915 return retval;
1916
1917 /* send number of requested data words to be written*/
1918 retval = xscale_send_u32(target, count);
1919 if (retval != ERROR_OK)
1920 return retval;
1921
1922 /* extract data from host-endian buffer into byte stream */
1923 #if 0
1924 for (i = 0; i < count; i++) {
1925 switch (size) {
1926 case 4:
1927 value = target_buffer_get_u32(target, buffer);
1928 xscale_send_u32(target, value);
1929 buffer += 4;
1930 break;
1931 case 2:
1932 value = target_buffer_get_u16(target, buffer);
1933 xscale_send_u32(target, value);
1934 buffer += 2;
1935 break;
1936 case 1:
1937 value = *buffer;
1938 xscale_send_u32(target, value);
1939 buffer += 1;
1940 break;
1941 default:
1942 LOG_ERROR("should never get here");
1943 exit(-1);
1944 }
1945 }
1946 #endif
1947 retval = xscale_send(target, buffer, count, size);
1948 if (retval != ERROR_OK)
1949 return retval;
1950
1951 /* examine DCSR, to see if Sticky Abort (SA) got set */
1952 retval = xscale_read_dcsr(target);
1953 if (retval != ERROR_OK)
1954 return retval;
1955 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1956 /* clear SA bit */
1957 retval = xscale_send_u32(target, 0x60);
1958 if (retval != ERROR_OK)
1959 return retval;
1960
1961 LOG_ERROR("data abort writing memory");
1962 return ERROR_TARGET_DATA_ABORT;
1963 }
1964
1965 return ERROR_OK;
1966 }
1967
1968 static int xscale_write_phys_memory(struct target *target, target_addr_t address,
1969 uint32_t size, uint32_t count, const uint8_t *buffer)
1970 {
1971 struct xscale_common *xscale = target_to_xscale(target);
1972
1973 /* with MMU inactive, there are only physical addresses */
1974 if (!xscale->armv4_5_mmu.mmu_enabled)
1975 return xscale_write_memory(target, address, size, count, buffer);
1976
1977 /** \todo: provide a non-stub implementation of this routine. */
1978 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1979 target_name(target), __func__);
1980 return ERROR_FAIL;
1981 }
1982
1983 static int xscale_get_ttb(struct target *target, uint32_t *result)
1984 {
1985 struct xscale_common *xscale = target_to_xscale(target);
1986 uint32_t ttb;
1987 int retval;
1988
1989 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1990 if (retval != ERROR_OK)
1991 return retval;
1992 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1993
1994 *result = ttb;
1995
1996 return ERROR_OK;
1997 }
1998
/**
 * Disable the MMU and/or caches by clearing bits in the cp15 control
 * register: M (bit 0), C (bit 2) and I (bit 12).
 *
 * Before disabling the D-cache it is cleaned (command 0x50, using the
 * reserved cache-clean address range) and invalidated (0x51); the I-cache
 * is invalidated with 0x52. The command order matters: caches must be
 * coherent before the enables are cleared. Finishes with cpwait (0x53).
 *
 * @param mmu       non-zero to disable the MMU
 * @param d_u_cache non-zero to clean+invalidate and disable the data/unified cache
 * @param i_cache   non-zero to invalidate and disable the instruction cache
 * @return ERROR_OK on success, an OpenOCD error code otherwise
 */
static int xscale_disable_mmu_caches(struct target *target, int mmu,
	int d_u_cache, int i_cache)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;
	int retval;

	/* read cp15 control register */
	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	if (retval != ERROR_OK)
		return retval;
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

	if (mmu)
		cp15_control &= ~0x1U;

	if (d_u_cache) {
		/* clean DCache */
		retval = xscale_send_u32(target, 0x50);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_send_u32(target, xscale->cache_clean_address);
		if (retval != ERROR_OK)
			return retval;

		/* invalidate DCache */
		retval = xscale_send_u32(target, 0x51);
		if (retval != ERROR_OK)
			return retval;

		cp15_control &= ~0x4U;
	}

	if (i_cache) {
		/* invalidate ICache */
		retval = xscale_send_u32(target, 0x52);
		if (retval != ERROR_OK)
			return retval;
		cp15_control &= ~0x1000U;
	}

	/* write new cp15 control register */
	retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
	if (retval != ERROR_OK)
		return retval;

	/* execute cpwait to ensure outstanding operations complete */
	retval = xscale_send_u32(target, 0x53);
	return retval;
}
2049
2050 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2051 int d_u_cache, int i_cache)
2052 {
2053 struct xscale_common *xscale = target_to_xscale(target);
2054 uint32_t cp15_control;
2055 int retval;
2056
2057 /* read cp15 control register */
2058 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2059 if (retval != ERROR_OK)
2060 return retval;
2061 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2062
2063 if (mmu)
2064 cp15_control |= 0x1U;
2065
2066 if (d_u_cache)
2067 cp15_control |= 0x4U;
2068
2069 if (i_cache)
2070 cp15_control |= 0x1000U;
2071
2072 /* write new cp15 control register */
2073 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2074 if (retval != ERROR_OK)
2075 return retval;
2076
2077 /* execute cpwait to ensure outstanding operations complete */
2078 retval = xscale_send_u32(target, 0x53);
2079 return retval;
2080 }
2081
/**
 * Activate a breakpoint on the target.
 *
 * Hardware breakpoints claim one of the two IBCR comparators (bit 0 of
 * the programmed value enables it; breakpoint->set records which unit,
 * 1 or 2). Software breakpoints save the original instruction and patch
 * in the ARM (32-bit) or Thumb (16-bit) BKPT opcode, then clean/invalidate
 * the caches (commands 0x50/0x51/0x52) so the core fetches the patch.
 *
 * @return ERROR_OK on success, an OpenOCD error code otherwise
 */
static int xscale_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* bit 0 enables the IBCR comparator */
		uint32_t value = breakpoint->address | 1;
		if (!xscale->ibcr0_used) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			breakpoint->set = 1;	/* breakpoint set on first breakpoint register */
		} else if (!xscale->ibcr1_used) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			breakpoint->set = 2;	/* breakpoint set on second breakpoint register */
		} else {/* bug: availability previously verified in xscale_add_breakpoint() */
			LOG_ERROR("BUG: no hardware comparator available");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
	} else if (breakpoint->type == BKPT_SOFT) {
		if (breakpoint->length == 4) {
			/* keep the original instruction in target endianness */
			retval = target_read_memory(target, breakpoint->address, 4, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness
			 *(arm7_9->arm_bkpt is host endian) */
			retval = target_write_u32(target, breakpoint->address,
					xscale->arm_bkpt);
			if (retval != ERROR_OK)
				return retval;
		} else {
			/* keep the original instruction in target endianness */
			retval = target_read_memory(target, breakpoint->address, 2, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness
			 *(arm7_9->arm_bkpt is host endian) */
			retval = target_write_u16(target, breakpoint->address,
					xscale->thumb_bkpt);
			if (retval != ERROR_OK)
				return retval;
		}
		breakpoint->set = 1;

		/* flush the patched instruction out to memory and refetch */
		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}
2148
2149 static int xscale_add_breakpoint(struct target *target,
2150 struct breakpoint *breakpoint)
2151 {
2152 struct xscale_common *xscale = target_to_xscale(target);
2153
2154 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1)) {
2155 LOG_ERROR("no breakpoint unit available for hardware breakpoint");
2156 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2157 }
2158
2159 if ((breakpoint->length != 2) && (breakpoint->length != 4)) {
2160 LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2161 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2162 }
2163
2164 if (breakpoint->type == BKPT_HARD)
2165 xscale->ibcr_available--;
2166
2167 return xscale_set_breakpoint(target, breakpoint);
2168 }
2169
2170 static int xscale_unset_breakpoint(struct target *target,
2171 struct breakpoint *breakpoint)
2172 {
2173 int retval;
2174 struct xscale_common *xscale = target_to_xscale(target);
2175
2176 if (target->state != TARGET_HALTED) {
2177 LOG_WARNING("target not halted");
2178 return ERROR_TARGET_NOT_HALTED;
2179 }
2180
2181 if (!breakpoint->set) {
2182 LOG_WARNING("breakpoint not set");
2183 return ERROR_OK;
2184 }
2185
2186 if (breakpoint->type == BKPT_HARD) {
2187 if (breakpoint->set == 1) {
2188 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2189 xscale->ibcr0_used = 0;
2190 } else if (breakpoint->set == 2) {
2191 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2192 xscale->ibcr1_used = 0;
2193 }
2194 breakpoint->set = 0;
2195 } else {
2196 /* restore original instruction (kept in target endianness) */
2197 if (breakpoint->length == 4) {
2198 retval = target_write_memory(target, breakpoint->address, 4, 1,
2199 breakpoint->orig_instr);
2200 if (retval != ERROR_OK)
2201 return retval;
2202 } else {
2203 retval = target_write_memory(target, breakpoint->address, 2, 1,
2204 breakpoint->orig_instr);
2205 if (retval != ERROR_OK)
2206 return retval;
2207 }
2208 breakpoint->set = 0;
2209
2210 xscale_send_u32(target, 0x50); /* clean dcache */
2211 xscale_send_u32(target, xscale->cache_clean_address);
2212 xscale_send_u32(target, 0x51); /* invalidate dcache */
2213 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2214 }
2215
2216 return ERROR_OK;
2217 }
2218
2219 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2220 {
2221 struct xscale_common *xscale = target_to_xscale(target);
2222
2223 if (target->state != TARGET_HALTED) {
2224 LOG_ERROR("target not halted");
2225 return ERROR_TARGET_NOT_HALTED;
2226 }
2227
2228 if (breakpoint->set)
2229 xscale_unset_breakpoint(target, breakpoint);
2230
2231 if (breakpoint->type == BKPT_HARD)
2232 xscale->ibcr_available++;
2233
2234 return ERROR_OK;
2235 }
2236
2237 static int xscale_set_watchpoint(struct target *target,
2238 struct watchpoint *watchpoint)
2239 {
2240 struct xscale_common *xscale = target_to_xscale(target);
2241 uint32_t enable = 0;
2242 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2243 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2244
2245 if (target->state != TARGET_HALTED) {
2246 LOG_ERROR("target not halted");
2247 return ERROR_TARGET_NOT_HALTED;
2248 }
2249
2250 switch (watchpoint->rw) {
2251 case WPT_READ:
2252 enable = 0x3;
2253 break;
2254 case WPT_ACCESS:
2255 enable = 0x2;
2256 break;
2257 case WPT_WRITE:
2258 enable = 0x1;
2259 break;
2260 default:
2261 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2262 }
2263
2264 /* For watchpoint across more than one word, both DBR registers must
2265 be enlisted, with the second used as a mask. */
2266 if (watchpoint->length > 4) {
2267 if (xscale->dbr0_used || xscale->dbr1_used) {
2268 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2269 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2270 }
2271
2272 /* Write mask value to DBR1, based on the length argument.
2273 * Address bits ignored by the comparator are those set in mask. */
2274 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2275 watchpoint->length - 1);
2276 xscale->dbr1_used = 1;
2277 enable |= 0x100; /* DBCON[M] */
2278 }
2279
2280 if (!xscale->dbr0_used) {
2281 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2282 dbcon_value |= enable;
2283 xscale_set_reg_u32(dbcon, dbcon_value);
2284 watchpoint->set = 1;
2285 xscale->dbr0_used = 1;
2286 } else if (!xscale->dbr1_used) {
2287 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2288 dbcon_value |= enable << 2;
2289 xscale_set_reg_u32(dbcon, dbcon_value);
2290 watchpoint->set = 2;
2291 xscale->dbr1_used = 1;
2292 } else {
2293 LOG_ERROR("BUG: no hardware comparator available");
2294 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2295 }
2296
2297 return ERROR_OK;
2298 }
2299
2300 static int xscale_add_watchpoint(struct target *target,
2301 struct watchpoint *watchpoint)
2302 {
2303 struct xscale_common *xscale = target_to_xscale(target);
2304
2305 if (xscale->dbr_available < 1) {
2306 LOG_ERROR("no more watchpoint registers available");
2307 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2308 }
2309
2310 if (watchpoint->value)
2311 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2312
2313 /* check that length is a power of two */
2314 for (uint32_t len = watchpoint->length; len != 1; len /= 2) {
2315 if (len % 2) {
2316 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2317 return ERROR_COMMAND_ARGUMENT_INVALID;
2318 }
2319 }
2320
2321 if (watchpoint->length == 4) { /* single word watchpoint */
2322 xscale->dbr_available--;/* one DBR reg used */
2323 return ERROR_OK;
2324 }
2325
2326 /* watchpoints across multiple words require both DBR registers */
2327 if (xscale->dbr_available < 2) {
2328 LOG_ERROR("insufficient watchpoint registers available");
2329 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2330 }
2331
2332 if (watchpoint->length > watchpoint->address) {
2333 LOG_ERROR("xscale does not support watchpoints with length "
2334 "greater than address");
2335 return ERROR_COMMAND_ARGUMENT_INVALID;
2336 }
2337
2338 xscale->dbr_available = 0;
2339 return ERROR_OK;
2340 }
2341
2342 static int xscale_unset_watchpoint(struct target *target,
2343 struct watchpoint *watchpoint)
2344 {
2345 struct xscale_common *xscale = target_to_xscale(target);
2346 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2347 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2348
2349 if (target->state != TARGET_HALTED) {
2350 LOG_WARNING("target not halted");
2351 return ERROR_TARGET_NOT_HALTED;
2352 }
2353
2354 if (!watchpoint->set) {
2355 LOG_WARNING("breakpoint not set");
2356 return ERROR_OK;
2357 }
2358
2359 if (watchpoint->set == 1) {
2360 if (watchpoint->length > 4) {
2361 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2362 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2363 } else
2364 dbcon_value &= ~0x3;
2365
2366 xscale_set_reg_u32(dbcon, dbcon_value);
2367 xscale->dbr0_used = 0;
2368 } else if (watchpoint->set == 2) {
2369 dbcon_value &= ~0xc;
2370 xscale_set_reg_u32(dbcon, dbcon_value);
2371 xscale->dbr1_used = 0;
2372 }
2373 watchpoint->set = 0;
2374
2375 return ERROR_OK;
2376 }
2377
2378 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2379 {
2380 struct xscale_common *xscale = target_to_xscale(target);
2381
2382 if (target->state != TARGET_HALTED) {
2383 LOG_ERROR("target not halted");
2384 return ERROR_TARGET_NOT_HALTED;
2385 }
2386
2387 if (watchpoint->set)
2388 xscale_unset_watchpoint(target, watchpoint);
2389
2390 if (watchpoint->length > 4)
2391 xscale->dbr_available++;/* both DBR regs now available */
2392
2393 xscale->dbr_available++;
2394
2395 return ERROR_OK;
2396 }
2397
2398 static int xscale_get_reg(struct reg *reg)
2399 {
2400 struct xscale_reg *arch_info = reg->arch_info;
2401 struct target *target = arch_info->target;
2402 struct xscale_common *xscale = target_to_xscale(target);
2403
2404 /* DCSR, TX and RX are accessible via JTAG */
2405 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2406 return xscale_read_dcsr(arch_info->target);
2407 else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2408 /* 1 = consume register content */
2409 return xscale_read_tx(arch_info->target, 1);
2410 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2411 /* can't read from RX register (host -> debug handler) */
2412 return ERROR_OK;
2413 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2414 /* can't (explicitly) read from TXRXCTRL register */
2415 return ERROR_OK;
2416 } else {/* Other DBG registers have to be transfered by the debug handler
2417 * send CP read request (command 0x40) */
2418 xscale_send_u32(target, 0x40);
2419
2420 /* send CP register number */
2421 xscale_send_u32(target, arch_info->dbg_handler_number);
2422
2423 /* read register value */
2424 xscale_read_tx(target, 1);
2425 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2426
2427 reg->dirty = 0;
2428 reg->valid = 1;
2429 }
2430
2431 return ERROR_OK;
2432 }
2433
2434 static int xscale_set_reg(struct reg *reg, uint8_t *buf)
2435 {
2436 struct xscale_reg *arch_info = reg->arch_info;
2437 struct target *target = arch_info->target;
2438 struct xscale_common *xscale = target_to_xscale(target);
2439 uint32_t value = buf_get_u32(buf, 0, 32);
2440
2441 /* DCSR, TX and RX are accessible via JTAG */
2442 if (strcmp(reg->name, "XSCALE_DCSR") == 0) {
2443 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2444 return xscale_write_dcsr(arch_info->target, -1, -1);
2445 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2446 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2447 return xscale_write_rx(arch_info->target);
2448 } else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2449 /* can't write to TX register (debug-handler -> host) */
2450 return ERROR_OK;
2451 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2452 /* can't (explicitly) write to TXRXCTRL register */
2453 return ERROR_OK;
2454 } else {/* Other DBG registers have to be transfered by the debug handler
2455 * send CP write request (command 0x41) */
2456 xscale_send_u32(target, 0x41);
2457
2458 /* send CP register number */
2459 xscale_send_u32(target, arch_info->dbg_handler_number);
2460
2461 /* send CP register value */
2462 xscale_send_u32(target, value);
2463 buf_set_u32(reg->value, 0, 32, value);
2464 }
2465
2466 return ERROR_OK;
2467 }
2468
2469 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2470 {
2471 struct xscale_common *xscale = target_to_xscale(target);
2472 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2473 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2474
2475 /* send CP write request (command 0x41) */
2476 xscale_send_u32(target, 0x41);
2477
2478 /* send CP register number */
2479 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2480
2481 /* send CP register value */
2482 xscale_send_u32(target, value);
2483 buf_set_u32(dcsr->value, 0, 32, value);
2484
2485 return ERROR_OK;
2486 }
2487
2488 static int xscale_read_trace(struct target *target)
2489 {
2490 struct xscale_common *xscale = target_to_xscale(target);
2491 struct arm *arm = &xscale->arm;
2492 struct xscale_trace_data **trace_data_p;
2493
2494 /* 258 words from debug handler
2495 * 256 trace buffer entries
2496 * 2 checkpoint addresses
2497 */
2498 uint32_t trace_buffer[258];
2499 int is_address[256];
2500 int i, j;
2501 unsigned int num_checkpoints = 0;
2502
2503 if (target->state != TARGET_HALTED) {
2504 LOG_WARNING("target must be stopped to read trace data");
2505 return ERROR_TARGET_NOT_HALTED;
2506 }
2507
2508 /* send read trace buffer command (command 0x61) */
2509 xscale_send_u32(target, 0x61);
2510
2511 /* receive trace buffer content */
2512 xscale_receive(target, trace_buffer, 258);
2513
2514 /* parse buffer backwards to identify address entries */
2515 for (i = 255; i >= 0; i--) {
2516 /* also count number of checkpointed entries */
2517 if ((trace_buffer[i] & 0xe0) == 0xc0)
2518 num_checkpoints++;
2519
2520 is_address[i] = 0;
2521 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2522 ((trace_buffer[i] & 0xf0) == 0xd0)) {
2523 if (i > 0)
2524 is_address[--i] = 1;
2525 if (i > 0)
2526 is_address[--i] = 1;
2527 if (i > 0)
2528 is_address[--i] = 1;
2529 if (i > 0)
2530 is_address[--i] = 1;
2531 }
2532 }
2533
2534
2535 /* search first non-zero entry that is not part of an address */
2536 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2537 ;
2538
2539 if (j == 256) {
2540 LOG_DEBUG("no trace data collected");
2541 return ERROR_XSCALE_NO_TRACE_DATA;
2542 }
2543
2544 /* account for possible partial address at buffer start (wrap mode only) */
2545 if (is_address[0]) { /* first entry is address; complete set of 4? */
2546 i = 1;
2547 while (i < 4)
2548 if (!is_address[i++])
2549 break;
2550 if (i < 4)
2551 j += i; /* partial address; can't use it */
2552 }
2553
2554 /* if first valid entry is indirect branch, can't use that either (no address) */
2555 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2556 j++;
2557
2558 /* walk linked list to terminating entry */
2559 for (trace_data_p = &xscale->trace.data; *trace_data_p;
2560 trace_data_p = &(*trace_data_p)->next)
2561 ;
2562
2563 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2564 (*trace_data_p)->next = NULL;
2565 (*trace_data_p)->chkpt0 = trace_buffer[256];
2566 (*trace_data_p)->chkpt1 = trace_buffer[257];
2567 (*trace_data_p)->last_instruction = buf_get_u32(arm->pc->value, 0, 32);
2568 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2569 (*trace_data_p)->depth = 256 - j;
2570 (*trace_data_p)->num_checkpoints = num_checkpoints;
2571
2572 for (i = j; i < 256; i++) {
2573 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2574 if (is_address[i])
2575 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2576 else
2577 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2578 }
2579
2580 return ERROR_OK;
2581 }
2582
2583 static int xscale_read_instruction(struct target *target, uint32_t pc,
2584 struct arm_instruction *instruction)
2585 {
2586 struct xscale_common *const xscale = target_to_xscale(target);
2587 int i;
2588 int section = -1;
2589 size_t size_read;
2590 uint32_t opcode;
2591 int retval;
2592
2593 if (!xscale->trace.image)
2594 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2595
2596 /* search for the section the current instruction belongs to */
2597 for (i = 0; i < xscale->trace.image->num_sections; i++) {
2598 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2599 (xscale->trace.image->sections[i].base_address +
2600 xscale->trace.image->sections[i].size > pc)) {
2601 section = i;
2602 break;
2603 }
2604 }
2605
2606 if (section == -1) {
2607 /* current instruction couldn't be found in the image */
2608 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2609 }
2610
2611 if (xscale->trace.core_state == ARM_STATE_ARM) {
2612 uint8_t buf[4];
2613 retval = image_read_section(xscale->trace.image, section,
2614 pc - xscale->trace.image->sections[section].base_address,
2615 4, buf, &size_read);
2616 if (retval != ERROR_OK) {
2617 LOG_ERROR("error while reading instruction");
2618 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2619 }
2620 opcode = target_buffer_get_u32(target, buf);
2621 arm_evaluate_opcode(opcode, pc, instruction);
2622 } else if (xscale->trace.core_state == ARM_STATE_THUMB) {
2623 uint8_t buf[2];
2624 retval = image_read_section(xscale->trace.image, section,
2625 pc - xscale->trace.image->sections[section].base_address,
2626 2, buf, &size_read);
2627 if (retval != ERROR_OK) {
2628 LOG_ERROR("error while reading instruction");
2629 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2630 }
2631 opcode = target_buffer_get_u16(target, buf);
2632 thumb_evaluate_opcode(opcode, pc, instruction);
2633 } else {
2634 LOG_ERROR("BUG: unknown core state encountered");
2635 exit(-1);
2636 }
2637
2638 return ERROR_OK;
2639 }
2640
2641 /* Extract address encoded into trace data.
2642 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2643 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2644 int i, uint32_t *target)
2645 {
2646 /* if there are less than four entries prior to the indirect branch message
2647 * we can't extract the address */
2648 if (i < 4)
2649 *target = 0;
2650 else {
2651 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2652 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2653 }
2654 }
2655
2656 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2657 struct arm_instruction *instruction,
2658 struct command_context *cmd_ctx)
2659 {
2660 int retval = xscale_read_instruction(target, pc, instruction);
2661 if (retval == ERROR_OK)
2662 command_print(cmd_ctx, "%s", instruction->text);
2663 else
2664 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2665 }
2666
2667 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2668 {
2669 struct xscale_common *xscale = target_to_xscale(target);
2670 struct xscale_trace_data *trace_data = xscale->trace.data;
2671 int i, retval;
2672 uint32_t breakpoint_pc = 0;
2673 struct arm_instruction instruction;
2674 uint32_t current_pc = 0;/* initialized when address determined */
2675
2676 if (!xscale->trace.image)
2677 LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
2678
2679 /* loop for each trace buffer that was loaded from target */
2680 while (trace_data) {
2681 int chkpt = 0; /* incremented as checkpointed entries found */
2682 int j;
2683
2684 /* FIXME: set this to correct mode when trace buffer is first enabled */
2685 xscale->trace.core_state = ARM_STATE_ARM;
2686
2687 /* loop for each entry in this trace buffer */
2688 for (i = 0; i < trace_data->depth; i++) {
2689 int exception = 0;
2690 uint32_t chkpt_reg = 0x0;
2691 uint32_t branch_target = 0;
2692 int count;
2693
2694 /* trace entry type is upper nybble of 'message byte' */
2695 int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
2696
2697 /* Target addresses of indirect branches are written into buffer
2698 * before the message byte representing the branch. Skip past it */
2699 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2700 continue;
2701
2702 switch (trace_msg_type) {
2703 case 0: /* Exceptions */
2704 case 1:
2705 case 2:
2706 case 3:
2707 case 4:
2708 case 5:
2709 case 6:
2710 case 7:
2711 exception = (trace_data->entries[i].data & 0x70) >> 4;
2712
2713 /* FIXME: vector table may be at ffff0000 */
2714 branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
2715 break;
2716
2717 case 8: /* Direct Branch */
2718 break;
2719
2720 case 9: /* Indirect Branch */
2721 xscale_branch_address(trace_data, i, &branch_target);
2722 break;
2723
2724 case 13: /* Checkpointed Indirect Branch */
2725 xscale_branch_address(trace_data, i, &branch_target);
2726 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2727 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is
2728 *oldest */
2729 else
2730 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and
2731 *newest */
2732
2733 chkpt++;
2734 break;
2735
2736 case 12: /* Checkpointed Direct Branch */
2737 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2738 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is
2739 *oldest */
2740 else
2741 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and
2742 *newest */
2743
2744 /* if no current_pc, checkpoint will be starting point */
2745 if (current_pc == 0)
2746 branch_target = chkpt_reg;
2747
2748 chkpt++;
2749 break;
2750
2751 case 15:/* Roll-over */
2752 break;
2753
2754 default:/* Reserved */
2755 LOG_WARNING("trace is suspect: invalid trace message byte");
2756 continue;
2757
2758 }
2759
2760 /* If we don't have the current_pc yet, but we did get the branch target
2761 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
2762 * then we can start displaying instructions at the next iteration, with
2763 * branch_target as the starting point.
2764 */
2765 if (current_pc == 0) {
2766 current_pc = branch_target; /* remains 0 unless branch_target *obtained */
2767 continue;
2768 }
2769
2770 /* We have current_pc. Read and display the instructions from the image.
2771 * First, display count instructions (lower nybble of message byte). */
2772 count = trace_data->entries[i].data & 0x0f;
2773 for (j = 0; j < count; j++) {
2774 xscale_display_instruction(target, current_pc, &instruction,
2775 cmd_ctx);
2776 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2777 }
2778
2779 /* An additional instruction is implicitly added to count for
2780 * rollover and some exceptions: undef, swi, prefetch abort. */
2781 if ((trace_msg_type == 15) || (exception > 0 && exception < 4)) {
2782 xscale_display_instruction(target, current_pc, &instruction,
2783 cmd_ctx);
2784 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2785 }
2786
2787 if (trace_msg_type == 15) /* rollover */
2788 continue;
2789
2790 if (exception) {
2791 command_print(cmd_ctx, "--- exception %i ---", exception);
2792 continue;
2793 }
2794
2795 /* not exception or rollover; next instruction is a branch and is
2796 * not included in the count */
2797 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2798
2799 /* for direct branches, extract branch destination from instruction */
2800 if ((trace_msg_type == 8) || (trace_msg_type == 12)) {
2801 retval = xscale_read_instruction(target, current_pc, &instruction);
2802 if (retval == ERROR_OK)
2803 current_pc = instruction.info.b_bl_bx_blx.target_address;
2804 else
2805 current_pc = 0; /* branch destination unknown */
2806
2807 /* direct branch w/ checkpoint; can also get from checkpoint reg */
2808 if (trace_msg_type == 12) {
2809 if (current_pc == 0)
2810 current_pc = chkpt_reg;
2811 else if (current_pc != chkpt_reg) /* sanity check */
2812 LOG_WARNING("trace is suspect: checkpoint register "
2813 "inconsistent with adddress from image");
2814 }
2815
2816 if (current_pc == 0)
2817 command_print(cmd_ctx, "address unknown");
2818
2819 continue;
2820 }
2821
2822 /* indirect branch; the branch destination was read from trace buffer */
2823 if ((trace_msg_type == 9) || (trace_msg_type == 13)) {
2824 current_pc = branch_target;
2825
2826 /* sanity check (checkpoint reg is redundant) */
2827 if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
2828 LOG_WARNING("trace is suspect: checkpoint register "
2829 "inconsistent with address from trace buffer");
2830 }
2831
2832 } /* END: for (i = 0; i < trace_data->depth; i++) */
2833
2834 breakpoint_pc = trace_data->last_instruction; /* used below */
2835 trace_data = trace_data->next;
2836
2837 } /* END: while (trace_data) */
2838
2839 /* Finally... display all instructions up to the value of the pc when the
2840 * debug break occurred (saved when trace data was collected from target).
2841 * This is necessary because the trace only records execution branches and 16
2842 * consecutive instructions (rollovers), so last few typically missed.
2843 */
2844 if (current_pc == 0)
2845 return ERROR_OK;/* current_pc was never found */
2846
2847 /* how many instructions remaining? */
2848 int gap_count = (breakpoint_pc - current_pc) /
2849 (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
2850
2851 /* should never be negative or over 16, but verify */
2852 if (gap_count < 0 || gap_count > 16) {
2853 LOG_WARNING("trace is suspect: excessive gap at end of trace");
2854 return ERROR_OK;/* bail; large number or negative value no good */
2855 }
2856
2857 /* display remaining instructions */
2858 for (i = 0; i < gap_count; i++) {
2859 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2860 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2861 }
2862
2863 return ERROR_OK;
2864 }
2865
/* accessors wired into every entry of the XScale debug register cache */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2870
2871 static void xscale_build_reg_cache(struct target *target)
2872 {
2873 struct xscale_common *xscale = target_to_xscale(target);
2874 struct arm *arm = &xscale->arm;
2875 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2876 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2877 int i;
2878 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2879
2880 (*cache_p) = arm_build_reg_cache(target, arm);
2881
2882 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2883 cache_p = &(*cache_p)->next;
2884
2885 /* fill in values for the xscale reg cache */
2886 (*cache_p)->name = "XScale registers";
2887 (*cache_p)->next = NULL;
2888 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2889 (*cache_p)->num_regs = num_regs;
2890
2891 for (i = 0; i < num_regs; i++) {
2892 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2893 (*cache_p)->reg_list[i].value = calloc(4, 1);
2894 (*cache_p)->reg_list[i].dirty = 0;
2895 (*cache_p)->reg_list[i].valid = 0;
2896 (*cache_p)->reg_list[i].size = 32;
2897 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2898 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2899 arch_info[i] = xscale_reg_arch_info[i];
2900 arch_info[i].target = target;
2901 }
2902
2903 xscale->reg_cache = (*cache_p);
2904 }
2905
/* target_type 'init_target' hook: build the register caches (ARM core
 * registers plus the XScale debug registers) once the target exists. */
static int xscale_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2912
/* One-time initialization of the XScale-specific target state plus the
 * generic ARMv4/5 core and MMU glue.  Called from xscale_target_create().
 * The debug handler itself is not downloaded here; only its default
 * address and the local vector-table copies are prepared. */
static int xscale_init_arch_info(struct target *target,
	struct xscale_common *xscale, struct jtag_tap *tap)
{
	struct arm *arm;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	arm = &xscale->arm;

	/* store architecture-specific data (magic guards later casts) */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* PXA3xx with 11 bit IR shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler
	 * (B offsets are in words, relative to vector address + 8) */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* remaining vectors default to branch-to-self (offset -2 words) */
	for (i = 1; i <= 7; i++) {
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction breakpoint comparators (IBCR0/IBCR1)... */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	/* ...and two data breakpoint registers (DBR0/DBR1) */
	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
		target_name(target));

	/* opcodes patched in for software breakpoints (ARM and Thumb) */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	/* only the lowest vector-catch bit enabled by default */
	xscale->vector_catch = 0x1;

	/* no trace data or image loaded yet; tracing disabled */
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale->trace.buffer_fill = 0;
	xscale->trace.fill_counter = 0;

	/* prepare ARMv4/5 specific information */
	arm->arch_info = xscale;
	arm->core_type = ARM_MODE_ANY;
	arm->read_core_reg = xscale_read_core_reg;
	arm->write_core_reg = xscale_write_core_reg;
	arm->full_context = xscale_full_context;

	arm_init_arch_info(target, arm);

	/* hook up the generic ARMv4/5 MMU/cache helpers */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
3002
3003 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3004 {
3005 struct xscale_common *xscale;
3006
3007 if (sizeof xscale_debug_handler > 0x800) {
3008 LOG_ERROR("debug_handler.bin: larger than 2kb");
3009 return ERROR_FAIL;
3010 }
3011
3012 xscale = calloc(1, sizeof(*xscale));
3013 if (!xscale)
3014 return ERROR_FAIL;
3015
3016 return xscale_init_arch_info(target, xscale, target->tap);
3017 }
3018
3019 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3020 {
3021 struct target *target = NULL;
3022 struct xscale_common *xscale;
3023 int retval;
3024 uint32_t handler_address;
3025
3026 if (CMD_ARGC < 2)
3027 return ERROR_COMMAND_SYNTAX_ERROR;
3028
3029 target = get_target(CMD_ARGV[0]);
3030 if (target == NULL) {
3031 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3032 return ERROR_FAIL;
3033 }
3034
3035 xscale = target_to_xscale(target);
3036 retval = xscale_verify_pointer(CMD_CTX, xscale);
3037 if (retval != ERROR_OK)
3038 return retval;
3039
3040 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3041
3042 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3043 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3044 xscale->handler_address = handler_address;
3045 else {
3046 LOG_ERROR(
3047 "xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3048 return ERROR_FAIL;
3049 }
3050
3051 return ERROR_OK;
3052 }
3053
3054 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3055 {
3056 struct target *target = NULL;
3057 struct xscale_common *xscale;
3058 int retval;
3059 uint32_t cache_clean_address;
3060
3061 if (CMD_ARGC < 2)
3062 return ERROR_COMMAND_SYNTAX_ERROR;
3063
3064 target = get_target(CMD_ARGV[0]);
3065 if (target == NULL) {
3066 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3067 return ERROR_FAIL;
3068 }
3069 xscale = target_to_xscale(target);
3070 retval = xscale_verify_pointer(CMD_CTX, xscale);
3071 if (retval != ERROR_OK)
3072 return retval;
3073
3074 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3075
3076 if (cache_clean_address & 0xffff)
3077 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3078 else
3079 xscale->cache_clean_address = cache_clean_address;
3080
3081 return ERROR_OK;
3082 }
3083
3084 COMMAND_HANDLER(xscale_handle_cache_info_command)
3085 {
3086 struct target *target = get_current_target(CMD_CTX);
3087 struct xscale_common *xscale = target_to_xscale(target);
3088 int retval;
3089
3090 retval = xscale_verify_pointer(CMD_CTX, xscale);
3091 if (retval != ERROR_OK)
3092 return retval;
3093
3094 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3095 }
3096
3097 static int xscale_virt2phys(struct target *target,
3098 target_addr_t virtual, target_addr_t *physical)
3099 {
3100 struct xscale_common *xscale = target_to_xscale(target);
3101 uint32_t cb;
3102
3103 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3104 LOG_ERROR(xscale_not);
3105 return ERROR_TARGET_INVALID;
3106 }
3107
3108 uint32_t ret;
3109 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3110 virtual, &cb, &ret);
3111 if (retval != ERROR_OK)
3112 return retval;
3113 *physical = ret;
3114 return ERROR_OK;
3115 }
3116
3117 static int xscale_mmu(struct target *target, int *enabled)
3118 {
3119 struct xscale_common *xscale = target_to_xscale(target);
3120
3121 if (target->state != TARGET_HALTED) {
3122 LOG_ERROR("Target not halted");
3123 return ERROR_TARGET_INVALID;
3124 }
3125 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3126 return ERROR_OK;
3127 }
3128
3129 COMMAND_HANDLER(xscale_handle_mmu_command)
3130 {
3131 struct target *target = get_current_target(CMD_CTX);
3132 struct xscale_common *xscale = target_to_xscale(target);
3133 int retval;
3134
3135 retval = xscale_verify_pointer(CMD_CTX, xscale);
3136 if (retval != ERROR_OK)
3137 return retval;
3138
3139 if (target->state != TARGET_HALTED) {
3140 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3141 return ERROR_OK;
3142 }
3143
3144 if (CMD_ARGC >= 1) {
3145 bool enable;
3146 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3147 if (enable)
3148 xscale_enable_mmu_caches(target, 1, 0, 0);
3149 else
3150 xscale_disable_mmu_caches(target, 1, 0, 0);
3151 xscale->armv4_5_mmu.mmu_enabled = enable;
3152 }
3153
3154 command_print(CMD_CTX, "mmu %s",
3155 (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3156
3157 return ERROR_OK;
3158 }
3159
3160 COMMAND_HANDLER(xscale_handle_idcache_command)
3161 {
3162 struct target *target = get_current_target(CMD_CTX);
3163 struct xscale_common *xscale = target_to_xscale(target);
3164
3165 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3166 if (retval != ERROR_OK)
3167 return retval;
3168
3169 if (target->state != TARGET_HALTED) {
3170 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3171 return ERROR_OK;
3172 }
3173
3174 bool icache = false;
3175 if (strcmp(CMD_NAME, "icache") == 0)
3176 icache = true;
3177 if (CMD_ARGC >= 1) {
3178 bool enable;
3179 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3180 if (icache) {
3181 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3182 if (enable)
3183 xscale_enable_mmu_caches(target, 0, 0, 1);
3184 else
3185 xscale_disable_mmu_caches(target, 0, 0, 1);
3186 } else {
3187 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3188 if (enable)
3189 xscale_enable_mmu_caches(target, 0, 1, 0);
3190 else
3191 xscale_disable_mmu_caches(target, 0, 1, 0);
3192 }
3193 }
3194
3195 bool enabled = icache ?
3196 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3197 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3198 const char *msg = enabled ? "enabled" : "disabled";
3199 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3200
3201 return ERROR_OK;
3202 }
3203
/* Exception vectors that may be trapped into debug state: each entry
 * maps a user-visible vector name to its DCSR trap-enable bit. */
static const struct {
	char name[15];
	unsigned mask;
} vec_ids[] = {
	{ "fiq", DCSR_TF, },
	{ "irq", DCSR_TI, },
	{ "dabt", DCSR_TD, },
	{ "pabt", DCSR_TA, },
	{ "swi", DCSR_TS, },
	{ "undef", DCSR_TU, },
	{ "reset", DCSR_TR, },
};
3216
/*
 * "xscale vector_catch ['all'|'none'|names...]" -- select which exception
 * vectors trap into debug state (updating the DCSR trap bits), then print
 * the catch/ignore state of every vector.
 */
COMMAND_HANDLER(xscale_handle_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	uint32_t dcsr_value;
	uint32_t catch = 0;
	struct reg *dcsr_reg = &xscale->reg_cache->reg_list[XSCALE_DCSR];

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
	if (CMD_ARGC > 0) {
		/* "all"/"none" are only recognized as a sole argument; the
		 * CMD_ARGC-- consumes it so the name loop below is skipped. */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = DCSR_TRAP_MASK;
				CMD_ARGC--;
			} else if (strcmp(CMD_ARGV[0], "none") == 0) {
				catch = 0;
				CMD_ARGC--;
			}
		}
		/* OR together the trap bits of every vector named; unknown
		 * names abort the command. */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name))
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_ERROR("No vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
		/* Replace only the trap field of the cached DCSR, then
		 * write the register back to the hardware. */
		buf_set_u32(dcsr_reg->value, 0, 32,
			(buf_get_u32(dcsr_reg->value, 0, 32) & ~DCSR_TRAP_MASK) | catch);
		xscale_write_dcsr(target, -1, -1);
	}

	/* Report the resulting catch/ignore state of each vector. */
	dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD_CTX, "%15s: %s", vec_ids[i].name,
			(dcsr_value & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
3267
3268
/*
 * "xscale vector_table [('high'|'low') index value]" -- set one entry of
 * the user-defined static vector tables, or list all entries set so far
 * when invoked without arguments.  Only indices 1..7 are accepted.
 */
COMMAND_HANDLER(xscale_handle_vector_table_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int err = 0;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC == 0) {	/* print current settings */
		int idx;

		command_print(CMD_CTX, "active user-set static vectors:");
		/* only entries whose bit is set in the *_set mask are valid */
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_low_vectors_set & (1 << idx))
				command_print(CMD_CTX,
					"low %d: 0x%" PRIx32,
					idx,
					xscale->static_low_vectors[idx]);
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_high_vectors_set & (1 << idx))
				command_print(CMD_CTX,
					"high %d: 0x%" PRIx32,
					idx,
					xscale->static_high_vectors[idx]);
		return ERROR_OK;
	}

	if (CMD_ARGC != 3)
		err = 1;
	else {
		int idx;
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
		uint32_t vec;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);

		/* index 0 is not settable through this command */
		if (idx < 1 || idx >= 8)
			err = 1;

		if (!err && strcmp(CMD_ARGV[0], "low") == 0) {
			xscale->static_low_vectors_set |= (1<<idx);
			xscale->static_low_vectors[idx] = vec;
		} else if (!err && (strcmp(CMD_ARGV[0], "high") == 0)) {
			xscale->static_high_vectors_set |= (1<<idx);
			xscale->static_high_vectors[idx] = vec;
		} else
			err = 1;
	}

	if (err)
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
3325
3326
3327 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3328 {
3329 struct target *target = get_current_target(CMD_CTX);
3330 struct xscale_common *xscale = target_to_xscale(target);
3331 uint32_t dcsr_value;
3332 int retval;
3333
3334 retval = xscale_verify_pointer(CMD_CTX, xscale);
3335 if (retval != ERROR_OK)
3336 return retval;
3337
3338 if (target->state != TARGET_HALTED) {
3339 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3340 return ERROR_OK;
3341 }
3342
3343 if (CMD_ARGC >= 1) {
3344 if (strcmp("enable", CMD_ARGV[0]) == 0)
3345 xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
3346 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3347 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3348 else
3349 return ERROR_COMMAND_SYNTAX_ERROR;
3350 }
3351
3352 if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3353 if (strcmp("fill", CMD_ARGV[1]) == 0) {
3354 int buffcount = 1; /* default */
3355 if (CMD_ARGC >= 3)
3356 COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
3357 if (buffcount < 1) { /* invalid */
3358 command_print(CMD_CTX, "fill buffer count must be > 0");
3359 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3360 return ERROR_COMMAND_SYNTAX_ERROR;
3361 }
3362 xscale->trace.buffer_fill = buffcount;
3363 xscale->trace.mode = XSCALE_TRACE_FILL;
3364 } else if (strcmp("wrap", CMD_ARGV[1]) == 0)
3365 xscale->trace.mode = XSCALE_TRACE_WRAP;
3366 else {
3367 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3368 return ERROR_COMMAND_SYNTAX_ERROR;
3369 }
3370 }
3371
3372 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3373 char fill_string[12];
3374 sprintf(fill_string, "fill %d", xscale->trace.buffer_fill);
3375 command_print(CMD_CTX, "trace buffer enabled (%s)",
3376 (xscale->trace.mode == XSCALE_TRACE_FILL)
3377 ? fill_string : "wrap");
3378 } else
3379 command_print(CMD_CTX, "trace buffer disabled");
3380
3381 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3382 if (xscale->trace.mode == XSCALE_TRACE_FILL)
3383 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3384 else
3385 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3386
3387 return ERROR_OK;
3388 }
3389
3390 COMMAND_HANDLER(xscale_handle_trace_image_command)
3391 {
3392 struct target *target = get_current_target(CMD_CTX);
3393 struct xscale_common *xscale = target_to_xscale(target);
3394 int retval;
3395
3396 if (CMD_ARGC < 1)
3397 return ERROR_COMMAND_SYNTAX_ERROR;
3398
3399 retval = xscale_verify_pointer(CMD_CTX, xscale);
3400 if (retval != ERROR_OK)
3401 return retval;
3402
3403 if (xscale->trace.image) {
3404 image_close(xscale->trace.image);
3405 free(xscale->trace.image);
3406 command_print(CMD_CTX, "previously loaded image found and closed");
3407 }
3408
3409 xscale->trace.image = malloc(sizeof(struct image));
3410 xscale->trace.image->base_address_set = 0;
3411 xscale->trace.image->start_address_set = 0;
3412
3413 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3414 if (CMD_ARGC >= 2) {
3415 xscale->trace.image->base_address_set = 1;
3416 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3417 } else
3418 xscale->trace.image->base_address_set = 0;
3419
3420 if (image_open(xscale->trace.image, CMD_ARGV[0],
3421 (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK) {
3422 free(xscale->trace.image);
3423 xscale->trace.image = NULL;
3424 return ERROR_OK;
3425 }
3426
3427 return ERROR_OK;
3428 }
3429
/*
 * "xscale dump_trace <file>" -- write the collected trace data to a
 * binary file.  Each buffer is emitted as four u32 header words
 * (chkpt0, chkpt1, last_instruction, depth) followed by depth entries
 * packing data in the low half-word and type in the high half-word.
 */
COMMAND_HANDLER(xscale_handle_dump_trace_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	struct xscale_trace_data *trace_data;
	struct fileio *file;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC < 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	trace_data = xscale->trace.data;

	if (!trace_data) {
		command_print(CMD_CTX, "no trace data collected");
		return ERROR_OK;
	}

	if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
		return ERROR_OK;

	/* walk the linked list of trace buffers, oldest first */
	while (trace_data) {
		int i;

		fileio_write_u32(file, trace_data->chkpt0);
		fileio_write_u32(file, trace_data->chkpt1);
		fileio_write_u32(file, trace_data->last_instruction);
		fileio_write_u32(file, trace_data->depth);

		/* pack each entry: data in bits 15:0, type in bits 31:16 */
		for (i = 0; i < trace_data->depth; i++)
			fileio_write_u32(file, trace_data->entries[i].data |
				((trace_data->entries[i].type & 0xffff) << 16));

		trace_data = trace_data->next;
	}

	fileio_close(file);

	return ERROR_OK;
}
3479
3480 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3481 {
3482 struct target *target = get_current_target(CMD_CTX);
3483 struct xscale_common *xscale = target_to_xscale(target);
3484 int retval;
3485
3486 retval = xscale_verify_pointer(CMD_CTX, xscale);
3487 if (retval != ERROR_OK)
3488 return retval;
3489
3490 xscale_analyze_trace(target, CMD_CTX);
3491
3492 return ERROR_OK;
3493 }
3494
3495 COMMAND_HANDLER(xscale_handle_cp15)
3496 {
3497 struct target *target = get_current_target(CMD_CTX);
3498 struct xscale_common *xscale = target_to_xscale(target);
3499 int retval;
3500
3501 retval = xscale_verify_pointer(CMD_CTX, xscale);
3502 if (retval != ERROR_OK)
3503 return retval;
3504
3505 if (target->state != TARGET_HALTED) {
3506 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3507 return ERROR_OK;
3508 }
3509 uint32_t reg_no = 0;
3510 struct reg *reg = NULL;
3511 if (CMD_ARGC > 0) {
3512 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3513 /*translate from xscale cp15 register no to openocd register*/
3514 switch (reg_no) {
3515 case 0:
3516 reg_no = XSCALE_MAINID;
3517 break;
3518 case 1:
3519 reg_no = XSCALE_CTRL;
3520 break;
3521 case 2:
3522 reg_no = XSCALE_TTB;
3523 break;
3524 case 3:
3525 reg_no = XSCALE_DAC;
3526 break;
3527 case 5:
3528 reg_no = XSCALE_FSR;
3529 break;
3530 case 6:
3531 reg_no = XSCALE_FAR;
3532 break;
3533 case 13:
3534 reg_no = XSCALE_PID;
3535 break;
3536 case 15:
3537 reg_no = XSCALE_CPACCESS;
3538 break;
3539 default:
3540 command_print(CMD_CTX, "invalid register number");
3541 return ERROR_COMMAND_SYNTAX_ERROR;
3542 }
3543 reg = &xscale->reg_cache->reg_list[reg_no];
3544
3545 }
3546 if (CMD_ARGC == 1) {
3547 uint32_t value;
3548
3549 /* read cp15 control register */
3550 xscale_get_reg(reg);
3551 value = buf_get_u32(reg->value, 0, 32);
3552 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size),
3553 value);
3554 } else if (CMD_ARGC == 2) {
3555 uint32_t value;
3556 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3557
3558 /* send CP write request (command 0x41) */
3559 xscale_send_u32(target, 0x41);
3560
3561 /* send CP register number */
3562 xscale_send_u32(target, reg_no);
3563
3564 /* send CP register value */
3565 xscale_send_u32(target, value);
3566
3567 /* execute cpwait to ensure outstanding operations complete */
3568 xscale_send_u32(target, 0x53);
3569 } else
3570 return ERROR_COMMAND_SYNTAX_ERROR;
3571
3572 return ERROR_OK;
3573 }
3574
/* Commands usable only in EXEC mode (i.e. once a target exists). */
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		/* shares its handler with "icache"; dispatch is on CMD_NAME */
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display mask of vectors "
			"that should trigger debug entry",
		.usage = "['all'|'none'|'fiq'|'irq'|'dabt'|'pabt'|'swi'|'undef'|'reset']",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
		.usage = "",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};
/* Commands usable in any mode (including config), plus the EXEC-only
 * commands chained in from xscale_exec_command_handlers. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "<target> <address>",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level registration: generic ARM commands plus the "xscale"
 * command group defined above. */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.usage = "",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3692
/* Target driver entry points for Intel XScale cores. */
struct target_type xscale_target = {
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* virtual and physical memory accessors */
	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,

	/* generic ARM algorithm-based helpers */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	/* address translation hooks (see xscale_virt2phys/xscale_mmu) */
	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)