1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
25 ***************************************************************************/
26
27 #ifdef HAVE_CONFIG_H
28 #include "config.h"
29 #endif
30
31 #include "breakpoints.h"
32 #include "xscale.h"
33 #include "target_type.h"
34 #include "arm_jtag.h"
35 #include "arm_simulator.h"
36 #include "arm_disassembler.h"
37 #include <helper/time_support.h>
38 #include "register.h"
39 #include "image.h"
40 #include "arm_opcodes.h"
41 #include "armv4_5.h"
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62 /* forward declarations */
63 static int xscale_resume(struct target *, int current,
64 uint32_t address, int handle_breakpoints, int debug_execution);
65 static int xscale_debug_entry(struct target *);
66 static int xscale_restore_banked(struct target *);
67 static int xscale_get_reg(struct reg *reg);
68 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
69 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
70 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
71 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
72 static int xscale_read_trace(struct target *);
73
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
76 *
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
81 */
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
84
85 static char *const xscale_reg_list[] = {
86 "XSCALE_MAINID", /* 0 */
87 "XSCALE_CACHETYPE",
88 "XSCALE_CTRL",
89 "XSCALE_AUXCTRL",
90 "XSCALE_TTB",
91 "XSCALE_DAC",
92 "XSCALE_FSR",
93 "XSCALE_FAR",
94 "XSCALE_PID",
95 "XSCALE_CPACCESS",
96 "XSCALE_IBCR0", /* 10 */
97 "XSCALE_IBCR1",
98 "XSCALE_DBR0",
99 "XSCALE_DBR1",
100 "XSCALE_DBCON",
101 "XSCALE_TBREG",
102 "XSCALE_CHKPT0",
103 "XSCALE_CHKPT1",
104 "XSCALE_DCSR",
105 "XSCALE_TX",
106 "XSCALE_RX", /* 20 */
107 "XSCALE_TXRXCTRL",
108 };
109
110 static const struct xscale_reg xscale_reg_arch_info[] = {
111 {XSCALE_MAINID, NULL},
112 {XSCALE_CACHETYPE, NULL},
113 {XSCALE_CTRL, NULL},
114 {XSCALE_AUXCTRL, NULL},
115 {XSCALE_TTB, NULL},
116 {XSCALE_DAC, NULL},
117 {XSCALE_FSR, NULL},
118 {XSCALE_FAR, NULL},
119 {XSCALE_PID, NULL},
120 {XSCALE_CPACCESS, NULL},
121 {XSCALE_IBCR0, NULL},
122 {XSCALE_IBCR1, NULL},
123 {XSCALE_DBR0, NULL},
124 {XSCALE_DBR1, NULL},
125 {XSCALE_DBCON, NULL},
126 {XSCALE_TBREG, NULL},
127 {XSCALE_CHKPT0, NULL},
128 {XSCALE_CHKPT1, NULL},
129 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
130 {-1, NULL}, /* TX accessed via JTAG */
131 {-1, NULL}, /* RX accessed via JTAG */
132 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
133 };
134
135 /* convenience wrapper to access XScale specific registers */
136 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
137 {
138 uint8_t buf[4];
139
140 buf_set_u32(buf, 0, 32, value);
141
142 return xscale_set_reg(reg, buf);
143 }
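
/* Illustrative usage (a sketch, mirroring xscale_enable_single_step() further
 * down in this file): program one of the 32-bit debug registers through the
 * register cache, e.g.
 *
 *	struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
 *	retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1);
 *
 * The wrapper just packs the value into a little-endian buffer and defers to
 * xscale_set_reg() for the actual write.
 */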
144
145 static const char xscale_not[] = "target is not an XScale";
146
147 static int xscale_verify_pointer(struct command_context *cmd_ctx,
148 struct xscale_common *xscale)
149 {
150 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
151 command_print(cmd_ctx, xscale_not);
152 return ERROR_TARGET_INVALID;
153 }
154 return ERROR_OK;
155 }
156
157 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
158 {
159 assert(tap != NULL);
160
161 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
162 struct scan_field field;
163 uint8_t scratch[4];
164
165 memset(&field, 0, sizeof field);
166 field.num_bits = tap->ir_length;
167 field.out_value = scratch;
168 buf_set_u32(scratch, 0, field.num_bits, new_instr);
169
170 jtag_add_ir_scan(tap, &field, end_state);
171 }
172
173 return ERROR_OK;
174 }
175
176 static int xscale_read_dcsr(struct target *target)
177 {
178 struct xscale_common *xscale = target_to_xscale(target);
179 int retval;
180 struct scan_field fields[3];
181 uint8_t field0 = 0x0;
182 uint8_t field0_check_value = 0x2;
183 uint8_t field0_check_mask = 0x7;
184 uint8_t field2 = 0x0;
185 uint8_t field2_check_value = 0x0;
186 uint8_t field2_check_mask = 0x1;
187
188 xscale_jtag_set_instr(target->tap,
189 XSCALE_SELDCSR << xscale->xscale_variant,
190 TAP_DRPAUSE);
191
192 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
193 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
194
195 memset(&fields, 0, sizeof fields);
196
197 fields[0].num_bits = 3;
198 fields[0].out_value = &field0;
199 uint8_t tmp;
200 fields[0].in_value = &tmp;
201
202 fields[1].num_bits = 32;
203 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
204
205 fields[2].num_bits = 1;
206 fields[2].out_value = &field2;
207 uint8_t tmp2;
208 fields[2].in_value = &tmp2;
209
210 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
211
212 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
213 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
214
215 retval = jtag_execute_queue();
216 if (retval != ERROR_OK) {
217 LOG_ERROR("JTAG error while reading DCSR");
218 return retval;
219 }
220
221 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
222 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
223
224 /* write the register with the value we just read
 225 	 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
226 */
227 field0_check_mask = 0x1;
228 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
229 fields[1].in_value = NULL;
230
231 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
232
 233 	/* DANGER!!! this must be here. It ensures that the arguments
 234 	 * to jtag_set_check_value() do not go out of scope! */
235 return jtag_execute_queue();
236 }
237
238
239 static void xscale_getbuf(jtag_callback_data_t arg)
240 {
241 uint8_t *in = (uint8_t *)arg;
242 *((uint32_t *)arg) = buf_get_u32(in, 0, 32);
243 }
244
245 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
246 {
247 if (num_words == 0)
248 return ERROR_COMMAND_SYNTAX_ERROR;
249
250 struct xscale_common *xscale = target_to_xscale(target);
251 int retval = ERROR_OK;
252 tap_state_t path[3];
253 struct scan_field fields[3];
254 uint8_t *field0 = malloc(num_words * 1);
255 uint8_t field0_check_value = 0x2;
256 uint8_t field0_check_mask = 0x6;
257 uint32_t *field1 = malloc(num_words * 4);
258 uint8_t field2_check_value = 0x0;
259 uint8_t field2_check_mask = 0x1;
260 int words_done = 0;
261 int words_scheduled = 0;
262 int i;
263
264 path[0] = TAP_DRSELECT;
265 path[1] = TAP_DRCAPTURE;
266 path[2] = TAP_DRSHIFT;
267
268 memset(&fields, 0, sizeof fields);
269
270 fields[0].num_bits = 3;
271 uint8_t tmp;
272 fields[0].in_value = &tmp;
273 fields[0].check_value = &field0_check_value;
274 fields[0].check_mask = &field0_check_mask;
275
276 fields[1].num_bits = 32;
277
278 fields[2].num_bits = 1;
279 uint8_t tmp2;
280 fields[2].in_value = &tmp2;
281 fields[2].check_value = &field2_check_value;
282 fields[2].check_mask = &field2_check_mask;
283
284 xscale_jtag_set_instr(target->tap,
285 XSCALE_DBGTX << xscale->xscale_variant,
286 TAP_IDLE);
287 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above
 288 	 * could be a no-op */
289
290 /* repeat until all words have been collected */
291 int attempts = 0;
292 while (words_done < num_words) {
293 /* schedule reads */
294 words_scheduled = 0;
295 for (i = words_done; i < num_words; i++) {
296 fields[0].in_value = &field0[i];
297
298 jtag_add_pathmove(3, path);
299
300 fields[1].in_value = (uint8_t *)(field1 + i);
301
302 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
303
304 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
305
306 words_scheduled++;
307 }
308
309 retval = jtag_execute_queue();
310 if (retval != ERROR_OK) {
311 LOG_ERROR("JTAG error while receiving data from debug handler");
312 break;
313 }
314
315 /* examine results */
316 for (i = words_done; i < num_words; i++) {
317 if (!(field0[i] & 1)) {
318 /* move backwards if necessary */
319 int j;
320 for (j = i; j < num_words - 1; j++) {
321 field0[j] = field0[j + 1];
322 field1[j] = field1[j + 1];
323 }
324 words_scheduled--;
325 }
326 }
327 if (words_scheduled == 0) {
328 if (attempts++ == 1000) {
329 LOG_ERROR(
330 "Failed to receiving data from debug handler after 1000 attempts");
331 retval = ERROR_TARGET_TIMEOUT;
332 break;
333 }
334 }
335
336 words_done += words_scheduled;
337 }
338
339 for (i = 0; i < num_words; i++)
340 *(buffer++) = buf_get_u32((uint8_t *)&field1[i], 0, 32);
341
 342 	free(field1);
 	free(field0);	/* field0 was also heap-allocated above; don't leak it */
343
344 return retval;
345 }
346
347 static int xscale_read_tx(struct target *target, int consume)
348 {
349 struct xscale_common *xscale = target_to_xscale(target);
350 tap_state_t path[3];
351 tap_state_t noconsume_path[6];
352 int retval;
353 struct timeval timeout, now;
354 struct scan_field fields[3];
355 uint8_t field0_in = 0x0;
356 uint8_t field0_check_value = 0x2;
357 uint8_t field0_check_mask = 0x6;
358 uint8_t field2_check_value = 0x0;
359 uint8_t field2_check_mask = 0x1;
360
361 xscale_jtag_set_instr(target->tap,
362 XSCALE_DBGTX << xscale->xscale_variant,
363 TAP_IDLE);
364
365 path[0] = TAP_DRSELECT;
366 path[1] = TAP_DRCAPTURE;
367 path[2] = TAP_DRSHIFT;
368
369 noconsume_path[0] = TAP_DRSELECT;
370 noconsume_path[1] = TAP_DRCAPTURE;
371 noconsume_path[2] = TAP_DREXIT1;
372 noconsume_path[3] = TAP_DRPAUSE;
373 noconsume_path[4] = TAP_DREXIT2;
374 noconsume_path[5] = TAP_DRSHIFT;
375
376 memset(&fields, 0, sizeof fields);
377
378 fields[0].num_bits = 3;
379 fields[0].in_value = &field0_in;
380
381 fields[1].num_bits = 32;
382 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
383
384 fields[2].num_bits = 1;
385 uint8_t tmp;
386 fields[2].in_value = &tmp;
387
388 gettimeofday(&timeout, NULL);
389 timeval_add_time(&timeout, 1, 0);
390
391 for (;; ) {
392 /* if we want to consume the register content (i.e. clear TX_READY),
393 * we have to go straight from Capture-DR to Shift-DR
394 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
395 */
396 if (consume)
397 jtag_add_pathmove(3, path);
398 else
399 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
400
401 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
402
403 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
404 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
405
406 retval = jtag_execute_queue();
407 if (retval != ERROR_OK) {
408 LOG_ERROR("JTAG error while reading TX");
409 return ERROR_TARGET_TIMEOUT;
410 }
411
412 gettimeofday(&now, NULL);
413 if ((now.tv_sec > timeout.tv_sec) ||
414 ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
415 LOG_ERROR("time out reading TX register");
416 return ERROR_TARGET_TIMEOUT;
417 }
418 if (!((!(field0_in & 1)) && consume))
419 goto done;
420 if (debug_level >= 3) {
421 LOG_DEBUG("waiting 100ms");
422 alive_sleep(100); /* avoid flooding the logs */
423 } else
424 keep_alive();
425 }
426 done:
427
428 if (!(field0_in & 1))
429 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
430
431 return ERROR_OK;
432 }
433
434 static int xscale_write_rx(struct target *target)
435 {
436 struct xscale_common *xscale = target_to_xscale(target);
437 int retval;
438 struct timeval timeout, now;
439 struct scan_field fields[3];
440 uint8_t field0_out = 0x0;
441 uint8_t field0_in = 0x0;
442 uint8_t field0_check_value = 0x2;
443 uint8_t field0_check_mask = 0x6;
444 uint8_t field2 = 0x0;
445 uint8_t field2_check_value = 0x0;
446 uint8_t field2_check_mask = 0x1;
447
448 xscale_jtag_set_instr(target->tap,
449 XSCALE_DBGRX << xscale->xscale_variant,
450 TAP_IDLE);
451
452 memset(&fields, 0, sizeof fields);
453
454 fields[0].num_bits = 3;
455 fields[0].out_value = &field0_out;
456 fields[0].in_value = &field0_in;
457
458 fields[1].num_bits = 32;
459 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
460
461 fields[2].num_bits = 1;
462 fields[2].out_value = &field2;
463 uint8_t tmp;
464 fields[2].in_value = &tmp;
465
466 gettimeofday(&timeout, NULL);
467 timeval_add_time(&timeout, 1, 0);
468
469 /* poll until rx_read is low */
470 LOG_DEBUG("polling RX");
471 for (;;) {
472 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
473
474 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
475 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
476
477 retval = jtag_execute_queue();
478 if (retval != ERROR_OK) {
479 LOG_ERROR("JTAG error while writing RX");
480 return retval;
481 }
482
483 gettimeofday(&now, NULL);
484 if ((now.tv_sec > timeout.tv_sec) ||
485 ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
486 LOG_ERROR("time out writing RX register");
487 return ERROR_TARGET_TIMEOUT;
488 }
489 if (!(field0_in & 1))
490 goto done;
491 if (debug_level >= 3) {
492 LOG_DEBUG("waiting 100ms");
493 alive_sleep(100); /* avoid flooding the logs */
494 } else
495 keep_alive();
496 }
497 done:
498
499 /* set rx_valid */
500 field2 = 0x1;
501 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
502
503 retval = jtag_execute_queue();
504 if (retval != ERROR_OK) {
505 LOG_ERROR("JTAG error while writing RX");
506 return retval;
507 }
508
509 return ERROR_OK;
510 }
511
 512 /* send count elements, each of the given size in bytes, to the debug handler */
513 static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
514 {
515 struct xscale_common *xscale = target_to_xscale(target);
516 int retval;
517 int done_count = 0;
518
519 xscale_jtag_set_instr(target->tap,
520 XSCALE_DBGRX << xscale->xscale_variant,
521 TAP_IDLE);
522
523 static const uint8_t t0;
524 uint8_t t1[4];
525 static const uint8_t t2 = 1;
526 struct scan_field fields[3] = {
527 { .num_bits = 3, .out_value = &t0 },
528 { .num_bits = 32, .out_value = t1 },
529 { .num_bits = 1, .out_value = &t2 },
530 };
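	/* Note: this field layout mirrors the DBGRX scan used by xscale_write_rx()
	 * above: a 3-bit handshake (held at zero), the 32-bit data word, and the
	 * trailing rx_valid bit (t2 = 1) that commits each word to the handler. */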
531
532 int endianness = target->endianness;
533 while (done_count++ < count) {
534 uint32_t t;
535
536 switch (size) {
537 case 4:
538 if (endianness == TARGET_LITTLE_ENDIAN)
539 t = le_to_h_u32(buffer);
540 else
541 t = be_to_h_u32(buffer);
542 break;
543 case 2:
544 if (endianness == TARGET_LITTLE_ENDIAN)
545 t = le_to_h_u16(buffer);
546 else
547 t = be_to_h_u16(buffer);
548 break;
549 case 1:
550 t = buffer[0];
551 break;
552 default:
553 LOG_ERROR("BUG: size neither 4, 2 nor 1");
554 return ERROR_COMMAND_SYNTAX_ERROR;
555 }
556
557 buf_set_u32(t1, 0, 32, t);
558
559 jtag_add_dr_scan(target->tap,
560 3,
561 fields,
562 TAP_IDLE);
563 buffer += size;
564 }
565
566 retval = jtag_execute_queue();
567 if (retval != ERROR_OK) {
568 LOG_ERROR("JTAG error while sending data to debug handler");
569 return retval;
570 }
571
572 return ERROR_OK;
573 }
574
575 static int xscale_send_u32(struct target *target, uint32_t value)
576 {
577 struct xscale_common *xscale = target_to_xscale(target);
578
579 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
580 return xscale_write_rx(target);
581 }
582
583 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
584 {
585 struct xscale_common *xscale = target_to_xscale(target);
586 int retval;
587 struct scan_field fields[3];
588 uint8_t field0 = 0x0;
589 uint8_t field0_check_value = 0x2;
590 uint8_t field0_check_mask = 0x7;
591 uint8_t field2 = 0x0;
592 uint8_t field2_check_value = 0x0;
593 uint8_t field2_check_mask = 0x1;
594
595 if (hold_rst != -1)
596 xscale->hold_rst = hold_rst;
597
598 if (ext_dbg_brk != -1)
599 xscale->external_debug_break = ext_dbg_brk;
600
601 xscale_jtag_set_instr(target->tap,
602 XSCALE_SELDCSR << xscale->xscale_variant,
603 TAP_IDLE);
604
605 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
606 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
607
608 memset(&fields, 0, sizeof fields);
609
610 fields[0].num_bits = 3;
611 fields[0].out_value = &field0;
612 uint8_t tmp;
613 fields[0].in_value = &tmp;
614
615 fields[1].num_bits = 32;
616 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
617
618 fields[2].num_bits = 1;
619 fields[2].out_value = &field2;
620 uint8_t tmp2;
621 fields[2].in_value = &tmp2;
622
623 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
624
625 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
626 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
627
628 retval = jtag_execute_queue();
629 if (retval != ERROR_OK) {
630 LOG_ERROR("JTAG error while writing DCSR");
631 return retval;
632 }
633
634 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
635 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
636
637 return ERROR_OK;
638 }
639
 640 /* parity of a 32-bit word: 0 if the number of set bits is even, 1 if odd */
641 static unsigned int parity(unsigned int v)
642 {
643 /* unsigned int ov = v; */
644 v ^= v >> 16;
645 v ^= v >> 8;
646 v ^= v >> 4;
647 v &= 0xf;
648 /* LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1); */
649 return (0x6996 >> v) & 1;
650 }
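
/* Worked example (for illustration): 0x6996 is the classic 16-entry parity
 * lookup table, binary 0110 1001 1001 0110, whose bit n holds the parity of
 * the 4-bit value n.  After folding, v = 0x00000006 (two bits set) selects an
 * even entry, so parity(0x00000006) == 0, while v = 0x00000007 (three bits
 * set) gives parity(0x00000007) == 1.
 */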
651
652 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
653 {
654 struct xscale_common *xscale = target_to_xscale(target);
655 uint8_t packet[4];
656 uint8_t cmd;
657 int word;
658 struct scan_field fields[2];
659
660 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
661
662 /* LDIC into IR */
663 xscale_jtag_set_instr(target->tap,
664 XSCALE_LDIC << xscale->xscale_variant,
665 TAP_IDLE);
666
667 /* CMD is b011 to load a cacheline into the Mini ICache.
668 * Loading into the main ICache is deprecated, and unused.
669 * It's followed by three zero bits, and 27 address bits.
670 */
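	/* Sketch of the resulting 33-bit LDIC packet assembled below (not taken
	 * from the manual, just restating the scans that follow):
	 *
	 *	field 0, 6 bits:  0b000011   "load Mini ICache line" command
	 *	field 1, 27 bits: VA[31:5]   cache line virtual address
	 *
	 * followed by eight 33-bit data scans, each carrying one instruction
	 * word plus its parity bit.
	 */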
671 buf_set_u32(&cmd, 0, 6, 0x3);
672
673 /* virtual address of desired cache line */
674 buf_set_u32(packet, 0, 27, va >> 5);
675
676 memset(&fields, 0, sizeof fields);
677
678 fields[0].num_bits = 6;
679 fields[0].out_value = &cmd;
680
681 fields[1].num_bits = 27;
682 fields[1].out_value = packet;
683
684 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
685
686 /* rest of packet is a cacheline: 8 instructions, with parity */
687 fields[0].num_bits = 32;
688 fields[0].out_value = packet;
689
690 fields[1].num_bits = 1;
691 fields[1].out_value = &cmd;
692
693 for (word = 0; word < 8; word++) {
694 buf_set_u32(packet, 0, 32, buffer[word]);
695
696 uint32_t value;
697 memcpy(&value, packet, sizeof(uint32_t));
698 cmd = parity(value);
699
700 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
701 }
702
703 return jtag_execute_queue();
704 }
705
706 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
707 {
708 struct xscale_common *xscale = target_to_xscale(target);
709 uint8_t packet[4];
710 uint8_t cmd;
711 struct scan_field fields[2];
712
713 xscale_jtag_set_instr(target->tap,
714 XSCALE_LDIC << xscale->xscale_variant,
715 TAP_IDLE);
716
717 /* CMD for invalidate IC line b000, bits [6:4] b000 */
718 buf_set_u32(&cmd, 0, 6, 0x0);
719
720 /* virtual address of desired cache line */
721 buf_set_u32(packet, 0, 27, va >> 5);
722
723 memset(&fields, 0, sizeof fields);
724
725 fields[0].num_bits = 6;
726 fields[0].out_value = &cmd;
727
728 fields[1].num_bits = 27;
729 fields[1].out_value = packet;
730
731 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
732
733 return ERROR_OK;
734 }
735
736 static int xscale_update_vectors(struct target *target)
737 {
738 struct xscale_common *xscale = target_to_xscale(target);
739 int i;
740 int retval;
741
742 uint32_t low_reset_branch, high_reset_branch;
743
744 for (i = 1; i < 8; i++) {
745 /* if there's a static vector specified for this exception, override */
746 if (xscale->static_high_vectors_set & (1 << i))
747 xscale->high_vectors[i] = xscale->static_high_vectors[i];
748 else {
749 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
750 if (retval == ERROR_TARGET_TIMEOUT)
751 return retval;
752 if (retval != ERROR_OK) {
753 /* Some of these reads will fail as part of normal execution */
754 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
755 }
756 }
757 }
758
759 for (i = 1; i < 8; i++) {
760 if (xscale->static_low_vectors_set & (1 << i))
761 xscale->low_vectors[i] = xscale->static_low_vectors[i];
762 else {
763 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
764 if (retval == ERROR_TARGET_TIMEOUT)
765 return retval;
766 if (retval != ERROR_OK) {
767 /* Some of these reads will fail as part of normal execution */
768 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
769 }
770 }
771 }
772
773 /* calculate branches to debug handler */
774 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
775 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
776
777 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
778 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
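	/* The arithmetic above is just the ARM B-instruction offset encoding:
	 * imm24 = (destination - vector_address - 8) >> 2, because the pipeline
	 * PC reads eight bytes ahead and word-aligned offsets drop the low two
	 * bits.  For example, with a (hypothetical) handler_address of
	 * 0xfe000800, the low reset vector branches to 0xfe000820, giving
	 * imm24 = ((0xfe000820 - 0x0 - 0x8) >> 2) & 0xffffff.
	 */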
779
780 /* invalidate and load exception vectors in mini i-cache */
781 xscale_invalidate_ic_line(target, 0x0);
782 xscale_invalidate_ic_line(target, 0xffff0000);
783
784 xscale_load_ic(target, 0x0, xscale->low_vectors);
785 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
786
787 return ERROR_OK;
788 }
789
790 static int xscale_arch_state(struct target *target)
791 {
792 struct xscale_common *xscale = target_to_xscale(target);
793 struct arm *arm = &xscale->arm;
794
795 static const char *state[] = {
796 "disabled", "enabled"
797 };
798
799 static const char *arch_dbg_reason[] = {
800 "", "\n(processor reset)", "\n(trace buffer full)"
801 };
802
803 if (arm->common_magic != ARM_COMMON_MAGIC) {
804 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
805 return ERROR_COMMAND_SYNTAX_ERROR;
806 }
807
808 arm_arch_state(target);
809 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
810 state[xscale->armv4_5_mmu.mmu_enabled],
811 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
812 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
813 arch_dbg_reason[xscale->arch_debug_reason]);
814
815 return ERROR_OK;
816 }
817
818 static int xscale_poll(struct target *target)
819 {
820 int retval = ERROR_OK;
821
822 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING)) {
823 enum target_state previous_state = target->state;
824 retval = xscale_read_tx(target, 0);
825 if (retval == ERROR_OK) {
826
827 /* there's data to read from the tx register, we entered debug state */
828 target->state = TARGET_HALTED;
829
830 /* process debug entry, fetching current mode regs */
831 retval = xscale_debug_entry(target);
832 } else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE) {
833 LOG_USER("error while polling TX register, reset CPU");
 834 			/* here we "lie" so GDB won't get stuck and a reset can be performed */
835 target->state = TARGET_HALTED;
836 }
837
838 /* debug_entry could have overwritten target state (i.e. immediate resume)
839 * don't signal event handlers in that case
840 */
841 if (target->state != TARGET_HALTED)
842 return ERROR_OK;
843
844 /* if target was running, signal that we halted
845 * otherwise we reentered from debug execution */
846 if (previous_state == TARGET_RUNNING)
847 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
848 else
849 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
850 }
851
852 return retval;
853 }
854
855 static int xscale_debug_entry(struct target *target)
856 {
857 struct xscale_common *xscale = target_to_xscale(target);
858 struct arm *arm = &xscale->arm;
859 uint32_t pc;
860 uint32_t buffer[10];
861 unsigned i;
862 int retval;
863 uint32_t moe;
864
865 /* clear external dbg break (will be written on next DCSR read) */
866 xscale->external_debug_break = 0;
867 retval = xscale_read_dcsr(target);
868 if (retval != ERROR_OK)
869 return retval;
870
871 /* get r0, pc, r1 to r7 and cpsr */
872 retval = xscale_receive(target, buffer, 10);
873 if (retval != ERROR_OK)
874 return retval;
875
876 /* move r0 from buffer to register cache */
877 buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32, buffer[0]);
878 arm->core_cache->reg_list[0].dirty = 1;
879 arm->core_cache->reg_list[0].valid = 1;
880 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
881
882 /* move pc from buffer to register cache */
883 buf_set_u32(arm->pc->value, 0, 32, buffer[1]);
884 arm->pc->dirty = 1;
885 arm->pc->valid = 1;
886 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
887
888 /* move data from buffer to register cache */
889 for (i = 1; i <= 7; i++) {
890 buf_set_u32(arm->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
891 arm->core_cache->reg_list[i].dirty = 1;
892 arm->core_cache->reg_list[i].valid = 1;
893 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
894 }
895
896 arm_set_cpsr(arm, buffer[9]);
897 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
898
899 if (!is_arm_mode(arm->core_mode)) {
900 target->state = TARGET_UNKNOWN;
901 LOG_ERROR("cpsr contains invalid mode value - communication failure");
902 return ERROR_TARGET_FAILURE;
903 }
904 LOG_DEBUG("target entered debug state in %s mode",
905 arm_mode_name(arm->core_mode));
906
907 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
908 if (arm->spsr) {
909 xscale_receive(target, buffer, 8);
910 buf_set_u32(arm->spsr->value, 0, 32, buffer[7]);
911 arm->spsr->dirty = false;
912 arm->spsr->valid = true;
913 } else {
914 /* r8 to r14, but no spsr */
915 xscale_receive(target, buffer, 7);
916 }
917
918 /* move data from buffer to right banked register in cache */
919 for (i = 8; i <= 14; i++) {
920 struct reg *r = arm_reg_current(arm, i);
921
922 buf_set_u32(r->value, 0, 32, buffer[i - 8]);
923 r->dirty = false;
924 r->valid = true;
925 }
926
927 /* mark xscale regs invalid to ensure they are retrieved from the
928 * debug handler if requested */
929 for (i = 0; i < xscale->reg_cache->num_regs; i++)
930 xscale->reg_cache->reg_list[i].valid = 0;
931
932 /* examine debug reason */
933 xscale_read_dcsr(target);
934 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
935
936 /* stored PC (for calculating fixup) */
937 pc = buf_get_u32(arm->pc->value, 0, 32);
938
939 switch (moe) {
940 case 0x0: /* Processor reset */
941 target->debug_reason = DBG_REASON_DBGRQ;
942 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
943 pc -= 4;
944 break;
945 case 0x1: /* Instruction breakpoint hit */
946 target->debug_reason = DBG_REASON_BREAKPOINT;
947 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
948 pc -= 4;
949 break;
950 case 0x2: /* Data breakpoint hit */
951 target->debug_reason = DBG_REASON_WATCHPOINT;
952 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
953 pc -= 4;
954 break;
955 case 0x3: /* BKPT instruction executed */
956 target->debug_reason = DBG_REASON_BREAKPOINT;
957 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
958 pc -= 4;
959 break;
960 case 0x4: /* Ext. debug event */
961 target->debug_reason = DBG_REASON_DBGRQ;
962 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
963 pc -= 4;
964 break;
 965 		case 0x5:	/* Vector trap occurred */
966 target->debug_reason = DBG_REASON_BREAKPOINT;
967 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
968 pc -= 4;
969 break;
970 case 0x6: /* Trace buffer full break */
971 target->debug_reason = DBG_REASON_DBGRQ;
972 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
973 pc -= 4;
974 break;
975 case 0x7: /* Reserved (may flag Hot-Debug support) */
976 default:
977 LOG_ERROR("Method of Entry is 'Reserved'");
978 exit(-1);
979 break;
980 }
981
982 /* apply PC fixup */
983 buf_set_u32(arm->pc->value, 0, 32, pc);
984
985 /* on the first debug entry, identify cache type */
986 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1) {
987 uint32_t cache_type_reg;
988
989 /* read cp15 cache type register */
990 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
991 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value,
992 0,
993 32);
994
995 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
996 }
997
998 /* examine MMU and Cache settings
999 * read cp15 control register */
1000 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1001 xscale->cp15_control_reg =
1002 buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1003 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1004 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1005 (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1006 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1007 (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1008
1009 /* tracing enabled, read collected trace data */
1010 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
1011 xscale_read_trace(target);
1012
1013 /* Resume if entered debug due to buffer fill and we're still collecting
1014 * trace data. Note that a debug exception due to trace buffer full
1015 * can only happen in fill mode. */
1016 if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL) {
1017 if (--xscale->trace.fill_counter > 0)
1018 xscale_resume(target, 1, 0x0, 1, 0);
1019 } else /* entered debug for other reason; reset counter */
1020 xscale->trace.fill_counter = 0;
1021 }
1022
1023 return ERROR_OK;
1024 }
1025
1026 static int xscale_halt(struct target *target)
1027 {
1028 struct xscale_common *xscale = target_to_xscale(target);
1029
1030 LOG_DEBUG("target->state: %s",
1031 target_state_name(target));
1032
1033 if (target->state == TARGET_HALTED) {
1034 LOG_DEBUG("target was already halted");
1035 return ERROR_OK;
1036 } else if (target->state == TARGET_UNKNOWN) {
 1037 		/* this must not happen for an XScale target */
1038 LOG_ERROR("target was in unknown state when halt was requested");
1039 return ERROR_TARGET_INVALID;
1040 } else if (target->state == TARGET_RESET)
1041 LOG_DEBUG("target->state == TARGET_RESET");
1042 else {
1043 /* assert external dbg break */
1044 xscale->external_debug_break = 1;
1045 xscale_read_dcsr(target);
1046
1047 target->debug_reason = DBG_REASON_DBGRQ;
1048 }
1049
1050 return ERROR_OK;
1051 }
1052
1053 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1054 {
1055 struct xscale_common *xscale = target_to_xscale(target);
1056 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1057 int retval;
1058
1059 if (xscale->ibcr0_used) {
1060 struct breakpoint *ibcr0_bp =
1061 breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1062
1063 if (ibcr0_bp)
1064 xscale_unset_breakpoint(target, ibcr0_bp);
1065 else {
1066 LOG_ERROR(
1067 "BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1068 exit(-1);
1069 }
1070 }
1071
1072 retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1073 if (retval != ERROR_OK)
1074 return retval;
1075
1076 return ERROR_OK;
1077 }
1078
1079 static int xscale_disable_single_step(struct target *target)
1080 {
1081 struct xscale_common *xscale = target_to_xscale(target);
1082 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1083 int retval;
1084
1085 retval = xscale_set_reg_u32(ibcr0, 0x0);
1086 if (retval != ERROR_OK)
1087 return retval;
1088
1089 return ERROR_OK;
1090 }
1091
1092 static void xscale_enable_watchpoints(struct target *target)
1093 {
1094 struct watchpoint *watchpoint = target->watchpoints;
1095
1096 while (watchpoint) {
1097 if (watchpoint->set == 0)
1098 xscale_set_watchpoint(target, watchpoint);
1099 watchpoint = watchpoint->next;
1100 }
1101 }
1102
1103 static void xscale_enable_breakpoints(struct target *target)
1104 {
1105 struct breakpoint *breakpoint = target->breakpoints;
1106
1107 /* set any pending breakpoints */
1108 while (breakpoint) {
1109 if (breakpoint->set == 0)
1110 xscale_set_breakpoint(target, breakpoint);
1111 breakpoint = breakpoint->next;
1112 }
1113 }
1114
1115 static void xscale_free_trace_data(struct xscale_common *xscale)
1116 {
1117 struct xscale_trace_data *td = xscale->trace.data;
1118 while (td) {
1119 struct xscale_trace_data *next_td = td->next;
1120 if (td->entries)
1121 free(td->entries);
1122 free(td);
1123 td = next_td;
1124 }
1125 xscale->trace.data = NULL;
1126 }
1127
1128 static int xscale_resume(struct target *target, int current,
1129 uint32_t address, int handle_breakpoints, int debug_execution)
1130 {
1131 struct xscale_common *xscale = target_to_xscale(target);
1132 struct arm *arm = &xscale->arm;
1133 uint32_t current_pc;
1134 int retval;
1135 int i;
1136
1137 LOG_DEBUG("-");
1138
1139 if (target->state != TARGET_HALTED) {
1140 LOG_WARNING("target not halted");
1141 return ERROR_TARGET_NOT_HALTED;
1142 }
1143
1144 if (!debug_execution)
1145 target_free_all_working_areas(target);
1146
1147 /* update vector tables */
1148 retval = xscale_update_vectors(target);
1149 if (retval != ERROR_OK)
1150 return retval;
1151
1152 /* current = 1: continue on current pc, otherwise continue at <address> */
1153 if (!current)
1154 buf_set_u32(arm->pc->value, 0, 32, address);
1155
1156 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1157
1158 /* if we're at the reset vector, we have to simulate the branch */
1159 if (current_pc == 0x0) {
1160 arm_simulate_step(target, NULL);
1161 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1162 }
1163
1164 /* the front-end may request us not to handle breakpoints */
1165 if (handle_breakpoints) {
1166 struct breakpoint *breakpoint;
1167 breakpoint = breakpoint_find(target,
1168 buf_get_u32(arm->pc->value, 0, 32));
1169 if (breakpoint != NULL) {
1170 uint32_t next_pc;
1171 enum trace_mode saved_trace_mode;
1172
1173 /* there's a breakpoint at the current PC, we have to step over it */
1174 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1175 xscale_unset_breakpoint(target, breakpoint);
1176
1177 /* calculate PC of next instruction */
1178 retval = arm_simulate_step(target, &next_pc);
1179 if (retval != ERROR_OK) {
1180 uint32_t current_opcode;
1181 target_read_u32(target, current_pc, &current_opcode);
1182 LOG_ERROR(
1183 "BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
1184 current_opcode);
1185 }
1186
1187 LOG_DEBUG("enable single-step");
1188 xscale_enable_single_step(target, next_pc);
1189
1190 /* restore banked registers */
1191 retval = xscale_restore_banked(target);
1192 if (retval != ERROR_OK)
1193 return retval;
1194
1195 /* send resume request */
1196 xscale_send_u32(target, 0x30);
1197
1198 /* send CPSR */
1199 xscale_send_u32(target,
1200 buf_get_u32(arm->cpsr->value, 0, 32));
1201 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1202 buf_get_u32(arm->cpsr->value, 0, 32));
1203
1204 for (i = 7; i >= 0; i--) {
1205 /* send register */
1206 xscale_send_u32(target,
1207 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1208 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1209 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1210 }
1211
1212 /* send PC */
1213 xscale_send_u32(target,
1214 buf_get_u32(arm->pc->value, 0, 32));
1215 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1216 buf_get_u32(arm->pc->value, 0, 32));
1217
1218 /* disable trace data collection in xscale_debug_entry() */
1219 saved_trace_mode = xscale->trace.mode;
1220 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1221
1222 /* wait for and process debug entry */
1223 xscale_debug_entry(target);
1224
1225 /* re-enable trace buffer, if enabled previously */
1226 xscale->trace.mode = saved_trace_mode;
1227
1228 LOG_DEBUG("disable single-step");
1229 xscale_disable_single_step(target);
1230
1231 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1232 xscale_set_breakpoint(target, breakpoint);
1233 }
1234 }
1235
1236 /* enable any pending breakpoints and watchpoints */
1237 xscale_enable_breakpoints(target);
1238 xscale_enable_watchpoints(target);
1239
1240 /* restore banked registers */
1241 retval = xscale_restore_banked(target);
1242 if (retval != ERROR_OK)
1243 return retval;
1244
1245 /* send resume request (command 0x30 or 0x31)
1246 * clean the trace buffer if it is to be enabled (0x62) */
1247 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
1248 if (xscale->trace.mode == XSCALE_TRACE_FILL) {
1249 /* If trace enabled in fill mode and starting collection of new set
1250 * of buffers, initialize buffer counter and free previous buffers */
1251 if (xscale->trace.fill_counter == 0) {
1252 xscale->trace.fill_counter = xscale->trace.buffer_fill;
1253 xscale_free_trace_data(xscale);
1254 }
1255 } else /* wrap mode; free previous buffer */
1256 xscale_free_trace_data(xscale);
1257
1258 xscale_send_u32(target, 0x62);
1259 xscale_send_u32(target, 0x31);
1260 } else
1261 xscale_send_u32(target, 0x30);
1262
1263 /* send CPSR */
1264 xscale_send_u32(target, buf_get_u32(arm->cpsr->value, 0, 32));
1265 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1266 buf_get_u32(arm->cpsr->value, 0, 32));
1267
1268 for (i = 7; i >= 0; i--) {
1269 /* send register */
1270 xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1271 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1272 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1273 }
1274
1275 /* send PC */
1276 xscale_send_u32(target, buf_get_u32(arm->pc->value, 0, 32));
1277 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1278 buf_get_u32(arm->pc->value, 0, 32));
1279
1280 target->debug_reason = DBG_REASON_NOTHALTED;
1281
1282 if (!debug_execution) {
1283 /* registers are now invalid */
1284 register_cache_invalidate(arm->core_cache);
1285 target->state = TARGET_RUNNING;
1286 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1287 } else {
1288 target->state = TARGET_DEBUG_RUNNING;
1289 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1290 }
1291
1292 LOG_DEBUG("target resumed");
1293
1294 return ERROR_OK;
1295 }
1296
1297 static int xscale_step_inner(struct target *target, int current,
1298 uint32_t address, int handle_breakpoints)
1299 {
1300 struct xscale_common *xscale = target_to_xscale(target);
1301 struct arm *arm = &xscale->arm;
1302 uint32_t next_pc;
1303 int retval;
1304 int i;
1305
1306 target->debug_reason = DBG_REASON_SINGLESTEP;
1307
1308 /* calculate PC of next instruction */
1309 retval = arm_simulate_step(target, &next_pc);
1310 if (retval != ERROR_OK) {
1311 uint32_t current_opcode, current_pc;
1312 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1313
1314 target_read_u32(target, current_pc, &current_opcode);
1315 LOG_ERROR(
1316 "BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
1317 current_opcode);
1318 return retval;
1319 }
1320
1321 LOG_DEBUG("enable single-step");
1322 retval = xscale_enable_single_step(target, next_pc);
1323 if (retval != ERROR_OK)
1324 return retval;
1325
1326 /* restore banked registers */
1327 retval = xscale_restore_banked(target);
1328 if (retval != ERROR_OK)
1329 return retval;
1330
1331 /* send resume request (command 0x30 or 0x31)
1332 * clean the trace buffer if it is to be enabled (0x62) */
1333 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
1334 retval = xscale_send_u32(target, 0x62);
1335 if (retval != ERROR_OK)
1336 return retval;
1337 retval = xscale_send_u32(target, 0x31);
1338 if (retval != ERROR_OK)
1339 return retval;
1340 } else {
1341 retval = xscale_send_u32(target, 0x30);
1342 if (retval != ERROR_OK)
1343 return retval;
1344 }
1345
1346 /* send CPSR */
1347 retval = xscale_send_u32(target,
1348 buf_get_u32(arm->cpsr->value, 0, 32));
1349 if (retval != ERROR_OK)
1350 return retval;
1351 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1352 buf_get_u32(arm->cpsr->value, 0, 32));
1353
1354 for (i = 7; i >= 0; i--) {
1355 /* send register */
1356 retval = xscale_send_u32(target,
1357 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1358 if (retval != ERROR_OK)
1359 return retval;
1360 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i,
1361 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1362 }
1363
1364 /* send PC */
1365 retval = xscale_send_u32(target,
1366 buf_get_u32(arm->pc->value, 0, 32));
1367 if (retval != ERROR_OK)
1368 return retval;
1369 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1370 buf_get_u32(arm->pc->value, 0, 32));
1371
1372 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1373
1374 /* registers are now invalid */
1375 register_cache_invalidate(arm->core_cache);
1376
1377 /* wait for and process debug entry */
1378 retval = xscale_debug_entry(target);
1379 if (retval != ERROR_OK)
1380 return retval;
1381
1382 LOG_DEBUG("disable single-step");
1383 retval = xscale_disable_single_step(target);
1384 if (retval != ERROR_OK)
1385 return retval;
1386
1387 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1388
1389 return ERROR_OK;
1390 }
1391
1392 static int xscale_step(struct target *target, int current,
1393 uint32_t address, int handle_breakpoints)
1394 {
1395 struct arm *arm = target_to_arm(target);
1396 struct breakpoint *breakpoint = NULL;
1397
1398 uint32_t current_pc;
1399 int retval;
1400
1401 if (target->state != TARGET_HALTED) {
1402 LOG_WARNING("target not halted");
1403 return ERROR_TARGET_NOT_HALTED;
1404 }
1405
1406 /* current = 1: continue on current pc, otherwise continue at <address> */
1407 if (!current)
1408 buf_set_u32(arm->pc->value, 0, 32, address);
1409
1410 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1411
1412 /* if we're at the reset vector, we have to simulate the step */
1413 if (current_pc == 0x0) {
1414 retval = arm_simulate_step(target, NULL);
1415 if (retval != ERROR_OK)
1416 return retval;
1417 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1418 LOG_DEBUG("current pc %" PRIx32, current_pc);
1419
1420 target->debug_reason = DBG_REASON_SINGLESTEP;
1421 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1422
1423 return ERROR_OK;
1424 }
1425
1426 /* the front-end may request us not to handle breakpoints */
1427 if (handle_breakpoints)
1428 breakpoint = breakpoint_find(target,
1429 buf_get_u32(arm->pc->value, 0, 32));
1430 if (breakpoint != NULL) {
1431 retval = xscale_unset_breakpoint(target, breakpoint);
1432 if (retval != ERROR_OK)
1433 return retval;
1434 }
1435
1436 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1437 if (retval != ERROR_OK)
1438 return retval;
1439
1440 if (breakpoint)
1441 xscale_set_breakpoint(target, breakpoint);
1442
1443 LOG_DEBUG("target stepped");
1444
1445 return ERROR_OK;
1446
1447 }
1448
1449 static int xscale_assert_reset(struct target *target)
1450 {
1451 struct xscale_common *xscale = target_to_xscale(target);
1452
1453 LOG_DEBUG("target->state: %s",
1454 target_state_name(target));
1455
1456 /* assert reset */
1457 jtag_add_reset(0, 1);
1458
1459 /* sleep 1ms, to be sure we fulfill any requirements */
1460 jtag_add_sleep(1000);
1461 jtag_execute_queue();
1462
1463 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
 1464 	 * end up in T-L-R, which would reset JTAG)
1465 */
1466 xscale_jtag_set_instr(target->tap,
1467 XSCALE_SELDCSR << xscale->xscale_variant,
1468 TAP_IDLE);
1469
1470 /* set Hold reset, Halt mode and Trap Reset */
1471 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1472 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1473 xscale_write_dcsr(target, 1, 0);
1474
1475 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1476 xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
1477 jtag_execute_queue();
1478
1479 target->state = TARGET_RESET;
1480
1481 if (target->reset_halt) {
1482 int retval = target_halt(target);
1483 if (retval != ERROR_OK)
1484 return retval;
1485 }
1486
1487 return ERROR_OK;
1488 }
1489
1490 static int xscale_deassert_reset(struct target *target)
1491 {
1492 struct xscale_common *xscale = target_to_xscale(target);
1493 struct breakpoint *breakpoint = target->breakpoints;
1494
1495 LOG_DEBUG("-");
1496
1497 xscale->ibcr_available = 2;
1498 xscale->ibcr0_used = 0;
1499 xscale->ibcr1_used = 0;
1500
1501 xscale->dbr_available = 2;
1502 xscale->dbr0_used = 0;
1503 xscale->dbr1_used = 0;
1504
1505 /* mark all hardware breakpoints as unset */
1506 while (breakpoint) {
1507 if (breakpoint->type == BKPT_HARD)
1508 breakpoint->set = 0;
1509 breakpoint = breakpoint->next;
1510 }
1511
1512 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1513 xscale_free_trace_data(xscale);
1514
1515 register_cache_invalidate(xscale->arm.core_cache);
1516
 1517 	/* FIXME mark hardware watchpoints as unset too. Also,
1518 * at least some of the XScale registers are invalid...
1519 */
1520
1521 /*
1522 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1523 * contents got invalidated. Safer to force that, so writing new
1524 * contents can't ever fail..
1525 */
1526 {
1527 uint32_t address;
1528 unsigned buf_cnt;
1529 const uint8_t *buffer = xscale_debug_handler;
1530 int retval;
1531
1532 /* release SRST */
1533 jtag_add_reset(0, 0);
1534
1535 /* wait 300ms; 150 and 100ms were not enough */
1536 jtag_add_sleep(300*1000);
1537
1538 jtag_add_runtest(2030, TAP_IDLE);
1539 jtag_execute_queue();
1540
1541 /* set Hold reset, Halt mode and Trap Reset */
1542 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1543 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1544 xscale_write_dcsr(target, 1, 0);
1545
1546 /* Load the debug handler into the mini-icache. Since
1547 * it's using halt mode (not monitor mode), it runs in
1548 * "Special Debug State" for access to registers, memory,
1549 * coprocessors, trace data, etc.
1550 */
1551 address = xscale->handler_address;
1552 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1553 binary_size > 0;
1554 binary_size -= buf_cnt, buffer += buf_cnt) {
1555 uint32_t cache_line[8];
1556 unsigned i;
1557
1558 buf_cnt = binary_size;
1559 if (buf_cnt > 32)
1560 buf_cnt = 32;
1561
1562 for (i = 0; i < buf_cnt; i += 4) {
1563 /* convert LE buffer to host-endian uint32_t */
1564 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1565 }
1566
1567 for (; i < 32; i += 4)
1568 cache_line[i / 4] = 0xe1a08008;
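			/* 0xe1a08008 decodes as "mov r8, r8", used here as a
			 * harmless filler for the unused tail of the cache line */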
1569
1570 /* only load addresses other than the reset vectors */
1571 if ((address % 0x400) != 0x0) {
1572 retval = xscale_load_ic(target, address,
1573 cache_line);
1574 if (retval != ERROR_OK)
1575 return retval;
1576 }
1577
1578 address += buf_cnt;
1579 }
1580 ;
1581
1582 retval = xscale_load_ic(target, 0x0,
1583 xscale->low_vectors);
1584 if (retval != ERROR_OK)
1585 return retval;
1586 retval = xscale_load_ic(target, 0xffff0000,
1587 xscale->high_vectors);
1588 if (retval != ERROR_OK)
1589 return retval;
1590
1591 jtag_add_runtest(30, TAP_IDLE);
1592
1593 jtag_add_sleep(100000);
1594
1595 /* set Hold reset, Halt mode and Trap Reset */
1596 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1597 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1598 xscale_write_dcsr(target, 1, 0);
1599
1600 /* clear Hold reset to let the target run (should enter debug handler) */
1601 xscale_write_dcsr(target, 0, 1);
1602 target->state = TARGET_RUNNING;
1603
1604 if (!target->reset_halt) {
1605 jtag_add_sleep(10000);
1606
1607 /* we should have entered debug now */
1608 xscale_debug_entry(target);
1609 target->state = TARGET_HALTED;
1610
1611 /* resume the target */
1612 xscale_resume(target, 1, 0x0, 1, 0);
1613 }
1614 }
1615
1616 return ERROR_OK;
1617 }
1618
1619 static int xscale_read_core_reg(struct target *target, struct reg *r,
1620 int num, enum arm_mode mode)
1621 {
1622 /** \todo add debug handler support for core register reads */
1623 LOG_ERROR("not implemented");
1624 return ERROR_OK;
1625 }
1626
1627 static int xscale_write_core_reg(struct target *target, struct reg *r,
1628 int num, enum arm_mode mode, uint32_t value)
1629 {
1630 /** \todo add debug handler support for core register writes */
1631 LOG_ERROR("not implemented");
1632 return ERROR_OK;
1633 }
1634
1635 static int xscale_full_context(struct target *target)
1636 {
1637 struct arm *arm = target_to_arm(target);
1638
1639 uint32_t *buffer;
1640
1641 int i, j;
1642
1643 LOG_DEBUG("-");
1644
1645 if (target->state != TARGET_HALTED) {
1646 LOG_WARNING("target not halted");
1647 return ERROR_TARGET_NOT_HALTED;
1648 }
1649
1650 buffer = malloc(4 * 8);
1651
1652 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1653 * we can't enter User mode on an XScale (unpredictable),
1654 * but User shares registers with SYS
1655 */
1656 for (i = 1; i < 7; i++) {
1657 enum arm_mode mode = armv4_5_number_to_mode(i);
1658 bool valid = true;
1659 struct reg *r;
1660
1661 if (mode == ARM_MODE_USR)
1662 continue;
1663
1664 /* check if there are invalid registers in the current mode
1665 */
1666 for (j = 0; valid && j <= 16; j++) {
1667 if (!ARMV4_5_CORE_REG_MODE(arm->core_cache,
1668 mode, j).valid)
1669 valid = false;
1670 }
1671 if (valid)
1672 continue;
1673
1674 /* request banked registers */
1675 xscale_send_u32(target, 0x0);
1676
1677 /* send CPSR for desired bank mode */
1678 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1679
1680 /* get banked registers: r8 to r14; and SPSR
1681 * except in USR/SYS mode
1682 */
1683 if (mode != ARM_MODE_SYS) {
1684 /* SPSR */
1685 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1686 mode, 16);
1687
1688 xscale_receive(target, buffer, 8);
1689
1690 buf_set_u32(r->value, 0, 32, buffer[7]);
1691 r->dirty = false;
1692 r->valid = true;
1693 } else
1694 xscale_receive(target, buffer, 7);
1695
1696 /* move data from buffer to register cache */
1697 for (j = 8; j <= 14; j++) {
1698 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1699 mode, j);
1700
1701 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1702 r->dirty = false;
1703 r->valid = true;
1704 }
1705 }
1706
1707 free(buffer);
1708
1709 return ERROR_OK;
1710 }
1711
1712 static int xscale_restore_banked(struct target *target)
1713 {
1714 struct arm *arm = target_to_arm(target);
1715
1716 int i, j;
1717
1718 if (target->state != TARGET_HALTED) {
1719 LOG_WARNING("target not halted");
1720 return ERROR_TARGET_NOT_HALTED;
1721 }
1722
1723 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1724 * and check if any banked registers need to be written. Ignore
1725 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1726 * an XScale (unpredictable), but they share all registers.
1727 */
1728 for (i = 1; i < 7; i++) {
1729 enum arm_mode mode = armv4_5_number_to_mode(i);
1730 struct reg *r;
1731
1732 if (mode == ARM_MODE_USR)
1733 continue;
1734
1735 /* check if there are dirty registers in this mode */
1736 for (j = 8; j <= 14; j++) {
1737 if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
1738 mode, j).dirty)
1739 goto dirty;
1740 }
1741
1742 /* if not USR/SYS, check if the SPSR needs to be written */
1743 if (mode != ARM_MODE_SYS) {
1744 if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
1745 mode, 16).dirty)
1746 goto dirty;
1747 }
1748
1749 /* there's nothing to flush for this mode */
1750 continue;
1751
1752 dirty:
1753 /* command 0x1: "send banked registers" */
1754 xscale_send_u32(target, 0x1);
1755
1756 /* send CPSR for desired mode */
1757 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1758
1759 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1760 * but this protocol doesn't understand that nuance.
1761 */
1762 for (j = 8; j <= 14; j++) {
1763 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1764 mode, j);
1765 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1766 r->dirty = false;
1767 }
1768
1769 /* send spsr if not in USR/SYS mode */
1770 if (mode != ARM_MODE_SYS) {
1771 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1772 mode, 16);
1773 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1774 r->dirty = false;
1775 }
1776 }
1777
1778 return ERROR_OK;
1779 }
1780
1781 static int xscale_read_memory(struct target *target, uint32_t address,
1782 uint32_t size, uint32_t count, uint8_t *buffer)
1783 {
1784 struct xscale_common *xscale = target_to_xscale(target);
1785 uint32_t *buf32;
1786 uint32_t i;
1787 int retval;
1788
1789 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1790 address,
1791 size,
1792 count);
1793
1794 if (target->state != TARGET_HALTED) {
1795 LOG_WARNING("target not halted");
1796 return ERROR_TARGET_NOT_HALTED;
1797 }
1798
1799 /* sanitize arguments */
1800 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1801 return ERROR_COMMAND_SYNTAX_ERROR;
1802
1803 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1804 return ERROR_TARGET_UNALIGNED_ACCESS;
1805
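	/* Summary of the debug handler's read protocol as driven below (host
	 * words go out through RX, replies come back through TX):
	 *	host -> handler: 0x10 | size    read command, size = 1, 2 or 4
	 *	host -> handler: address        base address of the transfer
	 *	host -> handler: count          number of elements to read
	 *	handler -> host: count words    one 32-bit word per element
	 */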
1806 /* send memory read request (command 0x1n, n: access size) */
1807 retval = xscale_send_u32(target, 0x10 | size);
1808 if (retval != ERROR_OK)
1809 return retval;
1810
1811 /* send base address for read request */
1812 retval = xscale_send_u32(target, address);
1813 if (retval != ERROR_OK)
1814 return retval;
1815
1816 /* send number of requested data words */
1817 retval = xscale_send_u32(target, count);
1818 if (retval != ERROR_OK)
1819 return retval;
1820
1821 /* receive data from target (count times 32-bit words in host endianness) */
1822 buf32 = malloc(4 * count);
1823 retval = xscale_receive(target, buf32, count);
 1824 	if (retval != ERROR_OK) {
 		free(buf32);	/* avoid leaking the receive buffer on error */
 1825 		return retval;
 	}
1826
1827 /* extract data from host-endian buffer into byte stream */
1828 for (i = 0; i < count; i++) {
1829 switch (size) {
1830 case 4:
1831 target_buffer_set_u32(target, buffer, buf32[i]);
1832 buffer += 4;
1833 break;
1834 case 2:
1835 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1836 buffer += 2;
1837 break;
1838 case 1:
1839 *buffer++ = buf32[i] & 0xff;
1840 break;
1841 default:
1842 LOG_ERROR("invalid read size");
1843 return ERROR_COMMAND_SYNTAX_ERROR;
1844 }
1845 }
1846
1847 free(buf32);
1848
1849 /* examine DCSR, to see if Sticky Abort (SA) got set */
1850 retval = xscale_read_dcsr(target);
1851 if (retval != ERROR_OK)
1852 return retval;
1853 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1854 /* clear SA bit */
1855 retval = xscale_send_u32(target, 0x60);
1856 if (retval != ERROR_OK)
1857 return retval;
1858
1859 return ERROR_TARGET_DATA_ABORT;
1860 }
1861
1862 return ERROR_OK;
1863 }
1864
1865 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1866 uint32_t size, uint32_t count, uint8_t *buffer)
1867 {
1868 struct xscale_common *xscale = target_to_xscale(target);
1869
1870 /* with MMU inactive, there are only physical addresses */
1871 if (!xscale->armv4_5_mmu.mmu_enabled)
1872 return xscale_read_memory(target, address, size, count, buffer);
1873
1874 /** \todo: provide a non-stub implementation of this routine. */
1875 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1876 target_name(target), __func__);
1877 return ERROR_FAIL;
1878 }
1879
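/* Memory writes mirror the read protocol: the command word (0x20 | access
 * size), base address and count are sent first, followed by the raw data via
 * xscale_send(); DCSR is then checked for a sticky abort (SA).
 */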
1880 static int xscale_write_memory(struct target *target, uint32_t address,
1881 uint32_t size, uint32_t count, const uint8_t *buffer)
1882 {
1883 struct xscale_common *xscale = target_to_xscale(target);
1884 int retval;
1885
1886 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1887 address,
1888 size,
1889 count);
1890
1891 if (target->state != TARGET_HALTED) {
1892 LOG_WARNING("target not halted");
1893 return ERROR_TARGET_NOT_HALTED;
1894 }
1895
1896 /* sanitize arguments */
1897 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1898 return ERROR_COMMAND_SYNTAX_ERROR;
1899
1900 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1901 return ERROR_TARGET_UNALIGNED_ACCESS;
1902
1903 /* send memory write request (command 0x2n, n: access size) */
1904 retval = xscale_send_u32(target, 0x20 | size);
1905 if (retval != ERROR_OK)
1906 return retval;
1907
1908 /* send base address for write request */
1909 retval = xscale_send_u32(target, address);
1910 if (retval != ERROR_OK)
1911 return retval;
1912
1913 /* send number of data words to be written */
1914 retval = xscale_send_u32(target, count);
1915 if (retval != ERROR_OK)
1916 return retval;
1917
1918 /* extract data from host-endian buffer into byte stream */
1919 #if 0
1920 for (i = 0; i < count; i++) {
1921 switch (size) {
1922 case 4:
1923 value = target_buffer_get_u32(target, buffer);
1924 xscale_send_u32(target, value);
1925 buffer += 4;
1926 break;
1927 case 2:
1928 value = target_buffer_get_u16(target, buffer);
1929 xscale_send_u32(target, value);
1930 buffer += 2;
1931 break;
1932 case 1:
1933 value = *buffer;
1934 xscale_send_u32(target, value);
1935 buffer += 1;
1936 break;
1937 default:
1938 LOG_ERROR("should never get here");
1939 exit(-1);
1940 }
1941 }
1942 #endif
1943 retval = xscale_send(target, buffer, count, size);
1944 if (retval != ERROR_OK)
1945 return retval;
1946
1947 /* examine DCSR, to see if Sticky Abort (SA) got set */
1948 retval = xscale_read_dcsr(target);
1949 if (retval != ERROR_OK)
1950 return retval;
1951 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1952 /* clear SA bit */
1953 retval = xscale_send_u32(target, 0x60);
1954 if (retval != ERROR_OK)
1955 return retval;
1956
1957 LOG_ERROR("data abort writing memory");
1958 return ERROR_TARGET_DATA_ABORT;
1959 }
1960
1961 return ERROR_OK;
1962 }
1963
1964 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1965 uint32_t size, uint32_t count, const uint8_t *buffer)
1966 {
1967 struct xscale_common *xscale = target_to_xscale(target);
1968
1969 /* with MMU inactive, there are only physical addresses */
1970 if (!xscale->armv4_5_mmu.mmu_enabled)
1971 return xscale_write_memory(target, address, size, count, buffer);
1972
1973 /** \todo: provide a non-stub implementation of this routine. */
1974 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1975 target_name(target), __func__);
1976 return ERROR_FAIL;
1977 }
1978
1979 static int xscale_get_ttb(struct target *target, uint32_t *result)
1980 {
1981 struct xscale_common *xscale = target_to_xscale(target);
1982 uint32_t ttb;
1983 int retval;
1984
1985 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1986 if (retval != ERROR_OK)
1987 return retval;
1988 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1989
1990 *result = ttb;
1991
1992 return ERROR_OK;
1993 }
1994
1995 static int xscale_disable_mmu_caches(struct target *target, int mmu,
1996 int d_u_cache, int i_cache)
1997 {
1998 struct xscale_common *xscale = target_to_xscale(target);
1999 uint32_t cp15_control;
2000 int retval;
2001
2002 /* read cp15 control register */
2003 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2004 if (retval != ERROR_OK)
2005 return retval;
2006 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2007
2008 if (mmu)
2009 cp15_control &= ~0x1U;
2010
2011 if (d_u_cache) {
2012 /* clean DCache */
2013 retval = xscale_send_u32(target, 0x50);
2014 if (retval != ERROR_OK)
2015 return retval;
2016 retval = xscale_send_u32(target, xscale->cache_clean_address);
2017 if (retval != ERROR_OK)
2018 return retval;
2019
2020 /* invalidate DCache */
2021 retval = xscale_send_u32(target, 0x51);
2022 if (retval != ERROR_OK)
2023 return retval;
2024
2025 cp15_control &= ~0x4U;
2026 }
2027
2028 if (i_cache) {
2029 /* invalidate ICache */
2030 retval = xscale_send_u32(target, 0x52);
2031 if (retval != ERROR_OK)
2032 return retval;
2033 cp15_control &= ~0x1000U;
2034 }
2035
2036 /* write new cp15 control register */
2037 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2038 if (retval != ERROR_OK)
2039 return retval;
2040
2041 /* execute cpwait to ensure outstanding operations complete */
2042 retval = xscale_send_u32(target, 0x53);
2043 return retval;
2044 }
2045
2046 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2047 int d_u_cache, int i_cache)
2048 {
2049 struct xscale_common *xscale = target_to_xscale(target);
2050 uint32_t cp15_control;
2051 int retval;
2052
2053 /* read cp15 control register */
2054 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2055 if (retval != ERROR_OK)
2056 return retval;
2057 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2058
2059 if (mmu)
2060 cp15_control |= 0x1U;
2061
2062 if (d_u_cache)
2063 cp15_control |= 0x4U;
2064
2065 if (i_cache)
2066 cp15_control |= 0x1000U;
2067
2068 /* write new cp15 control register */
2069 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2070 if (retval != ERROR_OK)
2071 return retval;
2072
2073 /* execute cpwait to ensure outstanding operations complete */
2074 retval = xscale_send_u32(target, 0x53);
2075 return retval;
2076 }
2077
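/* Hardware breakpoints use the two instruction breakpoint registers
 * (IBCR0/IBCR1); bit 0 of the written value enables the comparator.
 * Software breakpoints save the original instruction and patch in the ARM or
 * Thumb BKPT opcode, then clean/invalidate the caches so the core actually
 * fetches the new opcode.
 */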
2078 static int xscale_set_breakpoint(struct target *target,
2079 struct breakpoint *breakpoint)
2080 {
2081 int retval;
2082 struct xscale_common *xscale = target_to_xscale(target);
2083
2084 if (target->state != TARGET_HALTED) {
2085 LOG_WARNING("target not halted");
2086 return ERROR_TARGET_NOT_HALTED;
2087 }
2088
2089 if (breakpoint->set) {
2090 LOG_WARNING("breakpoint already set");
2091 return ERROR_OK;
2092 }
2093
2094 if (breakpoint->type == BKPT_HARD) {
2095 uint32_t value = breakpoint->address | 1;
2096 if (!xscale->ibcr0_used) {
2097 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2098 xscale->ibcr0_used = 1;
2099 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2100 } else if (!xscale->ibcr1_used) {
2101 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2102 xscale->ibcr1_used = 1;
2103 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2104 } else {/* bug: availability previously verified in xscale_add_breakpoint() */
2105 LOG_ERROR("BUG: no hardware comparator available");
2106 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2107 }
2108 } else if (breakpoint->type == BKPT_SOFT) {
2109 if (breakpoint->length == 4) {
2110 /* keep the original instruction in target endianness */
2111 retval = target_read_memory(target, breakpoint->address, 4, 1,
2112 breakpoint->orig_instr);
2113 if (retval != ERROR_OK)
2114 return retval;
2115 /* write the bkpt instruction in target endianness
2116 * (xscale->arm_bkpt is host endian) */
2117 retval = target_write_u32(target, breakpoint->address,
2118 xscale->arm_bkpt);
2119 if (retval != ERROR_OK)
2120 return retval;
2121 } else {
2122 /* keep the original instruction in target endianness */
2123 retval = target_read_memory(target, breakpoint->address, 2, 1,
2124 breakpoint->orig_instr);
2125 if (retval != ERROR_OK)
2126 return retval;
2127 /* write the bkpt instruction in target endianness
2128 * (xscale->thumb_bkpt is host endian) */
2129 retval = target_write_u16(target, breakpoint->address,
2130 xscale->thumb_bkpt);
2131 if (retval != ERROR_OK)
2132 return retval;
2133 }
2134 breakpoint->set = 1;
2135
2136 xscale_send_u32(target, 0x50); /* clean dcache */
2137 xscale_send_u32(target, xscale->cache_clean_address);
2138 xscale_send_u32(target, 0x51); /* invalidate dcache */
2139 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2140 }
2141
2142 return ERROR_OK;
2143 }
2144
2145 static int xscale_add_breakpoint(struct target *target,
2146 struct breakpoint *breakpoint)
2147 {
2148 struct xscale_common *xscale = target_to_xscale(target);
2149
2150 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1)) {
2151 LOG_ERROR("no breakpoint unit available for hardware breakpoint");
2152 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2153 }
2154
2155 if ((breakpoint->length != 2) && (breakpoint->length != 4)) {
2156 LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2157 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2158 }
2159
2160 if (breakpoint->type == BKPT_HARD)
2161 xscale->ibcr_available--;
2162
2163 return xscale_set_breakpoint(target, breakpoint);
2164 }
2165
2166 static int xscale_unset_breakpoint(struct target *target,
2167 struct breakpoint *breakpoint)
2168 {
2169 int retval;
2170 struct xscale_common *xscale = target_to_xscale(target);
2171
2172 if (target->state != TARGET_HALTED) {
2173 LOG_WARNING("target not halted");
2174 return ERROR_TARGET_NOT_HALTED;
2175 }
2176
2177 if (!breakpoint->set) {
2178 LOG_WARNING("breakpoint not set");
2179 return ERROR_OK;
2180 }
2181
2182 if (breakpoint->type == BKPT_HARD) {
2183 if (breakpoint->set == 1) {
2184 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2185 xscale->ibcr0_used = 0;
2186 } else if (breakpoint->set == 2) {
2187 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2188 xscale->ibcr1_used = 0;
2189 }
2190 breakpoint->set = 0;
2191 } else {
2192 /* restore original instruction (kept in target endianness) */
2193 if (breakpoint->length == 4) {
2194 retval = target_write_memory(target, breakpoint->address, 4, 1,
2195 breakpoint->orig_instr);
2196 if (retval != ERROR_OK)
2197 return retval;
2198 } else {
2199 retval = target_write_memory(target, breakpoint->address, 2, 1,
2200 breakpoint->orig_instr);
2201 if (retval != ERROR_OK)
2202 return retval;
2203 }
2204 breakpoint->set = 0;
2205
2206 xscale_send_u32(target, 0x50); /* clean dcache */
2207 xscale_send_u32(target, xscale->cache_clean_address);
2208 xscale_send_u32(target, 0x51); /* invalidate dcache */
2209 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2210 }
2211
2212 return ERROR_OK;
2213 }
2214
2215 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2216 {
2217 struct xscale_common *xscale = target_to_xscale(target);
2218
2219 if (target->state != TARGET_HALTED) {
2220 LOG_ERROR("target not halted");
2221 return ERROR_TARGET_NOT_HALTED;
2222 }
2223
2224 if (breakpoint->set)
2225 xscale_unset_breakpoint(target, breakpoint);
2226
2227 if (breakpoint->type == BKPT_HARD)
2228 xscale->ibcr_available++;
2229
2230 return ERROR_OK;
2231 }
2232
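/* Watchpoints use the two data breakpoint registers (DBR0/DBR1) together with
 * DBCON: the two-bit enable field selects write (0x1), any access (0x2) or
 * read (0x3), and is shifted left by two when DBR1 is used.  For lengths
 * larger than one word, DBR1 instead holds an address mask and DBCON[M]
 * (bit 8) is set.
 */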
2233 static int xscale_set_watchpoint(struct target *target,
2234 struct watchpoint *watchpoint)
2235 {
2236 struct xscale_common *xscale = target_to_xscale(target);
2237 uint32_t enable = 0;
2238 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2239 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2240
2241 if (target->state != TARGET_HALTED) {
2242 LOG_ERROR("target not halted");
2243 return ERROR_TARGET_NOT_HALTED;
2244 }
2245
2246 switch (watchpoint->rw) {
2247 case WPT_READ:
2248 enable = 0x3;
2249 break;
2250 case WPT_ACCESS:
2251 enable = 0x2;
2252 break;
2253 case WPT_WRITE:
2254 enable = 0x1;
2255 break;
2256 default:
2257 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2258 }
2259
2260 /* For a watchpoint spanning more than one word, both DBR registers must
2261 be enlisted, with the second used as a mask. */
2262 if (watchpoint->length > 4) {
2263 if (xscale->dbr0_used || xscale->dbr1_used) {
2264 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2265 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2266 }
2267
2268 /* Write mask value to DBR1, based on the length argument.
2269 * Address bits ignored by the comparator are those set in mask. */
2270 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2271 watchpoint->length - 1);
2272 xscale->dbr1_used = 1;
2273 enable |= 0x100; /* DBCON[M] */
2274 }
2275
2276 if (!xscale->dbr0_used) {
2277 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2278 dbcon_value |= enable;
2279 xscale_set_reg_u32(dbcon, dbcon_value);
2280 watchpoint->set = 1;
2281 xscale->dbr0_used = 1;
2282 } else if (!xscale->dbr1_used) {
2283 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2284 dbcon_value |= enable << 2;
2285 xscale_set_reg_u32(dbcon, dbcon_value);
2286 watchpoint->set = 2;
2287 xscale->dbr1_used = 1;
2288 } else {
2289 LOG_ERROR("BUG: no hardware comparator available");
2290 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2291 }
2292
2293 return ERROR_OK;
2294 }
2295
2296 static int xscale_add_watchpoint(struct target *target,
2297 struct watchpoint *watchpoint)
2298 {
2299 struct xscale_common *xscale = target_to_xscale(target);
2300
2301 if (xscale->dbr_available < 1) {
2302 LOG_ERROR("no more watchpoint registers available");
2303 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2304 }
2305
2306 if (watchpoint->value)
2307 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2308
2309 /* check that length is a power of two */
2310 for (uint32_t len = watchpoint->length; len != 1; len /= 2) {
2311 if (len % 2) {
2312 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2313 return ERROR_COMMAND_ARGUMENT_INVALID;
2314 }
2315 }
2316
2317 if (watchpoint->length == 4) { /* single word watchpoint */
2318 xscale->dbr_available--;/* one DBR reg used */
2319 return ERROR_OK;
2320 }
2321
2322 /* watchpoints across multiple words require both DBR registers */
2323 if (xscale->dbr_available < 2) {
2324 LOG_ERROR("insufficient watchpoint registers available");
2325 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2326 }
2327
2328 if (watchpoint->length > watchpoint->address) {
2329 LOG_ERROR("xscale does not support watchpoints with length "
2330 "greater than address");
2331 return ERROR_COMMAND_ARGUMENT_INVALID;
2332 }
2333
2334 xscale->dbr_available = 0;
2335 return ERROR_OK;
2336 }
2337
2338 static int xscale_unset_watchpoint(struct target *target,
2339 struct watchpoint *watchpoint)
2340 {
2341 struct xscale_common *xscale = target_to_xscale(target);
2342 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2343 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2344
2345 if (target->state != TARGET_HALTED) {
2346 LOG_WARNING("target not halted");
2347 return ERROR_TARGET_NOT_HALTED;
2348 }
2349
2350 if (!watchpoint->set) {
2351 LOG_WARNING("breakpoint not set");
2352 return ERROR_OK;
2353 }
2354
2355 if (watchpoint->set == 1) {
2356 if (watchpoint->length > 4) {
2357 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2358 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2359 } else
2360 dbcon_value &= ~0x3;
2361
2362 xscale_set_reg_u32(dbcon, dbcon_value);
2363 xscale->dbr0_used = 0;
2364 } else if (watchpoint->set == 2) {
2365 dbcon_value &= ~0xc;
2366 xscale_set_reg_u32(dbcon, dbcon_value);
2367 xscale->dbr1_used = 0;
2368 }
2369 watchpoint->set = 0;
2370
2371 return ERROR_OK;
2372 }
2373
2374 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2375 {
2376 struct xscale_common *xscale = target_to_xscale(target);
2377
2378 if (target->state != TARGET_HALTED) {
2379 LOG_ERROR("target not halted");
2380 return ERROR_TARGET_NOT_HALTED;
2381 }
2382
2383 if (watchpoint->set)
2384 xscale_unset_watchpoint(target, watchpoint);
2385
2386 if (watchpoint->length > 4)
2387 xscale->dbr_available++;/* both DBR regs now available */
2388
2389 xscale->dbr_available++;
2390
2391 return ERROR_OK;
2392 }
2393
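/* DCSR, TX and RX are accessed directly over JTAG; TXRXCTRL cannot be read or
 * written explicitly.  All other debug registers are transferred by the debug
 * handler using the CP read/write requests (commands 0x40/0x41) seen below.
 */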
2394 static int xscale_get_reg(struct reg *reg)
2395 {
2396 struct xscale_reg *arch_info = reg->arch_info;
2397 struct target *target = arch_info->target;
2398 struct xscale_common *xscale = target_to_xscale(target);
2399
2400 /* DCSR, TX and RX are accessible via JTAG */
2401 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2402 return xscale_read_dcsr(arch_info->target);
2403 else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2404 /* 1 = consume register content */
2405 return xscale_read_tx(arch_info->target, 1);
2406 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2407 /* can't read from RX register (host -> debug handler) */
2408 return ERROR_OK;
2409 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2410 /* can't (explicitly) read from TXRXCTRL register */
2411 return ERROR_OK;
2412 } else { /* Other DBG registers have to be transferred by the debug handler
2413 * send CP read request (command 0x40) */
2414 xscale_send_u32(target, 0x40);
2415
2416 /* send CP register number */
2417 xscale_send_u32(target, arch_info->dbg_handler_number);
2418
2419 /* read register value */
2420 xscale_read_tx(target, 1);
2421 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2422
2423 reg->dirty = 0;
2424 reg->valid = 1;
2425 }
2426
2427 return ERROR_OK;
2428 }
2429
2430 static int xscale_set_reg(struct reg *reg, uint8_t *buf)
2431 {
2432 struct xscale_reg *arch_info = reg->arch_info;
2433 struct target *target = arch_info->target;
2434 struct xscale_common *xscale = target_to_xscale(target);
2435 uint32_t value = buf_get_u32(buf, 0, 32);
2436
2437 /* DCSR, TX and RX are accessible via JTAG */
2438 if (strcmp(reg->name, "XSCALE_DCSR") == 0) {
2439 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2440 return xscale_write_dcsr(arch_info->target, -1, -1);
2441 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2442 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2443 return xscale_write_rx(arch_info->target);
2444 } else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2445 /* can't write to TX register (debug-handler -> host) */
2446 return ERROR_OK;
2447 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2448 /* can't (explicitly) write to TXRXCTRL register */
2449 return ERROR_OK;
2450 } else { /* Other DBG registers have to be transferred by the debug handler
2451 * send CP write request (command 0x41) */
2452 xscale_send_u32(target, 0x41);
2453
2454 /* send CP register number */
2455 xscale_send_u32(target, arch_info->dbg_handler_number);
2456
2457 /* send CP register value */
2458 xscale_send_u32(target, value);
2459 buf_set_u32(reg->value, 0, 32, value);
2460 }
2461
2462 return ERROR_OK;
2463 }
2464
2465 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2466 {
2467 struct xscale_common *xscale = target_to_xscale(target);
2468 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2469 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2470
2471 /* send CP write request (command 0x41) */
2472 xscale_send_u32(target, 0x41);
2473
2474 /* send CP register number */
2475 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2476
2477 /* send CP register value */
2478 xscale_send_u32(target, value);
2479 buf_set_u32(dcsr->value, 0, 32, value);
2480
2481 return ERROR_OK;
2482 }
2483
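/* The debug handler returns the 256-entry trace buffer followed by the two
 * checkpoint registers (258 words total).  The buffer is scanned backwards
 * below: an indirect-branch message (upper nybble 0x9 or 0xd) is preceded by
 * four entries holding the bytes of the branch target address, which are
 * flagged so they are not misread as message bytes.
 */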
2484 static int xscale_read_trace(struct target *target)
2485 {
2486 struct xscale_common *xscale = target_to_xscale(target);
2487 struct arm *arm = &xscale->arm;
2488 struct xscale_trace_data **trace_data_p;
2489
2490 /* 258 words from debug handler
2491 * 256 trace buffer entries
2492 * 2 checkpoint addresses
2493 */
2494 uint32_t trace_buffer[258];
2495 int is_address[256];
2496 int i, j;
2497 unsigned int num_checkpoints = 0;
2498
2499 if (target->state != TARGET_HALTED) {
2500 LOG_WARNING("target must be stopped to read trace data");
2501 return ERROR_TARGET_NOT_HALTED;
2502 }
2503
2504 /* send read trace buffer command (command 0x61) */
2505 xscale_send_u32(target, 0x61);
2506
2507 /* receive trace buffer content */
2508 xscale_receive(target, trace_buffer, 258);
2509
2510 /* parse buffer backwards to identify address entries */
2511 for (i = 255; i >= 0; i--) {
2512 /* also count number of checkpointed entries */
2513 if ((trace_buffer[i] & 0xe0) == 0xc0)
2514 num_checkpoints++;
2515
2516 is_address[i] = 0;
2517 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2518 ((trace_buffer[i] & 0xf0) == 0xd0)) {
2519 if (i > 0)
2520 is_address[--i] = 1;
2521 if (i > 0)
2522 is_address[--i] = 1;
2523 if (i > 0)
2524 is_address[--i] = 1;
2525 if (i > 0)
2526 is_address[--i] = 1;
2527 }
2528 }
2529
2530
2531 /* search first non-zero entry that is not part of an address */
2532 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2533 ;
2534
2535 if (j == 256) {
2536 LOG_DEBUG("no trace data collected");
2537 return ERROR_XSCALE_NO_TRACE_DATA;
2538 }
2539
2540 /* account for possible partial address at buffer start (wrap mode only) */
2541 if (is_address[0]) { /* first entry is address; complete set of 4? */
2542 i = 1;
2543 while (i < 4)
2544 if (!is_address[i++])
2545 break;
2546 if (i < 4)
2547 j += i; /* partial address; can't use it */
2548 }
2549
2550 /* if first valid entry is indirect branch, can't use that either (no address) */
2551 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2552 j++;
2553
2554 /* walk linked list to terminating entry */
2555 for (trace_data_p = &xscale->trace.data; *trace_data_p;
2556 trace_data_p = &(*trace_data_p)->next)
2557 ;
2558
2559 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2560 (*trace_data_p)->next = NULL;
2561 (*trace_data_p)->chkpt0 = trace_buffer[256];
2562 (*trace_data_p)->chkpt1 = trace_buffer[257];
2563 (*trace_data_p)->last_instruction = buf_get_u32(arm->pc->value, 0, 32);
2564 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2565 (*trace_data_p)->depth = 256 - j;
2566 (*trace_data_p)->num_checkpoints = num_checkpoints;
2567
2568 for (i = j; i < 256; i++) {
2569 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2570 if (is_address[i])
2571 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2572 else
2573 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2574 }
2575
2576 return ERROR_OK;
2577 }
2578
2579 static int xscale_read_instruction(struct target *target, uint32_t pc,
2580 struct arm_instruction *instruction)
2581 {
2582 struct xscale_common *const xscale = target_to_xscale(target);
2583 int i;
2584 int section = -1;
2585 size_t size_read;
2586 uint32_t opcode;
2587 int retval;
2588
2589 if (!xscale->trace.image)
2590 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2591
2592 /* search for the section the current instruction belongs to */
2593 for (i = 0; i < xscale->trace.image->num_sections; i++) {
2594 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2595 (xscale->trace.image->sections[i].base_address +
2596 xscale->trace.image->sections[i].size > pc)) {
2597 section = i;
2598 break;
2599 }
2600 }
2601
2602 if (section == -1) {
2603 /* current instruction couldn't be found in the image */
2604 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2605 }
2606
2607 if (xscale->trace.core_state == ARM_STATE_ARM) {
2608 uint8_t buf[4];
2609 retval = image_read_section(xscale->trace.image, section,
2610 pc - xscale->trace.image->sections[section].base_address,
2611 4, buf, &size_read);
2612 if (retval != ERROR_OK) {
2613 LOG_ERROR("error while reading instruction");
2614 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2615 }
2616 opcode = target_buffer_get_u32(target, buf);
2617 arm_evaluate_opcode(opcode, pc, instruction);
2618 } else if (xscale->trace.core_state == ARM_STATE_THUMB) {
2619 uint8_t buf[2];
2620 retval = image_read_section(xscale->trace.image, section,
2621 pc - xscale->trace.image->sections[section].base_address,
2622 2, buf, &size_read);
2623 if (retval != ERROR_OK) {
2624 LOG_ERROR("error while reading instruction");
2625 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2626 }
2627 opcode = target_buffer_get_u16(target, buf);
2628 thumb_evaluate_opcode(opcode, pc, instruction);
2629 } else {
2630 LOG_ERROR("BUG: unknown core state encountered");
2631 exit(-1);
2632 }
2633
2634 return ERROR_OK;
2635 }
2636
2637 /* Extract address encoded into trace data.
2638 * Write the result to the address referenced by argument 'target', or 0 if the address is incomplete. */
2639 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2640 int i, uint32_t *target)
2641 {
2642 /* if there are fewer than four entries prior to the indirect branch message
2643 * we can't extract the address */
2644 if (i < 4)
2645 *target = 0;
2646 else {
2647 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2648 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2649 }
2650 }
2651
2652 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2653 struct arm_instruction *instruction,
2654 struct command_context *cmd_ctx)
2655 {
2656 int retval = xscale_read_instruction(target, pc, instruction);
2657 if (retval == ERROR_OK)
2658 command_print(cmd_ctx, "%s", instruction->text);
2659 else
2660 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2661 }
2662
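/* Trace entries carry a message byte whose upper nybble identifies the event,
 * as decoded below: 0-7 exception vectors, 8 direct branch, 9 indirect branch,
 * 12/13 checkpointed direct/indirect branch, and 15 roll-over; the lower
 * nybble carries an instruction count that the replay loop walks through.
 */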
2663 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2664 {
2665 struct xscale_common *xscale = target_to_xscale(target);
2666 struct xscale_trace_data *trace_data = xscale->trace.data;
2667 int i, retval;
2668 uint32_t breakpoint_pc;
2669 struct arm_instruction instruction;
2670 uint32_t current_pc = 0;/* initialized when address determined */
2671
2672 if (!xscale->trace.image)
2673 LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
2674
2675 /* loop for each trace buffer that was loaded from target */
2676 while (trace_data) {
2677 int chkpt = 0; /* incremented as checkpointed entries found */
2678 int j;
2679
2680 /* FIXME: set this to correct mode when trace buffer is first enabled */
2681 xscale->trace.core_state = ARM_STATE_ARM;
2682
2683 /* loop for each entry in this trace buffer */
2684 for (i = 0; i < trace_data->depth; i++) {
2685 int exception = 0;
2686 uint32_t chkpt_reg = 0x0;
2687 uint32_t branch_target = 0;
2688 int count;
2689
2690 /* trace entry type is upper nybble of 'message byte' */
2691 int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
2692
2693 /* Target addresses of indirect branches are written into buffer
2694 * before the message byte representing the branch. Skip past it */
2695 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2696 continue;
2697
2698 switch (trace_msg_type) {
2699 case 0: /* Exceptions */
2700 case 1:
2701 case 2:
2702 case 3:
2703 case 4:
2704 case 5:
2705 case 6:
2706 case 7:
2707 exception = (trace_data->entries[i].data & 0x70) >> 4;
2708
2709 /* FIXME: vector table may be at ffff0000 */
2710 branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
2711 break;
2712
2713 case 8: /* Direct Branch */
2714 break;
2715
2716 case 9: /* Indirect Branch */
2717 xscale_branch_address(trace_data, i, &branch_target);
2718 break;
2719
2720 case 13: /* Checkpointed Indirect Branch */
2721 xscale_branch_address(trace_data, i, &branch_target);
2722 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2723 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2725 else
2726 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2728
2729 chkpt++;
2730 break;
2731
2732 case 12: /* Checkpointed Direct Branch */
2733 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2734 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2736 else
2737 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2739
2740 /* if no current_pc, checkpoint will be starting point */
2741 if (current_pc == 0)
2742 branch_target = chkpt_reg;
2743
2744 chkpt++;
2745 break;
2746
2747 case 15:/* Roll-over */
2748 break;
2749
2750 default:/* Reserved */
2751 LOG_WARNING("trace is suspect: invalid trace message byte");
2752 continue;
2753
2754 }
2755
2756 /* If we don't have the current_pc yet, but we did get the branch target
2757 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
2758 * then we can start displaying instructions at the next iteration, with
2759 * branch_target as the starting point.
2760 */
2761 if (current_pc == 0) {
2762 current_pc = branch_target; /* remains 0 unless branch_target obtained */
2763 continue;
2764 }
2765
2766 /* We have current_pc. Read and display the instructions from the image.
2767 * First, display count instructions (lower nybble of message byte). */
2768 count = trace_data->entries[i].data & 0x0f;
2769 for (j = 0; j < count; j++) {
2770 xscale_display_instruction(target, current_pc, &instruction,
2771 cmd_ctx);
2772 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2773 }
2774
2775 /* An additional instruction is implicitly added to count for
2776 * rollover and some exceptions: undef, swi, prefetch abort. */
2777 if ((trace_msg_type == 15) || (exception > 0 && exception < 4)) {
2778 xscale_display_instruction(target, current_pc, &instruction,
2779 cmd_ctx);
2780 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2781 }
2782
2783 if (trace_msg_type == 15) /* rollover */
2784 continue;
2785
2786 if (exception) {
2787 command_print(cmd_ctx, "--- exception %i ---", exception);
2788 continue;
2789 }
2790
2791 /* not exception or rollover; next instruction is a branch and is
2792 * not included in the count */
2793 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2794
2795 /* for direct branches, extract branch destination from instruction */
2796 if ((trace_msg_type == 8) || (trace_msg_type == 12)) {
2797 retval = xscale_read_instruction(target, current_pc, &instruction);
2798 if (retval == ERROR_OK)
2799 current_pc = instruction.info.b_bl_bx_blx.target_address;
2800 else
2801 current_pc = 0; /* branch destination unknown */
2802
2803 /* direct branch w/ checkpoint; can also get from checkpoint reg */
2804 if (trace_msg_type == 12) {
2805 if (current_pc == 0)
2806 current_pc = chkpt_reg;
2807 else if (current_pc != chkpt_reg) /* sanity check */
2808 LOG_WARNING("trace is suspect: checkpoint register "
2809 "inconsistent with adddress from image");
2810 }
2811
2812 if (current_pc == 0)
2813 command_print(cmd_ctx, "address unknown");
2814
2815 continue;
2816 }
2817
2818 /* indirect branch; the branch destination was read from trace buffer */
2819 if ((trace_msg_type == 9) || (trace_msg_type == 13)) {
2820 current_pc = branch_target;
2821
2822 /* sanity check (checkpoint reg is redundant) */
2823 if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
2824 LOG_WARNING("trace is suspect: checkpoint register "
2825 "inconsistent with address from trace buffer");
2826 }
2827
2828 } /* END: for (i = 0; i < trace_data->depth; i++) */
2829
2830 breakpoint_pc = trace_data->last_instruction; /* used below */
2831 trace_data = trace_data->next;
2832
2833 } /* END: while (trace_data) */
2834
2835 /* Finally... display all instructions up to the value of the pc when the
2836 * debug break occurred (saved when trace data was collected from target).
2837 * This is necessary because the trace only records execution branches and 16
2838 * consecutive instructions (rollovers), so the last few are typically missed.
2839 */
2840 if (current_pc == 0)
2841 return ERROR_OK;/* current_pc was never found */
2842
2843 /* how many instructions remaining? */
2844 int gap_count = (breakpoint_pc - current_pc) /
2845 (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
2846
2847 /* should never be negative or over 16, but verify */
2848 if (gap_count < 0 || gap_count > 16) {
2849 LOG_WARNING("trace is suspect: excessive gap at end of trace");
2850 return ERROR_OK;/* bail; large number or negative value no good */
2851 }
2852
2853 /* display remaining instructions */
2854 for (i = 0; i < gap_count; i++) {
2855 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2856 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2857 }
2858
2859 return ERROR_OK;
2860 }
2861
2862 static const struct reg_arch_type xscale_reg_type = {
2863 .get = xscale_get_reg,
2864 .set = xscale_set_reg,
2865 };
2866
2867 static void xscale_build_reg_cache(struct target *target)
2868 {
2869 struct xscale_common *xscale = target_to_xscale(target);
2870 struct arm *arm = &xscale->arm;
2871 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2872 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2873 int i;
2874 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2875
2876 (*cache_p) = arm_build_reg_cache(target, arm);
2877
2878 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2879 cache_p = &(*cache_p)->next;
2880
2881 /* fill in values for the xscale reg cache */
2882 (*cache_p)->name = "XScale registers";
2883 (*cache_p)->next = NULL;
2884 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2885 (*cache_p)->num_regs = num_regs;
2886
2887 for (i = 0; i < num_regs; i++) {
2888 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2889 (*cache_p)->reg_list[i].value = calloc(4, 1);
2890 (*cache_p)->reg_list[i].dirty = 0;
2891 (*cache_p)->reg_list[i].valid = 0;
2892 (*cache_p)->reg_list[i].size = 32;
2893 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2894 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2895 arch_info[i] = xscale_reg_arch_info[i];
2896 arch_info[i].target = target;
2897 }
2898
2899 xscale->reg_cache = (*cache_p);
2900 }
2901
2902 static int xscale_init_target(struct command_context *cmd_ctx,
2903 struct target *target)
2904 {
2905 xscale_build_reg_cache(target);
2906 return ERROR_OK;
2907 }
2908
2909 static int xscale_init_arch_info(struct target *target,
2910 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2911 {
2912 struct arm *arm;
2913 uint32_t high_reset_branch, low_reset_branch;
2914 int i;
2915
2916 arm = &xscale->arm;
2917
2918 /* store architecture specific data */
2919 xscale->common_magic = XSCALE_COMMON_MAGIC;
2920
2921 /* we don't really *need* a variant param ... */
2922 if (variant) {
2923 int ir_length = 0;
2924
2925 if (strcmp(variant, "pxa250") == 0
2926 || strcmp(variant, "pxa255") == 0
2927 || strcmp(variant, "pxa26x") == 0)
2928 ir_length = 5;
2929 else if (strcmp(variant, "pxa27x") == 0
2930 || strcmp(variant, "ixp42x") == 0
2931 || strcmp(variant, "ixp45x") == 0
2932 || strcmp(variant, "ixp46x") == 0)
2933 ir_length = 7;
2934 else if (strcmp(variant, "pxa3xx") == 0)
2935 ir_length = 11;
2936 else
2937 LOG_WARNING("%s: unrecognized variant %s",
2938 tap->dotted_name, variant);
2939
2940 if (ir_length && ir_length != tap->ir_length) {
2941 LOG_WARNING("%s: IR length for %s is %d; fixing",
2942 tap->dotted_name, variant, ir_length);
2943 tap->ir_length = ir_length;
2944 }
2945 }
2946
2947 /* PXA3xx shifts the JTAG instructions */
2948 if (tap->ir_length == 11)
2949 xscale->xscale_variant = XSCALE_PXA3XX;
2950 else
2951 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
2952
2953 /* the debug handler isn't installed (and thus not running) at this time */
2954 xscale->handler_address = 0xfe000800;
2955
2956 /* clear the vectors we keep locally for reference */
2957 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2958 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2959
2960 /* no user-specified vectors have been configured yet */
2961 xscale->static_low_vectors_set = 0x0;
2962 xscale->static_high_vectors_set = 0x0;
2963
2964 /* calculate branches to debug handler */
2965 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2966 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2967
2968 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2969 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2970
2971 for (i = 1; i <= 7; i++) {
2972 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2973 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2974 }
2975
2976 /* 64kB aligned region used for DCache cleaning */
2977 xscale->cache_clean_address = 0xfffe0000;
2978
2979 xscale->hold_rst = 0;
2980 xscale->external_debug_break = 0;
2981
2982 xscale->ibcr_available = 2;
2983 xscale->ibcr0_used = 0;
2984 xscale->ibcr1_used = 0;
2985
2986 xscale->dbr_available = 2;
2987 xscale->dbr0_used = 0;
2988 xscale->dbr1_used = 0;
2989
2990 LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
2991 target_name(target));
2992
2993 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2994 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2995
2996 xscale->vector_catch = 0x1;
2997
2998 xscale->trace.data = NULL;
2999 xscale->trace.image = NULL;
3000 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3001 xscale->trace.buffer_fill = 0;
3002 xscale->trace.fill_counter = 0;
3003
3004 /* prepare ARMv4/5 specific information */
3005 arm->arch_info = xscale;
3006 arm->core_type = ARM_MODE_ANY;
3007 arm->read_core_reg = xscale_read_core_reg;
3008 arm->write_core_reg = xscale_write_core_reg;
3009 arm->full_context = xscale_full_context;
3010
3011 arm_init_arch_info(target, arm);
3012
3013 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3014 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3015 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3016 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3017 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3018 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3019 xscale->armv4_5_mmu.has_tiny_pages = 1;
3020 xscale->armv4_5_mmu.mmu_enabled = 0;
3021
3022 return ERROR_OK;
3023 }
3024
3025 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3026 {
3027 struct xscale_common *xscale;
3028
3029 if (sizeof xscale_debug_handler - 1 > 0x800) {
3030 LOG_ERROR("debug_handler.bin: larger than 2kb");
3031 return ERROR_FAIL;
3032 }
3033
3034 xscale = calloc(1, sizeof(*xscale));
3035 if (!xscale)
3036 return ERROR_FAIL;
3037
3038 return xscale_init_arch_info(target, xscale, target->tap,
3039 target->variant);
3040 }
3041
3042 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3043 {
3044 struct target *target = NULL;
3045 struct xscale_common *xscale;
3046 int retval;
3047 uint32_t handler_address;
3048
3049 if (CMD_ARGC < 2)
3050 return ERROR_COMMAND_SYNTAX_ERROR;
3051
3052 target = get_target(CMD_ARGV[0]);
3053 if (target == NULL) {
3054 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3055 return ERROR_FAIL;
3056 }
3057
3058 xscale = target_to_xscale(target);
3059 retval = xscale_verify_pointer(CMD_CTX, xscale);
3060 if (retval != ERROR_OK)
3061 return retval;
3062
3063 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3064
3065 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3066 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3067 xscale->handler_address = handler_address;
3068 else {
3069 LOG_ERROR(
3070 "xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3071 return ERROR_FAIL;
3072 }
3073
3074 return ERROR_OK;
3075 }
3076
3077 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3078 {
3079 struct target *target = NULL;
3080 struct xscale_common *xscale;
3081 int retval;
3082 uint32_t cache_clean_address;
3083
3084 if (CMD_ARGC < 2)
3085 return ERROR_COMMAND_SYNTAX_ERROR;
3086
3087 target = get_target(CMD_ARGV[0]);
3088 if (target == NULL) {
3089 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3090 return ERROR_FAIL;
3091 }
3092 xscale = target_to_xscale(target);
3093 retval = xscale_verify_pointer(CMD_CTX, xscale);
3094 if (retval != ERROR_OK)
3095 return retval;
3096
3097 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3098
3099 if (cache_clean_address & 0xffff)
3100 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3101 else
3102 xscale->cache_clean_address = cache_clean_address;
3103
3104 return ERROR_OK;
3105 }
3106
3107 COMMAND_HANDLER(xscale_handle_cache_info_command)
3108 {
3109 struct target *target = get_current_target(CMD_CTX);
3110 struct xscale_common *xscale = target_to_xscale(target);
3111 int retval;
3112
3113 retval = xscale_verify_pointer(CMD_CTX, xscale);
3114 if (retval != ERROR_OK)
3115 return retval;
3116
3117 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3118 }
3119
3120 static int xscale_virt2phys(struct target *target,
3121 uint32_t virtual, uint32_t *physical)
3122 {
3123 struct xscale_common *xscale = target_to_xscale(target);
3124 uint32_t cb;
3125
3126 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3127 LOG_ERROR(xscale_not);
3128 return ERROR_TARGET_INVALID;
3129 }
3130
3131 uint32_t ret;
3132 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3133 virtual, &cb, &ret);
3134 if (retval != ERROR_OK)
3135 return retval;
3136 *physical = ret;
3137 return ERROR_OK;
3138 }
3139
3140 static int xscale_mmu(struct target *target, int *enabled)
3141 {
3142 struct xscale_common *xscale = target_to_xscale(target);
3143
3144 if (target->state != TARGET_HALTED) {
3145 LOG_ERROR("Target not halted");
3146 return ERROR_TARGET_INVALID;
3147 }
3148 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3149 return ERROR_OK;
3150 }
3151
3152 COMMAND_HANDLER(xscale_handle_mmu_command)
3153 {
3154 struct target *target = get_current_target(CMD_CTX);
3155 struct xscale_common *xscale = target_to_xscale(target);
3156 int retval;
3157
3158 retval = xscale_verify_pointer(CMD_CTX, xscale);
3159 if (retval != ERROR_OK)
3160 return retval;
3161
3162 if (target->state != TARGET_HALTED) {
3163 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3164 return ERROR_OK;
3165 }
3166
3167 if (CMD_ARGC >= 1) {
3168 bool enable;
3169 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3170 if (enable)
3171 xscale_enable_mmu_caches(target, 1, 0, 0);
3172 else
3173 xscale_disable_mmu_caches(target, 1, 0, 0);
3174 xscale->armv4_5_mmu.mmu_enabled = enable;
3175 }
3176
3177 command_print(CMD_CTX, "mmu %s",
3178 (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3179
3180 return ERROR_OK;
3181 }
3182
3183 COMMAND_HANDLER(xscale_handle_idcache_command)
3184 {
3185 struct target *target = get_current_target(CMD_CTX);
3186 struct xscale_common *xscale = target_to_xscale(target);
3187
3188 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3189 if (retval != ERROR_OK)
3190 return retval;
3191
3192 if (target->state != TARGET_HALTED) {
3193 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3194 return ERROR_OK;
3195 }
3196
3197 bool icache = false;
3198 if (strcmp(CMD_NAME, "icache") == 0)
3199 icache = true;
3200 if (CMD_ARGC >= 1) {
3201 bool enable;
3202 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3203 if (icache) {
3204 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3205 if (enable)
3206 xscale_enable_mmu_caches(target, 0, 0, 1);
3207 else
3208 xscale_disable_mmu_caches(target, 0, 0, 1);
3209 } else {
3210 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3211 if (enable)
3212 xscale_enable_mmu_caches(target, 0, 1, 0);
3213 else
3214 xscale_disable_mmu_caches(target, 0, 1, 0);
3215 }
3216 }
3217
3218 bool enabled = icache ?
3219 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3220 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3221 const char *msg = enabled ? "enabled" : "disabled";
3222 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3223
3224 return ERROR_OK;
3225 }
3226
3227 static const struct {
3228 char name[15];
3229 unsigned mask;
3230 } vec_ids[] = {
3231 { "fiq", DCSR_TF, },
3232 { "irq", DCSR_TI, },
3233 { "dabt", DCSR_TD, },
3234 { "pabt", DCSR_TA, },
3235 { "swi", DCSR_TS, },
3236 { "undef", DCSR_TU, },
3237 { "reset", DCSR_TR, },
3238 };
3239
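/* Example usage, using the vector names from the vec_ids table above, e.g.:
 *     xscale vector_catch all
 *     xscale vector_catch dabt pabt undef
 */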
3240 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3241 {
3242 struct target *target = get_current_target(CMD_CTX);
3243 struct xscale_common *xscale = target_to_xscale(target);
3244 int retval;
3245 uint32_t dcsr_value;
3246 uint32_t catch = 0;
3247 struct reg *dcsr_reg = &xscale->reg_cache->reg_list[XSCALE_DCSR];
3248
3249 retval = xscale_verify_pointer(CMD_CTX, xscale);
3250 if (retval != ERROR_OK)
3251 return retval;
3252
3253 dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
3254 if (CMD_ARGC > 0) {
3255 if (CMD_ARGC == 1) {
3256 if (strcmp(CMD_ARGV[0], "all") == 0) {
3257 catch = DCSR_TRAP_MASK;
3258 CMD_ARGC--;
3259 } else if (strcmp(CMD_ARGV[0], "none") == 0) {
3260 catch = 0;
3261 CMD_ARGC--;
3262 }
3263 }
3264 while (CMD_ARGC-- > 0) {
3265 unsigned i;
3266 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
3267 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name))
3268 continue;
3269 catch |= vec_ids[i].mask;
3270 break;
3271 }
3272 if (i == ARRAY_SIZE(vec_ids)) {
3273 LOG_ERROR("No vector '%s'", CMD_ARGV[CMD_ARGC]);
3274 return ERROR_COMMAND_SYNTAX_ERROR;
3275 }
3276 }
3277 *(uint32_t *)(dcsr_reg->value) &= ~DCSR_TRAP_MASK;
3278 *(uint32_t *)(dcsr_reg->value) |= catch;
3279 xscale_write_dcsr(target, -1, -1);
3280 }
3281
3282 dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
3283 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
3284 command_print(CMD_CTX, "%15s: %s", vec_ids[i].name,
3285 (dcsr_value & vec_ids[i].mask) ? "catch" : "ignore");
3286 }
3287
3288 return ERROR_OK;
3289 }
3290
3291
3292 COMMAND_HANDLER(xscale_handle_vector_table_command)
3293 {
3294 struct target *target = get_current_target(CMD_CTX);
3295 struct xscale_common *xscale = target_to_xscale(target);
3296 int err = 0;
3297 int retval;
3298
3299 retval = xscale_verify_pointer(CMD_CTX, xscale);
3300 if (retval != ERROR_OK)
3301 return retval;
3302
3303 if (CMD_ARGC == 0) { /* print current settings */
3304 int idx;
3305
3306 command_print(CMD_CTX, "active user-set static vectors:");
3307 for (idx = 1; idx < 8; idx++)
3308 if (xscale->static_low_vectors_set & (1 << idx))
3309 command_print(CMD_CTX,
3310 "low %d: 0x%" PRIx32,
3311 idx,
3312 xscale->static_low_vectors[idx]);
3313 for (idx = 1; idx < 8; idx++)
3314 if (xscale->static_high_vectors_set & (1 << idx))
3315 command_print(CMD_CTX,
3316 "high %d: 0x%" PRIx32,
3317 idx,
3318 xscale->static_high_vectors[idx]);
3319 return ERROR_OK;
3320 }
3321
3322 if (CMD_ARGC != 3)
3323 err = 1;
3324 else {
3325 int idx;
3326 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3327 uint32_t vec;
3328 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3329
3330 if (idx < 1 || idx >= 8)
3331 err = 1;
3332
3333 if (!err && strcmp(CMD_ARGV[0], "low") == 0) {
3334 xscale->static_low_vectors_set |= (1<<idx);
3335 xscale->static_low_vectors[idx] = vec;
3336 } else if (!err && (strcmp(CMD_ARGV[0], "high") == 0)) {
3337 xscale->static_high_vectors_set |= (1<<idx);
3338 xscale->static_high_vectors[idx] = vec;
3339 } else
3340 err = 1;
3341 }
3342
3343 if (err)
3344 return ERROR_COMMAND_SYNTAX_ERROR;
3345
3346 return ERROR_OK;
3347 }
3348
3349
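/* Example usage, matching the "['enable'|'disable' ['fill' [number]|'wrap']]"
 * syntax registered below, e.g.:
 *     xscale trace_buffer enable fill 2
 *     xscale trace_buffer enable wrap
 *     xscale trace_buffer disable
 */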
3350 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3351 {
3352 struct target *target = get_current_target(CMD_CTX);
3353 struct xscale_common *xscale = target_to_xscale(target);
3354 uint32_t dcsr_value;
3355 int retval;
3356
3357 retval = xscale_verify_pointer(CMD_CTX, xscale);
3358 if (retval != ERROR_OK)
3359 return retval;
3360
3361 if (target->state != TARGET_HALTED) {
3362 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3363 return ERROR_OK;
3364 }
3365
3366 if (CMD_ARGC >= 1) {
3367 if (strcmp("enable", CMD_ARGV[0]) == 0)
3368 xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
3369 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3370 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3371 else
3372 return ERROR_COMMAND_SYNTAX_ERROR;
3373 }
3374
3375 if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3376 if (strcmp("fill", CMD_ARGV[1]) == 0) {
3377 int buffcount = 1; /* default */
3378 if (CMD_ARGC >= 3)
3379 COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
3380 if (buffcount < 1) { /* invalid */
3381 command_print(CMD_CTX, "fill buffer count must be > 0");
3382 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3383 return ERROR_COMMAND_SYNTAX_ERROR;
3384 }
3385 xscale->trace.buffer_fill = buffcount;
3386 xscale->trace.mode = XSCALE_TRACE_FILL;
3387 } else if (strcmp("wrap", CMD_ARGV[1]) == 0)
3388 xscale->trace.mode = XSCALE_TRACE_WRAP;
3389 else {
3390 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3391 return ERROR_COMMAND_SYNTAX_ERROR;
3392 }
3393 }
3394
3395 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3396 char fill_string[12];
3397 sprintf(fill_string, "fill %d", xscale->trace.buffer_fill);
3398 command_print(CMD_CTX, "trace buffer enabled (%s)",
3399 (xscale->trace.mode == XSCALE_TRACE_FILL)
3400 ? fill_string : "wrap");
3401 } else
3402 command_print(CMD_CTX, "trace buffer disabled");
3403
3404 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3405 if (xscale->trace.mode == XSCALE_TRACE_FILL)
3406 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3407 else
3408 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3409
3410 return ERROR_OK;
3411 }
3412
3413 COMMAND_HANDLER(xscale_handle_trace_image_command)
3414 {
3415 struct target *target = get_current_target(CMD_CTX);
3416 struct xscale_common *xscale = target_to_xscale(target);
3417 int retval;
3418
3419 if (CMD_ARGC < 1)
3420 return ERROR_COMMAND_SYNTAX_ERROR;
3421
3422 retval = xscale_verify_pointer(CMD_CTX, xscale);
3423 if (retval != ERROR_OK)
3424 return retval;
3425
3426 if (xscale->trace.image) {
3427 image_close(xscale->trace.image);
3428 free(xscale->trace.image);
3429 command_print(CMD_CTX, "previously loaded image found and closed");
3430 }
3431
3432 xscale->trace.image = malloc(sizeof(struct image));
3433 xscale->trace.image->base_address_set = 0;
3434 xscale->trace.image->start_address_set = 0;
3435
3436 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3437 if (CMD_ARGC >= 2) {
3438 xscale->trace.image->base_address_set = 1;
3439 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3440 } else
3441 xscale->trace.image->base_address_set = 0;
3442
3443 if (image_open(xscale->trace.image, CMD_ARGV[0],
3444 (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK) {
3445 free(xscale->trace.image);
3446 xscale->trace.image = NULL;
3447 return ERROR_OK;
3448 }
3449
3450 return ERROR_OK;
3451 }
3452
3453 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3454 {
3455 struct target *target = get_current_target(CMD_CTX);
3456 struct xscale_common *xscale = target_to_xscale(target);
3457 struct xscale_trace_data *trace_data;
3458 struct fileio file;
3459 int retval;
3460
3461 retval = xscale_verify_pointer(CMD_CTX, xscale);
3462 if (retval != ERROR_OK)
3463 return retval;
3464
3465 if (target->state != TARGET_HALTED) {
3466 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3467 return ERROR_OK;
3468 }
3469
3470 if (CMD_ARGC < 1)
3471 return ERROR_COMMAND_SYNTAX_ERROR;
3472
3473 trace_data = xscale->trace.data;
3474
3475 if (!trace_data) {
3476 command_print(CMD_CTX, "no trace data collected");
3477 return ERROR_OK;
3478 }
3479
3480 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3481 return ERROR_OK;
3482
3483 while (trace_data) {
3484 int i;
3485
3486 fileio_write_u32(&file, trace_data->chkpt0);
3487 fileio_write_u32(&file, trace_data->chkpt1);
3488 fileio_write_u32(&file, trace_data->last_instruction);
3489 fileio_write_u32(&file, trace_data->depth);
3490
3491 for (i = 0; i < trace_data->depth; i++)
3492 fileio_write_u32(&file, trace_data->entries[i].data |
3493 ((trace_data->entries[i].type & 0xffff) << 16));
3494
3495 trace_data = trace_data->next;
3496 }
3497
3498 fileio_close(&file);
3499
3500 return ERROR_OK;
3501 }
3502
3503 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3504 {
3505 struct target *target = get_current_target(CMD_CTX);
3506 struct xscale_common *xscale = target_to_xscale(target);
3507 int retval;
3508
3509 retval = xscale_verify_pointer(CMD_CTX, xscale);
3510 if (retval != ERROR_OK)
3511 return retval;
3512
3513 xscale_analyze_trace(target, CMD_CTX);
3514
3515 return ERROR_OK;
3516 }
3517
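/* Example usage, matching the "register [value]" syntax registered below and
 * the cp15-to-OpenOCD register mapping in this handler, e.g.:
 *     xscale cp15 1              (read the control register)
 *     xscale cp15 1 0x00002078   (write it; the value shown is hypothetical)
 */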
3518 COMMAND_HANDLER(xscale_handle_cp15)
3519 {
3520 struct target *target = get_current_target(CMD_CTX);
3521 struct xscale_common *xscale = target_to_xscale(target);
3522 int retval;
3523
3524 retval = xscale_verify_pointer(CMD_CTX, xscale);
3525 if (retval != ERROR_OK)
3526 return retval;
3527
3528 if (target->state != TARGET_HALTED) {
3529 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3530 return ERROR_OK;
3531 }
3532 uint32_t reg_no = 0;
3533 struct reg *reg = NULL;
3534 if (CMD_ARGC > 0) {
3535 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3536 /*translate from xscale cp15 register no to openocd register*/
3537 switch (reg_no) {
3538 case 0:
3539 reg_no = XSCALE_MAINID;
3540 break;
3541 case 1:
3542 reg_no = XSCALE_CTRL;
3543 break;
3544 case 2:
3545 reg_no = XSCALE_TTB;
3546 break;
3547 case 3:
3548 reg_no = XSCALE_DAC;
3549 break;
3550 case 5:
3551 reg_no = XSCALE_FSR;
3552 break;
3553 case 6:
3554 reg_no = XSCALE_FAR;
3555 break;
3556 case 13:
3557 reg_no = XSCALE_PID;
3558 break;
3559 case 15:
3560 reg_no = XSCALE_CPACCESS;
3561 break;
3562 default:
3563 command_print(CMD_CTX, "invalid register number");
3564 return ERROR_COMMAND_SYNTAX_ERROR;
3565 }
3566 reg = &xscale->reg_cache->reg_list[reg_no];
3567
3568 }
3569 if (CMD_ARGC == 1) {
3570 uint32_t value;
3571
3572 /* read the selected cp15 register */
3573 xscale_get_reg(reg);
3574 value = buf_get_u32(reg->value, 0, 32);
3575 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size),
3576 value);
3577 } else if (CMD_ARGC == 2) {
3578 uint32_t value;
3579 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3580
3581 /* send CP write request (command 0x41) */
3582 xscale_send_u32(target, 0x41);
3583
3584 /* send CP register number */
3585 xscale_send_u32(target, reg_no);
3586
3587 /* send CP register value */
3588 xscale_send_u32(target, value);
3589
3590 /* execute cpwait to ensure outstanding operations complete */
3591 xscale_send_u32(target, 0x53);
3592 } else
3593 return ERROR_COMMAND_SYNTAX_ERROR;
3594
3595 return ERROR_OK;
3596 }
3597
3598 static const struct command_registration xscale_exec_command_handlers[] = {
3599 {
3600 .name = "cache_info",
3601 .handler = xscale_handle_cache_info_command,
3602 .mode = COMMAND_EXEC,
3603 .help = "display information about CPU caches",
3604 },
3605 {
3606 .name = "mmu",
3607 .handler = xscale_handle_mmu_command,
3608 .mode = COMMAND_EXEC,
3609 .help = "enable or disable the MMU",
3610 .usage = "['enable'|'disable']",
3611 },
3612 {
3613 .name = "icache",
3614 .handler = xscale_handle_idcache_command,
3615 .mode = COMMAND_EXEC,
3616 .help = "display ICache state, optionally enabling or "
3617 "disabling it",
3618 .usage = "['enable'|'disable']",
3619 },
3620 {
3621 .name = "dcache",
3622 .handler = xscale_handle_idcache_command,
3623 .mode = COMMAND_EXEC,
3624 .help = "display DCache state, optionally enabling or "
3625 "disabling it",
3626 .usage = "['enable'|'disable']",
3627 },
3628 {
3629 .name = "vector_catch",
3630 .handler = xscale_handle_vector_catch_command,
3631 .mode = COMMAND_EXEC,
3632 .help = "set or display mask of vectors "
3633 "that should trigger debug entry",
3634 .usage = "['all'|'none'|'fiq'|'irq'|'dabt'|'pabt'|'swi'|'undef'|'reset']",
3635 },
3636 {
3637 .name = "vector_table",
3638 .handler = xscale_handle_vector_table_command,
3639 .mode = COMMAND_EXEC,
3640 .help = "set vector table entry in mini-ICache, "
3641 "or display current tables",
3642 .usage = "[('high'|'low') index code]",
3643 },
3644 {
3645 .name = "trace_buffer",
3646 .handler = xscale_handle_trace_buffer_command,
3647 .mode = COMMAND_EXEC,
3648 .help = "display trace buffer status, enable or disable "
3649 "tracing, and optionally reconfigure trace mode",
3650 .usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
3651 },
3652 {
3653 .name = "dump_trace",
3654 .handler = xscale_handle_dump_trace_command,
3655 .mode = COMMAND_EXEC,
3656 .help = "dump content of trace buffer to file",
3657 .usage = "filename",
3658 },
3659 {
3660 .name = "analyze_trace",
3661 .handler = xscale_handle_analyze_trace_buffer_command,
3662 .mode = COMMAND_EXEC,
3663 .help = "analyze content of trace buffer",
3664 .usage = "",
3665 },
3666 {
3667 .name = "trace_image",
3668 .handler = xscale_handle_trace_image_command,
3669 .mode = COMMAND_EXEC,
3670 .help = "load image from file to address (default 0)",
3671 .usage = "filename [offset [filetype]]",
3672 },
3673 {
3674 .name = "cp15",
3675 .handler = xscale_handle_cp15,
3676 .mode = COMMAND_EXEC,
3677 .help = "Read or write coprocessor 15 register.",
3678 .usage = "register [value]",
3679 },
3680 COMMAND_REGISTRATION_DONE
3681 };
3682 static const struct command_registration xscale_any_command_handlers[] = {
3683 {
3684 .name = "debug_handler",
3685 .handler = xscale_handle_debug_handler_command,
3686 .mode = COMMAND_ANY,
3687 .help = "Change address used for debug handler.",
3688 .usage = "<target> <address>",
3689 },
3690 {
3691 .name = "cache_clean_address",
3692 .handler = xscale_handle_cache_clean_address_command,
3693 .mode = COMMAND_ANY,
3694 .help = "Change address used for cleaning data cache.",
3695 .usage = "address",
3696 },
3697 {
3698 .chain = xscale_exec_command_handlers,
3699 },
3700 COMMAND_REGISTRATION_DONE
3701 };
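/* The COMMAND_ANY handlers above are normally issued from a board or
 * target config script; illustrative values (not taken from this file):
 *
 *   xscale debug_handler 0 0xfe000800
 *   xscale cache_clean_address 0xfffe0000
 */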
3702 static const struct command_registration xscale_command_handlers[] = {
3703 {
3704 .chain = arm_command_handlers,
3705 },
3706 {
3707 .name = "xscale",
3708 .mode = COMMAND_ANY,
3709 .help = "xscale command group",
3710 .usage = "",
3711 .chain = xscale_any_command_handlers,
3712 },
3713 COMMAND_REGISTRATION_DONE
3714 };
3715
3716 struct target_type xscale_target = {
3717 .name = "xscale",
3718
3719 .poll = xscale_poll,
3720 .arch_state = xscale_arch_state,
3721
3722 .halt = xscale_halt,
3723 .resume = xscale_resume,
3724 .step = xscale_step,
3725
3726 .assert_reset = xscale_assert_reset,
3727 .deassert_reset = xscale_deassert_reset,
3728
3729 /* REVISIT on some cores, allow exporting iwmmxt registers ... */
3730 .get_gdb_reg_list = arm_get_gdb_reg_list,
3731
3732 .read_memory = xscale_read_memory,
3733 .read_phys_memory = xscale_read_phys_memory,
3734 .write_memory = xscale_write_memory,
3735 .write_phys_memory = xscale_write_phys_memory,
3736
3737 .checksum_memory = arm_checksum_memory,
3738 .blank_check_memory = arm_blank_check_memory,
3739
3740 .run_algorithm = armv4_5_run_algorithm,
3741
3742 .add_breakpoint = xscale_add_breakpoint,
3743 .remove_breakpoint = xscale_remove_breakpoint,
3744 .add_watchpoint = xscale_add_watchpoint,
3745 .remove_watchpoint = xscale_remove_watchpoint,
3746
3747 .commands = xscale_command_handlers,
3748 .target_create = xscale_target_create,
3749 .init_target = xscale_init_target,
3750
3751 .virt2phys = xscale_virt2phys,
3752 .mmu = xscale_mmu
3753 };
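/* Illustrative target creation in a config script using this target type
 * (the target and TAP names below are examples, not taken from this file):
 *
 *   target create xscale.cpu xscale -endian little -chain-position xscale.cpu
 */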
