Make OpenOCD build using -Og.
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
23 ***************************************************************************/
24
25 #ifdef HAVE_CONFIG_H
26 #include "config.h"
27 #endif
28
29 #include "breakpoints.h"
30 #include "xscale.h"
31 #include "target_type.h"
32 #include "arm_jtag.h"
33 #include "arm_simulator.h"
34 #include "arm_disassembler.h"
35 #include <helper/time_support.h>
36 #include "register.h"
37 #include "image.h"
38 #include "arm_opcodes.h"
39 #include "armv4_5.h"
40
41 /*
42 * Important XScale documents available as of October 2009 include:
43 *
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
48 *
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
56 *
57 * Chip-specific microarchitecture documents may also be useful.
58 */
59
60 /* forward declarations */
61 static int xscale_resume(struct target *, int current,
62 uint32_t address, int handle_breakpoints, int debug_execution);
63 static int xscale_debug_entry(struct target *);
64 static int xscale_restore_banked(struct target *);
65 static int xscale_get_reg(struct reg *reg);
66 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
67 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
68 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
69 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
70 static int xscale_read_trace(struct target *);
71
72 /* This XScale "debug handler" is loaded into the processor's
73 * mini-ICache, which is 2K of code writable only via JTAG.
74 */
75 static const uint8_t xscale_debug_handler[] = {
76 #include "xscale_debug.inc"
77 };
78
79 static const char *const xscale_reg_list[] = {
80 "XSCALE_MAINID", /* 0 */
81 "XSCALE_CACHETYPE",
82 "XSCALE_CTRL",
83 "XSCALE_AUXCTRL",
84 "XSCALE_TTB",
85 "XSCALE_DAC",
86 "XSCALE_FSR",
87 "XSCALE_FAR",
88 "XSCALE_PID",
89 "XSCALE_CPACCESS",
90 "XSCALE_IBCR0", /* 10 */
91 "XSCALE_IBCR1",
92 "XSCALE_DBR0",
93 "XSCALE_DBR1",
94 "XSCALE_DBCON",
95 "XSCALE_TBREG",
96 "XSCALE_CHKPT0",
97 "XSCALE_CHKPT1",
98 "XSCALE_DCSR",
99 "XSCALE_TX",
100 "XSCALE_RX", /* 20 */
101 "XSCALE_TXRXCTRL",
102 };
103
104 static const struct xscale_reg xscale_reg_arch_info[] = {
105 {XSCALE_MAINID, NULL},
106 {XSCALE_CACHETYPE, NULL},
107 {XSCALE_CTRL, NULL},
108 {XSCALE_AUXCTRL, NULL},
109 {XSCALE_TTB, NULL},
110 {XSCALE_DAC, NULL},
111 {XSCALE_FSR, NULL},
112 {XSCALE_FAR, NULL},
113 {XSCALE_PID, NULL},
114 {XSCALE_CPACCESS, NULL},
115 {XSCALE_IBCR0, NULL},
116 {XSCALE_IBCR1, NULL},
117 {XSCALE_DBR0, NULL},
118 {XSCALE_DBR1, NULL},
119 {XSCALE_DBCON, NULL},
120 {XSCALE_TBREG, NULL},
121 {XSCALE_CHKPT0, NULL},
122 {XSCALE_CHKPT1, NULL},
123 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
124 {-1, NULL}, /* TX accessed via JTAG */
125 {-1, NULL}, /* RX accessed via JTAG */
126 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
127 };
128
129 /* convenience wrapper to access XScale specific registers */
/* Convenience wrapper: write a 32-bit value into an XScale-specific
 * register by packing it into a scratch buffer and delegating the real
 * work to xscale_set_reg(). */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t packed[4];

	buf_set_u32(packed, 0, 32, value);

	return xscale_set_reg(reg, packed);
}
138
139 static const char xscale_not[] = "target is not an XScale";
140
141 static int xscale_verify_pointer(struct command_context *cmd_ctx,
142 struct xscale_common *xscale)
143 {
144 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
145 command_print(cmd_ctx, xscale_not);
146 return ERROR_TARGET_INVALID;
147 }
148 return ERROR_OK;
149 }
150
/* Queue an IR scan selecting @new_instr in the TAP's instruction register,
 * ending in @end_state.  The scan is skipped when the TAP already holds
 * that instruction, so callers may invoke this unconditionally.
 * Note: the scan is only queued here; it runs on jtag_execute_queue().
 */
static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
{
	assert(tap != NULL);

	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
		struct scan_field field;
		uint8_t scratch[4];

		memset(&field, 0, sizeof field);
		field.num_bits = tap->ir_length;
		field.out_value = scratch;
		/* NOTE(review): scratch is a local buffer referenced by a queued
		 * scan; this presumably relies on jtag_add_ir_scan() copying
		 * out_value into the command queue -- confirm in jtag core. */
		buf_set_u32(scratch, 0, field.num_bits, new_instr);

		jtag_add_ir_scan(tap, &field, end_state);
	}

	return ERROR_OK;
}
169
/* Read the Debug Control and Status Register (DCSR) over JTAG into the
 * register cache, while also driving the hold_rst and external_debug_break
 * control bits from the cached per-target state.
 *
 * The DR chain is 3 + 32 + 1 bits.  Two scans are performed: the first
 * captures the DCSR value (with check masks on the side fields), the
 * second writes the just-read value straight back.
 *
 * Returns ERROR_OK or a JTAG error code.
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_DRPAUSE);

	/* bit 1: hold core in reset; bit 2: request external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	/* capture DCSR directly into the register cache value buffer */
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* cache now reflects hardware */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
231
232
/* JTAG completion callback: convert one just-captured 32-bit word, stored
 * as raw scan bytes at @arg, into host byte order *in place* -- the same
 * storage is read as bytes and rewritten as a uint32_t. */
static void xscale_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;
	*((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}
238
239 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
240 {
241 if (num_words == 0)
242 return ERROR_COMMAND_SYNTAX_ERROR;
243
244 struct xscale_common *xscale = target_to_xscale(target);
245 int retval = ERROR_OK;
246 tap_state_t path[3];
247 struct scan_field fields[3];
248 uint8_t *field0 = malloc(num_words * 1);
249 uint8_t field0_check_value = 0x2;
250 uint8_t field0_check_mask = 0x6;
251 uint32_t *field1 = malloc(num_words * 4);
252 uint8_t field2_check_value = 0x0;
253 uint8_t field2_check_mask = 0x1;
254 int words_done = 0;
255 int words_scheduled = 0;
256 int i;
257
258 path[0] = TAP_DRSELECT;
259 path[1] = TAP_DRCAPTURE;
260 path[2] = TAP_DRSHIFT;
261
262 memset(&fields, 0, sizeof fields);
263
264 fields[0].num_bits = 3;
265 uint8_t tmp;
266 fields[0].in_value = &tmp;
267 fields[0].check_value = &field0_check_value;
268 fields[0].check_mask = &field0_check_mask;
269
270 fields[1].num_bits = 32;
271
272 fields[2].num_bits = 1;
273 uint8_t tmp2;
274 fields[2].in_value = &tmp2;
275 fields[2].check_value = &field2_check_value;
276 fields[2].check_mask = &field2_check_mask;
277
278 xscale_jtag_set_instr(target->tap,
279 XSCALE_DBGTX << xscale->xscale_variant,
280 TAP_IDLE);
281 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above
282 *could be a no-op */
283
284 /* repeat until all words have been collected */
285 int attempts = 0;
286 while (words_done < num_words) {
287 /* schedule reads */
288 words_scheduled = 0;
289 for (i = words_done; i < num_words; i++) {
290 fields[0].in_value = &field0[i];
291
292 jtag_add_pathmove(3, path);
293
294 fields[1].in_value = (uint8_t *)(field1 + i);
295
296 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
297
298 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
299
300 words_scheduled++;
301 }
302
303 retval = jtag_execute_queue();
304 if (retval != ERROR_OK) {
305 LOG_ERROR("JTAG error while receiving data from debug handler");
306 break;
307 }
308
309 /* examine results */
310 for (i = words_done; i < num_words; i++) {
311 if (!(field0[i] & 1)) {
312 /* move backwards if necessary */
313 int j;
314 for (j = i; j < num_words - 1; j++) {
315 field0[j] = field0[j + 1];
316 field1[j] = field1[j + 1];
317 }
318 words_scheduled--;
319 }
320 }
321 if (words_scheduled == 0) {
322 if (attempts++ == 1000) {
323 LOG_ERROR(
324 "Failed to receiving data from debug handler after 1000 attempts");
325 retval = ERROR_TARGET_TIMEOUT;
326 break;
327 }
328 }
329
330 words_done += words_scheduled;
331 }
332
333 for (i = 0; i < num_words; i++)
334 *(buffer++) = buf_get_u32((uint8_t *)&field1[i], 0, 32);
335
336 free(field1);
337
338 return retval;
339 }
340
/* Read the debug handler's TX register, with a one-second timeout.
 *
 * When @consume is nonzero the TAP path goes Capture-DR -> Shift-DR so the
 * read clears TX_READY; otherwise a detour through Pause-DR leaves the
 * register content intact (peek).  The value lands in the XSCALE_TX slot
 * of the register cache.
 *
 * Returns ERROR_OK when valid data was read,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when TX held no data (non-consume
 * peek), or ERROR_TARGET_TIMEOUT on JTAG error/timeout.
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGTX << xscale->xscale_variant,
		TAP_IDLE);

	/* consuming path: straight into Shift-DR */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: detour via Exit1/Pause/Exit2 */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* one-second deadline for the polling loop below */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;; ) {
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* leave the loop once data is valid, or immediately when peeking */
		if (!((!(field0_in & 1)) && consume))
			goto done;
		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}
done:

	/* bit 0 of the status field flags TX data valid */
	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
427
/* Write the cached XSCALE_RX register value to the debug handler's RX
 * register over JTAG.
 *
 * Polls (with a one-second timeout) until the handler has consumed the
 * previous RX content (status bit 0 clear), then performs a final scan
 * with the rx_valid trailer bit set to hand over the new word.
 *
 * Returns ERROR_OK, a JTAG error, or ERROR_TARGET_TIMEOUT.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* one-second deadline for the polling loop below */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;) {
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* handler has emptied RX once status bit 0 is clear */
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
505
/* Send @count elements of @size bytes each (1, 2 or 4) from @buffer to
 * the debug handler via the DBGRX JTAG register.
 *
 * Unlike xscale_write_rx() this fast path does not poll for the handler
 * to consume each word: all scans (with the rx_valid trailer bit set)
 * are queued back-to-back and flushed with one jtag_execute_queue().
 * Elements are converted from target endianness to host order first.
 *
 * Returns ERROR_OK, a JTAG error, or ERROR_COMMAND_SYNTAX_ERROR for an
 * unsupported @size.
 */
static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	int done_count = 0;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	/* scan layout: 3 control bits (zero), 32 data bits, rx_valid = 1.
	 * NOTE(review): t1 is rewritten each iteration while earlier scans
	 * are still queued -- presumably jtag_add_dr_scan() copies
	 * out_value into the queue; confirm in jtag core. */
	static const uint8_t t0;
	uint8_t t1[4];
	static const uint8_t t2 = 1;
	struct scan_field fields[3] = {
		{ .num_bits = 3, .out_value = &t0 },
		{ .num_bits = 32, .out_value = t1 },
		{ .num_bits = 1, .out_value = &t2 },
	};

	int endianness = target->endianness;
	while (done_count++ < count) {
		uint32_t t;

		switch (size) {
			case 4:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t = le_to_h_u32(buffer);
				else
					t = be_to_h_u32(buffer);
				break;
			case 2:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t = le_to_h_u16(buffer);
				else
					t = be_to_h_u16(buffer);
				break;
			case 1:
				t = buffer[0];
				break;
			default:
				LOG_ERROR("BUG: size neither 4, 2 nor 1");
				return ERROR_COMMAND_SYNTAX_ERROR;
		}

		buf_set_u32(t1, 0, 32, t);

		jtag_add_dr_scan(target->tap,
			3,
			fields,
			TAP_IDLE);
		buffer += size;
	}

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
568
569 static int xscale_send_u32(struct target *target, uint32_t value)
570 {
571 struct xscale_common *xscale = target_to_xscale(target);
572
573 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
574 return xscale_write_rx(target);
575 }
576
/* Write the cached DCSR value to the target over JTAG, optionally
 * updating the cached hold_rst / external_debug_break control bits first
 * (pass -1 for either argument to leave it unchanged).
 *
 * Returns ERROR_OK or a JTAG error code.
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	/* -1 means "keep current setting" */
	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* bit 1: hold core in reset; bit 2: request external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* cache matches hardware again */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
633
/* Parity of a word: returns 1 when an odd number of bits are set, else 0.
 * Implemented as a straight XOR fold of all bit positions down to bit 0
 * (equivalent to the classic 0x6996 nibble-lookup trick). */
static unsigned int parity(unsigned int v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v ^= v >> 2;
	v ^= v >> 1;
	return v & 1;
}
645
/* Load one 8-word cache line (@buffer) into the mini instruction cache at
 * virtual address @va, using the LDIC JTAG function.
 *
 * First scan: 6-bit command (b000011 = load Mini ICache line) plus the
 * 27-bit line address (va >> 5).  Then eight 32+1-bit scans follow, each
 * carrying one instruction word and its even-parity bit.
 *
 * Returns the result of flushing the JTAG queue.
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
		TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++) {
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* memcpy avoids a strict-aliasing violation when reading the
		 * packed bytes back as a 32-bit value for parity */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
699
/* Invalidate the instruction-cache line containing virtual address @va,
 * via the LDIC JTAG function (6-bit command b000000 + 27-bit line
 * address).  The scan is only queued; a later jtag_execute_queue()
 * (e.g. from xscale_load_ic()) actually performs it. */
static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	struct scan_field fields[2];

	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
		TAP_IDLE);

	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	return ERROR_OK;
}
729
/* Refresh both exception vector tables (low at 0x0, high at 0xffff0000)
 * in the mini instruction cache.
 *
 * For vectors 1..7, a statically-configured vector overrides; otherwise
 * the current vector word is read from target memory (a read failure
 * other than timeout substitutes a harmless branch-to-self).  Vector 0
 * (reset) is always rewritten as a branch to the debug handler at
 * handler_address + 0x20.
 *
 * Returns ERROR_OK, or a timeout error from the memory reads.
 */
static int xscale_update_vectors(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int i;
	int retval;

	uint32_t low_reset_branch, high_reset_branch;

	for (i = 1; i < 8; i++) {
		/* if there's a static vector specified for this exception, override */
		if (xscale->static_high_vectors_set & (1 << i))
			xscale->high_vectors[i] = xscale->static_high_vectors[i];
		else {
			retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK) {
				/* Some of these reads will fail as part of normal execution */
				xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	for (i = 1; i < 8; i++) {
		if (xscale->static_low_vectors_set & (1 << i))
			xscale->low_vectors[i] = xscale->static_low_vectors[i];
		else {
			retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK) {
				/* Some of these reads will fail as part of normal execution */
				xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	/* calculate branches to debug handler (B encoding: offset in words,
	 * relative to vector address + 8) */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* invalidate and load exception vectors in mini i-cache
	 * NOTE(review): the return values of these four calls are ignored;
	 * a failed mini-IC load goes unreported -- consider propagating. */
	xscale_invalidate_ic_line(target, 0x0);
	xscale_invalidate_ic_line(target, 0xffff0000);

	xscale_load_ic(target, 0x0, xscale->low_vectors);
	xscale_load_ic(target, 0xffff0000, xscale->high_vectors);

	return ERROR_OK;
}
783
784 static int xscale_arch_state(struct target *target)
785 {
786 struct xscale_common *xscale = target_to_xscale(target);
787 struct arm *arm = &xscale->arm;
788
789 static const char *state[] = {
790 "disabled", "enabled"
791 };
792
793 static const char *arch_dbg_reason[] = {
794 "", "\n(processor reset)", "\n(trace buffer full)"
795 };
796
797 if (arm->common_magic != ARM_COMMON_MAGIC) {
798 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
799 return ERROR_COMMAND_SYNTAX_ERROR;
800 }
801
802 arm_arch_state(target);
803 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
804 state[xscale->armv4_5_mmu.mmu_enabled],
805 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
806 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
807 arch_dbg_reason[xscale->arch_debug_reason]);
808
809 return ERROR_OK;
810 }
811
/* Poll a running target: probe the TX register (non-consuming) to see
 * whether the core has entered debug state, and if so run the full debug
 * entry sequence and fire halt events.  A TX read error other than
 * "no data" marks the target halted anyway so GDB can recover via reset. */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING)) {
		enum target_state previous_state = target->state;
		/* peek TX without consuming; data present => core is in debug state */
		retval = xscale_read_tx(target, 0);
		if (retval == ERROR_OK) {

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		} else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE) {
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be perfomed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
848
/* Handle entry into debug state.
 *
 * Protocol with the on-chip debug handler: receive r0, pc, r1-r7 and cpsr
 * (10 words), then the banked r8-r14 (+spsr unless in USR/SYS mode),
 * populating the register caches.  Afterwards re-read DCSR to decode the
 * Method Of Entry, apply the MOE-specific PC fixup, identify cache type
 * on first entry, refresh MMU/cache enable state, and collect trace data
 * if tracing is active (possibly resuming immediately in fill mode).
 *
 * Returns ERROR_OK, a communication error, or ERROR_TARGET_FAILURE when
 * the received cpsr is implausible.
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t pc;
	uint32_t buffer[10];
	unsigned i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	retval = xscale_read_dcsr(target);
	if (retval != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	retval = xscale_receive(target, buffer, 10);
	if (retval != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	arm->core_cache->reg_list[0].dirty = 1;
	arm->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(arm->pc->value, 0, 32, buffer[1]);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++) {
		buf_set_u32(arm->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		arm->core_cache->reg_list[i].dirty = 1;
		arm->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(arm, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* an invalid mode in cpsr means the handler protocol broke down */
	if (!is_arm_mode(arm->core_mode)) {
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
		arm_mode_name(arm->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (arm->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(arm->spsr->value, 0, 32, buffer[7]);
		arm->spsr->dirty = false;
		arm->spsr->valid = true;
	} else {
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++) {
		struct reg *r = arm_reg_current(arm, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* mark xscale regs invalid to ensure they are retrieved from the
	 * debug handler if requested */
	for (i = 0; i < xscale->reg_cache->num_regs; i++)
		xscale->reg_cache->reg_list[i].valid = 0;

	/* examine debug reason: Method Of Entry is DCSR bits [4:2] */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(arm->pc->value, 0, 32);

	switch (moe) {
		case 0x0:	/* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1:	/* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2:	/* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3:	/* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4:	/* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5:	/* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6:	/* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7:	/* Reserved (may flag Hot-Debug support) */
		default:
			/* NOTE(review): exit() takes down the whole daemon;
			 * returning an error might be friendlier -- confirm intent */
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(arm->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1) {
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value,
				0,
				32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings
	 * read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg =
		buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
		(xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
		(xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		xscale_read_trace(target);

		/* Resume if entered debug due to buffer fill and we're still collecting
		 * trace data.  Note that a debug exception due to trace buffer full
		 * can only happen in fill mode. */
		if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL) {
			if (--xscale->trace.fill_counter > 0)
				xscale_resume(target, 1, 0x0, 1, 0);
		} else	/* entered debug for other reason; reset counter */
			xscale->trace.fill_counter = 0;
	}

	return ERROR_OK;
}
1019
1020 static int xscale_halt(struct target *target)
1021 {
1022 struct xscale_common *xscale = target_to_xscale(target);
1023
1024 LOG_DEBUG("target->state: %s",
1025 target_state_name(target));
1026
1027 if (target->state == TARGET_HALTED) {
1028 LOG_DEBUG("target was already halted");
1029 return ERROR_OK;
1030 } else if (target->state == TARGET_UNKNOWN) {
1031 /* this must not happen for a xscale target */
1032 LOG_ERROR("target was in unknown state when halt was requested");
1033 return ERROR_TARGET_INVALID;
1034 } else if (target->state == TARGET_RESET)
1035 LOG_DEBUG("target->state == TARGET_RESET");
1036 else {
1037 /* assert external dbg break */
1038 xscale->external_debug_break = 1;
1039 xscale_read_dcsr(target);
1040
1041 target->debug_reason = DBG_REASON_DBGRQ;
1042 }
1043
1044 return ERROR_OK;
1045 }
1046
/* Arm hardware single-step by programming IBCR0 with @next_pc (enable bit
 * set).  If IBCR0 currently backs a user breakpoint, that breakpoint is
 * unset first so the register is free; it is a fatal bug if ibcr0_used is
 * set but no matching breakpoint exists. */
static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
	int retval;

	if (xscale->ibcr0_used) {
		/* IBCR0 value bit 0 is the enable flag; mask it off to recover
		 * the breakpoint address */
		struct breakpoint *ibcr0_bp =
			breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);

		if (ibcr0_bp)
			xscale_unset_breakpoint(target, ibcr0_bp);
		else {
			LOG_ERROR(
				"BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
			exit(-1);
		}
	}

	/* bit 0 enables the instruction breakpoint */
	retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1072
1073 static int xscale_disable_single_step(struct target *target)
1074 {
1075 struct xscale_common *xscale = target_to_xscale(target);
1076 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1077 int retval;
1078
1079 retval = xscale_set_reg_u32(ibcr0, 0x0);
1080 if (retval != ERROR_OK)
1081 return retval;
1082
1083 return ERROR_OK;
1084 }
1085
1086 static void xscale_enable_watchpoints(struct target *target)
1087 {
1088 struct watchpoint *watchpoint = target->watchpoints;
1089
1090 while (watchpoint) {
1091 if (watchpoint->set == 0)
1092 xscale_set_watchpoint(target, watchpoint);
1093 watchpoint = watchpoint->next;
1094 }
1095 }
1096
1097 static void xscale_enable_breakpoints(struct target *target)
1098 {
1099 struct breakpoint *breakpoint = target->breakpoints;
1100
1101 /* set any pending breakpoints */
1102 while (breakpoint) {
1103 if (breakpoint->set == 0)
1104 xscale_set_breakpoint(target, breakpoint);
1105 breakpoint = breakpoint->next;
1106 }
1107 }
1108
1109 static void xscale_free_trace_data(struct xscale_common *xscale)
1110 {
1111 struct xscale_trace_data *td = xscale->trace.data;
1112 while (td) {
1113 struct xscale_trace_data *next_td = td->next;
1114 if (td->entries)
1115 free(td->entries);
1116 free(td);
1117 td = next_td;
1118 }
1119 xscale->trace.data = NULL;
1120 }
1121
1122 static int xscale_resume(struct target *target, int current,
1123 uint32_t address, int handle_breakpoints, int debug_execution)
1124 {
1125 struct xscale_common *xscale = target_to_xscale(target);
1126 struct arm *arm = &xscale->arm;
1127 uint32_t current_pc;
1128 int retval;
1129 int i;
1130
1131 LOG_DEBUG("-");
1132
1133 if (target->state != TARGET_HALTED) {
1134 LOG_WARNING("target not halted");
1135 return ERROR_TARGET_NOT_HALTED;
1136 }
1137
1138 if (!debug_execution)
1139 target_free_all_working_areas(target);
1140
1141 /* update vector tables */
1142 retval = xscale_update_vectors(target);
1143 if (retval != ERROR_OK)
1144 return retval;
1145
1146 /* current = 1: continue on current pc, otherwise continue at <address> */
1147 if (!current)
1148 buf_set_u32(arm->pc->value, 0, 32, address);
1149
1150 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1151
1152 /* if we're at the reset vector, we have to simulate the branch */
1153 if (current_pc == 0x0) {
1154 arm_simulate_step(target, NULL);
1155 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1156 }
1157
1158 /* the front-end may request us not to handle breakpoints */
1159 if (handle_breakpoints) {
1160 struct breakpoint *breakpoint;
1161 breakpoint = breakpoint_find(target,
1162 buf_get_u32(arm->pc->value, 0, 32));
1163 if (breakpoint != NULL) {
1164 uint32_t next_pc;
1165 enum trace_mode saved_trace_mode;
1166
1167 /* there's a breakpoint at the current PC, we have to step over it */
1168 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1169 xscale_unset_breakpoint(target, breakpoint);
1170
1171 /* calculate PC of next instruction */
1172 retval = arm_simulate_step(target, &next_pc);
1173 if (retval != ERROR_OK) {
1174 uint32_t current_opcode;
1175 target_read_u32(target, current_pc, &current_opcode);
1176 LOG_ERROR(
1177 "BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
1178 current_opcode);
1179 }
1180
1181 LOG_DEBUG("enable single-step");
1182 xscale_enable_single_step(target, next_pc);
1183
1184 /* restore banked registers */
1185 retval = xscale_restore_banked(target);
1186 if (retval != ERROR_OK)
1187 return retval;
1188
1189 /* send resume request */
1190 xscale_send_u32(target, 0x30);
1191
1192 /* send CPSR */
1193 xscale_send_u32(target,
1194 buf_get_u32(arm->cpsr->value, 0, 32));
1195 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1196 buf_get_u32(arm->cpsr->value, 0, 32));
1197
1198 for (i = 7; i >= 0; i--) {
1199 /* send register */
1200 xscale_send_u32(target,
1201 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1202 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1203 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1204 }
1205
1206 /* send PC */
1207 xscale_send_u32(target,
1208 buf_get_u32(arm->pc->value, 0, 32));
1209 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1210 buf_get_u32(arm->pc->value, 0, 32));
1211
1212 /* disable trace data collection in xscale_debug_entry() */
1213 saved_trace_mode = xscale->trace.mode;
1214 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1215
1216 /* wait for and process debug entry */
1217 xscale_debug_entry(target);
1218
1219 /* re-enable trace buffer, if enabled previously */
1220 xscale->trace.mode = saved_trace_mode;
1221
1222 LOG_DEBUG("disable single-step");
1223 xscale_disable_single_step(target);
1224
1225 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1226 xscale_set_breakpoint(target, breakpoint);
1227 }
1228 }
1229
1230 /* enable any pending breakpoints and watchpoints */
1231 xscale_enable_breakpoints(target);
1232 xscale_enable_watchpoints(target);
1233
1234 /* restore banked registers */
1235 retval = xscale_restore_banked(target);
1236 if (retval != ERROR_OK)
1237 return retval;
1238
1239 /* send resume request (command 0x30 or 0x31)
1240 * clean the trace buffer if it is to be enabled (0x62) */
1241 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
1242 if (xscale->trace.mode == XSCALE_TRACE_FILL) {
1243 /* If trace enabled in fill mode and starting collection of new set
1244 * of buffers, initialize buffer counter and free previous buffers */
1245 if (xscale->trace.fill_counter == 0) {
1246 xscale->trace.fill_counter = xscale->trace.buffer_fill;
1247 xscale_free_trace_data(xscale);
1248 }
1249 } else /* wrap mode; free previous buffer */
1250 xscale_free_trace_data(xscale);
1251
1252 xscale_send_u32(target, 0x62);
1253 xscale_send_u32(target, 0x31);
1254 } else
1255 xscale_send_u32(target, 0x30);
1256
1257 /* send CPSR */
1258 xscale_send_u32(target, buf_get_u32(arm->cpsr->value, 0, 32));
1259 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1260 buf_get_u32(arm->cpsr->value, 0, 32));
1261
1262 for (i = 7; i >= 0; i--) {
1263 /* send register */
1264 xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1265 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1266 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1267 }
1268
1269 /* send PC */
1270 xscale_send_u32(target, buf_get_u32(arm->pc->value, 0, 32));
1271 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1272 buf_get_u32(arm->pc->value, 0, 32));
1273
1274 target->debug_reason = DBG_REASON_NOTHALTED;
1275
1276 if (!debug_execution) {
1277 /* registers are now invalid */
1278 register_cache_invalidate(arm->core_cache);
1279 target->state = TARGET_RUNNING;
1280 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1281 } else {
1282 target->state = TARGET_DEBUG_RUNNING;
1283 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1284 }
1285
1286 LOG_DEBUG("target resumed");
1287
1288 return ERROR_OK;
1289 }
1290
/* Execute a single instruction while in debug state.
 *
 * The PC has already been set up (and a breakpoint at PC removed) by
 * xscale_step(); `current`, `address` and `handle_breakpoints` are
 * therefore unused here.  The sequence mirrors xscale_resume(): arm a
 * one-shot IBCR0 breakpoint at the simulated next PC, flush banked
 * registers, send the resume/trace commands plus CPSR, r7..r0 and PC,
 * then wait for debug re-entry and disarm the step breakpoint.
 */
static int xscale_step_inner(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	retval = arm_simulate_step(target, &next_pc);
	if (retval != ERROR_OK) {
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(arm->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR(
			"BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
			current_opcode);
		return retval;
	}

	/* a hardware breakpoint on the next instruction implements the step */
	LOG_DEBUG("enable single-step");
	retval = xscale_enable_single_step(target, next_pc);
	if (retval != ERROR_OK)
		return retval;

	/* restore banked registers */
	retval = xscale_restore_banked(target);
	if (retval != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		retval = xscale_send_u32(target, 0x62);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_send_u32(target, 0x31);
		if (retval != ERROR_OK)
			return retval;
	} else {
		retval = xscale_send_u32(target, 0x30);
		if (retval != ERROR_OK)
			return retval;
	}

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(arm->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(arm->cpsr->value, 0, 32));

	for (i = 7; i >= 0; i--) {
		/* send register */
		retval = xscale_send_u32(target,
				buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i,
			buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	retval = xscale_send_u32(target,
			buf_get_u32(arm->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
		buf_get_u32(arm->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	/* wait for and process debug entry */
	retval = xscale_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;

	/* the one-shot step breakpoint is no longer needed */
	LOG_DEBUG("disable single-step");
	retval = xscale_disable_single_step(target);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1385
/* Single-step entry point (target_type callback).
 *
 * Sets up the PC (when !current), simulates the step when sitting on the
 * reset vector (address 0), temporarily removes any breakpoint at the
 * current PC, and delegates the real work to xscale_step_inner().
 */
static int xscale_step(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
{
	struct arm *arm = target_to_arm(target);
	struct breakpoint *breakpoint = NULL;

	uint32_t current_pc;
	int retval;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(arm->pc->value, 0, 32, address);

	current_pc = buf_get_u32(arm->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the step */
	if (current_pc == 0x0) {
		retval = arm_simulate_step(target, NULL);
		if (retval != ERROR_OK)
			return retval;
		current_pc = buf_get_u32(arm->pc->value, 0, 32);
		LOG_DEBUG("current pc %" PRIx32, current_pc);

		target->debug_reason = DBG_REASON_SINGLESTEP;
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);

		return ERROR_OK;
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
		breakpoint = breakpoint_find(target,
				buf_get_u32(arm->pc->value, 0, 32));
	if (breakpoint != NULL) {
		/* remove it so the original opcode executes during the step */
		retval = xscale_unset_breakpoint(target, breakpoint);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = xscale_step_inner(target, current, address, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;

	/* reinstall the breakpoint removed above */
	if (breakpoint)
		xscale_set_breakpoint(target, breakpoint);

	LOG_DEBUG("target stepped");

	return ERROR_OK;

}
1442
/* Assert SRST and configure the DCSR so the core comes out of reset in
 * Halt mode with "Trap Reset" set (i.e. it will trap into the debug
 * handler).  Requires a previously examined target.
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	/* TODO: apply hw reset signal in not examined state */
	if (!(target_was_examined(target))) {
		LOG_WARNING("Reset is not asserted because the target is not examined.");
		LOG_WARNING("Use a reset button or power cycle the target.");
		return ERROR_TARGET_NOT_EXAMINED;
	}

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt) {
		int retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1490
/* Release SRST and re-establish debug control.
 *
 * All hardware breakpoint/watchpoint bookkeeping is reset (the units
 * themselves lost their state), trace data is discarded, and the debug
 * handler binary is reloaded into the mini-icache 32-byte line by
 * 32-byte line, followed by the low/high exception vectors.  Finally
 * "Hold reset" is cleared so the core runs into the debug handler; if
 * reset_halt is not requested, the target is resumed transparently.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* hardware comparators are free again after reset */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint) {
		if (breakpoint->type == BKPT_HARD)
			breakpoint->set = 0;
		breakpoint = breakpoint->next;
	}

	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale_free_trace_data(xscale);

	register_cache_invalidate(xscale->arm.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt) {
			uint32_t cache_line[8];
			unsigned i;

			/* one 32-byte cache line (or less for the final chunk) */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4) {
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad a partial line with "mov r8, r8" no-ops */
			for (; i < 32; i += 4)
				cache_line[i / 4] = 0xe1a08008;

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0) {
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		}

		retval = xscale_load_ic(target, 0x0,
				xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
				xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt) {
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1618
/* Stub (target_type callback): individual core-register reads via the
 * debug handler are not implemented; registers are populated on debug
 * entry instead.  Returns ERROR_OK so callers don't abort. */
static int xscale_read_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1626
/* Stub (target_type callback): individual core-register writes via the
 * debug handler are not implemented; dirty registers are flushed by
 * xscale_restore_banked() on resume instead.  Returns ERROR_OK. */
static int xscale_write_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode, uint8_t *value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1634
1635 static int xscale_full_context(struct target *target)
1636 {
1637 struct arm *arm = target_to_arm(target);
1638
1639 uint32_t *buffer;
1640
1641 int i, j;
1642
1643 LOG_DEBUG("-");
1644
1645 if (target->state != TARGET_HALTED) {
1646 LOG_WARNING("target not halted");
1647 return ERROR_TARGET_NOT_HALTED;
1648 }
1649
1650 buffer = malloc(4 * 8);
1651
1652 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1653 * we can't enter User mode on an XScale (unpredictable),
1654 * but User shares registers with SYS
1655 */
1656 for (i = 1; i < 7; i++) {
1657 enum arm_mode mode = armv4_5_number_to_mode(i);
1658 bool valid = true;
1659 struct reg *r;
1660
1661 if (mode == ARM_MODE_USR)
1662 continue;
1663
1664 /* check if there are invalid registers in the current mode
1665 */
1666 for (j = 0; valid && j <= 16; j++) {
1667 if (!ARMV4_5_CORE_REG_MODE(arm->core_cache,
1668 mode, j).valid)
1669 valid = false;
1670 }
1671 if (valid)
1672 continue;
1673
1674 /* request banked registers */
1675 xscale_send_u32(target, 0x0);
1676
1677 /* send CPSR for desired bank mode */
1678 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1679
1680 /* get banked registers: r8 to r14; and SPSR
1681 * except in USR/SYS mode
1682 */
1683 if (mode != ARM_MODE_SYS) {
1684 /* SPSR */
1685 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1686 mode, 16);
1687
1688 xscale_receive(target, buffer, 8);
1689
1690 buf_set_u32(r->value, 0, 32, buffer[7]);
1691 r->dirty = false;
1692 r->valid = true;
1693 } else
1694 xscale_receive(target, buffer, 7);
1695
1696 /* move data from buffer to register cache */
1697 for (j = 8; j <= 14; j++) {
1698 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1699 mode, j);
1700
1701 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1702 r->dirty = false;
1703 r->valid = true;
1704 }
1705 }
1706
1707 free(buffer);
1708
1709 return ERROR_OK;
1710 }
1711
1712 static int xscale_restore_banked(struct target *target)
1713 {
1714 struct arm *arm = target_to_arm(target);
1715
1716 int i, j;
1717
1718 if (target->state != TARGET_HALTED) {
1719 LOG_WARNING("target not halted");
1720 return ERROR_TARGET_NOT_HALTED;
1721 }
1722
1723 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1724 * and check if any banked registers need to be written. Ignore
1725 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1726 * an XScale (unpredictable), but they share all registers.
1727 */
1728 for (i = 1; i < 7; i++) {
1729 enum arm_mode mode = armv4_5_number_to_mode(i);
1730 struct reg *r;
1731
1732 if (mode == ARM_MODE_USR)
1733 continue;
1734
1735 /* check if there are dirty registers in this mode */
1736 for (j = 8; j <= 14; j++) {
1737 if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
1738 mode, j).dirty)
1739 goto dirty;
1740 }
1741
1742 /* if not USR/SYS, check if the SPSR needs to be written */
1743 if (mode != ARM_MODE_SYS) {
1744 if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
1745 mode, 16).dirty)
1746 goto dirty;
1747 }
1748
1749 /* there's nothing to flush for this mode */
1750 continue;
1751
1752 dirty:
1753 /* command 0x1: "send banked registers" */
1754 xscale_send_u32(target, 0x1);
1755
1756 /* send CPSR for desired mode */
1757 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1758
1759 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1760 * but this protocol doesn't understand that nuance.
1761 */
1762 for (j = 8; j <= 14; j++) {
1763 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1764 mode, j);
1765 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1766 r->dirty = false;
1767 }
1768
1769 /* send spsr if not in USR/SYS mode */
1770 if (mode != ARM_MODE_SYS) {
1771 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1772 mode, 16);
1773 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1774 r->dirty = false;
1775 }
1776 }
1777
1778 return ERROR_OK;
1779 }
1780
1781 static int xscale_read_memory(struct target *target, uint32_t address,
1782 uint32_t size, uint32_t count, uint8_t *buffer)
1783 {
1784 struct xscale_common *xscale = target_to_xscale(target);
1785 uint32_t *buf32;
1786 uint32_t i;
1787 int retval;
1788
1789 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1790 address,
1791 size,
1792 count);
1793
1794 if (target->state != TARGET_HALTED) {
1795 LOG_WARNING("target not halted");
1796 return ERROR_TARGET_NOT_HALTED;
1797 }
1798
1799 /* sanitize arguments */
1800 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1801 return ERROR_COMMAND_SYNTAX_ERROR;
1802
1803 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1804 return ERROR_TARGET_UNALIGNED_ACCESS;
1805
1806 /* send memory read request (command 0x1n, n: access size) */
1807 retval = xscale_send_u32(target, 0x10 | size);
1808 if (retval != ERROR_OK)
1809 return retval;
1810
1811 /* send base address for read request */
1812 retval = xscale_send_u32(target, address);
1813 if (retval != ERROR_OK)
1814 return retval;
1815
1816 /* send number of requested data words */
1817 retval = xscale_send_u32(target, count);
1818 if (retval != ERROR_OK)
1819 return retval;
1820
1821 /* receive data from target (count times 32-bit words in host endianness) */
1822 buf32 = malloc(4 * count);
1823 retval = xscale_receive(target, buf32, count);
1824 if (retval != ERROR_OK) {
1825 free(buf32);
1826 return retval;
1827 }
1828
1829 /* extract data from host-endian buffer into byte stream */
1830 for (i = 0; i < count; i++) {
1831 switch (size) {
1832 case 4:
1833 target_buffer_set_u32(target, buffer, buf32[i]);
1834 buffer += 4;
1835 break;
1836 case 2:
1837 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1838 buffer += 2;
1839 break;
1840 case 1:
1841 *buffer++ = buf32[i] & 0xff;
1842 break;
1843 default:
1844 LOG_ERROR("invalid read size");
1845 return ERROR_COMMAND_SYNTAX_ERROR;
1846 }
1847 }
1848
1849 free(buf32);
1850
1851 /* examine DCSR, to see if Sticky Abort (SA) got set */
1852 retval = xscale_read_dcsr(target);
1853 if (retval != ERROR_OK)
1854 return retval;
1855 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1856 /* clear SA bit */
1857 retval = xscale_send_u32(target, 0x60);
1858 if (retval != ERROR_OK)
1859 return retval;
1860
1861 return ERROR_TARGET_DATA_ABORT;
1862 }
1863
1864 return ERROR_OK;
1865 }
1866
1867 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1868 uint32_t size, uint32_t count, uint8_t *buffer)
1869 {
1870 struct xscale_common *xscale = target_to_xscale(target);
1871
1872 /* with MMU inactive, there are only physical addresses */
1873 if (!xscale->armv4_5_mmu.mmu_enabled)
1874 return xscale_read_memory(target, address, size, count, buffer);
1875
1876 /** \todo: provide a non-stub implementation of this routine. */
1877 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1878 target_name(target), __func__);
1879 return ERROR_FAIL;
1880 }
1881
1882 static int xscale_write_memory(struct target *target, uint32_t address,
1883 uint32_t size, uint32_t count, const uint8_t *buffer)
1884 {
1885 struct xscale_common *xscale = target_to_xscale(target);
1886 int retval;
1887
1888 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1889 address,
1890 size,
1891 count);
1892
1893 if (target->state != TARGET_HALTED) {
1894 LOG_WARNING("target not halted");
1895 return ERROR_TARGET_NOT_HALTED;
1896 }
1897
1898 /* sanitize arguments */
1899 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1900 return ERROR_COMMAND_SYNTAX_ERROR;
1901
1902 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1903 return ERROR_TARGET_UNALIGNED_ACCESS;
1904
1905 /* send memory write request (command 0x2n, n: access size) */
1906 retval = xscale_send_u32(target, 0x20 | size);
1907 if (retval != ERROR_OK)
1908 return retval;
1909
1910 /* send base address for read request */
1911 retval = xscale_send_u32(target, address);
1912 if (retval != ERROR_OK)
1913 return retval;
1914
1915 /* send number of requested data words to be written*/
1916 retval = xscale_send_u32(target, count);
1917 if (retval != ERROR_OK)
1918 return retval;
1919
1920 /* extract data from host-endian buffer into byte stream */
1921 #if 0
1922 for (i = 0; i < count; i++) {
1923 switch (size) {
1924 case 4:
1925 value = target_buffer_get_u32(target, buffer);
1926 xscale_send_u32(target, value);
1927 buffer += 4;
1928 break;
1929 case 2:
1930 value = target_buffer_get_u16(target, buffer);
1931 xscale_send_u32(target, value);
1932 buffer += 2;
1933 break;
1934 case 1:
1935 value = *buffer;
1936 xscale_send_u32(target, value);
1937 buffer += 1;
1938 break;
1939 default:
1940 LOG_ERROR("should never get here");
1941 exit(-1);
1942 }
1943 }
1944 #endif
1945 retval = xscale_send(target, buffer, count, size);
1946 if (retval != ERROR_OK)
1947 return retval;
1948
1949 /* examine DCSR, to see if Sticky Abort (SA) got set */
1950 retval = xscale_read_dcsr(target);
1951 if (retval != ERROR_OK)
1952 return retval;
1953 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1954 /* clear SA bit */
1955 retval = xscale_send_u32(target, 0x60);
1956 if (retval != ERROR_OK)
1957 return retval;
1958
1959 LOG_ERROR("data abort writing memory");
1960 return ERROR_TARGET_DATA_ABORT;
1961 }
1962
1963 return ERROR_OK;
1964 }
1965
1966 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1967 uint32_t size, uint32_t count, const uint8_t *buffer)
1968 {
1969 struct xscale_common *xscale = target_to_xscale(target);
1970
1971 /* with MMU inactive, there are only physical addresses */
1972 if (!xscale->armv4_5_mmu.mmu_enabled)
1973 return xscale_write_memory(target, address, size, count, buffer);
1974
1975 /** \todo: provide a non-stub implementation of this routine. */
1976 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1977 target_name(target), __func__);
1978 return ERROR_FAIL;
1979 }
1980
1981 static int xscale_get_ttb(struct target *target, uint32_t *result)
1982 {
1983 struct xscale_common *xscale = target_to_xscale(target);
1984 uint32_t ttb;
1985 int retval;
1986
1987 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1988 if (retval != ERROR_OK)
1989 return retval;
1990 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1991
1992 *result = ttb;
1993
1994 return ERROR_OK;
1995 }
1996
/* Disable the MMU and/or caches by clearing bits in the cp15 control
 * register.  Before disabling, the DCache is cleaned (commands 0x50 +
 * clean address) and invalidated (0x51), and the ICache invalidated
 * (0x52), so no stale lines survive.  Finishes with cpwait (0x53).
 */
static int xscale_disable_mmu_caches(struct target *target, int mmu,
	int d_u_cache, int i_cache)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;
	int retval;

	/* read cp15 control register */
	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	if (retval != ERROR_OK)
		return retval;
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

	if (mmu)
		cp15_control &= ~0x1U;	/* clear M bit (MMU enable) */

	if (d_u_cache) {
		/* clean DCache */
		retval = xscale_send_u32(target, 0x50);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_send_u32(target, xscale->cache_clean_address);
		if (retval != ERROR_OK)
			return retval;

		/* invalidate DCache */
		retval = xscale_send_u32(target, 0x51);
		if (retval != ERROR_OK)
			return retval;

		cp15_control &= ~0x4U;	/* clear C bit (DCache enable) */
	}

	if (i_cache) {
		/* invalidate ICache */
		retval = xscale_send_u32(target, 0x52);
		if (retval != ERROR_OK)
			return retval;
		cp15_control &= ~0x1000U;	/* clear I bit (ICache enable) */
	}

	/* write new cp15 control register */
	retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
	if (retval != ERROR_OK)
		return retval;

	/* execute cpwait to ensure outstanding operations complete */
	retval = xscale_send_u32(target, 0x53);
	return retval;
}
2047
2048 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2049 int d_u_cache, int i_cache)
2050 {
2051 struct xscale_common *xscale = target_to_xscale(target);
2052 uint32_t cp15_control;
2053 int retval;
2054
2055 /* read cp15 control register */
2056 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2057 if (retval != ERROR_OK)
2058 return retval;
2059 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2060
2061 if (mmu)
2062 cp15_control |= 0x1U;
2063
2064 if (d_u_cache)
2065 cp15_control |= 0x4U;
2066
2067 if (i_cache)
2068 cp15_control |= 0x1000U;
2069
2070 /* write new cp15 control register */
2071 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2072 if (retval != ERROR_OK)
2073 return retval;
2074
2075 /* execute cpwait to ensure outstanding operations complete */
2076 retval = xscale_send_u32(target, 0x53);
2077 return retval;
2078 }
2079
/* Install a breakpoint on the (halted) target.
 *
 * Hardware breakpoints claim IBCR0 or IBCR1 (breakpoint->set records
 * which: 1 or 2).  Software breakpoints save the original instruction
 * and patch in the ARM or Thumb BKPT opcode, then clean/invalidate the
 * DCache and invalidate the ICache so the patched opcode is fetched.
 */
static int xscale_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* bit 0 of the IBCR is the enable bit */
		uint32_t value = breakpoint->address | 1;
		if (!xscale->ibcr0_used) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			breakpoint->set = 1;	/* breakpoint set on first breakpoint register */
		} else if (!xscale->ibcr1_used) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			breakpoint->set = 2;	/* breakpoint set on second breakpoint register */
		} else {/* bug: availability previously verified in xscale_add_breakpoint() */
			LOG_ERROR("BUG: no hardware comparator available");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
	} else if (breakpoint->type == BKPT_SOFT) {
		if (breakpoint->length == 4) {
			/* keep the original instruction in target endianness */
			retval = target_read_memory(target, breakpoint->address, 4, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness
			 *(arm7_9->arm_bkpt is host endian) */
			retval = target_write_u32(target, breakpoint->address,
					xscale->arm_bkpt);
			if (retval != ERROR_OK)
				return retval;
		} else {
			/* keep the original instruction in target endianness */
			retval = target_read_memory(target, breakpoint->address, 2, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness
			 *(arm7_9->arm_bkpt is host endian) */
			retval = target_write_u16(target, breakpoint->address,
					xscale->thumb_bkpt);
			if (retval != ERROR_OK)
				return retval;
		}
		breakpoint->set = 1;

		/* make sure the patched opcode reaches the instruction stream */
		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}
2146
2147 static int xscale_add_breakpoint(struct target *target,
2148 struct breakpoint *breakpoint)
2149 {
2150 struct xscale_common *xscale = target_to_xscale(target);
2151
2152 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1)) {
2153 LOG_ERROR("no breakpoint unit available for hardware breakpoint");
2154 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2155 }
2156
2157 if ((breakpoint->length != 2) && (breakpoint->length != 4)) {
2158 LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2159 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2160 }
2161
2162 if (breakpoint->type == BKPT_HARD)
2163 xscale->ibcr_available--;
2164
2165 return xscale_set_breakpoint(target, breakpoint);
2166 }
2167
2168 static int xscale_unset_breakpoint(struct target *target,
2169 struct breakpoint *breakpoint)
2170 {
2171 int retval;
2172 struct xscale_common *xscale = target_to_xscale(target);
2173
2174 if (target->state != TARGET_HALTED) {
2175 LOG_WARNING("target not halted");
2176 return ERROR_TARGET_NOT_HALTED;
2177 }
2178
2179 if (!breakpoint->set) {
2180 LOG_WARNING("breakpoint not set");
2181 return ERROR_OK;
2182 }
2183
2184 if (breakpoint->type == BKPT_HARD) {
2185 if (breakpoint->set == 1) {
2186 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2187 xscale->ibcr0_used = 0;
2188 } else if (breakpoint->set == 2) {
2189 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2190 xscale->ibcr1_used = 0;
2191 }
2192 breakpoint->set = 0;
2193 } else {
2194 /* restore original instruction (kept in target endianness) */
2195 if (breakpoint->length == 4) {
2196 retval = target_write_memory(target, breakpoint->address, 4, 1,
2197 breakpoint->orig_instr);
2198 if (retval != ERROR_OK)
2199 return retval;
2200 } else {
2201 retval = target_write_memory(target, breakpoint->address, 2, 1,
2202 breakpoint->orig_instr);
2203 if (retval != ERROR_OK)
2204 return retval;
2205 }
2206 breakpoint->set = 0;
2207
2208 xscale_send_u32(target, 0x50); /* clean dcache */
2209 xscale_send_u32(target, xscale->cache_clean_address);
2210 xscale_send_u32(target, 0x51); /* invalidate dcache */
2211 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2212 }
2213
2214 return ERROR_OK;
2215 }
2216
2217 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2218 {
2219 struct xscale_common *xscale = target_to_xscale(target);
2220
2221 if (target->state != TARGET_HALTED) {
2222 LOG_ERROR("target not halted");
2223 return ERROR_TARGET_NOT_HALTED;
2224 }
2225
2226 if (breakpoint->set)
2227 xscale_unset_breakpoint(target, breakpoint);
2228
2229 if (breakpoint->type == BKPT_HARD)
2230 xscale->ibcr_available++;
2231
2232 return ERROR_OK;
2233 }
2234
2235 static int xscale_set_watchpoint(struct target *target,
2236 struct watchpoint *watchpoint)
2237 {
2238 struct xscale_common *xscale = target_to_xscale(target);
2239 uint32_t enable = 0;
2240 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2241 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2242
2243 if (target->state != TARGET_HALTED) {
2244 LOG_ERROR("target not halted");
2245 return ERROR_TARGET_NOT_HALTED;
2246 }
2247
2248 switch (watchpoint->rw) {
2249 case WPT_READ:
2250 enable = 0x3;
2251 break;
2252 case WPT_ACCESS:
2253 enable = 0x2;
2254 break;
2255 case WPT_WRITE:
2256 enable = 0x1;
2257 break;
2258 default:
2259 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2260 }
2261
2262 /* For watchpoint across more than one word, both DBR registers must
2263 be enlisted, with the second used as a mask. */
2264 if (watchpoint->length > 4) {
2265 if (xscale->dbr0_used || xscale->dbr1_used) {
2266 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2267 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2268 }
2269
2270 /* Write mask value to DBR1, based on the length argument.
2271 * Address bits ignored by the comparator are those set in mask. */
2272 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2273 watchpoint->length - 1);
2274 xscale->dbr1_used = 1;
2275 enable |= 0x100; /* DBCON[M] */
2276 }
2277
2278 if (!xscale->dbr0_used) {
2279 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2280 dbcon_value |= enable;
2281 xscale_set_reg_u32(dbcon, dbcon_value);
2282 watchpoint->set = 1;
2283 xscale->dbr0_used = 1;
2284 } else if (!xscale->dbr1_used) {
2285 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2286 dbcon_value |= enable << 2;
2287 xscale_set_reg_u32(dbcon, dbcon_value);
2288 watchpoint->set = 2;
2289 xscale->dbr1_used = 1;
2290 } else {
2291 LOG_ERROR("BUG: no hardware comparator available");
2292 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2293 }
2294
2295 return ERROR_OK;
2296 }
2297
2298 static int xscale_add_watchpoint(struct target *target,
2299 struct watchpoint *watchpoint)
2300 {
2301 struct xscale_common *xscale = target_to_xscale(target);
2302
2303 if (xscale->dbr_available < 1) {
2304 LOG_ERROR("no more watchpoint registers available");
2305 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2306 }
2307
2308 if (watchpoint->value)
2309 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2310
2311 /* check that length is a power of two */
2312 for (uint32_t len = watchpoint->length; len != 1; len /= 2) {
2313 if (len % 2) {
2314 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2315 return ERROR_COMMAND_ARGUMENT_INVALID;
2316 }
2317 }
2318
2319 if (watchpoint->length == 4) { /* single word watchpoint */
2320 xscale->dbr_available--;/* one DBR reg used */
2321 return ERROR_OK;
2322 }
2323
2324 /* watchpoints across multiple words require both DBR registers */
2325 if (xscale->dbr_available < 2) {
2326 LOG_ERROR("insufficient watchpoint registers available");
2327 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2328 }
2329
2330 if (watchpoint->length > watchpoint->address) {
2331 LOG_ERROR("xscale does not support watchpoints with length "
2332 "greater than address");
2333 return ERROR_COMMAND_ARGUMENT_INVALID;
2334 }
2335
2336 xscale->dbr_available = 0;
2337 return ERROR_OK;
2338 }
2339
2340 static int xscale_unset_watchpoint(struct target *target,
2341 struct watchpoint *watchpoint)
2342 {
2343 struct xscale_common *xscale = target_to_xscale(target);
2344 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2345 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2346
2347 if (target->state != TARGET_HALTED) {
2348 LOG_WARNING("target not halted");
2349 return ERROR_TARGET_NOT_HALTED;
2350 }
2351
2352 if (!watchpoint->set) {
2353 LOG_WARNING("breakpoint not set");
2354 return ERROR_OK;
2355 }
2356
2357 if (watchpoint->set == 1) {
2358 if (watchpoint->length > 4) {
2359 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2360 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2361 } else
2362 dbcon_value &= ~0x3;
2363
2364 xscale_set_reg_u32(dbcon, dbcon_value);
2365 xscale->dbr0_used = 0;
2366 } else if (watchpoint->set == 2) {
2367 dbcon_value &= ~0xc;
2368 xscale_set_reg_u32(dbcon, dbcon_value);
2369 xscale->dbr1_used = 0;
2370 }
2371 watchpoint->set = 0;
2372
2373 return ERROR_OK;
2374 }
2375
2376 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2377 {
2378 struct xscale_common *xscale = target_to_xscale(target);
2379
2380 if (target->state != TARGET_HALTED) {
2381 LOG_ERROR("target not halted");
2382 return ERROR_TARGET_NOT_HALTED;
2383 }
2384
2385 if (watchpoint->set)
2386 xscale_unset_watchpoint(target, watchpoint);
2387
2388 if (watchpoint->length > 4)
2389 xscale->dbr_available++;/* both DBR regs now available */
2390
2391 xscale->dbr_available++;
2392
2393 return ERROR_OK;
2394 }
2395
2396 static int xscale_get_reg(struct reg *reg)
2397 {
2398 struct xscale_reg *arch_info = reg->arch_info;
2399 struct target *target = arch_info->target;
2400 struct xscale_common *xscale = target_to_xscale(target);
2401
2402 /* DCSR, TX and RX are accessible via JTAG */
2403 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2404 return xscale_read_dcsr(arch_info->target);
2405 else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2406 /* 1 = consume register content */
2407 return xscale_read_tx(arch_info->target, 1);
2408 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2409 /* can't read from RX register (host -> debug handler) */
2410 return ERROR_OK;
2411 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2412 /* can't (explicitly) read from TXRXCTRL register */
2413 return ERROR_OK;
2414 } else {/* Other DBG registers have to be transfered by the debug handler
2415 * send CP read request (command 0x40) */
2416 xscale_send_u32(target, 0x40);
2417
2418 /* send CP register number */
2419 xscale_send_u32(target, arch_info->dbg_handler_number);
2420
2421 /* read register value */
2422 xscale_read_tx(target, 1);
2423 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2424
2425 reg->dirty = 0;
2426 reg->valid = 1;
2427 }
2428
2429 return ERROR_OK;
2430 }
2431
2432 static int xscale_set_reg(struct reg *reg, uint8_t *buf)
2433 {
2434 struct xscale_reg *arch_info = reg->arch_info;
2435 struct target *target = arch_info->target;
2436 struct xscale_common *xscale = target_to_xscale(target);
2437 uint32_t value = buf_get_u32(buf, 0, 32);
2438
2439 /* DCSR, TX and RX are accessible via JTAG */
2440 if (strcmp(reg->name, "XSCALE_DCSR") == 0) {
2441 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2442 return xscale_write_dcsr(arch_info->target, -1, -1);
2443 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2444 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2445 return xscale_write_rx(arch_info->target);
2446 } else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2447 /* can't write to TX register (debug-handler -> host) */
2448 return ERROR_OK;
2449 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2450 /* can't (explicitly) write to TXRXCTRL register */
2451 return ERROR_OK;
2452 } else {/* Other DBG registers have to be transfered by the debug handler
2453 * send CP write request (command 0x41) */
2454 xscale_send_u32(target, 0x41);
2455
2456 /* send CP register number */
2457 xscale_send_u32(target, arch_info->dbg_handler_number);
2458
2459 /* send CP register value */
2460 xscale_send_u32(target, value);
2461 buf_set_u32(reg->value, 0, 32, value);
2462 }
2463
2464 return ERROR_OK;
2465 }
2466
2467 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2468 {
2469 struct xscale_common *xscale = target_to_xscale(target);
2470 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2471 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2472
2473 /* send CP write request (command 0x41) */
2474 xscale_send_u32(target, 0x41);
2475
2476 /* send CP register number */
2477 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2478
2479 /* send CP register value */
2480 xscale_send_u32(target, value);
2481 buf_set_u32(dcsr->value, 0, 32, value);
2482
2483 return ERROR_OK;
2484 }
2485
2486 static int xscale_read_trace(struct target *target)
2487 {
2488 struct xscale_common *xscale = target_to_xscale(target);
2489 struct arm *arm = &xscale->arm;
2490 struct xscale_trace_data **trace_data_p;
2491
2492 /* 258 words from debug handler
2493 * 256 trace buffer entries
2494 * 2 checkpoint addresses
2495 */
2496 uint32_t trace_buffer[258];
2497 int is_address[256];
2498 int i, j;
2499 unsigned int num_checkpoints = 0;
2500
2501 if (target->state != TARGET_HALTED) {
2502 LOG_WARNING("target must be stopped to read trace data");
2503 return ERROR_TARGET_NOT_HALTED;
2504 }
2505
2506 /* send read trace buffer command (command 0x61) */
2507 xscale_send_u32(target, 0x61);
2508
2509 /* receive trace buffer content */
2510 xscale_receive(target, trace_buffer, 258);
2511
2512 /* parse buffer backwards to identify address entries */
2513 for (i = 255; i >= 0; i--) {
2514 /* also count number of checkpointed entries */
2515 if ((trace_buffer[i] & 0xe0) == 0xc0)
2516 num_checkpoints++;
2517
2518 is_address[i] = 0;
2519 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2520 ((trace_buffer[i] & 0xf0) == 0xd0)) {
2521 if (i > 0)
2522 is_address[--i] = 1;
2523 if (i > 0)
2524 is_address[--i] = 1;
2525 if (i > 0)
2526 is_address[--i] = 1;
2527 if (i > 0)
2528 is_address[--i] = 1;
2529 }
2530 }
2531
2532
2533 /* search first non-zero entry that is not part of an address */
2534 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2535 ;
2536
2537 if (j == 256) {
2538 LOG_DEBUG("no trace data collected");
2539 return ERROR_XSCALE_NO_TRACE_DATA;
2540 }
2541
2542 /* account for possible partial address at buffer start (wrap mode only) */
2543 if (is_address[0]) { /* first entry is address; complete set of 4? */
2544 i = 1;
2545 while (i < 4)
2546 if (!is_address[i++])
2547 break;
2548 if (i < 4)
2549 j += i; /* partial address; can't use it */
2550 }
2551
2552 /* if first valid entry is indirect branch, can't use that either (no address) */
2553 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2554 j++;
2555
2556 /* walk linked list to terminating entry */
2557 for (trace_data_p = &xscale->trace.data; *trace_data_p;
2558 trace_data_p = &(*trace_data_p)->next)
2559 ;
2560
2561 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2562 (*trace_data_p)->next = NULL;
2563 (*trace_data_p)->chkpt0 = trace_buffer[256];
2564 (*trace_data_p)->chkpt1 = trace_buffer[257];
2565 (*trace_data_p)->last_instruction = buf_get_u32(arm->pc->value, 0, 32);
2566 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2567 (*trace_data_p)->depth = 256 - j;
2568 (*trace_data_p)->num_checkpoints = num_checkpoints;
2569
2570 for (i = j; i < 256; i++) {
2571 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2572 if (is_address[i])
2573 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2574 else
2575 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2576 }
2577
2578 return ERROR_OK;
2579 }
2580
2581 static int xscale_read_instruction(struct target *target, uint32_t pc,
2582 struct arm_instruction *instruction)
2583 {
2584 struct xscale_common *const xscale = target_to_xscale(target);
2585 int i;
2586 int section = -1;
2587 size_t size_read;
2588 uint32_t opcode;
2589 int retval;
2590
2591 if (!xscale->trace.image)
2592 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2593
2594 /* search for the section the current instruction belongs to */
2595 for (i = 0; i < xscale->trace.image->num_sections; i++) {
2596 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2597 (xscale->trace.image->sections[i].base_address +
2598 xscale->trace.image->sections[i].size > pc)) {
2599 section = i;
2600 break;
2601 }
2602 }
2603
2604 if (section == -1) {
2605 /* current instruction couldn't be found in the image */
2606 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2607 }
2608
2609 if (xscale->trace.core_state == ARM_STATE_ARM) {
2610 uint8_t buf[4];
2611 retval = image_read_section(xscale->trace.image, section,
2612 pc - xscale->trace.image->sections[section].base_address,
2613 4, buf, &size_read);
2614 if (retval != ERROR_OK) {
2615 LOG_ERROR("error while reading instruction");
2616 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2617 }
2618 opcode = target_buffer_get_u32(target, buf);
2619 arm_evaluate_opcode(opcode, pc, instruction);
2620 } else if (xscale->trace.core_state == ARM_STATE_THUMB) {
2621 uint8_t buf[2];
2622 retval = image_read_section(xscale->trace.image, section,
2623 pc - xscale->trace.image->sections[section].base_address,
2624 2, buf, &size_read);
2625 if (retval != ERROR_OK) {
2626 LOG_ERROR("error while reading instruction");
2627 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2628 }
2629 opcode = target_buffer_get_u16(target, buf);
2630 thumb_evaluate_opcode(opcode, pc, instruction);
2631 } else {
2632 LOG_ERROR("BUG: unknown core state encountered");
2633 exit(-1);
2634 }
2635
2636 return ERROR_OK;
2637 }
2638
2639 /* Extract address encoded into trace data.
2640 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2641 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2642 int i, uint32_t *target)
2643 {
2644 /* if there are less than four entries prior to the indirect branch message
2645 * we can't extract the address */
2646 if (i < 4)
2647 *target = 0;
2648 else {
2649 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2650 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2651 }
2652 }
2653
2654 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2655 struct arm_instruction *instruction,
2656 struct command_context *cmd_ctx)
2657 {
2658 int retval = xscale_read_instruction(target, pc, instruction);
2659 if (retval == ERROR_OK)
2660 command_print(cmd_ctx, "%s", instruction->text);
2661 else
2662 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2663 }
2664
2665 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2666 {
2667 struct xscale_common *xscale = target_to_xscale(target);
2668 struct xscale_trace_data *trace_data = xscale->trace.data;
2669 int i, retval;
2670 uint32_t breakpoint_pc = 0;
2671 struct arm_instruction instruction;
2672 uint32_t current_pc = 0;/* initialized when address determined */
2673
2674 if (!xscale->trace.image)
2675 LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
2676
2677 /* loop for each trace buffer that was loaded from target */
2678 while (trace_data) {
2679 int chkpt = 0; /* incremented as checkpointed entries found */
2680 int j;
2681
2682 /* FIXME: set this to correct mode when trace buffer is first enabled */
2683 xscale->trace.core_state = ARM_STATE_ARM;
2684
2685 /* loop for each entry in this trace buffer */
2686 for (i = 0; i < trace_data->depth; i++) {
2687 int exception = 0;
2688 uint32_t chkpt_reg = 0x0;
2689 uint32_t branch_target = 0;
2690 int count;
2691
2692 /* trace entry type is upper nybble of 'message byte' */
2693 int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
2694
2695 /* Target addresses of indirect branches are written into buffer
2696 * before the message byte representing the branch. Skip past it */
2697 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2698 continue;
2699
2700 switch (trace_msg_type) {
2701 case 0: /* Exceptions */
2702 case 1:
2703 case 2:
2704 case 3:
2705 case 4:
2706 case 5:
2707 case 6:
2708 case 7:
2709 exception = (trace_data->entries[i].data & 0x70) >> 4;
2710
2711 /* FIXME: vector table may be at ffff0000 */
2712 branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
2713 break;
2714
2715 case 8: /* Direct Branch */
2716 break;
2717
2718 case 9: /* Indirect Branch */
2719 xscale_branch_address(trace_data, i, &branch_target);
2720 break;
2721
2722 case 13: /* Checkpointed Indirect Branch */
2723 xscale_branch_address(trace_data, i, &branch_target);
2724 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2725 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is
2726 *oldest */
2727 else
2728 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and
2729 *newest */
2730
2731 chkpt++;
2732 break;
2733
2734 case 12: /* Checkpointed Direct Branch */
2735 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2736 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is
2737 *oldest */
2738 else
2739 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and
2740 *newest */
2741
2742 /* if no current_pc, checkpoint will be starting point */
2743 if (current_pc == 0)
2744 branch_target = chkpt_reg;
2745
2746 chkpt++;
2747 break;
2748
2749 case 15:/* Roll-over */
2750 break;
2751
2752 default:/* Reserved */
2753 LOG_WARNING("trace is suspect: invalid trace message byte");
2754 continue;
2755
2756 }
2757
2758 /* If we don't have the current_pc yet, but we did get the branch target
2759 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
2760 * then we can start displaying instructions at the next iteration, with
2761 * branch_target as the starting point.
2762 */
2763 if (current_pc == 0) {
2764 current_pc = branch_target; /* remains 0 unless branch_target *obtained */
2765 continue;
2766 }
2767
2768 /* We have current_pc. Read and display the instructions from the image.
2769 * First, display count instructions (lower nybble of message byte). */
2770 count = trace_data->entries[i].data & 0x0f;
2771 for (j = 0; j < count; j++) {
2772 xscale_display_instruction(target, current_pc, &instruction,
2773 cmd_ctx);
2774 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2775 }
2776
2777 /* An additional instruction is implicitly added to count for
2778 * rollover and some exceptions: undef, swi, prefetch abort. */
2779 if ((trace_msg_type == 15) || (exception > 0 && exception < 4)) {
2780 xscale_display_instruction(target, current_pc, &instruction,
2781 cmd_ctx);
2782 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2783 }
2784
2785 if (trace_msg_type == 15) /* rollover */
2786 continue;
2787
2788 if (exception) {
2789 command_print(cmd_ctx, "--- exception %i ---", exception);
2790 continue;
2791 }
2792
2793 /* not exception or rollover; next instruction is a branch and is
2794 * not included in the count */
2795 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2796
2797 /* for direct branches, extract branch destination from instruction */
2798 if ((trace_msg_type == 8) || (trace_msg_type == 12)) {
2799 retval = xscale_read_instruction(target, current_pc, &instruction);
2800 if (retval == ERROR_OK)
2801 current_pc = instruction.info.b_bl_bx_blx.target_address;
2802 else
2803 current_pc = 0; /* branch destination unknown */
2804
2805 /* direct branch w/ checkpoint; can also get from checkpoint reg */
2806 if (trace_msg_type == 12) {
2807 if (current_pc == 0)
2808 current_pc = chkpt_reg;
2809 else if (current_pc != chkpt_reg) /* sanity check */
2810 LOG_WARNING("trace is suspect: checkpoint register "
2811 "inconsistent with adddress from image");
2812 }
2813
2814 if (current_pc == 0)
2815 command_print(cmd_ctx, "address unknown");
2816
2817 continue;
2818 }
2819
2820 /* indirect branch; the branch destination was read from trace buffer */
2821 if ((trace_msg_type == 9) || (trace_msg_type == 13)) {
2822 current_pc = branch_target;
2823
2824 /* sanity check (checkpoint reg is redundant) */
2825 if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
2826 LOG_WARNING("trace is suspect: checkpoint register "
2827 "inconsistent with address from trace buffer");
2828 }
2829
2830 } /* END: for (i = 0; i < trace_data->depth; i++) */
2831
2832 breakpoint_pc = trace_data->last_instruction; /* used below */
2833 trace_data = trace_data->next;
2834
2835 } /* END: while (trace_data) */
2836
2837 /* Finally... display all instructions up to the value of the pc when the
2838 * debug break occurred (saved when trace data was collected from target).
2839 * This is necessary because the trace only records execution branches and 16
2840 * consecutive instructions (rollovers), so last few typically missed.
2841 */
2842 if (current_pc == 0)
2843 return ERROR_OK;/* current_pc was never found */
2844
2845 /* how many instructions remaining? */
2846 int gap_count = (breakpoint_pc - current_pc) /
2847 (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
2848
2849 /* should never be negative or over 16, but verify */
2850 if (gap_count < 0 || gap_count > 16) {
2851 LOG_WARNING("trace is suspect: excessive gap at end of trace");
2852 return ERROR_OK;/* bail; large number or negative value no good */
2853 }
2854
2855 /* display remaining instructions */
2856 for (i = 0; i < gap_count; i++) {
2857 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2858 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2859 }
2860
2861 return ERROR_OK;
2862 }
2863
/* Accessors shared by every register in the XScale-specific cache. */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2868
2869 static void xscale_build_reg_cache(struct target *target)
2870 {
2871 struct xscale_common *xscale = target_to_xscale(target);
2872 struct arm *arm = &xscale->arm;
2873 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2874 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2875 int i;
2876 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2877
2878 (*cache_p) = arm_build_reg_cache(target, arm);
2879
2880 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2881 cache_p = &(*cache_p)->next;
2882
2883 /* fill in values for the xscale reg cache */
2884 (*cache_p)->name = "XScale registers";
2885 (*cache_p)->next = NULL;
2886 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2887 (*cache_p)->num_regs = num_regs;
2888
2889 for (i = 0; i < num_regs; i++) {
2890 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2891 (*cache_p)->reg_list[i].value = calloc(4, 1);
2892 (*cache_p)->reg_list[i].dirty = 0;
2893 (*cache_p)->reg_list[i].valid = 0;
2894 (*cache_p)->reg_list[i].size = 32;
2895 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2896 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2897 arch_info[i] = xscale_reg_arch_info[i];
2898 arch_info[i].target = target;
2899 }
2900
2901 xscale->reg_cache = (*cache_p);
2902 }
2903
/* Target-type init hook: only needs to create the register caches.
 * cmd_ctx is unused but required by the target_type callback signature. */
static int xscale_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2910
/* Initialize the per-target XScale state and hook it into the generic
 * ARM and ARMv4/5 MMU layers.
 *
 * Called once at target creation; all values set here are defaults that
 * later configuration commands (debug_handler, cache_clean_address,
 * vector_catch, ...) may override.
 */
static int xscale_init_arch_info(struct target *target,
	struct xscale_common *xscale, struct jtag_tap *tap)
{
	struct arm *arm;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	arm = &xscale->arm;

	/* store architecture specific data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* PXA3xx with 11 bit IR shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	/* reset vector (slot 0) branches into the debug handler ... */
	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* ... the other seven vectors branch to themselves (spin) by default */
	for (i = 1; i <= 7; i++) {
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two IBCR instruction comparators and two DBR data comparators,
	 * all free initially */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
		target_name(target));

	/* BKPT opcodes used for software breakpoints */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	/* trace support starts disabled with no data or image loaded */
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale->trace.buffer_fill = 0;
	xscale->trace.fill_counter = 0;

	/* prepare ARMv4/5 specific information */
	arm->arch_info = xscale;
	arm->core_type = ARM_MODE_ANY;
	arm->read_core_reg = xscale_read_core_reg;
	arm->write_core_reg = xscale_write_core_reg;
	arm->full_context = xscale_full_context;

	arm_init_arch_info(target, arm);

	/* hook the XScale cache/MMU helpers into the generic ARMv4/5 MMU layer */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
3000
3001 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3002 {
3003 struct xscale_common *xscale;
3004
3005 if (sizeof xscale_debug_handler > 0x800) {
3006 LOG_ERROR("debug_handler.bin: larger than 2kb");
3007 return ERROR_FAIL;
3008 }
3009
3010 xscale = calloc(1, sizeof(*xscale));
3011 if (!xscale)
3012 return ERROR_FAIL;
3013
3014 return xscale_init_arch_info(target, xscale, target->tap);
3015 }
3016
3017 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3018 {
3019 struct target *target = NULL;
3020 struct xscale_common *xscale;
3021 int retval;
3022 uint32_t handler_address;
3023
3024 if (CMD_ARGC < 2)
3025 return ERROR_COMMAND_SYNTAX_ERROR;
3026
3027 target = get_target(CMD_ARGV[0]);
3028 if (target == NULL) {
3029 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3030 return ERROR_FAIL;
3031 }
3032
3033 xscale = target_to_xscale(target);
3034 retval = xscale_verify_pointer(CMD_CTX, xscale);
3035 if (retval != ERROR_OK)
3036 return retval;
3037
3038 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3039
3040 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3041 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3042 xscale->handler_address = handler_address;
3043 else {
3044 LOG_ERROR(
3045 "xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3046 return ERROR_FAIL;
3047 }
3048
3049 return ERROR_OK;
3050 }
3051
3052 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3053 {
3054 struct target *target = NULL;
3055 struct xscale_common *xscale;
3056 int retval;
3057 uint32_t cache_clean_address;
3058
3059 if (CMD_ARGC < 2)
3060 return ERROR_COMMAND_SYNTAX_ERROR;
3061
3062 target = get_target(CMD_ARGV[0]);
3063 if (target == NULL) {
3064 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3065 return ERROR_FAIL;
3066 }
3067 xscale = target_to_xscale(target);
3068 retval = xscale_verify_pointer(CMD_CTX, xscale);
3069 if (retval != ERROR_OK)
3070 return retval;
3071
3072 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3073
3074 if (cache_clean_address & 0xffff)
3075 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3076 else
3077 xscale->cache_clean_address = cache_clean_address;
3078
3079 return ERROR_OK;
3080 }
3081
3082 COMMAND_HANDLER(xscale_handle_cache_info_command)
3083 {
3084 struct target *target = get_current_target(CMD_CTX);
3085 struct xscale_common *xscale = target_to_xscale(target);
3086 int retval;
3087
3088 retval = xscale_verify_pointer(CMD_CTX, xscale);
3089 if (retval != ERROR_OK)
3090 return retval;
3091
3092 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3093 }
3094
3095 static int xscale_virt2phys(struct target *target,
3096 uint32_t virtual, uint32_t *physical)
3097 {
3098 struct xscale_common *xscale = target_to_xscale(target);
3099 uint32_t cb;
3100
3101 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3102 LOG_ERROR(xscale_not);
3103 return ERROR_TARGET_INVALID;
3104 }
3105
3106 uint32_t ret;
3107 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3108 virtual, &cb, &ret);
3109 if (retval != ERROR_OK)
3110 return retval;
3111 *physical = ret;
3112 return ERROR_OK;
3113 }
3114
3115 static int xscale_mmu(struct target *target, int *enabled)
3116 {
3117 struct xscale_common *xscale = target_to_xscale(target);
3118
3119 if (target->state != TARGET_HALTED) {
3120 LOG_ERROR("Target not halted");
3121 return ERROR_TARGET_INVALID;
3122 }
3123 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3124 return ERROR_OK;
3125 }
3126
3127 COMMAND_HANDLER(xscale_handle_mmu_command)
3128 {
3129 struct target *target = get_current_target(CMD_CTX);
3130 struct xscale_common *xscale = target_to_xscale(target);
3131 int retval;
3132
3133 retval = xscale_verify_pointer(CMD_CTX, xscale);
3134 if (retval != ERROR_OK)
3135 return retval;
3136
3137 if (target->state != TARGET_HALTED) {
3138 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3139 return ERROR_OK;
3140 }
3141
3142 if (CMD_ARGC >= 1) {
3143 bool enable;
3144 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3145 if (enable)
3146 xscale_enable_mmu_caches(target, 1, 0, 0);
3147 else
3148 xscale_disable_mmu_caches(target, 1, 0, 0);
3149 xscale->armv4_5_mmu.mmu_enabled = enable;
3150 }
3151
3152 command_print(CMD_CTX, "mmu %s",
3153 (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3154
3155 return ERROR_OK;
3156 }
3157
3158 COMMAND_HANDLER(xscale_handle_idcache_command)
3159 {
3160 struct target *target = get_current_target(CMD_CTX);
3161 struct xscale_common *xscale = target_to_xscale(target);
3162
3163 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3164 if (retval != ERROR_OK)
3165 return retval;
3166
3167 if (target->state != TARGET_HALTED) {
3168 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3169 return ERROR_OK;
3170 }
3171
3172 bool icache = false;
3173 if (strcmp(CMD_NAME, "icache") == 0)
3174 icache = true;
3175 if (CMD_ARGC >= 1) {
3176 bool enable;
3177 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3178 if (icache) {
3179 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3180 if (enable)
3181 xscale_enable_mmu_caches(target, 0, 0, 1);
3182 else
3183 xscale_disable_mmu_caches(target, 0, 0, 1);
3184 } else {
3185 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3186 if (enable)
3187 xscale_enable_mmu_caches(target, 0, 1, 0);
3188 else
3189 xscale_disable_mmu_caches(target, 0, 1, 0);
3190 }
3191 }
3192
3193 bool enabled = icache ?
3194 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3195 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3196 const char *msg = enabled ? "enabled" : "disabled";
3197 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3198
3199 return ERROR_OK;
3200 }
3201
/* DCSR trap-bit masks keyed by the vector names users pass to the
 * "vector_catch" command; one entry per exception vector. */
static const struct {
	char name[15];
	unsigned mask;
} vec_ids[] = {
	{ "fiq", DCSR_TF, },
	{ "irq", DCSR_TI, },
	{ "dabt", DCSR_TD, },
	{ "pabt", DCSR_TA, },
	{ "swi", DCSR_TS, },
	{ "undef", DCSR_TU, },
	{ "reset", DCSR_TR, },
};
3214
/* "vector_catch" command: select which exception vectors trigger debug
 * entry by programming the DCSR trap bits, then print the per-vector
 * catch/ignore state.  Accepts "all", "none", or a list of vector names
 * from vec_ids[]. */
COMMAND_HANDLER(xscale_handle_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	uint32_t dcsr_value;
	uint32_t catch = 0;
	struct reg *dcsr_reg = &xscale->reg_cache->reg_list[XSCALE_DCSR];

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
	if (CMD_ARGC > 0) {
		/* "all"/"none" shortcuts: set catch directly and consume the
		 * argument so the name-matching loop below is skipped. */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = DCSR_TRAP_MASK;
				CMD_ARGC--;
			} else if (strcmp(CMD_ARGV[0], "none") == 0) {
				catch = 0;
				CMD_ARGC--;
			}
		}
		/* Consume remaining arguments right-to-left, OR-ing in the
		 * trap mask for each recognized vector name. */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name))
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			/* No table entry matched: unknown vector name. */
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_ERROR("No vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
		/* Replace only the trap bits of the cached DCSR, then push the
		 * new value to the core. */
		buf_set_u32(dcsr_reg->value, 0, 32,
			(buf_get_u32(dcsr_reg->value, 0, 32) & ~DCSR_TRAP_MASK) | catch);
		xscale_write_dcsr(target, -1, -1);
	}

	/* Report the (possibly updated) catch state for every vector. */
	dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD_CTX, "%15s: %s", vec_ids[i].name,
			(dcsr_value & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
3265
3266
3267 COMMAND_HANDLER(xscale_handle_vector_table_command)
3268 {
3269 struct target *target = get_current_target(CMD_CTX);
3270 struct xscale_common *xscale = target_to_xscale(target);
3271 int err = 0;
3272 int retval;
3273
3274 retval = xscale_verify_pointer(CMD_CTX, xscale);
3275 if (retval != ERROR_OK)
3276 return retval;
3277
3278 if (CMD_ARGC == 0) { /* print current settings */
3279 int idx;
3280
3281 command_print(CMD_CTX, "active user-set static vectors:");
3282 for (idx = 1; idx < 8; idx++)
3283 if (xscale->static_low_vectors_set & (1 << idx))
3284 command_print(CMD_CTX,
3285 "low %d: 0x%" PRIx32,
3286 idx,
3287 xscale->static_low_vectors[idx]);
3288 for (idx = 1; idx < 8; idx++)
3289 if (xscale->static_high_vectors_set & (1 << idx))
3290 command_print(CMD_CTX,
3291 "high %d: 0x%" PRIx32,
3292 idx,
3293 xscale->static_high_vectors[idx]);
3294 return ERROR_OK;
3295 }
3296
3297 if (CMD_ARGC != 3)
3298 err = 1;
3299 else {
3300 int idx;
3301 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3302 uint32_t vec;
3303 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3304
3305 if (idx < 1 || idx >= 8)
3306 err = 1;
3307
3308 if (!err && strcmp(CMD_ARGV[0], "low") == 0) {
3309 xscale->static_low_vectors_set |= (1<<idx);
3310 xscale->static_low_vectors[idx] = vec;
3311 } else if (!err && (strcmp(CMD_ARGV[0], "high") == 0)) {
3312 xscale->static_high_vectors_set |= (1<<idx);
3313 xscale->static_high_vectors[idx] = vec;
3314 } else
3315 err = 1;
3316 }
3317
3318 if (err)
3319 return ERROR_COMMAND_SYNTAX_ERROR;
3320
3321 return ERROR_OK;
3322 }
3323
3324
3325 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3326 {
3327 struct target *target = get_current_target(CMD_CTX);
3328 struct xscale_common *xscale = target_to_xscale(target);
3329 uint32_t dcsr_value;
3330 int retval;
3331
3332 retval = xscale_verify_pointer(CMD_CTX, xscale);
3333 if (retval != ERROR_OK)
3334 return retval;
3335
3336 if (target->state != TARGET_HALTED) {
3337 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3338 return ERROR_OK;
3339 }
3340
3341 if (CMD_ARGC >= 1) {
3342 if (strcmp("enable", CMD_ARGV[0]) == 0)
3343 xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
3344 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3345 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3346 else
3347 return ERROR_COMMAND_SYNTAX_ERROR;
3348 }
3349
3350 if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3351 if (strcmp("fill", CMD_ARGV[1]) == 0) {
3352 int buffcount = 1; /* default */
3353 if (CMD_ARGC >= 3)
3354 COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
3355 if (buffcount < 1) { /* invalid */
3356 command_print(CMD_CTX, "fill buffer count must be > 0");
3357 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3358 return ERROR_COMMAND_SYNTAX_ERROR;
3359 }
3360 xscale->trace.buffer_fill = buffcount;
3361 xscale->trace.mode = XSCALE_TRACE_FILL;
3362 } else if (strcmp("wrap", CMD_ARGV[1]) == 0)
3363 xscale->trace.mode = XSCALE_TRACE_WRAP;
3364 else {
3365 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3366 return ERROR_COMMAND_SYNTAX_ERROR;
3367 }
3368 }
3369
3370 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3371 char fill_string[12];
3372 sprintf(fill_string, "fill %d", xscale->trace.buffer_fill);
3373 command_print(CMD_CTX, "trace buffer enabled (%s)",
3374 (xscale->trace.mode == XSCALE_TRACE_FILL)
3375 ? fill_string : "wrap");
3376 } else
3377 command_print(CMD_CTX, "trace buffer disabled");
3378
3379 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3380 if (xscale->trace.mode == XSCALE_TRACE_FILL)
3381 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3382 else
3383 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3384
3385 return ERROR_OK;
3386 }
3387
3388 COMMAND_HANDLER(xscale_handle_trace_image_command)
3389 {
3390 struct target *target = get_current_target(CMD_CTX);
3391 struct xscale_common *xscale = target_to_xscale(target);
3392 int retval;
3393
3394 if (CMD_ARGC < 1)
3395 return ERROR_COMMAND_SYNTAX_ERROR;
3396
3397 retval = xscale_verify_pointer(CMD_CTX, xscale);
3398 if (retval != ERROR_OK)
3399 return retval;
3400
3401 if (xscale->trace.image) {
3402 image_close(xscale->trace.image);
3403 free(xscale->trace.image);
3404 command_print(CMD_CTX, "previously loaded image found and closed");
3405 }
3406
3407 xscale->trace.image = malloc(sizeof(struct image));
3408 xscale->trace.image->base_address_set = 0;
3409 xscale->trace.image->start_address_set = 0;
3410
3411 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3412 if (CMD_ARGC >= 2) {
3413 xscale->trace.image->base_address_set = 1;
3414 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3415 } else
3416 xscale->trace.image->base_address_set = 0;
3417
3418 if (image_open(xscale->trace.image, CMD_ARGV[0],
3419 (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK) {
3420 free(xscale->trace.image);
3421 xscale->trace.image = NULL;
3422 return ERROR_OK;
3423 }
3424
3425 return ERROR_OK;
3426 }
3427
/* "dump_trace" command: write all collected trace buffers to a binary
 * file.  Per-buffer layout: chkpt0, chkpt1, last_instruction, depth,
 * then `depth` 32-bit words each packing entry data in the low 16 bits'
 * position with the entry type in bits [31:16]. */
COMMAND_HANDLER(xscale_handle_dump_trace_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	struct xscale_trace_data *trace_data;
	struct fileio *file;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC < 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	trace_data = xscale->trace.data;

	if (!trace_data) {
		command_print(CMD_CTX, "no trace data collected");
		return ERROR_OK;
	}

	/* NOTE(review): open failure is silently treated as success here;
	 * presumably fileio_open() logs its own error — confirm. */
	if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
		return ERROR_OK;

	/* Walk the linked list of capture buffers, serializing each one. */
	while (trace_data) {
		int i;

		fileio_write_u32(file, trace_data->chkpt0);
		fileio_write_u32(file, trace_data->chkpt1);
		fileio_write_u32(file, trace_data->last_instruction);
		fileio_write_u32(file, trace_data->depth);

		for (i = 0; i < trace_data->depth; i++)
			fileio_write_u32(file, trace_data->entries[i].data |
				((trace_data->entries[i].type & 0xffff) << 16));

		trace_data = trace_data->next;
	}

	fileio_close(file);

	return ERROR_OK;
}
3477
3478 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3479 {
3480 struct target *target = get_current_target(CMD_CTX);
3481 struct xscale_common *xscale = target_to_xscale(target);
3482 int retval;
3483
3484 retval = xscale_verify_pointer(CMD_CTX, xscale);
3485 if (retval != ERROR_OK)
3486 return retval;
3487
3488 xscale_analyze_trace(target, CMD_CTX);
3489
3490 return ERROR_OK;
3491 }
3492
/* "cp15" command: read (one argument) or write (two arguments) a
 * coprocessor 15 register, addressed by its CP15 register number and
 * mapped onto the corresponding entry in OpenOCD's register cache. */
COMMAND_HANDLER(xscale_handle_cp15)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}
	uint32_t reg_no = 0;
	struct reg *reg = NULL;
	if (CMD_ARGC > 0) {
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
		/*translate from xscale cp15 register no to openocd register*/
		switch (reg_no) {
		case 0:
			reg_no = XSCALE_MAINID;
			break;
		case 1:
			reg_no = XSCALE_CTRL;
			break;
		case 2:
			reg_no = XSCALE_TTB;
			break;
		case 3:
			reg_no = XSCALE_DAC;
			break;
		case 5:
			reg_no = XSCALE_FSR;
			break;
		case 6:
			reg_no = XSCALE_FAR;
			break;
		case 13:
			reg_no = XSCALE_PID;
			break;
		case 15:
			reg_no = XSCALE_CPACCESS;
			break;
		default:
			command_print(CMD_CTX, "invalid register number");
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		reg = &xscale->reg_cache->reg_list[reg_no];

	}
	if (CMD_ARGC == 1) {
		uint32_t value;

		/* read cp15 control register */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size),
			value);
	} else if (CMD_ARGC == 2) {
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);

		/* Write path talks to the debug handler directly.
		 * NOTE(review): the cached register value is not updated or
		 * invalidated here — a subsequent cached read may be stale;
		 * confirm against xscale_get_reg() semantics. */

		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);
	} else
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
3572
/* Commands available only while a target exists and (mostly) while it
 * is halted; registered under the "xscale" command group. */
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display mask of vectors "
			"that should trigger debug entry",
		.usage = "['all'|'none'|'fiq'|'irq'|'dabt'|'pabt'|'swi'|'undef'|'reset']",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
		.usage = "",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};
/* Commands usable in any mode (including config stage), plus the
 * chained exec-only commands above. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "<target> <address>",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level registration: generic ARM commands plus the "xscale"
 * command group defined above. */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.usage = "",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3690
/* Target-type descriptor binding the XScale implementation into the
 * OpenOCD target framework. */
struct target_type xscale_target = {
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)