093925b6271cae845f7ad731e5e20bbf0a06d093
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
23 ***************************************************************************/
24
25 #ifdef HAVE_CONFIG_H
26 #include "config.h"
27 #endif
28
29 #include "breakpoints.h"
30 #include "xscale.h"
31 #include "target_type.h"
32 #include "arm_jtag.h"
33 #include "arm_simulator.h"
34 #include "arm_disassembler.h"
35 #include <helper/time_support.h>
36 #include "register.h"
37 #include "image.h"
38 #include "arm_opcodes.h"
39 #include "armv4_5.h"
40
41 /*
42 * Important XScale documents available as of October 2009 include:
43 *
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
48 *
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
56 *
57 * Chip-specific microarchitecture documents may also be useful.
58 */
59
60 /* forward declarations */
61 static int xscale_resume(struct target *, int current,
62 target_addr_t address, int handle_breakpoints, int debug_execution);
63 static int xscale_debug_entry(struct target *);
64 static int xscale_restore_banked(struct target *);
65 static int xscale_get_reg(struct reg *reg);
66 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
67 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
68 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
69 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
70 static int xscale_read_trace(struct target *);
71
/* This XScale "debug handler" is loaded into the processor's
 * mini-ICache, which is 2K of code writable only via JTAG.
 *
 * The binary image is included verbatim from the generated .inc file
 * under contrib/loaders/debug/xscale/ — rebuild it there if the
 * handler ever needs changing.
 */
static const uint8_t xscale_debug_handler[] = {
#include "../../contrib/loaders/debug/xscale/debug_handler.inc"
};
78
/* Display names for the XScale-specific registers.  The order must stay
 * in lockstep with xscale_reg_arch_info[] below and with the XSCALE_*
 * register index constants (XSCALE_MAINID == 0 ... XSCALE_TXRXCTRL). */
static const char *const xscale_reg_list[] = {
	"XSCALE_MAINID",	/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",	/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",	/* 20 */
	"XSCALE_TXRXCTRL",
};
103
/* Per-register architecture info, parallel to xscale_reg_list[] above.
 * The first field is the CP15/debug register number used by the debug
 * handler; -1 marks registers that are not reachable that way and are
 * instead accessed directly over JTAG (TX, RX, TXRXCTRL). */
static const struct xscale_reg xscale_reg_arch_info[] = {
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL},	/* DCSR accessed via JTAG or SW */
	{-1, NULL},	/* TX accessed via JTAG */
	{-1, NULL},	/* RX accessed via JTAG */
	{-1, NULL},	/* TXRXCTRL implicit access via JTAG */
};
128
/* Convenience wrapper: write a plain uint32_t into an XScale register
 * by packing it into the bit-buffer format xscale_set_reg() expects. */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t packed[4];

	buf_set_u32(packed, 0, 32, value);
	return xscale_set_reg(reg, packed);
}
138
139 static const char xscale_not[] = "target is not an XScale";
140
141 static int xscale_verify_pointer(struct command_invocation *cmd,
142 struct xscale_common *xscale)
143 {
144 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
145 command_print(cmd, xscale_not);
146 return ERROR_TARGET_INVALID;
147 }
148 return ERROR_OK;
149 }
150
151 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
152 {
153 assert(tap != NULL);
154
155 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
156 struct scan_field field;
157 uint8_t scratch[4];
158
159 memset(&field, 0, sizeof field);
160 field.num_bits = tap->ir_length;
161 field.out_value = scratch;
162 buf_set_u32(scratch, 0, field.num_bits, new_instr);
163
164 jtag_add_ir_scan(tap, &field, end_state);
165 }
166
167 return ERROR_OK;
168 }
169
/* Read the Debug Control and Status Register (DCSR) over JTAG into the
 * register cache, then write the same value straight back so that the
 * hold_rst / external_debug_break bits (carried in the 3-bit header
 * field) take effect.  The scan layout is: 3 status bits, 32 data bits,
 * 1 trailing bit.
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_DRPAUSE);

	/* bit 1 = hold_rst, bit 2 = external debug break request */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* queue checks of the captured header/trailer bits */
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* cache now mirrors the hardware DCSR */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = false;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = true;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
231
232
233 static void xscale_getbuf(jtag_callback_data_t arg)
234 {
235 uint8_t *in = (uint8_t *)arg;
236 *((uint32_t *)arg) = buf_get_u32(in, 0, 32);
237 }
238
239 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
240 {
241 if (num_words == 0)
242 return ERROR_COMMAND_SYNTAX_ERROR;
243
244 struct xscale_common *xscale = target_to_xscale(target);
245 int retval = ERROR_OK;
246 tap_state_t path[3];
247 struct scan_field fields[3];
248 uint8_t *field0 = malloc(num_words * 1);
249 uint8_t field0_check_value = 0x2;
250 uint8_t field0_check_mask = 0x6;
251 uint32_t *field1 = malloc(num_words * 4);
252 uint8_t field2_check_value = 0x0;
253 uint8_t field2_check_mask = 0x1;
254 int words_done = 0;
255 int words_scheduled = 0;
256 int i;
257
258 path[0] = TAP_DRSELECT;
259 path[1] = TAP_DRCAPTURE;
260 path[2] = TAP_DRSHIFT;
261
262 memset(&fields, 0, sizeof fields);
263
264 fields[0].num_bits = 3;
265 uint8_t tmp;
266 fields[0].in_value = &tmp;
267 fields[0].check_value = &field0_check_value;
268 fields[0].check_mask = &field0_check_mask;
269
270 fields[1].num_bits = 32;
271
272 fields[2].num_bits = 1;
273 uint8_t tmp2;
274 fields[2].in_value = &tmp2;
275 fields[2].check_value = &field2_check_value;
276 fields[2].check_mask = &field2_check_mask;
277
278 xscale_jtag_set_instr(target->tap,
279 XSCALE_DBGTX << xscale->xscale_variant,
280 TAP_IDLE);
281 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above
282 *could be a no-op */
283
284 /* repeat until all words have been collected */
285 int attempts = 0;
286 while (words_done < num_words) {
287 /* schedule reads */
288 words_scheduled = 0;
289 for (i = words_done; i < num_words; i++) {
290 fields[0].in_value = &field0[i];
291
292 jtag_add_pathmove(3, path);
293
294 fields[1].in_value = (uint8_t *)(field1 + i);
295
296 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
297
298 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
299
300 words_scheduled++;
301 }
302
303 retval = jtag_execute_queue();
304 if (retval != ERROR_OK) {
305 LOG_ERROR("JTAG error while receiving data from debug handler");
306 break;
307 }
308
309 /* examine results */
310 for (i = words_done; i < num_words; i++) {
311 if (!(field0[i] & 1)) {
312 /* move backwards if necessary */
313 int j;
314 for (j = i; j < num_words - 1; j++) {
315 field0[j] = field0[j + 1];
316 field1[j] = field1[j + 1];
317 }
318 words_scheduled--;
319 }
320 }
321 if (words_scheduled == 0) {
322 if (attempts++ == 1000) {
323 LOG_ERROR(
324 "Failed to receiving data from debug handler after 1000 attempts");
325 retval = ERROR_TARGET_TIMEOUT;
326 break;
327 }
328 }
329
330 words_done += words_scheduled;
331 }
332
333 for (i = 0; i < num_words; i++)
334 *(buffer++) = buf_get_u32((uint8_t *)&field1[i], 0, 32);
335
336 free(field1);
337
338 return retval;
339 }
340
/* Poll the debug handler's DBGTX register for up to one second.
 *
 * If consume is nonzero the TAP goes Capture-DR -> Shift-DR, which
 * clears TX_READY on the core; otherwise a detour through Pause-DR
 * leaves TX_READY intact (a pure peek).  The captured 32-bit value
 * lands in the cached XSCALE_TX register.
 *
 * Returns ERROR_OK when data was present,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when TX_READY never rose (only
 * reachable in non-consume mode), ERROR_TARGET_TIMEOUT on JTAG error
 * or timeout.
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGTX << xscale->xscale_variant,
		TAP_IDLE);

	/* consuming path: straight into Shift-DR */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: detour via Exit1/Pause/Exit2 */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* one-second deadline for the whole poll loop */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;; ) {
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if (timeval_compare(&now, &timeout) > 0) {
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* leave the loop once TX_READY was seen, or immediately when peeking */
		if (!((!(field0_in & 1)) && consume))
			goto done;
		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
426
427 static int xscale_write_rx(struct target *target)
428 {
429 struct xscale_common *xscale = target_to_xscale(target);
430 int retval;
431 struct timeval timeout, now;
432 struct scan_field fields[3];
433 uint8_t field0_out = 0x0;
434 uint8_t field0_in = 0x0;
435 uint8_t field0_check_value = 0x2;
436 uint8_t field0_check_mask = 0x6;
437 uint8_t field2 = 0x0;
438 uint8_t field2_check_value = 0x0;
439 uint8_t field2_check_mask = 0x1;
440
441 xscale_jtag_set_instr(target->tap,
442 XSCALE_DBGRX << xscale->xscale_variant,
443 TAP_IDLE);
444
445 memset(&fields, 0, sizeof fields);
446
447 fields[0].num_bits = 3;
448 fields[0].out_value = &field0_out;
449 fields[0].in_value = &field0_in;
450
451 fields[1].num_bits = 32;
452 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
453
454 fields[2].num_bits = 1;
455 fields[2].out_value = &field2;
456 uint8_t tmp;
457 fields[2].in_value = &tmp;
458
459 gettimeofday(&timeout, NULL);
460 timeval_add_time(&timeout, 1, 0);
461
462 /* poll until rx_read is low */
463 LOG_DEBUG("polling RX");
464 for (;;) {
465 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
466
467 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
468 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
469
470 retval = jtag_execute_queue();
471 if (retval != ERROR_OK) {
472 LOG_ERROR("JTAG error while writing RX");
473 return retval;
474 }
475
476 gettimeofday(&now, NULL);
477 if ((now.tv_sec > timeout.tv_sec) ||
478 ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
479 LOG_ERROR("time out writing RX register");
480 return ERROR_TARGET_TIMEOUT;
481 }
482 if (!(field0_in & 1))
483 goto done;
484 if (debug_level >= 3) {
485 LOG_DEBUG("waiting 100ms");
486 alive_sleep(100); /* avoid flooding the logs */
487 } else
488 keep_alive();
489 }
490 done:
491
492 /* set rx_valid */
493 field2 = 0x1;
494 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
495
496 retval = jtag_execute_queue();
497 if (retval != ERROR_OK) {
498 LOG_ERROR("JTAG error while writing RX");
499 return retval;
500 }
501
502 return ERROR_OK;
503 }
504
/* Send count elements of size bytes each (1, 2 or 4) to the debug
 * handler via DBGRX.  Each element is converted from target byte order
 * to a host u32 before scanning.  All scans are queued and executed in
 * one batch; the static t0/t2 buffers exist so the queued scan fields
 * remain valid until jtag_execute_queue() runs. */
static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	int done_count = 0;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	/* t0: zero header bits; t2 = 1 sets the rx_valid trailer bit */
	static const uint8_t t0;
	uint8_t t1[4];
	static const uint8_t t2 = 1;
	struct scan_field fields[3] = {
		{ .num_bits = 3, .out_value = &t0 },
		{ .num_bits = 32, .out_value = t1 },
		{ .num_bits = 1, .out_value = &t2 },
	};

	int endianness = target->endianness;
	while (done_count++ < count) {
		uint32_t t;

		/* widen the element to 32 bits, honoring target endianness */
		switch (size) {
			case 4:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t = le_to_h_u32(buffer);
				else
					t = be_to_h_u32(buffer);
				break;
			case 2:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t = le_to_h_u16(buffer);
				else
					t = be_to_h_u16(buffer);
				break;
			case 1:
				t = buffer[0];
				break;
			default:
				LOG_ERROR("BUG: size neither 4, 2 nor 1");
				return ERROR_COMMAND_SYNTAX_ERROR;
		}

		buf_set_u32(t1, 0, 32, t);

		jtag_add_dr_scan(target->tap,
			3,
			fields,
			TAP_IDLE);
		buffer += size;
	}

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
567
568 static int xscale_send_u32(struct target *target, uint32_t value)
569 {
570 struct xscale_common *xscale = target_to_xscale(target);
571
572 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
573 return xscale_write_rx(target);
574 }
575
/* Write the cached DCSR value to the core, optionally updating the
 * hold_rst / external_debug_break request bits first (pass -1 to leave
 * either unchanged).  Scan layout matches xscale_read_dcsr(): 3 header
 * bits carrying the two request flags, 32 data bits, 1 trailer bit. */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* bit 1 = hold_rst, bit 2 = external debug break request */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* hardware now matches the cache */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = false;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = true;

	return ERROR_OK;
}
632
/* Parity of the set bits in a 32-bit word: 0 if even, 1 if odd.
 * Folds the word down to a single bit by repeated halving XORs. */
static unsigned int parity(unsigned int v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v ^= v >> 2;
	v ^= v >> 1;
	return v & 1;
}
644
/* Load one 8-word cache line (with per-word parity) into the mini
 * ICache at virtual address va, using the LDIC JTAG instruction. */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
		TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	/* first scan: command + line address */
	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++) {
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* memcpy avoids a strict-aliasing cast when computing parity */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
698
699 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
700 {
701 struct xscale_common *xscale = target_to_xscale(target);
702 uint8_t packet[4];
703 uint8_t cmd;
704 struct scan_field fields[2];
705
706 xscale_jtag_set_instr(target->tap,
707 XSCALE_LDIC << xscale->xscale_variant,
708 TAP_IDLE);
709
710 /* CMD for invalidate IC line b000, bits [6:4] b000 */
711 buf_set_u32(&cmd, 0, 6, 0x0);
712
713 /* virtual address of desired cache line */
714 buf_set_u32(packet, 0, 27, va >> 5);
715
716 memset(&fields, 0, sizeof fields);
717
718 fields[0].num_bits = 6;
719 fields[0].out_value = &cmd;
720
721 fields[1].num_bits = 27;
722 fields[1].out_value = packet;
723
724 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
725
726 return ERROR_OK;
727 }
728
729 static int xscale_update_vectors(struct target *target)
730 {
731 struct xscale_common *xscale = target_to_xscale(target);
732 int i;
733 int retval;
734
735 uint32_t low_reset_branch, high_reset_branch;
736
737 for (i = 1; i < 8; i++) {
738 /* if there's a static vector specified for this exception, override */
739 if (xscale->static_high_vectors_set & (1 << i))
740 xscale->high_vectors[i] = xscale->static_high_vectors[i];
741 else {
742 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
743 if (retval == ERROR_TARGET_TIMEOUT)
744 return retval;
745 if (retval != ERROR_OK) {
746 /* Some of these reads will fail as part of normal execution */
747 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
748 }
749 }
750 }
751
752 for (i = 1; i < 8; i++) {
753 if (xscale->static_low_vectors_set & (1 << i))
754 xscale->low_vectors[i] = xscale->static_low_vectors[i];
755 else {
756 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
757 if (retval == ERROR_TARGET_TIMEOUT)
758 return retval;
759 if (retval != ERROR_OK) {
760 /* Some of these reads will fail as part of normal execution */
761 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
762 }
763 }
764 }
765
766 /* calculate branches to debug handler */
767 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
768 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
769
770 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
771 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
772
773 /* invalidate and load exception vectors in mini i-cache */
774 xscale_invalidate_ic_line(target, 0x0);
775 xscale_invalidate_ic_line(target, 0xffff0000);
776
777 xscale_load_ic(target, 0x0, xscale->low_vectors);
778 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
779
780 return ERROR_OK;
781 }
782
783 static int xscale_arch_state(struct target *target)
784 {
785 struct xscale_common *xscale = target_to_xscale(target);
786 struct arm *arm = &xscale->arm;
787
788 static const char *state[] = {
789 "disabled", "enabled"
790 };
791
792 static const char *arch_dbg_reason[] = {
793 "", "\n(processor reset)", "\n(trace buffer full)"
794 };
795
796 if (arm->common_magic != ARM_COMMON_MAGIC) {
797 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
798 return ERROR_COMMAND_SYNTAX_ERROR;
799 }
800
801 arm_arch_state(target);
802 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
803 state[xscale->armv4_5_mmu.mmu_enabled],
804 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
805 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
806 arch_dbg_reason[xscale->arch_debug_reason]);
807
808 return ERROR_OK;
809 }
810
/* Poll the target: a readable TX register means the core entered debug
 * state.  On entry, full debug-entry processing runs; on unexpected TX
 * errors the target is reported halted anyway so the user can recover
 * with a reset. */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING)) {
		enum target_state previous_state = target->state;
		/* non-consuming peek at TX: data present <=> debug state entered */
		retval = xscale_read_tx(target, 0);
		if (retval == ERROR_OK) {

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		} else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE) {
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be performed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
847
/* Process entry into debug state: drain the register dump the debug
 * handler sends over TX (r0, pc, r1-r7, cpsr, then banked r8-r14 and
 * spsr if applicable), decode the DCSR method-of-entry to set the debug
 * reason, apply the PC fixup, refresh MMU/cache state, and collect
 * trace data if tracing is active. */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t pc;
	uint32_t buffer[10];
	unsigned i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	retval = xscale_read_dcsr(target);
	if (retval != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	retval = xscale_receive(target, buffer, 10);
	if (retval != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	arm->core_cache->reg_list[0].dirty = true;
	arm->core_cache->reg_list[0].valid = true;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(arm->pc->value, 0, 32, buffer[1]);
	arm->pc->dirty = true;
	arm->pc->valid = true;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++) {
		buf_set_u32(arm->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		arm->core_cache->reg_list[i].dirty = true;
		arm->core_cache->reg_list[i].valid = true;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(arm, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* an invalid mode value means the dump stream is out of sync */
	if (!is_arm_mode(arm->core_mode)) {
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
		arm_mode_name(arm->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (arm->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(arm->spsr->value, 0, 32, buffer[7]);
		arm->spsr->dirty = false;
		arm->spsr->valid = true;
	} else {
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++) {
		struct reg *r = arm_reg_current(arm, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* mark xscale regs invalid to ensure they are retrieved from the
	 * debug handler if requested */
	for (i = 0; i < xscale->reg_cache->num_regs; i++)
		xscale->reg_cache->reg_list[i].valid = false;

	/* examine debug reason: method-of-entry field is DCSR bits [4:2] */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(arm->pc->value, 0, 32);

	switch (moe) {
		case 0x0:	/* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1:	/* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2:	/* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3:	/* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4:	/* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5:	/* Vector trap occurred */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6:	/* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7:	/* Reserved (may flag Hot-Debug support) */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(arm->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1) {
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value,
				0,
				32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings
	 * read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg =
		buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
		(xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
		(xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		xscale_read_trace(target);

		/* Resume if entered debug due to buffer fill and we're still collecting
		 * trace data. Note that a debug exception due to trace buffer full
		 * can only happen in fill mode. */
		if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL) {
			if (--xscale->trace.fill_counter > 0)
				xscale_resume(target, 1, 0x0, 1, 0);
		} else /* entered debug for other reason; reset counter */
			xscale->trace.fill_counter = 0;
	}

	return ERROR_OK;
}
1018
1019 static int xscale_halt(struct target *target)
1020 {
1021 struct xscale_common *xscale = target_to_xscale(target);
1022
1023 LOG_DEBUG("target->state: %s",
1024 target_state_name(target));
1025
1026 if (target->state == TARGET_HALTED) {
1027 LOG_DEBUG("target was already halted");
1028 return ERROR_OK;
1029 } else if (target->state == TARGET_UNKNOWN) {
1030 /* this must not happen for a xscale target */
1031 LOG_ERROR("target was in unknown state when halt was requested");
1032 return ERROR_TARGET_INVALID;
1033 } else if (target->state == TARGET_RESET)
1034 LOG_DEBUG("target->state == TARGET_RESET");
1035 else {
1036 /* assert external dbg break */
1037 xscale->external_debug_break = 1;
1038 xscale_read_dcsr(target);
1039
1040 target->debug_reason = DBG_REASON_DBGRQ;
1041 }
1042
1043 return ERROR_OK;
1044 }
1045
1046 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1047 {
1048 struct xscale_common *xscale = target_to_xscale(target);
1049 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1050 int retval;
1051
1052 if (xscale->ibcr0_used) {
1053 struct breakpoint *ibcr0_bp =
1054 breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1055
1056 if (ibcr0_bp)
1057 xscale_unset_breakpoint(target, ibcr0_bp);
1058 else {
1059 LOG_ERROR(
1060 "BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1061 exit(-1);
1062 }
1063 }
1064
1065 retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1066 if (retval != ERROR_OK)
1067 return retval;
1068
1069 return ERROR_OK;
1070 }
1071
1072 static int xscale_disable_single_step(struct target *target)
1073 {
1074 struct xscale_common *xscale = target_to_xscale(target);
1075 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1076 int retval;
1077
1078 retval = xscale_set_reg_u32(ibcr0, 0x0);
1079 if (retval != ERROR_OK)
1080 return retval;
1081
1082 return ERROR_OK;
1083 }
1084
1085 static void xscale_enable_watchpoints(struct target *target)
1086 {
1087 struct watchpoint *watchpoint = target->watchpoints;
1088
1089 while (watchpoint) {
1090 if (watchpoint->set == 0)
1091 xscale_set_watchpoint(target, watchpoint);
1092 watchpoint = watchpoint->next;
1093 }
1094 }
1095
1096 static void xscale_enable_breakpoints(struct target *target)
1097 {
1098 struct breakpoint *breakpoint = target->breakpoints;
1099
1100 /* set any pending breakpoints */
1101 while (breakpoint) {
1102 if (breakpoint->set == 0)
1103 xscale_set_breakpoint(target, breakpoint);
1104 breakpoint = breakpoint->next;
1105 }
1106 }
1107
1108 static void xscale_free_trace_data(struct xscale_common *xscale)
1109 {
1110 struct xscale_trace_data *td = xscale->trace.data;
1111 while (td) {
1112 struct xscale_trace_data *next_td = td->next;
1113 if (td->entries)
1114 free(td->entries);
1115 free(td);
1116 td = next_td;
1117 }
1118 xscale->trace.data = NULL;
1119 }
1120
1121 static int xscale_resume(struct target *target, int current,
1122 target_addr_t address, int handle_breakpoints, int debug_execution)
1123 {
1124 struct xscale_common *xscale = target_to_xscale(target);
1125 struct arm *arm = &xscale->arm;
1126 uint32_t current_pc;
1127 int retval;
1128 int i;
1129
1130 LOG_DEBUG("-");
1131
1132 if (target->state != TARGET_HALTED) {
1133 LOG_WARNING("target not halted");
1134 return ERROR_TARGET_NOT_HALTED;
1135 }
1136
1137 if (!debug_execution)
1138 target_free_all_working_areas(target);
1139
1140 /* update vector tables */
1141 retval = xscale_update_vectors(target);
1142 if (retval != ERROR_OK)
1143 return retval;
1144
1145 /* current = 1: continue on current pc, otherwise continue at <address> */
1146 if (!current)
1147 buf_set_u32(arm->pc->value, 0, 32, address);
1148
1149 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1150
1151 /* if we're at the reset vector, we have to simulate the branch */
1152 if (current_pc == 0x0) {
1153 arm_simulate_step(target, NULL);
1154 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1155 }
1156
1157 /* the front-end may request us not to handle breakpoints */
1158 if (handle_breakpoints) {
1159 struct breakpoint *breakpoint;
1160 breakpoint = breakpoint_find(target,
1161 buf_get_u32(arm->pc->value, 0, 32));
1162 if (breakpoint != NULL) {
1163 uint32_t next_pc;
1164 enum trace_mode saved_trace_mode;
1165
1166 /* there's a breakpoint at the current PC, we have to step over it */
1167 LOG_DEBUG("unset breakpoint at " TARGET_ADDR_FMT "",
1168 breakpoint->address);
1169 xscale_unset_breakpoint(target, breakpoint);
1170
1171 /* calculate PC of next instruction */
1172 retval = arm_simulate_step(target, &next_pc);
1173 if (retval != ERROR_OK) {
1174 uint32_t current_opcode;
1175 target_read_u32(target, current_pc, &current_opcode);
1176 LOG_ERROR(
1177 "BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
1178 current_opcode);
1179 }
1180
1181 LOG_DEBUG("enable single-step");
1182 xscale_enable_single_step(target, next_pc);
1183
1184 /* restore banked registers */
1185 retval = xscale_restore_banked(target);
1186 if (retval != ERROR_OK)
1187 return retval;
1188
1189 /* send resume request */
1190 xscale_send_u32(target, 0x30);
1191
1192 /* send CPSR */
1193 xscale_send_u32(target,
1194 buf_get_u32(arm->cpsr->value, 0, 32));
1195 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1196 buf_get_u32(arm->cpsr->value, 0, 32));
1197
1198 for (i = 7; i >= 0; i--) {
1199 /* send register */
1200 xscale_send_u32(target,
1201 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1202 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1203 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1204 }
1205
1206 /* send PC */
1207 xscale_send_u32(target,
1208 buf_get_u32(arm->pc->value, 0, 32));
1209 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1210 buf_get_u32(arm->pc->value, 0, 32));
1211
1212 /* disable trace data collection in xscale_debug_entry() */
1213 saved_trace_mode = xscale->trace.mode;
1214 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1215
1216 /* wait for and process debug entry */
1217 xscale_debug_entry(target);
1218
1219 /* re-enable trace buffer, if enabled previously */
1220 xscale->trace.mode = saved_trace_mode;
1221
1222 LOG_DEBUG("disable single-step");
1223 xscale_disable_single_step(target);
1224
1225 LOG_DEBUG("set breakpoint at " TARGET_ADDR_FMT "",
1226 breakpoint->address);
1227 xscale_set_breakpoint(target, breakpoint);
1228 }
1229 }
1230
1231 /* enable any pending breakpoints and watchpoints */
1232 xscale_enable_breakpoints(target);
1233 xscale_enable_watchpoints(target);
1234
1235 /* restore banked registers */
1236 retval = xscale_restore_banked(target);
1237 if (retval != ERROR_OK)
1238 return retval;
1239
1240 /* send resume request (command 0x30 or 0x31)
1241 * clean the trace buffer if it is to be enabled (0x62) */
1242 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
1243 if (xscale->trace.mode == XSCALE_TRACE_FILL) {
1244 /* If trace enabled in fill mode and starting collection of new set
1245 * of buffers, initialize buffer counter and free previous buffers */
1246 if (xscale->trace.fill_counter == 0) {
1247 xscale->trace.fill_counter = xscale->trace.buffer_fill;
1248 xscale_free_trace_data(xscale);
1249 }
1250 } else /* wrap mode; free previous buffer */
1251 xscale_free_trace_data(xscale);
1252
1253 xscale_send_u32(target, 0x62);
1254 xscale_send_u32(target, 0x31);
1255 } else
1256 xscale_send_u32(target, 0x30);
1257
1258 /* send CPSR */
1259 xscale_send_u32(target, buf_get_u32(arm->cpsr->value, 0, 32));
1260 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1261 buf_get_u32(arm->cpsr->value, 0, 32));
1262
1263 for (i = 7; i >= 0; i--) {
1264 /* send register */
1265 xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1266 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1267 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1268 }
1269
1270 /* send PC */
1271 xscale_send_u32(target, buf_get_u32(arm->pc->value, 0, 32));
1272 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1273 buf_get_u32(arm->pc->value, 0, 32));
1274
1275 target->debug_reason = DBG_REASON_NOTHALTED;
1276
1277 if (!debug_execution) {
1278 /* registers are now invalid */
1279 register_cache_invalidate(arm->core_cache);
1280 target->state = TARGET_RUNNING;
1281 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1282 } else {
1283 target->state = TARGET_DEBUG_RUNNING;
1284 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1285 }
1286
1287 LOG_DEBUG("target resumed");
1288
1289 return ERROR_OK;
1290 }
1291
/* Execute one instruction on the target and return with it halted again.
 *
 * Sequence: compute the next PC by simulation, arm IBCR0 on that PC,
 * flush dirty banked registers, send the resume command plus the full
 * CPSR/r0-r7/PC context to the debug handler, wait for debug re-entry,
 * then disarm the comparator.  The protocol order of the sends is fixed
 * by the debug handler and must not be rearranged.
 *
 * NOTE(review): the 'current', 'address' and 'handle_breakpoints'
 * parameters are unused here; they are handled by the caller
 * (xscale_step()).
 */
static int xscale_step_inner(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	retval = arm_simulate_step(target, &next_pc);
	if (retval != ERROR_OK) {
		/* simulation failed (e.g. unrecognized opcode); report and abort */
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(arm->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR(
			"BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
			current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	retval = xscale_enable_single_step(target, next_pc);
	if (retval != ERROR_OK)
		return retval;

	/* restore banked registers */
	retval = xscale_restore_banked(target);
	if (retval != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		retval = xscale_send_u32(target, 0x62);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_send_u32(target, 0x31);
		if (retval != ERROR_OK)
			return retval;
	} else {
		retval = xscale_send_u32(target, 0x30);
		if (retval != ERROR_OK)
			return retval;
	}

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(arm->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(arm->cpsr->value, 0, 32));

	/* registers are streamed r7 down to r0, as the handler expects */
	for (i = 7; i >= 0; i--) {
		/* send register */
		retval = xscale_send_u32(target,
				buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i,
			buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	retval = xscale_send_u32(target,
			buf_get_u32(arm->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
		buf_get_u32(arm->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	/* wait for and process debug entry */
	retval = xscale_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	retval = xscale_disable_single_step(target);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1386
1387 static int xscale_step(struct target *target, int current,
1388 target_addr_t address, int handle_breakpoints)
1389 {
1390 struct arm *arm = target_to_arm(target);
1391 struct breakpoint *breakpoint = NULL;
1392
1393 uint32_t current_pc;
1394 int retval;
1395
1396 if (target->state != TARGET_HALTED) {
1397 LOG_WARNING("target not halted");
1398 return ERROR_TARGET_NOT_HALTED;
1399 }
1400
1401 /* current = 1: continue on current pc, otherwise continue at <address> */
1402 if (!current)
1403 buf_set_u32(arm->pc->value, 0, 32, address);
1404
1405 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1406
1407 /* if we're at the reset vector, we have to simulate the step */
1408 if (current_pc == 0x0) {
1409 retval = arm_simulate_step(target, NULL);
1410 if (retval != ERROR_OK)
1411 return retval;
1412 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1413 LOG_DEBUG("current pc %" PRIx32, current_pc);
1414
1415 target->debug_reason = DBG_REASON_SINGLESTEP;
1416 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1417
1418 return ERROR_OK;
1419 }
1420
1421 /* the front-end may request us not to handle breakpoints */
1422 if (handle_breakpoints)
1423 breakpoint = breakpoint_find(target,
1424 buf_get_u32(arm->pc->value, 0, 32));
1425 if (breakpoint != NULL) {
1426 retval = xscale_unset_breakpoint(target, breakpoint);
1427 if (retval != ERROR_OK)
1428 return retval;
1429 }
1430
1431 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1432 if (retval != ERROR_OK)
1433 return retval;
1434
1435 if (breakpoint)
1436 xscale_set_breakpoint(target, breakpoint);
1437
1438 LOG_DEBUG("target stepped");
1439
1440 return ERROR_OK;
1441
1442 }
1443
/* Assert SRST on the target.
 *
 * While reset is held, the DCSR is programmed (Hold Reset, Halt Mode,
 * Trap Reset bits) so the core enters the debug handler instead of
 * booting when reset is later released by xscale_deassert_reset().
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	/* TODO: apply hw reset signal in not examined state */
	if (!(target_was_examined(target))) {
		LOG_WARNING("Reset is not asserted because the target is not examined.");
		LOG_WARNING("Use a reset button or power cycle the target.");
		return ERROR_TARGET_NOT_EXAMINED;
	}

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt) {
		int retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1491
/* Release SRST and bring the core up under debugger control.
 *
 * All hardware comparator bookkeeping and trace state is reset, the
 * debug handler binary is loaded into the mini-icache 32-byte line by
 * 32-byte line, the low/high exception vectors are installed, and the
 * Hold Reset bit is finally cleared so the core starts executing (and,
 * because Halt Mode + Trap Reset were set, drops into the handler).
 * The sleeps and runtest cycles are timing-critical; do not reorder.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* after reset, no hardware comparator is in use */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint) {
		if (breakpoint->type == BKPT_HARD)
			breakpoint->set = 0;
		breakpoint = breakpoint->next;
	}

	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale_free_trace_data(xscale);

	register_cache_invalidate(xscale->arm.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt) {
			uint32_t cache_line[8];
			unsigned i;

			/* one mini-icache line is 32 bytes (8 words) */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4) {
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad a partial final line with "mov r8, r8" no-ops */
			for (; i < 32; i += 4)
				cache_line[i / 4] = 0xe1a08008;

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0) {
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		}

		/* install the low and high exception vector cache lines */
		retval = xscale_load_ic(target, 0x0,
				xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
				xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt) {
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1619
/* Read a single core register from the target (target_type callback).
 *
 * Unimplemented stub.  NOTE(review): it logs an error but still returns
 * ERROR_OK, so callers cannot detect that no read actually happened.
 */
static int xscale_read_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1627
/* Write a single core register to the target (target_type callback).
 *
 * Unimplemented stub.  NOTE(review): it logs an error but still returns
 * ERROR_OK, so callers cannot detect that no write actually happened.
 */
static int xscale_write_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode, uint8_t *value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1635
1636 static int xscale_full_context(struct target *target)
1637 {
1638 struct arm *arm = target_to_arm(target);
1639
1640 uint32_t *buffer;
1641
1642 int i, j;
1643
1644 LOG_DEBUG("-");
1645
1646 if (target->state != TARGET_HALTED) {
1647 LOG_WARNING("target not halted");
1648 return ERROR_TARGET_NOT_HALTED;
1649 }
1650
1651 buffer = malloc(4 * 8);
1652
1653 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1654 * we can't enter User mode on an XScale (unpredictable),
1655 * but User shares registers with SYS
1656 */
1657 for (i = 1; i < 7; i++) {
1658 enum arm_mode mode = armv4_5_number_to_mode(i);
1659 bool valid = true;
1660 struct reg *r;
1661
1662 if (mode == ARM_MODE_USR)
1663 continue;
1664
1665 /* check if there are invalid registers in the current mode
1666 */
1667 for (j = 0; valid && j <= 16; j++) {
1668 if (!ARMV4_5_CORE_REG_MODE(arm->core_cache,
1669 mode, j).valid)
1670 valid = false;
1671 }
1672 if (valid)
1673 continue;
1674
1675 /* request banked registers */
1676 xscale_send_u32(target, 0x0);
1677
1678 /* send CPSR for desired bank mode */
1679 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1680
1681 /* get banked registers: r8 to r14; and SPSR
1682 * except in USR/SYS mode
1683 */
1684 if (mode != ARM_MODE_SYS) {
1685 /* SPSR */
1686 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1687 mode, 16);
1688
1689 xscale_receive(target, buffer, 8);
1690
1691 buf_set_u32(r->value, 0, 32, buffer[7]);
1692 r->dirty = false;
1693 r->valid = true;
1694 } else
1695 xscale_receive(target, buffer, 7);
1696
1697 /* move data from buffer to register cache */
1698 for (j = 8; j <= 14; j++) {
1699 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1700 mode, j);
1701
1702 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1703 r->dirty = false;
1704 r->valid = true;
1705 }
1706 }
1707
1708 free(buffer);
1709
1710 return ERROR_OK;
1711 }
1712
/* Flush dirty banked registers (r8-r14 and, where applicable, SPSR) back
 * to the target through the debug handler's "send banked registers"
 * command (0x1).  Modes with no dirty registers are skipped entirely.
 *
 * NOTE(review): the return values of xscale_send_u32() are ignored on
 * this path, so transport errors go undetected here.
 */
static int xscale_restore_banked(struct target *target)
{
	struct arm *arm = target_to_arm(target);

	int i, j;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written. Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++) {
		enum arm_mode mode = armv4_5_number_to_mode(i);
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++) {
			if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j).dirty)
				goto dirty;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS) {
			if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, 16).dirty)
				goto dirty;
		}

		/* there's nothing to flush for this mode */
		continue;

dirty:
		/* command 0x1: "send banked registers" */
		xscale_send_u32(target, 0x1);

		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}

		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, 16);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}
	}

	return ERROR_OK;
}
1781
1782 static int xscale_read_memory(struct target *target, target_addr_t address,
1783 uint32_t size, uint32_t count, uint8_t *buffer)
1784 {
1785 struct xscale_common *xscale = target_to_xscale(target);
1786 uint32_t *buf32;
1787 uint32_t i;
1788 int retval;
1789
1790 LOG_DEBUG("address: " TARGET_ADDR_FMT ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1791 address,
1792 size,
1793 count);
1794
1795 if (target->state != TARGET_HALTED) {
1796 LOG_WARNING("target not halted");
1797 return ERROR_TARGET_NOT_HALTED;
1798 }
1799
1800 /* sanitize arguments */
1801 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1802 return ERROR_COMMAND_SYNTAX_ERROR;
1803
1804 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1805 return ERROR_TARGET_UNALIGNED_ACCESS;
1806
1807 /* send memory read request (command 0x1n, n: access size) */
1808 retval = xscale_send_u32(target, 0x10 | size);
1809 if (retval != ERROR_OK)
1810 return retval;
1811
1812 /* send base address for read request */
1813 retval = xscale_send_u32(target, address);
1814 if (retval != ERROR_OK)
1815 return retval;
1816
1817 /* send number of requested data words */
1818 retval = xscale_send_u32(target, count);
1819 if (retval != ERROR_OK)
1820 return retval;
1821
1822 /* receive data from target (count times 32-bit words in host endianness) */
1823 buf32 = malloc(4 * count);
1824 retval = xscale_receive(target, buf32, count);
1825 if (retval != ERROR_OK) {
1826 free(buf32);
1827 return retval;
1828 }
1829
1830 /* extract data from host-endian buffer into byte stream */
1831 for (i = 0; i < count; i++) {
1832 switch (size) {
1833 case 4:
1834 target_buffer_set_u32(target, buffer, buf32[i]);
1835 buffer += 4;
1836 break;
1837 case 2:
1838 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1839 buffer += 2;
1840 break;
1841 case 1:
1842 *buffer++ = buf32[i] & 0xff;
1843 break;
1844 default:
1845 LOG_ERROR("invalid read size");
1846 return ERROR_COMMAND_SYNTAX_ERROR;
1847 }
1848 }
1849
1850 free(buf32);
1851
1852 /* examine DCSR, to see if Sticky Abort (SA) got set */
1853 retval = xscale_read_dcsr(target);
1854 if (retval != ERROR_OK)
1855 return retval;
1856 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1857 /* clear SA bit */
1858 retval = xscale_send_u32(target, 0x60);
1859 if (retval != ERROR_OK)
1860 return retval;
1861
1862 return ERROR_TARGET_DATA_ABORT;
1863 }
1864
1865 return ERROR_OK;
1866 }
1867
1868 static int xscale_read_phys_memory(struct target *target, target_addr_t address,
1869 uint32_t size, uint32_t count, uint8_t *buffer)
1870 {
1871 struct xscale_common *xscale = target_to_xscale(target);
1872
1873 /* with MMU inactive, there are only physical addresses */
1874 if (!xscale->armv4_5_mmu.mmu_enabled)
1875 return xscale_read_memory(target, address, size, count, buffer);
1876
1877 /** \todo: provide a non-stub implementation of this routine. */
1878 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1879 target_name(target), __func__);
1880 return ERROR_FAIL;
1881 }
1882
1883 static int xscale_write_memory(struct target *target, target_addr_t address,
1884 uint32_t size, uint32_t count, const uint8_t *buffer)
1885 {
1886 struct xscale_common *xscale = target_to_xscale(target);
1887 int retval;
1888
1889 LOG_DEBUG("address: " TARGET_ADDR_FMT ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1890 address,
1891 size,
1892 count);
1893
1894 if (target->state != TARGET_HALTED) {
1895 LOG_WARNING("target not halted");
1896 return ERROR_TARGET_NOT_HALTED;
1897 }
1898
1899 /* sanitize arguments */
1900 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1901 return ERROR_COMMAND_SYNTAX_ERROR;
1902
1903 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1904 return ERROR_TARGET_UNALIGNED_ACCESS;
1905
1906 /* send memory write request (command 0x2n, n: access size) */
1907 retval = xscale_send_u32(target, 0x20 | size);
1908 if (retval != ERROR_OK)
1909 return retval;
1910
1911 /* send base address for read request */
1912 retval = xscale_send_u32(target, address);
1913 if (retval != ERROR_OK)
1914 return retval;
1915
1916 /* send number of requested data words to be written*/
1917 retval = xscale_send_u32(target, count);
1918 if (retval != ERROR_OK)
1919 return retval;
1920
1921 /* extract data from host-endian buffer into byte stream */
1922 #if 0
1923 for (i = 0; i < count; i++) {
1924 switch (size) {
1925 case 4:
1926 value = target_buffer_get_u32(target, buffer);
1927 xscale_send_u32(target, value);
1928 buffer += 4;
1929 break;
1930 case 2:
1931 value = target_buffer_get_u16(target, buffer);
1932 xscale_send_u32(target, value);
1933 buffer += 2;
1934 break;
1935 case 1:
1936 value = *buffer;
1937 xscale_send_u32(target, value);
1938 buffer += 1;
1939 break;
1940 default:
1941 LOG_ERROR("should never get here");
1942 exit(-1);
1943 }
1944 }
1945 #endif
1946 retval = xscale_send(target, buffer, count, size);
1947 if (retval != ERROR_OK)
1948 return retval;
1949
1950 /* examine DCSR, to see if Sticky Abort (SA) got set */
1951 retval = xscale_read_dcsr(target);
1952 if (retval != ERROR_OK)
1953 return retval;
1954 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1955 /* clear SA bit */
1956 retval = xscale_send_u32(target, 0x60);
1957 if (retval != ERROR_OK)
1958 return retval;
1959
1960 LOG_ERROR("data abort writing memory");
1961 return ERROR_TARGET_DATA_ABORT;
1962 }
1963
1964 return ERROR_OK;
1965 }
1966
1967 static int xscale_write_phys_memory(struct target *target, target_addr_t address,
1968 uint32_t size, uint32_t count, const uint8_t *buffer)
1969 {
1970 struct xscale_common *xscale = target_to_xscale(target);
1971
1972 /* with MMU inactive, there are only physical addresses */
1973 if (!xscale->armv4_5_mmu.mmu_enabled)
1974 return xscale_write_memory(target, address, size, count, buffer);
1975
1976 /** \todo: provide a non-stub implementation of this routine. */
1977 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1978 target_name(target), __func__);
1979 return ERROR_FAIL;
1980 }
1981
1982 static int xscale_get_ttb(struct target *target, uint32_t *result)
1983 {
1984 struct xscale_common *xscale = target_to_xscale(target);
1985 uint32_t ttb;
1986 int retval;
1987
1988 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1989 if (retval != ERROR_OK)
1990 return retval;
1991 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1992
1993 *result = ttb;
1994
1995 return ERROR_OK;
1996 }
1997
1998 static int xscale_disable_mmu_caches(struct target *target, int mmu,
1999 int d_u_cache, int i_cache)
2000 {
2001 struct xscale_common *xscale = target_to_xscale(target);
2002 uint32_t cp15_control;
2003 int retval;
2004
2005 /* read cp15 control register */
2006 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2007 if (retval != ERROR_OK)
2008 return retval;
2009 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2010
2011 if (mmu)
2012 cp15_control &= ~0x1U;
2013
2014 if (d_u_cache) {
2015 /* clean DCache */
2016 retval = xscale_send_u32(target, 0x50);
2017 if (retval != ERROR_OK)
2018 return retval;
2019 retval = xscale_send_u32(target, xscale->cache_clean_address);
2020 if (retval != ERROR_OK)
2021 return retval;
2022
2023 /* invalidate DCache */
2024 retval = xscale_send_u32(target, 0x51);
2025 if (retval != ERROR_OK)
2026 return retval;
2027
2028 cp15_control &= ~0x4U;
2029 }
2030
2031 if (i_cache) {
2032 /* invalidate ICache */
2033 retval = xscale_send_u32(target, 0x52);
2034 if (retval != ERROR_OK)
2035 return retval;
2036 cp15_control &= ~0x1000U;
2037 }
2038
2039 /* write new cp15 control register */
2040 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2041 if (retval != ERROR_OK)
2042 return retval;
2043
2044 /* execute cpwait to ensure outstanding operations complete */
2045 retval = xscale_send_u32(target, 0x53);
2046 return retval;
2047 }
2048
2049 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2050 int d_u_cache, int i_cache)
2051 {
2052 struct xscale_common *xscale = target_to_xscale(target);
2053 uint32_t cp15_control;
2054 int retval;
2055
2056 /* read cp15 control register */
2057 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2058 if (retval != ERROR_OK)
2059 return retval;
2060 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2061
2062 if (mmu)
2063 cp15_control |= 0x1U;
2064
2065 if (d_u_cache)
2066 cp15_control |= 0x4U;
2067
2068 if (i_cache)
2069 cp15_control |= 0x1000U;
2070
2071 /* write new cp15 control register */
2072 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2073 if (retval != ERROR_OK)
2074 return retval;
2075
2076 /* execute cpwait to ensure outstanding operations complete */
2077 retval = xscale_send_u32(target, 0x53);
2078 return retval;
2079 }
2080
/* Install a previously registered breakpoint.
 *
 * Hardware breakpoints go into one of the two IBCR comparators (the low
 * bit of the written value arms the comparator); breakpoint->set records
 * which one (1 = IBCR0, 2 = IBCR1).  Software breakpoints save the
 * original instruction and overwrite it with the ARM or Thumb BKPT
 * opcode, then clean/invalidate the caches so the core fetches the new
 * instruction.
 *
 * NOTE(review): the results of xscale_set_reg_u32() and of the four
 * cache-maintenance xscale_send_u32() calls are ignored on this path.
 */
static int xscale_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* low bit arms the IBCR comparator */
		uint32_t value = breakpoint->address | 1;
		if (!xscale->ibcr0_used) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			breakpoint->set = 1;	/* breakpoint set on first breakpoint register */
		} else if (!xscale->ibcr1_used) {
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			breakpoint->set = 2;	/* breakpoint set on second breakpoint register */
		} else {/* bug: availability previously verified in xscale_add_breakpoint() */
			LOG_ERROR("BUG: no hardware comparator available");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
	} else if (breakpoint->type == BKPT_SOFT) {
		if (breakpoint->length == 4) {
			/* keep the original instruction in target endianness */
			retval = target_read_memory(target, breakpoint->address, 4, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness
			 *(arm7_9->arm_bkpt is host endian) */
			retval = target_write_u32(target, breakpoint->address,
					xscale->arm_bkpt);
			if (retval != ERROR_OK)
				return retval;
		} else {
			/* keep the original instruction in target endianness */
			retval = target_read_memory(target, breakpoint->address, 2, 1,
					breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
			/* write the bkpt instruction in target endianness
			 *(arm7_9->arm_bkpt is host endian) */
			retval = target_write_u16(target, breakpoint->address,
					xscale->thumb_bkpt);
			if (retval != ERROR_OK)
				return retval;
		}
		breakpoint->set = 1;

		/* flush the modified instruction through the caches */
		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}
2147
2148 static int xscale_add_breakpoint(struct target *target,
2149 struct breakpoint *breakpoint)
2150 {
2151 struct xscale_common *xscale = target_to_xscale(target);
2152
2153 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1)) {
2154 LOG_ERROR("no breakpoint unit available for hardware breakpoint");
2155 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2156 }
2157
2158 if ((breakpoint->length != 2) && (breakpoint->length != 4)) {
2159 LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2160 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2161 }
2162
2163 if (breakpoint->type == BKPT_HARD)
2164 xscale->ibcr_available--;
2165
2166 return xscale_set_breakpoint(target, breakpoint);
2167 }
2168
2169 static int xscale_unset_breakpoint(struct target *target,
2170 struct breakpoint *breakpoint)
2171 {
2172 int retval;
2173 struct xscale_common *xscale = target_to_xscale(target);
2174
2175 if (target->state != TARGET_HALTED) {
2176 LOG_WARNING("target not halted");
2177 return ERROR_TARGET_NOT_HALTED;
2178 }
2179
2180 if (!breakpoint->set) {
2181 LOG_WARNING("breakpoint not set");
2182 return ERROR_OK;
2183 }
2184
2185 if (breakpoint->type == BKPT_HARD) {
2186 if (breakpoint->set == 1) {
2187 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2188 xscale->ibcr0_used = 0;
2189 } else if (breakpoint->set == 2) {
2190 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2191 xscale->ibcr1_used = 0;
2192 }
2193 breakpoint->set = 0;
2194 } else {
2195 /* restore original instruction (kept in target endianness) */
2196 if (breakpoint->length == 4) {
2197 retval = target_write_memory(target, breakpoint->address, 4, 1,
2198 breakpoint->orig_instr);
2199 if (retval != ERROR_OK)
2200 return retval;
2201 } else {
2202 retval = target_write_memory(target, breakpoint->address, 2, 1,
2203 breakpoint->orig_instr);
2204 if (retval != ERROR_OK)
2205 return retval;
2206 }
2207 breakpoint->set = 0;
2208
2209 xscale_send_u32(target, 0x50); /* clean dcache */
2210 xscale_send_u32(target, xscale->cache_clean_address);
2211 xscale_send_u32(target, 0x51); /* invalidate dcache */
2212 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2213 }
2214
2215 return ERROR_OK;
2216 }
2217
2218 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2219 {
2220 struct xscale_common *xscale = target_to_xscale(target);
2221
2222 if (target->state != TARGET_HALTED) {
2223 LOG_ERROR("target not halted");
2224 return ERROR_TARGET_NOT_HALTED;
2225 }
2226
2227 if (breakpoint->set)
2228 xscale_unset_breakpoint(target, breakpoint);
2229
2230 if (breakpoint->type == BKPT_HARD)
2231 xscale->ibcr_available++;
2232
2233 return ERROR_OK;
2234 }
2235
2236 static int xscale_set_watchpoint(struct target *target,
2237 struct watchpoint *watchpoint)
2238 {
2239 struct xscale_common *xscale = target_to_xscale(target);
2240 uint32_t enable = 0;
2241 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2242 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2243
2244 if (target->state != TARGET_HALTED) {
2245 LOG_ERROR("target not halted");
2246 return ERROR_TARGET_NOT_HALTED;
2247 }
2248
2249 switch (watchpoint->rw) {
2250 case WPT_READ:
2251 enable = 0x3;
2252 break;
2253 case WPT_ACCESS:
2254 enable = 0x2;
2255 break;
2256 case WPT_WRITE:
2257 enable = 0x1;
2258 break;
2259 default:
2260 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2261 }
2262
2263 /* For watchpoint across more than one word, both DBR registers must
2264 be enlisted, with the second used as a mask. */
2265 if (watchpoint->length > 4) {
2266 if (xscale->dbr0_used || xscale->dbr1_used) {
2267 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2268 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2269 }
2270
2271 /* Write mask value to DBR1, based on the length argument.
2272 * Address bits ignored by the comparator are those set in mask. */
2273 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2274 watchpoint->length - 1);
2275 xscale->dbr1_used = 1;
2276 enable |= 0x100; /* DBCON[M] */
2277 }
2278
2279 if (!xscale->dbr0_used) {
2280 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2281 dbcon_value |= enable;
2282 xscale_set_reg_u32(dbcon, dbcon_value);
2283 watchpoint->set = 1;
2284 xscale->dbr0_used = 1;
2285 } else if (!xscale->dbr1_used) {
2286 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2287 dbcon_value |= enable << 2;
2288 xscale_set_reg_u32(dbcon, dbcon_value);
2289 watchpoint->set = 2;
2290 xscale->dbr1_used = 1;
2291 } else {
2292 LOG_ERROR("BUG: no hardware comparator available");
2293 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2294 }
2295
2296 return ERROR_OK;
2297 }
2298
2299 static int xscale_add_watchpoint(struct target *target,
2300 struct watchpoint *watchpoint)
2301 {
2302 struct xscale_common *xscale = target_to_xscale(target);
2303
2304 if (xscale->dbr_available < 1) {
2305 LOG_ERROR("no more watchpoint registers available");
2306 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2307 }
2308
2309 if (watchpoint->value)
2310 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2311
2312 /* check that length is a power of two */
2313 for (uint32_t len = watchpoint->length; len != 1; len /= 2) {
2314 if (len % 2) {
2315 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2316 return ERROR_COMMAND_ARGUMENT_INVALID;
2317 }
2318 }
2319
2320 if (watchpoint->length == 4) { /* single word watchpoint */
2321 xscale->dbr_available--;/* one DBR reg used */
2322 return ERROR_OK;
2323 }
2324
2325 /* watchpoints across multiple words require both DBR registers */
2326 if (xscale->dbr_available < 2) {
2327 LOG_ERROR("insufficient watchpoint registers available");
2328 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2329 }
2330
2331 if (watchpoint->length > watchpoint->address) {
2332 LOG_ERROR("xscale does not support watchpoints with length "
2333 "greater than address");
2334 return ERROR_COMMAND_ARGUMENT_INVALID;
2335 }
2336
2337 xscale->dbr_available = 0;
2338 return ERROR_OK;
2339 }
2340
2341 static int xscale_unset_watchpoint(struct target *target,
2342 struct watchpoint *watchpoint)
2343 {
2344 struct xscale_common *xscale = target_to_xscale(target);
2345 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2346 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2347
2348 if (target->state != TARGET_HALTED) {
2349 LOG_WARNING("target not halted");
2350 return ERROR_TARGET_NOT_HALTED;
2351 }
2352
2353 if (!watchpoint->set) {
2354 LOG_WARNING("breakpoint not set");
2355 return ERROR_OK;
2356 }
2357
2358 if (watchpoint->set == 1) {
2359 if (watchpoint->length > 4) {
2360 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2361 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2362 } else
2363 dbcon_value &= ~0x3;
2364
2365 xscale_set_reg_u32(dbcon, dbcon_value);
2366 xscale->dbr0_used = 0;
2367 } else if (watchpoint->set == 2) {
2368 dbcon_value &= ~0xc;
2369 xscale_set_reg_u32(dbcon, dbcon_value);
2370 xscale->dbr1_used = 0;
2371 }
2372 watchpoint->set = 0;
2373
2374 return ERROR_OK;
2375 }
2376
2377 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2378 {
2379 struct xscale_common *xscale = target_to_xscale(target);
2380
2381 if (target->state != TARGET_HALTED) {
2382 LOG_ERROR("target not halted");
2383 return ERROR_TARGET_NOT_HALTED;
2384 }
2385
2386 if (watchpoint->set)
2387 xscale_unset_watchpoint(target, watchpoint);
2388
2389 if (watchpoint->length > 4)
2390 xscale->dbr_available++;/* both DBR regs now available */
2391
2392 xscale->dbr_available++;
2393
2394 return ERROR_OK;
2395 }
2396
2397 static int xscale_get_reg(struct reg *reg)
2398 {
2399 struct xscale_reg *arch_info = reg->arch_info;
2400 struct target *target = arch_info->target;
2401 struct xscale_common *xscale = target_to_xscale(target);
2402
2403 /* DCSR, TX and RX are accessible via JTAG */
2404 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2405 return xscale_read_dcsr(arch_info->target);
2406 else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2407 /* 1 = consume register content */
2408 return xscale_read_tx(arch_info->target, 1);
2409 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2410 /* can't read from RX register (host -> debug handler) */
2411 return ERROR_OK;
2412 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2413 /* can't (explicitly) read from TXRXCTRL register */
2414 return ERROR_OK;
2415 } else {/* Other DBG registers have to be transfered by the debug handler
2416 * send CP read request (command 0x40) */
2417 xscale_send_u32(target, 0x40);
2418
2419 /* send CP register number */
2420 xscale_send_u32(target, arch_info->dbg_handler_number);
2421
2422 /* read register value */
2423 xscale_read_tx(target, 1);
2424 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2425
2426 reg->dirty = false;
2427 reg->valid = true;
2428 }
2429
2430 return ERROR_OK;
2431 }
2432
2433 static int xscale_set_reg(struct reg *reg, uint8_t *buf)
2434 {
2435 struct xscale_reg *arch_info = reg->arch_info;
2436 struct target *target = arch_info->target;
2437 struct xscale_common *xscale = target_to_xscale(target);
2438 uint32_t value = buf_get_u32(buf, 0, 32);
2439
2440 /* DCSR, TX and RX are accessible via JTAG */
2441 if (strcmp(reg->name, "XSCALE_DCSR") == 0) {
2442 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2443 return xscale_write_dcsr(arch_info->target, -1, -1);
2444 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2445 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2446 return xscale_write_rx(arch_info->target);
2447 } else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2448 /* can't write to TX register (debug-handler -> host) */
2449 return ERROR_OK;
2450 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2451 /* can't (explicitly) write to TXRXCTRL register */
2452 return ERROR_OK;
2453 } else {/* Other DBG registers have to be transfered by the debug handler
2454 * send CP write request (command 0x41) */
2455 xscale_send_u32(target, 0x41);
2456
2457 /* send CP register number */
2458 xscale_send_u32(target, arch_info->dbg_handler_number);
2459
2460 /* send CP register value */
2461 xscale_send_u32(target, value);
2462 buf_set_u32(reg->value, 0, 32, value);
2463 }
2464
2465 return ERROR_OK;
2466 }
2467
2468 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2469 {
2470 struct xscale_common *xscale = target_to_xscale(target);
2471 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2472 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2473
2474 /* send CP write request (command 0x41) */
2475 xscale_send_u32(target, 0x41);
2476
2477 /* send CP register number */
2478 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2479
2480 /* send CP register value */
2481 xscale_send_u32(target, value);
2482 buf_set_u32(dcsr->value, 0, 32, value);
2483
2484 return ERROR_OK;
2485 }
2486
2487 static int xscale_read_trace(struct target *target)
2488 {
2489 struct xscale_common *xscale = target_to_xscale(target);
2490 struct arm *arm = &xscale->arm;
2491 struct xscale_trace_data **trace_data_p;
2492
2493 /* 258 words from debug handler
2494 * 256 trace buffer entries
2495 * 2 checkpoint addresses
2496 */
2497 uint32_t trace_buffer[258];
2498 int is_address[256];
2499 int i, j;
2500 unsigned int num_checkpoints = 0;
2501
2502 if (target->state != TARGET_HALTED) {
2503 LOG_WARNING("target must be stopped to read trace data");
2504 return ERROR_TARGET_NOT_HALTED;
2505 }
2506
2507 /* send read trace buffer command (command 0x61) */
2508 xscale_send_u32(target, 0x61);
2509
2510 /* receive trace buffer content */
2511 xscale_receive(target, trace_buffer, 258);
2512
2513 /* parse buffer backwards to identify address entries */
2514 for (i = 255; i >= 0; i--) {
2515 /* also count number of checkpointed entries */
2516 if ((trace_buffer[i] & 0xe0) == 0xc0)
2517 num_checkpoints++;
2518
2519 is_address[i] = 0;
2520 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2521 ((trace_buffer[i] & 0xf0) == 0xd0)) {
2522 if (i > 0)
2523 is_address[--i] = 1;
2524 if (i > 0)
2525 is_address[--i] = 1;
2526 if (i > 0)
2527 is_address[--i] = 1;
2528 if (i > 0)
2529 is_address[--i] = 1;
2530 }
2531 }
2532
2533
2534 /* search first non-zero entry that is not part of an address */
2535 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2536 ;
2537
2538 if (j == 256) {
2539 LOG_DEBUG("no trace data collected");
2540 return ERROR_XSCALE_NO_TRACE_DATA;
2541 }
2542
2543 /* account for possible partial address at buffer start (wrap mode only) */
2544 if (is_address[0]) { /* first entry is address; complete set of 4? */
2545 i = 1;
2546 while (i < 4)
2547 if (!is_address[i++])
2548 break;
2549 if (i < 4)
2550 j += i; /* partial address; can't use it */
2551 }
2552
2553 /* if first valid entry is indirect branch, can't use that either (no address) */
2554 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2555 j++;
2556
2557 /* walk linked list to terminating entry */
2558 for (trace_data_p = &xscale->trace.data; *trace_data_p;
2559 trace_data_p = &(*trace_data_p)->next)
2560 ;
2561
2562 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2563 (*trace_data_p)->next = NULL;
2564 (*trace_data_p)->chkpt0 = trace_buffer[256];
2565 (*trace_data_p)->chkpt1 = trace_buffer[257];
2566 (*trace_data_p)->last_instruction = buf_get_u32(arm->pc->value, 0, 32);
2567 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2568 (*trace_data_p)->depth = 256 - j;
2569 (*trace_data_p)->num_checkpoints = num_checkpoints;
2570
2571 for (i = j; i < 256; i++) {
2572 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2573 if (is_address[i])
2574 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2575 else
2576 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2577 }
2578
2579 return ERROR_OK;
2580 }
2581
2582 static int xscale_read_instruction(struct target *target, uint32_t pc,
2583 struct arm_instruction *instruction)
2584 {
2585 struct xscale_common *const xscale = target_to_xscale(target);
2586 int i;
2587 int section = -1;
2588 size_t size_read;
2589 uint32_t opcode;
2590 int retval;
2591
2592 if (!xscale->trace.image)
2593 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2594
2595 /* search for the section the current instruction belongs to */
2596 for (i = 0; i < xscale->trace.image->num_sections; i++) {
2597 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2598 (xscale->trace.image->sections[i].base_address +
2599 xscale->trace.image->sections[i].size > pc)) {
2600 section = i;
2601 break;
2602 }
2603 }
2604
2605 if (section == -1) {
2606 /* current instruction couldn't be found in the image */
2607 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2608 }
2609
2610 if (xscale->trace.core_state == ARM_STATE_ARM) {
2611 uint8_t buf[4];
2612 retval = image_read_section(xscale->trace.image, section,
2613 pc - xscale->trace.image->sections[section].base_address,
2614 4, buf, &size_read);
2615 if (retval != ERROR_OK) {
2616 LOG_ERROR("error while reading instruction");
2617 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2618 }
2619 opcode = target_buffer_get_u32(target, buf);
2620 arm_evaluate_opcode(opcode, pc, instruction);
2621 } else if (xscale->trace.core_state == ARM_STATE_THUMB) {
2622 uint8_t buf[2];
2623 retval = image_read_section(xscale->trace.image, section,
2624 pc - xscale->trace.image->sections[section].base_address,
2625 2, buf, &size_read);
2626 if (retval != ERROR_OK) {
2627 LOG_ERROR("error while reading instruction");
2628 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2629 }
2630 opcode = target_buffer_get_u16(target, buf);
2631 thumb_evaluate_opcode(opcode, pc, instruction);
2632 } else {
2633 LOG_ERROR("BUG: unknown core state encountered");
2634 exit(-1);
2635 }
2636
2637 return ERROR_OK;
2638 }
2639
2640 /* Extract address encoded into trace data.
2641 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2642 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2643 int i, uint32_t *target)
2644 {
2645 /* if there are less than four entries prior to the indirect branch message
2646 * we can't extract the address */
2647 if (i < 4)
2648 *target = 0;
2649 else {
2650 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2651 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2652 }
2653 }
2654
2655 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2656 struct arm_instruction *instruction,
2657 struct command_invocation *cmd)
2658 {
2659 int retval = xscale_read_instruction(target, pc, instruction);
2660 if (retval == ERROR_OK)
2661 command_print(cmd, "%s", instruction->text);
2662 else
2663 command_print(cmd, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2664 }
2665
2666 static int xscale_analyze_trace(struct target *target, struct command_invocation *cmd)
2667 {
2668 struct xscale_common *xscale = target_to_xscale(target);
2669 struct xscale_trace_data *trace_data = xscale->trace.data;
2670 int i, retval;
2671 uint32_t breakpoint_pc = 0;
2672 struct arm_instruction instruction;
2673 uint32_t current_pc = 0;/* initialized when address determined */
2674
2675 if (!xscale->trace.image)
2676 LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
2677
2678 /* loop for each trace buffer that was loaded from target */
2679 while (trace_data) {
2680 int chkpt = 0; /* incremented as checkpointed entries found */
2681 int j;
2682
2683 /* FIXME: set this to correct mode when trace buffer is first enabled */
2684 xscale->trace.core_state = ARM_STATE_ARM;
2685
2686 /* loop for each entry in this trace buffer */
2687 for (i = 0; i < trace_data->depth; i++) {
2688 int exception = 0;
2689 uint32_t chkpt_reg = 0x0;
2690 uint32_t branch_target = 0;
2691 int count;
2692
2693 /* trace entry type is upper nybble of 'message byte' */
2694 int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
2695
2696 /* Target addresses of indirect branches are written into buffer
2697 * before the message byte representing the branch. Skip past it */
2698 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2699 continue;
2700
2701 switch (trace_msg_type) {
2702 case 0: /* Exceptions */
2703 case 1:
2704 case 2:
2705 case 3:
2706 case 4:
2707 case 5:
2708 case 6:
2709 case 7:
2710 exception = (trace_data->entries[i].data & 0x70) >> 4;
2711
2712 /* FIXME: vector table may be at ffff0000 */
2713 branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
2714 break;
2715
2716 case 8: /* Direct Branch */
2717 break;
2718
2719 case 9: /* Indirect Branch */
2720 xscale_branch_address(trace_data, i, &branch_target);
2721 break;
2722
2723 case 13: /* Checkpointed Indirect Branch */
2724 xscale_branch_address(trace_data, i, &branch_target);
2725 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2726 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is
2727 *oldest */
2728 else
2729 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and
2730 *newest */
2731
2732 chkpt++;
2733 break;
2734
2735 case 12: /* Checkpointed Direct Branch */
2736 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2737 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is
2738 *oldest */
2739 else
2740 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and
2741 *newest */
2742
2743 /* if no current_pc, checkpoint will be starting point */
2744 if (current_pc == 0)
2745 branch_target = chkpt_reg;
2746
2747 chkpt++;
2748 break;
2749
2750 case 15:/* Roll-over */
2751 break;
2752
2753 default:/* Reserved */
2754 LOG_WARNING("trace is suspect: invalid trace message byte");
2755 continue;
2756
2757 }
2758
2759 /* If we don't have the current_pc yet, but we did get the branch target
2760 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
2761 * then we can start displaying instructions at the next iteration, with
2762 * branch_target as the starting point.
2763 */
2764 if (current_pc == 0) {
2765 current_pc = branch_target; /* remains 0 unless branch_target *obtained */
2766 continue;
2767 }
2768
2769 /* We have current_pc. Read and display the instructions from the image.
2770 * First, display count instructions (lower nybble of message byte). */
2771 count = trace_data->entries[i].data & 0x0f;
2772 for (j = 0; j < count; j++) {
2773 xscale_display_instruction(target, current_pc, &instruction,
2774 cmd);
2775 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2776 }
2777
2778 /* An additional instruction is implicitly added to count for
2779 * rollover and some exceptions: undef, swi, prefetch abort. */
2780 if ((trace_msg_type == 15) || (exception > 0 && exception < 4)) {
2781 xscale_display_instruction(target, current_pc, &instruction,
2782 cmd);
2783 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2784 }
2785
2786 if (trace_msg_type == 15) /* rollover */
2787 continue;
2788
2789 if (exception) {
2790 command_print(cmd, "--- exception %i ---", exception);
2791 continue;
2792 }
2793
2794 /* not exception or rollover; next instruction is a branch and is
2795 * not included in the count */
2796 xscale_display_instruction(target, current_pc, &instruction, cmd);
2797
2798 /* for direct branches, extract branch destination from instruction */
2799 if ((trace_msg_type == 8) || (trace_msg_type == 12)) {
2800 retval = xscale_read_instruction(target, current_pc, &instruction);
2801 if (retval == ERROR_OK)
2802 current_pc = instruction.info.b_bl_bx_blx.target_address;
2803 else
2804 current_pc = 0; /* branch destination unknown */
2805
2806 /* direct branch w/ checkpoint; can also get from checkpoint reg */
2807 if (trace_msg_type == 12) {
2808 if (current_pc == 0)
2809 current_pc = chkpt_reg;
2810 else if (current_pc != chkpt_reg) /* sanity check */
2811 LOG_WARNING("trace is suspect: checkpoint register "
2812 "inconsistent with adddress from image");
2813 }
2814
2815 if (current_pc == 0)
2816 command_print(cmd, "address unknown");
2817
2818 continue;
2819 }
2820
2821 /* indirect branch; the branch destination was read from trace buffer */
2822 if ((trace_msg_type == 9) || (trace_msg_type == 13)) {
2823 current_pc = branch_target;
2824
2825 /* sanity check (checkpoint reg is redundant) */
2826 if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
2827 LOG_WARNING("trace is suspect: checkpoint register "
2828 "inconsistent with address from trace buffer");
2829 }
2830
2831 } /* END: for (i = 0; i < trace_data->depth; i++) */
2832
2833 breakpoint_pc = trace_data->last_instruction; /* used below */
2834 trace_data = trace_data->next;
2835
2836 } /* END: while (trace_data) */
2837
2838 /* Finally... display all instructions up to the value of the pc when the
2839 * debug break occurred (saved when trace data was collected from target).
2840 * This is necessary because the trace only records execution branches and 16
2841 * consecutive instructions (rollovers), so last few typically missed.
2842 */
2843 if (current_pc == 0)
2844 return ERROR_OK;/* current_pc was never found */
2845
2846 /* how many instructions remaining? */
2847 int gap_count = (breakpoint_pc - current_pc) /
2848 (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
2849
2850 /* should never be negative or over 16, but verify */
2851 if (gap_count < 0 || gap_count > 16) {
2852 LOG_WARNING("trace is suspect: excessive gap at end of trace");
2853 return ERROR_OK;/* bail; large number or negative value no good */
2854 }
2855
2856 /* display remaining instructions */
2857 for (i = 0; i < gap_count; i++) {
2858 xscale_display_instruction(target, current_pc, &instruction, cmd);
2859 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2860 }
2861
2862 return ERROR_OK;
2863 }
2864
/* Accessor vtable used by every entry in the XScale debug register
 * cache (see xscale_build_reg_cache). */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2869
/* Build this target's register caches: the standard ARM core register
 * cache first, then a second cache chained behind it holding the XScale
 * debug registers (one entry per xscale_reg_arch_info template). */
static void xscale_build_reg_cache(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	/* one arch_info copy per cached register, all from one allocation */
	struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
	int i;
	int num_regs = ARRAY_SIZE(xscale_reg_arch_info);

	(*cache_p) = arm_build_reg_cache(target, arm);

	/* append the XScale-specific cache to the chain */
	(*cache_p)->next = malloc(sizeof(struct reg_cache));
	cache_p = &(*cache_p)->next;

	/* fill in values for the xscale reg cache */
	(*cache_p)->name = "XScale registers";
	(*cache_p)->next = NULL;
	(*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
	(*cache_p)->num_regs = num_regs;

	for (i = 0; i < num_regs; i++) {
		(*cache_p)->reg_list[i].name = xscale_reg_list[i];
		(*cache_p)->reg_list[i].value = calloc(4, 1);	/* 32-bit value buffer */
		(*cache_p)->reg_list[i].dirty = false;
		(*cache_p)->reg_list[i].valid = false;
		(*cache_p)->reg_list[i].size = 32;
		(*cache_p)->reg_list[i].arch_info = &arch_info[i];
		(*cache_p)->reg_list[i].type = &xscale_reg_type;
		/* copy the static template and bind it to this target */
		arch_info[i] = xscale_reg_arch_info[i];
		arch_info[i].target = target;
	}

	/* NOTE(review): malloc/calloc results are not checked; an OOM at
	 * init time would fault here. */
	xscale->reg_cache = (*cache_p);
}
2904
2905 static int xscale_init_target(struct command_context *cmd_ctx,
2906 struct target *target)
2907 {
2908 xscale_build_reg_cache(target);
2909 return ERROR_OK;
2910 }
2911
/* Initialize the xscale_common state for a newly created target:
 * variant detection, debug-handler placement, reset vectors, comparator
 * bookkeeping, trace state, and the ARM / MMU callback wiring.
 * Always returns ERROR_OK. */
static int xscale_init_arch_info(struct target *target,
	struct xscale_common *xscale, struct jtag_tap *tap)
{
	struct arm *arm;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	arm = &xscale->arm;

	/* store architecture specfic data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* PXA3xx with 11 bit IR shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler */
	/* reset vector branches to handler_address + 0x20; the -0x8 accounts
	 * for the ARM pipeline offset in B-instruction encoding */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* non-reset vectors default to a branch-to-self (0xfffffe offset) */
	for (i = 1; i <= 7; i++) {
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two IBCR instruction comparators and two DBR data comparators */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
		target_name(target));

	/* canonical ARM/Thumb software-breakpoint opcodes */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	/* trace facility starts disabled with no buffers loaded */
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale->trace.buffer_fill = 0;
	xscale->trace.fill_counter = 0;

	/* prepare ARMv4/5 specific information */
	arm->arch_info = xscale;
	arm->core_type = ARM_MODE_ANY;
	arm->read_core_reg = xscale_read_core_reg;
	arm->write_core_reg = xscale_write_core_reg;
	arm->full_context = xscale_full_context;

	arm_init_arch_info(target, arm);

	/* MMU/cache callbacks; ctype is filled in after cache probing */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
3001
3002 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3003 {
3004 struct xscale_common *xscale;
3005
3006 if (sizeof xscale_debug_handler > 0x800) {
3007 LOG_ERROR("debug_handler.bin: larger than 2kb");
3008 return ERROR_FAIL;
3009 }
3010
3011 xscale = calloc(1, sizeof(*xscale));
3012 if (!xscale)
3013 return ERROR_FAIL;
3014
3015 return xscale_init_arch_info(target, xscale, target->tap);
3016 }
3017
3018 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3019 {
3020 struct target *target = NULL;
3021 struct xscale_common *xscale;
3022 int retval;
3023 uint32_t handler_address;
3024
3025 if (CMD_ARGC < 2)
3026 return ERROR_COMMAND_SYNTAX_ERROR;
3027
3028 target = get_target(CMD_ARGV[0]);
3029 if (target == NULL) {
3030 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3031 return ERROR_FAIL;
3032 }
3033
3034 xscale = target_to_xscale(target);
3035 retval = xscale_verify_pointer(CMD, xscale);
3036 if (retval != ERROR_OK)
3037 return retval;
3038
3039 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3040
3041 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3042 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3043 xscale->handler_address = handler_address;
3044 else {
3045 LOG_ERROR(
3046 "xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3047 return ERROR_FAIL;
3048 }
3049
3050 return ERROR_OK;
3051 }
3052
3053 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3054 {
3055 struct target *target = NULL;
3056 struct xscale_common *xscale;
3057 int retval;
3058 uint32_t cache_clean_address;
3059
3060 if (CMD_ARGC < 2)
3061 return ERROR_COMMAND_SYNTAX_ERROR;
3062
3063 target = get_target(CMD_ARGV[0]);
3064 if (target == NULL) {
3065 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3066 return ERROR_FAIL;
3067 }
3068 xscale = target_to_xscale(target);
3069 retval = xscale_verify_pointer(CMD, xscale);
3070 if (retval != ERROR_OK)
3071 return retval;
3072
3073 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3074
3075 if (cache_clean_address & 0xffff)
3076 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3077 else
3078 xscale->cache_clean_address = cache_clean_address;
3079
3080 return ERROR_OK;
3081 }
3082
3083 COMMAND_HANDLER(xscale_handle_cache_info_command)
3084 {
3085 struct target *target = get_current_target(CMD_CTX);
3086 struct xscale_common *xscale = target_to_xscale(target);
3087 int retval;
3088
3089 retval = xscale_verify_pointer(CMD, xscale);
3090 if (retval != ERROR_OK)
3091 return retval;
3092
3093 return armv4_5_handle_cache_info_command(CMD, &xscale->armv4_5_mmu.armv4_5_cache);
3094 }
3095
3096 static int xscale_virt2phys(struct target *target,
3097 target_addr_t virtual, target_addr_t *physical)
3098 {
3099 struct xscale_common *xscale = target_to_xscale(target);
3100 uint32_t cb;
3101
3102 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3103 LOG_ERROR(xscale_not);
3104 return ERROR_TARGET_INVALID;
3105 }
3106
3107 uint32_t ret;
3108 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3109 virtual, &cb, &ret);
3110 if (retval != ERROR_OK)
3111 return retval;
3112 *physical = ret;
3113 return ERROR_OK;
3114 }
3115
3116 static int xscale_mmu(struct target *target, int *enabled)
3117 {
3118 struct xscale_common *xscale = target_to_xscale(target);
3119
3120 if (target->state != TARGET_HALTED) {
3121 LOG_ERROR("Target not halted");
3122 return ERROR_TARGET_INVALID;
3123 }
3124 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3125 return ERROR_OK;
3126 }
3127
3128 COMMAND_HANDLER(xscale_handle_mmu_command)
3129 {
3130 struct target *target = get_current_target(CMD_CTX);
3131 struct xscale_common *xscale = target_to_xscale(target);
3132 int retval;
3133
3134 retval = xscale_verify_pointer(CMD, xscale);
3135 if (retval != ERROR_OK)
3136 return retval;
3137
3138 if (target->state != TARGET_HALTED) {
3139 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
3140 return ERROR_OK;
3141 }
3142
3143 if (CMD_ARGC >= 1) {
3144 bool enable;
3145 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3146 if (enable)
3147 xscale_enable_mmu_caches(target, 1, 0, 0);
3148 else
3149 xscale_disable_mmu_caches(target, 1, 0, 0);
3150 xscale->armv4_5_mmu.mmu_enabled = enable;
3151 }
3152
3153 command_print(CMD, "mmu %s",
3154 (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3155
3156 return ERROR_OK;
3157 }
3158
3159 COMMAND_HANDLER(xscale_handle_idcache_command)
3160 {
3161 struct target *target = get_current_target(CMD_CTX);
3162 struct xscale_common *xscale = target_to_xscale(target);
3163
3164 int retval = xscale_verify_pointer(CMD, xscale);
3165 if (retval != ERROR_OK)
3166 return retval;
3167
3168 if (target->state != TARGET_HALTED) {
3169 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
3170 return ERROR_OK;
3171 }
3172
3173 bool icache = false;
3174 if (strcmp(CMD_NAME, "icache") == 0)
3175 icache = true;
3176 if (CMD_ARGC >= 1) {
3177 bool enable;
3178 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3179 if (icache) {
3180 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3181 if (enable)
3182 xscale_enable_mmu_caches(target, 0, 0, 1);
3183 else
3184 xscale_disable_mmu_caches(target, 0, 0, 1);
3185 } else {
3186 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3187 if (enable)
3188 xscale_enable_mmu_caches(target, 0, 1, 0);
3189 else
3190 xscale_disable_mmu_caches(target, 0, 1, 0);
3191 }
3192 }
3193
3194 bool enabled = icache ?
3195 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3196 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3197 const char *msg = enabled ? "enabled" : "disabled";
3198 command_print(CMD, "%s %s", CMD_NAME, msg);
3199
3200 return ERROR_OK;
3201 }
3202
/* Mapping from vector names (as accepted by the "vector_catch" command)
 * to the corresponding DCSR trap-enable bits. */
static const struct {
	char name[15];
	unsigned mask;
} vec_ids[] = {
	{ "fiq", DCSR_TF, },
	{ "irq", DCSR_TI, },
	{ "dabt", DCSR_TD, },
	{ "pabt", DCSR_TA, },
	{ "swi", DCSR_TS, },
	{ "undef", DCSR_TU, },
	{ "reset", DCSR_TR, },
};
3215
/*
 * "xscale vector_catch" command: set and/or display which exception
 * vectors trap into debug mode.  Arguments are vector names from
 * vec_ids[], or "all"/"none" as a shorthand.  The selected mask is
 * written into the DCSR trap bits; the final state is always printed.
 */
COMMAND_HANDLER(xscale_handle_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	uint32_t dcsr_value;
	uint32_t catch = 0;
	struct reg *dcsr_reg = &xscale->reg_cache->reg_list[XSCALE_DCSR];

	retval = xscale_verify_pointer(CMD, xscale);
	if (retval != ERROR_OK)
		return retval;

	dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
	if (CMD_ARGC > 0) {
		/* "all"/"none" are only recognized as a sole argument; they
		 * consume it (CMD_ARGC--) so the loop below sees nothing. */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = DCSR_TRAP_MASK;
				CMD_ARGC--;
			} else if (strcmp(CMD_ARGV[0], "none") == 0) {
				catch = 0;
				CMD_ARGC--;
			}
		}
		/* Consume the remaining arguments right-to-left, OR-ing in the
		 * mask bit for each named vector. */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name))
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			/* i ran off the end: the name matched no known vector */
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_ERROR("No vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
		/* Replace only the trap bits in the cached DCSR, then push the
		 * updated register to the target. */
		buf_set_u32(dcsr_reg->value, 0, 32,
			(buf_get_u32(dcsr_reg->value, 0, 32) & ~DCSR_TRAP_MASK) | catch);
		xscale_write_dcsr(target, -1, -1);
	}

	/* Report the (possibly updated) catch/ignore state of each vector. */
	dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%15s: %s", vec_ids[i].name,
			(dcsr_value & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
3266
3267
/*
 * "xscale vector_table" command: with no arguments, list the user-set
 * static vectors; with three arguments ('high'|'low', index, opcode),
 * store an instruction word for one entry of the mini-ICache vector
 * table.  Index 0 (reset) cannot be set; valid indices are 1..7.
 */
COMMAND_HANDLER(xscale_handle_vector_table_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int err = 0;
	int retval;

	retval = xscale_verify_pointer(CMD, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC == 0) {	/* print current settings */
		int idx;

		command_print(CMD, "active user-set static vectors:");
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_low_vectors_set & (1 << idx))
				command_print(CMD,
					"low %d: 0x%" PRIx32,
					idx,
					xscale->static_low_vectors[idx]);
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_high_vectors_set & (1 << idx))
				command_print(CMD,
					"high %d: 0x%" PRIx32,
					idx,
					xscale->static_high_vectors[idx]);
		return ERROR_OK;
	}

	if (CMD_ARGC != 3)
		err = 1;
	else {
		int idx;
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
		uint32_t vec;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);

		/* entry 0 is the reset vector; only 1..7 may be overridden */
		if (idx < 1 || idx >= 8)
			err = 1;

		if (!err && strcmp(CMD_ARGV[0], "low") == 0) {
			xscale->static_low_vectors_set |= (1<<idx);
			xscale->static_low_vectors[idx] = vec;
		} else if (!err && (strcmp(CMD_ARGV[0], "high") == 0)) {
			xscale->static_high_vectors_set |= (1<<idx);
			xscale->static_high_vectors[idx] = vec;
		} else
			err = 1;
	}

	if (err)
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
3324
3325
3326 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3327 {
3328 struct target *target = get_current_target(CMD_CTX);
3329 struct xscale_common *xscale = target_to_xscale(target);
3330 uint32_t dcsr_value;
3331 int retval;
3332
3333 retval = xscale_verify_pointer(CMD, xscale);
3334 if (retval != ERROR_OK)
3335 return retval;
3336
3337 if (target->state != TARGET_HALTED) {
3338 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
3339 return ERROR_OK;
3340 }
3341
3342 if (CMD_ARGC >= 1) {
3343 if (strcmp("enable", CMD_ARGV[0]) == 0)
3344 xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
3345 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3346 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3347 else
3348 return ERROR_COMMAND_SYNTAX_ERROR;
3349 }
3350
3351 if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3352 if (strcmp("fill", CMD_ARGV[1]) == 0) {
3353 int buffcount = 1; /* default */
3354 if (CMD_ARGC >= 3)
3355 COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
3356 if (buffcount < 1) { /* invalid */
3357 command_print(CMD, "fill buffer count must be > 0");
3358 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3359 return ERROR_COMMAND_SYNTAX_ERROR;
3360 }
3361 xscale->trace.buffer_fill = buffcount;
3362 xscale->trace.mode = XSCALE_TRACE_FILL;
3363 } else if (strcmp("wrap", CMD_ARGV[1]) == 0)
3364 xscale->trace.mode = XSCALE_TRACE_WRAP;
3365 else {
3366 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3367 return ERROR_COMMAND_SYNTAX_ERROR;
3368 }
3369 }
3370
3371 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3372 char fill_string[12];
3373 sprintf(fill_string, "fill %d", xscale->trace.buffer_fill);
3374 command_print(CMD, "trace buffer enabled (%s)",
3375 (xscale->trace.mode == XSCALE_TRACE_FILL)
3376 ? fill_string : "wrap");
3377 } else
3378 command_print(CMD, "trace buffer disabled");
3379
3380 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3381 if (xscale->trace.mode == XSCALE_TRACE_FILL)
3382 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3383 else
3384 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3385
3386 return ERROR_OK;
3387 }
3388
3389 COMMAND_HANDLER(xscale_handle_trace_image_command)
3390 {
3391 struct target *target = get_current_target(CMD_CTX);
3392 struct xscale_common *xscale = target_to_xscale(target);
3393 int retval;
3394
3395 if (CMD_ARGC < 1)
3396 return ERROR_COMMAND_SYNTAX_ERROR;
3397
3398 retval = xscale_verify_pointer(CMD, xscale);
3399 if (retval != ERROR_OK)
3400 return retval;
3401
3402 if (xscale->trace.image) {
3403 image_close(xscale->trace.image);
3404 free(xscale->trace.image);
3405 command_print(CMD, "previously loaded image found and closed");
3406 }
3407
3408 xscale->trace.image = malloc(sizeof(struct image));
3409 xscale->trace.image->base_address_set = 0;
3410 xscale->trace.image->start_address_set = 0;
3411
3412 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3413 if (CMD_ARGC >= 2) {
3414 xscale->trace.image->base_address_set = 1;
3415 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3416 } else
3417 xscale->trace.image->base_address_set = 0;
3418
3419 if (image_open(xscale->trace.image, CMD_ARGV[0],
3420 (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK) {
3421 free(xscale->trace.image);
3422 xscale->trace.image = NULL;
3423 return ERROR_OK;
3424 }
3425
3426 return ERROR_OK;
3427 }
3428
3429 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3430 {
3431 struct target *target = get_current_target(CMD_CTX);
3432 struct xscale_common *xscale = target_to_xscale(target);
3433 struct xscale_trace_data *trace_data;
3434 struct fileio *file;
3435 int retval;
3436
3437 retval = xscale_verify_pointer(CMD, xscale);
3438 if (retval != ERROR_OK)
3439 return retval;
3440
3441 if (target->state != TARGET_HALTED) {
3442 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
3443 return ERROR_OK;
3444 }
3445
3446 if (CMD_ARGC < 1)
3447 return ERROR_COMMAND_SYNTAX_ERROR;
3448
3449 trace_data = xscale->trace.data;
3450
3451 if (!trace_data) {
3452 command_print(CMD, "no trace data collected");
3453 return ERROR_OK;
3454 }
3455
3456 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3457 return ERROR_OK;
3458
3459 while (trace_data) {
3460 int i;
3461
3462 fileio_write_u32(file, trace_data->chkpt0);
3463 fileio_write_u32(file, trace_data->chkpt1);
3464 fileio_write_u32(file, trace_data->last_instruction);
3465 fileio_write_u32(file, trace_data->depth);
3466
3467 for (i = 0; i < trace_data->depth; i++)
3468 fileio_write_u32(file, trace_data->entries[i].data |
3469 ((trace_data->entries[i].type & 0xffff) << 16));
3470
3471 trace_data = trace_data->next;
3472 }
3473
3474 fileio_close(file);
3475
3476 return ERROR_OK;
3477 }
3478
3479 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3480 {
3481 struct target *target = get_current_target(CMD_CTX);
3482 struct xscale_common *xscale = target_to_xscale(target);
3483 int retval;
3484
3485 retval = xscale_verify_pointer(CMD, xscale);
3486 if (retval != ERROR_OK)
3487 return retval;
3488
3489 xscale_analyze_trace(target, CMD);
3490
3491 return ERROR_OK;
3492 }
3493
/*
 * "xscale cp15" command: read or write one of the CP15 registers that
 * are mirrored in the OpenOCD register cache.  With one argument the
 * register is read; with two, the value is written via the on-target
 * debug handler protocol (command 0x41 followed by register and value).
 */
COMMAND_HANDLER(xscale_handle_cp15)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}
	uint32_t reg_no = 0;
	struct reg *reg = NULL;
	if (CMD_ARGC > 0) {
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
		/*translate from xscale cp15 register no to openocd register*/
		switch (reg_no) {
			case 0:
				reg_no = XSCALE_MAINID;
				break;
			case 1:
				reg_no = XSCALE_CTRL;
				break;
			case 2:
				reg_no = XSCALE_TTB;
				break;
			case 3:
				reg_no = XSCALE_DAC;
				break;
			case 5:
				reg_no = XSCALE_FSR;
				break;
			case 6:
				reg_no = XSCALE_FAR;
				break;
			case 13:
				reg_no = XSCALE_PID;
				break;
			case 15:
				reg_no = XSCALE_CPACCESS;
				break;
			default:
				command_print(CMD, "invalid register number");
				return ERROR_COMMAND_SYNTAX_ERROR;
		}
		reg = &xscale->reg_cache->reg_list[reg_no];

	}
	if (CMD_ARGC == 1) {
		uint32_t value;

		/* read cp15 control register */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(CMD, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size),
			value);
	} else if (CMD_ARGC == 2) {
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);

		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);
	} else
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
3573
/* EXEC-mode commands in the "xscale" command group: cache and MMU
 * control, vector handling, tracing, and raw CP15 access. */
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
		.usage = "",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		/* same handler as "icache"; it dispatches on CMD_NAME */
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display mask of vectors "
			"that should trigger debug entry",
		.usage = "['all'|'none'|'fiq'|'irq'|'dabt'|'pabt'|'swi'|'undef'|'reset']",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
		.usage = "",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};
/* Commands available in any mode (configuration or exec), plus the
 * chained EXEC-mode command table above. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "<target> <address>",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level registration: generic ARM commands plus the "xscale"
 * command group defined above. */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.usage = "",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3692
/* Target-type descriptor binding the XScale implementation into the
 * OpenOCD target framework; generic ARM helpers are reused where the
 * core needs no special handling. */
struct target_type xscale_target = {
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)