XScale: context restore, cleanup/bugfix
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include "time_support.h"
37 #include "register.h"
38 #include "image.h"
39
40
41 /*
42 * Important XScale documents available as of October 2009 include:
43 *
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
48 *
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
56 *
57 * Chip-specific microarchitecture documents may also be useful.
58 */
59
60
61 /* forward declarations */
62 static int xscale_resume(struct target *, int current,
63 uint32_t address, int handle_breakpoints, int debug_execution);
64 static int xscale_debug_entry(struct target *);
65 static int xscale_restore_context(struct target *);
66 static int xscale_get_reg(struct reg *reg);
67 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
68 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
69 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
70 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_read_trace(struct target *);
72
73
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
76 *
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
81 */
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
84
/* Names of the XScale-specific (mostly cp15 debug-model) registers as
 * exposed through the register cache.  Order must match both
 * xscale_reg_arch_info[] below and the XSCALE_* register enum; the
 * trailing comments mark every tenth index. */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID",		/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",			/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",			/* 20 */
	"XSCALE_TXRXCTRL",
};
110
/* Per-register architecture info, parallel to xscale_reg_list[].  The
 * first member is the cp14/cp15 register number used by the debug
 * handler protocol; -1 marks registers that are not reached through
 * that path but accessed directly over JTAG (see trailing comments). */
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
	{-1, NULL}, /* TX accessed via JTAG */
	{-1, NULL}, /* RX accessed via JTAG */
	{-1, NULL}, /* TXRXCTRL implicit access via JTAG */
};
136
137 /* convenience wrapper to access XScale specific registers */
/* Convenience wrapper to write a host-order 32-bit value to an XScale
 * register through the generic xscale_set_reg() path.  Returns the
 * result of xscale_set_reg(). */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t scratch[4];

	buf_set_u32(scratch, 0, 32, value);
	return xscale_set_reg(reg, scratch);
}
146
/* Canonical message printed when an XScale command is used on some
 * other kind of target. */
static const char xscale_not[] = "target is not an XScale";

/* Guard for command handlers: check the common_magic tag to verify that
 * the target's private data really is an XScale before any
 * XScale-specific state is touched.  Prints xscale_not and returns
 * ERROR_TARGET_INVALID on mismatch, ERROR_OK otherwise. */
static int xscale_verify_pointer(struct command_context *cmd_ctx,
		struct xscale_common *xscale)
{
	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		command_print(cmd_ctx, xscale_not);
		return ERROR_TARGET_INVALID;
	}
	return ERROR_OK;
}
158
159 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
160 {
161 if (tap == NULL)
162 return ERROR_FAIL;
163
164 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
165 {
166 struct scan_field field;
167 uint8_t scratch[4];
168
169 memset(&field, 0, sizeof field);
170 field.tap = tap;
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
174
175 jtag_add_ir_scan(1, &field, jtag_get_end_state());
176 }
177
178 return ERROR_OK;
179 }
180
/* Read the Debug Control and Status Register over JTAG (SELDCSR scan
 * chain) into the register cache, then write the same value back so the
 * hold_rst / external_debug_break bits (carried in the 3-bit field0 of
 * the scan) take effect.  Returns ERROR_OK or a JTAG queue error. */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_DRPAUSE);
	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant);

	/* field0 carries the hold-reset and external-debug-break requests */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	/* 32-bit DCSR content lands directly in the register cache buffer */
	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_set_end_state(TAP_IDLE);

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
247
248
/* JTAG completion callback: convert the 32 bits scanned into the buffer
 * at @arg into a host-order uint32_t, in place, so the caller can read
 * it back as a plain integer. */
static void xscale_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;
	*((uint32_t *)in) = buf_get_u32(in, 0, 32);
}
254
255 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
256 {
257 if (num_words == 0)
258 return ERROR_INVALID_ARGUMENTS;
259
260 struct xscale_common *xscale = target_to_xscale(target);
261 int retval = ERROR_OK;
262 tap_state_t path[3];
263 struct scan_field fields[3];
264 uint8_t *field0 = malloc(num_words * 1);
265 uint8_t field0_check_value = 0x2;
266 uint8_t field0_check_mask = 0x6;
267 uint32_t *field1 = malloc(num_words * 4);
268 uint8_t field2_check_value = 0x0;
269 uint8_t field2_check_mask = 0x1;
270 int words_done = 0;
271 int words_scheduled = 0;
272 int i;
273
274 path[0] = TAP_DRSELECT;
275 path[1] = TAP_DRCAPTURE;
276 path[2] = TAP_DRSHIFT;
277
278 memset(&fields, 0, sizeof fields);
279
280 fields[0].tap = target->tap;
281 fields[0].num_bits = 3;
282 fields[0].check_value = &field0_check_value;
283 fields[0].check_mask = &field0_check_mask;
284
285 fields[1].tap = target->tap;
286 fields[1].num_bits = 32;
287
288 fields[2].tap = target->tap;
289 fields[2].num_bits = 1;
290 fields[2].check_value = &field2_check_value;
291 fields[2].check_mask = &field2_check_mask;
292
293 jtag_set_end_state(TAP_IDLE);
294 xscale_jtag_set_instr(target->tap,
295 XSCALE_DBGTX << xscale->xscale_variant);
296 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
297
298 /* repeat until all words have been collected */
299 int attempts = 0;
300 while (words_done < num_words)
301 {
302 /* schedule reads */
303 words_scheduled = 0;
304 for (i = words_done; i < num_words; i++)
305 {
306 fields[0].in_value = &field0[i];
307
308 jtag_add_pathmove(3, path);
309
310 fields[1].in_value = (uint8_t *)(field1 + i);
311
312 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
313
314 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
315
316 words_scheduled++;
317 }
318
319 if ((retval = jtag_execute_queue()) != ERROR_OK)
320 {
321 LOG_ERROR("JTAG error while receiving data from debug handler");
322 break;
323 }
324
325 /* examine results */
326 for (i = words_done; i < num_words; i++)
327 {
328 if (!(field0[0] & 1))
329 {
330 /* move backwards if necessary */
331 int j;
332 for (j = i; j < num_words - 1; j++)
333 {
334 field0[j] = field0[j + 1];
335 field1[j] = field1[j + 1];
336 }
337 words_scheduled--;
338 }
339 }
340 if (words_scheduled == 0)
341 {
342 if (attempts++==1000)
343 {
344 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
345 retval = ERROR_TARGET_TIMEOUT;
346 break;
347 }
348 }
349
350 words_done += words_scheduled;
351 }
352
353 for (i = 0; i < num_words; i++)
354 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
355
356 free(field1);
357
358 return retval;
359 }
360
/* Poll the debug handler's TX register (DBGTX chain) for up to one
 * second.  When @consume is set, the DR path goes Capture->Shift so a
 * successful read clears TX_READY; otherwise a Capture->Exit1->Pause
 * detour peeks at TX without consuming it.  On success the 32-bit value
 * is left in the XSCALE_TX cache entry.  Returns ERROR_OK, or
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when TX held no valid data (only
 * reachable in non-consume mode), or ERROR_TARGET_TIMEOUT. */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGTX << xscale->xscale_variant);

	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	/* field0 bit 0 is the TX-valid flag examined below */
	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		*/
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* stop polling unless we are consuming and TX is still empty */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
459
/* Write the cached XSCALE_RX value into the debug handler's RX register
 * (DBGRX chain).  Polls for up to one second until the handler has read
 * the previous word (RX_READ flag, bit 0 of field0, goes low), then
 * re-scans with the rx_valid bit (field2) set to latch the new word.
 * Returns ERROR_OK, ERROR_TARGET_TIMEOUT, or a JTAG queue error. */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	/* field2 is the rx_valid flag; kept 0 while polling, set to 1 for
	 * the final scan that hands the word to the debug handler */
	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
545
/* send count elements of size byte to the debug handler */
/* Stream @count elements of @size bytes (4, 2 or 1) from @buffer to the
 * debug handler over the DBGRX chain, converting each element from
 * target byte order to host order first.  Scans are queued with
 * jtag_add_dr_out() (out-only, no handshake polling per word) and
 * executed once at the end.  t[0]=0 is the 3-bit control field,
 * t[1] the payload word, t[2]=1 the rx_valid bit.  Returns ERROR_OK,
 * ERROR_INVALID_ARGUMENTS for a bad @size, or a JTAG queue error. */
static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t t[3];
	int bits[3];
	int retval;
	int done_count = 0;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant);

	bits[0]=3;
	t[0]=0;
	bits[1]=32;
	t[2]=1;
	bits[2]=1;
	int endianness = target->endianness;
	while (done_count++ < count)
	{
		switch (size)
		{
		case 4:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u32(buffer);
			} else
			{
				t[1]=be_to_h_u32(buffer);
			}
			break;
		case 2:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u16(buffer);
			} else
			{
				t[1]=be_to_h_u16(buffer);
			}
			break;
		case 1:
			t[1]=buffer[0];
			break;
		default:
			LOG_ERROR("BUG: size neither 4, 2 nor 1");
			return ERROR_INVALID_ARGUMENTS;
		}
		jtag_add_dr_out(target->tap,
				3,
				bits,
				t,
				jtag_set_end_state(TAP_IDLE));
		buffer += size;
	}

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
611
612 static int xscale_send_u32(struct target *target, uint32_t value)
613 {
614 struct xscale_common *xscale = target_to_xscale(target);
615
616 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
617 return xscale_write_rx(target);
618 }
619
/* Write the cached DCSR value to the target over the SELDCSR chain.
 * @hold_rst / @ext_dbg_brk update the corresponding sticky flags when
 * not -1 (pass -1 to leave them unchanged); both are also carried in
 * the 3-bit field0 of the scan.  Marks the cache entry clean on
 * success.  Returns ERROR_OK or a JTAG queue error. */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant);

	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
679
/* Even parity of a 32-bit word: 0 when the number of set bits is even,
 * 1 when odd.  The word is XOR-folded down to a nibble, which indexes
 * the 16-entry parity lookup table packed into the constant 0x6996. */
static unsigned int parity (unsigned int v)
{
	unsigned int folded = v;

	folded ^= folded >> 16;
	folded ^= folded >> 8;
	folded ^= folded >> 4;
	folded &= 0xf;

	return (0x6996 >> folded) & 1;
}
691
/* Load one 32-byte line (8 instructions in @buffer) into the mini
 * instruction cache at virtual address @va via the LDIC JTAG function:
 * first a scan with the 6-bit load-mini-icache command plus the 27-bit
 * line address, then one scan per word carrying 32 instruction bits and
 * an even-parity bit.  Returns the result of executing the JTAG queue. */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
			XSCALE_LDIC << xscale->xscale_variant);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;	/* reused as the parity bit */

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(2, fields, jtag_get_end_state());
	}

	return jtag_execute_queue();
}
748
/* Queue an LDIC "invalidate IC line" operation for the cache line
 * containing virtual address @va.  Note: only queues the scan — the
 * caller is responsible for executing the JTAG queue (e.g. via a
 * following xscale_load_ic()).  Always returns ERROR_OK. */
static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	struct scan_field fields[2];

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
			XSCALE_LDIC << xscale->xscale_variant);

	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	return ERROR_OK;
}
780
781 static int xscale_update_vectors(struct target *target)
782 {
783 struct xscale_common *xscale = target_to_xscale(target);
784 int i;
785 int retval;
786
787 uint32_t low_reset_branch, high_reset_branch;
788
789 for (i = 1; i < 8; i++)
790 {
791 /* if there's a static vector specified for this exception, override */
792 if (xscale->static_high_vectors_set & (1 << i))
793 {
794 xscale->high_vectors[i] = xscale->static_high_vectors[i];
795 }
796 else
797 {
798 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
799 if (retval == ERROR_TARGET_TIMEOUT)
800 return retval;
801 if (retval != ERROR_OK)
802 {
803 /* Some of these reads will fail as part of normal execution */
804 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
805 }
806 }
807 }
808
809 for (i = 1; i < 8; i++)
810 {
811 if (xscale->static_low_vectors_set & (1 << i))
812 {
813 xscale->low_vectors[i] = xscale->static_low_vectors[i];
814 }
815 else
816 {
817 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
818 if (retval == ERROR_TARGET_TIMEOUT)
819 return retval;
820 if (retval != ERROR_OK)
821 {
822 /* Some of these reads will fail as part of normal execution */
823 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
824 }
825 }
826 }
827
828 /* calculate branches to debug handler */
829 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
830 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
831
832 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
833 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
834
835 /* invalidate and load exception vectors in mini i-cache */
836 xscale_invalidate_ic_line(target, 0x0);
837 xscale_invalidate_ic_line(target, 0xffff0000);
838
839 xscale_load_ic(target, 0x0, xscale->low_vectors);
840 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
841
842 return ERROR_OK;
843 }
844
/* Print a one-shot summary of the halted target's state to the user:
 * core state, debug reason (plus XScale-specific reset / trace-full
 * annotation), current mode, CPSR, PC, and MMU / cache enables as read
 * at debug entry.  Returns ERROR_OK, or ERROR_INVALID_ARGUMENTS when
 * the embedded ARM struct's magic does not match. */
static int xscale_arch_state(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;

	static const char *state[] =
	{
		"disabled", "enabled"
	};

	/* indexed by enum xscale_debug_reason (generic/reset/trace-full) */
	static const char *arch_dbg_reason[] =
	{
		"", "\n(processor reset)", "\n(trace buffer full)"
	};

	if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
	{
		LOG_ERROR("BUG: called for a non-ARMv4/5 target");
		return ERROR_INVALID_ARGUMENTS;
	}

	LOG_USER("target halted in %s state due to %s, current mode: %s\n"
			"cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
			"MMU: %s, D-Cache: %s, I-Cache: %s"
			"%s",
			 armv4_5_state_strings[armv4_5->core_state],
			 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
			 arm_mode_name(armv4_5->core_mode),
			 buf_get_u32(armv4_5->cpsr->value, 0, 32),
			 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
			 state[xscale->armv4_5_mmu.mmu_enabled],
			 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
			 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
			 arch_dbg_reason[xscale->arch_debug_reason]);

	return ERROR_OK;
}
882
/* Periodic poll hook: while the target is (debug-)running, probe the TX
 * register without blocking.  Data in TX means the core entered debug
 * state, so process debug entry; a hard TX error makes us report HALTED
 * anyway so GDB stays responsive and a reset can be issued.  Fires the
 * appropriate halted event unless debug entry resumed the core. */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be perfomed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
922
/* Handle entry into debug state: receive the register dump the debug
 * handler streams over TX (r0, pc, r1-r7, cpsr, then banked r8-r14 and
 * spsr where the mode has one), fill the register caches, decode the
 * DCSR method-of-entry bits into a debug reason, apply the PC fixup for
 * the vector trap, and refresh cached MMU/cache state.  While trace
 * collection is still in progress, resumes the core instead of staying
 * halted. */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
	armv4_5->core_cache->reg_list[15].dirty = 1;
	armv4_5->core_cache->reg_list[15].valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(armv4_5, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* a bogus mode value means the word stream is out of sync */
	if (!is_arm_mode(armv4_5->core_mode))
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
			arm_mode_name(armv4_5->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (armv4_5->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
		armv4_5->spsr->dirty = false;
		armv4_5->spsr->valid = true;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++)
	{
		struct reg *r = arm_reg_current(armv4_5, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* examine debug reason: DCSR method-of-entry field, bits [4:2] */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}
1092
1093 static int xscale_halt(struct target *target)
1094 {
1095 struct xscale_common *xscale = target_to_xscale(target);
1096
1097 LOG_DEBUG("target->state: %s",
1098 target_state_name(target));
1099
1100 if (target->state == TARGET_HALTED)
1101 {
1102 LOG_DEBUG("target was already halted");
1103 return ERROR_OK;
1104 }
1105 else if (target->state == TARGET_UNKNOWN)
1106 {
1107 /* this must not happen for a xscale target */
1108 LOG_ERROR("target was in unknown state when halt was requested");
1109 return ERROR_TARGET_INVALID;
1110 }
1111 else if (target->state == TARGET_RESET)
1112 {
1113 LOG_DEBUG("target->state == TARGET_RESET");
1114 }
1115 else
1116 {
1117 /* assert external dbg break */
1118 xscale->external_debug_break = 1;
1119 xscale_read_dcsr(target);
1120
1121 target->debug_reason = DBG_REASON_DBGRQ;
1122 }
1123
1124 return ERROR_OK;
1125 }
1126
1127 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1128 {
1129 struct xscale_common *xscale = target_to_xscale(target);
1130 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1131 int retval;
1132
1133 if (xscale->ibcr0_used)
1134 {
1135 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1136
1137 if (ibcr0_bp)
1138 {
1139 xscale_unset_breakpoint(target, ibcr0_bp);
1140 }
1141 else
1142 {
1143 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1144 exit(-1);
1145 }
1146 }
1147
1148 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1149 return retval;
1150
1151 return ERROR_OK;
1152 }
1153
1154 static int xscale_disable_single_step(struct target *target)
1155 {
1156 struct xscale_common *xscale = target_to_xscale(target);
1157 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1158 int retval;
1159
1160 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1161 return retval;
1162
1163 return ERROR_OK;
1164 }
1165
1166 static void xscale_enable_watchpoints(struct target *target)
1167 {
1168 struct watchpoint *watchpoint = target->watchpoints;
1169
1170 while (watchpoint)
1171 {
1172 if (watchpoint->set == 0)
1173 xscale_set_watchpoint(target, watchpoint);
1174 watchpoint = watchpoint->next;
1175 }
1176 }
1177
1178 static void xscale_enable_breakpoints(struct target *target)
1179 {
1180 struct breakpoint *breakpoint = target->breakpoints;
1181
1182 /* set any pending breakpoints */
1183 while (breakpoint)
1184 {
1185 if (breakpoint->set == 0)
1186 xscale_set_breakpoint(target, breakpoint);
1187 breakpoint = breakpoint->next;
1188 }
1189 }
1190
/* Resume execution at the current PC (current = 1) or at <address>.
 *
 * On XScale halt-mode debug, "resume" means handing the complete visible
 * register state back to the on-chip debug handler in a fixed protocol
 * order: resume command (0x30, or 0x62+0x31 when tracing), then CPSR,
 * then r7 down to r0, then the PC.  Do not reorder these sends.
 *
 * If a breakpoint sits on the resume address it is first stepped over
 * using the IBCR0 single-step mechanism, which requires a full
 * resume/debug-entry round trip of its own before the real resume.
 */
static int xscale_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct breakpoint *breakpoint = target->breakpoints;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* working areas (e.g. flash algorithms) are only kept while
	 * debug-executing; a real resume gives the memory back */
	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
		{
			uint32_t next_pc;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			}

			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			xscale_restore_context(target);

			/* send resume request (command 0x30 or 0x31)
			 * clean the trace buffer if it is to be enabled (0x62) */
			if (xscale->trace.buffer_enabled)
			{
				xscale_send_u32(target, 0x62);
				xscale_send_u32(target, 0x31);
			}
			else
				xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));

			/* handler expects r7 first, down to r0 */
			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

			/* wait for and process debug entry */
			/* NOTE(review): errors from this single-step round trip are
			 * ignored here, unlike in xscale_step_inner() — confirm whether
			 * the step-over path should propagate failures too. */
			xscale_debug_entry(target);

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	xscale_restore_context(target);

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* handler expects r7 first, down to r0 */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		register_cache_invalidate(armv4_5->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
1346
/* Execute exactly one instruction on the target.
 *
 * Uses IBCR0 as a one-shot hardware breakpoint on the simulated
 * next-PC, then performs a full resume (command 0x30/0x31, CPSR,
 * r7..r0, PC — order is fixed by the debug-handler protocol) and
 * waits for the resulting debug re-entry.
 *
 * Returns ERROR_OK, or the first failing step of the sequence.
 * Any breakpoint at the current PC must already have been removed
 * by the caller (see xscale_step()).
 */
static int xscale_step_inner(struct target *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_context(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
		if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
			return retval;

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* handler expects r7 first, down to r0 */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
		return retval;
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1428
1429 static int xscale_step(struct target *target, int current,
1430 uint32_t address, int handle_breakpoints)
1431 {
1432 struct arm *armv4_5 = target_to_armv4_5(target);
1433 struct breakpoint *breakpoint = target->breakpoints;
1434
1435 uint32_t current_pc;
1436 int retval;
1437
1438 if (target->state != TARGET_HALTED)
1439 {
1440 LOG_WARNING("target not halted");
1441 return ERROR_TARGET_NOT_HALTED;
1442 }
1443
1444 /* current = 1: continue on current pc, otherwise continue at <address> */
1445 if (!current)
1446 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1447
1448 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1449
1450 /* if we're at the reset vector, we have to simulate the step */
1451 if (current_pc == 0x0)
1452 {
1453 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1454 return retval;
1455 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1456
1457 target->debug_reason = DBG_REASON_SINGLESTEP;
1458 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1459
1460 return ERROR_OK;
1461 }
1462
1463 /* the front-end may request us not to handle breakpoints */
1464 if (handle_breakpoints)
1465 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1466 {
1467 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1468 return retval;
1469 }
1470
1471 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1472
1473 if (breakpoint)
1474 {
1475 xscale_set_breakpoint(target, breakpoint);
1476 }
1477
1478 LOG_DEBUG("target stepped");
1479
1480 return ERROR_OK;
1481
1482 }
1483
/* Assert SRST while arming the debug unit so the core traps into the
 * debug handler at reset release ("Hold Reset" + "Trap Reset" in DCSR).
 * The DCSR write must happen before SRST is pulled, and DCSR must be
 * deselected again (BYPASS) before reset — see the PXA27x note below.
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, 0x7f);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1525
/* Release SRST and re-establish the debug environment.
 *
 * After reset the mini-icache is assumed invalidated (see REVISIT note
 * below), so the debug handler binary is reloaded into it 32-byte cache
 * line by cache line, along with the low/high exception vector lines.
 * All on-chip breakpoint/watchpoint bookkeeping is cleared since the
 * hardware comparators lost their state.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* reset wiped the comparators: both IBCRs and both DBRs are free */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	register_cache_invalidate(xscale->armv4_5_common.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			/* one 32-byte (8-word) cache line per iteration */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad the final partial line (0xe1a08008 = "mov r8, r8") */
			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		/* vector-table cache lines are loaded separately */
		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1659
/* Stub: reading a single core register through the debug handler is not
 * implemented; callers currently get ERROR_OK with *r left untouched. */
static int xscale_read_core_reg(struct target *target, struct reg *r,
		int num, enum armv4_5_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1667
/* Stub: writing a single core register through the debug handler is not
 * implemented; callers currently get ERROR_OK and the value is dropped. */
static int xscale_write_core_reg(struct target *target, struct reg *r,
		int num, enum armv4_5_mode mode, uint32_t value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1675
1676 static int xscale_full_context(struct target *target)
1677 {
1678 struct arm *armv4_5 = target_to_armv4_5(target);
1679
1680 uint32_t *buffer;
1681
1682 int i, j;
1683
1684 LOG_DEBUG("-");
1685
1686 if (target->state != TARGET_HALTED)
1687 {
1688 LOG_WARNING("target not halted");
1689 return ERROR_TARGET_NOT_HALTED;
1690 }
1691
1692 buffer = malloc(4 * 8);
1693
1694 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1695 * we can't enter User mode on an XScale (unpredictable),
1696 * but User shares registers with SYS
1697 */
1698 for (i = 1; i < 7; i++)
1699 {
1700 enum armv4_5_mode mode = armv4_5_number_to_mode(i);
1701 bool valid = true;
1702 struct reg *r;
1703
1704 if (mode == ARMV4_5_MODE_USR)
1705 continue;
1706
1707 /* check if there are invalid registers in the current mode
1708 */
1709 for (j = 0; valid && j <= 16; j++)
1710 {
1711 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1712 mode, j).valid)
1713 valid = false;
1714 }
1715 if (valid)
1716 continue;
1717
1718 /* request banked registers */
1719 xscale_send_u32(target, 0x0);
1720
1721 /* send CPSR for desired bank mode */
1722 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1723
1724 /* get banked registers: r8 to r14; and SPSR
1725 * except in USR/SYS mode
1726 */
1727 if (mode != ARMV4_5_MODE_SYS) {
1728 /* SPSR */
1729 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1730 mode, 16);
1731
1732 xscale_receive(target, buffer, 8);
1733
1734 buf_set_u32(r->value, 0, 32, buffer[7]);
1735 r->dirty = false;
1736 r->valid = true;
1737 } else {
1738 xscale_receive(target, buffer, 7);
1739 }
1740
1741 /* move data from buffer to register cache */
1742 for (j = 8; j <= 14; j++)
1743 {
1744 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1745 mode, j);
1746
1747 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1748 r->dirty = false;
1749 r->valid = true;
1750 }
1751 }
1752
1753 free(buffer);
1754
1755 return ERROR_OK;
1756 }
1757
/* Write any dirty banked registers back to the target before resuming.
 *
 * For each mode with dirty state the debug-handler protocol is:
 * command 0x1 ("send banked registers"), a CPSR value selecting the
 * bank, then r8..r14 in order, then the SPSR for non-SYS modes.
 * That ordering is fixed by the handler — do not reorder the sends.
 * Unbanked r0..r7/PC/CPSR are transferred by the resume path instead.
 */
static int xscale_restore_context(struct target *target)
{
	struct arm *armv4_5 = target_to_armv4_5(target);

	int i, j;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written. Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++)
	{
		int dirty = 0;
		enum armv4_5_mode mode = armv4_5_number_to_mode(i);

		if (mode == ARMV4_5_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j).dirty)
				dirty = 1;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARMV4_5_MODE_SYS)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16).dirty)
				dirty = 1;
		}

		/* is there anything to flush for this mode? */
		if (dirty)
		{
			uint32_t tmp_cpsr;
			struct reg *r;

			/* command 0x1: "send banked registers" */
			xscale_send_u32(target, 0x1);

			tmp_cpsr = 0x0;
			tmp_cpsr |= mode;
			tmp_cpsr |= 0xc0; /* I/F bits */

			/* send CPSR for desired mode */
			xscale_send_u32(target, tmp_cpsr);

			/* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
			for (j = 8; j <= 14; j++)
			{
				r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
						mode, j);
				xscale_send_u32(target,
						buf_get_u32(r->value, 0, 32));
				r->dirty = false;
			}

			if (mode != ARMV4_5_MODE_SYS)
			{
				r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
						mode, 16);
				xscale_send_u32(target,
						buf_get_u32(r->value, 0, 32));
				r->dirty = false;
			}
		}
	}

	return ERROR_OK;
}
1838
1839 static int xscale_read_memory(struct target *target, uint32_t address,
1840 uint32_t size, uint32_t count, uint8_t *buffer)
1841 {
1842 struct xscale_common *xscale = target_to_xscale(target);
1843 uint32_t *buf32;
1844 uint32_t i;
1845 int retval;
1846
1847 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1848
1849 if (target->state != TARGET_HALTED)
1850 {
1851 LOG_WARNING("target not halted");
1852 return ERROR_TARGET_NOT_HALTED;
1853 }
1854
1855 /* sanitize arguments */
1856 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1857 return ERROR_INVALID_ARGUMENTS;
1858
1859 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1860 return ERROR_TARGET_UNALIGNED_ACCESS;
1861
1862 /* send memory read request (command 0x1n, n: access size) */
1863 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1864 return retval;
1865
1866 /* send base address for read request */
1867 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1868 return retval;
1869
1870 /* send number of requested data words */
1871 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1872 return retval;
1873
1874 /* receive data from target (count times 32-bit words in host endianness) */
1875 buf32 = malloc(4 * count);
1876 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1877 return retval;
1878
1879 /* extract data from host-endian buffer into byte stream */
1880 for (i = 0; i < count; i++)
1881 {
1882 switch (size)
1883 {
1884 case 4:
1885 target_buffer_set_u32(target, buffer, buf32[i]);
1886 buffer += 4;
1887 break;
1888 case 2:
1889 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1890 buffer += 2;
1891 break;
1892 case 1:
1893 *buffer++ = buf32[i] & 0xff;
1894 break;
1895 default:
1896 LOG_ERROR("invalid read size");
1897 return ERROR_INVALID_ARGUMENTS;
1898 }
1899 }
1900
1901 free(buf32);
1902
1903 /* examine DCSR, to see if Sticky Abort (SA) got set */
1904 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1905 return retval;
1906 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1907 {
1908 /* clear SA bit */
1909 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1910 return retval;
1911
1912 return ERROR_TARGET_DATA_ABORT;
1913 }
1914
1915 return ERROR_OK;
1916 }
1917
/* Stub: physical-address reads always fail on this target. */
static int xscale_read_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	/** \todo: provide a non-stub implementation of this routine. */
	LOG_ERROR("%s: %s is not implemented. Disable MMU?",
			target_name(target), __func__);
	return ERROR_FAIL;
}
1926
1927 static int xscale_write_memory(struct target *target, uint32_t address,
1928 uint32_t size, uint32_t count, uint8_t *buffer)
1929 {
1930 struct xscale_common *xscale = target_to_xscale(target);
1931 int retval;
1932
1933 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1934
1935 if (target->state != TARGET_HALTED)
1936 {
1937 LOG_WARNING("target not halted");
1938 return ERROR_TARGET_NOT_HALTED;
1939 }
1940
1941 /* sanitize arguments */
1942 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1943 return ERROR_INVALID_ARGUMENTS;
1944
1945 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1946 return ERROR_TARGET_UNALIGNED_ACCESS;
1947
1948 /* send memory write request (command 0x2n, n: access size) */
1949 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1950 return retval;
1951
1952 /* send base address for read request */
1953 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1954 return retval;
1955
1956 /* send number of requested data words to be written*/
1957 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1958 return retval;
1959
1960 /* extract data from host-endian buffer into byte stream */
1961 #if 0
1962 for (i = 0; i < count; i++)
1963 {
1964 switch (size)
1965 {
1966 case 4:
1967 value = target_buffer_get_u32(target, buffer);
1968 xscale_send_u32(target, value);
1969 buffer += 4;
1970 break;
1971 case 2:
1972 value = target_buffer_get_u16(target, buffer);
1973 xscale_send_u32(target, value);
1974 buffer += 2;
1975 break;
1976 case 1:
1977 value = *buffer;
1978 xscale_send_u32(target, value);
1979 buffer += 1;
1980 break;
1981 default:
1982 LOG_ERROR("should never get here");
1983 exit(-1);
1984 }
1985 }
1986 #endif
1987 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1988 return retval;
1989
1990 /* examine DCSR, to see if Sticky Abort (SA) got set */
1991 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1992 return retval;
1993 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1994 {
1995 /* clear SA bit */
1996 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1997 return retval;
1998
1999 return ERROR_TARGET_DATA_ABORT;
2000 }
2001
2002 return ERROR_OK;
2003 }
2004
/* Stub: physical-address writes always fail on this target. */
static int xscale_write_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	/** \todo: provide a non-stub implementation of this routine. */
	LOG_ERROR("%s: %s is not implemented. Disable MMU?",
			target_name(target), __func__);
	return ERROR_FAIL;
}
2013
/* Bulk writes are simply delegated to the word-sized (size = 4) write path. */
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
2019
2020 static uint32_t xscale_get_ttb(struct target *target)
2021 {
2022 struct xscale_common *xscale = target_to_xscale(target);
2023 uint32_t ttb;
2024
2025 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2026 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2027
2028 return ttb;
2029 }
2030
2031 static void xscale_disable_mmu_caches(struct target *target, int mmu,
2032 int d_u_cache, int i_cache)
2033 {
2034 struct xscale_common *xscale = target_to_xscale(target);
2035 uint32_t cp15_control;
2036
2037 /* read cp15 control register */
2038 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2039 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2040
2041 if (mmu)
2042 cp15_control &= ~0x1U;
2043
2044 if (d_u_cache)
2045 {
2046 /* clean DCache */
2047 xscale_send_u32(target, 0x50);
2048 xscale_send_u32(target, xscale->cache_clean_address);
2049
2050 /* invalidate DCache */
2051 xscale_send_u32(target, 0x51);
2052
2053 cp15_control &= ~0x4U;
2054 }
2055
2056 if (i_cache)
2057 {
2058 /* invalidate ICache */
2059 xscale_send_u32(target, 0x52);
2060 cp15_control &= ~0x1000U;
2061 }
2062
2063 /* write new cp15 control register */
2064 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2065
2066 /* execute cpwait to ensure outstanding operations complete */
2067 xscale_send_u32(target, 0x53);
2068 }
2069
2070 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2071 int d_u_cache, int i_cache)
2072 {
2073 struct xscale_common *xscale = target_to_xscale(target);
2074 uint32_t cp15_control;
2075
2076 /* read cp15 control register */
2077 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2078 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2079
2080 if (mmu)
2081 cp15_control |= 0x1U;
2082
2083 if (d_u_cache)
2084 cp15_control |= 0x4U;
2085
2086 if (i_cache)
2087 cp15_control |= 0x1000U;
2088
2089 /* write new cp15 control register */
2090 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2091
2092 /* execute cpwait to ensure outstanding operations complete */
2093 xscale_send_u32(target, 0x53);
2094 }
2095
2096 static int xscale_set_breakpoint(struct target *target,
2097 struct breakpoint *breakpoint)
2098 {
2099 int retval;
2100 struct xscale_common *xscale = target_to_xscale(target);
2101
2102 if (target->state != TARGET_HALTED)
2103 {
2104 LOG_WARNING("target not halted");
2105 return ERROR_TARGET_NOT_HALTED;
2106 }
2107
2108 if (breakpoint->set)
2109 {
2110 LOG_WARNING("breakpoint already set");
2111 return ERROR_OK;
2112 }
2113
2114 if (breakpoint->type == BKPT_HARD)
2115 {
2116 uint32_t value = breakpoint->address | 1;
2117 if (!xscale->ibcr0_used)
2118 {
2119 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2120 xscale->ibcr0_used = 1;
2121 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2122 }
2123 else if (!xscale->ibcr1_used)
2124 {
2125 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2126 xscale->ibcr1_used = 1;
2127 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2128 }
2129 else
2130 {
2131 LOG_ERROR("BUG: no hardware comparator available");
2132 return ERROR_OK;
2133 }
2134 }
2135 else if (breakpoint->type == BKPT_SOFT)
2136 {
2137 if (breakpoint->length == 4)
2138 {
2139 /* keep the original instruction in target endianness */
2140 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2141 {
2142 return retval;
2143 }
2144 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2145 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2146 {
2147 return retval;
2148 }
2149 }
2150 else
2151 {
2152 /* keep the original instruction in target endianness */
2153 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2154 {
2155 return retval;
2156 }
2157 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2158 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2159 {
2160 return retval;
2161 }
2162 }
2163 breakpoint->set = 1;
2164 }
2165
2166 return ERROR_OK;
2167 }
2168
2169 static int xscale_add_breakpoint(struct target *target,
2170 struct breakpoint *breakpoint)
2171 {
2172 struct xscale_common *xscale = target_to_xscale(target);
2173
2174 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2175 {
2176 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2177 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2178 }
2179
2180 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2181 {
2182 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2183 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2184 }
2185
2186 if (breakpoint->type == BKPT_HARD)
2187 {
2188 xscale->ibcr_available--;
2189 }
2190
2191 return ERROR_OK;
2192 }
2193
2194 static int xscale_unset_breakpoint(struct target *target,
2195 struct breakpoint *breakpoint)
2196 {
2197 int retval;
2198 struct xscale_common *xscale = target_to_xscale(target);
2199
2200 if (target->state != TARGET_HALTED)
2201 {
2202 LOG_WARNING("target not halted");
2203 return ERROR_TARGET_NOT_HALTED;
2204 }
2205
2206 if (!breakpoint->set)
2207 {
2208 LOG_WARNING("breakpoint not set");
2209 return ERROR_OK;
2210 }
2211
2212 if (breakpoint->type == BKPT_HARD)
2213 {
2214 if (breakpoint->set == 1)
2215 {
2216 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2217 xscale->ibcr0_used = 0;
2218 }
2219 else if (breakpoint->set == 2)
2220 {
2221 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2222 xscale->ibcr1_used = 0;
2223 }
2224 breakpoint->set = 0;
2225 }
2226 else
2227 {
2228 /* restore original instruction (kept in target endianness) */
2229 if (breakpoint->length == 4)
2230 {
2231 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2232 {
2233 return retval;
2234 }
2235 }
2236 else
2237 {
2238 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2239 {
2240 return retval;
2241 }
2242 }
2243 breakpoint->set = 0;
2244 }
2245
2246 return ERROR_OK;
2247 }
2248
2249 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2250 {
2251 struct xscale_common *xscale = target_to_xscale(target);
2252
2253 if (target->state != TARGET_HALTED)
2254 {
2255 LOG_WARNING("target not halted");
2256 return ERROR_TARGET_NOT_HALTED;
2257 }
2258
2259 if (breakpoint->set)
2260 {
2261 xscale_unset_breakpoint(target, breakpoint);
2262 }
2263
2264 if (breakpoint->type == BKPT_HARD)
2265 xscale->ibcr_available++;
2266
2267 return ERROR_OK;
2268 }
2269
2270 static int xscale_set_watchpoint(struct target *target,
2271 struct watchpoint *watchpoint)
2272 {
2273 struct xscale_common *xscale = target_to_xscale(target);
2274 uint8_t enable = 0;
2275 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2276 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2277
2278 if (target->state != TARGET_HALTED)
2279 {
2280 LOG_WARNING("target not halted");
2281 return ERROR_TARGET_NOT_HALTED;
2282 }
2283
2284 xscale_get_reg(dbcon);
2285
2286 switch (watchpoint->rw)
2287 {
2288 case WPT_READ:
2289 enable = 0x3;
2290 break;
2291 case WPT_ACCESS:
2292 enable = 0x2;
2293 break;
2294 case WPT_WRITE:
2295 enable = 0x1;
2296 break;
2297 default:
2298 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2299 }
2300
2301 if (!xscale->dbr0_used)
2302 {
2303 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2304 dbcon_value |= enable;
2305 xscale_set_reg_u32(dbcon, dbcon_value);
2306 watchpoint->set = 1;
2307 xscale->dbr0_used = 1;
2308 }
2309 else if (!xscale->dbr1_used)
2310 {
2311 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2312 dbcon_value |= enable << 2;
2313 xscale_set_reg_u32(dbcon, dbcon_value);
2314 watchpoint->set = 2;
2315 xscale->dbr1_used = 1;
2316 }
2317 else
2318 {
2319 LOG_ERROR("BUG: no hardware comparator available");
2320 return ERROR_OK;
2321 }
2322
2323 return ERROR_OK;
2324 }
2325
2326 static int xscale_add_watchpoint(struct target *target,
2327 struct watchpoint *watchpoint)
2328 {
2329 struct xscale_common *xscale = target_to_xscale(target);
2330
2331 if (xscale->dbr_available < 1)
2332 {
2333 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2334 }
2335
2336 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2337 {
2338 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2339 }
2340
2341 xscale->dbr_available--;
2342
2343 return ERROR_OK;
2344 }
2345
2346 static int xscale_unset_watchpoint(struct target *target,
2347 struct watchpoint *watchpoint)
2348 {
2349 struct xscale_common *xscale = target_to_xscale(target);
2350 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2351 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2352
2353 if (target->state != TARGET_HALTED)
2354 {
2355 LOG_WARNING("target not halted");
2356 return ERROR_TARGET_NOT_HALTED;
2357 }
2358
2359 if (!watchpoint->set)
2360 {
2361 LOG_WARNING("breakpoint not set");
2362 return ERROR_OK;
2363 }
2364
2365 if (watchpoint->set == 1)
2366 {
2367 dbcon_value &= ~0x3;
2368 xscale_set_reg_u32(dbcon, dbcon_value);
2369 xscale->dbr0_used = 0;
2370 }
2371 else if (watchpoint->set == 2)
2372 {
2373 dbcon_value &= ~0xc;
2374 xscale_set_reg_u32(dbcon, dbcon_value);
2375 xscale->dbr1_used = 0;
2376 }
2377 watchpoint->set = 0;
2378
2379 return ERROR_OK;
2380 }
2381
2382 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2383 {
2384 struct xscale_common *xscale = target_to_xscale(target);
2385
2386 if (target->state != TARGET_HALTED)
2387 {
2388 LOG_WARNING("target not halted");
2389 return ERROR_TARGET_NOT_HALTED;
2390 }
2391
2392 if (watchpoint->set)
2393 {
2394 xscale_unset_watchpoint(target, watchpoint);
2395 }
2396
2397 xscale->dbr_available++;
2398
2399 return ERROR_OK;
2400 }
2401
2402 static int xscale_get_reg(struct reg *reg)
2403 {
2404 struct xscale_reg *arch_info = reg->arch_info;
2405 struct target *target = arch_info->target;
2406 struct xscale_common *xscale = target_to_xscale(target);
2407
2408 /* DCSR, TX and RX are accessible via JTAG */
2409 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2410 {
2411 return xscale_read_dcsr(arch_info->target);
2412 }
2413 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2414 {
2415 /* 1 = consume register content */
2416 return xscale_read_tx(arch_info->target, 1);
2417 }
2418 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2419 {
2420 /* can't read from RX register (host -> debug handler) */
2421 return ERROR_OK;
2422 }
2423 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2424 {
2425 /* can't (explicitly) read from TXRXCTRL register */
2426 return ERROR_OK;
2427 }
2428 else /* Other DBG registers have to be transfered by the debug handler */
2429 {
2430 /* send CP read request (command 0x40) */
2431 xscale_send_u32(target, 0x40);
2432
2433 /* send CP register number */
2434 xscale_send_u32(target, arch_info->dbg_handler_number);
2435
2436 /* read register value */
2437 xscale_read_tx(target, 1);
2438 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2439
2440 reg->dirty = 0;
2441 reg->valid = 1;
2442 }
2443
2444 return ERROR_OK;
2445 }
2446
2447 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2448 {
2449 struct xscale_reg *arch_info = reg->arch_info;
2450 struct target *target = arch_info->target;
2451 struct xscale_common *xscale = target_to_xscale(target);
2452 uint32_t value = buf_get_u32(buf, 0, 32);
2453
2454 /* DCSR, TX and RX are accessible via JTAG */
2455 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2456 {
2457 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2458 return xscale_write_dcsr(arch_info->target, -1, -1);
2459 }
2460 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2461 {
2462 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2463 return xscale_write_rx(arch_info->target);
2464 }
2465 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2466 {
2467 /* can't write to TX register (debug-handler -> host) */
2468 return ERROR_OK;
2469 }
2470 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2471 {
2472 /* can't (explicitly) write to TXRXCTRL register */
2473 return ERROR_OK;
2474 }
2475 else /* Other DBG registers have to be transfered by the debug handler */
2476 {
2477 /* send CP write request (command 0x41) */
2478 xscale_send_u32(target, 0x41);
2479
2480 /* send CP register number */
2481 xscale_send_u32(target, arch_info->dbg_handler_number);
2482
2483 /* send CP register value */
2484 xscale_send_u32(target, value);
2485 buf_set_u32(reg->value, 0, 32, value);
2486 }
2487
2488 return ERROR_OK;
2489 }
2490
2491 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2492 {
2493 struct xscale_common *xscale = target_to_xscale(target);
2494 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2495 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2496
2497 /* send CP write request (command 0x41) */
2498 xscale_send_u32(target, 0x41);
2499
2500 /* send CP register number */
2501 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2502
2503 /* send CP register value */
2504 xscale_send_u32(target, value);
2505 buf_set_u32(dcsr->value, 0, 32, value);
2506
2507 return ERROR_OK;
2508 }
2509
2510 static int xscale_read_trace(struct target *target)
2511 {
2512 struct xscale_common *xscale = target_to_xscale(target);
2513 struct arm *armv4_5 = &xscale->armv4_5_common;
2514 struct xscale_trace_data **trace_data_p;
2515
2516 /* 258 words from debug handler
2517 * 256 trace buffer entries
2518 * 2 checkpoint addresses
2519 */
2520 uint32_t trace_buffer[258];
2521 int is_address[256];
2522 int i, j;
2523
2524 if (target->state != TARGET_HALTED)
2525 {
2526 LOG_WARNING("target must be stopped to read trace data");
2527 return ERROR_TARGET_NOT_HALTED;
2528 }
2529
2530 /* send read trace buffer command (command 0x61) */
2531 xscale_send_u32(target, 0x61);
2532
2533 /* receive trace buffer content */
2534 xscale_receive(target, trace_buffer, 258);
2535
2536 /* parse buffer backwards to identify address entries */
2537 for (i = 255; i >= 0; i--)
2538 {
2539 is_address[i] = 0;
2540 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2541 ((trace_buffer[i] & 0xf0) == 0xd0))
2542 {
2543 if (i >= 3)
2544 is_address[--i] = 1;
2545 if (i >= 2)
2546 is_address[--i] = 1;
2547 if (i >= 1)
2548 is_address[--i] = 1;
2549 if (i >= 0)
2550 is_address[--i] = 1;
2551 }
2552 }
2553
2554
2555 /* search first non-zero entry */
2556 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2557 ;
2558
2559 if (j == 256)
2560 {
2561 LOG_DEBUG("no trace data collected");
2562 return ERROR_XSCALE_NO_TRACE_DATA;
2563 }
2564
2565 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2566 ;
2567
2568 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2569 (*trace_data_p)->next = NULL;
2570 (*trace_data_p)->chkpt0 = trace_buffer[256];
2571 (*trace_data_p)->chkpt1 = trace_buffer[257];
2572 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2573 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2574 (*trace_data_p)->depth = 256 - j;
2575
2576 for (i = j; i < 256; i++)
2577 {
2578 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2579 if (is_address[i])
2580 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2581 else
2582 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2583 }
2584
2585 return ERROR_OK;
2586 }
2587
/* Fetch and disassemble the instruction at xscale->trace.current_pc.
 *
 * The opcode comes from the loaded trace image (xscale->trace.image), not
 * from target memory, so trace analysis can run after the fact.
 *
 * Returns ERROR_TRACE_IMAGE_UNAVAILABLE when no image is loaded, and
 * ERROR_TRACE_INSTRUCTION_UNAVAILABLE when the PC lies outside every image
 * section or the section read fails.
 */
static int xscale_read_instruction(struct target *target,
		struct arm_instruction *instruction)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int i;
	int section = -1;
	size_t size_read;
	uint32_t opcode;
	int retval;

	if (!xscale->trace.image)
		return ERROR_TRACE_IMAGE_UNAVAILABLE;

	/* search for the section the current instruction belongs to */
	for (i = 0; i < xscale->trace.image->num_sections; i++)
	{
		if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
			(xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
		{
			section = i;
			break;
		}
	}

	if (section == -1)
	{
		/* current instruction couldn't be found in the image */
		return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
	}

	if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
	{
		/* ARM state: read a 32-bit opcode at the PC's offset within the section */
		uint8_t buf[4];
		if ((retval = image_read_section(xscale->trace.image, section,
			xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
			4, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction: %i", retval);
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u32(target, buf);
		arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
	}
	else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
	{
		/* Thumb state: read a 16-bit opcode */
		uint8_t buf[2];
		if ((retval = image_read_section(xscale->trace.image, section,
			xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
			2, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction: %i", retval);
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u16(target, buf);
		thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
	}
	else
	{
		/* trace.core_state is only ever assigned ARM or Thumb elsewhere in
		 * this file; anything else indicates internal state corruption */
		LOG_ERROR("BUG: unknown core state encountered");
		exit(-1);
	}

	return ERROR_OK;
}
2652
2653 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2654 int i, uint32_t *target)
2655 {
2656 /* if there are less than four entries prior to the indirect branch message
2657 * we can't extract the address */
2658 if (i < 4)
2659 {
2660 return -1;
2661 }
2662
2663 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2664 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2665
2666 return 0;
2667 }
2668
2669 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2670 {
2671 struct xscale_common *xscale = target_to_xscale(target);
2672 int next_pc_ok = 0;
2673 uint32_t next_pc = 0x0;
2674 struct xscale_trace_data *trace_data = xscale->trace.data;
2675 int retval;
2676
2677 while (trace_data)
2678 {
2679 int i, chkpt;
2680 int rollover;
2681 int branch;
2682 int exception;
2683 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2684
2685 chkpt = 0;
2686 rollover = 0;
2687
2688 for (i = 0; i < trace_data->depth; i++)
2689 {
2690 next_pc_ok = 0;
2691 branch = 0;
2692 exception = 0;
2693
2694 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2695 continue;
2696
2697 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2698 {
2699 case 0: /* Exceptions */
2700 case 1:
2701 case 2:
2702 case 3:
2703 case 4:
2704 case 5:
2705 case 6:
2706 case 7:
2707 exception = (trace_data->entries[i].data & 0x70) >> 4;
2708 next_pc_ok = 1;
2709 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2710 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2711 break;
2712 case 8: /* Direct Branch */
2713 branch = 1;
2714 break;
2715 case 9: /* Indirect Branch */
2716 branch = 1;
2717 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2718 {
2719 next_pc_ok = 1;
2720 }
2721 break;
2722 case 13: /* Checkpointed Indirect Branch */
2723 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2724 {
2725 next_pc_ok = 1;
2726 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2727 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2728 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2729 }
2730 /* explicit fall-through */
2731 case 12: /* Checkpointed Direct Branch */
2732 branch = 1;
2733 if (chkpt == 0)
2734 {
2735 next_pc_ok = 1;
2736 next_pc = trace_data->chkpt0;
2737 chkpt++;
2738 }
2739 else if (chkpt == 1)
2740 {
2741 next_pc_ok = 1;
2742 next_pc = trace_data->chkpt0;
2743 chkpt++;
2744 }
2745 else
2746 {
2747 LOG_WARNING("more than two checkpointed branches encountered");
2748 }
2749 break;
2750 case 15: /* Roll-over */
2751 rollover++;
2752 continue;
2753 default: /* Reserved */
2754 command_print(cmd_ctx, "--- reserved trace message ---");
2755 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2756 return ERROR_OK;
2757 }
2758
2759 if (xscale->trace.pc_ok)
2760 {
2761 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2762 struct arm_instruction instruction;
2763
2764 if ((exception == 6) || (exception == 7))
2765 {
2766 /* IRQ or FIQ exception, no instruction executed */
2767 executed -= 1;
2768 }
2769
2770 while (executed-- >= 0)
2771 {
2772 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2773 {
2774 /* can't continue tracing with no image available */
2775 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2776 {
2777 return retval;
2778 }
2779 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2780 {
2781 /* TODO: handle incomplete images */
2782 }
2783 }
2784
2785 /* a precise abort on a load to the PC is included in the incremental
2786 * word count, other instructions causing data aborts are not included
2787 */
2788 if ((executed == 0) && (exception == 4)
2789 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2790 {
2791 if ((instruction.type == ARM_LDM)
2792 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2793 {
2794 executed--;
2795 }
2796 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2797 && (instruction.info.load_store.Rd != 15))
2798 {
2799 executed--;
2800 }
2801 }
2802
2803 /* only the last instruction executed
2804 * (the one that caused the control flow change)
2805 * could be a taken branch
2806 */
2807 if (((executed == -1) && (branch == 1)) &&
2808 (((instruction.type == ARM_B) ||
2809 (instruction.type == ARM_BL) ||
2810 (instruction.type == ARM_BLX)) &&
2811 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2812 {
2813 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2814 }
2815 else
2816 {
2817 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2818 }
2819 command_print(cmd_ctx, "%s", instruction.text);
2820 }
2821
2822 rollover = 0;
2823 }
2824
2825 if (next_pc_ok)
2826 {
2827 xscale->trace.current_pc = next_pc;
2828 xscale->trace.pc_ok = 1;
2829 }
2830 }
2831
2832 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2833 {
2834 struct arm_instruction instruction;
2835 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2836 {
2837 /* can't continue tracing with no image available */
2838 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2839 {
2840 return retval;
2841 }
2842 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2843 {
2844 /* TODO: handle incomplete images */
2845 }
2846 }
2847 command_print(cmd_ctx, "%s", instruction.text);
2848 }
2849
2850 trace_data = trace_data->next;
2851 }
2852
2853 return ERROR_OK;
2854 }
2855
/* get/set callbacks shared by every register in the XScale reg cache */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2860
2861 static void xscale_build_reg_cache(struct target *target)
2862 {
2863 struct xscale_common *xscale = target_to_xscale(target);
2864 struct arm *armv4_5 = &xscale->armv4_5_common;
2865 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2866 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2867 int i;
2868 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2869
2870 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2871
2872 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2873 cache_p = &(*cache_p)->next;
2874
2875 /* fill in values for the xscale reg cache */
2876 (*cache_p)->name = "XScale registers";
2877 (*cache_p)->next = NULL;
2878 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2879 (*cache_p)->num_regs = num_regs;
2880
2881 for (i = 0; i < num_regs; i++)
2882 {
2883 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2884 (*cache_p)->reg_list[i].value = calloc(4, 1);
2885 (*cache_p)->reg_list[i].dirty = 0;
2886 (*cache_p)->reg_list[i].valid = 0;
2887 (*cache_p)->reg_list[i].size = 32;
2888 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2889 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2890 arch_info[i] = xscale_reg_arch_info[i];
2891 arch_info[i].target = target;
2892 }
2893
2894 xscale->reg_cache = (*cache_p);
2895 }
2896
/* target framework init hook: build the register cache once the target
 * exists; cmd_ctx is unused here */
static int xscale_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2903
/* One-time setup of XScale-specific target state: variant/IR-length sanity
 * check, default debug handler placement, reset vector stubs, hardware
 * comparator bookkeeping, and wiring of the ARMv4/5 core and MMU/cache
 * callback layers.  Always returns ERROR_OK. */
static int xscale_init_arch_info(struct target *target,
		struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specific data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* a variant param ... */
	if (variant) {
		int ir_length = 0;

		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else if (strcmp(variant, "pxa3xx") == 0)
			ir_length = 11;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		/* the variant implies the TAP's IR length; fix up a wrong
		 * configuration instead of failing */
		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* PXA3xx shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler
	 * (ARM B encoding: offset = (destination - vector - 8) >> 2) */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* remaining vectors default to a branch-to-self (offset 0xfffffe) */
	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction breakpoint comparators (IBCR0/IBCR1) ... */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	/* ... and two data breakpoint registers (DBR0/DBR1) */
	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* software breakpoint opcodes for ARM and Thumb state */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	/* default vector_catch mask 0x1 -- presumably traps the reset vector;
	 * the mask lands in DCSR[23:16] (see the vector_catch command) */
	xscale->vector_catch = 0x1;

	xscale->trace.capture_status = TRACE_IDLE;
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.buffer_enabled = 0;
	xscale->trace.buffer_fill = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	armv4_5_init_arch_info(target, armv4_5);

	/* hook up the generic ARMv4/5 MMU/cache layer */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
3016
3017 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3018 {
3019 struct xscale_common *xscale;
3020
3021 if (sizeof xscale_debug_handler - 1 > 0x800) {
3022 LOG_ERROR("debug_handler.bin: larger than 2kb");
3023 return ERROR_FAIL;
3024 }
3025
3026 xscale = calloc(1, sizeof(*xscale));
3027 if (!xscale)
3028 return ERROR_FAIL;
3029
3030 return xscale_init_arch_info(target, xscale, target->tap,
3031 target->variant);
3032 }
3033
3034 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3035 {
3036 struct target *target = NULL;
3037 struct xscale_common *xscale;
3038 int retval;
3039 uint32_t handler_address;
3040
3041 if (CMD_ARGC < 2)
3042 {
3043 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3044 return ERROR_OK;
3045 }
3046
3047 if ((target = get_target(CMD_ARGV[0])) == NULL)
3048 {
3049 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3050 return ERROR_FAIL;
3051 }
3052
3053 xscale = target_to_xscale(target);
3054 retval = xscale_verify_pointer(CMD_CTX, xscale);
3055 if (retval != ERROR_OK)
3056 return retval;
3057
3058 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3059
3060 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3061 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3062 {
3063 xscale->handler_address = handler_address;
3064 }
3065 else
3066 {
3067 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3068 return ERROR_FAIL;
3069 }
3070
3071 return ERROR_OK;
3072 }
3073
3074 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3075 {
3076 struct target *target = NULL;
3077 struct xscale_common *xscale;
3078 int retval;
3079 uint32_t cache_clean_address;
3080
3081 if (CMD_ARGC < 2)
3082 {
3083 return ERROR_COMMAND_SYNTAX_ERROR;
3084 }
3085
3086 target = get_target(CMD_ARGV[0]);
3087 if (target == NULL)
3088 {
3089 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3090 return ERROR_FAIL;
3091 }
3092 xscale = target_to_xscale(target);
3093 retval = xscale_verify_pointer(CMD_CTX, xscale);
3094 if (retval != ERROR_OK)
3095 return retval;
3096
3097 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3098
3099 if (cache_clean_address & 0xffff)
3100 {
3101 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3102 }
3103 else
3104 {
3105 xscale->cache_clean_address = cache_clean_address;
3106 }
3107
3108 return ERROR_OK;
3109 }
3110
3111 COMMAND_HANDLER(xscale_handle_cache_info_command)
3112 {
3113 struct target *target = get_current_target(CMD_CTX);
3114 struct xscale_common *xscale = target_to_xscale(target);
3115 int retval;
3116
3117 retval = xscale_verify_pointer(CMD_CTX, xscale);
3118 if (retval != ERROR_OK)
3119 return retval;
3120
3121 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3122 }
3123
/* Translate a virtual address to physical using the generic ARMv4/5 MMU
 * table walk.  On success *physical receives the translated address. */
static int xscale_virt2phys(struct target *target,
		uint32_t virtual, uint32_t *physical)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int type;
	uint32_t cb;
	int domain;
	uint32_t ap;

	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		LOG_ERROR(xscale_not);
		return ERROR_TARGET_INVALID;
	}

	/* NOTE(review): 'ret' conflates two meanings -- the translated physical
	 * address on success, or an error code when the walk fails (signalled by
	 * type == -1).  Confirm armv4_5_mmu_translate_va's contract before
	 * reworking this. */
	uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
	if (type == -1)
	{
		/* translation failed; propagate the error code */
		return ret;
	}
	*physical = ret;
	return ERROR_OK;
}
3146
3147 static int xscale_mmu(struct target *target, int *enabled)
3148 {
3149 struct xscale_common *xscale = target_to_xscale(target);
3150
3151 if (target->state != TARGET_HALTED)
3152 {
3153 LOG_ERROR("Target not halted");
3154 return ERROR_TARGET_INVALID;
3155 }
3156 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3157 return ERROR_OK;
3158 }
3159
3160 COMMAND_HANDLER(xscale_handle_mmu_command)
3161 {
3162 struct target *target = get_current_target(CMD_CTX);
3163 struct xscale_common *xscale = target_to_xscale(target);
3164 int retval;
3165
3166 retval = xscale_verify_pointer(CMD_CTX, xscale);
3167 if (retval != ERROR_OK)
3168 return retval;
3169
3170 if (target->state != TARGET_HALTED)
3171 {
3172 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3173 return ERROR_OK;
3174 }
3175
3176 if (CMD_ARGC >= 1)
3177 {
3178 bool enable;
3179 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3180 if (enable)
3181 xscale_enable_mmu_caches(target, 1, 0, 0);
3182 else
3183 xscale_disable_mmu_caches(target, 1, 0, 0);
3184 xscale->armv4_5_mmu.mmu_enabled = enable;
3185 }
3186
3187 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3188
3189 return ERROR_OK;
3190 }
3191
3192 COMMAND_HANDLER(xscale_handle_idcache_command)
3193 {
3194 struct target *target = get_current_target(CMD_CTX);
3195 struct xscale_common *xscale = target_to_xscale(target);
3196
3197 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3198 if (retval != ERROR_OK)
3199 return retval;
3200
3201 if (target->state != TARGET_HALTED)
3202 {
3203 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3204 return ERROR_OK;
3205 }
3206
3207 bool icache;
3208 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3209
3210 if (CMD_ARGC >= 1)
3211 {
3212 bool enable;
3213 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3214 if (enable)
3215 xscale_enable_mmu_caches(target, 1, 0, 0);
3216 else
3217 xscale_disable_mmu_caches(target, 1, 0, 0);
3218 if (icache)
3219 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3220 else
3221 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3222 }
3223
3224 bool enabled = icache ?
3225 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3226 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3227 const char *msg = enabled ? "enabled" : "disabled";
3228 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3229
3230 return ERROR_OK;
3231 }
3232
3233 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3234 {
3235 struct target *target = get_current_target(CMD_CTX);
3236 struct xscale_common *xscale = target_to_xscale(target);
3237 int retval;
3238
3239 retval = xscale_verify_pointer(CMD_CTX, xscale);
3240 if (retval != ERROR_OK)
3241 return retval;
3242
3243 if (CMD_ARGC < 1)
3244 {
3245 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3246 }
3247 else
3248 {
3249 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3250 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3251 xscale_write_dcsr(target, -1, -1);
3252 }
3253
3254 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3255
3256 return ERROR_OK;
3257 }
3258
3259
/* "xscale vector_table [<high|low> <index> <code>]":
 * with no arguments, list the user-set static vector entries; otherwise
 * record one 32-bit instruction word for exception vector <index> (1..7)
 * in the low (0x0-based) or high (0xffff0000-based) vector table.
 * NOTE: the COMMAND_PARSE_NUMBER macros return from this handler on
 * parse errors. */
COMMAND_HANDLER(xscale_handle_vector_table_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int err = 0;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC == 0) /* print current settings */
	{
		int idx;

		command_print(CMD_CTX, "active user-set static vectors:");
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_low_vectors_set & (1 << idx))
				command_print(CMD_CTX, "low  %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_high_vectors_set & (1 << idx))
				command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
		return ERROR_OK;
	}

	if (CMD_ARGC != 3)
		err = 1;
	else
	{
		int idx;
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
		uint32_t vec;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);

		/* vector 0 (reset) is managed by the driver and can't be overridden */
		if (idx < 1 || idx >= 8)
			err = 1;

		if (!err && strcmp(CMD_ARGV[0], "low") == 0)
		{
			xscale->static_low_vectors_set |= (1<<idx);
			xscale->static_low_vectors[idx] = vec;
		}
		else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
		{
			xscale->static_high_vectors_set |= (1<<idx);
			xscale->static_high_vectors[idx] = vec;
		}
		else
			err = 1;
	}

	if (err)
		command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");

	return ERROR_OK;
}
3316
3317
/* "xscale trace_buffer <enable|disable> [fill [n] | wrap]" --
 * configure the on-chip trace buffer.  Enabling discards any trace data
 * collected so far; the optional second argument selects fill-once
 * (optionally n buffers deep) or wrap-around mode.  Requires a halted
 * target because it reads PC from the register cache and rewrites DCSR.
 */
COMMAND_HANDLER(xscale_handle_trace_buffer_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t dcsr_value;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
	{
		struct xscale_trace_data *td, *next_td;
		xscale->trace.buffer_enabled = 1;

		/* free old trace data -- enabling restarts collection from scratch,
		 * so walk the linked list and release every node and its entries */
		td = xscale->trace.data;
		while (td)
		{
			next_td = td->next;

			if (td->entries)
				free(td->entries);
			free(td);
			td = next_td;
		}
		xscale->trace.data = NULL;
	}
	else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
	{
		xscale->trace.buffer_enabled = 0;
	}

	/* optional mode argument: fill-once (buffer_fill = n, default 1)
	 * or wrap-around (buffer_fill = -1) */
	if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
	{
		uint32_t fill = 1;
		if (CMD_ARGC >= 3)
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
		xscale->trace.buffer_fill = fill;
	}
	else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
	{
		xscale->trace.buffer_fill = -1;
	}

	if (xscale->trace.buffer_enabled)
	{
		/* if we enable the trace buffer in fill-once
		 * mode we know the address of the first instruction */
		xscale->trace.pc_ok = 1;
		xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	}
	else
	{
		/* otherwise the address is unknown, and we have no known good PC */
		xscale->trace.pc_ok = 0;
	}

	command_print(CMD_CTX, "trace buffer %s (%s)",
		(xscale->trace.buffer_enabled) ? "enabled" : "disabled",
		(xscale->trace.buffer_fill > 0) ? "fill" : "wrap");

	/* update the trace-mode bits (DCSR[1:0]) to match the chosen mode:
	 * 2 selects fill-once, 0 selects wrap-around */
	dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
	if (xscale->trace.buffer_fill >= 0)
		xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
	else
		xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);

	return ERROR_OK;
}
3396
3397 COMMAND_HANDLER(xscale_handle_trace_image_command)
3398 {
3399 struct target *target = get_current_target(CMD_CTX);
3400 struct xscale_common *xscale = target_to_xscale(target);
3401 int retval;
3402
3403 if (CMD_ARGC < 1)
3404 {
3405 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3406 return ERROR_OK;
3407 }
3408
3409 retval = xscale_verify_pointer(CMD_CTX, xscale);
3410 if (retval != ERROR_OK)
3411 return retval;
3412
3413 if (xscale->trace.image)
3414 {
3415 image_close(xscale->trace.image);
3416 free(xscale->trace.image);
3417 command_print(CMD_CTX, "previously loaded image found and closed");
3418 }
3419
3420 xscale->trace.image = malloc(sizeof(struct image));
3421 xscale->trace.image->base_address_set = 0;
3422 xscale->trace.image->start_address_set = 0;
3423
3424 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3425 if (CMD_ARGC >= 2)
3426 {
3427 xscale->trace.image->base_address_set = 1;
3428 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3429 }
3430 else
3431 {
3432 xscale->trace.image->base_address_set = 0;
3433 }
3434
3435 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3436 {
3437 free(xscale->trace.image);
3438 xscale->trace.image = NULL;
3439 return ERROR_OK;
3440 }
3441
3442 return ERROR_OK;
3443 }
3444
/* "xscale dump_trace <file>" -- write the collected trace data to a
 * binary file.  Each trace-data node is serialized as four u32 header
 * words (chkpt0, chkpt1, last_instruction, depth) followed by one u32
 * per entry packing data in the low half and type in the high half.
 * Requires a halted target and previously collected trace data.
 */
COMMAND_HANDLER(xscale_handle_dump_trace_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	struct xscale_trace_data *trace_data;
	struct fileio file;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC < 1)
	{
		command_print(CMD_CTX, "usage: xscale dump_trace <file>");
		return ERROR_OK;
	}

	trace_data = xscale->trace.data;

	if (!trace_data)
	{
		command_print(CMD_CTX, "no trace data collected");
		return ERROR_OK;
	}

	if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
	{
		/* fileio_open logs its own error; nothing more to report here */
		return ERROR_OK;
	}

	/* walk the linked list of trace-data buffers and serialize each one */
	while (trace_data)
	{
		int i;

		fileio_write_u32(&file, trace_data->chkpt0);
		fileio_write_u32(&file, trace_data->chkpt1);
		fileio_write_u32(&file, trace_data->last_instruction);
		fileio_write_u32(&file, trace_data->depth);

		/* pack entry: data in bits [15:0], type in bits [31:16] */
		for (i = 0; i < trace_data->depth; i++)
			fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));

		trace_data = trace_data->next;
	}

	fileio_close(&file);

	return ERROR_OK;
}
3501
3502 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3503 {
3504 struct target *target = get_current_target(CMD_CTX);
3505 struct xscale_common *xscale = target_to_xscale(target);
3506 int retval;
3507
3508 retval = xscale_verify_pointer(CMD_CTX, xscale);
3509 if (retval != ERROR_OK)
3510 return retval;
3511
3512 xscale_analyze_trace(target, CMD_CTX);
3513
3514 return ERROR_OK;
3515 }
3516
/* "xscale cp15 <register> [value]" -- read or write a coprocessor 15
 * register through the debug handler.  The user-supplied CP15 register
 * number is first translated to the corresponding entry in OpenOCD's
 * register cache.  Requires a halted target.
 */
COMMAND_HANDLER(xscale_handle_cp15)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}
	uint32_t reg_no = 0;
	struct reg *reg = NULL;
	if (CMD_ARGC > 0)
	{
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
		/*translate from xscale cp15 register no to openocd register*/
		switch (reg_no)
		{
		case 0:
			reg_no = XSCALE_MAINID;
			break;
		case 1:
			reg_no = XSCALE_CTRL;
			break;
		case 2:
			reg_no = XSCALE_TTB;
			break;
		case 3:
			reg_no = XSCALE_DAC;
			break;
		case 5:
			reg_no = XSCALE_FSR;
			break;
		case 6:
			reg_no = XSCALE_FAR;
			break;
		case 13:
			reg_no = XSCALE_PID;
			break;
		case 15:
			reg_no = XSCALE_CPACCESS;
			break;
		default:
			command_print(CMD_CTX, "invalid register number");
			return ERROR_INVALID_ARGUMENTS;
		}
		reg = &xscale->reg_cache->reg_list[reg_no];

	}
	if (CMD_ARGC == 1)
	{
		uint32_t value;

		/* read cp15 control register */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
	}
	else if (CMD_ARGC == 2)
	{
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);

		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number
		 * NOTE(review): reg_no was remapped above to the XSCALE_* register
		 * cache index, not the raw CP15 number typed by the user -- confirm
		 * the debug handler expects this numbering for command 0x41 */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);
	}
	else
	{
		command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
	}

	return ERROR_OK;
}
3604
3605 static const struct command_registration xscale_exec_command_handlers[] = {
3606 {
3607 .name = "cache_info",
3608 .handler = &xscale_handle_cache_info_command,
3609 .mode = COMMAND_EXEC, NULL,
3610 },
3611
3612 {
3613 .name = "mmu",
3614 .handler = &xscale_handle_mmu_command,
3615 .mode = COMMAND_EXEC,
3616 .usage = "[enable|disable]",
3617 .help = "enable or disable the MMU",
3618 },
3619 {
3620 .name = "icache",
3621 .handler = &xscale_handle_idcache_command,
3622 .mode = COMMAND_EXEC,
3623 .usage = "[enable|disable]",
3624 .help = "enable or disable the ICache",
3625 },
3626 {
3627 .name = "dcache",
3628 .handler = &xscale_handle_idcache_command,
3629 .mode = COMMAND_EXEC,
3630 .usage = "[enable|disable]",
3631 .help = "enable or disable the DCache",
3632 },
3633
3634 {
3635 .name = "vector_catch",
3636 .handler = &xscale_handle_vector_catch_command,
3637 .mode = COMMAND_EXEC,
3638 .help = "mask of vectors that should be caught",
3639 .usage = "[<mask>]",
3640 },
3641 {
3642 .name = "vector_table",
3643 .handler = &xscale_handle_vector_table_command,
3644 .mode = COMMAND_EXEC,
3645 .usage = "<high|low> <index> <code>",
3646 .help = "set static code for exception handler entry",
3647 },
3648
3649 {
3650 .name = "trace_buffer",
3651 .handler = &xscale_handle_trace_buffer_command,
3652 .mode = COMMAND_EXEC,
3653 .usage = "<enable | disable> [fill [n]|wrap]",
3654 },
3655 {
3656 .name = "dump_trace",
3657 .handler = &xscale_handle_dump_trace_command,
3658 .mode = COMMAND_EXEC,
3659 .help = "dump content of trace buffer to <file>",
3660 .usage = "<file>",
3661 },
3662 {
3663 .name = "analyze_trace",
3664 .handler = &xscale_handle_analyze_trace_buffer_command,
3665 .mode = COMMAND_EXEC,
3666 .help = "analyze content of trace buffer",
3667 },
3668 {
3669 .name = "trace_image",
3670 .handler = &xscale_handle_trace_image_command,
3671 COMMAND_EXEC,
3672 .help = "load image from <file> [base address]",
3673 .usage = "<file> [address] [type]",
3674 },
3675
3676 {
3677 .name = "cp15",
3678 .handler = &xscale_handle_cp15,
3679 .mode = COMMAND_EXEC,
3680 .help = "access coproc 15",
3681 .usage = "<register> [value]",
3682 },
3683 COMMAND_REGISTRATION_DONE
3684 };
/* Commands usable in any mode (configuration or exec); the EXEC-only
 * commands are pulled in via the .chain entry. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = &xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.usage = "<target#> <address>",
	},
	{
		.name = "cache_clean_address",
		.handler = &xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
	},
	{
		/* include the EXEC-mode command table defined above */
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for xscale targets: the generic ARM
 * commands plus an "xscale" group wrapping the handlers above. */
static const struct command_registration xscale_command_handlers[] = {
	{
		/* inherit the common ARM command set */
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3714
/* Target-type vtable binding the XScale implementation into the
 * generic OpenOCD target framework. */
struct target_type xscale_target =
{
	.name = "xscale",

	/* state polling and status reporting */
	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	/* run control */
	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	/* reset handling; soft reset-halt is not supported on XScale */
	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = NULL,

	.get_gdb_reg_list = armv4_5_get_gdb_reg_list,

	/* memory access, both virtual and physical */
	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	/* generic ARM helpers for checksum/blank-check and algorithms */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* breakpoints and watchpoints */
	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	/* lifecycle and command registration */
	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	/* MMU support */
	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time the new login method will be linked to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)