f22513a048d568fa628990f75990d2eba4df2d73
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
41
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
74
75
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
78 *
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
83 */
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
86
/* Names of the XScale-specific registers exposed through the target's
 * register cache.  The order must match xscale_reg_arch_info[] below and
 * the XSCALE_* register index constants (see xscale.h).
 */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID",		/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",			/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",			/* 20 */
	"XSCALE_TXRXCTRL",
};
112
/* Per-register architecture info, parallel to xscale_reg_list[] above.
 * The first member is the CP15/debug-model register number used by the
 * debug handler; -1 marks registers that are not accessed through the
 * handler but directly via JTAG scan chains (see comments per entry).
 * The target back-pointer (second member) is filled in at cache setup.
 */
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
	{-1, NULL}, /* TX accessed via JTAG */
	{-1, NULL}, /* RX accessed via JTAG */
	{-1, NULL}, /* TXRXCTRL implicit access via JTAG */
};
138
/* Convenience wrapper: write a host-order 32-bit value into an XScale
 * register through the generic buffer-based xscale_set_reg(). */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t value_buf[4];

	buf_set_u32(value_buf, 0, 32, value);
	return xscale_set_reg(reg, value_buf);
}
148
149 static const char xscale_not[] = "target is not an XScale";
150
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
153 {
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
157 }
158 return ERROR_OK;
159 }
160
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
162 {
163 if (tap == NULL)
164 return ERROR_FAIL;
165
166 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
167 {
168 struct scan_field field;
169 uint8_t scratch[4];
170
171 memset(&field, 0, sizeof field);
172 field.num_bits = tap->ir_length;
173 field.out_value = scratch;
174 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
175
176 jtag_add_ir_scan(tap, &field, end_state);
177 }
178
179 return ERROR_OK;
180 }
181
/* Read the XScale DCSR (Debug Control and Status Register) over JTAG.
 *
 * Selects the SELDCSR scan chain and performs two DR scans:
 *   1. a read pass, capturing the 32-bit DCSR value into the register
 *      cache, while driving hold_rst / external_debug_break in the
 *      3-bit control field;
 *   2. a write-back pass of the value just read (a SELDCSR scan always
 *      writes the register, so the current value must be restored).
 *
 * Returns ERROR_OK on success, or the error from jtag_execute_queue().
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_DRPAUSE);
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_DRPAUSE);

	/* control field: bit 1 = hold_rst, bit 2 = external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* verify the fixed bits captured in the control and trailer fields */
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* cached DCSR now mirrors hardware */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_set_end_state(TAP_IDLE);

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
246
247
248 static void xscale_getbuf(jtag_callback_data_t arg)
249 {
250 uint8_t *in = (uint8_t *)arg;
251 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
252 }
253
254 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
255 {
256 if (num_words == 0)
257 return ERROR_INVALID_ARGUMENTS;
258
259 struct xscale_common *xscale = target_to_xscale(target);
260 int retval = ERROR_OK;
261 tap_state_t path[3];
262 struct scan_field fields[3];
263 uint8_t *field0 = malloc(num_words * 1);
264 uint8_t field0_check_value = 0x2;
265 uint8_t field0_check_mask = 0x6;
266 uint32_t *field1 = malloc(num_words * 4);
267 uint8_t field2_check_value = 0x0;
268 uint8_t field2_check_mask = 0x1;
269 int words_done = 0;
270 int words_scheduled = 0;
271 int i;
272
273 path[0] = TAP_DRSELECT;
274 path[1] = TAP_DRCAPTURE;
275 path[2] = TAP_DRSHIFT;
276
277 memset(&fields, 0, sizeof fields);
278
279 fields[0].num_bits = 3;
280 fields[0].check_value = &field0_check_value;
281 fields[0].check_mask = &field0_check_mask;
282
283 fields[1].num_bits = 32;
284
285 fields[2].num_bits = 1;
286 fields[2].check_value = &field2_check_value;
287 fields[2].check_mask = &field2_check_mask;
288
289 jtag_set_end_state(TAP_IDLE);
290 xscale_jtag_set_instr(target->tap,
291 XSCALE_DBGTX << xscale->xscale_variant,
292 TAP_IDLE);
293 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
294
295 /* repeat until all words have been collected */
296 int attempts = 0;
297 while (words_done < num_words)
298 {
299 /* schedule reads */
300 words_scheduled = 0;
301 for (i = words_done; i < num_words; i++)
302 {
303 fields[0].in_value = &field0[i];
304
305 jtag_add_pathmove(3, path);
306
307 fields[1].in_value = (uint8_t *)(field1 + i);
308
309 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
310
311 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
312
313 words_scheduled++;
314 }
315
316 if ((retval = jtag_execute_queue()) != ERROR_OK)
317 {
318 LOG_ERROR("JTAG error while receiving data from debug handler");
319 break;
320 }
321
322 /* examine results */
323 for (i = words_done; i < num_words; i++)
324 {
325 if (!(field0[0] & 1))
326 {
327 /* move backwards if necessary */
328 int j;
329 for (j = i; j < num_words - 1; j++)
330 {
331 field0[j] = field0[j + 1];
332 field1[j] = field1[j + 1];
333 }
334 words_scheduled--;
335 }
336 }
337 if (words_scheduled == 0)
338 {
339 if (attempts++==1000)
340 {
341 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
342 retval = ERROR_TARGET_TIMEOUT;
343 break;
344 }
345 }
346
347 words_done += words_scheduled;
348 }
349
350 for (i = 0; i < num_words; i++)
351 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
352
353 free(field1);
354
355 return retval;
356 }
357
/* Read the debug handler's TX register via the DBGTX scan chain.
 *
 * If consume is nonzero the read clears TX_READY (Capture-DR straight to
 * Shift-DR); otherwise a detour through Pause-DR leaves the register
 * content intact so polling does not disturb the handler.
 *
 * Retries for up to one second waiting for TX_READY when consuming.
 * Returns ERROR_OK with the value in the XSCALE_TX cache entry,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE if no data was ready, or
 * ERROR_TARGET_TIMEOUT on JTAG error or timeout.
 * NOTE(review): a JTAG queue failure also returns ERROR_TARGET_TIMEOUT
 * rather than the underlying retval — confirm callers rely on this.
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGTX << xscale->xscale_variant,
		TAP_IDLE);

	/* consuming path: Capture-DR directly into Shift-DR */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: detour through Exit1/Pause/Exit2 */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* overall deadline: one second from now */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* leave the loop unless we are consuming and TX_READY is clear */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* TX_READY clear means no data was available */
	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
454
/* Write the cached XSCALE_RX value into the debug handler's RX register
 * via the DBGRX scan chain.
 *
 * Polls (with a one second deadline) until the handler has consumed the
 * previous word (RX_READY clear), then re-scans the word with the
 * rx_valid flag set to hand it over.
 *
 * Returns ERROR_OK, ERROR_TARGET_TIMEOUT on deadline, or a JTAG error.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;	/* rx_valid: 0 while polling */
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* overall deadline: one second from now */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* handler consumed the previous word; proceed to hand over ours */
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
538
539 /* send count elements of size byte to the debug handler */
540 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
541 {
542 struct xscale_common *xscale = target_to_xscale(target);
543 uint32_t t[3];
544 int bits[3];
545 int retval;
546 int done_count = 0;
547
548 jtag_set_end_state(TAP_IDLE);
549
550 xscale_jtag_set_instr(target->tap,
551 XSCALE_DBGRX << xscale->xscale_variant,
552 TAP_IDLE);
553
554 bits[0]=3;
555 t[0]=0;
556 bits[1]=32;
557 t[2]=1;
558 bits[2]=1;
559 int endianness = target->endianness;
560 while (done_count++ < count)
561 {
562 switch (size)
563 {
564 case 4:
565 if (endianness == TARGET_LITTLE_ENDIAN)
566 {
567 t[1]=le_to_h_u32(buffer);
568 } else
569 {
570 t[1]=be_to_h_u32(buffer);
571 }
572 break;
573 case 2:
574 if (endianness == TARGET_LITTLE_ENDIAN)
575 {
576 t[1]=le_to_h_u16(buffer);
577 } else
578 {
579 t[1]=be_to_h_u16(buffer);
580 }
581 break;
582 case 1:
583 t[1]=buffer[0];
584 break;
585 default:
586 LOG_ERROR("BUG: size neither 4, 2 nor 1");
587 return ERROR_INVALID_ARGUMENTS;
588 }
589 jtag_add_dr_out(target->tap,
590 3,
591 bits,
592 t,
593 TAP_IDLE);
594 buffer += size;
595 }
596
597 if ((retval = jtag_execute_queue()) != ERROR_OK)
598 {
599 LOG_ERROR("JTAG error while sending data to debug handler");
600 return retval;
601 }
602
603 return ERROR_OK;
604 }
605
606 static int xscale_send_u32(struct target *target, uint32_t value)
607 {
608 struct xscale_common *xscale = target_to_xscale(target);
609
610 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
611 return xscale_write_rx(target);
612 }
613
/* Write the cached DCSR value to hardware via the SELDCSR scan chain,
 * optionally updating the hold_rst / external debug break control bits
 * first (pass -1 to leave either unchanged).
 *
 * Returns ERROR_OK or the error from jtag_execute_queue().
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	/* -1 means "keep current setting" */
	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* control field: bit 1 = hold_rst, bit 2 = external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	/* verify the fixed bits captured in the control and trailer fields */
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* hardware now matches the cache */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
671
/* Parity of a 32-bit word: 0 if the number of set bits is even,
 * 1 if it is odd.
 *
 * Folds the word onto its low nibble with three XOR-shift steps, then
 * uses that nibble to index the constant 0x6996, which is the packed
 * parity table of the values 0..15.
 * (Removed long-dead commented-out debug tracing.)
 */
static unsigned int parity (unsigned int v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v &= 0xf;
	return (0x6996 >> v) & 1;
}
683
/* Load one cacheline (8 words from buffer[]) into the mini ICache at
 * virtual address va, using the LDIC JTAG scan chain.
 *
 * The packet format is: a 6-bit command plus 27 address bits, followed
 * by eight 32-bit instruction words each accompanied by its parity bit.
 *
 * Returns the result of jtag_execute_queue().
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
		TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;	/* reused as the per-word parity bit */

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* read the packed word back host-order via memcpy (avoids
		 * aliasing issues) and compute its parity bit */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
739
740 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
741 {
742 struct xscale_common *xscale = target_to_xscale(target);
743 uint8_t packet[4];
744 uint8_t cmd;
745 struct scan_field fields[2];
746
747 jtag_set_end_state(TAP_IDLE);
748 xscale_jtag_set_instr(target->tap,
749 XSCALE_LDIC << xscale->xscale_variant,
750 TAP_IDLE);
751
752 /* CMD for invalidate IC line b000, bits [6:4] b000 */
753 buf_set_u32(&cmd, 0, 6, 0x0);
754
755 /* virtual address of desired cache line */
756 buf_set_u32(packet, 0, 27, va >> 5);
757
758 memset(&fields, 0, sizeof fields);
759
760 fields[0].num_bits = 6;
761 fields[0].out_value = &cmd;
762
763 fields[1].num_bits = 27;
764 fields[1].out_value = packet;
765
766 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
767
768 return ERROR_OK;
769 }
770
771 static int xscale_update_vectors(struct target *target)
772 {
773 struct xscale_common *xscale = target_to_xscale(target);
774 int i;
775 int retval;
776
777 uint32_t low_reset_branch, high_reset_branch;
778
779 for (i = 1; i < 8; i++)
780 {
781 /* if there's a static vector specified for this exception, override */
782 if (xscale->static_high_vectors_set & (1 << i))
783 {
784 xscale->high_vectors[i] = xscale->static_high_vectors[i];
785 }
786 else
787 {
788 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
789 if (retval == ERROR_TARGET_TIMEOUT)
790 return retval;
791 if (retval != ERROR_OK)
792 {
793 /* Some of these reads will fail as part of normal execution */
794 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
795 }
796 }
797 }
798
799 for (i = 1; i < 8; i++)
800 {
801 if (xscale->static_low_vectors_set & (1 << i))
802 {
803 xscale->low_vectors[i] = xscale->static_low_vectors[i];
804 }
805 else
806 {
807 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
808 if (retval == ERROR_TARGET_TIMEOUT)
809 return retval;
810 if (retval != ERROR_OK)
811 {
812 /* Some of these reads will fail as part of normal execution */
813 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
814 }
815 }
816 }
817
818 /* calculate branches to debug handler */
819 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
820 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
821
822 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
823 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
824
825 /* invalidate and load exception vectors in mini i-cache */
826 xscale_invalidate_ic_line(target, 0x0);
827 xscale_invalidate_ic_line(target, 0xffff0000);
828
829 xscale_load_ic(target, 0x0, xscale->low_vectors);
830 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
831
832 return ERROR_OK;
833 }
834
835 static int xscale_arch_state(struct target *target)
836 {
837 struct xscale_common *xscale = target_to_xscale(target);
838 struct arm *armv4_5 = &xscale->armv4_5_common;
839
840 static const char *state[] =
841 {
842 "disabled", "enabled"
843 };
844
845 static const char *arch_dbg_reason[] =
846 {
847 "", "\n(processor reset)", "\n(trace buffer full)"
848 };
849
850 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
851 {
852 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
853 return ERROR_INVALID_ARGUMENTS;
854 }
855
856 arm_arch_state(target);
857 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
858 state[xscale->armv4_5_mmu.mmu_enabled],
859 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
860 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
861 arch_dbg_reason[xscale->arch_debug_reason]);
862
863 return ERROR_OK;
864 }
865
/* Poll a running target for debug-state entry.
 *
 * The debug handler signals halt by posting data in TX; a non-consuming
 * TX read succeeding therefore means the core is halted, and debug entry
 * processing is performed.  On unexpected TX errors the state is forced
 * to HALTED so GDB can recover via reset.
 */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		/* consume=0: peek at TX without clearing TX_READY */
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be perfomed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
905
/* Handle entry into debug state.
 *
 * Receives the register snapshot the debug handler sends on entry
 * (r0, pc, r1-r7, cpsr, then the banked r8-r14 and spsr if applicable),
 * populates the register cache, decodes the DCSR "method of entry" to
 * set the debug reason, applies the PC fixup, refreshes MMU/cache state,
 * and reads out the trace buffer if tracing is active.
 *
 * Returns ERROR_OK, or a communication/JTAG error code.
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
	armv4_5->pc->dirty = 1;
	armv4_5->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(armv4_5, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* an invalid mode in cpsr means the handler protocol broke down */
	if (!is_arm_mode(armv4_5->core_mode))
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
		arm_mode_name(armv4_5->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (armv4_5->spsr) {
		/* 8 words: r8-r14 plus spsr in buffer[7] */
		xscale_receive(target, buffer, 8);
		buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
		armv4_5->spsr->dirty = false;
		armv4_5->spsr->valid = true;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++)
	{
		struct reg *r = arm_reg_current(armv4_5, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* examine debug reason: DCSR bits [4:2] hold the method of entry */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	/* every entry method needs the handler's stored PC rolled back by 4 */
	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			/* NOTE(review): aborting the whole process from a target
			 * driver is drastic — consider returning an error instead */
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}
1075
1076 static int xscale_halt(struct target *target)
1077 {
1078 struct xscale_common *xscale = target_to_xscale(target);
1079
1080 LOG_DEBUG("target->state: %s",
1081 target_state_name(target));
1082
1083 if (target->state == TARGET_HALTED)
1084 {
1085 LOG_DEBUG("target was already halted");
1086 return ERROR_OK;
1087 }
1088 else if (target->state == TARGET_UNKNOWN)
1089 {
1090 /* this must not happen for a xscale target */
1091 LOG_ERROR("target was in unknown state when halt was requested");
1092 return ERROR_TARGET_INVALID;
1093 }
1094 else if (target->state == TARGET_RESET)
1095 {
1096 LOG_DEBUG("target->state == TARGET_RESET");
1097 }
1098 else
1099 {
1100 /* assert external dbg break */
1101 xscale->external_debug_break = 1;
1102 xscale_read_dcsr(target);
1103
1104 target->debug_reason = DBG_REASON_DBGRQ;
1105 }
1106
1107 return ERROR_OK;
1108 }
1109
1110 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1111 {
1112 struct xscale_common *xscale = target_to_xscale(target);
1113 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1114 int retval;
1115
1116 if (xscale->ibcr0_used)
1117 {
1118 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1119
1120 if (ibcr0_bp)
1121 {
1122 xscale_unset_breakpoint(target, ibcr0_bp);
1123 }
1124 else
1125 {
1126 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1127 exit(-1);
1128 }
1129 }
1130
1131 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1132 return retval;
1133
1134 return ERROR_OK;
1135 }
1136
1137 static int xscale_disable_single_step(struct target *target)
1138 {
1139 struct xscale_common *xscale = target_to_xscale(target);
1140 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1141 int retval;
1142
1143 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1144 return retval;
1145
1146 return ERROR_OK;
1147 }
1148
1149 static void xscale_enable_watchpoints(struct target *target)
1150 {
1151 struct watchpoint *watchpoint = target->watchpoints;
1152
1153 while (watchpoint)
1154 {
1155 if (watchpoint->set == 0)
1156 xscale_set_watchpoint(target, watchpoint);
1157 watchpoint = watchpoint->next;
1158 }
1159 }
1160
1161 static void xscale_enable_breakpoints(struct target *target)
1162 {
1163 struct breakpoint *breakpoint = target->breakpoints;
1164
1165 /* set any pending breakpoints */
1166 while (breakpoint)
1167 {
1168 if (breakpoint->set == 0)
1169 xscale_set_breakpoint(target, breakpoint);
1170 breakpoint = breakpoint->next;
1171 }
1172 }
1173
/* Resume execution, either at the current PC or at @address.
 *
 * If a breakpoint sits on the resume address (and @handle_breakpoints is
 * set), it is removed, the instruction under it is single-stepped via
 * IBCR0, and the breakpoint is re-installed before the final resume.
 *
 * The resume itself is a debug-handler protocol exchange: command 0x30,
 * or 0x62 + 0x31 when the trace buffer must be cleaned and re-armed,
 * followed by CPSR, r7 down to r0, and finally the PC.
 *
 * NOTE(review): the return values of xscale_restore_banked() are stored
 * in retval on both paths but never checked — verify intent.
 */
static int xscale_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct breakpoint *breakpoint = target->breakpoints;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->pc->value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		breakpoint = breakpoint_find(target,
				buf_get_u32(armv4_5->pc->value, 0, 32));
		if (breakpoint != NULL)
		{
			uint32_t next_pc;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			}

			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			retval = xscale_restore_banked(target);

			/* send resume request (command 0x30 or 0x31)
			 * clean the trace buffer if it is to be enabled (0x62) */
			if (xscale->trace.buffer_enabled)
			{
				xscale_send_u32(target, 0x62);
				xscale_send_u32(target, 0x31);
			}
			else
				xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));

			/* the debug handler expects r7 first, down to r0 */
			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target,
				buf_get_u32(armv4_5->pc->value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->pc->value, 0, 32));

			/* wait for and process debug entry */
			xscale_debug_entry(target);

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			/* re-install the breakpoint we stepped over */
			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	retval = xscale_restore_banked(target);

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->pc->value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		register_cache_invalidate(armv4_5->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
1334
/* Perform one hardware single-step, assuming any breakpoint at the
 * current PC has already been removed by the caller (xscale_step).
 *
 * Sequence: compute the next PC via the instruction simulator, arm
 * IBCR0 on it, flush dirty banked registers, then run the resume
 * protocol (command 0x30, or 0x62 + 0x31 with tracing) sending CPSR,
 * r7..r0 and PC, and finally wait for debug re-entry and disarm IBCR0.
 */
static int xscale_step_inner(struct target *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_banked(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
	if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
		return retval;

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* the debug handler expects r7 first, down to r0 */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1419
1420 static int xscale_step(struct target *target, int current,
1421 uint32_t address, int handle_breakpoints)
1422 {
1423 struct arm *armv4_5 = target_to_arm(target);
1424 struct breakpoint *breakpoint = NULL;
1425
1426 uint32_t current_pc;
1427 int retval;
1428
1429 if (target->state != TARGET_HALTED)
1430 {
1431 LOG_WARNING("target not halted");
1432 return ERROR_TARGET_NOT_HALTED;
1433 }
1434
1435 /* current = 1: continue on current pc, otherwise continue at <address> */
1436 if (!current)
1437 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1438
1439 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1440
1441 /* if we're at the reset vector, we have to simulate the step */
1442 if (current_pc == 0x0)
1443 {
1444 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1445 return retval;
1446 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1447
1448 target->debug_reason = DBG_REASON_SINGLESTEP;
1449 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1450
1451 return ERROR_OK;
1452 }
1453
1454 /* the front-end may request us not to handle breakpoints */
1455 if (handle_breakpoints)
1456 breakpoint = breakpoint_find(target,
1457 buf_get_u32(armv4_5->pc->value, 0, 32));
1458 if (breakpoint != NULL) {
1459 retval = xscale_unset_breakpoint(target, breakpoint);
1460 if (retval != ERROR_OK)
1461 return retval;
1462 }
1463
1464 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1465
1466 if (breakpoint)
1467 {
1468 xscale_set_breakpoint(target, breakpoint);
1469 }
1470
1471 LOG_DEBUG("target stepped");
1472
1473 return ERROR_OK;
1474
1475 }
1476
/* Assert SRST while configuring DCSR so the core traps into debug
 * state on reset instead of executing from the reset vector.
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
			target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	/* if a reset-halt was requested, initiate the halt now so the core
	 * stops as soon as reset is released */
	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1519
/* Release SRST and re-establish the debug environment.
 *
 * All hardware breakpoint/watchpoint bookkeeping is cleared (the
 * comparator registers were reset), the debug handler binary is
 * reloaded into the mini-icache along with the low/high exception
 * vector stubs, and the core is released; unless reset_halt was
 * requested, the resulting debug entry is processed and the target is
 * resumed transparently.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* reset wiped the comparators: both IBCRs and both DBRs are free */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	register_cache_invalidate(xscale->armv4_5_common.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			/* the mini-icache is loaded one 32-byte line at a time */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad a partial final line with "mov r8, r8" filler */
			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		/* install the exception vector stubs at both vector bases */
		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1653
/* Stub: per-register reads are not routed through the debug handler yet.
 * NOTE(review): returns ERROR_OK even though nothing was read, so a
 * caller cannot tell the value is stale — confirm this is intentional.
 */
static int xscale_read_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1661
/* Stub: per-register writes are not routed through the debug handler yet.
 * NOTE(review): returns ERROR_OK even though nothing was written —
 * confirm callers rely on the register-cache flush path instead.
 */
static int xscale_write_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode, uint32_t value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1669
1670 static int xscale_full_context(struct target *target)
1671 {
1672 struct arm *armv4_5 = target_to_arm(target);
1673
1674 uint32_t *buffer;
1675
1676 int i, j;
1677
1678 LOG_DEBUG("-");
1679
1680 if (target->state != TARGET_HALTED)
1681 {
1682 LOG_WARNING("target not halted");
1683 return ERROR_TARGET_NOT_HALTED;
1684 }
1685
1686 buffer = malloc(4 * 8);
1687
1688 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1689 * we can't enter User mode on an XScale (unpredictable),
1690 * but User shares registers with SYS
1691 */
1692 for (i = 1; i < 7; i++)
1693 {
1694 enum arm_mode mode = armv4_5_number_to_mode(i);
1695 bool valid = true;
1696 struct reg *r;
1697
1698 if (mode == ARM_MODE_USR)
1699 continue;
1700
1701 /* check if there are invalid registers in the current mode
1702 */
1703 for (j = 0; valid && j <= 16; j++)
1704 {
1705 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1706 mode, j).valid)
1707 valid = false;
1708 }
1709 if (valid)
1710 continue;
1711
1712 /* request banked registers */
1713 xscale_send_u32(target, 0x0);
1714
1715 /* send CPSR for desired bank mode */
1716 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1717
1718 /* get banked registers: r8 to r14; and SPSR
1719 * except in USR/SYS mode
1720 */
1721 if (mode != ARM_MODE_SYS) {
1722 /* SPSR */
1723 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1724 mode, 16);
1725
1726 xscale_receive(target, buffer, 8);
1727
1728 buf_set_u32(r->value, 0, 32, buffer[7]);
1729 r->dirty = false;
1730 r->valid = true;
1731 } else {
1732 xscale_receive(target, buffer, 7);
1733 }
1734
1735 /* move data from buffer to register cache */
1736 for (j = 8; j <= 14; j++)
1737 {
1738 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1739 mode, j);
1740
1741 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1742 r->dirty = false;
1743 r->valid = true;
1744 }
1745 }
1746
1747 free(buffer);
1748
1749 return ERROR_OK;
1750 }
1751
1752 static int xscale_restore_banked(struct target *target)
1753 {
1754 struct arm *armv4_5 = target_to_arm(target);
1755
1756 int i, j;
1757
1758 if (target->state != TARGET_HALTED)
1759 {
1760 LOG_WARNING("target not halted");
1761 return ERROR_TARGET_NOT_HALTED;
1762 }
1763
1764 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1765 * and check if any banked registers need to be written. Ignore
1766 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1767 * an XScale (unpredictable), but they share all registers.
1768 */
1769 for (i = 1; i < 7; i++)
1770 {
1771 enum arm_mode mode = armv4_5_number_to_mode(i);
1772 struct reg *r;
1773
1774 if (mode == ARM_MODE_USR)
1775 continue;
1776
1777 /* check if there are dirty registers in this mode */
1778 for (j = 8; j <= 14; j++)
1779 {
1780 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1781 mode, j).dirty)
1782 goto dirty;
1783 }
1784
1785 /* if not USR/SYS, check if the SPSR needs to be written */
1786 if (mode != ARM_MODE_SYS)
1787 {
1788 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1789 mode, 16).dirty)
1790 goto dirty;
1791 }
1792
1793 /* there's nothing to flush for this mode */
1794 continue;
1795
1796 dirty:
1797 /* command 0x1: "send banked registers" */
1798 xscale_send_u32(target, 0x1);
1799
1800 /* send CPSR for desired mode */
1801 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1802
1803 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1804 * but this protocol doesn't understand that nuance.
1805 */
1806 for (j = 8; j <= 14; j++) {
1807 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1808 mode, j);
1809 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1810 r->dirty = false;
1811 }
1812
1813 /* send spsr if not in USR/SYS mode */
1814 if (mode != ARM_MODE_SYS) {
1815 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1816 mode, 16);
1817 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1818 r->dirty = false;
1819 }
1820 }
1821
1822 return ERROR_OK;
1823 }
1824
1825 static int xscale_read_memory(struct target *target, uint32_t address,
1826 uint32_t size, uint32_t count, uint8_t *buffer)
1827 {
1828 struct xscale_common *xscale = target_to_xscale(target);
1829 uint32_t *buf32;
1830 uint32_t i;
1831 int retval;
1832
1833 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1834
1835 if (target->state != TARGET_HALTED)
1836 {
1837 LOG_WARNING("target not halted");
1838 return ERROR_TARGET_NOT_HALTED;
1839 }
1840
1841 /* sanitize arguments */
1842 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1843 return ERROR_INVALID_ARGUMENTS;
1844
1845 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1846 return ERROR_TARGET_UNALIGNED_ACCESS;
1847
1848 /* send memory read request (command 0x1n, n: access size) */
1849 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1850 return retval;
1851
1852 /* send base address for read request */
1853 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1854 return retval;
1855
1856 /* send number of requested data words */
1857 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1858 return retval;
1859
1860 /* receive data from target (count times 32-bit words in host endianness) */
1861 buf32 = malloc(4 * count);
1862 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1863 return retval;
1864
1865 /* extract data from host-endian buffer into byte stream */
1866 for (i = 0; i < count; i++)
1867 {
1868 switch (size)
1869 {
1870 case 4:
1871 target_buffer_set_u32(target, buffer, buf32[i]);
1872 buffer += 4;
1873 break;
1874 case 2:
1875 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1876 buffer += 2;
1877 break;
1878 case 1:
1879 *buffer++ = buf32[i] & 0xff;
1880 break;
1881 default:
1882 LOG_ERROR("invalid read size");
1883 return ERROR_INVALID_ARGUMENTS;
1884 }
1885 }
1886
1887 free(buf32);
1888
1889 /* examine DCSR, to see if Sticky Abort (SA) got set */
1890 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1891 return retval;
1892 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1893 {
1894 /* clear SA bit */
1895 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1896 return retval;
1897
1898 return ERROR_TARGET_DATA_ABORT;
1899 }
1900
1901 return ERROR_OK;
1902 }
1903
1904 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1905 uint32_t size, uint32_t count, uint8_t *buffer)
1906 {
1907 struct xscale_common *xscale = target_to_xscale(target);
1908
1909 /* with MMU inactive, there are only physical addresses */
1910 if (!xscale->armv4_5_mmu.mmu_enabled)
1911 return xscale_read_memory(target, address, size, count, buffer);
1912
1913 /** \todo: provide a non-stub implementation of this routine. */
1914 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1915 target_name(target), __func__);
1916 return ERROR_FAIL;
1917 }
1918
1919 static int xscale_write_memory(struct target *target, uint32_t address,
1920 uint32_t size, uint32_t count, uint8_t *buffer)
1921 {
1922 struct xscale_common *xscale = target_to_xscale(target);
1923 int retval;
1924
1925 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1926
1927 if (target->state != TARGET_HALTED)
1928 {
1929 LOG_WARNING("target not halted");
1930 return ERROR_TARGET_NOT_HALTED;
1931 }
1932
1933 /* sanitize arguments */
1934 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1935 return ERROR_INVALID_ARGUMENTS;
1936
1937 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1938 return ERROR_TARGET_UNALIGNED_ACCESS;
1939
1940 /* send memory write request (command 0x2n, n: access size) */
1941 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1942 return retval;
1943
1944 /* send base address for read request */
1945 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1946 return retval;
1947
1948 /* send number of requested data words to be written*/
1949 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1950 return retval;
1951
1952 /* extract data from host-endian buffer into byte stream */
1953 #if 0
1954 for (i = 0; i < count; i++)
1955 {
1956 switch (size)
1957 {
1958 case 4:
1959 value = target_buffer_get_u32(target, buffer);
1960 xscale_send_u32(target, value);
1961 buffer += 4;
1962 break;
1963 case 2:
1964 value = target_buffer_get_u16(target, buffer);
1965 xscale_send_u32(target, value);
1966 buffer += 2;
1967 break;
1968 case 1:
1969 value = *buffer;
1970 xscale_send_u32(target, value);
1971 buffer += 1;
1972 break;
1973 default:
1974 LOG_ERROR("should never get here");
1975 exit(-1);
1976 }
1977 }
1978 #endif
1979 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1980 return retval;
1981
1982 /* examine DCSR, to see if Sticky Abort (SA) got set */
1983 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1984 return retval;
1985 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1986 {
1987 /* clear SA bit */
1988 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1989 return retval;
1990
1991 return ERROR_TARGET_DATA_ABORT;
1992 }
1993
1994 return ERROR_OK;
1995 }
1996
1997 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1998 uint32_t size, uint32_t count, uint8_t *buffer)
1999 {
2000 struct xscale_common *xscale = target_to_xscale(target);
2001
2002 /* with MMU inactive, there are only physical addresses */
2003 if (!xscale->armv4_5_mmu.mmu_enabled)
2004 return xscale_read_memory(target, address, size, count, buffer);
2005
2006 /** \todo: provide a non-stub implementation of this routine. */
2007 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
2008 target_name(target), __func__);
2009 return ERROR_FAIL;
2010 }
2011
2012 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
2013 uint32_t count, uint8_t *buffer)
2014 {
2015 return xscale_write_memory(target, address, 4, count, buffer);
2016 }
2017
2018 static uint32_t xscale_get_ttb(struct target *target)
2019 {
2020 struct xscale_common *xscale = target_to_xscale(target);
2021 uint32_t ttb;
2022
2023 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2024 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2025
2026 return ttb;
2027 }
2028
2029 static void xscale_disable_mmu_caches(struct target *target, int mmu,
2030 int d_u_cache, int i_cache)
2031 {
2032 struct xscale_common *xscale = target_to_xscale(target);
2033 uint32_t cp15_control;
2034
2035 /* read cp15 control register */
2036 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2037 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2038
2039 if (mmu)
2040 cp15_control &= ~0x1U;
2041
2042 if (d_u_cache)
2043 {
2044 /* clean DCache */
2045 xscale_send_u32(target, 0x50);
2046 xscale_send_u32(target, xscale->cache_clean_address);
2047
2048 /* invalidate DCache */
2049 xscale_send_u32(target, 0x51);
2050
2051 cp15_control &= ~0x4U;
2052 }
2053
2054 if (i_cache)
2055 {
2056 /* invalidate ICache */
2057 xscale_send_u32(target, 0x52);
2058 cp15_control &= ~0x1000U;
2059 }
2060
2061 /* write new cp15 control register */
2062 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2063
2064 /* execute cpwait to ensure outstanding operations complete */
2065 xscale_send_u32(target, 0x53);
2066 }
2067
2068 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2069 int d_u_cache, int i_cache)
2070 {
2071 struct xscale_common *xscale = target_to_xscale(target);
2072 uint32_t cp15_control;
2073
2074 /* read cp15 control register */
2075 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2076 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2077
2078 if (mmu)
2079 cp15_control |= 0x1U;
2080
2081 if (d_u_cache)
2082 cp15_control |= 0x4U;
2083
2084 if (i_cache)
2085 cp15_control |= 0x1000U;
2086
2087 /* write new cp15 control register */
2088 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2089
2090 /* execute cpwait to ensure outstanding operations complete */
2091 xscale_send_u32(target, 0x53);
2092 }
2093
2094 static int xscale_set_breakpoint(struct target *target,
2095 struct breakpoint *breakpoint)
2096 {
2097 int retval;
2098 struct xscale_common *xscale = target_to_xscale(target);
2099
2100 if (target->state != TARGET_HALTED)
2101 {
2102 LOG_WARNING("target not halted");
2103 return ERROR_TARGET_NOT_HALTED;
2104 }
2105
2106 if (breakpoint->set)
2107 {
2108 LOG_WARNING("breakpoint already set");
2109 return ERROR_OK;
2110 }
2111
2112 if (breakpoint->type == BKPT_HARD)
2113 {
2114 uint32_t value = breakpoint->address | 1;
2115 if (!xscale->ibcr0_used)
2116 {
2117 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2118 xscale->ibcr0_used = 1;
2119 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2120 }
2121 else if (!xscale->ibcr1_used)
2122 {
2123 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2124 xscale->ibcr1_used = 1;
2125 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2126 }
2127 else
2128 {
2129 LOG_ERROR("BUG: no hardware comparator available");
2130 return ERROR_OK;
2131 }
2132 }
2133 else if (breakpoint->type == BKPT_SOFT)
2134 {
2135 if (breakpoint->length == 4)
2136 {
2137 /* keep the original instruction in target endianness */
2138 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2139 {
2140 return retval;
2141 }
2142 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2143 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2144 {
2145 return retval;
2146 }
2147 }
2148 else
2149 {
2150 /* keep the original instruction in target endianness */
2151 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2152 {
2153 return retval;
2154 }
2155 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2156 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2157 {
2158 return retval;
2159 }
2160 }
2161 breakpoint->set = 1;
2162 }
2163
2164 return ERROR_OK;
2165 }
2166
2167 static int xscale_add_breakpoint(struct target *target,
2168 struct breakpoint *breakpoint)
2169 {
2170 struct xscale_common *xscale = target_to_xscale(target);
2171
2172 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2173 {
2174 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2175 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2176 }
2177
2178 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2179 {
2180 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2181 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2182 }
2183
2184 if (breakpoint->type == BKPT_HARD)
2185 {
2186 xscale->ibcr_available--;
2187 }
2188
2189 return ERROR_OK;
2190 }
2191
2192 static int xscale_unset_breakpoint(struct target *target,
2193 struct breakpoint *breakpoint)
2194 {
2195 int retval;
2196 struct xscale_common *xscale = target_to_xscale(target);
2197
2198 if (target->state != TARGET_HALTED)
2199 {
2200 LOG_WARNING("target not halted");
2201 return ERROR_TARGET_NOT_HALTED;
2202 }
2203
2204 if (!breakpoint->set)
2205 {
2206 LOG_WARNING("breakpoint not set");
2207 return ERROR_OK;
2208 }
2209
2210 if (breakpoint->type == BKPT_HARD)
2211 {
2212 if (breakpoint->set == 1)
2213 {
2214 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2215 xscale->ibcr0_used = 0;
2216 }
2217 else if (breakpoint->set == 2)
2218 {
2219 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2220 xscale->ibcr1_used = 0;
2221 }
2222 breakpoint->set = 0;
2223 }
2224 else
2225 {
2226 /* restore original instruction (kept in target endianness) */
2227 if (breakpoint->length == 4)
2228 {
2229 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2230 {
2231 return retval;
2232 }
2233 }
2234 else
2235 {
2236 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2237 {
2238 return retval;
2239 }
2240 }
2241 breakpoint->set = 0;
2242 }
2243
2244 return ERROR_OK;
2245 }
2246
2247 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2248 {
2249 struct xscale_common *xscale = target_to_xscale(target);
2250
2251 if (target->state != TARGET_HALTED)
2252 {
2253 LOG_WARNING("target not halted");
2254 return ERROR_TARGET_NOT_HALTED;
2255 }
2256
2257 if (breakpoint->set)
2258 {
2259 xscale_unset_breakpoint(target, breakpoint);
2260 }
2261
2262 if (breakpoint->type == BKPT_HARD)
2263 xscale->ibcr_available++;
2264
2265 return ERROR_OK;
2266 }
2267
2268 static int xscale_set_watchpoint(struct target *target,
2269 struct watchpoint *watchpoint)
2270 {
2271 struct xscale_common *xscale = target_to_xscale(target);
2272 uint8_t enable = 0;
2273 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2274 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2275
2276 if (target->state != TARGET_HALTED)
2277 {
2278 LOG_WARNING("target not halted");
2279 return ERROR_TARGET_NOT_HALTED;
2280 }
2281
2282 xscale_get_reg(dbcon);
2283
2284 switch (watchpoint->rw)
2285 {
2286 case WPT_READ:
2287 enable = 0x3;
2288 break;
2289 case WPT_ACCESS:
2290 enable = 0x2;
2291 break;
2292 case WPT_WRITE:
2293 enable = 0x1;
2294 break;
2295 default:
2296 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2297 }
2298
2299 if (!xscale->dbr0_used)
2300 {
2301 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2302 dbcon_value |= enable;
2303 xscale_set_reg_u32(dbcon, dbcon_value);
2304 watchpoint->set = 1;
2305 xscale->dbr0_used = 1;
2306 }
2307 else if (!xscale->dbr1_used)
2308 {
2309 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2310 dbcon_value |= enable << 2;
2311 xscale_set_reg_u32(dbcon, dbcon_value);
2312 watchpoint->set = 2;
2313 xscale->dbr1_used = 1;
2314 }
2315 else
2316 {
2317 LOG_ERROR("BUG: no hardware comparator available");
2318 return ERROR_OK;
2319 }
2320
2321 return ERROR_OK;
2322 }
2323
2324 static int xscale_add_watchpoint(struct target *target,
2325 struct watchpoint *watchpoint)
2326 {
2327 struct xscale_common *xscale = target_to_xscale(target);
2328
2329 if (xscale->dbr_available < 1)
2330 {
2331 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2332 }
2333
2334 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2335 {
2336 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2337 }
2338
2339 xscale->dbr_available--;
2340
2341 return ERROR_OK;
2342 }
2343
2344 static int xscale_unset_watchpoint(struct target *target,
2345 struct watchpoint *watchpoint)
2346 {
2347 struct xscale_common *xscale = target_to_xscale(target);
2348 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2349 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2350
2351 if (target->state != TARGET_HALTED)
2352 {
2353 LOG_WARNING("target not halted");
2354 return ERROR_TARGET_NOT_HALTED;
2355 }
2356
2357 if (!watchpoint->set)
2358 {
2359 LOG_WARNING("breakpoint not set");
2360 return ERROR_OK;
2361 }
2362
2363 if (watchpoint->set == 1)
2364 {
2365 dbcon_value &= ~0x3;
2366 xscale_set_reg_u32(dbcon, dbcon_value);
2367 xscale->dbr0_used = 0;
2368 }
2369 else if (watchpoint->set == 2)
2370 {
2371 dbcon_value &= ~0xc;
2372 xscale_set_reg_u32(dbcon, dbcon_value);
2373 xscale->dbr1_used = 0;
2374 }
2375 watchpoint->set = 0;
2376
2377 return ERROR_OK;
2378 }
2379
2380 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2381 {
2382 struct xscale_common *xscale = target_to_xscale(target);
2383
2384 if (target->state != TARGET_HALTED)
2385 {
2386 LOG_WARNING("target not halted");
2387 return ERROR_TARGET_NOT_HALTED;
2388 }
2389
2390 if (watchpoint->set)
2391 {
2392 xscale_unset_watchpoint(target, watchpoint);
2393 }
2394
2395 xscale->dbr_available++;
2396
2397 return ERROR_OK;
2398 }
2399
/* reg_arch_type "get" handler for the XScale debug register cache.
 *
 * DCSR, TX and RX are reachable directly over JTAG; every other debug
 * register is fetched by asking the debug handler on the target
 * (command 0x40 = coprocessor register read), with the result arriving
 * through the TX register.
 */
static int xscale_get_reg(struct reg *reg)
{
	struct xscale_reg *arch_info = reg->arch_info;
	struct target *target = arch_info->target;
	struct xscale_common *xscale = target_to_xscale(target);

	/* DCSR, TX and RX are accessible via JTAG */
	if (strcmp(reg->name, "XSCALE_DCSR") == 0)
	{
		return xscale_read_dcsr(arch_info->target);
	}
	else if (strcmp(reg->name, "XSCALE_TX") == 0)
	{
		/* 1 = consume register content */
		return xscale_read_tx(arch_info->target, 1);
	}
	else if (strcmp(reg->name, "XSCALE_RX") == 0)
	{
		/* can't read from RX register (host -> debug handler) */
		return ERROR_OK;
	}
	else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
	{
		/* can't (explicitly) read from TXRXCTRL register */
		return ERROR_OK;
	}
	else /* Other DBG registers have to be transfered by the debug handler */
	{
		/* send CP read request (command 0x40) */
		xscale_send_u32(target, 0x40);

		/* send CP register number */
		xscale_send_u32(target, arch_info->dbg_handler_number);

		/* read register value: the handler replies through TX, and the
		 * reply is copied from the TX cache entry into this register */
		xscale_read_tx(target, 1);
		buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);

		reg->dirty = 0;
		reg->valid = 1;
	}

	return ERROR_OK;
}
2444
/* reg_arch_type "set" handler for the XScale debug register cache.
 *
 * DCSR and RX are written directly over JTAG; TX and TXRXCTRL cannot be
 * written from the host side; every other debug register is written
 * through the debug handler (command 0x41 = coprocessor register write).
 */
static int xscale_set_reg(struct reg *reg, uint8_t* buf)
{
	struct xscale_reg *arch_info = reg->arch_info;
	struct target *target = arch_info->target;
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t value = buf_get_u32(buf, 0, 32);

	/* DCSR, TX and RX are accessible via JTAG */
	if (strcmp(reg->name, "XSCALE_DCSR") == 0)
	{
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
		return xscale_write_dcsr(arch_info->target, -1, -1);
	}
	else if (strcmp(reg->name, "XSCALE_RX") == 0)
	{
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
		return xscale_write_rx(arch_info->target);
	}
	else if (strcmp(reg->name, "XSCALE_TX") == 0)
	{
		/* can't write to TX register (debug-handler -> host) */
		return ERROR_OK;
	}
	else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
	{
		/* can't (explicitly) write to TXRXCTRL register */
		return ERROR_OK;
	}
	else /* Other DBG registers have to be transfered by the debug handler */
	{
		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, arch_info->dbg_handler_number);

		/* send CP register value, and mirror it into the cache */
		xscale_send_u32(target, value);
		buf_set_u32(reg->value, 0, 32, value);
	}

	return ERROR_OK;
}
2488
/* Write DCSR via the debug handler ("software" path, command 0x41)
 * rather than through the direct JTAG access used by xscale_write_dcsr,
 * and mirror the new value into the register cache.
 */
static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
	struct xscale_reg *dcsr_arch_info = dcsr->arch_info;

	/* send CP write request (command 0x41) */
	xscale_send_u32(target, 0x41);

	/* send CP register number */
	xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);

	/* send CP register value */
	xscale_send_u32(target, value);
	buf_set_u32(dcsr->value, 0, 32, value);

	return ERROR_OK;
}
2507
2508 static int xscale_read_trace(struct target *target)
2509 {
2510 struct xscale_common *xscale = target_to_xscale(target);
2511 struct arm *armv4_5 = &xscale->armv4_5_common;
2512 struct xscale_trace_data **trace_data_p;
2513
2514 /* 258 words from debug handler
2515 * 256 trace buffer entries
2516 * 2 checkpoint addresses
2517 */
2518 uint32_t trace_buffer[258];
2519 int is_address[256];
2520 int i, j;
2521
2522 if (target->state != TARGET_HALTED)
2523 {
2524 LOG_WARNING("target must be stopped to read trace data");
2525 return ERROR_TARGET_NOT_HALTED;
2526 }
2527
2528 /* send read trace buffer command (command 0x61) */
2529 xscale_send_u32(target, 0x61);
2530
2531 /* receive trace buffer content */
2532 xscale_receive(target, trace_buffer, 258);
2533
2534 /* parse buffer backwards to identify address entries */
2535 for (i = 255; i >= 0; i--)
2536 {
2537 is_address[i] = 0;
2538 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2539 ((trace_buffer[i] & 0xf0) == 0xd0))
2540 {
2541 if (i >= 3)
2542 is_address[--i] = 1;
2543 if (i >= 2)
2544 is_address[--i] = 1;
2545 if (i >= 1)
2546 is_address[--i] = 1;
2547 if (i >= 0)
2548 is_address[--i] = 1;
2549 }
2550 }
2551
2552
2553 /* search first non-zero entry */
2554 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2555 ;
2556
2557 if (j == 256)
2558 {
2559 LOG_DEBUG("no trace data collected");
2560 return ERROR_XSCALE_NO_TRACE_DATA;
2561 }
2562
2563 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2564 ;
2565
2566 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2567 (*trace_data_p)->next = NULL;
2568 (*trace_data_p)->chkpt0 = trace_buffer[256];
2569 (*trace_data_p)->chkpt1 = trace_buffer[257];
2570 (*trace_data_p)->last_instruction =
2571 buf_get_u32(armv4_5->pc->value, 0, 32);
2572 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2573 (*trace_data_p)->depth = 256 - j;
2574
2575 for (i = j; i < 256; i++)
2576 {
2577 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2578 if (is_address[i])
2579 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2580 else
2581 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2582 }
2583
2584 return ERROR_OK;
2585 }
2586
/* Disassemble the instruction at trace.current_pc by fetching its
 * opcode from the loaded trace image (not from target memory), using
 * the ARM or Thumb decoder depending on trace.core_state.
 *
 * Returns ERROR_TRACE_IMAGE_UNAVAILABLE when no image has been loaded,
 * and ERROR_TRACE_INSTRUCTION_UNAVAILABLE when current_pc falls outside
 * every image section or the section read fails.
 */
static int xscale_read_instruction(struct target *target,
		struct arm_instruction *instruction)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int i;
	int section = -1;
	size_t size_read;
	uint32_t opcode;
	int retval;

	if (!xscale->trace.image)
		return ERROR_TRACE_IMAGE_UNAVAILABLE;

	/* search for the section the current instruction belongs to */
	for (i = 0; i < xscale->trace.image->num_sections; i++)
	{
		if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
			(xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
		{
			section = i;
			break;
		}
	}

	if (section == -1)
	{
		/* current instruction couldn't be found in the image */
		return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
	}

	if (xscale->trace.core_state == ARM_STATE_ARM)
	{
		/* ARM state: 4-byte opcode, decoded with arm_evaluate_opcode */
		uint8_t buf[4];
		if ((retval = image_read_section(xscale->trace.image, section,
			xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
			4, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction: %i", retval);
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u32(target, buf);
		arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
	}
	else if (xscale->trace.core_state == ARM_STATE_THUMB)
	{
		/* Thumb state: 2-byte opcode, decoded with thumb_evaluate_opcode */
		uint8_t buf[2];
		if ((retval = image_read_section(xscale->trace.image, section,
			xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
			2, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction: %i", retval);
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u16(target, buf);
		thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
	}
	else
	{
		LOG_ERROR("BUG: unknown core state encountered");
		exit(-1);
	}

	return ERROR_OK;
}
2651
2652 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2653 int i, uint32_t *target)
2654 {
2655 /* if there are less than four entries prior to the indirect branch message
2656 * we can't extract the address */
2657 if (i < 4)
2658 {
2659 return -1;
2660 }
2661
2662 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2663 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2664
2665 return 0;
2666 }
2667
2668 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2669 {
2670 struct xscale_common *xscale = target_to_xscale(target);
2671 int next_pc_ok = 0;
2672 uint32_t next_pc = 0x0;
2673 struct xscale_trace_data *trace_data = xscale->trace.data;
2674 int retval;
2675
2676 while (trace_data)
2677 {
2678 int i, chkpt;
2679 int rollover;
2680 int branch;
2681 int exception;
2682 xscale->trace.core_state = ARM_STATE_ARM;
2683
2684 chkpt = 0;
2685 rollover = 0;
2686
2687 for (i = 0; i < trace_data->depth; i++)
2688 {
2689 next_pc_ok = 0;
2690 branch = 0;
2691 exception = 0;
2692
2693 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2694 continue;
2695
2696 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2697 {
2698 case 0: /* Exceptions */
2699 case 1:
2700 case 2:
2701 case 3:
2702 case 4:
2703 case 5:
2704 case 6:
2705 case 7:
2706 exception = (trace_data->entries[i].data & 0x70) >> 4;
2707 next_pc_ok = 1;
2708 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2709 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2710 break;
2711 case 8: /* Direct Branch */
2712 branch = 1;
2713 break;
2714 case 9: /* Indirect Branch */
2715 branch = 1;
2716 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2717 {
2718 next_pc_ok = 1;
2719 }
2720 break;
2721 case 13: /* Checkpointed Indirect Branch */
2722 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2723 {
2724 next_pc_ok = 1;
2725 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2726 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2727 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2728 }
2729 /* explicit fall-through */
2730 case 12: /* Checkpointed Direct Branch */
2731 branch = 1;
2732 if (chkpt == 0)
2733 {
2734 next_pc_ok = 1;
2735 next_pc = trace_data->chkpt0;
2736 chkpt++;
2737 }
2738 else if (chkpt == 1)
2739 {
2740 next_pc_ok = 1;
2741 next_pc = trace_data->chkpt0;
2742 chkpt++;
2743 }
2744 else
2745 {
2746 LOG_WARNING("more than two checkpointed branches encountered");
2747 }
2748 break;
2749 case 15: /* Roll-over */
2750 rollover++;
2751 continue;
2752 default: /* Reserved */
2753 command_print(cmd_ctx, "--- reserved trace message ---");
2754 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2755 return ERROR_OK;
2756 }
2757
2758 if (xscale->trace.pc_ok)
2759 {
2760 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2761 struct arm_instruction instruction;
2762
2763 if ((exception == 6) || (exception == 7))
2764 {
2765 /* IRQ or FIQ exception, no instruction executed */
2766 executed -= 1;
2767 }
2768
2769 while (executed-- >= 0)
2770 {
2771 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2772 {
2773 /* can't continue tracing with no image available */
2774 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2775 {
2776 return retval;
2777 }
2778 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2779 {
2780 /* TODO: handle incomplete images */
2781 }
2782 }
2783
2784 /* a precise abort on a load to the PC is included in the incremental
2785 * word count, other instructions causing data aborts are not included
2786 */
2787 if ((executed == 0) && (exception == 4)
2788 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2789 {
2790 if ((instruction.type == ARM_LDM)
2791 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2792 {
2793 executed--;
2794 }
2795 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2796 && (instruction.info.load_store.Rd != 15))
2797 {
2798 executed--;
2799 }
2800 }
2801
2802 /* only the last instruction executed
2803 * (the one that caused the control flow change)
2804 * could be a taken branch
2805 */
2806 if (((executed == -1) && (branch == 1)) &&
2807 (((instruction.type == ARM_B) ||
2808 (instruction.type == ARM_BL) ||
2809 (instruction.type == ARM_BLX)) &&
2810 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2811 {
2812 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2813 }
2814 else
2815 {
2816 xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2;
2817 }
2818 command_print(cmd_ctx, "%s", instruction.text);
2819 }
2820
2821 rollover = 0;
2822 }
2823
2824 if (next_pc_ok)
2825 {
2826 xscale->trace.current_pc = next_pc;
2827 xscale->trace.pc_ok = 1;
2828 }
2829 }
2830
2831 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2)
2832 {
2833 struct arm_instruction instruction;
2834 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2835 {
2836 /* can't continue tracing with no image available */
2837 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2838 {
2839 return retval;
2840 }
2841 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2842 {
2843 /* TODO: handle incomplete images */
2844 }
2845 }
2846 command_print(cmd_ctx, "%s", instruction.text);
2847 }
2848
2849 trace_data = trace_data->next;
2850 }
2851
2852 return ERROR_OK;
2853 }
2854
/* accessors shared by every entry in the XScale register cache */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2859
/* Build the register caches for this target: the generic ARM core
 * register cache (arm_build_reg_cache) followed by a second cache
 * holding the XScale debug registers, backed by a per-target copy of
 * the static xscale_reg_arch_info descriptor table.
 *
 * NOTE(review): the malloc()/calloc() results here are unchecked.
 */
static void xscale_build_reg_cache(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	/* per-target copy of the descriptor table (whole array) */
	struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
	int i;
	int num_regs = ARRAY_SIZE(xscale_reg_arch_info);

	(*cache_p) = arm_build_reg_cache(target, armv4_5);

	/* chain the XScale cache behind the ARM core cache */
	(*cache_p)->next = malloc(sizeof(struct reg_cache));
	cache_p = &(*cache_p)->next;

	/* fill in values for the xscale reg cache */
	(*cache_p)->name = "XScale registers";
	(*cache_p)->next = NULL;
	(*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
	(*cache_p)->num_regs = num_regs;

	for (i = 0; i < num_regs; i++)
	{
		(*cache_p)->reg_list[i].name = xscale_reg_list[i];
		(*cache_p)->reg_list[i].value = calloc(4, 1);
		(*cache_p)->reg_list[i].dirty = 0;
		(*cache_p)->reg_list[i].valid = 0;
		(*cache_p)->reg_list[i].size = 32;
		(*cache_p)->reg_list[i].arch_info = &arch_info[i];
		(*cache_p)->reg_list[i].type = &xscale_reg_type;
		/* copy the static descriptor and point it at this target */
		arch_info[i] = xscale_reg_arch_info[i];
		arch_info[i].target = target;
	}

	xscale->reg_cache = (*cache_p);
}
2895
/* target_type "init_target" hook: only sets up the register cache */
static int xscale_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2902
/* One-time initialization of the xscale_common structure:
 *  - variant-based JTAG IR length fix-up
 *  - default debug handler address and reset vectors
 *  - breakpoint/watchpoint resource bookkeeping
 *  - ARMv4/5 glue (register access callbacks, MMU/cache accessors)
 */
static int xscale_init_arch_info(struct target *target,
		struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specfic data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* a variant param ... */
	if (variant) {
		int ir_length = 0;

		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else if (strcmp(variant, "pxa3xx") == 0)
			ir_length = 11;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		/* override a mis-configured IR length so JTAG still works */
		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* PXA3xx shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	/* reset vector branches into the debug handler */
	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* all other vectors branch to themselves (endless loop) */
	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two IBCR breakpoint and two DBR watchpoint comparators */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
			target_name(target));

	/* software breakpoint opcodes (ARM and Thumb) */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	xscale->trace.capture_status = TRACE_IDLE;
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.buffer_enabled = 0;
	xscale->trace.buffer_fill = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	arm_init_arch_info(target, armv4_5);

	/* MMU/cache access goes through the XScale-specific helpers */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
3018
3019 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3020 {
3021 struct xscale_common *xscale;
3022
3023 if (sizeof xscale_debug_handler - 1 > 0x800) {
3024 LOG_ERROR("debug_handler.bin: larger than 2kb");
3025 return ERROR_FAIL;
3026 }
3027
3028 xscale = calloc(1, sizeof(*xscale));
3029 if (!xscale)
3030 return ERROR_FAIL;
3031
3032 return xscale_init_arch_info(target, xscale, target->tap,
3033 target->variant);
3034 }
3035
3036 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3037 {
3038 struct target *target = NULL;
3039 struct xscale_common *xscale;
3040 int retval;
3041 uint32_t handler_address;
3042
3043 if (CMD_ARGC < 2)
3044 {
3045 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3046 return ERROR_OK;
3047 }
3048
3049 if ((target = get_target(CMD_ARGV[0])) == NULL)
3050 {
3051 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3052 return ERROR_FAIL;
3053 }
3054
3055 xscale = target_to_xscale(target);
3056 retval = xscale_verify_pointer(CMD_CTX, xscale);
3057 if (retval != ERROR_OK)
3058 return retval;
3059
3060 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3061
3062 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3063 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3064 {
3065 xscale->handler_address = handler_address;
3066 }
3067 else
3068 {
3069 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3070 return ERROR_FAIL;
3071 }
3072
3073 return ERROR_OK;
3074 }
3075
3076 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3077 {
3078 struct target *target = NULL;
3079 struct xscale_common *xscale;
3080 int retval;
3081 uint32_t cache_clean_address;
3082
3083 if (CMD_ARGC < 2)
3084 {
3085 return ERROR_COMMAND_SYNTAX_ERROR;
3086 }
3087
3088 target = get_target(CMD_ARGV[0]);
3089 if (target == NULL)
3090 {
3091 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3092 return ERROR_FAIL;
3093 }
3094 xscale = target_to_xscale(target);
3095 retval = xscale_verify_pointer(CMD_CTX, xscale);
3096 if (retval != ERROR_OK)
3097 return retval;
3098
3099 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3100
3101 if (cache_clean_address & 0xffff)
3102 {
3103 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3104 }
3105 else
3106 {
3107 xscale->cache_clean_address = cache_clean_address;
3108 }
3109
3110 return ERROR_OK;
3111 }
3112
3113 COMMAND_HANDLER(xscale_handle_cache_info_command)
3114 {
3115 struct target *target = get_current_target(CMD_CTX);
3116 struct xscale_common *xscale = target_to_xscale(target);
3117 int retval;
3118
3119 retval = xscale_verify_pointer(CMD_CTX, xscale);
3120 if (retval != ERROR_OK)
3121 return retval;
3122
3123 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3124 }
3125
/* Translate a virtual address to a physical one via the ARMv4/5 MMU
 * walker.  armv4_5_mmu_translate_va() returns the physical address in
 * 'ret'; when it signals failure through *type == -1, 'ret' instead
 * carries the error code, hence the dual use below.
 */
static int xscale_virt2phys(struct target *target,
		uint32_t virtual, uint32_t *physical)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int type;
	uint32_t cb;
	int domain;
	uint32_t ap;

	/* guard against being called on a non-XScale target */
	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		LOG_ERROR(xscale_not);
		return ERROR_TARGET_INVALID;
	}

	uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
	if (type == -1)
	{
		return ret;
	}
	*physical = ret;
	return ERROR_OK;
}
3148
3149 static int xscale_mmu(struct target *target, int *enabled)
3150 {
3151 struct xscale_common *xscale = target_to_xscale(target);
3152
3153 if (target->state != TARGET_HALTED)
3154 {
3155 LOG_ERROR("Target not halted");
3156 return ERROR_TARGET_INVALID;
3157 }
3158 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3159 return ERROR_OK;
3160 }
3161
3162 COMMAND_HANDLER(xscale_handle_mmu_command)
3163 {
3164 struct target *target = get_current_target(CMD_CTX);
3165 struct xscale_common *xscale = target_to_xscale(target);
3166 int retval;
3167
3168 retval = xscale_verify_pointer(CMD_CTX, xscale);
3169 if (retval != ERROR_OK)
3170 return retval;
3171
3172 if (target->state != TARGET_HALTED)
3173 {
3174 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3175 return ERROR_OK;
3176 }
3177
3178 if (CMD_ARGC >= 1)
3179 {
3180 bool enable;
3181 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3182 if (enable)
3183 xscale_enable_mmu_caches(target, 1, 0, 0);
3184 else
3185 xscale_disable_mmu_caches(target, 1, 0, 0);
3186 xscale->armv4_5_mmu.mmu_enabled = enable;
3187 }
3188
3189 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3190
3191 return ERROR_OK;
3192 }
3193
3194 COMMAND_HANDLER(xscale_handle_idcache_command)
3195 {
3196 struct target *target = get_current_target(CMD_CTX);
3197 struct xscale_common *xscale = target_to_xscale(target);
3198
3199 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3200 if (retval != ERROR_OK)
3201 return retval;
3202
3203 if (target->state != TARGET_HALTED)
3204 {
3205 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3206 return ERROR_OK;
3207 }
3208
3209 bool icache = false;
3210 if (strcmp(CMD_NAME, "icache") == 0)
3211 icache = true;
3212 if (CMD_ARGC >= 1)
3213 {
3214 bool enable;
3215 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3216 if (icache) {
3217 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3218 if (enable)
3219 xscale_enable_mmu_caches(target, 0, 0, 1);
3220 else
3221 xscale_disable_mmu_caches(target, 0, 0, 1);
3222 } else {
3223 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3224 if (enable)
3225 xscale_enable_mmu_caches(target, 0, 1, 0);
3226 else
3227 xscale_disable_mmu_caches(target, 0, 1, 0);
3228 }
3229 }
3230
3231 bool enabled = icache ?
3232 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3233 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3234 const char *msg = enabled ? "enabled" : "disabled";
3235 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3236
3237 return ERROR_OK;
3238 }
3239
3240 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3241 {
3242 struct target *target = get_current_target(CMD_CTX);
3243 struct xscale_common *xscale = target_to_xscale(target);
3244 int retval;
3245
3246 retval = xscale_verify_pointer(CMD_CTX, xscale);
3247 if (retval != ERROR_OK)
3248 return retval;
3249
3250 if (CMD_ARGC < 1)
3251 {
3252 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3253 }
3254 else
3255 {
3256 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3257 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3258 xscale_write_dcsr(target, -1, -1);
3259 }
3260
3261 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3262
3263 return ERROR_OK;
3264 }
3265
3266
3267 COMMAND_HANDLER(xscale_handle_vector_table_command)
3268 {
3269 struct target *target = get_current_target(CMD_CTX);
3270 struct xscale_common *xscale = target_to_xscale(target);
3271 int err = 0;
3272 int retval;
3273
3274 retval = xscale_verify_pointer(CMD_CTX, xscale);
3275 if (retval != ERROR_OK)
3276 return retval;
3277
3278 if (CMD_ARGC == 0) /* print current settings */
3279 {
3280 int idx;
3281
3282 command_print(CMD_CTX, "active user-set static vectors:");
3283 for (idx = 1; idx < 8; idx++)
3284 if (xscale->static_low_vectors_set & (1 << idx))
3285 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3286 for (idx = 1; idx < 8; idx++)
3287 if (xscale->static_high_vectors_set & (1 << idx))
3288 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3289 return ERROR_OK;
3290 }
3291
3292 if (CMD_ARGC != 3)
3293 err = 1;
3294 else
3295 {
3296 int idx;
3297 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3298 uint32_t vec;
3299 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3300
3301 if (idx < 1 || idx >= 8)
3302 err = 1;
3303
3304 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3305 {
3306 xscale->static_low_vectors_set |= (1<<idx);
3307 xscale->static_low_vectors[idx] = vec;
3308 }
3309 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3310 {
3311 xscale->static_high_vectors_set |= (1<<idx);
3312 xscale->static_high_vectors[idx] = vec;
3313 }
3314 else
3315 err = 1;
3316 }
3317
3318 if (err)
3319 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3320
3321 return ERROR_OK;
3322 }
3323
3324
3325 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3326 {
3327 struct target *target = get_current_target(CMD_CTX);
3328 struct xscale_common *xscale = target_to_xscale(target);
3329 struct arm *armv4_5 = &xscale->armv4_5_common;
3330 uint32_t dcsr_value;
3331 int retval;
3332
3333 retval = xscale_verify_pointer(CMD_CTX, xscale);
3334 if (retval != ERROR_OK)
3335 return retval;
3336
3337 if (target->state != TARGET_HALTED)
3338 {
3339 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3340 return ERROR_OK;
3341 }
3342
3343 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3344 {
3345 struct xscale_trace_data *td, *next_td;
3346 xscale->trace.buffer_enabled = 1;
3347
3348 /* free old trace data */
3349 td = xscale->trace.data;
3350 while (td)
3351 {
3352 next_td = td->next;
3353
3354 if (td->entries)
3355 free(td->entries);
3356 free(td);
3357 td = next_td;
3358 }
3359 xscale->trace.data = NULL;
3360 }
3361 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3362 {
3363 xscale->trace.buffer_enabled = 0;
3364 }
3365
3366 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3367 {
3368 uint32_t fill = 1;
3369 if (CMD_ARGC >= 3)
3370 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3371 xscale->trace.buffer_fill = fill;
3372 }
3373 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3374 {
3375 xscale->trace.buffer_fill = -1;
3376 }
3377
3378 if (xscale->trace.buffer_enabled)
3379 {
3380 /* if we enable the trace buffer in fill-once
3381 * mode we know the address of the first instruction */
3382 xscale->trace.pc_ok = 1;
3383 xscale->trace.current_pc =
3384 buf_get_u32(armv4_5->pc->value, 0, 32);
3385 }
3386 else
3387 {
3388 /* otherwise the address is unknown, and we have no known good PC */
3389 xscale->trace.pc_ok = 0;
3390 }
3391
3392 command_print(CMD_CTX, "trace buffer %s (%s)",
3393 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3394 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3395
3396 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3397 if (xscale->trace.buffer_fill >= 0)
3398 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3399 else
3400 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3401
3402 return ERROR_OK;
3403 }
3404
3405 COMMAND_HANDLER(xscale_handle_trace_image_command)
3406 {
3407 struct target *target = get_current_target(CMD_CTX);
3408 struct xscale_common *xscale = target_to_xscale(target);
3409 int retval;
3410
3411 if (CMD_ARGC < 1)
3412 {
3413 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3414 return ERROR_OK;
3415 }
3416
3417 retval = xscale_verify_pointer(CMD_CTX, xscale);
3418 if (retval != ERROR_OK)
3419 return retval;
3420
3421 if (xscale->trace.image)
3422 {
3423 image_close(xscale->trace.image);
3424 free(xscale->trace.image);
3425 command_print(CMD_CTX, "previously loaded image found and closed");
3426 }
3427
3428 xscale->trace.image = malloc(sizeof(struct image));
3429 xscale->trace.image->base_address_set = 0;
3430 xscale->trace.image->start_address_set = 0;
3431
3432 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3433 if (CMD_ARGC >= 2)
3434 {
3435 xscale->trace.image->base_address_set = 1;
3436 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3437 }
3438 else
3439 {
3440 xscale->trace.image->base_address_set = 0;
3441 }
3442
3443 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3444 {
3445 free(xscale->trace.image);
3446 xscale->trace.image = NULL;
3447 return ERROR_OK;
3448 }
3449
3450 return ERROR_OK;
3451 }
3452
3453 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3454 {
3455 struct target *target = get_current_target(CMD_CTX);
3456 struct xscale_common *xscale = target_to_xscale(target);
3457 struct xscale_trace_data *trace_data;
3458 struct fileio file;
3459 int retval;
3460
3461 retval = xscale_verify_pointer(CMD_CTX, xscale);
3462 if (retval != ERROR_OK)
3463 return retval;
3464
3465 if (target->state != TARGET_HALTED)
3466 {
3467 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3468 return ERROR_OK;
3469 }
3470
3471 if (CMD_ARGC < 1)
3472 {
3473 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3474 return ERROR_OK;
3475 }
3476
3477 trace_data = xscale->trace.data;
3478
3479 if (!trace_data)
3480 {
3481 command_print(CMD_CTX, "no trace data collected");
3482 return ERROR_OK;
3483 }
3484
3485 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3486 {
3487 return ERROR_OK;
3488 }
3489
3490 while (trace_data)
3491 {
3492 int i;
3493
3494 fileio_write_u32(&file, trace_data->chkpt0);
3495 fileio_write_u32(&file, trace_data->chkpt1);
3496 fileio_write_u32(&file, trace_data->last_instruction);
3497 fileio_write_u32(&file, trace_data->depth);
3498
3499 for (i = 0; i < trace_data->depth; i++)
3500 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3501
3502 trace_data = trace_data->next;
3503 }
3504
3505 fileio_close(&file);
3506
3507 return ERROR_OK;
3508 }
3509
3510 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3511 {
3512 struct target *target = get_current_target(CMD_CTX);
3513 struct xscale_common *xscale = target_to_xscale(target);
3514 int retval;
3515
3516 retval = xscale_verify_pointer(CMD_CTX, xscale);
3517 if (retval != ERROR_OK)
3518 return retval;
3519
3520 xscale_analyze_trace(target, CMD_CTX);
3521
3522 return ERROR_OK;
3523 }
3524
3525 COMMAND_HANDLER(xscale_handle_cp15)
3526 {
3527 struct target *target = get_current_target(CMD_CTX);
3528 struct xscale_common *xscale = target_to_xscale(target);
3529 int retval;
3530
3531 retval = xscale_verify_pointer(CMD_CTX, xscale);
3532 if (retval != ERROR_OK)
3533 return retval;
3534
3535 if (target->state != TARGET_HALTED)
3536 {
3537 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3538 return ERROR_OK;
3539 }
3540 uint32_t reg_no = 0;
3541 struct reg *reg = NULL;
3542 if (CMD_ARGC > 0)
3543 {
3544 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3545 /*translate from xscale cp15 register no to openocd register*/
3546 switch (reg_no)
3547 {
3548 case 0:
3549 reg_no = XSCALE_MAINID;
3550 break;
3551 case 1:
3552 reg_no = XSCALE_CTRL;
3553 break;
3554 case 2:
3555 reg_no = XSCALE_TTB;
3556 break;
3557 case 3:
3558 reg_no = XSCALE_DAC;
3559 break;
3560 case 5:
3561 reg_no = XSCALE_FSR;
3562 break;
3563 case 6:
3564 reg_no = XSCALE_FAR;
3565 break;
3566 case 13:
3567 reg_no = XSCALE_PID;
3568 break;
3569 case 15:
3570 reg_no = XSCALE_CPACCESS;
3571 break;
3572 default:
3573 command_print(CMD_CTX, "invalid register number");
3574 return ERROR_INVALID_ARGUMENTS;
3575 }
3576 reg = &xscale->reg_cache->reg_list[reg_no];
3577
3578 }
3579 if (CMD_ARGC == 1)
3580 {
3581 uint32_t value;
3582
3583 /* read cp15 control register */
3584 xscale_get_reg(reg);
3585 value = buf_get_u32(reg->value, 0, 32);
3586 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3587 }
3588 else if (CMD_ARGC == 2)
3589 {
3590 uint32_t value;
3591 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3592
3593 /* send CP write request (command 0x41) */
3594 xscale_send_u32(target, 0x41);
3595
3596 /* send CP register number */
3597 xscale_send_u32(target, reg_no);
3598
3599 /* send CP register value */
3600 xscale_send_u32(target, value);
3601
3602 /* execute cpwait to ensure outstanding operations complete */
3603 xscale_send_u32(target, 0x53);
3604 }
3605 else
3606 {
3607 command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
3608 }
3609
3610 return ERROR_OK;
3611 }
3612
3613 static const struct command_registration xscale_exec_command_handlers[] = {
3614 {
3615 .name = "cache_info",
3616 .handler = xscale_handle_cache_info_command,
3617 .mode = COMMAND_EXEC,
3618 .help = "display information about CPU caches",
3619 },
3620 {
3621 .name = "mmu",
3622 .handler = xscale_handle_mmu_command,
3623 .mode = COMMAND_EXEC,
3624 .help = "enable or disable the MMU",
3625 .usage = "['enable'|'disable']",
3626 },
3627 {
3628 .name = "icache",
3629 .handler = xscale_handle_idcache_command,
3630 .mode = COMMAND_EXEC,
3631 .help = "display ICache state, optionally enabling or "
3632 "disabling it",
3633 .usage = "['enable'|'disable']",
3634 },
3635 {
3636 .name = "dcache",
3637 .handler = xscale_handle_idcache_command,
3638 .mode = COMMAND_EXEC,
3639 .help = "display DCache state, optionally enabling or "
3640 "disabling it",
3641 .usage = "['enable'|'disable']",
3642 },
3643 {
3644 .name = "vector_catch",
3645 .handler = xscale_handle_vector_catch_command,
3646 .mode = COMMAND_EXEC,
3647 .help = "set or display 8-bit mask of vectors "
3648 "that should trigger debug entry",
3649 .usage = "[mask]",
3650 },
3651 {
3652 .name = "vector_table",
3653 .handler = xscale_handle_vector_table_command,
3654 .mode = COMMAND_EXEC,
3655 .help = "set vector table entry in mini-ICache, "
3656 "or display current tables",
3657 .usage = "[('high'|'low') index code]",
3658 },
3659 {
3660 .name = "trace_buffer",
3661 .handler = xscale_handle_trace_buffer_command,
3662 .mode = COMMAND_EXEC,
3663 .help = "display trace buffer status, enable or disable "
3664 "tracing, and optionally reconfigure trace mode",
3665 .usage = "['enable'|'disable' ['fill' number|'wrap']]",
3666 },
3667 {
3668 .name = "dump_trace",
3669 .handler = xscale_handle_dump_trace_command,
3670 .mode = COMMAND_EXEC,
3671 .help = "dump content of trace buffer to file",
3672 .usage = "filename",
3673 },
3674 {
3675 .name = "analyze_trace",
3676 .handler = xscale_handle_analyze_trace_buffer_command,
3677 .mode = COMMAND_EXEC,
3678 .help = "analyze content of trace buffer",
3679 .usage = "",
3680 },
3681 {
3682 .name = "trace_image",
3683 .handler = xscale_handle_trace_image_command,
3684 .mode = COMMAND_EXEC,
3685 .help = "load image from file to address (default 0)",
3686 .usage = "filename [offset [filetype]]",
3687 },
3688 {
3689 .name = "cp15",
3690 .handler = xscale_handle_cp15,
3691 .mode = COMMAND_EXEC,
3692 .help = "Read or write coprocessor 15 register.",
3693 .usage = "register [value]",
3694 },
3695 COMMAND_REGISTRATION_DONE
3696 };
3697 static const struct command_registration xscale_any_command_handlers[] = {
3698 {
3699 .name = "debug_handler",
3700 .handler = xscale_handle_debug_handler_command,
3701 .mode = COMMAND_ANY,
3702 .help = "Change address used for debug handler.",
3703 .usage = "target address",
3704 },
3705 {
3706 .name = "cache_clean_address",
3707 .handler = xscale_handle_cache_clean_address_command,
3708 .mode = COMMAND_ANY,
3709 .help = "Change address used for cleaning data cache.",
3710 .usage = "address",
3711 },
3712 {
3713 .chain = xscale_exec_command_handlers,
3714 },
3715 COMMAND_REGISTRATION_DONE
3716 };
3717 static const struct command_registration xscale_command_handlers[] = {
3718 {
3719 .chain = arm_command_handlers,
3720 },
3721 {
3722 .name = "xscale",
3723 .mode = COMMAND_ANY,
3724 .help = "xscale command group",
3725 .chain = xscale_any_command_handlers,
3726 },
3727 COMMAND_REGISTRATION_DONE
3728 };
3729
3730 struct target_type xscale_target =
3731 {
3732 .name = "xscale",
3733
3734 .poll = xscale_poll,
3735 .arch_state = xscale_arch_state,
3736
3737 .target_request_data = NULL,
3738
3739 .halt = xscale_halt,
3740 .resume = xscale_resume,
3741 .step = xscale_step,
3742
3743 .assert_reset = xscale_assert_reset,
3744 .deassert_reset = xscale_deassert_reset,
3745 .soft_reset_halt = NULL,
3746
3747 /* REVISIT on some cores, allow exporting iwmmxt registers ... */
3748 .get_gdb_reg_list = arm_get_gdb_reg_list,
3749
3750 .read_memory = xscale_read_memory,
3751 .read_phys_memory = xscale_read_phys_memory,
3752 .write_memory = xscale_write_memory,
3753 .write_phys_memory = xscale_write_phys_memory,
3754 .bulk_write_memory = xscale_bulk_write_memory,
3755
3756 .checksum_memory = arm_checksum_memory,
3757 .blank_check_memory = arm_blank_check_memory,
3758
3759 .run_algorithm = armv4_5_run_algorithm,
3760
3761 .add_breakpoint = xscale_add_breakpoint,
3762 .remove_breakpoint = xscale_remove_breakpoint,
3763 .add_watchpoint = xscale_add_watchpoint,
3764 .remove_watchpoint = xscale_remove_watchpoint,
3765
3766 .commands = xscale_command_handlers,
3767 .target_create = xscale_target_create,
3768 .init_target = xscale_init_target,
3769
3770 .virt2phys = xscale_virt2phys,
3771 .mmu = xscale_mmu
3772 };

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)