ARM: rename ARMV4_5_STATE_* as ARM_STATE_*
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39
40
41 /*
42 * Important XScale documents available as of October 2009 include:
43 *
44 * Intel XScale® Core Developer's Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
48 *
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
56 *
57 * Chip-specific microarchitecture documents may also be useful.
58 */
59
60
61 /* forward declarations */
62 static int xscale_resume(struct target *, int current,
63 uint32_t address, int handle_breakpoints, int debug_execution);
64 static int xscale_debug_entry(struct target *);
65 static int xscale_restore_banked(struct target *);
66 static int xscale_get_reg(struct reg *reg);
67 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
68 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
69 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
70 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_read_trace(struct target *);
72
73
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
76 *
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
81 */
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
84
85 static char *const xscale_reg_list[] =
86 {
87 "XSCALE_MAINID", /* 0 */
88 "XSCALE_CACHETYPE",
89 "XSCALE_CTRL",
90 "XSCALE_AUXCTRL",
91 "XSCALE_TTB",
92 "XSCALE_DAC",
93 "XSCALE_FSR",
94 "XSCALE_FAR",
95 "XSCALE_PID",
96 "XSCALE_CPACCESS",
97 "XSCALE_IBCR0", /* 10 */
98 "XSCALE_IBCR1",
99 "XSCALE_DBR0",
100 "XSCALE_DBR1",
101 "XSCALE_DBCON",
102 "XSCALE_TBREG",
103 "XSCALE_CHKPT0",
104 "XSCALE_CHKPT1",
105 "XSCALE_DCSR",
106 "XSCALE_TX",
107 "XSCALE_RX", /* 20 */
108 "XSCALE_TXRXCTRL",
109 };
110
111 static const struct xscale_reg xscale_reg_arch_info[] =
112 {
113 {XSCALE_MAINID, NULL},
114 {XSCALE_CACHETYPE, NULL},
115 {XSCALE_CTRL, NULL},
116 {XSCALE_AUXCTRL, NULL},
117 {XSCALE_TTB, NULL},
118 {XSCALE_DAC, NULL},
119 {XSCALE_FSR, NULL},
120 {XSCALE_FAR, NULL},
121 {XSCALE_PID, NULL},
122 {XSCALE_CPACCESS, NULL},
123 {XSCALE_IBCR0, NULL},
124 {XSCALE_IBCR1, NULL},
125 {XSCALE_DBR0, NULL},
126 {XSCALE_DBR1, NULL},
127 {XSCALE_DBCON, NULL},
128 {XSCALE_TBREG, NULL},
129 {XSCALE_CHKPT0, NULL},
130 {XSCALE_CHKPT1, NULL},
131 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
132 {-1, NULL}, /* TX accessed via JTAG */
133 {-1, NULL}, /* RX accessed via JTAG */
134 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
135 };
136
137 /* convenience wrapper to access XScale specific registers */
138 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
139 {
140 uint8_t buf[4];
141
142 buf_set_u32(buf, 0, 32, value);
143
144 return xscale_set_reg(reg, buf);
145 }
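/* For example, xscale_enable_single_step() below uses this wrapper to arm the
 * IBCR0 hardware breakpoint register: xscale_set_reg_u32(ibcr0, next_pc | 0x1). */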
146
147 static const char xscale_not[] = "target is not an XScale";
148
149 static int xscale_verify_pointer(struct command_context *cmd_ctx,
150 struct xscale_common *xscale)
151 {
152 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
153 command_print(cmd_ctx, xscale_not);
154 return ERROR_TARGET_INVALID;
155 }
156 return ERROR_OK;
157 }
158
159 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
160 {
161 if (tap == NULL)
162 return ERROR_FAIL;
163
164 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
165 {
166 struct scan_field field;
167 uint8_t scratch[4];
168
169 memset(&field, 0, sizeof field);
170 field.tap = tap;
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
174
175 jtag_add_ir_scan(1, &field, jtag_get_end_state());
176 }
177
178 return ERROR_OK;
179 }
180
181 static int xscale_read_dcsr(struct target *target)
182 {
183 struct xscale_common *xscale = target_to_xscale(target);
184 int retval;
185 struct scan_field fields[3];
186 uint8_t field0 = 0x0;
187 uint8_t field0_check_value = 0x2;
188 uint8_t field0_check_mask = 0x7;
189 uint8_t field2 = 0x0;
190 uint8_t field2_check_value = 0x0;
191 uint8_t field2_check_mask = 0x1;
192
193 jtag_set_end_state(TAP_DRPAUSE);
194 xscale_jtag_set_instr(target->tap,
195 XSCALE_SELDCSR << xscale->xscale_variant);
196
197 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
198 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
199
200 memset(&fields, 0, sizeof fields);
201
202 fields[0].tap = target->tap;
203 fields[0].num_bits = 3;
204 fields[0].out_value = &field0;
205 uint8_t tmp;
206 fields[0].in_value = &tmp;
207
208 fields[1].tap = target->tap;
209 fields[1].num_bits = 32;
210 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
211
212 fields[2].tap = target->tap;
213 fields[2].num_bits = 1;
214 fields[2].out_value = &field2;
215 uint8_t tmp2;
216 fields[2].in_value = &tmp2;
217
218 jtag_add_dr_scan(3, fields, jtag_get_end_state());
219
220 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
221 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
222
223 if ((retval = jtag_execute_queue()) != ERROR_OK)
224 {
225 LOG_ERROR("JTAG error while reading DCSR");
226 return retval;
227 }
228
229 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
230 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
231
232 /* write the register with the value we just read
233 * on this second pass, only the first bit of field0 is guaranteed to be 0
234 */
235 field0_check_mask = 0x1;
236 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
237 fields[1].in_value = NULL;
238
239 jtag_set_end_state(TAP_IDLE);
240
241 jtag_add_dr_scan(3, fields, jtag_get_end_state());
242
243 /* DANGER!!! this must be here. It will make sure that the arguments
244 * to jtag_set_check_value() do not go out of scope! */
245 return jtag_execute_queue();
246 }
247
248
249 static void xscale_getbuf(jtag_callback_data_t arg)
250 {
251 uint8_t *in = (uint8_t *)arg;
252 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
253 }
254
255 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
256 {
257 if (num_words == 0)
258 return ERROR_INVALID_ARGUMENTS;
259
260 struct xscale_common *xscale = target_to_xscale(target);
261 int retval = ERROR_OK;
262 tap_state_t path[3];
263 struct scan_field fields[3];
264 uint8_t *field0 = malloc(num_words * 1);
265 uint8_t field0_check_value = 0x2;
266 uint8_t field0_check_mask = 0x6;
267 uint32_t *field1 = malloc(num_words * 4);
268 uint8_t field2_check_value = 0x0;
269 uint8_t field2_check_mask = 0x1;
270 int words_done = 0;
271 int words_scheduled = 0;
272 int i;
273
274 path[0] = TAP_DRSELECT;
275 path[1] = TAP_DRCAPTURE;
276 path[2] = TAP_DRSHIFT;
277
278 memset(&fields, 0, sizeof fields);
279
280 fields[0].tap = target->tap;
281 fields[0].num_bits = 3;
282 fields[0].check_value = &field0_check_value;
283 fields[0].check_mask = &field0_check_mask;
284
285 fields[1].tap = target->tap;
286 fields[1].num_bits = 32;
287
288 fields[2].tap = target->tap;
289 fields[2].num_bits = 1;
290 fields[2].check_value = &field2_check_value;
291 fields[2].check_mask = &field2_check_mask;
292
293 jtag_set_end_state(TAP_IDLE);
294 xscale_jtag_set_instr(target->tap,
295 XSCALE_DBGTX << xscale->xscale_variant);
296 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
297
298 /* repeat until all words have been collected */
299 int attempts = 0;
300 while (words_done < num_words)
301 {
302 /* schedule reads */
303 words_scheduled = 0;
304 for (i = words_done; i < num_words; i++)
305 {
306 fields[0].in_value = &field0[i];
307
308 jtag_add_pathmove(3, path);
309
310 fields[1].in_value = (uint8_t *)(field1 + i);
311
312 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
313
314 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
315
316 words_scheduled++;
317 }
318
319 if ((retval = jtag_execute_queue()) != ERROR_OK)
320 {
321 LOG_ERROR("JTAG error while receiving data from debug handler");
322 break;
323 }
324
325 /* examine results */
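/* a word whose ready bit (bit 0 of field0[i]) is clear was not yet available
 * from the debug handler; it is dropped from this batch by shifting the later
 * entries down, and gets re-read on the next pass */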
326 for (i = words_done; i < num_words; i++)
327 {
328 if (!(field0[i] & 1))
329 {
330 /* move backwards if necessary */
331 int j;
332 for (j = i; j < num_words - 1; j++)
333 {
334 field0[j] = field0[j + 1];
335 field1[j] = field1[j + 1];
336 }
337 words_scheduled--;
338 }
339 }
340 if (words_scheduled == 0)
341 {
342 if (attempts++ == 1000)
343 {
344 LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
345 retval = ERROR_TARGET_TIMEOUT;
346 break;
347 }
348 }
349
350 words_done += words_scheduled;
351 }
352
353 for (i = 0; i < num_words; i++)
354 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
355
356 free(field1);
free(field0);
357
358 return retval;
359 }
360
361 static int xscale_read_tx(struct target *target, int consume)
362 {
363 struct xscale_common *xscale = target_to_xscale(target);
364 tap_state_t path[3];
365 tap_state_t noconsume_path[6];
366 int retval;
367 struct timeval timeout, now;
368 struct scan_field fields[3];
369 uint8_t field0_in = 0x0;
370 uint8_t field0_check_value = 0x2;
371 uint8_t field0_check_mask = 0x6;
372 uint8_t field2_check_value = 0x0;
373 uint8_t field2_check_mask = 0x1;
374
375 jtag_set_end_state(TAP_IDLE);
376
377 xscale_jtag_set_instr(target->tap,
378 XSCALE_DBGTX << xscale->xscale_variant);
379
380 path[0] = TAP_DRSELECT;
381 path[1] = TAP_DRCAPTURE;
382 path[2] = TAP_DRSHIFT;
383
384 noconsume_path[0] = TAP_DRSELECT;
385 noconsume_path[1] = TAP_DRCAPTURE;
386 noconsume_path[2] = TAP_DREXIT1;
387 noconsume_path[3] = TAP_DRPAUSE;
388 noconsume_path[4] = TAP_DREXIT2;
389 noconsume_path[5] = TAP_DRSHIFT;
390
391 memset(&fields, 0, sizeof fields);
392
393 fields[0].tap = target->tap;
394 fields[0].num_bits = 3;
395 fields[0].in_value = &field0_in;
396
397 fields[1].tap = target->tap;
398 fields[1].num_bits = 32;
399 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
400
401 fields[2].tap = target->tap;
402 fields[2].num_bits = 1;
403 uint8_t tmp;
404 fields[2].in_value = &tmp;
405
406 gettimeofday(&timeout, NULL);
407 timeval_add_time(&timeout, 1, 0);
408
409 for (;;)
410 {
411 /* if we want to consume the register content (i.e. clear TX_READY),
412 * we have to go straight from Capture-DR to Shift-DR
413 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
414 */
415 if (consume)
416 jtag_add_pathmove(3, path);
417 else
418 {
419 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
420 }
421
422 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
423
424 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
425 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
426
427 if ((retval = jtag_execute_queue()) != ERROR_OK)
428 {
429 LOG_ERROR("JTAG error while reading TX");
430 return ERROR_TARGET_TIMEOUT;
431 }
432
433 gettimeofday(&now, NULL);
434 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
435 {
436 LOG_ERROR("time out reading TX register");
437 return ERROR_TARGET_TIMEOUT;
438 }
439 if (!((!(field0_in & 1)) && consume))
440 {
441 goto done;
442 }
443 if (debug_level >= 3)
444 {
445 LOG_DEBUG("waiting 100ms");
446 alive_sleep(100); /* avoid flooding the logs */
447 } else
448 {
449 keep_alive();
450 }
451 }
452 done:
453
454 if (!(field0_in & 1))
455 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
456
457 return ERROR_OK;
458 }
459
460 static int xscale_write_rx(struct target *target)
461 {
462 struct xscale_common *xscale = target_to_xscale(target);
463 int retval;
464 struct timeval timeout, now;
465 struct scan_field fields[3];
466 uint8_t field0_out = 0x0;
467 uint8_t field0_in = 0x0;
468 uint8_t field0_check_value = 0x2;
469 uint8_t field0_check_mask = 0x6;
470 uint8_t field2 = 0x0;
471 uint8_t field2_check_value = 0x0;
472 uint8_t field2_check_mask = 0x1;
473
474 jtag_set_end_state(TAP_IDLE);
475
476 xscale_jtag_set_instr(target->tap,
477 XSCALE_DBGRX << xscale->xscale_variant);
478
479 memset(&fields, 0, sizeof fields);
480
481 fields[0].tap = target->tap;
482 fields[0].num_bits = 3;
483 fields[0].out_value = &field0_out;
484 fields[0].in_value = &field0_in;
485
486 fields[1].tap = target->tap;
487 fields[1].num_bits = 32;
488 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
489
490 fields[2].tap = target->tap;
491 fields[2].num_bits = 1;
492 fields[2].out_value = &field2;
493 uint8_t tmp;
494 fields[2].in_value = &tmp;
495
496 gettimeofday(&timeout, NULL);
497 timeval_add_time(&timeout, 1, 0);
498
499 /* poll until rx_read is low */
500 LOG_DEBUG("polling RX");
501 for (;;)
502 {
503 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
504
505 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
506 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
507
508 if ((retval = jtag_execute_queue()) != ERROR_OK)
509 {
510 LOG_ERROR("JTAG error while writing RX");
511 return retval;
512 }
513
514 gettimeofday(&now, NULL);
515 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
516 {
517 LOG_ERROR("time out writing RX register");
518 return ERROR_TARGET_TIMEOUT;
519 }
520 if (!(field0_in & 1))
521 goto done;
522 if (debug_level >= 3)
523 {
524 LOG_DEBUG("waiting 100ms");
525 alive_sleep(100); /* avoid flooding the logs */
526 } else
527 {
528 keep_alive();
529 }
530 }
531 done:
532
533 /* set rx_valid */
534 field2 = 0x1;
535 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
536
537 if ((retval = jtag_execute_queue()) != ERROR_OK)
538 {
539 LOG_ERROR("JTAG error while writing RX");
540 return retval;
541 }
542
543 return ERROR_OK;
544 }
545
546 /* send count elements, each of size bytes, to the debug handler */
547 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
548 {
549 struct xscale_common *xscale = target_to_xscale(target);
550 uint32_t t[3];
551 int bits[3];
552 int retval;
553 int done_count = 0;
554
555 jtag_set_end_state(TAP_IDLE);
556
557 xscale_jtag_set_instr(target->tap,
558 XSCALE_DBGRX << xscale->xscale_variant);
559
560 bits[0] = 3;
561 t[0] = 0;
562 bits[1] = 32;
563 t[2] = 1;
564 bits[2] = 1;
565 int endianness = target->endianness;
566 while (done_count++ < count)
567 {
568 switch (size)
569 {
570 case 4:
571 if (endianness == TARGET_LITTLE_ENDIAN)
572 {
573 t[1]=le_to_h_u32(buffer);
574 } else
575 {
576 t[1]=be_to_h_u32(buffer);
577 }
578 break;
579 case 2:
580 if (endianness == TARGET_LITTLE_ENDIAN)
581 {
582 t[1]=le_to_h_u16(buffer);
583 } else
584 {
585 t[1]=be_to_h_u16(buffer);
586 }
587 break;
588 case 1:
589 t[1]=buffer[0];
590 break;
591 default:
592 LOG_ERROR("BUG: size neither 4, 2 nor 1");
593 return ERROR_INVALID_ARGUMENTS;
594 }
595 jtag_add_dr_out(target->tap,
596 3,
597 bits,
598 t,
599 jtag_set_end_state(TAP_IDLE));
600 buffer += size;
601 }
602
603 if ((retval = jtag_execute_queue()) != ERROR_OK)
604 {
605 LOG_ERROR("JTAG error while sending data to debug handler");
606 return retval;
607 }
608
609 return ERROR_OK;
610 }
611
612 static int xscale_send_u32(struct target *target, uint32_t value)
613 {
614 struct xscale_common *xscale = target_to_xscale(target);
615
616 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
617 return xscale_write_rx(target);
618 }
619
620 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
621 {
622 struct xscale_common *xscale = target_to_xscale(target);
623 int retval;
624 struct scan_field fields[3];
625 uint8_t field0 = 0x0;
626 uint8_t field0_check_value = 0x2;
627 uint8_t field0_check_mask = 0x7;
628 uint8_t field2 = 0x0;
629 uint8_t field2_check_value = 0x0;
630 uint8_t field2_check_mask = 0x1;
631
632 if (hold_rst != -1)
633 xscale->hold_rst = hold_rst;
634
635 if (ext_dbg_brk != -1)
636 xscale->external_debug_break = ext_dbg_brk;
637
638 jtag_set_end_state(TAP_IDLE);
639 xscale_jtag_set_instr(target->tap,
640 XSCALE_SELDCSR << xscale->xscale_variant);
641
642 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
643 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
644
645 memset(&fields, 0, sizeof fields);
646
647 fields[0].tap = target->tap;
648 fields[0].num_bits = 3;
649 fields[0].out_value = &field0;
650 uint8_t tmp;
651 fields[0].in_value = &tmp;
652
653 fields[1].tap = target->tap;
654 fields[1].num_bits = 32;
655 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
656
657 fields[2].tap = target->tap;
658 fields[2].num_bits = 1;
659 fields[2].out_value = &field2;
660 uint8_t tmp2;
661 fields[2].in_value = &tmp2;
662
663 jtag_add_dr_scan(3, fields, jtag_get_end_state());
664
665 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
666 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
667
668 if ((retval = jtag_execute_queue()) != ERROR_OK)
669 {
670 LOG_ERROR("JTAG error while writing DCSR");
671 return retval;
672 }
673
674 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
675 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
676
677 return ERROR_OK;
678 }
679
680 /* parity of a 32-bit word: 0 if the number of set bits is even, 1 if odd */
681 static unsigned int parity (unsigned int v)
682 {
683 // unsigned int ov = v;
684 v ^= v >> 16;
685 v ^= v >> 8;
686 v ^= v >> 4;
687 v &= 0xf;
688 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
689 return (0x6996 >> v) & 1;
690 }
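/* The three XOR folds above collapse the word's parity into its low nibble;
 * 0x6996 (binary 0110 1001 1001 0110) then serves as a 16-entry lookup table
 * of nibble parities. For example, v = 0x6 folds to nibble 6, and bit 6 of
 * 0x6996 is 0: even parity, as expected for two set bits. */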
691
692 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
693 {
694 struct xscale_common *xscale = target_to_xscale(target);
695 uint8_t packet[4];
696 uint8_t cmd;
697 int word;
698 struct scan_field fields[2];
699
700 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
701
702 /* LDIC into IR */
703 jtag_set_end_state(TAP_IDLE);
704 xscale_jtag_set_instr(target->tap,
705 XSCALE_LDIC << xscale->xscale_variant);
706
707 /* CMD is b011 to load a cacheline into the Mini ICache.
708 * Loading into the main ICache is deprecated, and unused.
709 * It's followed by three zero bits, and 27 address bits.
710 */
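/* So the scan below shifts a 6-bit command field holding 0b000011 (b011 plus
 * the three zero bits) and a 27-bit field holding VA[31:5], the cache-line
 * address. Each of the 8 cache-line words that follow is then scanned as
 * 32 data bits plus one parity bit. */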
711 buf_set_u32(&cmd, 0, 6, 0x3);
712
713 /* virtual address of desired cache line */
714 buf_set_u32(packet, 0, 27, va >> 5);
715
716 memset(&fields, 0, sizeof fields);
717
718 fields[0].tap = target->tap;
719 fields[0].num_bits = 6;
720 fields[0].out_value = &cmd;
721
722 fields[1].tap = target->tap;
723 fields[1].num_bits = 27;
724 fields[1].out_value = packet;
725
726 jtag_add_dr_scan(2, fields, jtag_get_end_state());
727
728 /* rest of packet is a cacheline: 8 instructions, with parity */
729 fields[0].num_bits = 32;
730 fields[0].out_value = packet;
731
732 fields[1].num_bits = 1;
733 fields[1].out_value = &cmd;
734
735 for (word = 0; word < 8; word++)
736 {
737 buf_set_u32(packet, 0, 32, buffer[word]);
738
739 uint32_t value;
740 memcpy(&value, packet, sizeof(uint32_t));
741 cmd = parity(value);
742
743 jtag_add_dr_scan(2, fields, jtag_get_end_state());
744 }
745
746 return jtag_execute_queue();
747 }
748
749 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
750 {
751 struct xscale_common *xscale = target_to_xscale(target);
752 uint8_t packet[4];
753 uint8_t cmd;
754 struct scan_field fields[2];
755
756 jtag_set_end_state(TAP_IDLE);
757 xscale_jtag_set_instr(target->tap,
758 XSCALE_LDIC << xscale->xscale_variant);
759
760 /* CMD for invalidate IC line b000, bits [6:4] b000 */
761 buf_set_u32(&cmd, 0, 6, 0x0);
762
763 /* virtual address of desired cache line */
764 buf_set_u32(packet, 0, 27, va >> 5);
765
766 memset(&fields, 0, sizeof fields);
767
768 fields[0].tap = target->tap;
769 fields[0].num_bits = 6;
770 fields[0].out_value = &cmd;
771
772 fields[1].tap = target->tap;
773 fields[1].num_bits = 27;
774 fields[1].out_value = packet;
775
776 jtag_add_dr_scan(2, fields, jtag_get_end_state());
777
778 return ERROR_OK;
779 }
780
781 static int xscale_update_vectors(struct target *target)
782 {
783 struct xscale_common *xscale = target_to_xscale(target);
784 int i;
785 int retval;
786
787 uint32_t low_reset_branch, high_reset_branch;
788
789 for (i = 1; i < 8; i++)
790 {
791 /* if there's a static vector specified for this exception, override */
792 if (xscale->static_high_vectors_set & (1 << i))
793 {
794 xscale->high_vectors[i] = xscale->static_high_vectors[i];
795 }
796 else
797 {
798 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
799 if (retval == ERROR_TARGET_TIMEOUT)
800 return retval;
801 if (retval != ERROR_OK)
802 {
803 /* Some of these reads will fail as part of normal execution */
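/* ARMV4_5_B(0xfffffe, 0) encodes a word offset of -2, i.e. a
 * branch-to-self at the vector */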
804 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
805 }
806 }
807 }
808
809 for (i = 1; i < 8; i++)
810 {
811 if (xscale->static_low_vectors_set & (1 << i))
812 {
813 xscale->low_vectors[i] = xscale->static_low_vectors[i];
814 }
815 else
816 {
817 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
818 if (retval == ERROR_TARGET_TIMEOUT)
819 return retval;
820 if (retval != ERROR_OK)
821 {
822 /* Some of these reads will fail as part of normal execution */
823 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
824 }
825 }
826 }
827
828 /* calculate branches to debug handler */
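/* ARM B encoding: target = vector + 8 + (offset << 2), so the 24-bit offset
 * is (target - vector - 8) >> 2; here the branch target is the debug handler
 * entry at handler_address + 0x20 */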
829 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
830 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
831
832 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
833 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
834
835 /* invalidate and load exception vectors in mini i-cache */
836 xscale_invalidate_ic_line(target, 0x0);
837 xscale_invalidate_ic_line(target, 0xffff0000);
838
839 xscale_load_ic(target, 0x0, xscale->low_vectors);
840 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
841
842 return ERROR_OK;
843 }
844
845 static int xscale_arch_state(struct target *target)
846 {
847 struct xscale_common *xscale = target_to_xscale(target);
848 struct arm *armv4_5 = &xscale->armv4_5_common;
849
850 static const char *state[] =
851 {
852 "disabled", "enabled"
853 };
854
855 static const char *arch_dbg_reason[] =
856 {
857 "", "\n(processor reset)", "\n(trace buffer full)"
858 };
859
860 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
861 {
862 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
863 return ERROR_INVALID_ARGUMENTS;
864 }
865
866 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
867 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
868 "MMU: %s, D-Cache: %s, I-Cache: %s"
869 "%s",
870 armv4_5_state_strings[armv4_5->core_state],
871 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
872 arm_mode_name(armv4_5->core_mode),
873 buf_get_u32(armv4_5->cpsr->value, 0, 32),
874 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
875 state[xscale->armv4_5_mmu.mmu_enabled],
876 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
877 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
878 arch_dbg_reason[xscale->arch_debug_reason]);
879
880 return ERROR_OK;
881 }
882
883 static int xscale_poll(struct target *target)
884 {
885 int retval = ERROR_OK;
886
887 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
888 {
889 enum target_state previous_state = target->state;
890 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
891 {
892
893 /* there's data to read from the tx register, we entered debug state */
894 target->state = TARGET_HALTED;
895
896 /* process debug entry, fetching current mode regs */
897 retval = xscale_debug_entry(target);
898 }
899 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
900 {
901 LOG_USER("error while polling TX register, reset CPU");
902 /* here we "lie" so GDB won't get stuck and a reset can be performed */
903 target->state = TARGET_HALTED;
904 }
905
906 /* debug_entry could have overwritten target state (i.e. immediate resume)
907 * don't signal event handlers in that case
908 */
909 if (target->state != TARGET_HALTED)
910 return ERROR_OK;
911
912 /* if target was running, signal that we halted
913 * otherwise we reentered from debug execution */
914 if (previous_state == TARGET_RUNNING)
915 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
916 else
917 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
918 }
919
920 return retval;
921 }
922
923 static int xscale_debug_entry(struct target *target)
924 {
925 struct xscale_common *xscale = target_to_xscale(target);
926 struct arm *armv4_5 = &xscale->armv4_5_common;
927 uint32_t pc;
928 uint32_t buffer[10];
929 int i;
930 int retval;
931 uint32_t moe;
932
933 /* clear external dbg break (will be written on next DCSR read) */
934 xscale->external_debug_break = 0;
935 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
936 return retval;
937
938 /* get r0, pc, r1 to r7 and cpsr */
939 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
940 return retval;
941
942 /* move r0 from buffer to register cache */
943 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
944 armv4_5->core_cache->reg_list[0].dirty = 1;
945 armv4_5->core_cache->reg_list[0].valid = 1;
946 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
947
948 /* move pc from buffer to register cache */
949 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
950 armv4_5->core_cache->reg_list[15].dirty = 1;
951 armv4_5->core_cache->reg_list[15].valid = 1;
952 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
953
954 /* move data from buffer to register cache */
955 for (i = 1; i <= 7; i++)
956 {
957 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
958 armv4_5->core_cache->reg_list[i].dirty = 1;
959 armv4_5->core_cache->reg_list[i].valid = 1;
960 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
961 }
962
963 arm_set_cpsr(armv4_5, buffer[9]);
964 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
965
966 if (!is_arm_mode(armv4_5->core_mode))
967 {
968 target->state = TARGET_UNKNOWN;
969 LOG_ERROR("cpsr contains invalid mode value - communication failure");
970 return ERROR_TARGET_FAILURE;
971 }
972 LOG_DEBUG("target entered debug state in %s mode",
973 arm_mode_name(armv4_5->core_mode));
974
975 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
976 if (armv4_5->spsr) {
977 xscale_receive(target, buffer, 8);
978 buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
979 armv4_5->spsr->dirty = false;
980 armv4_5->spsr->valid = true;
981 }
982 else
983 {
984 /* r8 to r14, but no spsr */
985 xscale_receive(target, buffer, 7);
986 }
987
988 /* move data from buffer to right banked register in cache */
989 for (i = 8; i <= 14; i++)
990 {
991 struct reg *r = arm_reg_current(armv4_5, i);
992
993 buf_set_u32(r->value, 0, 32, buffer[i - 8]);
994 r->dirty = false;
995 r->valid = true;
996 }
997
998 /* examine debug reason */
999 xscale_read_dcsr(target);
1000 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1001
1002 /* stored PC (for calculating fixup) */
1003 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1004
1005 switch (moe)
1006 {
1007 case 0x0: /* Processor reset */
1008 target->debug_reason = DBG_REASON_DBGRQ;
1009 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1010 pc -= 4;
1011 break;
1012 case 0x1: /* Instruction breakpoint hit */
1013 target->debug_reason = DBG_REASON_BREAKPOINT;
1014 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1015 pc -= 4;
1016 break;
1017 case 0x2: /* Data breakpoint hit */
1018 target->debug_reason = DBG_REASON_WATCHPOINT;
1019 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1020 pc -= 4;
1021 break;
1022 case 0x3: /* BKPT instruction executed */
1023 target->debug_reason = DBG_REASON_BREAKPOINT;
1024 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1025 pc -= 4;
1026 break;
1027 case 0x4: /* Ext. debug event */
1028 target->debug_reason = DBG_REASON_DBGRQ;
1029 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1030 pc -= 4;
1031 break;
1032 case 0x5: /* Vector trap occurred */
1033 target->debug_reason = DBG_REASON_BREAKPOINT;
1034 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1035 pc -= 4;
1036 break;
1037 case 0x6: /* Trace buffer full break */
1038 target->debug_reason = DBG_REASON_DBGRQ;
1039 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1040 pc -= 4;
1041 break;
1042 case 0x7: /* Reserved (may flag Hot-Debug support) */
1043 default:
1044 LOG_ERROR("Method of Entry is 'Reserved'");
1045 exit(-1);
1046 break;
1047 }
1048
1049 /* apply PC fixup */
1050 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1051
1052 /* on the first debug entry, identify cache type */
1053 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1054 {
1055 uint32_t cache_type_reg;
1056
1057 /* read cp15 cache type register */
1058 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1059 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1060
1061 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1062 }
1063
1064 /* examine MMU and Cache settings */
1065 /* read cp15 control register */
1066 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1067 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1068 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1069 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1070 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1071
1072 /* tracing enabled, read collected trace data */
1073 if (xscale->trace.buffer_enabled)
1074 {
1075 xscale_read_trace(target);
1076 xscale->trace.buffer_fill--;
1077
1078 /* resume if we're still collecting trace data */
1079 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1080 && (xscale->trace.buffer_fill > 0))
1081 {
1082 xscale_resume(target, 1, 0x0, 1, 0);
1083 }
1084 else
1085 {
1086 xscale->trace.buffer_enabled = 0;
1087 }
1088 }
1089
1090 return ERROR_OK;
1091 }
1092
1093 static int xscale_halt(struct target *target)
1094 {
1095 struct xscale_common *xscale = target_to_xscale(target);
1096
1097 LOG_DEBUG("target->state: %s",
1098 target_state_name(target));
1099
1100 if (target->state == TARGET_HALTED)
1101 {
1102 LOG_DEBUG("target was already halted");
1103 return ERROR_OK;
1104 }
1105 else if (target->state == TARGET_UNKNOWN)
1106 {
1107 /* this must not happen for an XScale target */
1108 LOG_ERROR("target was in unknown state when halt was requested");
1109 return ERROR_TARGET_INVALID;
1110 }
1111 else if (target->state == TARGET_RESET)
1112 {
1113 LOG_DEBUG("target->state == TARGET_RESET");
1114 }
1115 else
1116 {
1117 /* assert external dbg break */
1118 xscale->external_debug_break = 1;
1119 xscale_read_dcsr(target);
1120
1121 target->debug_reason = DBG_REASON_DBGRQ;
1122 }
1123
1124 return ERROR_OK;
1125 }
1126
1127 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1128 {
1129 struct xscale_common *xscale = target_to_xscale(target);
1130 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1131 int retval;
1132
1133 if (xscale->ibcr0_used)
1134 {
1135 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1136
1137 if (ibcr0_bp)
1138 {
1139 xscale_unset_breakpoint(target, ibcr0_bp);
1140 }
1141 else
1142 {
1143 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1144 exit(-1);
1145 }
1146 }
1147
1148 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1149 return retval;
1150
1151 return ERROR_OK;
1152 }
1153
1154 static int xscale_disable_single_step(struct target *target)
1155 {
1156 struct xscale_common *xscale = target_to_xscale(target);
1157 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1158 int retval;
1159
1160 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1161 return retval;
1162
1163 return ERROR_OK;
1164 }
1165
1166 static void xscale_enable_watchpoints(struct target *target)
1167 {
1168 struct watchpoint *watchpoint = target->watchpoints;
1169
1170 while (watchpoint)
1171 {
1172 if (watchpoint->set == 0)
1173 xscale_set_watchpoint(target, watchpoint);
1174 watchpoint = watchpoint->next;
1175 }
1176 }
1177
1178 static void xscale_enable_breakpoints(struct target *target)
1179 {
1180 struct breakpoint *breakpoint = target->breakpoints;
1181
1182 /* set any pending breakpoints */
1183 while (breakpoint)
1184 {
1185 if (breakpoint->set == 0)
1186 xscale_set_breakpoint(target, breakpoint);
1187 breakpoint = breakpoint->next;
1188 }
1189 }
1190
1191 static int xscale_resume(struct target *target, int current,
1192 uint32_t address, int handle_breakpoints, int debug_execution)
1193 {
1194 struct xscale_common *xscale = target_to_xscale(target);
1195 struct arm *armv4_5 = &xscale->armv4_5_common;
1196 struct breakpoint *breakpoint = target->breakpoints;
1197 uint32_t current_pc;
1198 int retval;
1199 int i;
1200
1201 LOG_DEBUG("-");
1202
1203 if (target->state != TARGET_HALTED)
1204 {
1205 LOG_WARNING("target not halted");
1206 return ERROR_TARGET_NOT_HALTED;
1207 }
1208
1209 if (!debug_execution)
1210 {
1211 target_free_all_working_areas(target);
1212 }
1213
1214 /* update vector tables */
1215 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1216 return retval;
1217
1218 /* current = 1: continue on current pc, otherwise continue at <address> */
1219 if (!current)
1220 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1221
1222 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1223
1224 /* if we're at the reset vector, we have to simulate the branch */
1225 if (current_pc == 0x0)
1226 {
1227 arm_simulate_step(target, NULL);
1228 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1229 }
1230
1231 /* the front-end may request us not to handle breakpoints */
1232 if (handle_breakpoints)
1233 {
1234 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1235 {
1236 uint32_t next_pc;
1237
1238 /* there's a breakpoint at the current PC, we have to step over it */
1239 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1240 xscale_unset_breakpoint(target, breakpoint);
1241
1242 /* calculate PC of next instruction */
1243 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1244 {
1245 uint32_t current_opcode;
1246 target_read_u32(target, current_pc, &current_opcode);
1247 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1248 }
1249
1250 LOG_DEBUG("enable single-step");
1251 xscale_enable_single_step(target, next_pc);
1252
1253 /* restore banked registers */
1254 retval = xscale_restore_banked(target);
1255
1256 /* send resume request (command 0x30 or 0x31)
1257 * clean the trace buffer if it is to be enabled (0x62) */
1258 if (xscale->trace.buffer_enabled)
1259 {
1260 xscale_send_u32(target, 0x62);
1261 xscale_send_u32(target, 0x31);
1262 }
1263 else
1264 xscale_send_u32(target, 0x30);
1265
1266 /* send CPSR */
1267 xscale_send_u32(target,
1268 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1269 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1270 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1271
1272 for (i = 7; i >= 0; i--)
1273 {
1274 /* send register */
1275 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1276 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1277 }
1278
1279 /* send PC */
1280 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1281 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1282
1283 /* wait for and process debug entry */
1284 xscale_debug_entry(target);
1285
1286 LOG_DEBUG("disable single-step");
1287 xscale_disable_single_step(target);
1288
1289 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1290 xscale_set_breakpoint(target, breakpoint);
1291 }
1292 }
1293
1294 /* enable any pending breakpoints and watchpoints */
1295 xscale_enable_breakpoints(target);
1296 xscale_enable_watchpoints(target);
1297
1298 /* restore banked registers */
1299 retval = xscale_restore_banked(target);
1300
1301 /* send resume request (command 0x30 or 0x31)
1302 * clean the trace buffer if it is to be enabled (0x62) */
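/* the full sequence sent to the debug handler below is: 0x62 (clear trace
 * buffer) and 0x31 if tracing, otherwise 0x30; then the CPSR, r7 down to r0,
 * and finally the PC */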
1303 if (xscale->trace.buffer_enabled)
1304 {
1305 xscale_send_u32(target, 0x62);
1306 xscale_send_u32(target, 0x31);
1307 }
1308 else
1309 xscale_send_u32(target, 0x30);
1310
1311 /* send CPSR */
1312 xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
1313 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1314 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1315
1316 for (i = 7; i >= 0; i--)
1317 {
1318 /* send register */
1319 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1320 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1321 }
1322
1323 /* send PC */
1324 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1325 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1326
1327 target->debug_reason = DBG_REASON_NOTHALTED;
1328
1329 if (!debug_execution)
1330 {
1331 /* registers are now invalid */
1332 register_cache_invalidate(armv4_5->core_cache);
1333 target->state = TARGET_RUNNING;
1334 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1335 }
1336 else
1337 {
1338 target->state = TARGET_DEBUG_RUNNING;
1339 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1340 }
1341
1342 LOG_DEBUG("target resumed");
1343
1344 return ERROR_OK;
1345 }
1346
1347 static int xscale_step_inner(struct target *target, int current,
1348 uint32_t address, int handle_breakpoints)
1349 {
1350 struct xscale_common *xscale = target_to_xscale(target);
1351 struct arm *armv4_5 = &xscale->armv4_5_common;
1352 uint32_t next_pc;
1353 int retval;
1354 int i;
1355
1356 target->debug_reason = DBG_REASON_SINGLESTEP;
1357
1358 /* calculate PC of next instruction */
1359 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1360 {
1361 uint32_t current_opcode, current_pc;
1362 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1363
1364 target_read_u32(target, current_pc, &current_opcode);
1365 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1366 return retval;
1367 }
1368
1369 LOG_DEBUG("enable single-step");
1370 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1371 return retval;
1372
1373 /* restore banked registers */
1374 if ((retval = xscale_restore_banked(target)) != ERROR_OK)
1375 return retval;
1376
1377 /* send resume request (command 0x30 or 0x31)
1378 * clean the trace buffer if it is to be enabled (0x62) */
1379 if (xscale->trace.buffer_enabled)
1380 {
1381 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1382 return retval;
1383 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1384 return retval;
1385 }
1386 else
1387 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1388 return retval;
1389
1390 /* send CPSR */
1391 retval = xscale_send_u32(target,
1392 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1393 if (retval != ERROR_OK)
1394 return retval;
1395 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1396 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1397
1398 for (i = 7; i >= 0; i--)
1399 {
1400 /* send register */
1401 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1402 return retval;
1403 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1404 }
1405
1406 /* send PC */
1407 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
1408 return retval;
1409 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1410
1411 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1412
1413 /* registers are now invalid */
1414 register_cache_invalidate(armv4_5->core_cache);
1415
1416 /* wait for and process debug entry */
1417 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1418 return retval;
1419
1420 LOG_DEBUG("disable single-step");
1421 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1422 return retval;
1423
1424 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1425
1426 return ERROR_OK;
1427 }
1428
1429 static int xscale_step(struct target *target, int current,
1430 uint32_t address, int handle_breakpoints)
1431 {
1432 struct arm *armv4_5 = target_to_armv4_5(target);
1433 struct breakpoint *breakpoint = target->breakpoints;
1434
1435 uint32_t current_pc;
1436 int retval;
1437
1438 if (target->state != TARGET_HALTED)
1439 {
1440 LOG_WARNING("target not halted");
1441 return ERROR_TARGET_NOT_HALTED;
1442 }
1443
1444 /* current = 1: continue on current pc, otherwise continue at <address> */
1445 if (!current)
1446 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1447
1448 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1449
1450 /* if we're at the reset vector, we have to simulate the step */
1451 if (current_pc == 0x0)
1452 {
1453 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1454 return retval;
1455 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1456
1457 target->debug_reason = DBG_REASON_SINGLESTEP;
1458 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1459
1460 return ERROR_OK;
1461 }
1462
1463 /* the front-end may request us not to handle breakpoints */
1464 if (handle_breakpoints)
1465 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1466 {
1467 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1468 return retval;
1469 }
1470
1471 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1472
1473 if (breakpoint)
1474 {
1475 xscale_set_breakpoint(target, breakpoint);
1476 }
1477
1478 LOG_DEBUG("target stepped");
1479
1480 return ERROR_OK;
1481
1482 }
1483
1484 static int xscale_assert_reset(struct target *target)
1485 {
1486 struct xscale_common *xscale = target_to_xscale(target);
1487
1488 LOG_DEBUG("target->state: %s",
1489 target_state_name(target));
1490
1491 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1492 * end up in T-L-R, which would reset JTAG)
1493 */
1494 jtag_set_end_state(TAP_IDLE);
1495 xscale_jtag_set_instr(target->tap,
1496 XSCALE_SELDCSR << xscale->xscale_variant);
1497
1498 /* set Hold reset, Halt mode and Trap Reset */
1499 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1500 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1501 xscale_write_dcsr(target, 1, 0);
1502
1503 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1504 xscale_jtag_set_instr(target->tap, 0x7f);
1505 jtag_execute_queue();
1506
1507 /* assert reset */
1508 jtag_add_reset(0, 1);
1509
1510 /* sleep 1ms, to be sure we fulfill any requirements */
1511 jtag_add_sleep(1000);
1512 jtag_execute_queue();
1513
1514 target->state = TARGET_RESET;
1515
1516 if (target->reset_halt)
1517 {
1518 int retval;
1519 if ((retval = target_halt(target)) != ERROR_OK)
1520 return retval;
1521 }
1522
1523 return ERROR_OK;
1524 }
1525
1526 static int xscale_deassert_reset(struct target *target)
1527 {
1528 struct xscale_common *xscale = target_to_xscale(target);
1529 struct breakpoint *breakpoint = target->breakpoints;
1530
1531 LOG_DEBUG("-");
1532
1533 xscale->ibcr_available = 2;
1534 xscale->ibcr0_used = 0;
1535 xscale->ibcr1_used = 0;
1536
1537 xscale->dbr_available = 2;
1538 xscale->dbr0_used = 0;
1539 xscale->dbr1_used = 0;
1540
1541 /* mark all hardware breakpoints as unset */
1542 while (breakpoint)
1543 {
1544 if (breakpoint->type == BKPT_HARD)
1545 {
1546 breakpoint->set = 0;
1547 }
1548 breakpoint = breakpoint->next;
1549 }
1550
1551 register_cache_invalidate(xscale->armv4_5_common.core_cache);
1552
1553 /* FIXME mark hardware watchpoints as unset too. Also,
1554 * at least some of the XScale registers are invalid...
1555 */
1556
1557 /*
1558 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1559 * contents got invalidated. Safer to force that, so writing new
1560 * contents can't ever fail.
1561 */
1562 {
1563 uint32_t address;
1564 unsigned buf_cnt;
1565 const uint8_t *buffer = xscale_debug_handler;
1566 int retval;
1567
1568 /* release SRST */
1569 jtag_add_reset(0, 0);
1570
1571 /* wait 300ms; 150 and 100ms were not enough */
1572 jtag_add_sleep(300*1000);
1573
1574 jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
1575 jtag_execute_queue();
1576
1577 /* set Hold reset, Halt mode and Trap Reset */
1578 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1579 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1580 xscale_write_dcsr(target, 1, 0);
1581
1582 /* Load the debug handler into the mini-icache. Since
1583 * it's using halt mode (not monitor mode), it runs in
1584 * "Special Debug State" for access to registers, memory,
1585 * coprocessors, trace data, etc.
1586 */
1587 address = xscale->handler_address;
1588 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1589 binary_size > 0;
1590 binary_size -= buf_cnt, buffer += buf_cnt)
1591 {
1592 uint32_t cache_line[8];
1593 unsigned i;
1594
1595 buf_cnt = binary_size;
1596 if (buf_cnt > 32)
1597 buf_cnt = 32;
1598
1599 for (i = 0; i < buf_cnt; i += 4)
1600 {
1601 /* convert LE buffer to host-endian uint32_t */
1602 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1603 }
1604
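/* pad the rest of the 32-byte cache line with 0xe1a08008
 * (MOV r8, r8 -- an ARM no-op) */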
1605 for (; i < 32; i += 4)
1606 {
1607 cache_line[i / 4] = 0xe1a08008;
1608 }
1609
1610 /* only load addresses other than the reset vectors */
1611 if ((address % 0x400) != 0x0)
1612 {
1613 retval = xscale_load_ic(target, address,
1614 cache_line);
1615 if (retval != ERROR_OK)
1616 return retval;
1617 }
1618
1619 address += buf_cnt;
1620 }
1621
1622 retval = xscale_load_ic(target, 0x0,
1623 xscale->low_vectors);
1624 if (retval != ERROR_OK)
1625 return retval;
1626 retval = xscale_load_ic(target, 0xffff0000,
1627 xscale->high_vectors);
1628 if (retval != ERROR_OK)
1629 return retval;
1630
1631 jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
1632
1633 jtag_add_sleep(100000);
1634
1635 /* set Hold reset, Halt mode and Trap Reset */
1636 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1637 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1638 xscale_write_dcsr(target, 1, 0);
1639
1640 /* clear Hold reset to let the target run (should enter debug handler) */
1641 xscale_write_dcsr(target, 0, 1);
1642 target->state = TARGET_RUNNING;
1643
1644 if (!target->reset_halt)
1645 {
1646 jtag_add_sleep(10000);
1647
1648 /* we should have entered debug now */
1649 xscale_debug_entry(target);
1650 target->state = TARGET_HALTED;
1651
1652 /* resume the target */
1653 xscale_resume(target, 1, 0x0, 1, 0);
1654 }
1655 }
1656
1657 return ERROR_OK;
1658 }
1659
1660 static int xscale_read_core_reg(struct target *target, struct reg *r,
1661 int num, enum armv4_5_mode mode)
1662 {
1663 /** \todo add debug handler support for core register reads */
1664 LOG_ERROR("not implemented");
1665 return ERROR_OK;
1666 }
1667
1668 static int xscale_write_core_reg(struct target *target, struct reg *r,
1669 int num, enum armv4_5_mode mode, uint32_t value)
1670 {
1671 /** \todo add debug handler support for core register writes */
1672 LOG_ERROR("not implemented");
1673 return ERROR_OK;
1674 }
1675
1676 static int xscale_full_context(struct target *target)
1677 {
1678 struct arm *armv4_5 = target_to_armv4_5(target);
1679
1680 uint32_t *buffer;
1681
1682 int i, j;
1683
1684 LOG_DEBUG("-");
1685
1686 if (target->state != TARGET_HALTED)
1687 {
1688 LOG_WARNING("target not halted");
1689 return ERROR_TARGET_NOT_HALTED;
1690 }
1691
1692 buffer = malloc(4 * 8);
1693
1694 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1695 * we can't enter User mode on an XScale (unpredictable),
1696 * but User shares registers with SYS
1697 */
1698 for (i = 1; i < 7; i++)
1699 {
1700 enum armv4_5_mode mode = armv4_5_number_to_mode(i);
1701 bool valid = true;
1702 struct reg *r;
1703
1704 if (mode == ARMV4_5_MODE_USR)
1705 continue;
1706
1707 /* check if there are invalid registers in the current mode
1708 */
1709 for (j = 0; valid && j <= 16; j++)
1710 {
1711 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1712 mode, j).valid)
1713 valid = false;
1714 }
1715 if (valid)
1716 continue;
1717
1718 /* request banked registers */
1719 xscale_send_u32(target, 0x0);
1720
1721 /* send CPSR for desired bank mode */
1722 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1723
1724 /* get banked registers: r8 to r14; and SPSR
1725 * except in USR/SYS mode
1726 */
1727 if (mode != ARMV4_5_MODE_SYS) {
1728 /* SPSR */
1729 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1730 mode, 16);
1731
1732 xscale_receive(target, buffer, 8);
1733
1734 buf_set_u32(r->value, 0, 32, buffer[7]);
1735 r->dirty = false;
1736 r->valid = true;
1737 } else {
1738 xscale_receive(target, buffer, 7);
1739 }
1740
1741 /* move data from buffer to register cache */
1742 for (j = 8; j <= 14; j++)
1743 {
1744 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1745 mode, j);
1746
1747 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1748 r->dirty = false;
1749 r->valid = true;
1750 }
1751 }
1752
1753 free(buffer);
1754
1755 return ERROR_OK;
1756 }
1757
1758 static int xscale_restore_banked(struct target *target)
1759 {
1760 struct arm *armv4_5 = target_to_armv4_5(target);
1761
1762 int i, j;
1763
1764 if (target->state != TARGET_HALTED)
1765 {
1766 LOG_WARNING("target not halted");
1767 return ERROR_TARGET_NOT_HALTED;
1768 }
1769
1770 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1771 * and check if any banked registers need to be written. Ignore
1772 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1773 * an XScale (unpredictable), but they share all registers.
1774 */
1775 for (i = 1; i < 7; i++)
1776 {
1777 enum armv4_5_mode mode = armv4_5_number_to_mode(i);
1778 struct reg *r;
1779
1780 if (mode == ARMV4_5_MODE_USR)
1781 continue;
1782
1783 /* check if there are dirty registers in this mode */
1784 for (j = 8; j <= 14; j++)
1785 {
1786 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1787 mode, j).dirty)
1788 goto dirty;
1789 }
1790
1791 /* if not USR/SYS, check if the SPSR needs to be written */
1792 if (mode != ARMV4_5_MODE_SYS)
1793 {
1794 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1795 mode, 16).dirty)
1796 goto dirty;
1797 }
1798
1799 /* there's nothing to flush for this mode */
1800 continue;
1801
1802 dirty:
1803 /* command 0x1: "send banked registers" */
1804 xscale_send_u32(target, 0x1);
1805
1806 /* send CPSR for desired mode */
1807 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1808
1809 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1810 * but this protocol doesn't understand that nuance.
1811 */
1812 for (j = 8; j <= 14; j++) {
1813 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1814 mode, j);
1815 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1816 r->dirty = false;
1817 }
1818
1819 /* send spsr if not in USR/SYS mode */
1820 if (mode != ARMV4_5_MODE_SYS) {
1821 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1822 mode, 16);
1823 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1824 r->dirty = false;
1825 }
1826 }
1827
1828 return ERROR_OK;
1829 }
1830
1831 static int xscale_read_memory(struct target *target, uint32_t address,
1832 uint32_t size, uint32_t count, uint8_t *buffer)
1833 {
1834 struct xscale_common *xscale = target_to_xscale(target);
1835 uint32_t *buf32;
1836 uint32_t i;
1837 int retval;
1838
1839 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1840
1841 if (target->state != TARGET_HALTED)
1842 {
1843 LOG_WARNING("target not halted");
1844 return ERROR_TARGET_NOT_HALTED;
1845 }
1846
1847 /* sanitize arguments */
1848 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1849 return ERROR_INVALID_ARGUMENTS;
1850
1851 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1852 return ERROR_TARGET_UNALIGNED_ACCESS;
1853
1854 /* send memory read request (command 0x1n, n: access size) */
1855 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1856 return retval;
1857
1858 /* send base address for read request */
1859 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1860 return retval;
1861
1862 /* send number of requested data words */
1863 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1864 return retval;
1865
1866 /* receive data from target (count times 32-bit words in host endianness) */
1867 buf32 = malloc(4 * count);
1868 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
{
free(buf32);
1869 return retval;
}
1870
1871 /* extract data from host-endian buffer into byte stream */
1872 for (i = 0; i < count; i++)
1873 {
1874 switch (size)
1875 {
1876 case 4:
1877 target_buffer_set_u32(target, buffer, buf32[i]);
1878 buffer += 4;
1879 break;
1880 case 2:
1881 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1882 buffer += 2;
1883 break;
1884 case 1:
1885 *buffer++ = buf32[i] & 0xff;
1886 break;
1887 default:
1888 LOG_ERROR("invalid read size");
1889 return ERROR_INVALID_ARGUMENTS;
1890 }
1891 }
1892
1893 free(buf32);
1894
1895 /* examine DCSR, to see if Sticky Abort (SA) got set */
1896 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1897 return retval;
1898 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1899 {
1900 /* clear SA bit */
1901 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1902 return retval;
1903
1904 return ERROR_TARGET_DATA_ABORT;
1905 }
1906
1907 return ERROR_OK;
1908 }
1909
1910 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1911 uint32_t size, uint32_t count, uint8_t *buffer)
1912 {
1913 /** \todo: provide a non-stub implementation of this routine. */
1914 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1915 target_name(target), __func__);
1916 return ERROR_FAIL;
1917 }
1918
1919 static int xscale_write_memory(struct target *target, uint32_t address,
1920 uint32_t size, uint32_t count, uint8_t *buffer)
1921 {
1922 struct xscale_common *xscale = target_to_xscale(target);
1923 int retval;
1924
1925 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1926
1927 if (target->state != TARGET_HALTED)
1928 {
1929 LOG_WARNING("target not halted");
1930 return ERROR_TARGET_NOT_HALTED;
1931 }
1932
1933 /* sanitize arguments */
1934 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1935 return ERROR_INVALID_ARGUMENTS;
1936
1937 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1938 return ERROR_TARGET_UNALIGNED_ACCESS;
1939
1940 /* send memory write request (command 0x2n, n: access size) */
1941 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1942 return retval;
1943
1944 /* send base address for write request */
1945 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1946 return retval;
1947
1948 /* send number of requested data words to be written */
1949 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1950 return retval;
1951
1952 /* old per-word transfer: convert the byte stream to host-endian words and send them one at a time (superseded by the block transfer below) */
1953 #if 0
1954 for (i = 0; i < count; i++)
1955 {
1956 switch (size)
1957 {
1958 case 4:
1959 value = target_buffer_get_u32(target, buffer);
1960 xscale_send_u32(target, value);
1961 buffer += 4;
1962 break;
1963 case 2:
1964 value = target_buffer_get_u16(target, buffer);
1965 xscale_send_u32(target, value);
1966 buffer += 2;
1967 break;
1968 case 1:
1969 value = *buffer;
1970 xscale_send_u32(target, value);
1971 buffer += 1;
1972 break;
1973 default:
1974 LOG_ERROR("should never get here");
1975 exit(-1);
1976 }
1977 }
1978 #endif
1979 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1980 return retval;
1981
1982 /* examine DCSR, to see if Sticky Abort (SA) got set */
1983 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1984 return retval;
1985 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1986 {
1987 /* clear SA bit */
1988 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1989 return retval;
1990
1991 return ERROR_TARGET_DATA_ABORT;
1992 }
1993
1994 return ERROR_OK;
1995 }
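/* Companion sketch to the one after xscale_read_memory() (hypothetical helper,
 * not part of the original driver): storing one word.  A data abort raised by
 * the access is reported through the Sticky Abort check above as
 * ERROR_TARGET_DATA_ABORT.
 */
#if 0
static int example_write_word(struct target *target, uint32_t address,
		uint32_t value)
{
	uint8_t buf[4];

	/* convert the host value to target byte order before sending */
	target_buffer_set_u32(target, buf, value);
	return xscale_write_memory(target, address, 4, 1, buf);
}
#endif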
1996
1997 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1998 uint32_t size, uint32_t count, uint8_t *buffer)
1999 {
2000 /** \todo: provide a non-stub implementation of this routine. */
2001 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
2002 target_name(target), __func__);
2003 return ERROR_FAIL;
2004 }
2005
2006 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
2007 uint32_t count, uint8_t *buffer)
2008 {
2009 return xscale_write_memory(target, address, 4, count, buffer);
2010 }
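/* Usage sketch (values illustrative): the bulk variant forwards its count as
 * 4-byte accesses, so callers pass a word count rather than a byte count:
 *
 *   uint8_t chunk[1024];    hypothetical, word-aligned image chunk
 *   xscale_bulk_write_memory(target, 0xa0000000, sizeof(chunk) / 4, chunk);
 */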
2011
2012 static uint32_t xscale_get_ttb(struct target *target)
2013 {
2014 struct xscale_common *xscale = target_to_xscale(target);
2015 uint32_t ttb;
2016
2017 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2018 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2019
2020 return ttb;
2021 }
2022
2023 static void xscale_disable_mmu_caches(struct target *target, int mmu,
2024 int d_u_cache, int i_cache)
2025 {
2026 struct xscale_common *xscale = target_to_xscale(target);
2027 uint32_t cp15_control;
2028
2029 /* read cp15 control register */
2030 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2031 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2032
2033 if (mmu)
2034 cp15_control &= ~0x1U;
2035
2036 if (d_u_cache)
2037 {
2038 /* clean DCache */
2039 xscale_send_u32(target, 0x50);
2040 xscale_send_u32(target, xscale->cache_clean_address);
2041
2042 /* invalidate DCache */
2043 xscale_send_u32(target, 0x51);
2044
2045 cp15_control &= ~0x4U;
2046 }
2047
2048 if (i_cache)
2049 {
2050 /* invalidate ICache */
2051 xscale_send_u32(target, 0x52);
2052 cp15_control &= ~0x1000U;
2053 }
2054
2055 /* write new cp15 control register */
2056 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2057
2058 /* execute cpwait to ensure outstanding operations complete */
2059 xscale_send_u32(target, 0x53);
2060 }
2061
2062 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2063 int d_u_cache, int i_cache)
2064 {
2065 struct xscale_common *xscale = target_to_xscale(target);
2066 uint32_t cp15_control;
2067
2068 /* read cp15 control register */
2069 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2070 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2071
2072 if (mmu)
2073 cp15_control |= 0x1U;
2074
2075 if (d_u_cache)
2076 cp15_control |= 0x4U;
2077
2078 if (i_cache)
2079 cp15_control |= 0x1000U;
2080
2081 /* write new cp15 control register */
2082 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2083
2084 /* execute cpwait to ensure outstanding operations complete */
2085 xscale_send_u32(target, 0x53);
2086 }
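/* The CP15 control register bits toggled by the two helpers above follow the
 * standard ARM layout (a sketch; the macro names below are illustrative and
 * not defined elsewhere in this file):
 */
#if 0
#define XSCALE_CP15_CTRL_MMU_ENABLE	(1u << 0)	/* 0x1:    M bit */
#define XSCALE_CP15_CTRL_DCACHE_ENABLE	(1u << 2)	/* 0x4:    C bit (data/unified cache) */
#define XSCALE_CP15_CTRL_ICACHE_ENABLE	(1u << 12)	/* 0x1000: I bit */
#endif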
2087
2088 static int xscale_set_breakpoint(struct target *target,
2089 struct breakpoint *breakpoint)
2090 {
2091 int retval;
2092 struct xscale_common *xscale = target_to_xscale(target);
2093
2094 if (target->state != TARGET_HALTED)
2095 {
2096 LOG_WARNING("target not halted");
2097 return ERROR_TARGET_NOT_HALTED;
2098 }
2099
2100 if (breakpoint->set)
2101 {
2102 LOG_WARNING("breakpoint already set");
2103 return ERROR_OK;
2104 }
2105
2106 if (breakpoint->type == BKPT_HARD)
2107 {
2108 uint32_t value = breakpoint->address | 1;
2109 if (!xscale->ibcr0_used)
2110 {
2111 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2112 xscale->ibcr0_used = 1;
2113 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2114 }
2115 else if (!xscale->ibcr1_used)
2116 {
2117 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2118 xscale->ibcr1_used = 1;
2119 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2120 }
2121 else
2122 {
2123 LOG_ERROR("BUG: no hardware comparator available");
2124 return ERROR_OK;
2125 }
2126 }
2127 else if (breakpoint->type == BKPT_SOFT)
2128 {
2129 if (breakpoint->length == 4)
2130 {
2131 /* keep the original instruction in target endianness */
2132 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2133 {
2134 return retval;
2135 }
2136 /* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
2137 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2138 {
2139 return retval;
2140 }
2141 }
2142 else
2143 {
2144 /* keep the original instruction in target endianness */
2145 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2146 {
2147 return retval;
2148 }
2149 /* write the bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
2150 if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2151 {
2152 return retval;
2153 }
2154 }
2155 breakpoint->set = 1;
2156 }
2157
2158 return ERROR_OK;
2159 }
2160
2161 static int xscale_add_breakpoint(struct target *target,
2162 struct breakpoint *breakpoint)
2163 {
2164 struct xscale_common *xscale = target_to_xscale(target);
2165
2166 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2167 {
2168 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2169 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2170 }
2171
2172 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2173 {
2174 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2175 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2176 }
2177
2178 if (breakpoint->type == BKPT_HARD)
2179 {
2180 xscale->ibcr_available--;
2181 }
2182
2183 return ERROR_OK;
2184 }
2185
2186 static int xscale_unset_breakpoint(struct target *target,
2187 struct breakpoint *breakpoint)
2188 {
2189 int retval;
2190 struct xscale_common *xscale = target_to_xscale(target);
2191
2192 if (target->state != TARGET_HALTED)
2193 {
2194 LOG_WARNING("target not halted");
2195 return ERROR_TARGET_NOT_HALTED;
2196 }
2197
2198 if (!breakpoint->set)
2199 {
2200 LOG_WARNING("breakpoint not set");
2201 return ERROR_OK;
2202 }
2203
2204 if (breakpoint->type == BKPT_HARD)
2205 {
2206 if (breakpoint->set == 1)
2207 {
2208 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2209 xscale->ibcr0_used = 0;
2210 }
2211 else if (breakpoint->set == 2)
2212 {
2213 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2214 xscale->ibcr1_used = 0;
2215 }
2216 breakpoint->set = 0;
2217 }
2218 else
2219 {
2220 /* restore original instruction (kept in target endianness) */
2221 if (breakpoint->length == 4)
2222 {
2223 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2224 {
2225 return retval;
2226 }
2227 }
2228 else
2229 {
2230 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2231 {
2232 return retval;
2233 }
2234 }
2235 breakpoint->set = 0;
2236 }
2237
2238 return ERROR_OK;
2239 }
2240
2241 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2242 {
2243 struct xscale_common *xscale = target_to_xscale(target);
2244
2245 if (target->state != TARGET_HALTED)
2246 {
2247 LOG_WARNING("target not halted");
2248 return ERROR_TARGET_NOT_HALTED;
2249 }
2250
2251 if (breakpoint->set)
2252 {
2253 xscale_unset_breakpoint(target, breakpoint);
2254 }
2255
2256 if (breakpoint->type == BKPT_HARD)
2257 xscale->ibcr_available++;
2258
2259 return ERROR_OK;
2260 }
2261
2262 static int xscale_set_watchpoint(struct target *target,
2263 struct watchpoint *watchpoint)
2264 {
2265 struct xscale_common *xscale = target_to_xscale(target);
2266 uint8_t enable = 0;
2267 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2268 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2269
2270 if (target->state != TARGET_HALTED)
2271 {
2272 LOG_WARNING("target not halted");
2273 return ERROR_TARGET_NOT_HALTED;
2274 }
2275
2276 xscale_get_reg(dbcon);
2277
2278 switch (watchpoint->rw)
2279 {
2280 case WPT_READ:
2281 enable = 0x3;
2282 break;
2283 case WPT_ACCESS:
2284 enable = 0x2;
2285 break;
2286 case WPT_WRITE:
2287 enable = 0x1;
2288 break;
2289 default:
2290 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2291 }
2292
2293 if (!xscale->dbr0_used)
2294 {
2295 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2296 dbcon_value |= enable;
2297 xscale_set_reg_u32(dbcon, dbcon_value);
2298 watchpoint->set = 1;
2299 xscale->dbr0_used = 1;
2300 }
2301 else if (!xscale->dbr1_used)
2302 {
2303 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2304 dbcon_value |= enable << 2;
2305 xscale_set_reg_u32(dbcon, dbcon_value);
2306 watchpoint->set = 2;
2307 xscale->dbr1_used = 1;
2308 }
2309 else
2310 {
2311 LOG_ERROR("BUG: no hardware comparator available");
2312 return ERROR_OK;
2313 }
2314
2315 return ERROR_OK;
2316 }
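/* DBCON encoding used above (derived from the switch statement): each data
 * breakpoint register gets a two-bit enable field, 0x1 = break on write,
 * 0x2 = break on any access, 0x3 = break on read; DBR0 uses bits 1:0 and
 * DBR1 uses the same field shifted left by two (bits 3:2).
 */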
2317
2318 static int xscale_add_watchpoint(struct target *target,
2319 struct watchpoint *watchpoint)
2320 {
2321 struct xscale_common *xscale = target_to_xscale(target);
2322
2323 if (xscale->dbr_available < 1)
2324 {
2325 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2326 }
2327
2328 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2329 {
2330 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2331 }
2332
2333 xscale->dbr_available--;
2334
2335 return ERROR_OK;
2336 }
2337
2338 static int xscale_unset_watchpoint(struct target *target,
2339 struct watchpoint *watchpoint)
2340 {
2341 struct xscale_common *xscale = target_to_xscale(target);
2342 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2343 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2344
2345 if (target->state != TARGET_HALTED)
2346 {
2347 LOG_WARNING("target not halted");
2348 return ERROR_TARGET_NOT_HALTED;
2349 }
2350
2351 if (!watchpoint->set)
2352 {
2353 LOG_WARNING("breakpoint not set");
2354 return ERROR_OK;
2355 }
2356
2357 if (watchpoint->set == 1)
2358 {
2359 dbcon_value &= ~0x3;
2360 xscale_set_reg_u32(dbcon, dbcon_value);
2361 xscale->dbr0_used = 0;
2362 }
2363 else if (watchpoint->set == 2)
2364 {
2365 dbcon_value &= ~0xc;
2366 xscale_set_reg_u32(dbcon, dbcon_value);
2367 xscale->dbr1_used = 0;
2368 }
2369 watchpoint->set = 0;
2370
2371 return ERROR_OK;
2372 }
2373
2374 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2375 {
2376 struct xscale_common *xscale = target_to_xscale(target);
2377
2378 if (target->state != TARGET_HALTED)
2379 {
2380 LOG_WARNING("target not halted");
2381 return ERROR_TARGET_NOT_HALTED;
2382 }
2383
2384 if (watchpoint->set)
2385 {
2386 xscale_unset_watchpoint(target, watchpoint);
2387 }
2388
2389 xscale->dbr_available++;
2390
2391 return ERROR_OK;
2392 }
2393
2394 static int xscale_get_reg(struct reg *reg)
2395 {
2396 struct xscale_reg *arch_info = reg->arch_info;
2397 struct target *target = arch_info->target;
2398 struct xscale_common *xscale = target_to_xscale(target);
2399
2400 /* DCSR, TX and RX are accessible via JTAG */
2401 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2402 {
2403 return xscale_read_dcsr(arch_info->target);
2404 }
2405 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2406 {
2407 /* 1 = consume register content */
2408 return xscale_read_tx(arch_info->target, 1);
2409 }
2410 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2411 {
2412 /* can't read from RX register (host -> debug handler) */
2413 return ERROR_OK;
2414 }
2415 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2416 {
2417 /* can't (explicitly) read from TXRXCTRL register */
2418 return ERROR_OK;
2419 }
2420 else /* Other DBG registers have to be transferred via the debug handler */
2421 {
2422 /* send CP read request (command 0x40) */
2423 xscale_send_u32(target, 0x40);
2424
2425 /* send CP register number */
2426 xscale_send_u32(target, arch_info->dbg_handler_number);
2427
2428 /* read register value */
2429 xscale_read_tx(target, 1);
2430 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2431
2432 reg->dirty = 0;
2433 reg->valid = 1;
2434 }
2435
2436 return ERROR_OK;
2437 }
2438
2439 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2440 {
2441 struct xscale_reg *arch_info = reg->arch_info;
2442 struct target *target = arch_info->target;
2443 struct xscale_common *xscale = target_to_xscale(target);
2444 uint32_t value = buf_get_u32(buf, 0, 32);
2445
2446 /* DCSR, TX and RX are accessible via JTAG */
2447 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2448 {
2449 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2450 return xscale_write_dcsr(arch_info->target, -1, -1);
2451 }
2452 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2453 {
2454 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2455 return xscale_write_rx(arch_info->target);
2456 }
2457 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2458 {
2459 /* can't write to TX register (debug-handler -> host) */
2460 return ERROR_OK;
2461 }
2462 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2463 {
2464 /* can't (explicitly) write to TXRXCTRL register */
2465 return ERROR_OK;
2466 }
2467 else /* Other DBG registers have to be transferred via the debug handler */
2468 {
2469 /* send CP write request (command 0x41) */
2470 xscale_send_u32(target, 0x41);
2471
2472 /* send CP register number */
2473 xscale_send_u32(target, arch_info->dbg_handler_number);
2474
2475 /* send CP register value */
2476 xscale_send_u32(target, value);
2477 buf_set_u32(reg->value, 0, 32, value);
2478 }
2479
2480 return ERROR_OK;
2481 }
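/* Debug-handler coprocessor access protocol shared by xscale_get_reg() and
 * xscale_set_reg() (derived from the code above): a read is requested with
 * command 0x40 followed by the handler's register number, and the value comes
 * back through TX; a write is command 0x41 followed by the register number
 * and the new value.  DCSR, TX and RX bypass this path because they are
 * reachable directly over JTAG.
 */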
2482
2483 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2484 {
2485 struct xscale_common *xscale = target_to_xscale(target);
2486 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2487 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2488
2489 /* send CP write request (command 0x41) */
2490 xscale_send_u32(target, 0x41);
2491
2492 /* send CP register number */
2493 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2494
2495 /* send CP register value */
2496 xscale_send_u32(target, value);
2497 buf_set_u32(dcsr->value, 0, 32, value);
2498
2499 return ERROR_OK;
2500 }
2501
2502 static int xscale_read_trace(struct target *target)
2503 {
2504 struct xscale_common *xscale = target_to_xscale(target);
2505 struct arm *armv4_5 = &xscale->armv4_5_common;
2506 struct xscale_trace_data **trace_data_p;
2507
2508 /* 258 words from debug handler
2509 * 256 trace buffer entries
2510 * 2 checkpoint addresses
2511 */
2512 uint32_t trace_buffer[258];
2513 int is_address[256];
2514 int i, j;
2515
2516 if (target->state != TARGET_HALTED)
2517 {
2518 LOG_WARNING("target must be stopped to read trace data");
2519 return ERROR_TARGET_NOT_HALTED;
2520 }
2521
2522 /* send read trace buffer command (command 0x61) */
2523 xscale_send_u32(target, 0x61);
2524
2525 /* receive trace buffer content */
2526 xscale_receive(target, trace_buffer, 258);
2527
2528 /* parse buffer backwards to identify address entries */
2529 for (i = 255; i >= 0; i--)
2530 {
2531 is_address[i] = 0;
2532 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2533 ((trace_buffer[i] & 0xf0) == 0xd0))
2534 {
2535 if (i >= 3)
2536 is_address[--i] = 1;
2537 if (i >= 2)
2538 is_address[--i] = 1;
2539 if (i >= 1)
2540 is_address[--i] = 1;
2541 if (i >= 0)
2542 is_address[--i] = 1;
2543 }
2544 }
2545
2546
2547 /* search first non-zero entry */
2548 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2549 ;
2550
2551 if (j == 256)
2552 {
2553 LOG_DEBUG("no trace data collected");
2554 return ERROR_XSCALE_NO_TRACE_DATA;
2555 }
2556
2557 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2558 ;
2559
2560 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2561 (*trace_data_p)->next = NULL;
2562 (*trace_data_p)->chkpt0 = trace_buffer[256];
2563 (*trace_data_p)->chkpt1 = trace_buffer[257];
2564 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2565 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2566 (*trace_data_p)->depth = 256 - j;
2567
2568 for (i = j; i < 256; i++)
2569 {
2570 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2571 if (is_address[i])
2572 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2573 else
2574 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2575 }
2576
2577 return ERROR_OK;
2578 }
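/* Layout of the 258 words received above (derived from the parsing code):
 * words 0..255 are trace buffer entries, word 256 is checkpoint register 0
 * and word 257 is checkpoint register 1.  For message entries the high
 * nibble carries the message type (0..7 exception, 8/9 direct/indirect
 * branch, 12/13 checkpointed branch, 15 roll-over) and the low nibble an
 * instruction count; the four entries preceding an indirect-branch message
 * (plain or checkpointed) hold the branch target address bytes.
 * xscale_analyze_trace() below decodes this stream.
 */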
2579
2580 static int xscale_read_instruction(struct target *target,
2581 struct arm_instruction *instruction)
2582 {
2583 struct xscale_common *xscale = target_to_xscale(target);
2584 int i;
2585 int section = -1;
2586 size_t size_read;
2587 uint32_t opcode;
2588 int retval;
2589
2590 if (!xscale->trace.image)
2591 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2592
2593 /* search for the section the current instruction belongs to */
2594 for (i = 0; i < xscale->trace.image->num_sections; i++)
2595 {
2596 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2597 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2598 {
2599 section = i;
2600 break;
2601 }
2602 }
2603
2604 if (section == -1)
2605 {
2606 /* current instruction couldn't be found in the image */
2607 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2608 }
2609
2610 if (xscale->trace.core_state == ARM_STATE_ARM)
2611 {
2612 uint8_t buf[4];
2613 if ((retval = image_read_section(xscale->trace.image, section,
2614 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2615 4, buf, &size_read)) != ERROR_OK)
2616 {
2617 LOG_ERROR("error while reading instruction: %i", retval);
2618 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2619 }
2620 opcode = target_buffer_get_u32(target, buf);
2621 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2622 }
2623 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2624 {
2625 uint8_t buf[2];
2626 if ((retval = image_read_section(xscale->trace.image, section,
2627 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2628 2, buf, &size_read)) != ERROR_OK)
2629 {
2630 LOG_ERROR("error while reading instruction: %i", retval);
2631 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2632 }
2633 opcode = target_buffer_get_u16(target, buf);
2634 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2635 }
2636 else
2637 {
2638 LOG_ERROR("BUG: unknown core state encountered");
2639 exit(-1);
2640 }
2641
2642 return ERROR_OK;
2643 }
2644
2645 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2646 int i, uint32_t *target)
2647 {
2648 /* if there are fewer than four entries prior to the indirect branch message
2649 * we can't extract the address */
2650 if (i < 4)
2651 {
2652 return -1;
2653 }
2654
2655 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2656 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2657
2658 return 0;
2659 }
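/* Worked example (values illustrative): if the four trace entries preceding an
 * indirect-branch message hold 0xAA, 0xBB, 0xCC, 0xDD in order, i.e.
 * entries[i-4]..entries[i-1], the reconstructed branch target is
 *   0xDD | (0xCC << 8) | (0xBB << 16) | (0xAA << 24) = 0xAABBCCDD
 * so the byte closest to the branch message supplies the least significant
 * byte of the address.
 */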
2660
2661 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2662 {
2663 struct xscale_common *xscale = target_to_xscale(target);
2664 int next_pc_ok = 0;
2665 uint32_t next_pc = 0x0;
2666 struct xscale_trace_data *trace_data = xscale->trace.data;
2667 int retval;
2668
2669 while (trace_data)
2670 {
2671 int i, chkpt;
2672 int rollover;
2673 int branch;
2674 int exception;
2675 xscale->trace.core_state = ARM_STATE_ARM;
2676
2677 chkpt = 0;
2678 rollover = 0;
2679
2680 for (i = 0; i < trace_data->depth; i++)
2681 {
2682 next_pc_ok = 0;
2683 branch = 0;
2684 exception = 0;
2685
2686 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2687 continue;
2688
2689 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2690 {
2691 case 0: /* Exceptions */
2692 case 1:
2693 case 2:
2694 case 3:
2695 case 4:
2696 case 5:
2697 case 6:
2698 case 7:
2699 exception = (trace_data->entries[i].data & 0x70) >> 4;
2700 next_pc_ok = 1;
2701 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2702 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2703 break;
2704 case 8: /* Direct Branch */
2705 branch = 1;
2706 break;
2707 case 9: /* Indirect Branch */
2708 branch = 1;
2709 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2710 {
2711 next_pc_ok = 1;
2712 }
2713 break;
2714 case 13: /* Checkpointed Indirect Branch */
2715 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2716 {
2717 next_pc_ok = 1;
2718 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2719 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2720 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2721 }
2722 /* explicit fall-through */
2723 case 12: /* Checkpointed Direct Branch */
2724 branch = 1;
2725 if (chkpt == 0)
2726 {
2727 next_pc_ok = 1;
2728 next_pc = trace_data->chkpt0;
2729 chkpt++;
2730 }
2731 else if (chkpt == 1)
2732 {
2733 next_pc_ok = 1;
2734 next_pc = trace_data->chkpt1;
2735 chkpt++;
2736 }
2737 else
2738 {
2739 LOG_WARNING("more than two checkpointed branches encountered");
2740 }
2741 break;
2742 case 15: /* Roll-over */
2743 rollover++;
2744 continue;
2745 default: /* Reserved */
2746 command_print(cmd_ctx, "--- reserved trace message ---");
2747 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2748 return ERROR_OK;
2749 }
2750
2751 if (xscale->trace.pc_ok)
2752 {
2753 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2754 struct arm_instruction instruction;
2755
2756 if ((exception == 6) || (exception == 7))
2757 {
2758 /* IRQ or FIQ exception, no instruction executed */
2759 executed -= 1;
2760 }
2761
2762 while (executed-- >= 0)
2763 {
2764 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2765 {
2766 /* can't continue tracing with no image available */
2767 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2768 {
2769 return retval;
2770 }
2771 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2772 {
2773 /* TODO: handle incomplete images */
2774 }
2775 }
2776
2777 /* a precise abort on a load to the PC is included in the incremental
2778 * word count, other instructions causing data aborts are not included
2779 */
2780 if ((executed == 0) && (exception == 4)
2781 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2782 {
2783 if ((instruction.type == ARM_LDM)
2784 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2785 {
2786 executed--;
2787 }
2788 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2789 && (instruction.info.load_store.Rd != 15))
2790 {
2791 executed--;
2792 }
2793 }
2794
2795 /* only the last instruction executed
2796 * (the one that caused the control flow change)
2797 * could be a taken branch
2798 */
2799 if (((executed == -1) && (branch == 1)) &&
2800 (((instruction.type == ARM_B) ||
2801 (instruction.type == ARM_BL) ||
2802 (instruction.type == ARM_BLX)) &&
2803 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2804 {
2805 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2806 }
2807 else
2808 {
2809 xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2;
2810 }
2811 command_print(cmd_ctx, "%s", instruction.text);
2812 }
2813
2814 rollover = 0;
2815 }
2816
2817 if (next_pc_ok)
2818 {
2819 xscale->trace.current_pc = next_pc;
2820 xscale->trace.pc_ok = 1;
2821 }
2822 }
2823
2824 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2)
2825 {
2826 struct arm_instruction instruction;
2827 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2828 {
2829 /* can't continue tracing with no image available */
2830 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2831 {
2832 return retval;
2833 }
2834 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2835 {
2836 /* TODO: handle incomplete images */
2837 }
2838 }
2839 command_print(cmd_ctx, "%s", instruction.text);
2840 }
2841
2842 trace_data = trace_data->next;
2843 }
2844
2845 return ERROR_OK;
2846 }
2847
2848 static const struct reg_arch_type xscale_reg_type = {
2849 .get = xscale_get_reg,
2850 .set = xscale_set_reg,
2851 };
2852
2853 static void xscale_build_reg_cache(struct target *target)
2854 {
2855 struct xscale_common *xscale = target_to_xscale(target);
2856 struct arm *armv4_5 = &xscale->armv4_5_common;
2857 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2858 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2859 int i;
2860 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2861
2862 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2863
2864 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2865 cache_p = &(*cache_p)->next;
2866
2867 /* fill in values for the xscale reg cache */
2868 (*cache_p)->name = "XScale registers";
2869 (*cache_p)->next = NULL;
2870 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2871 (*cache_p)->num_regs = num_regs;
2872
2873 for (i = 0; i < num_regs; i++)
2874 {
2875 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2876 (*cache_p)->reg_list[i].value = calloc(4, 1);
2877 (*cache_p)->reg_list[i].dirty = 0;
2878 (*cache_p)->reg_list[i].valid = 0;
2879 (*cache_p)->reg_list[i].size = 32;
2880 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2881 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2882 arch_info[i] = xscale_reg_arch_info[i];
2883 arch_info[i].target = target;
2884 }
2885
2886 xscale->reg_cache = (*cache_p);
2887 }
2888
2889 static int xscale_init_target(struct command_context *cmd_ctx,
2890 struct target *target)
2891 {
2892 xscale_build_reg_cache(target);
2893 return ERROR_OK;
2894 }
2895
2896 static int xscale_init_arch_info(struct target *target,
2897 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2898 {
2899 struct arm *armv4_5;
2900 uint32_t high_reset_branch, low_reset_branch;
2901 int i;
2902
2903 armv4_5 = &xscale->armv4_5_common;
2904
2905 /* store architecture specific data */
2906 xscale->common_magic = XSCALE_COMMON_MAGIC;
2907
2908 /* we don't really *need* a variant param ... */
2909 if (variant) {
2910 int ir_length = 0;
2911
2912 if (strcmp(variant, "pxa250") == 0
2913 || strcmp(variant, "pxa255") == 0
2914 || strcmp(variant, "pxa26x") == 0)
2915 ir_length = 5;
2916 else if (strcmp(variant, "pxa27x") == 0
2917 || strcmp(variant, "ixp42x") == 0
2918 || strcmp(variant, "ixp45x") == 0
2919 || strcmp(variant, "ixp46x") == 0)
2920 ir_length = 7;
2921 else if (strcmp(variant, "pxa3xx") == 0)
2922 ir_length = 11;
2923 else
2924 LOG_WARNING("%s: unrecognized variant %s",
2925 tap->dotted_name, variant);
2926
2927 if (ir_length && ir_length != tap->ir_length) {
2928 LOG_WARNING("%s: IR length for %s is %d; fixing",
2929 tap->dotted_name, variant, ir_length);
2930 tap->ir_length = ir_length;
2931 }
2932 }
2933
2934 /* PXA3xx shifts the JTAG instructions */
2935 if (tap->ir_length == 11)
2936 xscale->xscale_variant = XSCALE_PXA3XX;
2937 else
2938 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
2939
2940 /* the debug handler isn't installed (and thus not running) at this time */
2941 xscale->handler_address = 0xfe000800;
2942
2943 /* clear the vectors we keep locally for reference */
2944 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2945 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2946
2947 /* no user-specified vectors have been configured yet */
2948 xscale->static_low_vectors_set = 0x0;
2949 xscale->static_high_vectors_set = 0x0;
2950
2951 /* calculate branches to debug handler */
2952 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2953 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2954
2955 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2956 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2957
2958 for (i = 1; i <= 7; i++)
2959 {
2960 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2961 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2962 }
2963
2964 /* 64kB aligned region used for DCache cleaning */
2965 xscale->cache_clean_address = 0xfffe0000;
2966
2967 xscale->hold_rst = 0;
2968 xscale->external_debug_break = 0;
2969
2970 xscale->ibcr_available = 2;
2971 xscale->ibcr0_used = 0;
2972 xscale->ibcr1_used = 0;
2973
2974 xscale->dbr_available = 2;
2975 xscale->dbr0_used = 0;
2976 xscale->dbr1_used = 0;
2977
2978 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2979 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2980
2981 xscale->vector_catch = 0x1;
2982
2983 xscale->trace.capture_status = TRACE_IDLE;
2984 xscale->trace.data = NULL;
2985 xscale->trace.image = NULL;
2986 xscale->trace.buffer_enabled = 0;
2987 xscale->trace.buffer_fill = 0;
2988
2989 /* prepare ARMv4/5 specific information */
2990 armv4_5->arch_info = xscale;
2991 armv4_5->read_core_reg = xscale_read_core_reg;
2992 armv4_5->write_core_reg = xscale_write_core_reg;
2993 armv4_5->full_context = xscale_full_context;
2994
2995 armv4_5_init_arch_info(target, armv4_5);
2996
2997 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
2998 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
2999 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3000 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3001 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3002 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3003 xscale->armv4_5_mmu.has_tiny_pages = 1;
3004 xscale->armv4_5_mmu.mmu_enabled = 0;
3005
3006 return ERROR_OK;
3007 }
3008
3009 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3010 {
3011 struct xscale_common *xscale;
3012
3013 if (sizeof xscale_debug_handler - 1 > 0x800) {
3014 LOG_ERROR("debug_handler.bin: larger than 2kb");
3015 return ERROR_FAIL;
3016 }
3017
3018 xscale = calloc(1, sizeof(*xscale));
3019 if (!xscale)
3020 return ERROR_FAIL;
3021
3022 return xscale_init_arch_info(target, xscale, target->tap,
3023 target->variant);
3024 }
3025
3026 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3027 {
3028 struct target *target = NULL;
3029 struct xscale_common *xscale;
3030 int retval;
3031 uint32_t handler_address;
3032
3033 if (CMD_ARGC < 2)
3034 {
3035 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3036 return ERROR_OK;
3037 }
3038
3039 if ((target = get_target(CMD_ARGV[0])) == NULL)
3040 {
3041 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3042 return ERROR_FAIL;
3043 }
3044
3045 xscale = target_to_xscale(target);
3046 retval = xscale_verify_pointer(CMD_CTX, xscale);
3047 if (retval != ERROR_OK)
3048 return retval;
3049
3050 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3051
3052 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3053 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3054 {
3055 xscale->handler_address = handler_address;
3056 }
3057 else
3058 {
3059 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3060 return ERROR_FAIL;
3061 }
3062
3063 return ERROR_OK;
3064 }
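/* Example invocation (the target number and address are illustrative; the
 * address must fall inside one of the two ranges checked above):
 *
 *   xscale debug_handler 0 0xfe000800
 */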
3065
3066 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3067 {
3068 struct target *target = NULL;
3069 struct xscale_common *xscale;
3070 int retval;
3071 uint32_t cache_clean_address;
3072
3073 if (CMD_ARGC < 2)
3074 {
3075 return ERROR_COMMAND_SYNTAX_ERROR;
3076 }
3077
3078 target = get_target(CMD_ARGV[0]);
3079 if (target == NULL)
3080 {
3081 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3082 return ERROR_FAIL;
3083 }
3084 xscale = target_to_xscale(target);
3085 retval = xscale_verify_pointer(CMD_CTX, xscale);
3086 if (retval != ERROR_OK)
3087 return retval;
3088
3089 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3090
3091 if (cache_clean_address & 0xffff)
3092 {
3093 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3094 }
3095 else
3096 {
3097 xscale->cache_clean_address = cache_clean_address;
3098 }
3099
3100 return ERROR_OK;
3101 }
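/* Example invocation (target number illustrative; 0xfffe0000 is 64 KiB
 * aligned and matches the driver's default):
 *
 *   xscale cache_clean_address 0 0xfffe0000
 */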
3102
3103 COMMAND_HANDLER(xscale_handle_cache_info_command)
3104 {
3105 struct target *target = get_current_target(CMD_CTX);
3106 struct xscale_common *xscale = target_to_xscale(target);
3107 int retval;
3108
3109 retval = xscale_verify_pointer(CMD_CTX, xscale);
3110 if (retval != ERROR_OK)
3111 return retval;
3112
3113 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3114 }
3115
3116 static int xscale_virt2phys(struct target *target,
3117 uint32_t virtual, uint32_t *physical)
3118 {
3119 struct xscale_common *xscale = target_to_xscale(target);
3120 int type;
3121 uint32_t cb;
3122 int domain;
3123 uint32_t ap;
3124
3125 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3126 LOG_ERROR(xscale_not);
3127 return ERROR_TARGET_INVALID;
3128 }
3129
3130 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3131 if (type == -1)
3132 {
3133 return ret;
3134 }
3135 *physical = ret;
3136 return ERROR_OK;
3137 }
3138
3139 static int xscale_mmu(struct target *target, int *enabled)
3140 {
3141 struct xscale_common *xscale = target_to_xscale(target);
3142
3143 if (target->state != TARGET_HALTED)
3144 {
3145 LOG_ERROR("Target not halted");
3146 return ERROR_TARGET_INVALID;
3147 }
3148 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3149 return ERROR_OK;
3150 }
3151
3152 COMMAND_HANDLER(xscale_handle_mmu_command)
3153 {
3154 struct target *target = get_current_target(CMD_CTX);
3155 struct xscale_common *xscale = target_to_xscale(target);
3156 int retval;
3157
3158 retval = xscale_verify_pointer(CMD_CTX, xscale);
3159 if (retval != ERROR_OK)
3160 return retval;
3161
3162 if (target->state != TARGET_HALTED)
3163 {
3164 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3165 return ERROR_OK;
3166 }
3167
3168 if (CMD_ARGC >= 1)
3169 {
3170 bool enable;
3171 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3172 if (enable)
3173 xscale_enable_mmu_caches(target, 1, 0, 0);
3174 else
3175 xscale_disable_mmu_caches(target, 1, 0, 0);
3176 xscale->armv4_5_mmu.mmu_enabled = enable;
3177 }
3178
3179 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3180
3181 return ERROR_OK;
3182 }
3183
3184 COMMAND_HANDLER(xscale_handle_idcache_command)
3185 {
3186 struct target *target = get_current_target(CMD_CTX);
3187 struct xscale_common *xscale = target_to_xscale(target);
3188
3189 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3190 if (retval != ERROR_OK)
3191 return retval;
3192
3193 if (target->state != TARGET_HALTED)
3194 {
3195 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3196 return ERROR_OK;
3197 }
3198
3199 bool icache;
3200 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3201
3202 if (CMD_ARGC >= 1)
3203 {
3204 bool enable;
3205 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3206 if (enable)
3207 xscale_enable_mmu_caches(target, 0, icache ? 0 : 1, icache ? 1 : 0);
3208 else
3209 xscale_disable_mmu_caches(target, 0, icache ? 0 : 1, icache ? 1 : 0);
3210 if (icache)
3211 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3212 else
3213 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3214 }
3215
3216 bool enabled = icache ?
3217 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3218 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3219 const char *msg = enabled ? "enabled" : "disabled";
3220 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3221
3222 return ERROR_OK;
3223 }
3224
3225 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3226 {
3227 struct target *target = get_current_target(CMD_CTX);
3228 struct xscale_common *xscale = target_to_xscale(target);
3229 int retval;
3230
3231 retval = xscale_verify_pointer(CMD_CTX, xscale);
3232 if (retval != ERROR_OK)
3233 return retval;
3234
3235 if (CMD_ARGC < 1)
3236 {
3237 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3238 }
3239 else
3240 {
3241 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3242 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3243 xscale_write_dcsr(target, -1, -1);
3244 }
3245
3246 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3247
3248 return ERROR_OK;
3249 }
3250
3251
3252 COMMAND_HANDLER(xscale_handle_vector_table_command)
3253 {
3254 struct target *target = get_current_target(CMD_CTX);
3255 struct xscale_common *xscale = target_to_xscale(target);
3256 int err = 0;
3257 int retval;
3258
3259 retval = xscale_verify_pointer(CMD_CTX, xscale);
3260 if (retval != ERROR_OK)
3261 return retval;
3262
3263 if (CMD_ARGC == 0) /* print current settings */
3264 {
3265 int idx;
3266
3267 command_print(CMD_CTX, "active user-set static vectors:");
3268 for (idx = 1; idx < 8; idx++)
3269 if (xscale->static_low_vectors_set & (1 << idx))
3270 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3271 for (idx = 1; idx < 8; idx++)
3272 if (xscale->static_high_vectors_set & (1 << idx))
3273 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3274 return ERROR_OK;
3275 }
3276
3277 if (CMD_ARGC != 3)
3278 err = 1;
3279 else
3280 {
3281 int idx;
3282 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3283 uint32_t vec;
3284 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3285
3286 if (idx < 1 || idx >= 8)
3287 err = 1;
3288
3289 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3290 {
3291 xscale->static_low_vectors_set |= (1<<idx);
3292 xscale->static_low_vectors[idx] = vec;
3293 }
3294 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3295 {
3296 xscale->static_high_vectors_set |= (1<<idx);
3297 xscale->static_high_vectors[idx] = vec;
3298 }
3299 else
3300 err = 1;
3301 }
3302
3303 if (err)
3304 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3305
3306 return ERROR_OK;
3307 }
3308
3309
3310 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3311 {
3312 struct target *target = get_current_target(CMD_CTX);
3313 struct xscale_common *xscale = target_to_xscale(target);
3314 struct arm *armv4_5 = &xscale->armv4_5_common;
3315 uint32_t dcsr_value;
3316 int retval;
3317
3318 retval = xscale_verify_pointer(CMD_CTX, xscale);
3319 if (retval != ERROR_OK)
3320 return retval;
3321
3322 if (target->state != TARGET_HALTED)
3323 {
3324 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3325 return ERROR_OK;
3326 }
3327
3328 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3329 {
3330 struct xscale_trace_data *td, *next_td;
3331 xscale->trace.buffer_enabled = 1;
3332
3333 /* free old trace data */
3334 td = xscale->trace.data;
3335 while (td)
3336 {
3337 next_td = td->next;
3338
3339 if (td->entries)
3340 free(td->entries);
3341 free(td);
3342 td = next_td;
3343 }
3344 xscale->trace.data = NULL;
3345 }
3346 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3347 {
3348 xscale->trace.buffer_enabled = 0;
3349 }
3350
3351 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3352 {
3353 uint32_t fill = 1;
3354 if (CMD_ARGC >= 3)
3355 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3356 xscale->trace.buffer_fill = fill;
3357 }
3358 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3359 {
3360 xscale->trace.buffer_fill = -1;
3361 }
3362
3363 if (xscale->trace.buffer_enabled)
3364 {
3365 /* if we enable the trace buffer in fill-once
3366 * mode we know the address of the first instruction */
3367 xscale->trace.pc_ok = 1;
3368 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3369 }
3370 else
3371 {
3372 /* otherwise the address is unknown, and we have no known good PC */
3373 xscale->trace.pc_ok = 0;
3374 }
3375
3376 command_print(CMD_CTX, "trace buffer %s (%s)",
3377 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3378 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3379
3380 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3381 if (xscale->trace.buffer_fill >= 0)
3382 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3383 else
3384 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3385
3386 return ERROR_OK;
3387 }
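/* Example invocations (derived from the argument parsing above):
 *
 *   xscale trace_buffer enable fill      capture until the trace buffer fills
 *   xscale trace_buffer enable wrap      capture continuously, overwriting old data
 *   xscale trace_buffer disable
 */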
3388
3389 COMMAND_HANDLER(xscale_handle_trace_image_command)
3390 {
3391 struct target *target = get_current_target(CMD_CTX);
3392 struct xscale_common *xscale = target_to_xscale(target);
3393 int retval;
3394
3395 if (CMD_ARGC < 1)
3396 {
3397 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3398 return ERROR_OK;
3399 }
3400
3401 retval = xscale_verify_pointer(CMD_CTX, xscale);
3402 if (retval != ERROR_OK)
3403 return retval;
3404
3405 if (xscale->trace.image)
3406 {
3407 image_close(xscale->trace.image);
3408 free(xscale->trace.image);
3409 command_print(CMD_CTX, "previously loaded image found and closed");
3410 }
3411
3412 xscale->trace.image = malloc(sizeof(struct image));
3413 xscale->trace.image->base_address_set = 0;
3414 xscale->trace.image->start_address_set = 0;
3415
3416 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3417 if (CMD_ARGC >= 2)
3418 {
3419 xscale->trace.image->base_address_set = 1;
3420 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3421 }
3422 else
3423 {
3424 xscale->trace.image->base_address_set = 0;
3425 }
3426
3427 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3428 {
3429 free(xscale->trace.image);
3430 xscale->trace.image = NULL;
3431 return ERROR_OK;
3432 }
3433
3434 return ERROR_OK;
3435 }
3436
3437 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3438 {
3439 struct target *target = get_current_target(CMD_CTX);
3440 struct xscale_common *xscale = target_to_xscale(target);
3441 struct xscale_trace_data *trace_data;
3442 struct fileio file;
3443 int retval;
3444
3445 retval = xscale_verify_pointer(CMD_CTX, xscale);
3446 if (retval != ERROR_OK)
3447 return retval;
3448
3449 if (target->state != TARGET_HALTED)
3450 {
3451 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3452 return ERROR_OK;
3453 }
3454
3455 if (CMD_ARGC < 1)
3456 {
3457 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3458 return ERROR_OK;
3459 }
3460
3461 trace_data = xscale->trace.data;
3462
3463 if (!trace_data)
3464 {
3465 command_print(CMD_CTX, "no trace data collected");
3466 return ERROR_OK;
3467 }
3468
3469 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3470 {
3471 return ERROR_OK;
3472 }
3473
3474 while (trace_data)
3475 {
3476 int i;
3477
3478 fileio_write_u32(&file, trace_data->chkpt0);
3479 fileio_write_u32(&file, trace_data->chkpt1);
3480 fileio_write_u32(&file, trace_data->last_instruction);
3481 fileio_write_u32(&file, trace_data->depth);
3482
3483 for (i = 0; i < trace_data->depth; i++)
3484 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3485
3486 trace_data = trace_data->next;
3487 }
3488
3489 fileio_close(&file);
3490
3491 return ERROR_OK;
3492 }
3493
3494 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3495 {
3496 struct target *target = get_current_target(CMD_CTX);
3497 struct xscale_common *xscale = target_to_xscale(target);
3498 int retval;
3499
3500 retval = xscale_verify_pointer(CMD_CTX, xscale);
3501 if (retval != ERROR_OK)
3502 return retval;
3503
3504 xscale_analyze_trace(target, CMD_CTX);
3505
3506 return ERROR_OK;
3507 }
3508
3509 COMMAND_HANDLER(xscale_handle_cp15)
3510 {
3511 struct target *target = get_current_target(CMD_CTX);
3512 struct xscale_common *xscale = target_to_xscale(target);
3513 int retval;
3514
3515 retval = xscale_verify_pointer(CMD_CTX, xscale);
3516 if (retval != ERROR_OK)
3517 return retval;
3518
3519 if (target->state != TARGET_HALTED)
3520 {
3521 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3522 return ERROR_OK;
3523 }
3524 uint32_t reg_no = 0;
3525 struct reg *reg = NULL;
3526 if (CMD_ARGC > 0)
3527 {
3528 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3529 /*translate from xscale cp15 register no to openocd register*/
3530 switch (reg_no)
3531 {
3532 case 0:
3533 reg_no = XSCALE_MAINID;
3534 break;
3535 case 1:
3536 reg_no = XSCALE_CTRL;
3537 break;
3538 case 2:
3539 reg_no = XSCALE_TTB;
3540 break;
3541 case 3:
3542 reg_no = XSCALE_DAC;
3543 break;
3544 case 5:
3545 reg_no = XSCALE_FSR;
3546 break;
3547 case 6:
3548 reg_no = XSCALE_FAR;
3549 break;
3550 case 13:
3551 reg_no = XSCALE_PID;
3552 break;
3553 case 15:
3554 reg_no = XSCALE_CPACCESS;
3555 break;
3556 default:
3557 command_print(CMD_CTX, "invalid register number");
3558 return ERROR_INVALID_ARGUMENTS;
3559 }
3560 reg = &xscale->reg_cache->reg_list[reg_no];
3561
3562 }
3563 if (CMD_ARGC == 1)
3564 {
3565 uint32_t value;
3566
3567 /* read cp15 control register */
3568 xscale_get_reg(reg);
3569 value = buf_get_u32(reg->value, 0, 32);
3570 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3571 }
3572 else if (CMD_ARGC == 2)
3573 {
3574 uint32_t value;
3575 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3576
3577 /* send CP write request (command 0x41) */
3578 xscale_send_u32(target, 0x41);
3579
3580 /* send CP register number */
3581 xscale_send_u32(target, reg_no);
3582
3583 /* send CP register value */
3584 xscale_send_u32(target, value);
3585
3586 /* execute cpwait to ensure outstanding operations complete */
3587 xscale_send_u32(target, 0x53);
3588 }
3589 else
3590 {
3591 command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
3592 }
3593
3594 return ERROR_OK;
3595 }
3596
3597 static const struct command_registration xscale_exec_command_handlers[] = {
3598 {
3599 .name = "cache_info",
3600 .handler = &xscale_handle_cache_info_command,
3601 .mode = COMMAND_EXEC,
3602 },
3603
3604 {
3605 .name = "mmu",
3606 .handler = &xscale_handle_mmu_command,
3607 .mode = COMMAND_EXEC,
3608 .usage = "[enable|disable]",
3609 .help = "enable or disable the MMU",
3610 },
3611 {
3612 .name = "icache",
3613 .handler = &xscale_handle_idcache_command,
3614 .mode = COMMAND_EXEC,
3615 .usage = "[enable|disable]",
3616 .help = "enable or disable the ICache",
3617 },
3618 {
3619 .name = "dcache",
3620 .handler = &xscale_handle_idcache_command,
3621 .mode = COMMAND_EXEC,
3622 .usage = "[enable|disable]",
3623 .help = "enable or disable the DCache",
3624 },
3625
3626 {
3627 .name = "vector_catch",
3628 .handler = &xscale_handle_vector_catch_command,
3629 .mode = COMMAND_EXEC,
3630 .help = "mask of vectors that should be caught",
3631 .usage = "[<mask>]",
3632 },
3633 {
3634 .name = "vector_table",
3635 .handler = &xscale_handle_vector_table_command,
3636 .mode = COMMAND_EXEC,
3637 .usage = "<high|low> <index> <code>",
3638 .help = "set static code for exception handler entry",
3639 },
3640
3641 {
3642 .name = "trace_buffer",
3643 .handler = &xscale_handle_trace_buffer_command,
3644 .mode = COMMAND_EXEC,
3645 .usage = "<enable | disable> [fill [n]|wrap]",
3646 },
3647 {
3648 .name = "dump_trace",
3649 .handler = &xscale_handle_dump_trace_command,
3650 .mode = COMMAND_EXEC,
3651 .help = "dump content of trace buffer to <file>",
3652 .usage = "<file>",
3653 },
3654 {
3655 .name = "analyze_trace",
3656 .handler = &xscale_handle_analyze_trace_buffer_command,
3657 .mode = COMMAND_EXEC,
3658 .help = "analyze content of trace buffer",
3659 },
3660 {
3661 .name = "trace_image",
3662 .handler = &xscale_handle_trace_image_command,
3663 .mode = COMMAND_EXEC,
3664 .help = "load image from <file> [base address]",
3665 .usage = "<file> [address] [type]",
3666 },
3667
3668 {
3669 .name = "cp15",
3670 .handler = &xscale_handle_cp15,
3671 .mode = COMMAND_EXEC,
3672 .help = "access coproc 15",
3673 .usage = "<register> [value]",
3674 },
3675 COMMAND_REGISTRATION_DONE
3676 };
3677 static const struct command_registration xscale_any_command_handlers[] = {
3678 {
3679 .name = "debug_handler",
3680 .handler = &xscale_handle_debug_handler_command,
3681 .mode = COMMAND_ANY,
3682 .usage = "<target#> <address>",
3683 },
3684 {
3685 .name = "cache_clean_address",
3686 .handler = &xscale_handle_cache_clean_address_command,
3687 .mode = COMMAND_ANY,
3688 },
3689 {
3690 .chain = xscale_exec_command_handlers,
3691 },
3692 COMMAND_REGISTRATION_DONE
3693 };
3694 static const struct command_registration xscale_command_handlers[] = {
3695 {
3696 .chain = arm_command_handlers,
3697 },
3698 {
3699 .name = "xscale",
3700 .mode = COMMAND_ANY,
3701 .help = "xscale command group",
3702 .chain = xscale_any_command_handlers,
3703 },
3704 COMMAND_REGISTRATION_DONE
3705 };
3706
3707 struct target_type xscale_target =
3708 {
3709 .name = "xscale",
3710
3711 .poll = xscale_poll,
3712 .arch_state = xscale_arch_state,
3713
3714 .target_request_data = NULL,
3715
3716 .halt = xscale_halt,
3717 .resume = xscale_resume,
3718 .step = xscale_step,
3719
3720 .assert_reset = xscale_assert_reset,
3721 .deassert_reset = xscale_deassert_reset,
3722 .soft_reset_halt = NULL,
3723
3724 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
3725
3726 .read_memory = xscale_read_memory,
3727 .read_phys_memory = xscale_read_phys_memory,
3728 .write_memory = xscale_write_memory,
3729 .write_phys_memory = xscale_write_phys_memory,
3730 .bulk_write_memory = xscale_bulk_write_memory,
3731
3732 .checksum_memory = arm_checksum_memory,
3733 .blank_check_memory = arm_blank_check_memory,
3734
3735 .run_algorithm = armv4_5_run_algorithm,
3736
3737 .add_breakpoint = xscale_add_breakpoint,
3738 .remove_breakpoint = xscale_remove_breakpoint,
3739 .add_watchpoint = xscale_add_watchpoint,
3740 .remove_watchpoint = xscale_remove_watchpoint,
3741
3742 .commands = xscale_command_handlers,
3743 .target_create = xscale_target_create,
3744 .init_target = xscale_init_target,
3745
3746 .virt2phys = xscale_virt2phys,
3747 .mmu = xscale_mmu
3748 };
