ARM: list number of HW breakpoints/watchpoints
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
41
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
74
75
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
78 *
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
83 */
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
86
/* Names of the XScale-specific registers exposed through the register
 * cache.  Order must match xscale_reg_arch_info[] below and the
 * XSCALE_* register indices (XSCALE_MAINID == 0, ...).
 */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID", /* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0", /* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX", /* 20 */
	"XSCALE_TXRXCTRL",
};
112
/* Per-register backing info, parallel to xscale_reg_list[] above.
 * The first field is the register number used by the debug handler;
 * -1 marks registers that are not read/written through the handler
 * but accessed directly over JTAG (see trailing comments).
 */
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
	{-1, NULL}, /* TX accessed via JTAG */
	{-1, NULL}, /* RX accessed via JTAG */
	{-1, NULL}, /* TXRXCTRL implicit access via JTAG */
};
138
139 /* convenience wrapper to access XScale specific registers */
/* Convenience wrapper: write a host-order 32-bit value into an XScale
 * register via the generic xscale_set_reg() path. */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t scratch[4];

	buf_set_u32(scratch, 0, 32, value);
	return xscale_set_reg(reg, scratch);
}
148
149 static const char xscale_not[] = "target is not an XScale";
150
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
153 {
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
157 }
158 return ERROR_OK;
159 }
160
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
162 {
163 if (tap == NULL)
164 return ERROR_FAIL;
165
166 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
167 {
168 struct scan_field field;
169 uint8_t scratch[4];
170
171 memset(&field, 0, sizeof field);
172 field.tap = tap;
173 field.num_bits = tap->ir_length;
174 field.out_value = scratch;
175 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
176
177 jtag_add_ir_scan(1, &field, jtag_get_end_state());
178 }
179
180 return ERROR_OK;
181 }
182
/* Read the Debug Control and Status Register (DCSR) over JTAG.
 *
 * The SELDCSR scan is 3 + 32 + 1 bits wide.  A first scan (ending in
 * Pause-DR) captures the current DCSR value into the register cache
 * while also driving hold_rst / external_debug_break in the 3-bit
 * control field; a second scan writes the just-read value back so the
 * register content is preserved.  Returns a JTAG error code on scan
 * failure, ERROR_OK otherwise.
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	/* stay in Pause-DR after the first scan so we can immediately
	 * scan the value back in */
	jtag_set_end_state(TAP_DRPAUSE);
	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant);

	/* control bits: bit1 = hold processor in reset, bit2 = external
	 * debug break request */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	/* capture DCSR straight into the register cache value buffer */
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* verify the captured handshake bits look sane */
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* cache now reflects hardware */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_set_end_state(TAP_IDLE);

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
249
250
251 static void xscale_getbuf(jtag_callback_data_t arg)
252 {
253 uint8_t *in = (uint8_t *)arg;
254 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
255 }
256
257 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
258 {
259 if (num_words == 0)
260 return ERROR_INVALID_ARGUMENTS;
261
262 struct xscale_common *xscale = target_to_xscale(target);
263 int retval = ERROR_OK;
264 tap_state_t path[3];
265 struct scan_field fields[3];
266 uint8_t *field0 = malloc(num_words * 1);
267 uint8_t field0_check_value = 0x2;
268 uint8_t field0_check_mask = 0x6;
269 uint32_t *field1 = malloc(num_words * 4);
270 uint8_t field2_check_value = 0x0;
271 uint8_t field2_check_mask = 0x1;
272 int words_done = 0;
273 int words_scheduled = 0;
274 int i;
275
276 path[0] = TAP_DRSELECT;
277 path[1] = TAP_DRCAPTURE;
278 path[2] = TAP_DRSHIFT;
279
280 memset(&fields, 0, sizeof fields);
281
282 fields[0].tap = target->tap;
283 fields[0].num_bits = 3;
284 fields[0].check_value = &field0_check_value;
285 fields[0].check_mask = &field0_check_mask;
286
287 fields[1].tap = target->tap;
288 fields[1].num_bits = 32;
289
290 fields[2].tap = target->tap;
291 fields[2].num_bits = 1;
292 fields[2].check_value = &field2_check_value;
293 fields[2].check_mask = &field2_check_mask;
294
295 jtag_set_end_state(TAP_IDLE);
296 xscale_jtag_set_instr(target->tap,
297 XSCALE_DBGTX << xscale->xscale_variant);
298 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
299
300 /* repeat until all words have been collected */
301 int attempts = 0;
302 while (words_done < num_words)
303 {
304 /* schedule reads */
305 words_scheduled = 0;
306 for (i = words_done; i < num_words; i++)
307 {
308 fields[0].in_value = &field0[i];
309
310 jtag_add_pathmove(3, path);
311
312 fields[1].in_value = (uint8_t *)(field1 + i);
313
314 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
315
316 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
317
318 words_scheduled++;
319 }
320
321 if ((retval = jtag_execute_queue()) != ERROR_OK)
322 {
323 LOG_ERROR("JTAG error while receiving data from debug handler");
324 break;
325 }
326
327 /* examine results */
328 for (i = words_done; i < num_words; i++)
329 {
330 if (!(field0[0] & 1))
331 {
332 /* move backwards if necessary */
333 int j;
334 for (j = i; j < num_words - 1; j++)
335 {
336 field0[j] = field0[j + 1];
337 field1[j] = field1[j + 1];
338 }
339 words_scheduled--;
340 }
341 }
342 if (words_scheduled == 0)
343 {
344 if (attempts++==1000)
345 {
346 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
347 retval = ERROR_TARGET_TIMEOUT;
348 break;
349 }
350 }
351
352 words_done += words_scheduled;
353 }
354
355 for (i = 0; i < num_words; i++)
356 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
357
358 free(field1);
359
360 return retval;
361 }
362
/* Read the debug handler's TX register once.
 *
 * Polls the 3+32+1-bit DBGTX scan chain for up to ~1 second.  When
 * consume is non-zero the TAP goes Capture-DR -> Shift-DR, which
 * clears TX_READY and removes the word; otherwise a detour through
 * Pause-DR peeks at TX without consuming it.  The 32-bit payload lands
 * directly in the XSCALE_TX register-cache buffer.
 *
 * Returns ERROR_OK when a valid word was read,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when TX held no data (only
 * possible in non-consume mode, or after a JTAG error path), or
 * ERROR_TARGET_TIMEOUT on scan failure / timeout.
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGTX << xscale->xscale_variant);

	/* consuming path: Capture-DR straight into Shift-DR */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: detour via Exit1/Pause/Exit2 so TX_READY
	 * is left untouched */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	/* payload goes straight into the TX register-cache buffer */
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* give the handler up to one second to produce data */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		*/
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* bit 0 of the status field is TX_READY; in non-consume
		 * mode we return immediately regardless */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
461
/* Write the cached XSCALE_RX register value into the debug handler's
 * RX register over JTAG.
 *
 * Polls the DBGRX scan chain (3+32+1 bits) for up to ~1 second until
 * the handler has consumed the previous word (RX_READY, bit 0 of the
 * status field, reads low), then performs one final scan with the
 * "rx_valid" flush bit set to commit the new word.
 *
 * Returns ERROR_OK on success, ERROR_TARGET_TIMEOUT if the handler
 * never drains RX, or a JTAG error code on scan failure.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	/* payload comes straight from the RX register-cache buffer */
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* allow up to one second for the handler to drain RX */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* bit 0 low means the previous word has been consumed */
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
547
548 /* send count elements of size byte to the debug handler */
/* send count elements of size byte to the debug handler */
/* Stream count elements of 'size' bytes (1, 2 or 4) from buffer to the
 * debug handler via the DBGRX data register, converting each element
 * from target endianness to host order first.
 *
 * Unlike xscale_write_rx(), this fast path queues every word as a
 * write-only DR scan (t[0]=0 control bits, t[2]=1 sets the flush bit)
 * without polling RX_READY between words, then executes the whole
 * queue at once — the handler is assumed to keep up.
 *
 * Returns ERROR_OK, ERROR_INVALID_ARGUMENTS for a bad element size,
 * or a JTAG error code from queue execution.
 */
static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t t[3];
	int bits[3];
	int retval;
	int done_count = 0;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant);

	/* fixed parts of the 3-field scan: control bits and flush bit */
	bits[0]=3;
	t[0]=0;
	bits[1]=32;
	t[2]=1;
	bits[2]=1;
	int endianness = target->endianness;
	while (done_count++ < count)
	{
		/* convert the next element from target to host byte order */
		switch (size)
		{
		case 4:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u32(buffer);
			} else
			{
				t[1]=be_to_h_u32(buffer);
			}
			break;
		case 2:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u16(buffer);
			} else
			{
				t[1]=be_to_h_u16(buffer);
			}
			break;
		case 1:
			t[1]=buffer[0];
			break;
		default:
			LOG_ERROR("BUG: size neither 4, 2 nor 1");
			return ERROR_INVALID_ARGUMENTS;
		}
		jtag_add_dr_out(target->tap,
				3,
				bits,
				t,
				jtag_set_end_state(TAP_IDLE));
		buffer += size;
	}

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
613
614 static int xscale_send_u32(struct target *target, uint32_t value)
615 {
616 struct xscale_common *xscale = target_to_xscale(target);
617
618 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
619 return xscale_write_rx(target);
620 }
621
/* Write the cached DCSR value to the target over JTAG, optionally
 * updating the sticky hold-reset and external-debug-break flags first.
 *
 * hold_rst / ext_dbg_brk: 0 or 1 to set the corresponding flag, -1 to
 * leave the currently stored value unchanged.  The flags ride in the
 * 3-bit control field of the SELDCSR scan alongside the 32-bit DCSR
 * payload.  Returns ERROR_OK or a JTAG error code.
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	/* -1 means "keep previous setting" */
	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant);

	/* control bits: bit1 = hold reset, bit2 = external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	/* DCSR payload comes from the register cache */
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* cache now matches hardware */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
681
/* Bit parity of a 32-bit word: returns 0 when the number of set bits
 * is even, 1 when it is odd.  Used to tag each instruction word sent
 * to the mini-ICache load packet. */
static unsigned int parity (unsigned int v)
{
	unsigned int x = v;

	/* XOR-fold halves together until bit 0 holds the parity */
	x ^= x >> 16;
	x ^= x >> 8;
	x ^= x >> 4;
	x ^= x >> 2;
	x ^= x >> 1;

	return x & 1;
}
693
/* Load one 8-word cache line (buffer[0..7]) into the mini-ICache at
 * virtual address va via the LDIC JTAG instruction.
 *
 * The sequence is: one 6+27-bit scan carrying the load command and the
 * line's VA, then eight 32+1-bit scans each carrying an instruction
 * word plus its parity bit.  Note that the two scan_field slots are
 * re-purposed (widths and out_values rewritten) between the header
 * scan and the data scans.  Returns the jtag_execute_queue() result.
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
			XSCALE_LDIC << xscale->xscale_variant);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	/* header scan: command + line address */
	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* parity is computed over the scan-buffer image of the word */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(2, fields, jtag_get_end_state());
	}

	return jtag_execute_queue();
}
750
751 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
752 {
753 struct xscale_common *xscale = target_to_xscale(target);
754 uint8_t packet[4];
755 uint8_t cmd;
756 struct scan_field fields[2];
757
758 jtag_set_end_state(TAP_IDLE);
759 xscale_jtag_set_instr(target->tap,
760 XSCALE_LDIC << xscale->xscale_variant);
761
762 /* CMD for invalidate IC line b000, bits [6:4] b000 */
763 buf_set_u32(&cmd, 0, 6, 0x0);
764
765 /* virtual address of desired cache line */
766 buf_set_u32(packet, 0, 27, va >> 5);
767
768 memset(&fields, 0, sizeof fields);
769
770 fields[0].tap = target->tap;
771 fields[0].num_bits = 6;
772 fields[0].out_value = &cmd;
773
774 fields[1].tap = target->tap;
775 fields[1].num_bits = 27;
776 fields[1].out_value = packet;
777
778 jtag_add_dr_scan(2, fields, jtag_get_end_state());
779
780 return ERROR_OK;
781 }
782
/* Rebuild the exception-vector tables that the debug scheme relies on
 * and load them into the mini-ICache.
 *
 * For vectors 1..7 (undef, SWI, aborts, IRQ, FIQ, ...), use a
 * user-supplied static vector when one is configured, otherwise read
 * the current vector word from target memory (falling back to a
 * self-branch when the read fails).  Vector 0 (reset) is always
 * overwritten with a branch into the debug handler.  Both the low
 * (0x0) and high (0xffff0000) vector lines are then invalidated and
 * re-loaded into the mini-ICache.
 *
 * Returns ERROR_OK, or a timeout error if a memory read timed out.
 */
static int xscale_update_vectors(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int i;
	int retval;

	uint32_t low_reset_branch, high_reset_branch;

	/* high vectors (0xffff0000): entries 1..7 */
	for (i = 1; i < 8; i++)
	{
		/* if there's a static vector specified for this exception, override */
		if (xscale->static_high_vectors_set & (1 << i))
		{
			xscale->high_vectors[i] = xscale->static_high_vectors[i];
		}
		else
		{
			retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK)
			{
				/* Some of these reads will fail as part of normal execution */
				xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	/* low vectors (0x0): entries 1..7 */
	for (i = 1; i < 8; i++)
	{
		if (xscale->static_low_vectors_set & (1 << i))
		{
			xscale->low_vectors[i] = xscale->static_low_vectors[i];
		}
		else
		{
			retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK)
			{
				/* Some of these reads will fail as part of normal execution */
				xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	/* reset vector always jumps into the debug handler */
	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* invalidate and load exception vectors in mini i-cache */
	xscale_invalidate_ic_line(target, 0x0);
	xscale_invalidate_ic_line(target, 0xffff0000);

	xscale_load_ic(target, 0x0, xscale->low_vectors);
	xscale_load_ic(target, 0xffff0000, xscale->high_vectors);

	return ERROR_OK;
}
846
847 static int xscale_arch_state(struct target *target)
848 {
849 struct xscale_common *xscale = target_to_xscale(target);
850 struct arm *armv4_5 = &xscale->armv4_5_common;
851
852 static const char *state[] =
853 {
854 "disabled", "enabled"
855 };
856
857 static const char *arch_dbg_reason[] =
858 {
859 "", "\n(processor reset)", "\n(trace buffer full)"
860 };
861
862 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
863 {
864 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
865 return ERROR_INVALID_ARGUMENTS;
866 }
867
868 arm_arch_state(target);
869 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
870 state[xscale->armv4_5_mmu.mmu_enabled],
871 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
872 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
873 arch_dbg_reason[xscale->arch_debug_reason]);
874
875 return ERROR_OK;
876 }
877
/* Periodic poll: detect debug-state entry by attempting a
 * non-consuming read of the debug handler's TX register.
 *
 * A successful read means the handler is running, i.e. the core has
 * entered debug state, so the full debug-entry sequence is performed
 * and HALTED/DEBUG_HALTED events are fired.  Other TX errors are
 * treated as a dead connection: the state is forced to HALTED so GDB
 * can recover via reset.
 */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		/* peek at TX without consuming it (consume == 0) */
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be perfomed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
917
/* Complete entry into debug state: unload the register snapshot the
 * debug handler pushes through TX, decode the method-of-entry, apply
 * the PC fixup, refresh cached MMU/cache settings, and service the
 * trace buffer.
 *
 * Protocol (must match the debug handler): first 10 words are
 * r0, pc, r1..r7, cpsr; then r8..r14 plus spsr (8 words) in modes that
 * have an SPSR, or just r8..r14 (7 words) in USR/SYS mode.
 *
 * Returns ERROR_OK, ERROR_TARGET_FAILURE on an invalid CPSR mode, or
 * an error from the DCSR/TX transfers.  May immediately resume the
 * target (and overwrite target->state) when trace collection is still
 * in progress — callers check for that.
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
	armv4_5->core_cache->reg_list[15].dirty = 1;
	armv4_5->core_cache->reg_list[15].valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	/* word 9 is the CPSR; updates the cached core mode too */
	arm_set_cpsr(armv4_5, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	if (!is_arm_mode(armv4_5->core_mode))
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
			arm_mode_name(armv4_5->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (armv4_5->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
		armv4_5->spsr->dirty = false;
		armv4_5->spsr->valid = true;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++)
	{
		struct reg *r = arm_reg_current(armv4_5, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* examine debug reason */
	xscale_read_dcsr(target);
	/* method of entry: DCSR bits [4:2] */
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	/* translate the method-of-entry into debug reason and undo the
	 * pipeline offset in the saved PC */
	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			/* NOTE(review): exiting the whole daemon here is drastic;
			 * an error return might be preferable — needs upstream review */
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}
1087
1088 static int xscale_halt(struct target *target)
1089 {
1090 struct xscale_common *xscale = target_to_xscale(target);
1091
1092 LOG_DEBUG("target->state: %s",
1093 target_state_name(target));
1094
1095 if (target->state == TARGET_HALTED)
1096 {
1097 LOG_DEBUG("target was already halted");
1098 return ERROR_OK;
1099 }
1100 else if (target->state == TARGET_UNKNOWN)
1101 {
1102 /* this must not happen for a xscale target */
1103 LOG_ERROR("target was in unknown state when halt was requested");
1104 return ERROR_TARGET_INVALID;
1105 }
1106 else if (target->state == TARGET_RESET)
1107 {
1108 LOG_DEBUG("target->state == TARGET_RESET");
1109 }
1110 else
1111 {
1112 /* assert external dbg break */
1113 xscale->external_debug_break = 1;
1114 xscale_read_dcsr(target);
1115
1116 target->debug_reason = DBG_REASON_DBGRQ;
1117 }
1118
1119 return ERROR_OK;
1120 }
1121
1122 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1123 {
1124 struct xscale_common *xscale = target_to_xscale(target);
1125 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1126 int retval;
1127
1128 if (xscale->ibcr0_used)
1129 {
1130 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1131
1132 if (ibcr0_bp)
1133 {
1134 xscale_unset_breakpoint(target, ibcr0_bp);
1135 }
1136 else
1137 {
1138 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1139 exit(-1);
1140 }
1141 }
1142
1143 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1144 return retval;
1145
1146 return ERROR_OK;
1147 }
1148
1149 static int xscale_disable_single_step(struct target *target)
1150 {
1151 struct xscale_common *xscale = target_to_xscale(target);
1152 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1153 int retval;
1154
1155 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1156 return retval;
1157
1158 return ERROR_OK;
1159 }
1160
1161 static void xscale_enable_watchpoints(struct target *target)
1162 {
1163 struct watchpoint *watchpoint = target->watchpoints;
1164
1165 while (watchpoint)
1166 {
1167 if (watchpoint->set == 0)
1168 xscale_set_watchpoint(target, watchpoint);
1169 watchpoint = watchpoint->next;
1170 }
1171 }
1172
1173 static void xscale_enable_breakpoints(struct target *target)
1174 {
1175 struct breakpoint *breakpoint = target->breakpoints;
1176
1177 /* set any pending breakpoints */
1178 while (breakpoint)
1179 {
1180 if (breakpoint->set == 0)
1181 xscale_set_breakpoint(target, breakpoint);
1182 breakpoint = breakpoint->next;
1183 }
1184 }
1185
/* Resume execution, either at the current PC (current != 0) or at
 * <address>.
 *
 * If a breakpoint sits at the resume PC and handle_breakpoints is set,
 * the breakpoint is removed, the core is single-stepped over it via the
 * debug handler, and the breakpoint is re-installed before the real
 * resume. The debug handler protocol used here: optional 0x62 (clean
 * trace buffer) then 0x31, or plain 0x30 (resume); followed by CPSR,
 * r7..r0 (descending), and finally the PC.
 *
 * Returns ERROR_OK, or an error from xscale_update_vectors(). Errors
 * from the send/step helpers in the body are not propagated.
 */
static int xscale_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct breakpoint *breakpoint = target->breakpoints;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* working areas hold debugger scratch code/data; free them unless we
	 * are resuming for internal (debug) execution */
	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
		{
			uint32_t next_pc;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			}

			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			/* NOTE(review): retval is assigned here but never checked */
			retval = xscale_restore_banked(target);

			/* send resume request (command 0x30 or 0x31)
			 * clean the trace buffer if it is to be enabled (0x62) */
			if (xscale->trace.buffer_enabled)
			{
				xscale_send_u32(target, 0x62);
				xscale_send_u32(target, 0x31);
			}
			else
				xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));

			/* r7 down to r0, as the debug handler expects */
			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

			/* wait for and process debug entry (the single step completing) */
			xscale_debug_entry(target);

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	/* NOTE(review): retval is assigned here but never checked */
	retval = xscale_restore_banked(target);

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* r7 down to r0, as the debug handler expects */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		register_cache_invalidate(armv4_5->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
1341
/* Perform the actual single step: compute the next PC by simulation,
 * arm IBCR0 on it, flush dirty banked registers, issue the debug-handler
 * resume sequence (0x30/0x31 [+0x62], CPSR, r7..r0, PC), wait for the
 * resulting debug re-entry, then disarm the step breakpoint.
 *
 * NOTE(review): the current/address/handle_breakpoints parameters are
 * not used here; PC adjustment is done by the caller (xscale_step).
 */
static int xscale_step_inner(struct target *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_banked(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
	if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
		return retval;

	/* send CPSR */
	retval = xscale_send_u32(target,
		buf_get_u32(armv4_5->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* r7 down to r0, as the debug handler expects */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
		return retval;
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1423
1424 static int xscale_step(struct target *target, int current,
1425 uint32_t address, int handle_breakpoints)
1426 {
1427 struct arm *armv4_5 = target_to_arm(target);
1428 struct breakpoint *breakpoint = target->breakpoints;
1429
1430 uint32_t current_pc;
1431 int retval;
1432
1433 if (target->state != TARGET_HALTED)
1434 {
1435 LOG_WARNING("target not halted");
1436 return ERROR_TARGET_NOT_HALTED;
1437 }
1438
1439 /* current = 1: continue on current pc, otherwise continue at <address> */
1440 if (!current)
1441 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1442
1443 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1444
1445 /* if we're at the reset vector, we have to simulate the step */
1446 if (current_pc == 0x0)
1447 {
1448 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1449 return retval;
1450 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1451
1452 target->debug_reason = DBG_REASON_SINGLESTEP;
1453 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1454
1455 return ERROR_OK;
1456 }
1457
1458 /* the front-end may request us not to handle breakpoints */
1459 if (handle_breakpoints)
1460 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1461 {
1462 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1463 return retval;
1464 }
1465
1466 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1467
1468 if (breakpoint)
1469 {
1470 xscale_set_breakpoint(target, breakpoint);
1471 }
1472
1473 LOG_DEBUG("target stepped");
1474
1475 return ERROR_OK;
1476
1477 }
1478
/* Assert SRST on the target.
 *
 * Before pulling reset, DCSR is programmed with "Hold reset" (bit 30)
 * and "Trap Reset" (bit 16) so the core will trap into the debug
 * handler when reset deasserts, and the scan chain is parked in BYPASS
 * (DCSR selected during reset caused problems on PXA27x).
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		  target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, 0x7f);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	/* if reset-halt was requested, halt now, while reset is asserted */
	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1520
/* Deassert SRST and bring the core up under debugger control.
 *
 * All hardware breakpoint/watchpoint bookkeeping is cleared (the
 * comparator registers were reset), the debug handler binary is loaded
 * into the mini-icache line by line, the low/high exception vectors are
 * installed, and finally "Hold reset" is released so the core traps
 * straight into the debug handler. If reset_halt was not requested, the
 * resulting debug entry is processed and the target resumed.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* hardware comparators were cleared by reset */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	register_cache_invalidate(xscale->armv4_5_common.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		/* NOTE(review): the "- 1" presumably drops a trailing byte of
		 * the embedded handler image — confirm against its generator */
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			/* one mini-icache line is 32 bytes (8 words) */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad the rest of the line with "mov r8, r8" no-ops */
			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		/* install the exception vector tables at 0x0 and 0xffff0000 */
		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1654
/* Stub: reading a single core register via the debug handler is not
 * implemented; callers go through the full-context path instead. */
static int xscale_read_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1662
/* Stub: writing a single core register via the debug handler is not
 * implemented; callers go through the restore-banked path instead. */
static int xscale_write_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode, uint32_t value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1670
1671 static int xscale_full_context(struct target *target)
1672 {
1673 struct arm *armv4_5 = target_to_arm(target);
1674
1675 uint32_t *buffer;
1676
1677 int i, j;
1678
1679 LOG_DEBUG("-");
1680
1681 if (target->state != TARGET_HALTED)
1682 {
1683 LOG_WARNING("target not halted");
1684 return ERROR_TARGET_NOT_HALTED;
1685 }
1686
1687 buffer = malloc(4 * 8);
1688
1689 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1690 * we can't enter User mode on an XScale (unpredictable),
1691 * but User shares registers with SYS
1692 */
1693 for (i = 1; i < 7; i++)
1694 {
1695 enum arm_mode mode = armv4_5_number_to_mode(i);
1696 bool valid = true;
1697 struct reg *r;
1698
1699 if (mode == ARM_MODE_USR)
1700 continue;
1701
1702 /* check if there are invalid registers in the current mode
1703 */
1704 for (j = 0; valid && j <= 16; j++)
1705 {
1706 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1707 mode, j).valid)
1708 valid = false;
1709 }
1710 if (valid)
1711 continue;
1712
1713 /* request banked registers */
1714 xscale_send_u32(target, 0x0);
1715
1716 /* send CPSR for desired bank mode */
1717 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1718
1719 /* get banked registers: r8 to r14; and SPSR
1720 * except in USR/SYS mode
1721 */
1722 if (mode != ARM_MODE_SYS) {
1723 /* SPSR */
1724 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1725 mode, 16);
1726
1727 xscale_receive(target, buffer, 8);
1728
1729 buf_set_u32(r->value, 0, 32, buffer[7]);
1730 r->dirty = false;
1731 r->valid = true;
1732 } else {
1733 xscale_receive(target, buffer, 7);
1734 }
1735
1736 /* move data from buffer to register cache */
1737 for (j = 8; j <= 14; j++)
1738 {
1739 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1740 mode, j);
1741
1742 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1743 r->dirty = false;
1744 r->valid = true;
1745 }
1746 }
1747
1748 free(buffer);
1749
1750 return ERROR_OK;
1751 }
1752
1753 static int xscale_restore_banked(struct target *target)
1754 {
1755 struct arm *armv4_5 = target_to_arm(target);
1756
1757 int i, j;
1758
1759 if (target->state != TARGET_HALTED)
1760 {
1761 LOG_WARNING("target not halted");
1762 return ERROR_TARGET_NOT_HALTED;
1763 }
1764
1765 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1766 * and check if any banked registers need to be written. Ignore
1767 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1768 * an XScale (unpredictable), but they share all registers.
1769 */
1770 for (i = 1; i < 7; i++)
1771 {
1772 enum arm_mode mode = armv4_5_number_to_mode(i);
1773 struct reg *r;
1774
1775 if (mode == ARM_MODE_USR)
1776 continue;
1777
1778 /* check if there are dirty registers in this mode */
1779 for (j = 8; j <= 14; j++)
1780 {
1781 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1782 mode, j).dirty)
1783 goto dirty;
1784 }
1785
1786 /* if not USR/SYS, check if the SPSR needs to be written */
1787 if (mode != ARM_MODE_SYS)
1788 {
1789 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1790 mode, 16).dirty)
1791 goto dirty;
1792 }
1793
1794 /* there's nothing to flush for this mode */
1795 continue;
1796
1797 dirty:
1798 /* command 0x1: "send banked registers" */
1799 xscale_send_u32(target, 0x1);
1800
1801 /* send CPSR for desired mode */
1802 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1803
1804 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1805 * but this protocol doesn't understand that nuance.
1806 */
1807 for (j = 8; j <= 14; j++) {
1808 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1809 mode, j);
1810 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1811 r->dirty = false;
1812 }
1813
1814 /* send spsr if not in USR/SYS mode */
1815 if (mode != ARM_MODE_SYS) {
1816 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1817 mode, 16);
1818 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1819 r->dirty = false;
1820 }
1821 }
1822
1823 return ERROR_OK;
1824 }
1825
1826 static int xscale_read_memory(struct target *target, uint32_t address,
1827 uint32_t size, uint32_t count, uint8_t *buffer)
1828 {
1829 struct xscale_common *xscale = target_to_xscale(target);
1830 uint32_t *buf32;
1831 uint32_t i;
1832 int retval;
1833
1834 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1835
1836 if (target->state != TARGET_HALTED)
1837 {
1838 LOG_WARNING("target not halted");
1839 return ERROR_TARGET_NOT_HALTED;
1840 }
1841
1842 /* sanitize arguments */
1843 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1844 return ERROR_INVALID_ARGUMENTS;
1845
1846 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1847 return ERROR_TARGET_UNALIGNED_ACCESS;
1848
1849 /* send memory read request (command 0x1n, n: access size) */
1850 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1851 return retval;
1852
1853 /* send base address for read request */
1854 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1855 return retval;
1856
1857 /* send number of requested data words */
1858 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1859 return retval;
1860
1861 /* receive data from target (count times 32-bit words in host endianness) */
1862 buf32 = malloc(4 * count);
1863 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1864 return retval;
1865
1866 /* extract data from host-endian buffer into byte stream */
1867 for (i = 0; i < count; i++)
1868 {
1869 switch (size)
1870 {
1871 case 4:
1872 target_buffer_set_u32(target, buffer, buf32[i]);
1873 buffer += 4;
1874 break;
1875 case 2:
1876 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1877 buffer += 2;
1878 break;
1879 case 1:
1880 *buffer++ = buf32[i] & 0xff;
1881 break;
1882 default:
1883 LOG_ERROR("invalid read size");
1884 return ERROR_INVALID_ARGUMENTS;
1885 }
1886 }
1887
1888 free(buf32);
1889
1890 /* examine DCSR, to see if Sticky Abort (SA) got set */
1891 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1892 return retval;
1893 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1894 {
1895 /* clear SA bit */
1896 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1897 return retval;
1898
1899 return ERROR_TARGET_DATA_ABORT;
1900 }
1901
1902 return ERROR_OK;
1903 }
1904
/* Stub: physical-address reads are not supported; the debug handler
 * only performs virtual accesses through the active MMU mapping. */
static int xscale_read_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	/** \todo: provide a non-stub implementtion of this routine. */
	LOG_ERROR("%s: %s is not implemented.  Disable MMU?",
			target_name(target), __func__);
	return ERROR_FAIL;
}
1913
1914 static int xscale_write_memory(struct target *target, uint32_t address,
1915 uint32_t size, uint32_t count, uint8_t *buffer)
1916 {
1917 struct xscale_common *xscale = target_to_xscale(target);
1918 int retval;
1919
1920 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1921
1922 if (target->state != TARGET_HALTED)
1923 {
1924 LOG_WARNING("target not halted");
1925 return ERROR_TARGET_NOT_HALTED;
1926 }
1927
1928 /* sanitize arguments */
1929 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1930 return ERROR_INVALID_ARGUMENTS;
1931
1932 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1933 return ERROR_TARGET_UNALIGNED_ACCESS;
1934
1935 /* send memory write request (command 0x2n, n: access size) */
1936 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1937 return retval;
1938
1939 /* send base address for read request */
1940 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1941 return retval;
1942
1943 /* send number of requested data words to be written*/
1944 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1945 return retval;
1946
1947 /* extract data from host-endian buffer into byte stream */
1948 #if 0
1949 for (i = 0; i < count; i++)
1950 {
1951 switch (size)
1952 {
1953 case 4:
1954 value = target_buffer_get_u32(target, buffer);
1955 xscale_send_u32(target, value);
1956 buffer += 4;
1957 break;
1958 case 2:
1959 value = target_buffer_get_u16(target, buffer);
1960 xscale_send_u32(target, value);
1961 buffer += 2;
1962 break;
1963 case 1:
1964 value = *buffer;
1965 xscale_send_u32(target, value);
1966 buffer += 1;
1967 break;
1968 default:
1969 LOG_ERROR("should never get here");
1970 exit(-1);
1971 }
1972 }
1973 #endif
1974 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1975 return retval;
1976
1977 /* examine DCSR, to see if Sticky Abort (SA) got set */
1978 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1979 return retval;
1980 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1981 {
1982 /* clear SA bit */
1983 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1984 return retval;
1985
1986 return ERROR_TARGET_DATA_ABORT;
1987 }
1988
1989 return ERROR_OK;
1990 }
1991
/* Stub: physical-address writes are not supported; the debug handler
 * only performs virtual accesses through the active MMU mapping. */
static int xscale_write_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	/** \todo: provide a non-stub implementtion of this routine. */
	LOG_ERROR("%s: %s is not implemented.  Disable MMU?",
			target_name(target), __func__);
	return ERROR_FAIL;
}
2000
/* Bulk write is just a word-sized (size = 4) memory write; the debug
 * handler protocol has no faster dedicated bulk path. */
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
2006
2007 static uint32_t xscale_get_ttb(struct target *target)
2008 {
2009 struct xscale_common *xscale = target_to_xscale(target);
2010 uint32_t ttb;
2011
2012 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2013 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2014
2015 return ttb;
2016 }
2017
/* Disable the MMU and/or caches via the cp15 control register.
 *
 * Before clearing the D/U-cache enable bit, the DCache is cleaned
 * (command 0x50, using the configured cache-clean address) and
 * invalidated (0x51); before clearing the I-cache bit, the ICache is
 * invalidated (0x52). Control register bits: MMU = bit 0, D/U cache =
 * bit 2, I-cache = bit 12. Ends with cpwait (0x53).
 */
static void xscale_disable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;

	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

	if (mmu)
		cp15_control &= ~0x1U;

	if (d_u_cache)
	{
		/* clean DCache */
		xscale_send_u32(target, 0x50);
		xscale_send_u32(target, xscale->cache_clean_address);

		/* invalidate DCache */
		xscale_send_u32(target, 0x51);

		cp15_control &= ~0x4U;
	}

	if (i_cache)
	{
		/* invalidate ICache */
		xscale_send_u32(target, 0x52);
		cp15_control &= ~0x1000U;
	}

	/* write new cp15 control register */
	xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);

	/* execute cpwait to ensure outstanding operations complete */
	xscale_send_u32(target, 0x53);
}
2056
2057 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2058 int d_u_cache, int i_cache)
2059 {
2060 struct xscale_common *xscale = target_to_xscale(target);
2061 uint32_t cp15_control;
2062
2063 /* read cp15 control register */
2064 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2065 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2066
2067 if (mmu)
2068 cp15_control |= 0x1U;
2069
2070 if (d_u_cache)
2071 cp15_control |= 0x4U;
2072
2073 if (i_cache)
2074 cp15_control |= 0x1000U;
2075
2076 /* write new cp15 control register */
2077 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2078
2079 /* execute cpwait to ensure outstanding operations complete */
2080 xscale_send_u32(target, 0x53);
2081 }
2082
/* Install a breakpoint on the target.
 *
 * Hardware breakpoints use one of the two IBCR comparators (address
 * with bit 0 set as enable); breakpoint->set records which one (1 or
 * 2). Software breakpoints save the original instruction and patch in
 * the ARM (4-byte) or Thumb (2-byte) BKPT opcode.
 */
static int xscale_set_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set)
	{
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		/* bit 0 of the IBCR enables the comparator */
		uint32_t value = breakpoint->address | 1;
		if (!xscale->ibcr0_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			breakpoint->set = 1;	/* breakpoint set on first breakpoint register */
		}
		else if (!xscale->ibcr1_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			breakpoint->set = 2;	/* breakpoint set on second breakpoint register */
		}
		else
		{
			/* xscale_add_breakpoint() accounting should prevent this */
			LOG_ERROR("BUG: no hardware comparator available");
			/* NOTE(review): returning ERROR_OK here silently drops the
			 * breakpoint — an error code seems more appropriate; confirm
			 * what callers expect before changing it */
			return ERROR_OK;
		}
	}
	else if (breakpoint->type == BKPT_SOFT)
	{
		if (breakpoint->length == 4)
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
			/* write the ARM bkpt instruction (xscale->arm_bkpt is host endian) */
			if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
			{
				return retval;
			}
		}
		else
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
			/* write the Thumb bkpt instruction (xscale->thumb_bkpt is host endian) */
			if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
			{
				return retval;
			}
		}
		breakpoint->set = 1;
	}

	return ERROR_OK;
}
2155
2156 static int xscale_add_breakpoint(struct target *target,
2157 struct breakpoint *breakpoint)
2158 {
2159 struct xscale_common *xscale = target_to_xscale(target);
2160
2161 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2162 {
2163 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2164 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2165 }
2166
2167 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2168 {
2169 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2170 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2171 }
2172
2173 if (breakpoint->type == BKPT_HARD)
2174 {
2175 xscale->ibcr_available--;
2176 }
2177
2178 return ERROR_OK;
2179 }
2180
2181 static int xscale_unset_breakpoint(struct target *target,
2182 struct breakpoint *breakpoint)
2183 {
2184 int retval;
2185 struct xscale_common *xscale = target_to_xscale(target);
2186
2187 if (target->state != TARGET_HALTED)
2188 {
2189 LOG_WARNING("target not halted");
2190 return ERROR_TARGET_NOT_HALTED;
2191 }
2192
2193 if (!breakpoint->set)
2194 {
2195 LOG_WARNING("breakpoint not set");
2196 return ERROR_OK;
2197 }
2198
2199 if (breakpoint->type == BKPT_HARD)
2200 {
2201 if (breakpoint->set == 1)
2202 {
2203 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2204 xscale->ibcr0_used = 0;
2205 }
2206 else if (breakpoint->set == 2)
2207 {
2208 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2209 xscale->ibcr1_used = 0;
2210 }
2211 breakpoint->set = 0;
2212 }
2213 else
2214 {
2215 /* restore original instruction (kept in target endianness) */
2216 if (breakpoint->length == 4)
2217 {
2218 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2219 {
2220 return retval;
2221 }
2222 }
2223 else
2224 {
2225 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2226 {
2227 return retval;
2228 }
2229 }
2230 breakpoint->set = 0;
2231 }
2232
2233 return ERROR_OK;
2234 }
2235
2236 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2237 {
2238 struct xscale_common *xscale = target_to_xscale(target);
2239
2240 if (target->state != TARGET_HALTED)
2241 {
2242 LOG_WARNING("target not halted");
2243 return ERROR_TARGET_NOT_HALTED;
2244 }
2245
2246 if (breakpoint->set)
2247 {
2248 xscale_unset_breakpoint(target, breakpoint);
2249 }
2250
2251 if (breakpoint->type == BKPT_HARD)
2252 xscale->ibcr_available++;
2253
2254 return ERROR_OK;
2255 }
2256
/* Program a free DBR comparator (DBR0 first, then DBR1) with the
 * watchpoint address and enable it in DBCON with the requested access
 * mode.  watchpoint->set records the comparator used (1 = DBR0, 2 = DBR1).
 */
static int xscale_set_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t enable = 0;
	struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
	uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* NOTE(review): dbcon_value was captured from the cached register
	 * value above, *before* this refresh, so the freshly read value is
	 * discarded -- verify the cache is always current at this point. */
	xscale_get_reg(dbcon);

	/* per-comparator DBCON enable field: write -> 0x1, any access -> 0x2,
	 * read -> 0x3 (see the case mapping below) */
	switch (watchpoint->rw)
	{
		case WPT_READ:
			enable = 0x3;
			break;
		case WPT_ACCESS:
			enable = 0x2;
			break;
		case WPT_WRITE:
			enable = 0x1;
			break;
		default:
			LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
	}

	if (!xscale->dbr0_used)
	{
		/* DBR0: enable bits occupy the low two bits of DBCON */
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
		dbcon_value |= enable;
		xscale_set_reg_u32(dbcon, dbcon_value);
		watchpoint->set = 1;
		xscale->dbr0_used = 1;
	}
	else if (!xscale->dbr1_used)
	{
		/* DBR1: enable bits shifted up by two */
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
		dbcon_value |= enable << 2;
		xscale_set_reg_u32(dbcon, dbcon_value);
		watchpoint->set = 2;
		xscale->dbr1_used = 1;
	}
	else
	{
		/* xscale_add_watchpoint() bookkeeping should make this unreachable */
		LOG_ERROR("BUG: no hardware comparator available");
		return ERROR_OK;
	}

	return ERROR_OK;
}
2312
2313 static int xscale_add_watchpoint(struct target *target,
2314 struct watchpoint *watchpoint)
2315 {
2316 struct xscale_common *xscale = target_to_xscale(target);
2317
2318 if (xscale->dbr_available < 1)
2319 {
2320 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2321 }
2322
2323 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2324 {
2325 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2326 }
2327
2328 xscale->dbr_available--;
2329
2330 return ERROR_OK;
2331 }
2332
2333 static int xscale_unset_watchpoint(struct target *target,
2334 struct watchpoint *watchpoint)
2335 {
2336 struct xscale_common *xscale = target_to_xscale(target);
2337 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2338 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2339
2340 if (target->state != TARGET_HALTED)
2341 {
2342 LOG_WARNING("target not halted");
2343 return ERROR_TARGET_NOT_HALTED;
2344 }
2345
2346 if (!watchpoint->set)
2347 {
2348 LOG_WARNING("breakpoint not set");
2349 return ERROR_OK;
2350 }
2351
2352 if (watchpoint->set == 1)
2353 {
2354 dbcon_value &= ~0x3;
2355 xscale_set_reg_u32(dbcon, dbcon_value);
2356 xscale->dbr0_used = 0;
2357 }
2358 else if (watchpoint->set == 2)
2359 {
2360 dbcon_value &= ~0xc;
2361 xscale_set_reg_u32(dbcon, dbcon_value);
2362 xscale->dbr1_used = 0;
2363 }
2364 watchpoint->set = 0;
2365
2366 return ERROR_OK;
2367 }
2368
2369 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2370 {
2371 struct xscale_common *xscale = target_to_xscale(target);
2372
2373 if (target->state != TARGET_HALTED)
2374 {
2375 LOG_WARNING("target not halted");
2376 return ERROR_TARGET_NOT_HALTED;
2377 }
2378
2379 if (watchpoint->set)
2380 {
2381 xscale_unset_watchpoint(target, watchpoint);
2382 }
2383
2384 xscale->dbr_available++;
2385
2386 return ERROR_OK;
2387 }
2388
/* reg_arch_type "get" handler for the XScale debug register cache.
 *
 * DCSR, TX and RX are reachable directly over JTAG; every other debug
 * register is fetched through the debug handler running on the core.
 */
static int xscale_get_reg(struct reg *reg)
{
	struct xscale_reg *arch_info = reg->arch_info;
	struct target *target = arch_info->target;
	struct xscale_common *xscale = target_to_xscale(target);

	/* DCSR, TX and RX are accessible via JTAG */
	if (strcmp(reg->name, "XSCALE_DCSR") == 0)
	{
		return xscale_read_dcsr(arch_info->target);
	}
	else if (strcmp(reg->name, "XSCALE_TX") == 0)
	{
		/* 1 = consume register content */
		return xscale_read_tx(arch_info->target, 1);
	}
	else if (strcmp(reg->name, "XSCALE_RX") == 0)
	{
		/* can't read from RX register (host -> debug handler) */
		return ERROR_OK;
	}
	else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
	{
		/* can't (explicitly) read from TXRXCTRL register */
		return ERROR_OK;
	}
	else /* Other DBG registers have to be transfered by the debug handler */
	{
		/* send CP read request (command 0x40) */
		xscale_send_u32(target, 0x40);

		/* send CP register number */
		xscale_send_u32(target, arch_info->dbg_handler_number);

		/* read register value: the handler replies via TX, which we
		 * then copy into this register's cache entry */
		xscale_read_tx(target, 1);
		buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);

		reg->dirty = 0;
		reg->valid = 1;
	}

	return ERROR_OK;
}
2433
/* reg_arch_type "set" handler for the XScale debug register cache.
 *
 * DCSR and RX can be written directly over JTAG; every other debug
 * register is written through the debug handler running on the core.
 */
static int xscale_set_reg(struct reg *reg, uint8_t* buf)
{
	struct xscale_reg *arch_info = reg->arch_info;
	struct target *target = arch_info->target;
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t value = buf_get_u32(buf, 0, 32);

	/* DCSR, TX and RX are accessible via JTAG */
	if (strcmp(reg->name, "XSCALE_DCSR") == 0)
	{
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
		return xscale_write_dcsr(arch_info->target, -1, -1);
	}
	else if (strcmp(reg->name, "XSCALE_RX") == 0)
	{
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
		return xscale_write_rx(arch_info->target);
	}
	else if (strcmp(reg->name, "XSCALE_TX") == 0)
	{
		/* can't write to TX register (debug-handler -> host) */
		return ERROR_OK;
	}
	else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
	{
		/* can't (explicitly) write to TXRXCTRL register */
		return ERROR_OK;
	}
	else /* Other DBG registers have to be transfered by the debug handler */
	{
		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, arch_info->dbg_handler_number);

		/* send CP register value and mirror it into the cache */
		xscale_send_u32(target, value);
		buf_set_u32(reg->value, 0, 32, value);
	}

	return ERROR_OK;
}
2477
2478 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2479 {
2480 struct xscale_common *xscale = target_to_xscale(target);
2481 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2482 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2483
2484 /* send CP write request (command 0x41) */
2485 xscale_send_u32(target, 0x41);
2486
2487 /* send CP register number */
2488 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2489
2490 /* send CP register value */
2491 xscale_send_u32(target, value);
2492 buf_set_u32(dcsr->value, 0, 32, value);
2493
2494 return ERROR_OK;
2495 }
2496
2497 static int xscale_read_trace(struct target *target)
2498 {
2499 struct xscale_common *xscale = target_to_xscale(target);
2500 struct arm *armv4_5 = &xscale->armv4_5_common;
2501 struct xscale_trace_data **trace_data_p;
2502
2503 /* 258 words from debug handler
2504 * 256 trace buffer entries
2505 * 2 checkpoint addresses
2506 */
2507 uint32_t trace_buffer[258];
2508 int is_address[256];
2509 int i, j;
2510
2511 if (target->state != TARGET_HALTED)
2512 {
2513 LOG_WARNING("target must be stopped to read trace data");
2514 return ERROR_TARGET_NOT_HALTED;
2515 }
2516
2517 /* send read trace buffer command (command 0x61) */
2518 xscale_send_u32(target, 0x61);
2519
2520 /* receive trace buffer content */
2521 xscale_receive(target, trace_buffer, 258);
2522
2523 /* parse buffer backwards to identify address entries */
2524 for (i = 255; i >= 0; i--)
2525 {
2526 is_address[i] = 0;
2527 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2528 ((trace_buffer[i] & 0xf0) == 0xd0))
2529 {
2530 if (i >= 3)
2531 is_address[--i] = 1;
2532 if (i >= 2)
2533 is_address[--i] = 1;
2534 if (i >= 1)
2535 is_address[--i] = 1;
2536 if (i >= 0)
2537 is_address[--i] = 1;
2538 }
2539 }
2540
2541
2542 /* search first non-zero entry */
2543 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2544 ;
2545
2546 if (j == 256)
2547 {
2548 LOG_DEBUG("no trace data collected");
2549 return ERROR_XSCALE_NO_TRACE_DATA;
2550 }
2551
2552 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2553 ;
2554
2555 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2556 (*trace_data_p)->next = NULL;
2557 (*trace_data_p)->chkpt0 = trace_buffer[256];
2558 (*trace_data_p)->chkpt1 = trace_buffer[257];
2559 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2560 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2561 (*trace_data_p)->depth = 256 - j;
2562
2563 for (i = j; i < 256; i++)
2564 {
2565 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2566 if (is_address[i])
2567 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2568 else
2569 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2570 }
2571
2572 return ERROR_OK;
2573 }
2574
/* Read and decode the instruction at xscale->trace.current_pc, using
 * the previously loaded trace image as the memory source.
 *
 * Returns ERROR_TRACE_IMAGE_UNAVAILABLE when no image is loaded,
 * ERROR_TRACE_INSTRUCTION_UNAVAILABLE when the PC lies outside every
 * image section or the section read fails, ERROR_OK on success.
 */
static int xscale_read_instruction(struct target *target,
		struct arm_instruction *instruction)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int i;
	int section = -1;
	size_t size_read;
	uint32_t opcode;
	int retval;

	if (!xscale->trace.image)
		return ERROR_TRACE_IMAGE_UNAVAILABLE;

	/* search for the section the current instruction belongs to */
	for (i = 0; i < xscale->trace.image->num_sections; i++)
	{
		if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
			(xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
		{
			section = i;
			break;
		}
	}

	if (section == -1)
	{
		/* current instruction couldn't be found in the image */
		return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
	}

	if (xscale->trace.core_state == ARM_STATE_ARM)
	{
		/* 32-bit ARM instruction */
		uint8_t buf[4];
		if ((retval = image_read_section(xscale->trace.image, section,
			xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
			4, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction: %i", retval);
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u32(target, buf);
		arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
	}
	else if (xscale->trace.core_state == ARM_STATE_THUMB)
	{
		/* 16-bit Thumb instruction */
		uint8_t buf[2];
		if ((retval = image_read_section(xscale->trace.image, section,
			xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
			2, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction: %i", retval);
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u16(target, buf);
		thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
	}
	else
	{
		/* trace reconstruction only tracks ARM and Thumb state */
		LOG_ERROR("BUG: unknown core state encountered");
		exit(-1);
	}

	return ERROR_OK;
}
2639
2640 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2641 int i, uint32_t *target)
2642 {
2643 /* if there are less than four entries prior to the indirect branch message
2644 * we can't extract the address */
2645 if (i < 4)
2646 {
2647 return -1;
2648 }
2649
2650 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2651 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2652
2653 return 0;
2654 }
2655
2656 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2657 {
2658 struct xscale_common *xscale = target_to_xscale(target);
2659 int next_pc_ok = 0;
2660 uint32_t next_pc = 0x0;
2661 struct xscale_trace_data *trace_data = xscale->trace.data;
2662 int retval;
2663
2664 while (trace_data)
2665 {
2666 int i, chkpt;
2667 int rollover;
2668 int branch;
2669 int exception;
2670 xscale->trace.core_state = ARM_STATE_ARM;
2671
2672 chkpt = 0;
2673 rollover = 0;
2674
2675 for (i = 0; i < trace_data->depth; i++)
2676 {
2677 next_pc_ok = 0;
2678 branch = 0;
2679 exception = 0;
2680
2681 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2682 continue;
2683
2684 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2685 {
2686 case 0: /* Exceptions */
2687 case 1:
2688 case 2:
2689 case 3:
2690 case 4:
2691 case 5:
2692 case 6:
2693 case 7:
2694 exception = (trace_data->entries[i].data & 0x70) >> 4;
2695 next_pc_ok = 1;
2696 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2697 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2698 break;
2699 case 8: /* Direct Branch */
2700 branch = 1;
2701 break;
2702 case 9: /* Indirect Branch */
2703 branch = 1;
2704 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2705 {
2706 next_pc_ok = 1;
2707 }
2708 break;
2709 case 13: /* Checkpointed Indirect Branch */
2710 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2711 {
2712 next_pc_ok = 1;
2713 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2714 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2715 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2716 }
2717 /* explicit fall-through */
2718 case 12: /* Checkpointed Direct Branch */
2719 branch = 1;
2720 if (chkpt == 0)
2721 {
2722 next_pc_ok = 1;
2723 next_pc = trace_data->chkpt0;
2724 chkpt++;
2725 }
2726 else if (chkpt == 1)
2727 {
2728 next_pc_ok = 1;
2729 next_pc = trace_data->chkpt0;
2730 chkpt++;
2731 }
2732 else
2733 {
2734 LOG_WARNING("more than two checkpointed branches encountered");
2735 }
2736 break;
2737 case 15: /* Roll-over */
2738 rollover++;
2739 continue;
2740 default: /* Reserved */
2741 command_print(cmd_ctx, "--- reserved trace message ---");
2742 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2743 return ERROR_OK;
2744 }
2745
2746 if (xscale->trace.pc_ok)
2747 {
2748 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2749 struct arm_instruction instruction;
2750
2751 if ((exception == 6) || (exception == 7))
2752 {
2753 /* IRQ or FIQ exception, no instruction executed */
2754 executed -= 1;
2755 }
2756
2757 while (executed-- >= 0)
2758 {
2759 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2760 {
2761 /* can't continue tracing with no image available */
2762 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2763 {
2764 return retval;
2765 }
2766 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2767 {
2768 /* TODO: handle incomplete images */
2769 }
2770 }
2771
2772 /* a precise abort on a load to the PC is included in the incremental
2773 * word count, other instructions causing data aborts are not included
2774 */
2775 if ((executed == 0) && (exception == 4)
2776 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2777 {
2778 if ((instruction.type == ARM_LDM)
2779 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2780 {
2781 executed--;
2782 }
2783 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2784 && (instruction.info.load_store.Rd != 15))
2785 {
2786 executed--;
2787 }
2788 }
2789
2790 /* only the last instruction executed
2791 * (the one that caused the control flow change)
2792 * could be a taken branch
2793 */
2794 if (((executed == -1) && (branch == 1)) &&
2795 (((instruction.type == ARM_B) ||
2796 (instruction.type == ARM_BL) ||
2797 (instruction.type == ARM_BLX)) &&
2798 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2799 {
2800 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2801 }
2802 else
2803 {
2804 xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2;
2805 }
2806 command_print(cmd_ctx, "%s", instruction.text);
2807 }
2808
2809 rollover = 0;
2810 }
2811
2812 if (next_pc_ok)
2813 {
2814 xscale->trace.current_pc = next_pc;
2815 xscale->trace.pc_ok = 1;
2816 }
2817 }
2818
2819 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2)
2820 {
2821 struct arm_instruction instruction;
2822 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2823 {
2824 /* can't continue tracing with no image available */
2825 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2826 {
2827 return retval;
2828 }
2829 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2830 {
2831 /* TODO: handle incomplete images */
2832 }
2833 }
2834 command_print(cmd_ctx, "%s", instruction.text);
2835 }
2836
2837 trace_data = trace_data->next;
2838 }
2839
2840 return ERROR_OK;
2841 }
2842
/* accessors shared by every register in the XScale-specific reg cache */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2847
/* Build the target's register caches: the common ARM core registers
 * first, then a second cache holding the XScale debug registers, each
 * entry bound to this target via its xscale_reg arch_info.
 */
static void xscale_build_reg_cache(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	/* one arch_info slot per cached register (sizeof of the whole
	 * template array).  NOTE(review): malloc/calloc results in this
	 * function are not checked. */
	struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
	int i;
	int num_regs = ARRAY_SIZE(xscale_reg_arch_info);

	(*cache_p) = arm_build_reg_cache(target, armv4_5);

	/* chain the XScale cache behind the ARM core cache */
	(*cache_p)->next = malloc(sizeof(struct reg_cache));
	cache_p = &(*cache_p)->next;

	/* fill in values for the xscale reg cache */
	(*cache_p)->name = "XScale registers";
	(*cache_p)->next = NULL;
	(*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
	(*cache_p)->num_regs = num_regs;

	for (i = 0; i < num_regs; i++)
	{
		(*cache_p)->reg_list[i].name = xscale_reg_list[i];
		(*cache_p)->reg_list[i].value = calloc(4, 1);
		(*cache_p)->reg_list[i].dirty = 0;
		(*cache_p)->reg_list[i].valid = 0;
		(*cache_p)->reg_list[i].size = 32;
		(*cache_p)->reg_list[i].arch_info = &arch_info[i];
		(*cache_p)->reg_list[i].type = &xscale_reg_type;
		/* copy the per-register template and bind it to this target */
		arch_info[i] = xscale_reg_arch_info[i];
		arch_info[i].target = target;
	}

	xscale->reg_cache = (*cache_p);
}
2883
/* target_type init_target callback: set up the register caches */
static int xscale_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2890
/* One-time initialization of the XScale-specific target state:
 * JTAG IR length (derived from the variant name, if given), debug
 * handler placement, exception vector stubs, breakpoint/watchpoint
 * bookkeeping, trace state, and the ARMv4/5 core/MMU glue.
 */
static int xscale_init_arch_info(struct target *target,
		struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specfic data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* a variant param ... */
	if (variant) {
		int ir_length = 0;

		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else if (strcmp(variant, "pxa3xx") == 0)
			ir_length = 11;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		/* the variant name wins over a mismatched config file value */
		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* PXA3xx shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	/* reset vector (index 0) branches into the debug handler */
	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* remaining vectors default to a branch-to-self placeholder */
	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction breakpoint comparators (IBCR0/IBCR1) ... */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	/* ... and two data breakpoint (watchpoint) comparators (DBR0/DBR1) */
	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
			target_name(target));

	/* host-endian breakpoint instruction encodings */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	xscale->trace.capture_status = TRACE_IDLE;
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.buffer_enabled = 0;
	xscale->trace.buffer_fill = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	arm_init_arch_info(target, armv4_5);

	/* MMU/cache glue used by the generic armv4_5_mmu helpers */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
3006
/* target_type target_create callback: allocate and initialize the
 * XScale-specific target state.
 */
static int xscale_target_create(struct target *target, Jim_Interp *interp)
{
	struct xscale_common *xscale;

	/* the debug handler image must fit in 2kB (0x800 bytes) */
	if (sizeof xscale_debug_handler - 1 > 0x800) {
		LOG_ERROR("debug_handler.bin: larger than 2kb");
		return ERROR_FAIL;
	}

	xscale = calloc(1, sizeof(*xscale));
	if (!xscale)
		return ERROR_FAIL;

	return xscale_init_arch_info(target, xscale, target->tap,
			target->variant);
}
3023
3024 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3025 {
3026 struct target *target = NULL;
3027 struct xscale_common *xscale;
3028 int retval;
3029 uint32_t handler_address;
3030
3031 if (CMD_ARGC < 2)
3032 {
3033 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3034 return ERROR_OK;
3035 }
3036
3037 if ((target = get_target(CMD_ARGV[0])) == NULL)
3038 {
3039 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3040 return ERROR_FAIL;
3041 }
3042
3043 xscale = target_to_xscale(target);
3044 retval = xscale_verify_pointer(CMD_CTX, xscale);
3045 if (retval != ERROR_OK)
3046 return retval;
3047
3048 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3049
3050 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3051 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3052 {
3053 xscale->handler_address = handler_address;
3054 }
3055 else
3056 {
3057 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3058 return ERROR_FAIL;
3059 }
3060
3061 return ERROR_OK;
3062 }
3063
3064 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3065 {
3066 struct target *target = NULL;
3067 struct xscale_common *xscale;
3068 int retval;
3069 uint32_t cache_clean_address;
3070
3071 if (CMD_ARGC < 2)
3072 {
3073 return ERROR_COMMAND_SYNTAX_ERROR;
3074 }
3075
3076 target = get_target(CMD_ARGV[0]);
3077 if (target == NULL)
3078 {
3079 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3080 return ERROR_FAIL;
3081 }
3082 xscale = target_to_xscale(target);
3083 retval = xscale_verify_pointer(CMD_CTX, xscale);
3084 if (retval != ERROR_OK)
3085 return retval;
3086
3087 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3088
3089 if (cache_clean_address & 0xffff)
3090 {
3091 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3092 }
3093 else
3094 {
3095 xscale->cache_clean_address = cache_clean_address;
3096 }
3097
3098 return ERROR_OK;
3099 }
3100
3101 COMMAND_HANDLER(xscale_handle_cache_info_command)
3102 {
3103 struct target *target = get_current_target(CMD_CTX);
3104 struct xscale_common *xscale = target_to_xscale(target);
3105 int retval;
3106
3107 retval = xscale_verify_pointer(CMD_CTX, xscale);
3108 if (retval != ERROR_OK)
3109 return retval;
3110
3111 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3112 }
3113
/* target_type virt2phys callback: translate @virtual to a physical
 * address by walking the translation table via armv4_5_mmu_translate_va().
 */
static int xscale_virt2phys(struct target *target,
		uint32_t virtual, uint32_t *physical)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int type;
	uint32_t cb;
	int domain;
	uint32_t ap;

	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		LOG_ERROR(xscale_not);
		return ERROR_TARGET_INVALID;
	}

	/* NOTE(review): ret carries either the physical address or, when
	 * type == -1, an error code -- the two uses share one uint32_t;
	 * verify against armv4_5_mmu_translate_va()'s contract. */
	uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
	if (type == -1)
	{
		return ret;
	}
	*physical = ret;
	return ERROR_OK;
}
3136
3137 static int xscale_mmu(struct target *target, int *enabled)
3138 {
3139 struct xscale_common *xscale = target_to_xscale(target);
3140
3141 if (target->state != TARGET_HALTED)
3142 {
3143 LOG_ERROR("Target not halted");
3144 return ERROR_TARGET_INVALID;
3145 }
3146 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3147 return ERROR_OK;
3148 }
3149
/* "xscale mmu ['enable'|'disable']" -- query or switch the MMU.
 * With no argument, just reports the current state.
 */
COMMAND_HANDLER(xscale_handle_mmu_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC >= 1)
	{
		bool enable;
		COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
		/* second argument of xscale_{en,dis}able_mmu_caches selects
		 * the MMU (1); caches are left untouched (0, 0) */
		if (enable)
			xscale_enable_mmu_caches(target, 1, 0, 0);
		else
			xscale_disable_mmu_caches(target, 1, 0, 0);
		xscale->armv4_5_mmu.mmu_enabled = enable;
	}

	/* always report the (possibly just updated) state */
	command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");

	return ERROR_OK;
}
3181
3182 COMMAND_HANDLER(xscale_handle_idcache_command)
3183 {
3184 struct target *target = get_current_target(CMD_CTX);
3185 struct xscale_common *xscale = target_to_xscale(target);
3186
3187 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3188 if (retval != ERROR_OK)
3189 return retval;
3190
3191 if (target->state != TARGET_HALTED)
3192 {
3193 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3194 return ERROR_OK;
3195 }
3196
3197 bool icache;
3198 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3199
3200 if (CMD_ARGC >= 1)
3201 {
3202 bool enable;
3203 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3204 if (enable)
3205 xscale_enable_mmu_caches(target, 1, 0, 0);
3206 else
3207 xscale_disable_mmu_caches(target, 1, 0, 0);
3208 if (icache)
3209 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3210 else
3211 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3212 }
3213
3214 bool enabled = icache ?
3215 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3216 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3217 const char *msg = enabled ? "enabled" : "disabled";
3218 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3219
3220 return ERROR_OK;
3221 }
3222
3223 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3224 {
3225 struct target *target = get_current_target(CMD_CTX);
3226 struct xscale_common *xscale = target_to_xscale(target);
3227 int retval;
3228
3229 retval = xscale_verify_pointer(CMD_CTX, xscale);
3230 if (retval != ERROR_OK)
3231 return retval;
3232
3233 if (CMD_ARGC < 1)
3234 {
3235 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3236 }
3237 else
3238 {
3239 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3240 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3241 xscale_write_dcsr(target, -1, -1);
3242 }
3243
3244 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3245
3246 return ERROR_OK;
3247 }
3248
3249
/* "xscale vector_table [<high|low> <index> <code>]"
 *
 * Without arguments: list the user-configured static vector entries.
 * With arguments: store instruction word <code> as the static entry
 * for exception vector <index> (1..7) in the low or high vector table.
 */
COMMAND_HANDLER(xscale_handle_vector_table_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int err = 0;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC == 0)	/* print current settings */
	{
		int idx;

		command_print(CMD_CTX, "active user-set static vectors:");
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_low_vectors_set & (1 << idx))
				command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_high_vectors_set & (1 << idx))
				command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
		return ERROR_OK;
	}

	if (CMD_ARGC != 3)
		err = 1;
	else
	{
		int idx;
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
		uint32_t vec;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);

		/* only vectors 1..7 may be set through this command */
		if (idx < 1 || idx >= 8)
			err = 1;

		if (!err && strcmp(CMD_ARGV[0], "low") == 0)
		{
			xscale->static_low_vectors_set |= (1<<idx);
			xscale->static_low_vectors[idx] = vec;
		}
		else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
		{
			xscale->static_high_vectors_set |= (1<<idx);
			xscale->static_high_vectors[idx] = vec;
		}
		else
			err = 1;
	}

	if (err)
		command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");

	return ERROR_OK;
}
3306
3307
3308 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3309 {
3310 struct target *target = get_current_target(CMD_CTX);
3311 struct xscale_common *xscale = target_to_xscale(target);
3312 struct arm *armv4_5 = &xscale->armv4_5_common;
3313 uint32_t dcsr_value;
3314 int retval;
3315
3316 retval = xscale_verify_pointer(CMD_CTX, xscale);
3317 if (retval != ERROR_OK)
3318 return retval;
3319
3320 if (target->state != TARGET_HALTED)
3321 {
3322 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3323 return ERROR_OK;
3324 }
3325
3326 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3327 {
3328 struct xscale_trace_data *td, *next_td;
3329 xscale->trace.buffer_enabled = 1;
3330
3331 /* free old trace data */
3332 td = xscale->trace.data;
3333 while (td)
3334 {
3335 next_td = td->next;
3336
3337 if (td->entries)
3338 free(td->entries);
3339 free(td);
3340 td = next_td;
3341 }
3342 xscale->trace.data = NULL;
3343 }
3344 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3345 {
3346 xscale->trace.buffer_enabled = 0;
3347 }
3348
3349 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3350 {
3351 uint32_t fill = 1;
3352 if (CMD_ARGC >= 3)
3353 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3354 xscale->trace.buffer_fill = fill;
3355 }
3356 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3357 {
3358 xscale->trace.buffer_fill = -1;
3359 }
3360
3361 if (xscale->trace.buffer_enabled)
3362 {
3363 /* if we enable the trace buffer in fill-once
3364 * mode we know the address of the first instruction */
3365 xscale->trace.pc_ok = 1;
3366 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3367 }
3368 else
3369 {
3370 /* otherwise the address is unknown, and we have no known good PC */
3371 xscale->trace.pc_ok = 0;
3372 }
3373
3374 command_print(CMD_CTX, "trace buffer %s (%s)",
3375 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3376 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3377
3378 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3379 if (xscale->trace.buffer_fill >= 0)
3380 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3381 else
3382 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3383
3384 return ERROR_OK;
3385 }
3386
3387 COMMAND_HANDLER(xscale_handle_trace_image_command)
3388 {
3389 struct target *target = get_current_target(CMD_CTX);
3390 struct xscale_common *xscale = target_to_xscale(target);
3391 int retval;
3392
3393 if (CMD_ARGC < 1)
3394 {
3395 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3396 return ERROR_OK;
3397 }
3398
3399 retval = xscale_verify_pointer(CMD_CTX, xscale);
3400 if (retval != ERROR_OK)
3401 return retval;
3402
3403 if (xscale->trace.image)
3404 {
3405 image_close(xscale->trace.image);
3406 free(xscale->trace.image);
3407 command_print(CMD_CTX, "previously loaded image found and closed");
3408 }
3409
3410 xscale->trace.image = malloc(sizeof(struct image));
3411 xscale->trace.image->base_address_set = 0;
3412 xscale->trace.image->start_address_set = 0;
3413
3414 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3415 if (CMD_ARGC >= 2)
3416 {
3417 xscale->trace.image->base_address_set = 1;
3418 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3419 }
3420 else
3421 {
3422 xscale->trace.image->base_address_set = 0;
3423 }
3424
3425 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3426 {
3427 free(xscale->trace.image);
3428 xscale->trace.image = NULL;
3429 return ERROR_OK;
3430 }
3431
3432 return ERROR_OK;
3433 }
3434
3435 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3436 {
3437 struct target *target = get_current_target(CMD_CTX);
3438 struct xscale_common *xscale = target_to_xscale(target);
3439 struct xscale_trace_data *trace_data;
3440 struct fileio file;
3441 int retval;
3442
3443 retval = xscale_verify_pointer(CMD_CTX, xscale);
3444 if (retval != ERROR_OK)
3445 return retval;
3446
3447 if (target->state != TARGET_HALTED)
3448 {
3449 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3450 return ERROR_OK;
3451 }
3452
3453 if (CMD_ARGC < 1)
3454 {
3455 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3456 return ERROR_OK;
3457 }
3458
3459 trace_data = xscale->trace.data;
3460
3461 if (!trace_data)
3462 {
3463 command_print(CMD_CTX, "no trace data collected");
3464 return ERROR_OK;
3465 }
3466
3467 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3468 {
3469 return ERROR_OK;
3470 }
3471
3472 while (trace_data)
3473 {
3474 int i;
3475
3476 fileio_write_u32(&file, trace_data->chkpt0);
3477 fileio_write_u32(&file, trace_data->chkpt1);
3478 fileio_write_u32(&file, trace_data->last_instruction);
3479 fileio_write_u32(&file, trace_data->depth);
3480
3481 for (i = 0; i < trace_data->depth; i++)
3482 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3483
3484 trace_data = trace_data->next;
3485 }
3486
3487 fileio_close(&file);
3488
3489 return ERROR_OK;
3490 }
3491
3492 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3493 {
3494 struct target *target = get_current_target(CMD_CTX);
3495 struct xscale_common *xscale = target_to_xscale(target);
3496 int retval;
3497
3498 retval = xscale_verify_pointer(CMD_CTX, xscale);
3499 if (retval != ERROR_OK)
3500 return retval;
3501
3502 xscale_analyze_trace(target, CMD_CTX);
3503
3504 return ERROR_OK;
3505 }
3506
/* "xscale cp15" command: read or write a coprocessor 15 register via the
 * on-chip debug handler.  One argument reads the register through the
 * register cache; two arguments send a raw CP-write request to the
 * debug handler.  The target must be halted. */
COMMAND_HANDLER(xscale_handle_cp15)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}
	uint32_t reg_no = 0;
	struct reg *reg = NULL;
	if (CMD_ARGC > 0)
	{
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
		/* translate from xscale cp15 register number to the
		 * corresponding OpenOCD register-cache index; only the
		 * registers listed below are accessible this way */
		switch (reg_no)
		{
		case 0:
			reg_no = XSCALE_MAINID;
			break;
		case 1:
			reg_no = XSCALE_CTRL;
			break;
		case 2:
			reg_no = XSCALE_TTB;
			break;
		case 3:
			reg_no = XSCALE_DAC;
			break;
		case 5:
			reg_no = XSCALE_FSR;
			break;
		case 6:
			reg_no = XSCALE_FAR;
			break;
		case 13:
			reg_no = XSCALE_PID;
			break;
		case 15:
			reg_no = XSCALE_CPACCESS;
			break;
		default:
			command_print(CMD_CTX, "invalid register number");
			return ERROR_INVALID_ARGUMENTS;
		}
		reg = &xscale->reg_cache->reg_list[reg_no];

	}
	if (CMD_ARGC == 1)
	{
		uint32_t value;

		/* read the selected cp15 register through the register cache */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
	}
	else if (CMD_ARGC == 2)
	{
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);

		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number
		 * NOTE(review): this sends the translated register-cache index,
		 * not the raw cp15 number — presumably that is what the debug
		 * handler's register table expects; confirm against the handler */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);
	}
	else
	{
		command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
	}

	return ERROR_OK;
}
3594
3595 static const struct command_registration xscale_exec_command_handlers[] = {
3596 {
3597 .name = "cache_info",
3598 .handler = &xscale_handle_cache_info_command,
3599 .mode = COMMAND_EXEC, NULL,
3600 },
3601
3602 {
3603 .name = "mmu",
3604 .handler = &xscale_handle_mmu_command,
3605 .mode = COMMAND_EXEC,
3606 .usage = "[enable|disable]",
3607 .help = "enable or disable the MMU",
3608 },
3609 {
3610 .name = "icache",
3611 .handler = &xscale_handle_idcache_command,
3612 .mode = COMMAND_EXEC,
3613 .usage = "[enable|disable]",
3614 .help = "enable or disable the ICache",
3615 },
3616 {
3617 .name = "dcache",
3618 .handler = &xscale_handle_idcache_command,
3619 .mode = COMMAND_EXEC,
3620 .usage = "[enable|disable]",
3621 .help = "enable or disable the DCache",
3622 },
3623
3624 {
3625 .name = "vector_catch",
3626 .handler = &xscale_handle_vector_catch_command,
3627 .mode = COMMAND_EXEC,
3628 .help = "mask of vectors that should be caught",
3629 .usage = "[<mask>]",
3630 },
3631 {
3632 .name = "vector_table",
3633 .handler = &xscale_handle_vector_table_command,
3634 .mode = COMMAND_EXEC,
3635 .usage = "<high|low> <index> <code>",
3636 .help = "set static code for exception handler entry",
3637 },
3638
3639 {
3640 .name = "trace_buffer",
3641 .handler = &xscale_handle_trace_buffer_command,
3642 .mode = COMMAND_EXEC,
3643 .usage = "<enable | disable> [fill [n]|wrap]",
3644 },
3645 {
3646 .name = "dump_trace",
3647 .handler = &xscale_handle_dump_trace_command,
3648 .mode = COMMAND_EXEC,
3649 .help = "dump content of trace buffer to <file>",
3650 .usage = "<file>",
3651 },
3652 {
3653 .name = "analyze_trace",
3654 .handler = &xscale_handle_analyze_trace_buffer_command,
3655 .mode = COMMAND_EXEC,
3656 .help = "analyze content of trace buffer",
3657 },
3658 {
3659 .name = "trace_image",
3660 .handler = &xscale_handle_trace_image_command,
3661 COMMAND_EXEC,
3662 .help = "load image from <file> [base address]",
3663 .usage = "<file> [address] [type]",
3664 },
3665
3666 {
3667 .name = "cp15",
3668 .handler = &xscale_handle_cp15,
3669 .mode = COMMAND_EXEC,
3670 .help = "access coproc 15",
3671 .usage = "<register> [value]",
3672 },
3673 COMMAND_REGISTRATION_DONE
3674 };
/* "xscale" subcommands usable in any mode (config or exec); also chains
 * in the EXEC-only commands defined above. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = &xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.usage = "<target#> <address>",
	},
	{
		.name = "cache_clean_address",
		.handler = &xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
	},
	{
		/* pull in the EXEC-mode command table */
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for XScale targets: the generic ARM
 * commands plus the "xscale" command group defined above. */
static const struct command_registration xscale_command_handlers[] = {
	{
		/* shared ARM commands (reg, disassemble, ...) */
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3704
/* XScale target driver: operation table registered with the OpenOCD
 * target framework.  NULL entries are operations this target does not
 * support (e.g. soft_reset_halt). */
struct target_type xscale_target =
{
	.name = "xscale",

	/* state polling and reporting */
	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	/* run control */
	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	/* reset handling; soft reset-halt is not implemented */
	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = NULL,

	/* GDB register access uses the generic ARM layout */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* memory access, both virtual and physical */
	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	/* checksum/blank-check via downloaded ARM algorithms */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* breakpoints and watchpoints */
	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	/* MMU support for virtual/physical translation */
	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)