XScale: initial PXA3xx support
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include "time_support.h"
37 #include "register.h"
38 #include "image.h"
39
40
41 /*
42 * Important XScale documents available as of October 2009 include:
43 *
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
48 *
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
56 *
57 * Chip-specific microarchitecture documents may also be useful.
58 */
59
60
61 /* forward declarations */
62 static int xscale_resume(struct target *, int current,
63 uint32_t address, int handle_breakpoints, int debug_execution);
64 static int xscale_debug_entry(struct target *);
65 static int xscale_restore_context(struct target *);
66 static int xscale_get_reg(struct reg *reg);
67 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
68 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
69 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
70 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_read_trace(struct target *);
72
73
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
76 *
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
81 */
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
84
/* Names for the XScale-specific registers held in the private register
 * cache.  The index comments track position; the order must stay in sync
 * with xscale_reg_arch_info below and with the XSCALE_* enumerators used
 * to index reg_cache->reg_list throughout this file (e.g. XSCALE_DCSR,
 * XSCALE_TX, XSCALE_RX).
 */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID",		/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",			/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",			/* 20 */
	"XSCALE_TXRXCTRL",
};
110
/* Per-register architecture info, parallel to xscale_reg_list above.
 * The first field is presumably the register selector passed to the debug
 * handler when reading/writing the register — confirm against struct
 * xscale_reg; -1 marks the registers that are reached through dedicated
 * JTAG scan chains instead (TX, RX, TXRXCTRL, per the trailing comments).
 * The NULL second fields look like target back-pointers filled in when the
 * register cache is built — verify against the cache-construction code.
 */
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
	{-1, NULL}, /* TX accessed via JTAG */
	{-1, NULL}, /* RX accessed via JTAG */
	{-1, NULL}, /* TXRXCTRL implicit access via JTAG */
};
136
137 /* convenience wrapper to access XScale specific registers */
/* Convenience wrapper: pack a host-order 32-bit value into the buffer
 * layout expected by xscale_set_reg() and write it to the register.
 */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t packed[4];

	buf_set_u32(packed, 0, 32, value);
	return xscale_set_reg(reg, packed);
}
146
147 static const char xscale_not[] = "target is not an XScale";
148
149 static int xscale_verify_pointer(struct command_context *cmd_ctx,
150 struct xscale_common *xscale)
151 {
152 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
153 command_print(cmd_ctx, xscale_not);
154 return ERROR_TARGET_INVALID;
155 }
156 return ERROR_OK;
157 }
158
159 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
160 {
161 if (tap == NULL)
162 return ERROR_FAIL;
163
164 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
165 {
166 struct scan_field field;
167 uint8_t scratch[4];
168
169 memset(&field, 0, sizeof field);
170 field.tap = tap;
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
174
175 jtag_add_ir_scan(1, &field, jtag_get_end_state());
176 }
177
178 return ERROR_OK;
179 }
180
/* Read the Debug Control and Status Register via the JTAG SELDCSR chain,
 * caching the result in the XSCALE_DCSR register cache entry.
 *
 * The data scan is 3 + 32 + 1 bits.  The 3-bit field carries hold_rst and
 * external_debug_break out to the target while the 32-bit field shifts the
 * DCSR contents in.  A second scan writes the value straight back so those
 * control bits take effect.
 *
 * Returns ERROR_OK, or the error from jtag_execute_queue().
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_DRPAUSE);
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant);

	/* bit 1 = hold_rst, bit 2 = external debug break request */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* the cache now holds exactly what the hardware reported */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_set_end_state(TAP_IDLE);

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
247
248
249 static void xscale_getbuf(jtag_callback_data_t arg)
250 {
251 uint8_t *in = (uint8_t *)arg;
252 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
253 }
254
255 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
256 {
257 if (num_words == 0)
258 return ERROR_INVALID_ARGUMENTS;
259
260 struct xscale_common *xscale = target_to_xscale(target);
261 int retval = ERROR_OK;
262 tap_state_t path[3];
263 struct scan_field fields[3];
264 uint8_t *field0 = malloc(num_words * 1);
265 uint8_t field0_check_value = 0x2;
266 uint8_t field0_check_mask = 0x6;
267 uint32_t *field1 = malloc(num_words * 4);
268 uint8_t field2_check_value = 0x0;
269 uint8_t field2_check_mask = 0x1;
270 int words_done = 0;
271 int words_scheduled = 0;
272 int i;
273
274 path[0] = TAP_DRSELECT;
275 path[1] = TAP_DRCAPTURE;
276 path[2] = TAP_DRSHIFT;
277
278 memset(&fields, 0, sizeof fields);
279
280 fields[0].tap = target->tap;
281 fields[0].num_bits = 3;
282 fields[0].check_value = &field0_check_value;
283 fields[0].check_mask = &field0_check_mask;
284
285 fields[1].tap = target->tap;
286 fields[1].num_bits = 32;
287
288 fields[2].tap = target->tap;
289 fields[2].num_bits = 1;
290 fields[2].check_value = &field2_check_value;
291 fields[2].check_mask = &field2_check_mask;
292
293 jtag_set_end_state(TAP_IDLE);
294 xscale_jtag_set_instr(target->tap,
295 XSCALE_DBGTX << xscale->xscale_variant);
296 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
297
298 /* repeat until all words have been collected */
299 int attempts = 0;
300 while (words_done < num_words)
301 {
302 /* schedule reads */
303 words_scheduled = 0;
304 for (i = words_done; i < num_words; i++)
305 {
306 fields[0].in_value = &field0[i];
307
308 jtag_add_pathmove(3, path);
309
310 fields[1].in_value = (uint8_t *)(field1 + i);
311
312 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
313
314 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
315
316 words_scheduled++;
317 }
318
319 if ((retval = jtag_execute_queue()) != ERROR_OK)
320 {
321 LOG_ERROR("JTAG error while receiving data from debug handler");
322 break;
323 }
324
325 /* examine results */
326 for (i = words_done; i < num_words; i++)
327 {
328 if (!(field0[0] & 1))
329 {
330 /* move backwards if necessary */
331 int j;
332 for (j = i; j < num_words - 1; j++)
333 {
334 field0[j] = field0[j + 1];
335 field1[j] = field1[j + 1];
336 }
337 words_scheduled--;
338 }
339 }
340 if (words_scheduled == 0)
341 {
342 if (attempts++==1000)
343 {
344 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
345 retval = ERROR_TARGET_TIMEOUT;
346 break;
347 }
348 }
349
350 words_done += words_scheduled;
351 }
352
353 for (i = 0; i < num_words; i++)
354 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
355
356 free(field1);
357
358 return retval;
359 }
360
/* Read the debug handler's TX register (target -> debugger channel) via the
 * DBGTX scan chain, polling for up to one second for valid data.
 *
 * When @consume is nonzero the TAP goes straight Capture-DR -> Shift-DR,
 * which clears TX_READY; otherwise the longer Capture -> Exit1 -> Pause ->
 * Exit2 -> Shift path peeks at the register without consuming it.
 *
 * On success the value lands in the XSCALE_TX register cache entry.
 * Returns ERROR_OK on valid data, ERROR_TARGET_RESOURCE_NOT_AVAILABLE when
 * no data was ready (non-consuming peek), or ERROR_TARGET_TIMEOUT on JTAG
 * error or timeout.
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGTX << xscale->xscale_variant);

	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		*/
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* leave the loop when data is valid (bit 0 set) or when we were
		 * only peeking (!consume) */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
459
/* Write the cached XSCALE_RX value to the debug handler's RX register
 * (debugger -> target channel) via the DBGRX scan chain.
 *
 * Polls for up to one second until the handler has drained the previous
 * word (bit 0 of the 3-bit status field reads 0), then performs one final
 * scan with the trailing rx_valid bit set to hand the word over.
 *
 * Returns ERROR_OK, ERROR_TARGET_TIMEOUT on timeout, or a JTAG error.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
545
546 /* send count elements of size byte to the debug handler */
/* send count elements of size byte to the debug handler */
/* Stream @count elements of @size bytes (1, 2 or 4) from @buffer to the
 * debug handler's RX register via the DBGRX chain, without polling the
 * handshake bit between words (fast bulk download path).
 *
 * Elements are converted from the target's endianness to host order
 * before being queued.  The trailing 1-bit field is driven to 1
 * (t[2] = 1), which presumably asserts the same rx_valid handshake bit
 * that xscale_write_rx() sets explicitly — confirm against the DBGRX
 * chain description in the XScale debug documentation.
 *
 * Returns ERROR_OK, ERROR_INVALID_ARGUMENTS for an unsupported size,
 * or a JTAG error.
 */
static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t t[3];
	int bits[3];
	int retval;
	int done_count = 0;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant);

	bits[0]=3;
	t[0]=0;
	bits[1]=32;
	t[2]=1;
	bits[2]=1;
	int endianness = target->endianness;
	while (done_count++ < count)
	{
		switch (size)
		{
		case 4:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u32(buffer);
			} else
			{
				t[1]=be_to_h_u32(buffer);
			}
			break;
		case 2:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u16(buffer);
			} else
			{
				t[1]=be_to_h_u16(buffer);
			}
			break;
		case 1:
			t[1]=buffer[0];
			break;
		default:
			LOG_ERROR("BUG: size neither 4, 2 nor 1");
			return ERROR_INVALID_ARGUMENTS;
		}
		jtag_add_dr_out(target->tap,
			3,
			bits,
			t,
			jtag_set_end_state(TAP_IDLE));
		buffer += size;
	}

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
611
612 static int xscale_send_u32(struct target *target, uint32_t value)
613 {
614 struct xscale_common *xscale = target_to_xscale(target);
615
616 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
617 return xscale_write_rx(target);
618 }
619
/* Write the cached DCSR value to the target via the SELDCSR scan chain.
 *
 * @hold_rst / @ext_dbg_brk update the corresponding sticky flags in the
 * XScale private state when not -1; either way, the current flag values
 * are driven on bits 1 and 2 of the leading 3-bit field of the scan.
 *
 * Returns ERROR_OK, or the error from jtag_execute_queue().
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	/* -1 means "keep the current setting" */
	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant);

	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* the cache now matches the hardware again */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
679
680 /* parity of the number of bits 0 if even; 1 if odd. for 32 bit words */
/* parity of the number of bits: 0 if even, 1 if odd. for 32 bit words */
static unsigned int parity (unsigned int v)
{
	unsigned int result = 0;

	/* XOR every bit of v into the result, one bit at a time */
	while (v)
	{
		result ^= v & 1u;
		v >>= 1;
	}
	return result;
}
691
/* Load one 8-word cache line of instructions into the mini instruction
 * cache at virtual address @va via the LDIC JTAG instruction.
 *
 * The first scan carries a 6-bit command (0x3 = load Mini ICache) plus the
 * 27-bit line address (va >> 5); each of the 8 words then goes out as a
 * 32-bit value followed by a single parity bit.
 *
 * Returns the result of jtag_execute_queue().
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	/* rest of packet is a cacheline: 8 instructions, with parity */
	/* reuse the two fields: fields[0] now carries the 32-bit word,
	 * fields[1] the 1-bit parity (stored in cmd) */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* parity is over the bit count, so it is unaffected by the
		 * host-endian reinterpretation of the packed bytes */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(2, fields, jtag_get_end_state());
	}

	return jtag_execute_queue();
}
748
/* Queue an LDIC "invalidate IC line" command for the cache line containing
 * virtual address @va.
 *
 * Note: the DR scan is only queued here — no jtag_execute_queue() call —
 * so the caller must flush the queue afterwards (e.g. via the following
 * xscale_load_ic(), as xscale_update_vectors() does).
 */
static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	struct scan_field fields[2];

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant);

	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	return ERROR_OK;
}
780
781 static int xscale_update_vectors(struct target *target)
782 {
783 struct xscale_common *xscale = target_to_xscale(target);
784 int i;
785 int retval;
786
787 uint32_t low_reset_branch, high_reset_branch;
788
789 for (i = 1; i < 8; i++)
790 {
791 /* if there's a static vector specified for this exception, override */
792 if (xscale->static_high_vectors_set & (1 << i))
793 {
794 xscale->high_vectors[i] = xscale->static_high_vectors[i];
795 }
796 else
797 {
798 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
799 if (retval == ERROR_TARGET_TIMEOUT)
800 return retval;
801 if (retval != ERROR_OK)
802 {
803 /* Some of these reads will fail as part of normal execution */
804 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
805 }
806 }
807 }
808
809 for (i = 1; i < 8; i++)
810 {
811 if (xscale->static_low_vectors_set & (1 << i))
812 {
813 xscale->low_vectors[i] = xscale->static_low_vectors[i];
814 }
815 else
816 {
817 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
818 if (retval == ERROR_TARGET_TIMEOUT)
819 return retval;
820 if (retval != ERROR_OK)
821 {
822 /* Some of these reads will fail as part of normal execution */
823 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
824 }
825 }
826 }
827
828 /* calculate branches to debug handler */
829 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
830 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
831
832 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
833 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
834
835 /* invalidate and load exception vectors in mini i-cache */
836 xscale_invalidate_ic_line(target, 0x0);
837 xscale_invalidate_ic_line(target, 0xffff0000);
838
839 xscale_load_ic(target, 0x0, xscale->low_vectors);
840 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
841
842 return ERROR_OK;
843 }
844
/* Print the halted target's state for the user: core state, debug reason,
 * mode, cpsr, pc, MMU/cache enables and any XScale-specific extra reason
 * (processor reset / trace buffer full).
 */
static int xscale_arch_state(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;

	static const char *state[] =
	{
		"disabled", "enabled"
	};

	static const char *arch_dbg_reason[] =
	{
		"", "\n(processor reset)", "\n(trace buffer full)"
	};

	/* guard against being handed a non-ARM target's state */
	if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
	{
		LOG_ERROR("BUG: called for a non-ARMv4/5 target");
		return ERROR_INVALID_ARGUMENTS;
	}

	LOG_USER("target halted in %s state due to %s, current mode: %s\n"
			"cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
			"MMU: %s, D-Cache: %s, I-Cache: %s"
			"%s",
			 armv4_5_state_strings[armv4_5->core_state],
			 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
			 arm_mode_name(armv4_5->core_mode),
			 buf_get_u32(armv4_5->cpsr->value, 0, 32),
			 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
			 state[xscale->armv4_5_mmu.mmu_enabled],
			 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
			 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
			 arch_dbg_reason[xscale->arch_debug_reason]);

	return ERROR_OK;
}
882
883 static int xscale_poll(struct target *target)
884 {
885 int retval = ERROR_OK;
886
887 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
888 {
889 enum target_state previous_state = target->state;
890 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
891 {
892
893 /* there's data to read from the tx register, we entered debug state */
894 target->state = TARGET_HALTED;
895
896 /* process debug entry, fetching current mode regs */
897 retval = xscale_debug_entry(target);
898 }
899 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
900 {
901 LOG_USER("error while polling TX register, reset CPU");
902 /* here we "lie" so GDB won't get stuck and a reset can be perfomed */
903 target->state = TARGET_HALTED;
904 }
905
906 /* debug_entry could have overwritten target state (i.e. immediate resume)
907 * don't signal event handlers in that case
908 */
909 if (target->state != TARGET_HALTED)
910 return ERROR_OK;
911
912 /* if target was running, signal that we halted
913 * otherwise we reentered from debug execution */
914 if (previous_state == TARGET_RUNNING)
915 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
916 else
917 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
918 }
919
920 return retval;
921 }
922
/* Handle entry into debug state.
 *
 * Drains the register dump the debug handler sends over TX (r0, pc, r1-r7,
 * cpsr, then banked r8-r14 and optionally spsr), updates the register
 * cache, decodes the DCSR "method of entry" bits into a debug reason,
 * applies the -4 PC fixup, refreshes cached cache/MMU state, and collects
 * trace data when tracing is active (auto-resuming while the requested
 * fill count has not been reached).
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
	armv4_5->core_cache->reg_list[15].dirty = 1;
	armv4_5->core_cache->reg_list[15].valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	/* buffer[2..8] hold r1..r7 */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(armv4_5, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* an invalid mode in cpsr means the dump itself is garbage */
	if (!is_arm_mode(armv4_5->core_mode))
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
			arm_mode_name(armv4_5->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
	{
		xscale_receive(target, buffer, 8);
		buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to register cache */
	for (i = 8; i <= 14; i++)
	{
		buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
	}

	/* examine debug reason */
	/* MOE = DCSR bits [4:2] */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			/* NOTE(review): aborting the whole OpenOCD process here is
			 * drastic — consider returning an error instead; confirm no
			 * caller depends on this behavior before changing it. */
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}
1091
1092 static int xscale_halt(struct target *target)
1093 {
1094 struct xscale_common *xscale = target_to_xscale(target);
1095
1096 LOG_DEBUG("target->state: %s",
1097 target_state_name(target));
1098
1099 if (target->state == TARGET_HALTED)
1100 {
1101 LOG_DEBUG("target was already halted");
1102 return ERROR_OK;
1103 }
1104 else if (target->state == TARGET_UNKNOWN)
1105 {
1106 /* this must not happen for a xscale target */
1107 LOG_ERROR("target was in unknown state when halt was requested");
1108 return ERROR_TARGET_INVALID;
1109 }
1110 else if (target->state == TARGET_RESET)
1111 {
1112 LOG_DEBUG("target->state == TARGET_RESET");
1113 }
1114 else
1115 {
1116 /* assert external dbg break */
1117 xscale->external_debug_break = 1;
1118 xscale_read_dcsr(target);
1119
1120 target->debug_reason = DBG_REASON_DBGRQ;
1121 }
1122
1123 return ERROR_OK;
1124 }
1125
1126 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1127 {
1128 struct xscale_common *xscale = target_to_xscale(target);
1129 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1130 int retval;
1131
1132 if (xscale->ibcr0_used)
1133 {
1134 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1135
1136 if (ibcr0_bp)
1137 {
1138 xscale_unset_breakpoint(target, ibcr0_bp);
1139 }
1140 else
1141 {
1142 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1143 exit(-1);
1144 }
1145 }
1146
1147 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1148 return retval;
1149
1150 return ERROR_OK;
1151 }
1152
1153 static int xscale_disable_single_step(struct target *target)
1154 {
1155 struct xscale_common *xscale = target_to_xscale(target);
1156 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1157 int retval;
1158
1159 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1160 return retval;
1161
1162 return ERROR_OK;
1163 }
1164
1165 static void xscale_enable_watchpoints(struct target *target)
1166 {
1167 struct watchpoint *watchpoint = target->watchpoints;
1168
1169 while (watchpoint)
1170 {
1171 if (watchpoint->set == 0)
1172 xscale_set_watchpoint(target, watchpoint);
1173 watchpoint = watchpoint->next;
1174 }
1175 }
1176
1177 static void xscale_enable_breakpoints(struct target *target)
1178 {
1179 struct breakpoint *breakpoint = target->breakpoints;
1180
1181 /* set any pending breakpoints */
1182 while (breakpoint)
1183 {
1184 if (breakpoint->set == 0)
1185 xscale_set_breakpoint(target, breakpoint);
1186 breakpoint = breakpoint->next;
1187 }
1188 }
1189
/* Resume execution, optionally stepping over a breakpoint at the current PC
 * first.  The debug handler's resume protocol is: command 0x30 (or 0x31 with
 * a preceding 0x62 trace-buffer clean when tracing is enabled), then CPSR,
 * then r7..r0, then PC.
 */
static int xscale_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct breakpoint *breakpoint = target->breakpoints;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
		{
			uint32_t next_pc;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			}

			/* single-step over the breakpoint location using IBCR0 */
			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			xscale_restore_context(target);

			/* send resume request (command 0x30 or 0x31)
			 * clean the trace buffer if it is to be enabled (0x62) */
			if (xscale->trace.buffer_enabled)
			{
				xscale_send_u32(target, 0x62);
				xscale_send_u32(target, 0x31);
			}
			else
				xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));

			/* send r7 down to r0, as the debug handler expects them */
			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

			/* wait for and process debug entry */
			xscale_debug_entry(target);

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			/* the step is done; re-arm the original breakpoint */
			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	xscale_restore_context(target);

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* send r7 down to r0 */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		register_cache_invalidate(armv4_5->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
1345
/* Execute exactly one instruction by arming IBCR0 on the simulated next PC,
 * resuming via the debug handler protocol (0x30/0x31 + CPSR + r7..r0 + PC),
 * and waiting for re-entry into debug state.
 */
static int xscale_step_inner(struct target *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	/* break at the instruction after the one being stepped */
	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_context(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
		if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
			return retval;

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* send r7 down to r0, as the debug handler expects them */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
		return retval;
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1427
1428 static int xscale_step(struct target *target, int current,
1429 uint32_t address, int handle_breakpoints)
1430 {
1431 struct arm *armv4_5 = target_to_armv4_5(target);
1432 struct breakpoint *breakpoint = target->breakpoints;
1433
1434 uint32_t current_pc;
1435 int retval;
1436
1437 if (target->state != TARGET_HALTED)
1438 {
1439 LOG_WARNING("target not halted");
1440 return ERROR_TARGET_NOT_HALTED;
1441 }
1442
1443 /* current = 1: continue on current pc, otherwise continue at <address> */
1444 if (!current)
1445 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1446
1447 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1448
1449 /* if we're at the reset vector, we have to simulate the step */
1450 if (current_pc == 0x0)
1451 {
1452 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1453 return retval;
1454 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1455
1456 target->debug_reason = DBG_REASON_SINGLESTEP;
1457 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1458
1459 return ERROR_OK;
1460 }
1461
1462 /* the front-end may request us not to handle breakpoints */
1463 if (handle_breakpoints)
1464 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1465 {
1466 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1467 return retval;
1468 }
1469
1470 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1471
1472 if (breakpoint)
1473 {
1474 xscale_set_breakpoint(target, breakpoint);
1475 }
1476
1477 LOG_DEBUG("target stepped");
1478
1479 return ERROR_OK;
1480
1481 }
1482
/* Assert SRST while forcing the core into halt mode: DCSR is programmed with
 * Hold Reset + Trap Reset before reset is asserted, so the core stops in the
 * debug handler when reset is later released.
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, 0x7f);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1524
/* Release reset, reload the debug handler into the mini-instruction-cache,
 * and let the core run into the handler.  All hardware breakpoint/watchpoint
 * bookkeeping is cleared because the comparator registers lose their state
 * across reset.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* both IBCR and both DBR comparators are free again after reset */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	register_cache_invalidate(xscale->armv4_5_common.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			/* the mini-icache is loaded one 32-byte line at a time */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad a short final line (0xe1a08008 = "mov r8, r8" no-op) */
			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		/* the low and high vector tables are loaded separately */
		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1658
/* Stub: single core-register reads are not supported; callers must use
 * xscale_full_context() instead.  Logs an error but reports success.
 */
static int xscale_read_core_reg(struct target *target, struct reg *r,
		int num, enum armv4_5_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1666
/* Stub: single core-register writes are not supported; callers must use
 * xscale_restore_context() instead.  Logs an error but reports success.
 */
static int xscale_write_core_reg(struct target *target, struct reg *r,
		int num, enum armv4_5_mode mode, uint32_t value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1674
1675 static int xscale_full_context(struct target *target)
1676 {
1677 struct arm *armv4_5 = target_to_armv4_5(target);
1678
1679 uint32_t *buffer;
1680
1681 int i, j;
1682
1683 LOG_DEBUG("-");
1684
1685 if (target->state != TARGET_HALTED)
1686 {
1687 LOG_WARNING("target not halted");
1688 return ERROR_TARGET_NOT_HALTED;
1689 }
1690
1691 buffer = malloc(4 * 8);
1692
1693 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1694 * we can't enter User mode on an XScale (unpredictable),
1695 * but User shares registers with SYS
1696 */
1697 for (i = 1; i < 7; i++)
1698 {
1699 int valid = 1;
1700
1701 /* check if there are invalid registers in the current mode
1702 */
1703 for (j = 0; j <= 16; j++)
1704 {
1705 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1706 valid = 0;
1707 }
1708
1709 if (!valid)
1710 {
1711 uint32_t tmp_cpsr;
1712
1713 /* request banked registers */
1714 xscale_send_u32(target, 0x0);
1715
1716 tmp_cpsr = 0x0;
1717 tmp_cpsr |= armv4_5_number_to_mode(i);
1718 tmp_cpsr |= 0xc0; /* I/F bits */
1719
1720 /* send CPSR for desired mode */
1721 xscale_send_u32(target, tmp_cpsr);
1722
1723 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1724 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1725 {
1726 xscale_receive(target, buffer, 8);
1727 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1728 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1729 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1730 }
1731 else
1732 {
1733 xscale_receive(target, buffer, 7);
1734 }
1735
1736 /* move data from buffer to register cache */
1737 for (j = 8; j <= 14; j++)
1738 {
1739 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1740 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1741 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1742 }
1743 }
1744 }
1745
1746 free(buffer);
1747
1748 return ERROR_OK;
1749 }
1750
1751 static int xscale_restore_context(struct target *target)
1752 {
1753 struct arm *armv4_5 = target_to_armv4_5(target);
1754
1755 int i, j;
1756
1757 if (target->state != TARGET_HALTED)
1758 {
1759 LOG_WARNING("target not halted");
1760 return ERROR_TARGET_NOT_HALTED;
1761 }
1762
1763 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1764 * we can't enter User mode on an XScale (unpredictable),
1765 * but User shares registers with SYS
1766 */
1767 for (i = 1; i < 7; i++)
1768 {
1769 int dirty = 0;
1770
1771 /* check if there are invalid registers in the current mode
1772 */
1773 for (j = 8; j <= 14; j++)
1774 {
1775 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1776 dirty = 1;
1777 }
1778
1779 /* if not USR/SYS, check if the SPSR needs to be written */
1780 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1781 {
1782 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1783 dirty = 1;
1784 }
1785
1786 if (dirty)
1787 {
1788 uint32_t tmp_cpsr;
1789
1790 /* send banked registers */
1791 xscale_send_u32(target, 0x1);
1792
1793 tmp_cpsr = 0x0;
1794 tmp_cpsr |= armv4_5_number_to_mode(i);
1795 tmp_cpsr |= 0xc0; /* I/F bits */
1796
1797 /* send CPSR for desired mode */
1798 xscale_send_u32(target, tmp_cpsr);
1799
1800 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1801 for (j = 8; j <= 14; j++)
1802 {
1803 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, j).value, 0, 32));
1804 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1805 }
1806
1807 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1808 {
1809 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32));
1810 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1811 }
1812 }
1813 }
1814
1815 return ERROR_OK;
1816 }
1817
1818 static int xscale_read_memory(struct target *target, uint32_t address,
1819 uint32_t size, uint32_t count, uint8_t *buffer)
1820 {
1821 struct xscale_common *xscale = target_to_xscale(target);
1822 uint32_t *buf32;
1823 uint32_t i;
1824 int retval;
1825
1826 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1827
1828 if (target->state != TARGET_HALTED)
1829 {
1830 LOG_WARNING("target not halted");
1831 return ERROR_TARGET_NOT_HALTED;
1832 }
1833
1834 /* sanitize arguments */
1835 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1836 return ERROR_INVALID_ARGUMENTS;
1837
1838 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1839 return ERROR_TARGET_UNALIGNED_ACCESS;
1840
1841 /* send memory read request (command 0x1n, n: access size) */
1842 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1843 return retval;
1844
1845 /* send base address for read request */
1846 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1847 return retval;
1848
1849 /* send number of requested data words */
1850 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1851 return retval;
1852
1853 /* receive data from target (count times 32-bit words in host endianness) */
1854 buf32 = malloc(4 * count);
1855 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1856 return retval;
1857
1858 /* extract data from host-endian buffer into byte stream */
1859 for (i = 0; i < count; i++)
1860 {
1861 switch (size)
1862 {
1863 case 4:
1864 target_buffer_set_u32(target, buffer, buf32[i]);
1865 buffer += 4;
1866 break;
1867 case 2:
1868 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1869 buffer += 2;
1870 break;
1871 case 1:
1872 *buffer++ = buf32[i] & 0xff;
1873 break;
1874 default:
1875 LOG_ERROR("invalid read size");
1876 return ERROR_INVALID_ARGUMENTS;
1877 }
1878 }
1879
1880 free(buf32);
1881
1882 /* examine DCSR, to see if Sticky Abort (SA) got set */
1883 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1884 return retval;
1885 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1886 {
1887 /* clear SA bit */
1888 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1889 return retval;
1890
1891 return ERROR_TARGET_DATA_ABORT;
1892 }
1893
1894 return ERROR_OK;
1895 }
1896
/* Stub: physical-address reads are not implemented; only MMU-translated
 * (virtual) accesses via xscale_read_memory() are available.
 */
static int xscale_read_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	/** \todo: provide a non-stub implementtion of this routine. */
	LOG_ERROR("%s: %s is not implemented.  Disable MMU?",
			target_name(target), __func__);
	return ERROR_FAIL;
}
1905
1906 static int xscale_write_memory(struct target *target, uint32_t address,
1907 uint32_t size, uint32_t count, uint8_t *buffer)
1908 {
1909 struct xscale_common *xscale = target_to_xscale(target);
1910 int retval;
1911
1912 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1913
1914 if (target->state != TARGET_HALTED)
1915 {
1916 LOG_WARNING("target not halted");
1917 return ERROR_TARGET_NOT_HALTED;
1918 }
1919
1920 /* sanitize arguments */
1921 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1922 return ERROR_INVALID_ARGUMENTS;
1923
1924 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1925 return ERROR_TARGET_UNALIGNED_ACCESS;
1926
1927 /* send memory write request (command 0x2n, n: access size) */
1928 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1929 return retval;
1930
1931 /* send base address for read request */
1932 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1933 return retval;
1934
1935 /* send number of requested data words to be written*/
1936 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1937 return retval;
1938
1939 /* extract data from host-endian buffer into byte stream */
1940 #if 0
1941 for (i = 0; i < count; i++)
1942 {
1943 switch (size)
1944 {
1945 case 4:
1946 value = target_buffer_get_u32(target, buffer);
1947 xscale_send_u32(target, value);
1948 buffer += 4;
1949 break;
1950 case 2:
1951 value = target_buffer_get_u16(target, buffer);
1952 xscale_send_u32(target, value);
1953 buffer += 2;
1954 break;
1955 case 1:
1956 value = *buffer;
1957 xscale_send_u32(target, value);
1958 buffer += 1;
1959 break;
1960 default:
1961 LOG_ERROR("should never get here");
1962 exit(-1);
1963 }
1964 }
1965 #endif
1966 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1967 return retval;
1968
1969 /* examine DCSR, to see if Sticky Abort (SA) got set */
1970 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1971 return retval;
1972 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1973 {
1974 /* clear SA bit */
1975 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1976 return retval;
1977
1978 return ERROR_TARGET_DATA_ABORT;
1979 }
1980
1981 return ERROR_OK;
1982 }
1983
/* Stub: physical-address writes are not implemented; only MMU-translated
 * (virtual) accesses via xscale_write_memory() are available.
 */
static int xscale_write_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	/** \todo: provide a non-stub implementtion of this routine. */
	LOG_ERROR("%s: %s is not implemented.  Disable MMU?",
			target_name(target), __func__);
	return ERROR_FAIL;
}
1992
/* Bulk writes have no fast path on XScale; forward to the ordinary
 * word-sized write implementation.
 */
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
1998
1999 static uint32_t xscale_get_ttb(struct target *target)
2000 {
2001 struct xscale_common *xscale = target_to_xscale(target);
2002 uint32_t ttb;
2003
2004 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2005 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2006
2007 return ttb;
2008 }
2009
/* Disable the MMU and/or caches via the cp15 control register.  The DCache
 * is cleaned and invalidated (debug handler commands 0x50/0x51) BEFORE the
 * control register is rewritten, so no dirty lines are lost.
 */
static void xscale_disable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;

	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

	/* MMU enable is bit 0 of the cp15 control register */
	if (mmu)
		cp15_control &= ~0x1U;

	if (d_u_cache)
	{
		/* clean DCache */
		xscale_send_u32(target, 0x50);
		xscale_send_u32(target, xscale->cache_clean_address);

		/* invalidate DCache */
		xscale_send_u32(target, 0x51);

		/* D/U cache enable is bit 2 */
		cp15_control &= ~0x4U;
	}

	if (i_cache)
	{
		/* invalidate ICache */
		xscale_send_u32(target, 0x52);
		/* I cache enable is bit 12 */
		cp15_control &= ~0x1000U;
	}

	/* write new cp15 control register */
	xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);

	/* execute cpwait to ensure outstanding operations complete */
	xscale_send_u32(target, 0x53);
}
2048
2049 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2050 int d_u_cache, int i_cache)
2051 {
2052 struct xscale_common *xscale = target_to_xscale(target);
2053 uint32_t cp15_control;
2054
2055 /* read cp15 control register */
2056 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2057 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2058
2059 if (mmu)
2060 cp15_control |= 0x1U;
2061
2062 if (d_u_cache)
2063 cp15_control |= 0x4U;
2064
2065 if (i_cache)
2066 cp15_control |= 0x1000U;
2067
2068 /* write new cp15 control register */
2069 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2070
2071 /* execute cpwait to ensure outstanding operations complete */
2072 xscale_send_u32(target, 0x53);
2073 }
2074
2075 static int xscale_set_breakpoint(struct target *target,
2076 struct breakpoint *breakpoint)
2077 {
2078 int retval;
2079 struct xscale_common *xscale = target_to_xscale(target);
2080
2081 if (target->state != TARGET_HALTED)
2082 {
2083 LOG_WARNING("target not halted");
2084 return ERROR_TARGET_NOT_HALTED;
2085 }
2086
2087 if (breakpoint->set)
2088 {
2089 LOG_WARNING("breakpoint already set");
2090 return ERROR_OK;
2091 }
2092
2093 if (breakpoint->type == BKPT_HARD)
2094 {
2095 uint32_t value = breakpoint->address | 1;
2096 if (!xscale->ibcr0_used)
2097 {
2098 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2099 xscale->ibcr0_used = 1;
2100 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2101 }
2102 else if (!xscale->ibcr1_used)
2103 {
2104 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2105 xscale->ibcr1_used = 1;
2106 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2107 }
2108 else
2109 {
2110 LOG_ERROR("BUG: no hardware comparator available");
2111 return ERROR_OK;
2112 }
2113 }
2114 else if (breakpoint->type == BKPT_SOFT)
2115 {
2116 if (breakpoint->length == 4)
2117 {
2118 /* keep the original instruction in target endianness */
2119 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2120 {
2121 return retval;
2122 }
2123 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2124 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2125 {
2126 return retval;
2127 }
2128 }
2129 else
2130 {
2131 /* keep the original instruction in target endianness */
2132 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2133 {
2134 return retval;
2135 }
2136 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2137 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2138 {
2139 return retval;
2140 }
2141 }
2142 breakpoint->set = 1;
2143 }
2144
2145 return ERROR_OK;
2146 }
2147
2148 static int xscale_add_breakpoint(struct target *target,
2149 struct breakpoint *breakpoint)
2150 {
2151 struct xscale_common *xscale = target_to_xscale(target);
2152
2153 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2154 {
2155 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2156 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2157 }
2158
2159 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2160 {
2161 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2162 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2163 }
2164
2165 if (breakpoint->type == BKPT_HARD)
2166 {
2167 xscale->ibcr_available--;
2168 }
2169
2170 return ERROR_OK;
2171 }
2172
2173 static int xscale_unset_breakpoint(struct target *target,
2174 struct breakpoint *breakpoint)
2175 {
2176 int retval;
2177 struct xscale_common *xscale = target_to_xscale(target);
2178
2179 if (target->state != TARGET_HALTED)
2180 {
2181 LOG_WARNING("target not halted");
2182 return ERROR_TARGET_NOT_HALTED;
2183 }
2184
2185 if (!breakpoint->set)
2186 {
2187 LOG_WARNING("breakpoint not set");
2188 return ERROR_OK;
2189 }
2190
2191 if (breakpoint->type == BKPT_HARD)
2192 {
2193 if (breakpoint->set == 1)
2194 {
2195 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2196 xscale->ibcr0_used = 0;
2197 }
2198 else if (breakpoint->set == 2)
2199 {
2200 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2201 xscale->ibcr1_used = 0;
2202 }
2203 breakpoint->set = 0;
2204 }
2205 else
2206 {
2207 /* restore original instruction (kept in target endianness) */
2208 if (breakpoint->length == 4)
2209 {
2210 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2211 {
2212 return retval;
2213 }
2214 }
2215 else
2216 {
2217 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2218 {
2219 return retval;
2220 }
2221 }
2222 breakpoint->set = 0;
2223 }
2224
2225 return ERROR_OK;
2226 }
2227
2228 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2229 {
2230 struct xscale_common *xscale = target_to_xscale(target);
2231
2232 if (target->state != TARGET_HALTED)
2233 {
2234 LOG_WARNING("target not halted");
2235 return ERROR_TARGET_NOT_HALTED;
2236 }
2237
2238 if (breakpoint->set)
2239 {
2240 xscale_unset_breakpoint(target, breakpoint);
2241 }
2242
2243 if (breakpoint->type == BKPT_HARD)
2244 xscale->ibcr_available++;
2245
2246 return ERROR_OK;
2247 }
2248
2249 static int xscale_set_watchpoint(struct target *target,
2250 struct watchpoint *watchpoint)
2251 {
2252 struct xscale_common *xscale = target_to_xscale(target);
2253 uint8_t enable = 0;
2254 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2255 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2256
2257 if (target->state != TARGET_HALTED)
2258 {
2259 LOG_WARNING("target not halted");
2260 return ERROR_TARGET_NOT_HALTED;
2261 }
2262
2263 xscale_get_reg(dbcon);
2264
2265 switch (watchpoint->rw)
2266 {
2267 case WPT_READ:
2268 enable = 0x3;
2269 break;
2270 case WPT_ACCESS:
2271 enable = 0x2;
2272 break;
2273 case WPT_WRITE:
2274 enable = 0x1;
2275 break;
2276 default:
2277 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2278 }
2279
2280 if (!xscale->dbr0_used)
2281 {
2282 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2283 dbcon_value |= enable;
2284 xscale_set_reg_u32(dbcon, dbcon_value);
2285 watchpoint->set = 1;
2286 xscale->dbr0_used = 1;
2287 }
2288 else if (!xscale->dbr1_used)
2289 {
2290 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2291 dbcon_value |= enable << 2;
2292 xscale_set_reg_u32(dbcon, dbcon_value);
2293 watchpoint->set = 2;
2294 xscale->dbr1_used = 1;
2295 }
2296 else
2297 {
2298 LOG_ERROR("BUG: no hardware comparator available");
2299 return ERROR_OK;
2300 }
2301
2302 return ERROR_OK;
2303 }
2304
2305 static int xscale_add_watchpoint(struct target *target,
2306 struct watchpoint *watchpoint)
2307 {
2308 struct xscale_common *xscale = target_to_xscale(target);
2309
2310 if (xscale->dbr_available < 1)
2311 {
2312 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2313 }
2314
2315 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2316 {
2317 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2318 }
2319
2320 xscale->dbr_available--;
2321
2322 return ERROR_OK;
2323 }
2324
2325 static int xscale_unset_watchpoint(struct target *target,
2326 struct watchpoint *watchpoint)
2327 {
2328 struct xscale_common *xscale = target_to_xscale(target);
2329 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2330 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2331
2332 if (target->state != TARGET_HALTED)
2333 {
2334 LOG_WARNING("target not halted");
2335 return ERROR_TARGET_NOT_HALTED;
2336 }
2337
2338 if (!watchpoint->set)
2339 {
2340 LOG_WARNING("breakpoint not set");
2341 return ERROR_OK;
2342 }
2343
2344 if (watchpoint->set == 1)
2345 {
2346 dbcon_value &= ~0x3;
2347 xscale_set_reg_u32(dbcon, dbcon_value);
2348 xscale->dbr0_used = 0;
2349 }
2350 else if (watchpoint->set == 2)
2351 {
2352 dbcon_value &= ~0xc;
2353 xscale_set_reg_u32(dbcon, dbcon_value);
2354 xscale->dbr1_used = 0;
2355 }
2356 watchpoint->set = 0;
2357
2358 return ERROR_OK;
2359 }
2360
2361 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2362 {
2363 struct xscale_common *xscale = target_to_xscale(target);
2364
2365 if (target->state != TARGET_HALTED)
2366 {
2367 LOG_WARNING("target not halted");
2368 return ERROR_TARGET_NOT_HALTED;
2369 }
2370
2371 if (watchpoint->set)
2372 {
2373 xscale_unset_watchpoint(target, watchpoint);
2374 }
2375
2376 xscale->dbr_available++;
2377
2378 return ERROR_OK;
2379 }
2380
2381 static int xscale_get_reg(struct reg *reg)
2382 {
2383 struct xscale_reg *arch_info = reg->arch_info;
2384 struct target *target = arch_info->target;
2385 struct xscale_common *xscale = target_to_xscale(target);
2386
2387 /* DCSR, TX and RX are accessible via JTAG */
2388 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2389 {
2390 return xscale_read_dcsr(arch_info->target);
2391 }
2392 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2393 {
2394 /* 1 = consume register content */
2395 return xscale_read_tx(arch_info->target, 1);
2396 }
2397 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2398 {
2399 /* can't read from RX register (host -> debug handler) */
2400 return ERROR_OK;
2401 }
2402 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2403 {
2404 /* can't (explicitly) read from TXRXCTRL register */
2405 return ERROR_OK;
2406 }
2407 else /* Other DBG registers have to be transfered by the debug handler */
2408 {
2409 /* send CP read request (command 0x40) */
2410 xscale_send_u32(target, 0x40);
2411
2412 /* send CP register number */
2413 xscale_send_u32(target, arch_info->dbg_handler_number);
2414
2415 /* read register value */
2416 xscale_read_tx(target, 1);
2417 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2418
2419 reg->dirty = 0;
2420 reg->valid = 1;
2421 }
2422
2423 return ERROR_OK;
2424 }
2425
2426 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2427 {
2428 struct xscale_reg *arch_info = reg->arch_info;
2429 struct target *target = arch_info->target;
2430 struct xscale_common *xscale = target_to_xscale(target);
2431 uint32_t value = buf_get_u32(buf, 0, 32);
2432
2433 /* DCSR, TX and RX are accessible via JTAG */
2434 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2435 {
2436 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2437 return xscale_write_dcsr(arch_info->target, -1, -1);
2438 }
2439 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2440 {
2441 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2442 return xscale_write_rx(arch_info->target);
2443 }
2444 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2445 {
2446 /* can't write to TX register (debug-handler -> host) */
2447 return ERROR_OK;
2448 }
2449 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2450 {
2451 /* can't (explicitly) write to TXRXCTRL register */
2452 return ERROR_OK;
2453 }
2454 else /* Other DBG registers have to be transfered by the debug handler */
2455 {
2456 /* send CP write request (command 0x41) */
2457 xscale_send_u32(target, 0x41);
2458
2459 /* send CP register number */
2460 xscale_send_u32(target, arch_info->dbg_handler_number);
2461
2462 /* send CP register value */
2463 xscale_send_u32(target, value);
2464 buf_set_u32(reg->value, 0, 32, value);
2465 }
2466
2467 return ERROR_OK;
2468 }
2469
2470 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2471 {
2472 struct xscale_common *xscale = target_to_xscale(target);
2473 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2474 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2475
2476 /* send CP write request (command 0x41) */
2477 xscale_send_u32(target, 0x41);
2478
2479 /* send CP register number */
2480 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2481
2482 /* send CP register value */
2483 xscale_send_u32(target, value);
2484 buf_set_u32(dcsr->value, 0, 32, value);
2485
2486 return ERROR_OK;
2487 }
2488
/* Drain the on-chip trace buffer through the debug handler and append
 * its contents as a new xscale_trace_data node to xscale->trace.data.
 * Returns ERROR_TARGET_NOT_HALTED, ERROR_XSCALE_NO_TRACE_DATA when the
 * buffer is empty, or ERROR_OK. */
static int xscale_read_trace(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct xscale_trace_data **trace_data_p;

	/* 258 words from debug handler
	 * 256 trace buffer entries
	 * 2 checkpoint addresses
	 */
	uint32_t trace_buffer[258];
	int is_address[256];
	int i, j;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target must be stopped to read trace data");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* send read trace buffer command (command 0x61) */
	xscale_send_u32(target, 0x61);

	/* receive trace buffer content */
	xscale_receive(target, trace_buffer, 258);

	/* Parse the buffer backwards to identify address entries: 0x9X is an
	 * indirect branch message and 0xdX a checkpointed indirect branch
	 * (see the switch in xscale_analyze_trace); each is preceded by up
	 * to four bytes of branch target address. Note that the `--i` inside
	 * the body deliberately steps the loop index past those address
	 * bytes, so each guard tests an already-decremented i. */
	for (i = 255; i >= 0; i--)
	{
		is_address[i] = 0;
		if (((trace_buffer[i] & 0xf0) == 0x90) ||
			((trace_buffer[i] & 0xf0) == 0xd0))
		{
			if (i >= 3)
				is_address[--i] = 1;
			if (i >= 2)
				is_address[--i] = 1;
			if (i >= 1)
				is_address[--i] = 1;
			if (i >= 0)
				is_address[--i] = 1;
		}
	}


	/* search first non-zero entry */
	for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
		;

	if (j == 256)
	{
		LOG_DEBUG("no trace data collected");
		return ERROR_XSCALE_NO_TRACE_DATA;
	}

	/* walk to the tail of the trace data list so the new chunk is appended */
	for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
		;

	/* NOTE(review): both malloc() results below are used unchecked;
	 * an allocation failure would crash here */
	*trace_data_p = malloc(sizeof(struct xscale_trace_data));
	(*trace_data_p)->next = NULL;
	(*trace_data_p)->chkpt0 = trace_buffer[256];
	(*trace_data_p)->chkpt1 = trace_buffer[257];
	/* current PC serves as the end marker for trace reconstruction */
	(*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	(*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
	(*trace_data_p)->depth = 256 - j;

	/* copy entries [j..255], tagging each as address byte or trace message */
	for (i = j; i < 256; i++)
	{
		(*trace_data_p)->entries[i - j].data = trace_buffer[i];
		if (is_address[i])
			(*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
		else
			(*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
	}

	return ERROR_OK;
}
2566
2567 static int xscale_read_instruction(struct target *target,
2568 struct arm_instruction *instruction)
2569 {
2570 struct xscale_common *xscale = target_to_xscale(target);
2571 int i;
2572 int section = -1;
2573 size_t size_read;
2574 uint32_t opcode;
2575 int retval;
2576
2577 if (!xscale->trace.image)
2578 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2579
2580 /* search for the section the current instruction belongs to */
2581 for (i = 0; i < xscale->trace.image->num_sections; i++)
2582 {
2583 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2584 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2585 {
2586 section = i;
2587 break;
2588 }
2589 }
2590
2591 if (section == -1)
2592 {
2593 /* current instruction couldn't be found in the image */
2594 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2595 }
2596
2597 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2598 {
2599 uint8_t buf[4];
2600 if ((retval = image_read_section(xscale->trace.image, section,
2601 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2602 4, buf, &size_read)) != ERROR_OK)
2603 {
2604 LOG_ERROR("error while reading instruction: %i", retval);
2605 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2606 }
2607 opcode = target_buffer_get_u32(target, buf);
2608 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2609 }
2610 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2611 {
2612 uint8_t buf[2];
2613 if ((retval = image_read_section(xscale->trace.image, section,
2614 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2615 2, buf, &size_read)) != ERROR_OK)
2616 {
2617 LOG_ERROR("error while reading instruction: %i", retval);
2618 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2619 }
2620 opcode = target_buffer_get_u16(target, buf);
2621 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2622 }
2623 else
2624 {
2625 LOG_ERROR("BUG: unknown core state encountered");
2626 exit(-1);
2627 }
2628
2629 return ERROR_OK;
2630 }
2631
2632 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2633 int i, uint32_t *target)
2634 {
2635 /* if there are less than four entries prior to the indirect branch message
2636 * we can't extract the address */
2637 if (i < 4)
2638 {
2639 return -1;
2640 }
2641
2642 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2643 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2644
2645 return 0;
2646 }
2647
2648 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2649 {
2650 struct xscale_common *xscale = target_to_xscale(target);
2651 int next_pc_ok = 0;
2652 uint32_t next_pc = 0x0;
2653 struct xscale_trace_data *trace_data = xscale->trace.data;
2654 int retval;
2655
2656 while (trace_data)
2657 {
2658 int i, chkpt;
2659 int rollover;
2660 int branch;
2661 int exception;
2662 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2663
2664 chkpt = 0;
2665 rollover = 0;
2666
2667 for (i = 0; i < trace_data->depth; i++)
2668 {
2669 next_pc_ok = 0;
2670 branch = 0;
2671 exception = 0;
2672
2673 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2674 continue;
2675
2676 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2677 {
2678 case 0: /* Exceptions */
2679 case 1:
2680 case 2:
2681 case 3:
2682 case 4:
2683 case 5:
2684 case 6:
2685 case 7:
2686 exception = (trace_data->entries[i].data & 0x70) >> 4;
2687 next_pc_ok = 1;
2688 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2689 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2690 break;
2691 case 8: /* Direct Branch */
2692 branch = 1;
2693 break;
2694 case 9: /* Indirect Branch */
2695 branch = 1;
2696 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2697 {
2698 next_pc_ok = 1;
2699 }
2700 break;
2701 case 13: /* Checkpointed Indirect Branch */
2702 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2703 {
2704 next_pc_ok = 1;
2705 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2706 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2707 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2708 }
2709 /* explicit fall-through */
2710 case 12: /* Checkpointed Direct Branch */
2711 branch = 1;
2712 if (chkpt == 0)
2713 {
2714 next_pc_ok = 1;
2715 next_pc = trace_data->chkpt0;
2716 chkpt++;
2717 }
2718 else if (chkpt == 1)
2719 {
2720 next_pc_ok = 1;
2721 next_pc = trace_data->chkpt0;
2722 chkpt++;
2723 }
2724 else
2725 {
2726 LOG_WARNING("more than two checkpointed branches encountered");
2727 }
2728 break;
2729 case 15: /* Roll-over */
2730 rollover++;
2731 continue;
2732 default: /* Reserved */
2733 command_print(cmd_ctx, "--- reserved trace message ---");
2734 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2735 return ERROR_OK;
2736 }
2737
2738 if (xscale->trace.pc_ok)
2739 {
2740 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2741 struct arm_instruction instruction;
2742
2743 if ((exception == 6) || (exception == 7))
2744 {
2745 /* IRQ or FIQ exception, no instruction executed */
2746 executed -= 1;
2747 }
2748
2749 while (executed-- >= 0)
2750 {
2751 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2752 {
2753 /* can't continue tracing with no image available */
2754 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2755 {
2756 return retval;
2757 }
2758 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2759 {
2760 /* TODO: handle incomplete images */
2761 }
2762 }
2763
2764 /* a precise abort on a load to the PC is included in the incremental
2765 * word count, other instructions causing data aborts are not included
2766 */
2767 if ((executed == 0) && (exception == 4)
2768 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2769 {
2770 if ((instruction.type == ARM_LDM)
2771 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2772 {
2773 executed--;
2774 }
2775 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2776 && (instruction.info.load_store.Rd != 15))
2777 {
2778 executed--;
2779 }
2780 }
2781
2782 /* only the last instruction executed
2783 * (the one that caused the control flow change)
2784 * could be a taken branch
2785 */
2786 if (((executed == -1) && (branch == 1)) &&
2787 (((instruction.type == ARM_B) ||
2788 (instruction.type == ARM_BL) ||
2789 (instruction.type == ARM_BLX)) &&
2790 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2791 {
2792 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2793 }
2794 else
2795 {
2796 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2797 }
2798 command_print(cmd_ctx, "%s", instruction.text);
2799 }
2800
2801 rollover = 0;
2802 }
2803
2804 if (next_pc_ok)
2805 {
2806 xscale->trace.current_pc = next_pc;
2807 xscale->trace.pc_ok = 1;
2808 }
2809 }
2810
2811 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2812 {
2813 struct arm_instruction instruction;
2814 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2815 {
2816 /* can't continue tracing with no image available */
2817 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2818 {
2819 return retval;
2820 }
2821 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2822 {
2823 /* TODO: handle incomplete images */
2824 }
2825 }
2826 command_print(cmd_ctx, "%s", instruction.text);
2827 }
2828
2829 trace_data = trace_data->next;
2830 }
2831
2832 return ERROR_OK;
2833 }
2834
/* accessors the generic register layer uses for the XScale debug
 * registers built in xscale_build_reg_cache() */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2839
2840 static void xscale_build_reg_cache(struct target *target)
2841 {
2842 struct xscale_common *xscale = target_to_xscale(target);
2843 struct arm *armv4_5 = &xscale->armv4_5_common;
2844 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2845 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2846 int i;
2847 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2848
2849 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2850
2851 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2852 cache_p = &(*cache_p)->next;
2853
2854 /* fill in values for the xscale reg cache */
2855 (*cache_p)->name = "XScale registers";
2856 (*cache_p)->next = NULL;
2857 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2858 (*cache_p)->num_regs = num_regs;
2859
2860 for (i = 0; i < num_regs; i++)
2861 {
2862 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2863 (*cache_p)->reg_list[i].value = calloc(4, 1);
2864 (*cache_p)->reg_list[i].dirty = 0;
2865 (*cache_p)->reg_list[i].valid = 0;
2866 (*cache_p)->reg_list[i].size = 32;
2867 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2868 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2869 arch_info[i] = xscale_reg_arch_info[i];
2870 arch_info[i].target = target;
2871 }
2872
2873 xscale->reg_cache = (*cache_p);
2874 }
2875
2876 static int xscale_init_target(struct command_context *cmd_ctx,
2877 struct target *target)
2878 {
2879 xscale_build_reg_cache(target);
2880 return ERROR_OK;
2881 }
2882
/* One-time initialization of the XScale target state: validates/fixes
 * the TAP's IR length from the variant name, selects the PXA3xx or
 * IXP4xx/PXA2xx instruction layout, and seeds all debug-handler,
 * vector, breakpoint/watchpoint and MMU bookkeeping defaults.
 * Always returns ERROR_OK. */
static int xscale_init_arch_info(struct target *target,
		struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specific data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* a variant param ... */
	if (variant) {
		int ir_length = 0;

		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else if (strcmp(variant, "pxa3xx") == 0)
			ir_length = 11;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		/* the variant's known IR length overrides a mismatched config */
		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* PXA3xx shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler
	 * (+0x20 skips the handler's vector table; -0x8 is the ARM
	 * branch-offset pipeline adjustment; >> 2 converts to words) */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* vectors 1..7 default to a branch-to-self (offset 0xfffffe) */
	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction breakpoint comparators (IBCR0/IBCR1), both free */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	/* two data breakpoint comparators (DBR0/DBR1), both free */
	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* software breakpoint opcodes for ARM and Thumb state */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	/* catch reset vector by default (DCSR trap bits [23:16]) */
	xscale->vector_catch = 0x1;

	xscale->trace.capture_status = TRACE_IDLE;
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.buffer_enabled = 0;
	xscale->trace.buffer_fill = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	armv4_5_init_arch_info(target, armv4_5);

	/* ctype == -1 means cache geometry not probed yet */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
2995
2996 static int xscale_target_create(struct target *target, Jim_Interp *interp)
2997 {
2998 struct xscale_common *xscale;
2999
3000 if (sizeof xscale_debug_handler - 1 > 0x800) {
3001 LOG_ERROR("debug_handler.bin: larger than 2kb");
3002 return ERROR_FAIL;
3003 }
3004
3005 xscale = calloc(1, sizeof(*xscale));
3006 if (!xscale)
3007 return ERROR_FAIL;
3008
3009 return xscale_init_arch_info(target, xscale, target->tap,
3010 target->variant);
3011 }
3012
3013 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3014 {
3015 struct target *target = NULL;
3016 struct xscale_common *xscale;
3017 int retval;
3018 uint32_t handler_address;
3019
3020 if (CMD_ARGC < 2)
3021 {
3022 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3023 return ERROR_OK;
3024 }
3025
3026 if ((target = get_target(CMD_ARGV[0])) == NULL)
3027 {
3028 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3029 return ERROR_FAIL;
3030 }
3031
3032 xscale = target_to_xscale(target);
3033 retval = xscale_verify_pointer(CMD_CTX, xscale);
3034 if (retval != ERROR_OK)
3035 return retval;
3036
3037 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3038
3039 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3040 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3041 {
3042 xscale->handler_address = handler_address;
3043 }
3044 else
3045 {
3046 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3047 return ERROR_FAIL;
3048 }
3049
3050 return ERROR_OK;
3051 }
3052
3053 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3054 {
3055 struct target *target = NULL;
3056 struct xscale_common *xscale;
3057 int retval;
3058 uint32_t cache_clean_address;
3059
3060 if (CMD_ARGC < 2)
3061 {
3062 return ERROR_COMMAND_SYNTAX_ERROR;
3063 }
3064
3065 target = get_target(CMD_ARGV[0]);
3066 if (target == NULL)
3067 {
3068 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3069 return ERROR_FAIL;
3070 }
3071 xscale = target_to_xscale(target);
3072 retval = xscale_verify_pointer(CMD_CTX, xscale);
3073 if (retval != ERROR_OK)
3074 return retval;
3075
3076 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3077
3078 if (cache_clean_address & 0xffff)
3079 {
3080 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3081 }
3082 else
3083 {
3084 xscale->cache_clean_address = cache_clean_address;
3085 }
3086
3087 return ERROR_OK;
3088 }
3089
3090 COMMAND_HANDLER(xscale_handle_cache_info_command)
3091 {
3092 struct target *target = get_current_target(CMD_CTX);
3093 struct xscale_common *xscale = target_to_xscale(target);
3094 int retval;
3095
3096 retval = xscale_verify_pointer(CMD_CTX, xscale);
3097 if (retval != ERROR_OK)
3098 return retval;
3099
3100 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3101 }
3102
3103 static int xscale_virt2phys(struct target *target,
3104 uint32_t virtual, uint32_t *physical)
3105 {
3106 struct xscale_common *xscale = target_to_xscale(target);
3107 int type;
3108 uint32_t cb;
3109 int domain;
3110 uint32_t ap;
3111
3112 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3113 LOG_ERROR(xscale_not);
3114 return ERROR_TARGET_INVALID;
3115 }
3116
3117 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3118 if (type == -1)
3119 {
3120 return ret;
3121 }
3122 *physical = ret;
3123 return ERROR_OK;
3124 }
3125
3126 static int xscale_mmu(struct target *target, int *enabled)
3127 {
3128 struct xscale_common *xscale = target_to_xscale(target);
3129
3130 if (target->state != TARGET_HALTED)
3131 {
3132 LOG_ERROR("Target not halted");
3133 return ERROR_TARGET_INVALID;
3134 }
3135 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3136 return ERROR_OK;
3137 }
3138
3139 COMMAND_HANDLER(xscale_handle_mmu_command)
3140 {
3141 struct target *target = get_current_target(CMD_CTX);
3142 struct xscale_common *xscale = target_to_xscale(target);
3143 int retval;
3144
3145 retval = xscale_verify_pointer(CMD_CTX, xscale);
3146 if (retval != ERROR_OK)
3147 return retval;
3148
3149 if (target->state != TARGET_HALTED)
3150 {
3151 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3152 return ERROR_OK;
3153 }
3154
3155 if (CMD_ARGC >= 1)
3156 {
3157 bool enable;
3158 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3159 if (enable)
3160 xscale_enable_mmu_caches(target, 1, 0, 0);
3161 else
3162 xscale_disable_mmu_caches(target, 1, 0, 0);
3163 xscale->armv4_5_mmu.mmu_enabled = enable;
3164 }
3165
3166 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3167
3168 return ERROR_OK;
3169 }
3170
3171 COMMAND_HANDLER(xscale_handle_idcache_command)
3172 {
3173 struct target *target = get_current_target(CMD_CTX);
3174 struct xscale_common *xscale = target_to_xscale(target);
3175
3176 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3177 if (retval != ERROR_OK)
3178 return retval;
3179
3180 if (target->state != TARGET_HALTED)
3181 {
3182 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3183 return ERROR_OK;
3184 }
3185
3186 bool icache;
3187 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3188
3189 if (CMD_ARGC >= 1)
3190 {
3191 bool enable;
3192 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3193 if (enable)
3194 xscale_enable_mmu_caches(target, 1, 0, 0);
3195 else
3196 xscale_disable_mmu_caches(target, 1, 0, 0);
3197 if (icache)
3198 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3199 else
3200 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3201 }
3202
3203 bool enabled = icache ?
3204 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3205 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3206 const char *msg = enabled ? "enabled" : "disabled";
3207 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3208
3209 return ERROR_OK;
3210 }
3211
3212 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3213 {
3214 struct target *target = get_current_target(CMD_CTX);
3215 struct xscale_common *xscale = target_to_xscale(target);
3216 int retval;
3217
3218 retval = xscale_verify_pointer(CMD_CTX, xscale);
3219 if (retval != ERROR_OK)
3220 return retval;
3221
3222 if (CMD_ARGC < 1)
3223 {
3224 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3225 }
3226 else
3227 {
3228 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3229 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3230 xscale_write_dcsr(target, -1, -1);
3231 }
3232
3233 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3234
3235 return ERROR_OK;
3236 }
3237
3238
3239 COMMAND_HANDLER(xscale_handle_vector_table_command)
3240 {
3241 struct target *target = get_current_target(CMD_CTX);
3242 struct xscale_common *xscale = target_to_xscale(target);
3243 int err = 0;
3244 int retval;
3245
3246 retval = xscale_verify_pointer(CMD_CTX, xscale);
3247 if (retval != ERROR_OK)
3248 return retval;
3249
3250 if (CMD_ARGC == 0) /* print current settings */
3251 {
3252 int idx;
3253
3254 command_print(CMD_CTX, "active user-set static vectors:");
3255 for (idx = 1; idx < 8; idx++)
3256 if (xscale->static_low_vectors_set & (1 << idx))
3257 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3258 for (idx = 1; idx < 8; idx++)
3259 if (xscale->static_high_vectors_set & (1 << idx))
3260 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3261 return ERROR_OK;
3262 }
3263
3264 if (CMD_ARGC != 3)
3265 err = 1;
3266 else
3267 {
3268 int idx;
3269 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3270 uint32_t vec;
3271 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3272
3273 if (idx < 1 || idx >= 8)
3274 err = 1;
3275
3276 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3277 {
3278 xscale->static_low_vectors_set |= (1<<idx);
3279 xscale->static_low_vectors[idx] = vec;
3280 }
3281 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3282 {
3283 xscale->static_high_vectors_set |= (1<<idx);
3284 xscale->static_high_vectors[idx] = vec;
3285 }
3286 else
3287 err = 1;
3288 }
3289
3290 if (err)
3291 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3292
3293 return ERROR_OK;
3294 }
3295
3296
/* "xscale trace_buffer <enable|disable> [fill [n] | wrap]"
 *
 * Configures the on-chip trace buffer.  Requires a halted target:
 * enabling records the current PC as the known-good starting point for
 * later trace reconstruction, and the selected mode is programmed into
 * DCSR before returning.
 */
COMMAND_HANDLER(xscale_handle_trace_buffer_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t dcsr_value;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
	{
		struct xscale_trace_data *td, *next_td;
		xscale->trace.buffer_enabled = 1;

		/* free old trace data: walk the linked list of capture
		 * buffers, releasing each entry array and node */
		td = xscale->trace.data;
		while (td)
		{
			next_td = td->next;

			if (td->entries)
				free(td->entries);
			free(td);
			td = next_td;
		}
		xscale->trace.data = NULL;
	}
	else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
	{
		xscale->trace.buffer_enabled = 0;
	}

	/* optional second argument selects fill-once mode (capture N
	 * buffers, default 1) or wrap-around mode */
	if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
	{
		uint32_t fill = 1;
		if (CMD_ARGC >= 3)
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
		xscale->trace.buffer_fill = fill;
	}
	else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
	{
		/* negative fill count marks wrap-around mode */
		xscale->trace.buffer_fill = -1;
	}

	if (xscale->trace.buffer_enabled)
	{
		/* if we enable the trace buffer in fill-once
		 * mode we know the address of the first instruction */
		xscale->trace.pc_ok = 1;
		xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	}
	else
	{
		/* otherwise the address is unknown, and we have no known good PC */
		xscale->trace.pc_ok = 0;
	}

	command_print(CMD_CTX, "trace buffer %s (%s)",
		(xscale->trace.buffer_enabled) ? "enabled" : "disabled",
		(xscale->trace.buffer_fill > 0) ? "fill" : "wrap");

	/* rewrite DCSR[1:0]: 2 for fill mode, 0 for wrap mode
	 * (NOTE(review): bit semantics inferred from the 0xfffffffc mask;
	 * confirm against the XScale core developer's manual) */
	dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
	if (xscale->trace.buffer_fill >= 0)
		xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
	else
		xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);

	return ERROR_OK;
}
3375
3376 COMMAND_HANDLER(xscale_handle_trace_image_command)
3377 {
3378 struct target *target = get_current_target(CMD_CTX);
3379 struct xscale_common *xscale = target_to_xscale(target);
3380 int retval;
3381
3382 if (CMD_ARGC < 1)
3383 {
3384 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3385 return ERROR_OK;
3386 }
3387
3388 retval = xscale_verify_pointer(CMD_CTX, xscale);
3389 if (retval != ERROR_OK)
3390 return retval;
3391
3392 if (xscale->trace.image)
3393 {
3394 image_close(xscale->trace.image);
3395 free(xscale->trace.image);
3396 command_print(CMD_CTX, "previously loaded image found and closed");
3397 }
3398
3399 xscale->trace.image = malloc(sizeof(struct image));
3400 xscale->trace.image->base_address_set = 0;
3401 xscale->trace.image->start_address_set = 0;
3402
3403 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3404 if (CMD_ARGC >= 2)
3405 {
3406 xscale->trace.image->base_address_set = 1;
3407 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3408 }
3409 else
3410 {
3411 xscale->trace.image->base_address_set = 0;
3412 }
3413
3414 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3415 {
3416 free(xscale->trace.image);
3417 xscale->trace.image = NULL;
3418 return ERROR_OK;
3419 }
3420
3421 return ERROR_OK;
3422 }
3423
3424 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3425 {
3426 struct target *target = get_current_target(CMD_CTX);
3427 struct xscale_common *xscale = target_to_xscale(target);
3428 struct xscale_trace_data *trace_data;
3429 struct fileio file;
3430 int retval;
3431
3432 retval = xscale_verify_pointer(CMD_CTX, xscale);
3433 if (retval != ERROR_OK)
3434 return retval;
3435
3436 if (target->state != TARGET_HALTED)
3437 {
3438 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3439 return ERROR_OK;
3440 }
3441
3442 if (CMD_ARGC < 1)
3443 {
3444 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3445 return ERROR_OK;
3446 }
3447
3448 trace_data = xscale->trace.data;
3449
3450 if (!trace_data)
3451 {
3452 command_print(CMD_CTX, "no trace data collected");
3453 return ERROR_OK;
3454 }
3455
3456 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3457 {
3458 return ERROR_OK;
3459 }
3460
3461 while (trace_data)
3462 {
3463 int i;
3464
3465 fileio_write_u32(&file, trace_data->chkpt0);
3466 fileio_write_u32(&file, trace_data->chkpt1);
3467 fileio_write_u32(&file, trace_data->last_instruction);
3468 fileio_write_u32(&file, trace_data->depth);
3469
3470 for (i = 0; i < trace_data->depth; i++)
3471 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3472
3473 trace_data = trace_data->next;
3474 }
3475
3476 fileio_close(&file);
3477
3478 return ERROR_OK;
3479 }
3480
3481 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3482 {
3483 struct target *target = get_current_target(CMD_CTX);
3484 struct xscale_common *xscale = target_to_xscale(target);
3485 int retval;
3486
3487 retval = xscale_verify_pointer(CMD_CTX, xscale);
3488 if (retval != ERROR_OK)
3489 return retval;
3490
3491 xscale_analyze_trace(target, CMD_CTX);
3492
3493 return ERROR_OK;
3494 }
3495
/* "xscale cp15 <register> [value]"
 *
 * Reads or writes one of the supported coprocessor-15 registers via the
 * debug handler.  The user-facing number is the CP15 register number;
 * it is translated to the matching index in OpenOCD's register cache.
 * Requires a halted target.
 */
COMMAND_HANDLER(xscale_handle_cp15)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}
	uint32_t reg_no = 0;
	struct reg *reg = NULL;
	if (CMD_ARGC > 0)
	{
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
		/* translate from xscale cp15 register no to openocd register
		 * cache index; only these registers are accessible here */
		switch (reg_no)
		{
		case 0:
			reg_no = XSCALE_MAINID;
			break;
		case 1:
			reg_no = XSCALE_CTRL;
			break;
		case 2:
			reg_no = XSCALE_TTB;
			break;
		case 3:
			reg_no = XSCALE_DAC;
			break;
		case 5:
			reg_no = XSCALE_FSR;
			break;
		case 6:
			reg_no = XSCALE_FAR;
			break;
		case 13:
			reg_no = XSCALE_PID;
			break;
		case 15:
			reg_no = XSCALE_CPACCESS;
			break;
		default:
			command_print(CMD_CTX, "invalid register number");
			return ERROR_INVALID_ARGUMENTS;
		}
		reg = &xscale->reg_cache->reg_list[reg_no];

	}
	if (CMD_ARGC == 1)
	{
		uint32_t value;

		/* read cp15 control register */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
	}
	else if (CMD_ARGC == 2)
	{
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);

		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number
		 * (NOTE(review): this is the translated cache index, not the
		 * raw CP15 number -- the debug handler protocol apparently
		 * expects it; the cached reg value is not refreshed here) */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);
	}
	else
	{
		command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
	}

	return ERROR_OK;
}
3583
3584 static const struct command_registration xscale_exec_command_handlers[] = {
3585 {
3586 .name = "cache_info",
3587 .handler = &xscale_handle_cache_info_command,
3588 .mode = COMMAND_EXEC, NULL,
3589 },
3590
3591 {
3592 .name = "mmu",
3593 .handler = &xscale_handle_mmu_command,
3594 .mode = COMMAND_EXEC,
3595 .usage = "[enable|disable]",
3596 .help = "enable or disable the MMU",
3597 },
3598 {
3599 .name = "icache",
3600 .handler = &xscale_handle_idcache_command,
3601 .mode = COMMAND_EXEC,
3602 .usage = "[enable|disable]",
3603 .help = "enable or disable the ICache",
3604 },
3605 {
3606 .name = "dcache",
3607 .handler = &xscale_handle_idcache_command,
3608 .mode = COMMAND_EXEC,
3609 .usage = "[enable|disable]",
3610 .help = "enable or disable the DCache",
3611 },
3612
3613 {
3614 .name = "vector_catch",
3615 .handler = &xscale_handle_vector_catch_command,
3616 .mode = COMMAND_EXEC,
3617 .help = "mask of vectors that should be caught",
3618 .usage = "[<mask>]",
3619 },
3620 {
3621 .name = "vector_table",
3622 .handler = &xscale_handle_vector_table_command,
3623 .mode = COMMAND_EXEC,
3624 .usage = "<high|low> <index> <code>",
3625 .help = "set static code for exception handler entry",
3626 },
3627
3628 {
3629 .name = "trace_buffer",
3630 .handler = &xscale_handle_trace_buffer_command,
3631 .mode = COMMAND_EXEC,
3632 .usage = "<enable | disable> [fill [n]|wrap]",
3633 },
3634 {
3635 .name = "dump_trace",
3636 .handler = &xscale_handle_dump_trace_command,
3637 .mode = COMMAND_EXEC,
3638 .help = "dump content of trace buffer to <file>",
3639 .usage = "<file>",
3640 },
3641 {
3642 .name = "analyze_trace",
3643 .handler = &xscale_handle_analyze_trace_buffer_command,
3644 .mode = COMMAND_EXEC,
3645 .help = "analyze content of trace buffer",
3646 },
3647 {
3648 .name = "trace_image",
3649 .handler = &xscale_handle_trace_image_command,
3650 COMMAND_EXEC,
3651 .help = "load image from <file> [base address]",
3652 .usage = "<file> [address] [type]",
3653 },
3654
3655 {
3656 .name = "cp15",
3657 .handler = &xscale_handle_cp15,
3658 .mode = COMMAND_EXEC,
3659 .help = "access coproc 15",
3660 .usage = "<register> [value]",
3661 },
3662 COMMAND_REGISTRATION_DONE
3663 };
/* Commands usable in any mode (configuration or exec); the EXEC-only
 * subcommands above are chained in at the end. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = &xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.usage = "<target#> <address>",
	},
	{
		/* NOTE(review): missing .help/.usage -- consider documenting
		 * the expected arguments as done for debug_handler */
		.name = "cache_clean_address",
		.handler = &xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
	},
	{
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: generic ARM commands plus the
 * "xscale" command group (which chains in everything above). */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3693
/* Target-type callback table for XScale cores.  Debug access goes
 * through a resident debug handler (installed via the
 * "xscale debug_handler" command), hence the custom memory,
 * breakpoint and reset operations. */
struct target_type xscale_target =
{
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	/* no dedicated soft-reset-halt support */
	.soft_reset_halt = NULL,

	.get_gdb_reg_list = armv4_5_get_gdb_reg_list,

	/* virtual- and physical-address memory access variants */
	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	/* generic ARM target-side checksum/blank-check algorithms */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	/* MMU-aware address translation for virtual-address debugging */
	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link and visit this page again; this time it will link the new method to your account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)