ARM: define two register utilities
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include "time_support.h"
37 #include "register.h"
38 #include "image.h"
39
40
41 /*
42 * Important XScale documents available as of October 2009 include:
43 *
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
48 *
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
56 *
57 * Chip-specific microarchitecture documents may also be useful.
58 */
59
60
61 /* forward declarations */
62 static int xscale_resume(struct target *, int current,
63 uint32_t address, int handle_breakpoints, int debug_execution);
64 static int xscale_debug_entry(struct target *);
65 static int xscale_restore_context(struct target *);
66 static int xscale_get_reg(struct reg *reg);
67 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
68 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
69 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
70 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_read_trace(struct target *);
72
73
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
76 *
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
81 */
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
84
85 static char *const xscale_reg_list[] =
86 {
87 "XSCALE_MAINID", /* 0 */
88 "XSCALE_CACHETYPE",
89 "XSCALE_CTRL",
90 "XSCALE_AUXCTRL",
91 "XSCALE_TTB",
92 "XSCALE_DAC",
93 "XSCALE_FSR",
94 "XSCALE_FAR",
95 "XSCALE_PID",
96 "XSCALE_CPACCESS",
97 "XSCALE_IBCR0", /* 10 */
98 "XSCALE_IBCR1",
99 "XSCALE_DBR0",
100 "XSCALE_DBR1",
101 "XSCALE_DBCON",
102 "XSCALE_TBREG",
103 "XSCALE_CHKPT0",
104 "XSCALE_CHKPT1",
105 "XSCALE_DCSR",
106 "XSCALE_TX",
107 "XSCALE_RX", /* 20 */
108 "XSCALE_TXRXCTRL",
109 };
110
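/* per-register info matching the list above; the -1 entries (TX, RX and
 * TXRXCTRL) have no debug handler register number and, as noted below, are
 * reachable only through JTAG
 */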
111 static const struct xscale_reg xscale_reg_arch_info[] =
112 {
113 {XSCALE_MAINID, NULL},
114 {XSCALE_CACHETYPE, NULL},
115 {XSCALE_CTRL, NULL},
116 {XSCALE_AUXCTRL, NULL},
117 {XSCALE_TTB, NULL},
118 {XSCALE_DAC, NULL},
119 {XSCALE_FSR, NULL},
120 {XSCALE_FAR, NULL},
121 {XSCALE_PID, NULL},
122 {XSCALE_CPACCESS, NULL},
123 {XSCALE_IBCR0, NULL},
124 {XSCALE_IBCR1, NULL},
125 {XSCALE_DBR0, NULL},
126 {XSCALE_DBR1, NULL},
127 {XSCALE_DBCON, NULL},
128 {XSCALE_TBREG, NULL},
129 {XSCALE_CHKPT0, NULL},
130 {XSCALE_CHKPT1, NULL},
131 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
132 {-1, NULL}, /* TX accessed via JTAG */
133 {-1, NULL}, /* RX accessed via JTAG */
134 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
135 };
136
137 /* convenience wrapper to access XScale specific registers */
138 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
139 {
140 uint8_t buf[4];
141
142 buf_set_u32(buf, 0, 32, value);
143
144 return xscale_set_reg(reg, buf);
145 }
146
147 static const char xscale_not[] = "target is not an XScale";
148
149 static int xscale_verify_pointer(struct command_context *cmd_ctx,
150 struct xscale_common *xscale)
151 {
152 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
153 command_print(cmd_ctx, xscale_not);
154 return ERROR_TARGET_INVALID;
155 }
156 return ERROR_OK;
157 }
158
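/* Queue an IR scan selecting new_instr, skipping the scan when the TAP's
 * current instruction already matches.
 */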
159 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
160 {
161 if (tap == NULL)
162 return ERROR_FAIL;
163
164 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
165 {
166 struct scan_field field;
167 uint8_t scratch[4];
168
169 memset(&field, 0, sizeof field);
170 field.tap = tap;
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
174
175 jtag_add_ir_scan(1, &field, jtag_get_end_state());
176 }
177
178 return ERROR_OK;
179 }
180
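/* Read the DCSR via the SELDCSR scan chain.  The captured value is stored in
 * the register cache and then written straight back, so the hold_rst and
 * external_debug_break bits carried in field0 take effect without otherwise
 * changing the DCSR contents.
 */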
181 static int xscale_read_dcsr(struct target *target)
182 {
183 struct xscale_common *xscale = target_to_xscale(target);
184 int retval;
185 struct scan_field fields[3];
186 uint8_t field0 = 0x0;
187 uint8_t field0_check_value = 0x2;
188 uint8_t field0_check_mask = 0x7;
189 uint8_t field2 = 0x0;
190 uint8_t field2_check_value = 0x0;
191 uint8_t field2_check_mask = 0x1;
192
193 jtag_set_end_state(TAP_DRPAUSE);
194 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
195
196 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
197 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
198
199 memset(&fields, 0, sizeof fields);
200
201 fields[0].tap = target->tap;
202 fields[0].num_bits = 3;
203 fields[0].out_value = &field0;
204 uint8_t tmp;
205 fields[0].in_value = &tmp;
206
207 fields[1].tap = target->tap;
208 fields[1].num_bits = 32;
209 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
210
211 fields[2].tap = target->tap;
212 fields[2].num_bits = 1;
213 fields[2].out_value = &field2;
214 uint8_t tmp2;
215 fields[2].in_value = &tmp2;
216
217 jtag_add_dr_scan(3, fields, jtag_get_end_state());
218
219 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
220 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
221
222 if ((retval = jtag_execute_queue()) != ERROR_OK)
223 {
224 LOG_ERROR("JTAG error while reading DCSR");
225 return retval;
226 }
227
228 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
229 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
230
231 /* write the register with the value we just read
232 	 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
233 */
234 field0_check_mask = 0x1;
235 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
236 fields[1].in_value = NULL;
237
238 jtag_set_end_state(TAP_IDLE);
239
240 jtag_add_dr_scan(3, fields, jtag_get_end_state());
241
242 /* DANGER!!! this must be here. It will make sure that the arguments
243 	 * to jtag_set_check_value() do not go out of scope! */
244 return jtag_execute_queue();
245 }
246
247
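/* JTAG callback: once the queue has executed, convert the raw scan buffer to
 * a host-endian uint32_t in place.
 */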
248 static void xscale_getbuf(jtag_callback_data_t arg)
249 {
250 uint8_t *in = (uint8_t *)arg;
251 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
252 }
253
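/* Read num_words 32-bit words that the debug handler has written to TX.  Each
 * DR scan returns a 3-bit status field plus 32 data bits; when bit 0 of the
 * status is clear, the handler had not yet refilled TX and that word is
 * dropped and re-read on a later pass.
 */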
254 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
255 {
256 if (num_words == 0)
257 return ERROR_INVALID_ARGUMENTS;
258
259 int retval = ERROR_OK;
260 tap_state_t path[3];
261 struct scan_field fields[3];
262 uint8_t *field0 = malloc(num_words * 1);
263 uint8_t field0_check_value = 0x2;
264 uint8_t field0_check_mask = 0x6;
265 uint32_t *field1 = malloc(num_words * 4);
266 uint8_t field2_check_value = 0x0;
267 uint8_t field2_check_mask = 0x1;
268 int words_done = 0;
269 int words_scheduled = 0;
270 int i;
271
272 path[0] = TAP_DRSELECT;
273 path[1] = TAP_DRCAPTURE;
274 path[2] = TAP_DRSHIFT;
275
276 memset(&fields, 0, sizeof fields);
277
278 fields[0].tap = target->tap;
279 fields[0].num_bits = 3;
280 fields[0].check_value = &field0_check_value;
281 fields[0].check_mask = &field0_check_mask;
282
283 fields[1].tap = target->tap;
284 fields[1].num_bits = 32;
285
286 fields[2].tap = target->tap;
287 fields[2].num_bits = 1;
288 fields[2].check_value = &field2_check_value;
289 fields[2].check_mask = &field2_check_mask;
290
291 jtag_set_end_state(TAP_IDLE);
292 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
293 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
294
295 /* repeat until all words have been collected */
296 int attempts = 0;
297 while (words_done < num_words)
298 {
299 /* schedule reads */
300 words_scheduled = 0;
301 for (i = words_done; i < num_words; i++)
302 {
303 fields[0].in_value = &field0[i];
304
305 jtag_add_pathmove(3, path);
306
307 fields[1].in_value = (uint8_t *)(field1 + i);
308
309 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
310
311 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
312
313 words_scheduled++;
314 }
315
316 if ((retval = jtag_execute_queue()) != ERROR_OK)
317 {
318 LOG_ERROR("JTAG error while receiving data from debug handler");
319 break;
320 }
321
322 /* examine results */
323 for (i = words_done; i < num_words; i++)
324 {
325 			if (!(field0[i] & 1))
326 {
327 /* move backwards if necessary */
328 int j;
329 for (j = i; j < num_words - 1; j++)
330 {
331 field0[j] = field0[j + 1];
332 field1[j] = field1[j + 1];
333 }
334 words_scheduled--;
335 }
336 }
337 if (words_scheduled == 0)
338 {
339 if (attempts++==1000)
340 {
341 				LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
342 retval = ERROR_TARGET_TIMEOUT;
343 break;
344 }
345 }
346
347 words_done += words_scheduled;
348 }
349
350 for (i = 0; i < num_words; i++)
351 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
352
353 	free(field0);	/* field0 is heap-allocated above, too */
	free(field1);
354
355 return retval;
356 }
357
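/* Read one word from the debug handler's TX register, waiting up to one
 * second for data to become available.  With consume == 0 the TX_READY flag
 * is left set, so the same value can be read again later.
 */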
358 static int xscale_read_tx(struct target *target, int consume)
359 {
360 struct xscale_common *xscale = target_to_xscale(target);
361 tap_state_t path[3];
362 tap_state_t noconsume_path[6];
363 int retval;
364 struct timeval timeout, now;
365 struct scan_field fields[3];
366 uint8_t field0_in = 0x0;
367 uint8_t field0_check_value = 0x2;
368 uint8_t field0_check_mask = 0x6;
369 uint8_t field2_check_value = 0x0;
370 uint8_t field2_check_mask = 0x1;
371
372 jtag_set_end_state(TAP_IDLE);
373
374 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
375
376 path[0] = TAP_DRSELECT;
377 path[1] = TAP_DRCAPTURE;
378 path[2] = TAP_DRSHIFT;
379
380 noconsume_path[0] = TAP_DRSELECT;
381 noconsume_path[1] = TAP_DRCAPTURE;
382 noconsume_path[2] = TAP_DREXIT1;
383 noconsume_path[3] = TAP_DRPAUSE;
384 noconsume_path[4] = TAP_DREXIT2;
385 noconsume_path[5] = TAP_DRSHIFT;
386
387 memset(&fields, 0, sizeof fields);
388
389 fields[0].tap = target->tap;
390 fields[0].num_bits = 3;
391 fields[0].in_value = &field0_in;
392
393 fields[1].tap = target->tap;
394 fields[1].num_bits = 32;
395 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
396
397 fields[2].tap = target->tap;
398 fields[2].num_bits = 1;
399 uint8_t tmp;
400 fields[2].in_value = &tmp;
401
402 gettimeofday(&timeout, NULL);
403 timeval_add_time(&timeout, 1, 0);
404
405 for (;;)
406 {
407 /* if we want to consume the register content (i.e. clear TX_READY),
408 * we have to go straight from Capture-DR to Shift-DR
409 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
410 */
411 if (consume)
412 jtag_add_pathmove(3, path);
413 else
414 {
415 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
416 }
417
418 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
419
420 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
421 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
422
423 if ((retval = jtag_execute_queue()) != ERROR_OK)
424 {
425 LOG_ERROR("JTAG error while reading TX");
426 return ERROR_TARGET_TIMEOUT;
427 }
428
429 gettimeofday(&now, NULL);
430 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
431 {
432 LOG_ERROR("time out reading TX register");
433 return ERROR_TARGET_TIMEOUT;
434 }
435 if (!((!(field0_in & 1)) && consume))
436 {
437 goto done;
438 }
439 if (debug_level >= 3)
440 {
441 LOG_DEBUG("waiting 100ms");
442 alive_sleep(100); /* avoid flooding the logs */
443 } else
444 {
445 keep_alive();
446 }
447 }
448 done:
449
450 if (!(field0_in & 1))
451 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
452
453 return ERROR_OK;
454 }
455
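/* Hand the word staged in the RX register cache entry to the debug handler:
 * poll until any previous word has been consumed, then scan the value in and
 * set rx_valid.
 */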
456 static int xscale_write_rx(struct target *target)
457 {
458 struct xscale_common *xscale = target_to_xscale(target);
459 int retval;
460 struct timeval timeout, now;
461 struct scan_field fields[3];
462 uint8_t field0_out = 0x0;
463 uint8_t field0_in = 0x0;
464 uint8_t field0_check_value = 0x2;
465 uint8_t field0_check_mask = 0x6;
466 uint8_t field2 = 0x0;
467 uint8_t field2_check_value = 0x0;
468 uint8_t field2_check_mask = 0x1;
469
470 jtag_set_end_state(TAP_IDLE);
471
472 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
473
474 memset(&fields, 0, sizeof fields);
475
476 fields[0].tap = target->tap;
477 fields[0].num_bits = 3;
478 fields[0].out_value = &field0_out;
479 fields[0].in_value = &field0_in;
480
481 fields[1].tap = target->tap;
482 fields[1].num_bits = 32;
483 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
484
485 fields[2].tap = target->tap;
486 fields[2].num_bits = 1;
487 fields[2].out_value = &field2;
488 uint8_t tmp;
489 fields[2].in_value = &tmp;
490
491 gettimeofday(&timeout, NULL);
492 timeval_add_time(&timeout, 1, 0);
493
494 /* poll until rx_read is low */
495 LOG_DEBUG("polling RX");
496 for (;;)
497 {
498 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
499
500 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
501 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
502
503 if ((retval = jtag_execute_queue()) != ERROR_OK)
504 {
505 LOG_ERROR("JTAG error while writing RX");
506 return retval;
507 }
508
509 gettimeofday(&now, NULL);
510 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
511 {
512 LOG_ERROR("time out writing RX register");
513 return ERROR_TARGET_TIMEOUT;
514 }
515 if (!(field0_in & 1))
516 goto done;
517 if (debug_level >= 3)
518 {
519 LOG_DEBUG("waiting 100ms");
520 alive_sleep(100); /* avoid flooding the logs */
521 } else
522 {
523 keep_alive();
524 }
525 }
526 done:
527
528 /* set rx_valid */
529 field2 = 0x1;
530 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
531
532 if ((retval = jtag_execute_queue()) != ERROR_OK)
533 {
534 LOG_ERROR("JTAG error while writing RX");
535 return retval;
536 }
537
538 return ERROR_OK;
539 }
540
541 /* send count elements of size byte to the debug handler */
542 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
543 {
544 uint32_t t[3];
545 int bits[3];
546 int retval;
547 int done_count = 0;
548
549 jtag_set_end_state(TAP_IDLE);
550
551 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
552
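	/* field layout matches the DBGRX scan used in xscale_write_rx:
	 * 3 status bits, 32 data bits, and a final rx_valid bit that is always set */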
553 bits[0]=3;
554 t[0]=0;
555 bits[1]=32;
556 t[2]=1;
557 bits[2]=1;
558 int endianness = target->endianness;
559 while (done_count++ < count)
560 {
561 switch (size)
562 {
563 case 4:
564 if (endianness == TARGET_LITTLE_ENDIAN)
565 {
566 t[1]=le_to_h_u32(buffer);
567 } else
568 {
569 t[1]=be_to_h_u32(buffer);
570 }
571 break;
572 case 2:
573 if (endianness == TARGET_LITTLE_ENDIAN)
574 {
575 t[1]=le_to_h_u16(buffer);
576 } else
577 {
578 t[1]=be_to_h_u16(buffer);
579 }
580 break;
581 case 1:
582 t[1]=buffer[0];
583 break;
584 default:
585 LOG_ERROR("BUG: size neither 4, 2 nor 1");
586 return ERROR_INVALID_ARGUMENTS;
587 }
588 jtag_add_dr_out(target->tap,
589 3,
590 bits,
591 t,
592 jtag_set_end_state(TAP_IDLE));
593 buffer += size;
594 }
595
596 if ((retval = jtag_execute_queue()) != ERROR_OK)
597 {
598 LOG_ERROR("JTAG error while sending data to debug handler");
599 return retval;
600 }
601
602 return ERROR_OK;
603 }
604
605 static int xscale_send_u32(struct target *target, uint32_t value)
606 {
607 struct xscale_common *xscale = target_to_xscale(target);
608
609 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
610 return xscale_write_rx(target);
611 }
612
613 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
614 {
615 struct xscale_common *xscale = target_to_xscale(target);
616 int retval;
617 struct scan_field fields[3];
618 uint8_t field0 = 0x0;
619 uint8_t field0_check_value = 0x2;
620 uint8_t field0_check_mask = 0x7;
621 uint8_t field2 = 0x0;
622 uint8_t field2_check_value = 0x0;
623 uint8_t field2_check_mask = 0x1;
624
625 if (hold_rst != -1)
626 xscale->hold_rst = hold_rst;
627
628 if (ext_dbg_brk != -1)
629 xscale->external_debug_break = ext_dbg_brk;
630
631 jtag_set_end_state(TAP_IDLE);
632 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
633
634 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
635 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
636
637 memset(&fields, 0, sizeof fields);
638
639 fields[0].tap = target->tap;
640 fields[0].num_bits = 3;
641 fields[0].out_value = &field0;
642 uint8_t tmp;
643 fields[0].in_value = &tmp;
644
645 fields[1].tap = target->tap;
646 fields[1].num_bits = 32;
647 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
648
649 fields[2].tap = target->tap;
650 fields[2].num_bits = 1;
651 fields[2].out_value = &field2;
652 uint8_t tmp2;
653 fields[2].in_value = &tmp2;
654
655 jtag_add_dr_scan(3, fields, jtag_get_end_state());
656
657 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
658 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
659
660 if ((retval = jtag_execute_queue()) != ERROR_OK)
661 {
662 LOG_ERROR("JTAG error while writing DCSR");
663 return retval;
664 }
665
666 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
667 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
668
669 return ERROR_OK;
670 }
671
672 /* compute the parity of a 32-bit word: 0 if the number of set bits is even, 1 if odd */
673 static unsigned int parity (unsigned int v)
674 {
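	/* fold the word down to a 4-bit index, then use 0x6996 (the parity of
	 * each value 0..15) as a 16-entry lookup table */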
675 // unsigned int ov = v;
676 v ^= v >> 16;
677 v ^= v >> 8;
678 v ^= v >> 4;
679 v &= 0xf;
680 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
681 return (0x6996 >> v) & 1;
682 }
683
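/* Load one 8-word cache line into the mini-ICache using the LDIC JTAG
 * instruction.  The line is selected by its virtual address; each of the
 * eight instruction words is followed by a parity bit.
 */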
684 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
685 {
686 uint8_t packet[4];
687 uint8_t cmd;
688 int word;
689 struct scan_field fields[2];
690
691 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
692
693 /* LDIC into IR */
694 jtag_set_end_state(TAP_IDLE);
695 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
696
697 /* CMD is b011 to load a cacheline into the Mini ICache.
698 * Loading into the main ICache is deprecated, and unused.
699 * It's followed by three zero bits, and 27 address bits.
700 */
701 buf_set_u32(&cmd, 0, 6, 0x3);
702
703 /* virtual address of desired cache line */
704 buf_set_u32(packet, 0, 27, va >> 5);
705
706 memset(&fields, 0, sizeof fields);
707
708 fields[0].tap = target->tap;
709 fields[0].num_bits = 6;
710 fields[0].out_value = &cmd;
711
712 fields[1].tap = target->tap;
713 fields[1].num_bits = 27;
714 fields[1].out_value = packet;
715
716 jtag_add_dr_scan(2, fields, jtag_get_end_state());
717
718 /* rest of packet is a cacheline: 8 instructions, with parity */
719 fields[0].num_bits = 32;
720 fields[0].out_value = packet;
721
722 fields[1].num_bits = 1;
723 fields[1].out_value = &cmd;
724
725 for (word = 0; word < 8; word++)
726 {
727 buf_set_u32(packet, 0, 32, buffer[word]);
728
729 uint32_t value;
730 memcpy(&value, packet, sizeof(uint32_t));
731 cmd = parity(value);
732
733 jtag_add_dr_scan(2, fields, jtag_get_end_state());
734 }
735
736 return jtag_execute_queue();
737 }
738
739 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
740 {
741 uint8_t packet[4];
742 uint8_t cmd;
743 struct scan_field fields[2];
744
745 jtag_set_end_state(TAP_IDLE);
746 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
747
748 /* CMD for invalidate IC line b000, bits [6:4] b000 */
749 buf_set_u32(&cmd, 0, 6, 0x0);
750
751 /* virtual address of desired cache line */
752 buf_set_u32(packet, 0, 27, va >> 5);
753
754 memset(&fields, 0, sizeof fields);
755
756 fields[0].tap = target->tap;
757 fields[0].num_bits = 6;
758 fields[0].out_value = &cmd;
759
760 fields[1].tap = target->tap;
761 fields[1].num_bits = 27;
762 fields[1].out_value = packet;
763
764 jtag_add_dr_scan(2, fields, jtag_get_end_state());
765
766 return ERROR_OK;
767 }
768
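/* Rebuild the low (0x0) and high (0xffff0000) exception vector tables in the
 * mini-ICache.  Vector 0 always branches to the debug handler; the remaining
 * entries come from user-configured static vectors or from whatever is
 * currently in target memory.
 */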
769 static int xscale_update_vectors(struct target *target)
770 {
771 struct xscale_common *xscale = target_to_xscale(target);
772 int i;
773 int retval;
774
775 uint32_t low_reset_branch, high_reset_branch;
776
777 for (i = 1; i < 8; i++)
778 {
779 /* if there's a static vector specified for this exception, override */
780 if (xscale->static_high_vectors_set & (1 << i))
781 {
782 xscale->high_vectors[i] = xscale->static_high_vectors[i];
783 }
784 else
785 {
786 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
787 if (retval == ERROR_TARGET_TIMEOUT)
788 return retval;
789 if (retval != ERROR_OK)
790 {
791 /* Some of these reads will fail as part of normal execution */
792 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
793 }
794 }
795 }
796
797 for (i = 1; i < 8; i++)
798 {
799 if (xscale->static_low_vectors_set & (1 << i))
800 {
801 xscale->low_vectors[i] = xscale->static_low_vectors[i];
802 }
803 else
804 {
805 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
806 if (retval == ERROR_TARGET_TIMEOUT)
807 return retval;
808 if (retval != ERROR_OK)
809 {
810 /* Some of these reads will fail as part of normal execution */
811 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
812 }
813 }
814 }
815
816 /* calculate branches to debug handler */
817 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
818 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
819
820 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
821 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
822
823 /* invalidate and load exception vectors in mini i-cache */
824 xscale_invalidate_ic_line(target, 0x0);
825 xscale_invalidate_ic_line(target, 0xffff0000);
826
827 xscale_load_ic(target, 0x0, xscale->low_vectors);
828 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
829
830 return ERROR_OK;
831 }
832
833 static int xscale_arch_state(struct target *target)
834 {
835 struct xscale_common *xscale = target_to_xscale(target);
836 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
837
838 static const char *state[] =
839 {
840 "disabled", "enabled"
841 };
842
843 static const char *arch_dbg_reason[] =
844 {
845 "", "\n(processor reset)", "\n(trace buffer full)"
846 };
847
848 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
849 {
850 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
851 return ERROR_INVALID_ARGUMENTS;
852 }
853
854 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
855 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
856 "MMU: %s, D-Cache: %s, I-Cache: %s"
857 "%s",
858 armv4_5_state_strings[armv4_5->core_state],
859 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
860 arm_mode_name(armv4_5->core_mode),
861 buf_get_u32(armv4_5->cpsr->value, 0, 32),
862 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
863 state[xscale->armv4_5_mmu.mmu_enabled],
864 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
865 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
866 arch_dbg_reason[xscale->arch_debug_reason]);
867
868 return ERROR_OK;
869 }
870
871 static int xscale_poll(struct target *target)
872 {
873 int retval = ERROR_OK;
874
875 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
876 {
877 enum target_state previous_state = target->state;
878 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
879 {
880
881 /* there's data to read from the tx register, we entered debug state */
882 target->state = TARGET_HALTED;
883
884 /* process debug entry, fetching current mode regs */
885 retval = xscale_debug_entry(target);
886 }
887 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
888 {
889 LOG_USER("error while polling TX register, reset CPU");
890 			/* here we "lie" so GDB won't get stuck and a reset can be performed */
891 target->state = TARGET_HALTED;
892 }
893
894 /* debug_entry could have overwritten target state (i.e. immediate resume)
895 * don't signal event handlers in that case
896 */
897 if (target->state != TARGET_HALTED)
898 return ERROR_OK;
899
900 /* if target was running, signal that we halted
901 * otherwise we reentered from debug execution */
902 if (previous_state == TARGET_RUNNING)
903 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
904 else
905 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
906 }
907
908 return retval;
909 }
910
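/* Process entry into debug state: read the register dump the debug handler
 * sends (r0, pc, r1..r7, cpsr, then banked registers), work out the debug
 * reason from the DCSR method-of-entry field, and collect trace data if
 * tracing was enabled.
 */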
911 static int xscale_debug_entry(struct target *target)
912 {
913 struct xscale_common *xscale = target_to_xscale(target);
914 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
915 uint32_t pc;
916 uint32_t buffer[10];
917 int i;
918 int retval;
919 uint32_t moe;
920
921 /* clear external dbg break (will be written on next DCSR read) */
922 xscale->external_debug_break = 0;
923 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
924 return retval;
925
926 /* get r0, pc, r1 to r7 and cpsr */
927 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
928 return retval;
929
930 /* move r0 from buffer to register cache */
931 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
932 armv4_5->core_cache->reg_list[0].dirty = 1;
933 armv4_5->core_cache->reg_list[0].valid = 1;
934 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
935
936 /* move pc from buffer to register cache */
937 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
938 armv4_5->core_cache->reg_list[15].dirty = 1;
939 armv4_5->core_cache->reg_list[15].valid = 1;
940 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
941
942 /* move data from buffer to register cache */
943 for (i = 1; i <= 7; i++)
944 {
945 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
946 armv4_5->core_cache->reg_list[i].dirty = 1;
947 armv4_5->core_cache->reg_list[i].valid = 1;
948 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
949 }
950
951 arm_set_cpsr(armv4_5, buffer[9]);
952 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
953
954 if (!is_arm_mode(armv4_5->core_mode))
955 {
956 target->state = TARGET_UNKNOWN;
957 LOG_ERROR("cpsr contains invalid mode value - communication failure");
958 return ERROR_TARGET_FAILURE;
959 }
960 LOG_DEBUG("target entered debug state in %s mode",
961 arm_mode_name(armv4_5->core_mode));
962
963 if (buffer[9] & 0x20)
964 armv4_5->core_state = ARMV4_5_STATE_THUMB;
965 else
966 armv4_5->core_state = ARMV4_5_STATE_ARM;
967
968
969 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
970 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
971 {
972 xscale_receive(target, buffer, 8);
973 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
974 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
975 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
976 }
977 else
978 {
979 /* r8 to r14, but no spsr */
980 xscale_receive(target, buffer, 7);
981 }
982
983 /* move data from buffer to register cache */
984 for (i = 8; i <= 14; i++)
985 {
986 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
987 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
988 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
989 }
990
991 /* examine debug reason */
992 xscale_read_dcsr(target);
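	/* MOE = method of entry, DCSR bits [4:2]: why the debug handler was entered */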
993 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
994
995 /* stored PC (for calculating fixup) */
996 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
997
998 switch (moe)
999 {
1000 case 0x0: /* Processor reset */
1001 target->debug_reason = DBG_REASON_DBGRQ;
1002 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1003 pc -= 4;
1004 break;
1005 case 0x1: /* Instruction breakpoint hit */
1006 target->debug_reason = DBG_REASON_BREAKPOINT;
1007 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1008 pc -= 4;
1009 break;
1010 case 0x2: /* Data breakpoint hit */
1011 target->debug_reason = DBG_REASON_WATCHPOINT;
1012 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1013 pc -= 4;
1014 break;
1015 case 0x3: /* BKPT instruction executed */
1016 target->debug_reason = DBG_REASON_BREAKPOINT;
1017 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1018 pc -= 4;
1019 break;
1020 case 0x4: /* Ext. debug event */
1021 target->debug_reason = DBG_REASON_DBGRQ;
1022 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1023 pc -= 4;
1024 break;
1025 		case 0x5: /* Vector trap occurred */
1026 target->debug_reason = DBG_REASON_BREAKPOINT;
1027 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1028 pc -= 4;
1029 break;
1030 case 0x6: /* Trace buffer full break */
1031 target->debug_reason = DBG_REASON_DBGRQ;
1032 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1033 pc -= 4;
1034 break;
1035 case 0x7: /* Reserved (may flag Hot-Debug support) */
1036 default:
1037 LOG_ERROR("Method of Entry is 'Reserved'");
1038 exit(-1);
1039 break;
1040 }
1041
1042 /* apply PC fixup */
1043 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1044
1045 /* on the first debug entry, identify cache type */
1046 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1047 {
1048 uint32_t cache_type_reg;
1049
1050 /* read cp15 cache type register */
1051 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1052 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1053
1054 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1055 }
1056
1057 /* examine MMU and Cache settings */
1058 /* read cp15 control register */
1059 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1060 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1061 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1062 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1063 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1064
1065 /* tracing enabled, read collected trace data */
1066 if (xscale->trace.buffer_enabled)
1067 {
1068 xscale_read_trace(target);
1069 xscale->trace.buffer_fill--;
1070
1071 /* resume if we're still collecting trace data */
1072 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1073 && (xscale->trace.buffer_fill > 0))
1074 {
1075 xscale_resume(target, 1, 0x0, 1, 0);
1076 }
1077 else
1078 {
1079 xscale->trace.buffer_enabled = 0;
1080 }
1081 }
1082
1083 return ERROR_OK;
1084 }
1085
1086 static int xscale_halt(struct target *target)
1087 {
1088 struct xscale_common *xscale = target_to_xscale(target);
1089
1090 LOG_DEBUG("target->state: %s",
1091 target_state_name(target));
1092
1093 if (target->state == TARGET_HALTED)
1094 {
1095 LOG_DEBUG("target was already halted");
1096 return ERROR_OK;
1097 }
1098 else if (target->state == TARGET_UNKNOWN)
1099 {
1100 		/* this must not happen for an XScale target */
1101 LOG_ERROR("target was in unknown state when halt was requested");
1102 return ERROR_TARGET_INVALID;
1103 }
1104 else if (target->state == TARGET_RESET)
1105 {
1106 LOG_DEBUG("target->state == TARGET_RESET");
1107 }
1108 else
1109 {
1110 /* assert external dbg break */
1111 xscale->external_debug_break = 1;
1112 xscale_read_dcsr(target);
1113
1114 target->debug_reason = DBG_REASON_DBGRQ;
1115 }
1116
1117 return ERROR_OK;
1118 }
1119
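/* XScale has no hardware single-step; stepping is emulated by programming
 * IBCR0 as an instruction breakpoint at the next instruction's address
 * (and clearing it again after the step).
 */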
1120 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1121 {
1122 struct xscale_common *xscale = target_to_xscale(target);
1123 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1124 int retval;
1125
1126 if (xscale->ibcr0_used)
1127 {
1128 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1129
1130 if (ibcr0_bp)
1131 {
1132 xscale_unset_breakpoint(target, ibcr0_bp);
1133 }
1134 else
1135 {
1136 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1137 exit(-1);
1138 }
1139 }
1140
1141 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1142 return retval;
1143
1144 return ERROR_OK;
1145 }
1146
1147 static int xscale_disable_single_step(struct target *target)
1148 {
1149 struct xscale_common *xscale = target_to_xscale(target);
1150 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1151 int retval;
1152
1153 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1154 return retval;
1155
1156 return ERROR_OK;
1157 }
1158
1159 static void xscale_enable_watchpoints(struct target *target)
1160 {
1161 struct watchpoint *watchpoint = target->watchpoints;
1162
1163 while (watchpoint)
1164 {
1165 if (watchpoint->set == 0)
1166 xscale_set_watchpoint(target, watchpoint);
1167 watchpoint = watchpoint->next;
1168 }
1169 }
1170
1171 static void xscale_enable_breakpoints(struct target *target)
1172 {
1173 struct breakpoint *breakpoint = target->breakpoints;
1174
1175 /* set any pending breakpoints */
1176 while (breakpoint)
1177 {
1178 if (breakpoint->set == 0)
1179 xscale_set_breakpoint(target, breakpoint);
1180 breakpoint = breakpoint->next;
1181 }
1182 }
1183
1184 static int xscale_resume(struct target *target, int current,
1185 uint32_t address, int handle_breakpoints, int debug_execution)
1186 {
1187 struct xscale_common *xscale = target_to_xscale(target);
1188 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
1189 struct breakpoint *breakpoint = target->breakpoints;
1190 uint32_t current_pc;
1191 int retval;
1192 int i;
1193
1194 LOG_DEBUG("-");
1195
1196 if (target->state != TARGET_HALTED)
1197 {
1198 LOG_WARNING("target not halted");
1199 return ERROR_TARGET_NOT_HALTED;
1200 }
1201
1202 if (!debug_execution)
1203 {
1204 target_free_all_working_areas(target);
1205 }
1206
1207 /* update vector tables */
1208 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1209 return retval;
1210
1211 /* current = 1: continue on current pc, otherwise continue at <address> */
1212 if (!current)
1213 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1214
1215 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1216
1217 /* if we're at the reset vector, we have to simulate the branch */
1218 if (current_pc == 0x0)
1219 {
1220 arm_simulate_step(target, NULL);
1221 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1222 }
1223
1224 /* the front-end may request us not to handle breakpoints */
1225 if (handle_breakpoints)
1226 {
1227 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1228 {
1229 uint32_t next_pc;
1230
1231 /* there's a breakpoint at the current PC, we have to step over it */
1232 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1233 xscale_unset_breakpoint(target, breakpoint);
1234
1235 /* calculate PC of next instruction */
1236 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1237 {
1238 uint32_t current_opcode;
1239 target_read_u32(target, current_pc, &current_opcode);
1240 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1241 }
1242
1243 LOG_DEBUG("enable single-step");
1244 xscale_enable_single_step(target, next_pc);
1245
1246 /* restore banked registers */
1247 xscale_restore_context(target);
1248
1249 /* send resume request (command 0x30 or 0x31)
1250 * clean the trace buffer if it is to be enabled (0x62) */
1251 if (xscale->trace.buffer_enabled)
1252 {
1253 xscale_send_u32(target, 0x62);
1254 xscale_send_u32(target, 0x31);
1255 }
1256 else
1257 xscale_send_u32(target, 0x30);
1258
1259 /* send CPSR */
1260 xscale_send_u32(target,
1261 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1262 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1263 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1264
1265 for (i = 7; i >= 0; i--)
1266 {
1267 /* send register */
1268 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1269 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1270 }
1271
1272 /* send PC */
1273 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1274 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1275
1276 /* wait for and process debug entry */
1277 xscale_debug_entry(target);
1278
1279 LOG_DEBUG("disable single-step");
1280 xscale_disable_single_step(target);
1281
1282 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1283 xscale_set_breakpoint(target, breakpoint);
1284 }
1285 }
1286
1287 /* enable any pending breakpoints and watchpoints */
1288 xscale_enable_breakpoints(target);
1289 xscale_enable_watchpoints(target);
1290
1291 /* restore banked registers */
1292 xscale_restore_context(target);
1293
1294 /* send resume request (command 0x30 or 0x31)
1295 * clean the trace buffer if it is to be enabled (0x62) */
1296 if (xscale->trace.buffer_enabled)
1297 {
1298 xscale_send_u32(target, 0x62);
1299 xscale_send_u32(target, 0x31);
1300 }
1301 else
1302 xscale_send_u32(target, 0x30);
1303
1304 /* send CPSR */
1305 xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
1306 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1307 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1308
1309 for (i = 7; i >= 0; i--)
1310 {
1311 /* send register */
1312 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1313 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1314 }
1315
1316 /* send PC */
1317 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1318 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1319
1320 target->debug_reason = DBG_REASON_NOTHALTED;
1321
1322 if (!debug_execution)
1323 {
1324 /* registers are now invalid */
1325 register_cache_invalidate(armv4_5->core_cache);
1326 target->state = TARGET_RUNNING;
1327 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1328 }
1329 else
1330 {
1331 target->state = TARGET_DEBUG_RUNNING;
1332 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1333 }
1334
1335 LOG_DEBUG("target resumed");
1336
1337 return ERROR_OK;
1338 }
1339
1340 static int xscale_step_inner(struct target *target, int current,
1341 uint32_t address, int handle_breakpoints)
1342 {
1343 struct xscale_common *xscale = target_to_xscale(target);
1344 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
1345 uint32_t next_pc;
1346 int retval;
1347 int i;
1348
1349 target->debug_reason = DBG_REASON_SINGLESTEP;
1350
1351 /* calculate PC of next instruction */
1352 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1353 {
1354 uint32_t current_opcode, current_pc;
1355 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1356
1357 target_read_u32(target, current_pc, &current_opcode);
1358 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1359 return retval;
1360 }
1361
1362 LOG_DEBUG("enable single-step");
1363 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1364 return retval;
1365
1366 /* restore banked registers */
1367 if ((retval = xscale_restore_context(target)) != ERROR_OK)
1368 return retval;
1369
1370 /* send resume request (command 0x30 or 0x31)
1371 * clean the trace buffer if it is to be enabled (0x62) */
1372 if (xscale->trace.buffer_enabled)
1373 {
1374 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1375 return retval;
1376 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1377 return retval;
1378 }
1379 else
1380 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1381 return retval;
1382
1383 /* send CPSR */
1384 retval = xscale_send_u32(target,
1385 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1386 if (retval != ERROR_OK)
1387 return retval;
1388 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1389 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1390
1391 for (i = 7; i >= 0; i--)
1392 {
1393 /* send register */
1394 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1395 return retval;
1396 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1397 }
1398
1399 /* send PC */
1400 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
1401 return retval;
1402 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1403
1404 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1405
1406 /* registers are now invalid */
1407 register_cache_invalidate(armv4_5->core_cache);
1408
1409 /* wait for and process debug entry */
1410 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1411 return retval;
1412
1413 LOG_DEBUG("disable single-step");
1414 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1415 return retval;
1416
1417 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1418
1419 return ERROR_OK;
1420 }
1421
1422 static int xscale_step(struct target *target, int current,
1423 uint32_t address, int handle_breakpoints)
1424 {
1425 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1426 struct breakpoint *breakpoint = target->breakpoints;
1427
1428 uint32_t current_pc;
1429 int retval;
1430
1431 if (target->state != TARGET_HALTED)
1432 {
1433 LOG_WARNING("target not halted");
1434 return ERROR_TARGET_NOT_HALTED;
1435 }
1436
1437 /* current = 1: continue on current pc, otherwise continue at <address> */
1438 if (!current)
1439 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1440
1441 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1442
1443 /* if we're at the reset vector, we have to simulate the step */
1444 if (current_pc == 0x0)
1445 {
1446 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1447 return retval;
1448 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1449
1450 target->debug_reason = DBG_REASON_SINGLESTEP;
1451 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1452
1453 return ERROR_OK;
1454 }
1455
1456 /* the front-end may request us not to handle breakpoints */
1457 if (handle_breakpoints)
1458 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1459 {
1460 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1461 return retval;
1462 }
1463
1464 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1465
1466 if (breakpoint)
1467 {
1468 xscale_set_breakpoint(target, breakpoint);
1469 }
1470
1471 LOG_DEBUG("target stepped");
1472
1473 return ERROR_OK;
1474
1475 }
1476
1477 static int xscale_assert_reset(struct target *target)
1478 {
1479 struct xscale_common *xscale = target_to_xscale(target);
1480
1481 LOG_DEBUG("target->state: %s",
1482 target_state_name(target));
1483
1484 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1485 * end up in T-L-R, which would reset JTAG
1486 */
1487 jtag_set_end_state(TAP_IDLE);
1488 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
1489
1490 /* set Hold reset, Halt mode and Trap Reset */
1491 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1492 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1493 xscale_write_dcsr(target, 1, 0);
1494
1495 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1496 xscale_jtag_set_instr(target->tap, 0x7f);
1497 jtag_execute_queue();
1498
1499 /* assert reset */
1500 jtag_add_reset(0, 1);
1501
1502 /* sleep 1ms, to be sure we fulfill any requirements */
1503 jtag_add_sleep(1000);
1504 jtag_execute_queue();
1505
1506 target->state = TARGET_RESET;
1507
1508 if (target->reset_halt)
1509 {
1510 int retval;
1511 if ((retval = target_halt(target)) != ERROR_OK)
1512 return retval;
1513 }
1514
1515 return ERROR_OK;
1516 }
1517
1518 static int xscale_deassert_reset(struct target *target)
1519 {
1520 struct xscale_common *xscale = target_to_xscale(target);
1521 struct breakpoint *breakpoint = target->breakpoints;
1522
1523 LOG_DEBUG("-");
1524
1525 xscale->ibcr_available = 2;
1526 xscale->ibcr0_used = 0;
1527 xscale->ibcr1_used = 0;
1528
1529 xscale->dbr_available = 2;
1530 xscale->dbr0_used = 0;
1531 xscale->dbr1_used = 0;
1532
1533 /* mark all hardware breakpoints as unset */
1534 while (breakpoint)
1535 {
1536 if (breakpoint->type == BKPT_HARD)
1537 {
1538 breakpoint->set = 0;
1539 }
1540 breakpoint = breakpoint->next;
1541 }
1542
1543 register_cache_invalidate(xscale->armv4_5_common.core_cache);
1544
1545 	/* FIXME: mark hardware watchpoints as unset too.  Also,
1546 * at least some of the XScale registers are invalid...
1547 */
1548
1549 /*
1550 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1551 * contents got invalidated. Safer to force that, so writing new
1552 * contents can't ever fail..
1553 */
1554 {
1555 uint32_t address;
1556 unsigned buf_cnt;
1557 const uint8_t *buffer = xscale_debug_handler;
1558 int retval;
1559
1560 /* release SRST */
1561 jtag_add_reset(0, 0);
1562
1563 /* wait 300ms; 150 and 100ms were not enough */
1564 jtag_add_sleep(300*1000);
1565
1566 jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
1567 jtag_execute_queue();
1568
1569 /* set Hold reset, Halt mode and Trap Reset */
1570 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1571 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1572 xscale_write_dcsr(target, 1, 0);
1573
1574 /* Load the debug handler into the mini-icache. Since
1575 * it's using halt mode (not monitor mode), it runs in
1576 * "Special Debug State" for access to registers, memory,
1577 * coprocessors, trace data, etc.
1578 */
1579 address = xscale->handler_address;
1580 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1581 binary_size > 0;
1582 binary_size -= buf_cnt, buffer += buf_cnt)
1583 {
1584 uint32_t cache_line[8];
1585 unsigned i;
1586
1587 buf_cnt = binary_size;
1588 if (buf_cnt > 32)
1589 buf_cnt = 32;
1590
1591 for (i = 0; i < buf_cnt; i += 4)
1592 {
1593 /* convert LE buffer to host-endian uint32_t */
1594 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1595 }
1596
1597 for (; i < 32; i += 4)
1598 {
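				/* pad the rest of the line with 0xe1a08008 ("mov r8, r8", effectively a NOP) */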
1599 cache_line[i / 4] = 0xe1a08008;
1600 }
1601
1602 /* only load addresses other than the reset vectors */
1603 if ((address % 0x400) != 0x0)
1604 {
1605 retval = xscale_load_ic(target, address,
1606 cache_line);
1607 if (retval != ERROR_OK)
1608 return retval;
1609 }
1610
1611 address += buf_cnt;
1612 	}
1613
1614 retval = xscale_load_ic(target, 0x0,
1615 xscale->low_vectors);
1616 if (retval != ERROR_OK)
1617 return retval;
1618 retval = xscale_load_ic(target, 0xffff0000,
1619 xscale->high_vectors);
1620 if (retval != ERROR_OK)
1621 return retval;
1622
1623 jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
1624
1625 jtag_add_sleep(100000);
1626
1627 /* set Hold reset, Halt mode and Trap Reset */
1628 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1629 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1630 xscale_write_dcsr(target, 1, 0);
1631
1632 /* clear Hold reset to let the target run (should enter debug handler) */
1633 xscale_write_dcsr(target, 0, 1);
1634 target->state = TARGET_RUNNING;
1635
1636 if (!target->reset_halt)
1637 {
1638 jtag_add_sleep(10000);
1639
1640 /* we should have entered debug now */
1641 xscale_debug_entry(target);
1642 target->state = TARGET_HALTED;
1643
1644 /* resume the target */
1645 xscale_resume(target, 1, 0x0, 1, 0);
1646 }
1647 }
1648
1649 return ERROR_OK;
1650 }
1651
1652 static int xscale_read_core_reg(struct target *target, struct reg *r,
1653 int num, enum armv4_5_mode mode)
1654 {
1655 /** \todo add debug handler support for core register reads */
1656 LOG_ERROR("not implemented");
1657 return ERROR_OK;
1658 }
1659
1660 static int xscale_write_core_reg(struct target *target, struct reg *r,
1661 int num, enum armv4_5_mode mode, uint32_t value)
1662 {
1663 /** \todo add debug handler support for core register writes */
1664 LOG_ERROR("not implemented");
1665 return ERROR_OK;
1666 }
1667
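/* Fetch all banked registers from the debug handler.  For each mode with
 * stale cache entries, command 0x0 is sent followed by a CPSR selecting that
 * mode; the handler returns r8..r14 and, for modes that have one, the SPSR.
 */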
1668 static int xscale_full_context(struct target *target)
1669 {
1670 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1671
1672 uint32_t *buffer;
1673
1674 int i, j;
1675
1676 LOG_DEBUG("-");
1677
1678 if (target->state != TARGET_HALTED)
1679 {
1680 LOG_WARNING("target not halted");
1681 return ERROR_TARGET_NOT_HALTED;
1682 }
1683
1684 buffer = malloc(4 * 8);
1685
1686 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1687 * we can't enter User mode on an XScale (unpredictable),
1688 * but User shares registers with SYS
1689 */
1690 for (i = 1; i < 7; i++)
1691 {
1692 int valid = 1;
1693
1694 /* check if there are invalid registers in the current mode
1695 */
1696 for (j = 0; j <= 16; j++)
1697 {
1698 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1699 valid = 0;
1700 }
1701
1702 if (!valid)
1703 {
1704 uint32_t tmp_cpsr;
1705
1706 /* request banked registers */
1707 xscale_send_u32(target, 0x0);
1708
1709 tmp_cpsr = 0x0;
1710 tmp_cpsr |= armv4_5_number_to_mode(i);
1711 tmp_cpsr |= 0xc0; /* I/F bits */
1712
1713 /* send CPSR for desired mode */
1714 xscale_send_u32(target, tmp_cpsr);
1715
1716 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1717 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1718 {
1719 xscale_receive(target, buffer, 8);
1720 				buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1721 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1722 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1723 }
1724 else
1725 {
1726 xscale_receive(target, buffer, 7);
1727 }
1728
1729 /* move data from buffer to register cache */
1730 for (j = 8; j <= 14; j++)
1731 {
1732 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1733 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1734 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1735 }
1736 }
1737 }
1738
1739 free(buffer);
1740
1741 return ERROR_OK;
1742 }
1743
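/* Write back dirty banked registers, mode by mode, using debug handler
 * command 0x1 followed by a CPSR selecting the mode to update.
 */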
1744 static int xscale_restore_context(struct target *target)
1745 {
1746 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1747
1748 int i, j;
1749
1750 if (target->state != TARGET_HALTED)
1751 {
1752 LOG_WARNING("target not halted");
1753 return ERROR_TARGET_NOT_HALTED;
1754 }
1755
1756 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1757 * we can't enter User mode on an XScale (unpredictable),
1758 * but User shares registers with SYS
1759 */
1760 for (i = 1; i < 7; i++)
1761 {
1762 int dirty = 0;
1763
1764 		/* check if there are dirty registers in this mode
1765 */
1766 for (j = 8; j <= 14; j++)
1767 {
1768 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1769 dirty = 1;
1770 }
1771
1772 /* if not USR/SYS, check if the SPSR needs to be written */
1773 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1774 {
1775 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1776 dirty = 1;
1777 }
1778
1779 if (dirty)
1780 {
1781 uint32_t tmp_cpsr;
1782
1783 /* send banked registers */
1784 xscale_send_u32(target, 0x1);
1785
1786 tmp_cpsr = 0x0;
1787 tmp_cpsr |= armv4_5_number_to_mode(i);
1788 tmp_cpsr |= 0xc0; /* I/F bits */
1789
1790 /* send CPSR for desired mode */
1791 xscale_send_u32(target, tmp_cpsr);
1792
1793 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1794 for (j = 8; j <= 14; j++)
1795 {
1796 				xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1797 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1798 }
1799
1800 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1801 {
1802 				xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1803 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1804 }
1805 }
1806 }
1807
1808 return ERROR_OK;
1809 }
1810
1811 static int xscale_read_memory(struct target *target, uint32_t address,
1812 uint32_t size, uint32_t count, uint8_t *buffer)
1813 {
1814 struct xscale_common *xscale = target_to_xscale(target);
1815 uint32_t *buf32;
1816 uint32_t i;
1817 int retval;
1818
1819 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1820
1821 if (target->state != TARGET_HALTED)
1822 {
1823 LOG_WARNING("target not halted");
1824 return ERROR_TARGET_NOT_HALTED;
1825 }
1826
1827 /* sanitize arguments */
1828 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1829 return ERROR_INVALID_ARGUMENTS;
1830
1831 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1832 return ERROR_TARGET_UNALIGNED_ACCESS;
1833
1834 /* send memory read request (command 0x1n, n: access size) */
1835 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1836 return retval;
1837
1838 /* send base address for read request */
1839 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1840 return retval;
1841
1842 /* send number of requested data words */
1843 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1844 return retval;
1845
1846 /* receive data from target (count times 32-bit words in host endianness) */
1847 buf32 = malloc(4 * count);
1848 	if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
	{
		free(buf32);	/* don't leak the receive buffer on error */
		return retval;
	}
1850
1851 /* extract data from host-endian buffer into byte stream */
1852 for (i = 0; i < count; i++)
1853 {
1854 switch (size)
1855 {
1856 case 4:
1857 target_buffer_set_u32(target, buffer, buf32[i]);
1858 buffer += 4;
1859 break;
1860 case 2:
1861 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1862 buffer += 2;
1863 break;
1864 case 1:
1865 *buffer++ = buf32[i] & 0xff;
1866 break;
1867 default:
1868 LOG_ERROR("invalid read size");
1869 return ERROR_INVALID_ARGUMENTS;
1870 }
1871 }
1872
1873 free(buf32);
1874
1875 /* examine DCSR, to see if Sticky Abort (SA) got set */
1876 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1877 return retval;
1878 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1879 {
1880 /* clear SA bit */
1881 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1882 return retval;
1883
1884 return ERROR_TARGET_DATA_ABORT;
1885 }
1886
1887 return ERROR_OK;
1888 }
1889
1890 static int xscale_write_memory(struct target *target, uint32_t address,
1891 uint32_t size, uint32_t count, uint8_t *buffer)
1892 {
1893 struct xscale_common *xscale = target_to_xscale(target);
1894 int retval;
1895
1896 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1897
1898 if (target->state != TARGET_HALTED)
1899 {
1900 LOG_WARNING("target not halted");
1901 return ERROR_TARGET_NOT_HALTED;
1902 }
1903
1904 /* sanitize arguments */
1905 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1906 return ERROR_INVALID_ARGUMENTS;
1907
1908 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1909 return ERROR_TARGET_UNALIGNED_ACCESS;
1910
1911 /* send memory write request (command 0x2n, n: access size) */
1912 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1913 return retval;
1914
1915 	/* send base address for write request */
1916 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1917 return retval;
1918
1919 	/* send number of requested data words to be written */
1920 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1921 return retval;
1922
1923 /* extract data from host-endian buffer into byte stream */
1924 #if 0
1925 for (i = 0; i < count; i++)
1926 {
1927 switch (size)
1928 {
1929 case 4:
1930 value = target_buffer_get_u32(target, buffer);
1931 xscale_send_u32(target, value);
1932 buffer += 4;
1933 break;
1934 case 2:
1935 value = target_buffer_get_u16(target, buffer);
1936 xscale_send_u32(target, value);
1937 buffer += 2;
1938 break;
1939 case 1:
1940 value = *buffer;
1941 xscale_send_u32(target, value);
1942 buffer += 1;
1943 break;
1944 default:
1945 LOG_ERROR("should never get here");
1946 exit(-1);
1947 }
1948 }
1949 #endif
1950 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1951 return retval;
1952
1953 /* examine DCSR, to see if Sticky Abort (SA) got set */
1954 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1955 return retval;
1956 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1957 {
1958 /* clear SA bit */
1959 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1960 return retval;
1961
1962 return ERROR_TARGET_DATA_ABORT;
1963 }
1964
1965 return ERROR_OK;
1966 }
1967
1968 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
1969 uint32_t count, uint8_t *buffer)
1970 {
1971 return xscale_write_memory(target, address, 4, count, buffer);
1972 }
1973
1974 static uint32_t xscale_get_ttb(struct target *target)
1975 {
1976 struct xscale_common *xscale = target_to_xscale(target);
1977 uint32_t ttb;
1978
1979 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1980 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1981
1982 return ttb;
1983 }
1984
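/* MMU/cache control works by patching the CP15 control register
 * (bit 0: MMU, bit 2: data/unified cache, bit 12: instruction cache)
 * and issuing debug handler commands:
 *   0x50 <addr>   clean DCache using the 64KiB region at cache_clean_address
 *   0x51          invalidate DCache
 *   0x52          invalidate ICache
 *   0x53          cpwait, to ensure outstanding CP15 operations complete
 */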
1985 static void xscale_disable_mmu_caches(struct target *target, int mmu,
1986 int d_u_cache, int i_cache)
1987 {
1988 struct xscale_common *xscale = target_to_xscale(target);
1989 uint32_t cp15_control;
1990
1991 /* read cp15 control register */
1992 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1993 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1994
1995 if (mmu)
1996 cp15_control &= ~0x1U;
1997
1998 if (d_u_cache)
1999 {
2000 /* clean DCache */
2001 xscale_send_u32(target, 0x50);
2002 xscale_send_u32(target, xscale->cache_clean_address);
2003
2004 /* invalidate DCache */
2005 xscale_send_u32(target, 0x51);
2006
2007 cp15_control &= ~0x4U;
2008 }
2009
2010 if (i_cache)
2011 {
2012 /* invalidate ICache */
2013 xscale_send_u32(target, 0x52);
2014 cp15_control &= ~0x1000U;
2015 }
2016
2017 /* write new cp15 control register */
2018 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2019
2020 /* execute cpwait to ensure outstanding operations complete */
2021 xscale_send_u32(target, 0x53);
2022 }
2023
2024 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2025 int d_u_cache, int i_cache)
2026 {
2027 struct xscale_common *xscale = target_to_xscale(target);
2028 uint32_t cp15_control;
2029
2030 /* read cp15 control register */
2031 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2032 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2033
2034 if (mmu)
2035 cp15_control |= 0x1U;
2036
2037 if (d_u_cache)
2038 cp15_control |= 0x4U;
2039
2040 if (i_cache)
2041 cp15_control |= 0x1000U;
2042
2043 /* write new cp15 control register */
2044 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2045
2046 /* execute cpwait to ensure outstanding operations complete */
2047 xscale_send_u32(target, 0x53);
2048 }
2049
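/* Breakpoint handling: hardware breakpoints use the two instruction
 * breakpoint registers IBCR0/IBCR1 (the address with bit 0 set enables
 * the comparator); software breakpoints save the original opcode and
 * replace it with the ARM or Thumb BKPT instruction chosen in
 * xscale_init_arch_info().
 */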
2050 static int xscale_set_breakpoint(struct target *target,
2051 struct breakpoint *breakpoint)
2052 {
2053 int retval;
2054 struct xscale_common *xscale = target_to_xscale(target);
2055
2056 if (target->state != TARGET_HALTED)
2057 {
2058 LOG_WARNING("target not halted");
2059 return ERROR_TARGET_NOT_HALTED;
2060 }
2061
2062 if (breakpoint->set)
2063 {
2064 LOG_WARNING("breakpoint already set");
2065 return ERROR_OK;
2066 }
2067
2068 if (breakpoint->type == BKPT_HARD)
2069 {
2070 uint32_t value = breakpoint->address | 1;
2071 if (!xscale->ibcr0_used)
2072 {
2073 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2074 xscale->ibcr0_used = 1;
2075 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2076 }
2077 else if (!xscale->ibcr1_used)
2078 {
2079 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2080 xscale->ibcr1_used = 1;
2081 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2082 }
2083 else
2084 {
2085 LOG_ERROR("BUG: no hardware comparator available");
2086 return ERROR_OK;
2087 }
2088 }
2089 else if (breakpoint->type == BKPT_SOFT)
2090 {
2091 if (breakpoint->length == 4)
2092 {
2093 /* keep the original instruction in target endianness */
2094 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2095 {
2096 return retval;
2097 }
2098 			/* replace it with the ARM breakpoint instruction (xscale->arm_bkpt is host endian) */
2099 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2100 {
2101 return retval;
2102 }
2103 }
2104 else
2105 {
2106 /* keep the original instruction in target endianness */
2107 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2108 {
2109 return retval;
2110 }
2111 			/* replace it with the Thumb breakpoint instruction (xscale->thumb_bkpt is host endian) */
2112 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2113 {
2114 return retval;
2115 }
2116 }
2117 breakpoint->set = 1;
2118 }
2119
2120 return ERROR_OK;
2121 }
2122
2123 static int xscale_add_breakpoint(struct target *target,
2124 struct breakpoint *breakpoint)
2125 {
2126 struct xscale_common *xscale = target_to_xscale(target);
2127
2128 if (target->state != TARGET_HALTED)
2129 {
2130 LOG_WARNING("target not halted");
2131 return ERROR_TARGET_NOT_HALTED;
2132 }
2133
2134 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2135 {
2136 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2137 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2138 }
2139
2140 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2141 {
2142 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2143 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2144 }
2145
2146 if (breakpoint->type == BKPT_HARD)
2147 {
2148 xscale->ibcr_available--;
2149 }
2150
2151 return ERROR_OK;
2152 }
2153
2154 static int xscale_unset_breakpoint(struct target *target,
2155 struct breakpoint *breakpoint)
2156 {
2157 int retval;
2158 struct xscale_common *xscale = target_to_xscale(target);
2159
2160 if (target->state != TARGET_HALTED)
2161 {
2162 LOG_WARNING("target not halted");
2163 return ERROR_TARGET_NOT_HALTED;
2164 }
2165
2166 if (!breakpoint->set)
2167 {
2168 LOG_WARNING("breakpoint not set");
2169 return ERROR_OK;
2170 }
2171
2172 if (breakpoint->type == BKPT_HARD)
2173 {
2174 if (breakpoint->set == 1)
2175 {
2176 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2177 xscale->ibcr0_used = 0;
2178 }
2179 else if (breakpoint->set == 2)
2180 {
2181 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2182 xscale->ibcr1_used = 0;
2183 }
2184 breakpoint->set = 0;
2185 }
2186 else
2187 {
2188 /* restore original instruction (kept in target endianness) */
2189 if (breakpoint->length == 4)
2190 {
2191 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2192 {
2193 return retval;
2194 }
2195 }
2196 else
2197 {
2198 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2199 {
2200 return retval;
2201 }
2202 }
2203 breakpoint->set = 0;
2204 }
2205
2206 return ERROR_OK;
2207 }
2208
2209 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2210 {
2211 struct xscale_common *xscale = target_to_xscale(target);
2212
2213 if (target->state != TARGET_HALTED)
2214 {
2215 LOG_WARNING("target not halted");
2216 return ERROR_TARGET_NOT_HALTED;
2217 }
2218
2219 if (breakpoint->set)
2220 {
2221 xscale_unset_breakpoint(target, breakpoint);
2222 }
2223
2224 if (breakpoint->type == BKPT_HARD)
2225 xscale->ibcr_available++;
2226
2227 return ERROR_OK;
2228 }
2229
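/* Watchpoint handling: the two data breakpoint registers DBR0/DBR1 hold
 * the watched addresses, while DBCON enables them; bits [1:0] control
 * DBR0 and bits [3:2] control DBR1 (0x1 = write, 0x2 = access, 0x3 = read,
 * matching the switch below).
 */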
2230 static int xscale_set_watchpoint(struct target *target,
2231 struct watchpoint *watchpoint)
2232 {
2233 struct xscale_common *xscale = target_to_xscale(target);
2234 uint8_t enable = 0;
2235 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2236 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2237
2238 if (target->state != TARGET_HALTED)
2239 {
2240 LOG_WARNING("target not halted");
2241 return ERROR_TARGET_NOT_HALTED;
2242 }
2243
2244 xscale_get_reg(dbcon);
2245
2246 switch (watchpoint->rw)
2247 {
2248 case WPT_READ:
2249 enable = 0x3;
2250 break;
2251 case WPT_ACCESS:
2252 enable = 0x2;
2253 break;
2254 case WPT_WRITE:
2255 enable = 0x1;
2256 break;
2257 default:
2258 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2259 }
2260
2261 if (!xscale->dbr0_used)
2262 {
2263 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2264 dbcon_value |= enable;
2265 xscale_set_reg_u32(dbcon, dbcon_value);
2266 watchpoint->set = 1;
2267 xscale->dbr0_used = 1;
2268 }
2269 else if (!xscale->dbr1_used)
2270 {
2271 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2272 dbcon_value |= enable << 2;
2273 xscale_set_reg_u32(dbcon, dbcon_value);
2274 watchpoint->set = 2;
2275 xscale->dbr1_used = 1;
2276 }
2277 else
2278 {
2279 LOG_ERROR("BUG: no hardware comparator available");
2280 return ERROR_OK;
2281 }
2282
2283 return ERROR_OK;
2284 }
2285
2286 static int xscale_add_watchpoint(struct target *target,
2287 struct watchpoint *watchpoint)
2288 {
2289 struct xscale_common *xscale = target_to_xscale(target);
2290
2291 if (target->state != TARGET_HALTED)
2292 {
2293 LOG_WARNING("target not halted");
2294 return ERROR_TARGET_NOT_HALTED;
2295 }
2296
2297 if (xscale->dbr_available < 1)
2298 {
2299 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2300 }
2301
2302 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2303 {
2304 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2305 }
2306
2307 xscale->dbr_available--;
2308
2309 return ERROR_OK;
2310 }
2311
2312 static int xscale_unset_watchpoint(struct target *target,
2313 struct watchpoint *watchpoint)
2314 {
2315 struct xscale_common *xscale = target_to_xscale(target);
2316 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2317 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2318
2319 if (target->state != TARGET_HALTED)
2320 {
2321 LOG_WARNING("target not halted");
2322 return ERROR_TARGET_NOT_HALTED;
2323 }
2324
2325 if (!watchpoint->set)
2326 {
2327 LOG_WARNING("breakpoint not set");
2328 return ERROR_OK;
2329 }
2330
2331 if (watchpoint->set == 1)
2332 {
2333 dbcon_value &= ~0x3;
2334 xscale_set_reg_u32(dbcon, dbcon_value);
2335 xscale->dbr0_used = 0;
2336 }
2337 else if (watchpoint->set == 2)
2338 {
2339 dbcon_value &= ~0xc;
2340 xscale_set_reg_u32(dbcon, dbcon_value);
2341 xscale->dbr1_used = 0;
2342 }
2343 watchpoint->set = 0;
2344
2345 return ERROR_OK;
2346 }
2347
2348 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2349 {
2350 struct xscale_common *xscale = target_to_xscale(target);
2351
2352 if (target->state != TARGET_HALTED)
2353 {
2354 LOG_WARNING("target not halted");
2355 return ERROR_TARGET_NOT_HALTED;
2356 }
2357
2358 if (watchpoint->set)
2359 {
2360 xscale_unset_watchpoint(target, watchpoint);
2361 }
2362
2363 xscale->dbr_available++;
2364
2365 return ERROR_OK;
2366 }
2367
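/* Register access: DCSR, TX and RX are reachable directly over JTAG;
 * every other register in the XScale register cache is shuttled through
 * the debug handler with command 0x40 (read) or 0x41 (write) plus the
 * register's dbg_handler_number.
 */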
2368 static int xscale_get_reg(struct reg *reg)
2369 {
2370 struct xscale_reg *arch_info = reg->arch_info;
2371 struct target *target = arch_info->target;
2372 struct xscale_common *xscale = target_to_xscale(target);
2373
2374 /* DCSR, TX and RX are accessible via JTAG */
2375 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2376 {
2377 return xscale_read_dcsr(arch_info->target);
2378 }
2379 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2380 {
2381 /* 1 = consume register content */
2382 return xscale_read_tx(arch_info->target, 1);
2383 }
2384 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2385 {
2386 /* can't read from RX register (host -> debug handler) */
2387 return ERROR_OK;
2388 }
2389 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2390 {
2391 /* can't (explicitly) read from TXRXCTRL register */
2392 return ERROR_OK;
2393 }
2394 	else /* Other DBG registers have to be transferred by the debug handler */
2395 {
2396 /* send CP read request (command 0x40) */
2397 xscale_send_u32(target, 0x40);
2398
2399 /* send CP register number */
2400 xscale_send_u32(target, arch_info->dbg_handler_number);
2401
2402 /* read register value */
2403 xscale_read_tx(target, 1);
2404 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2405
2406 reg->dirty = 0;
2407 reg->valid = 1;
2408 }
2409
2410 return ERROR_OK;
2411 }
2412
2413 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2414 {
2415 struct xscale_reg *arch_info = reg->arch_info;
2416 struct target *target = arch_info->target;
2417 struct xscale_common *xscale = target_to_xscale(target);
2418 uint32_t value = buf_get_u32(buf, 0, 32);
2419
2420 /* DCSR, TX and RX are accessible via JTAG */
2421 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2422 {
2423 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2424 return xscale_write_dcsr(arch_info->target, -1, -1);
2425 }
2426 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2427 {
2428 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2429 return xscale_write_rx(arch_info->target);
2430 }
2431 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2432 {
2433 /* can't write to TX register (debug-handler -> host) */
2434 return ERROR_OK;
2435 }
2436 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2437 {
2438 /* can't (explicitly) write to TXRXCTRL register */
2439 return ERROR_OK;
2440 }
2441 	else /* Other DBG registers have to be transferred by the debug handler */
2442 {
2443 /* send CP write request (command 0x41) */
2444 xscale_send_u32(target, 0x41);
2445
2446 /* send CP register number */
2447 xscale_send_u32(target, arch_info->dbg_handler_number);
2448
2449 /* send CP register value */
2450 xscale_send_u32(target, value);
2451 buf_set_u32(reg->value, 0, 32, value);
2452 }
2453
2454 return ERROR_OK;
2455 }
2456
2457 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2458 {
2459 struct xscale_common *xscale = target_to_xscale(target);
2460 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2461 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2462
2463 /* send CP write request (command 0x41) */
2464 xscale_send_u32(target, 0x41);
2465
2466 /* send CP register number */
2467 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2468
2469 /* send CP register value */
2470 xscale_send_u32(target, value);
2471 buf_set_u32(dcsr->value, 0, 32, value);
2472
2473 return ERROR_OK;
2474 }
2475
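/* Trace buffer readout: command 0x61 returns 258 words -- 256 trace
 * entries followed by the two checkpoint registers.  An entry whose high
 * nibble is 0x9 (indirect branch) or 0xd (checkpointed indirect branch)
 * is preceded in the buffer by four entries carrying the 32-bit branch
 * target address; the backwards scan below tags those as address entries.
 */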
2476 static int xscale_read_trace(struct target *target)
2477 {
2478 struct xscale_common *xscale = target_to_xscale(target);
2479 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2480 struct xscale_trace_data **trace_data_p;
2481
2482 /* 258 words from debug handler
2483 * 256 trace buffer entries
2484 * 2 checkpoint addresses
2485 */
2486 uint32_t trace_buffer[258];
2487 int is_address[256];
2488 int i, j;
2489
2490 if (target->state != TARGET_HALTED)
2491 {
2492 LOG_WARNING("target must be stopped to read trace data");
2493 return ERROR_TARGET_NOT_HALTED;
2494 }
2495
2496 /* send read trace buffer command (command 0x61) */
2497 xscale_send_u32(target, 0x61);
2498
2499 /* receive trace buffer content */
2500 xscale_receive(target, trace_buffer, 258);
2501
2502 /* parse buffer backwards to identify address entries */
2503 for (i = 255; i >= 0; i--)
2504 {
2505 is_address[i] = 0;
2506 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2507 ((trace_buffer[i] & 0xf0) == 0xd0))
2508 {
2509 if (i >= 3)
2510 is_address[--i] = 1;
2511 if (i >= 2)
2512 is_address[--i] = 1;
2513 if (i >= 1)
2514 is_address[--i] = 1;
2515 if (i >= 0)
2516 is_address[--i] = 1;
2517 }
2518 }
2519
2520
2521 /* search first non-zero entry */
2522 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2523 ;
2524
2525 if (j == 256)
2526 {
2527 LOG_DEBUG("no trace data collected");
2528 return ERROR_XSCALE_NO_TRACE_DATA;
2529 }
2530
2531 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2532 ;
2533
2534 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2535 (*trace_data_p)->next = NULL;
2536 (*trace_data_p)->chkpt0 = trace_buffer[256];
2537 (*trace_data_p)->chkpt1 = trace_buffer[257];
2538 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2539 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2540 (*trace_data_p)->depth = 256 - j;
2541
2542 for (i = j; i < 256; i++)
2543 {
2544 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2545 if (is_address[i])
2546 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2547 else
2548 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2549 }
2550
2551 return ERROR_OK;
2552 }
2553
2554 static int xscale_read_instruction(struct target *target,
2555 struct arm_instruction *instruction)
2556 {
2557 struct xscale_common *xscale = target_to_xscale(target);
2558 int i;
2559 int section = -1;
2560 size_t size_read;
2561 uint32_t opcode;
2562 int retval;
2563
2564 if (!xscale->trace.image)
2565 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2566
2567 /* search for the section the current instruction belongs to */
2568 for (i = 0; i < xscale->trace.image->num_sections; i++)
2569 {
2570 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2571 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2572 {
2573 section = i;
2574 break;
2575 }
2576 }
2577
2578 if (section == -1)
2579 {
2580 /* current instruction couldn't be found in the image */
2581 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2582 }
2583
2584 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2585 {
2586 uint8_t buf[4];
2587 if ((retval = image_read_section(xscale->trace.image, section,
2588 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2589 4, buf, &size_read)) != ERROR_OK)
2590 {
2591 LOG_ERROR("error while reading instruction: %i", retval);
2592 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2593 }
2594 opcode = target_buffer_get_u32(target, buf);
2595 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2596 }
2597 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2598 {
2599 uint8_t buf[2];
2600 if ((retval = image_read_section(xscale->trace.image, section,
2601 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2602 2, buf, &size_read)) != ERROR_OK)
2603 {
2604 LOG_ERROR("error while reading instruction: %i", retval);
2605 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2606 }
2607 opcode = target_buffer_get_u16(target, buf);
2608 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2609 }
2610 else
2611 {
2612 LOG_ERROR("BUG: unknown core state encountered");
2613 exit(-1);
2614 }
2615
2616 return ERROR_OK;
2617 }
2618
2619 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2620 int i, uint32_t *target)
2621 {
2622 /* if there are less than four entries prior to the indirect branch message
2623 * we can't extract the address */
2624 if (i < 4)
2625 {
2626 return -1;
2627 }
2628
2629 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2630 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2631
2632 return 0;
2633 }
2634
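/* Trace analysis: each message byte encodes the message type in its high
 * nibble (0..7 exception, 8 direct branch, 9 indirect branch, 12/13
 * checkpointed branches, 15 roll-over) and an incremental instruction
 * count in its low nibble; each roll-over adds 16 to that count.
 * Instructions are resolved against the image loaded with
 * "xscale trace_image".
 */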
2635 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2636 {
2637 struct xscale_common *xscale = target_to_xscale(target);
2638 int next_pc_ok = 0;
2639 uint32_t next_pc = 0x0;
2640 struct xscale_trace_data *trace_data = xscale->trace.data;
2641 int retval;
2642
2643 while (trace_data)
2644 {
2645 int i, chkpt;
2646 int rollover;
2647 int branch;
2648 int exception;
2649 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2650
2651 chkpt = 0;
2652 rollover = 0;
2653
2654 for (i = 0; i < trace_data->depth; i++)
2655 {
2656 next_pc_ok = 0;
2657 branch = 0;
2658 exception = 0;
2659
2660 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2661 continue;
2662
2663 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2664 {
2665 case 0: /* Exceptions */
2666 case 1:
2667 case 2:
2668 case 3:
2669 case 4:
2670 case 5:
2671 case 6:
2672 case 7:
2673 exception = (trace_data->entries[i].data & 0x70) >> 4;
2674 next_pc_ok = 1;
2675 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2676 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2677 break;
2678 case 8: /* Direct Branch */
2679 branch = 1;
2680 break;
2681 case 9: /* Indirect Branch */
2682 branch = 1;
2683 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2684 {
2685 next_pc_ok = 1;
2686 }
2687 break;
2688 case 13: /* Checkpointed Indirect Branch */
2689 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2690 {
2691 next_pc_ok = 1;
2692 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2693 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2694 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2695 }
2696 /* explicit fall-through */
2697 case 12: /* Checkpointed Direct Branch */
2698 branch = 1;
2699 if (chkpt == 0)
2700 {
2701 next_pc_ok = 1;
2702 next_pc = trace_data->chkpt0;
2703 chkpt++;
2704 }
2705 else if (chkpt == 1)
2706 {
2707 next_pc_ok = 1;
2708 						next_pc = trace_data->chkpt1;
2709 chkpt++;
2710 }
2711 else
2712 {
2713 LOG_WARNING("more than two checkpointed branches encountered");
2714 }
2715 break;
2716 case 15: /* Roll-over */
2717 rollover++;
2718 continue;
2719 default: /* Reserved */
2720 command_print(cmd_ctx, "--- reserved trace message ---");
2721 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2722 return ERROR_OK;
2723 }
2724
2725 if (xscale->trace.pc_ok)
2726 {
2727 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2728 struct arm_instruction instruction;
2729
2730 if ((exception == 6) || (exception == 7))
2731 {
2732 /* IRQ or FIQ exception, no instruction executed */
2733 executed -= 1;
2734 }
2735
2736 while (executed-- >= 0)
2737 {
2738 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2739 {
2740 /* can't continue tracing with no image available */
2741 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2742 {
2743 return retval;
2744 }
2745 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2746 {
2747 /* TODO: handle incomplete images */
2748 }
2749 }
2750
2751 /* a precise abort on a load to the PC is included in the incremental
2752 * word count, other instructions causing data aborts are not included
2753 */
2754 if ((executed == 0) && (exception == 4)
2755 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2756 {
2757 if ((instruction.type == ARM_LDM)
2758 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2759 {
2760 executed--;
2761 }
2762 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2763 && (instruction.info.load_store.Rd != 15))
2764 {
2765 executed--;
2766 }
2767 }
2768
2769 /* only the last instruction executed
2770 * (the one that caused the control flow change)
2771 * could be a taken branch
2772 */
2773 if (((executed == -1) && (branch == 1)) &&
2774 (((instruction.type == ARM_B) ||
2775 (instruction.type == ARM_BL) ||
2776 (instruction.type == ARM_BLX)) &&
2777 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2778 {
2779 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2780 }
2781 else
2782 {
2783 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2784 }
2785 command_print(cmd_ctx, "%s", instruction.text);
2786 }
2787
2788 rollover = 0;
2789 }
2790
2791 if (next_pc_ok)
2792 {
2793 xscale->trace.current_pc = next_pc;
2794 xscale->trace.pc_ok = 1;
2795 }
2796 }
2797
2798 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2799 {
2800 struct arm_instruction instruction;
2801 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2802 {
2803 /* can't continue tracing with no image available */
2804 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2805 {
2806 return retval;
2807 }
2808 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2809 {
2810 /* TODO: handle incomplete images */
2811 }
2812 }
2813 command_print(cmd_ctx, "%s", instruction.text);
2814 }
2815
2816 trace_data = trace_data->next;
2817 }
2818
2819 return ERROR_OK;
2820 }
2821
2822 static const struct reg_arch_type xscale_reg_type = {
2823 .get = xscale_get_reg,
2824 .set = xscale_set_reg,
2825 };
2826
2827 static void xscale_build_reg_cache(struct target *target)
2828 {
2829 struct xscale_common *xscale = target_to_xscale(target);
2830 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2831 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2832 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2833 int i;
2834 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(struct xscale_reg);
2835
2836 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2837
2838 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2839 cache_p = &(*cache_p)->next;
2840
2841 /* fill in values for the xscale reg cache */
2842 (*cache_p)->name = "XScale registers";
2843 (*cache_p)->next = NULL;
2844 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2845 (*cache_p)->num_regs = num_regs;
2846
2847 for (i = 0; i < num_regs; i++)
2848 {
2849 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2850 (*cache_p)->reg_list[i].value = calloc(4, 1);
2851 (*cache_p)->reg_list[i].dirty = 0;
2852 (*cache_p)->reg_list[i].valid = 0;
2853 (*cache_p)->reg_list[i].size = 32;
2854 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2855 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2856 arch_info[i] = xscale_reg_arch_info[i];
2857 arch_info[i].target = target;
2858 }
2859
2860 xscale->reg_cache = (*cache_p);
2861 }
2862
2863 static int xscale_init_target(struct command_context *cmd_ctx,
2864 struct target *target)
2865 {
2866 xscale_build_reg_cache(target);
2867 return ERROR_OK;
2868 }
2869
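/* The vectors set up below are ARM B instructions.  A B encodes the word
 * offset (destination - vector address - 8) / 4, so the reset vectors at
 * 0x0 and 0xffff0000 branch to the debug handler entry at
 * handler_address + 0x20, while the remaining vectors use offset 0xfffffe
 * (-2 words), i.e. they branch to themselves until real handlers are set.
 */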
2870 static int xscale_init_arch_info(struct target *target,
2871 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2872 {
2873 struct arm *armv4_5;
2874 uint32_t high_reset_branch, low_reset_branch;
2875 int i;
2876
2877 armv4_5 = &xscale->armv4_5_common;
2878
2879 	/* store architecture specific data (none so far) */
2880 xscale->common_magic = XSCALE_COMMON_MAGIC;
2881
2882 /* we don't really *need* variant info ... */
2883 if (variant) {
2884 int ir_length = 0;
2885
2886 if (strcmp(variant, "pxa250") == 0
2887 || strcmp(variant, "pxa255") == 0
2888 || strcmp(variant, "pxa26x") == 0)
2889 ir_length = 5;
2890 else if (strcmp(variant, "pxa27x") == 0
2891 || strcmp(variant, "ixp42x") == 0
2892 || strcmp(variant, "ixp45x") == 0
2893 || strcmp(variant, "ixp46x") == 0)
2894 ir_length = 7;
2895 else
2896 LOG_WARNING("%s: unrecognized variant %s",
2897 tap->dotted_name, variant);
2898
2899 if (ir_length && ir_length != tap->ir_length) {
2900 LOG_WARNING("%s: IR length for %s is %d; fixing",
2901 tap->dotted_name, variant, ir_length);
2902 tap->ir_length = ir_length;
2903 }
2904 }
2905
2906 /* the debug handler isn't installed (and thus not running) at this time */
2907 xscale->handler_address = 0xfe000800;
2908
2909 /* clear the vectors we keep locally for reference */
2910 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2911 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2912
2913 /* no user-specified vectors have been configured yet */
2914 xscale->static_low_vectors_set = 0x0;
2915 xscale->static_high_vectors_set = 0x0;
2916
2917 /* calculate branches to debug handler */
2918 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2919 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2920
2921 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2922 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2923
2924 for (i = 1; i <= 7; i++)
2925 {
2926 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2927 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2928 }
2929
2930 /* 64kB aligned region used for DCache cleaning */
2931 xscale->cache_clean_address = 0xfffe0000;
2932
2933 xscale->hold_rst = 0;
2934 xscale->external_debug_break = 0;
2935
2936 xscale->ibcr_available = 2;
2937 xscale->ibcr0_used = 0;
2938 xscale->ibcr1_used = 0;
2939
2940 xscale->dbr_available = 2;
2941 xscale->dbr0_used = 0;
2942 xscale->dbr1_used = 0;
2943
2944 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2945 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2946
2947 xscale->vector_catch = 0x1;
2948
2949 xscale->trace.capture_status = TRACE_IDLE;
2950 xscale->trace.data = NULL;
2951 xscale->trace.image = NULL;
2952 xscale->trace.buffer_enabled = 0;
2953 xscale->trace.buffer_fill = 0;
2954
2955 /* prepare ARMv4/5 specific information */
2956 armv4_5->arch_info = xscale;
2957 armv4_5->read_core_reg = xscale_read_core_reg;
2958 armv4_5->write_core_reg = xscale_write_core_reg;
2959 armv4_5->full_context = xscale_full_context;
2960
2961 armv4_5_init_arch_info(target, armv4_5);
2962
2963 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
2964 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
2965 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
2966 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
2967 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
2968 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
2969 xscale->armv4_5_mmu.has_tiny_pages = 1;
2970 xscale->armv4_5_mmu.mmu_enabled = 0;
2971
2972 return ERROR_OK;
2973 }
2974
2975 static int xscale_target_create(struct target *target, Jim_Interp *interp)
2976 {
2977 struct xscale_common *xscale;
2978
2979 if (sizeof xscale_debug_handler - 1 > 0x800) {
2980 LOG_ERROR("debug_handler.bin: larger than 2kb");
2981 return ERROR_FAIL;
2982 }
2983
2984 xscale = calloc(1, sizeof(*xscale));
2985 if (!xscale)
2986 return ERROR_FAIL;
2987
2988 return xscale_init_arch_info(target, xscale, target->tap,
2989 target->variant);
2990 }
2991
2992 COMMAND_HANDLER(xscale_handle_debug_handler_command)
2993 {
2994 struct target *target = NULL;
2995 struct xscale_common *xscale;
2996 int retval;
2997 uint32_t handler_address;
2998
2999 if (CMD_ARGC < 2)
3000 {
3001 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3002 return ERROR_OK;
3003 }
3004
3005 if ((target = get_target(CMD_ARGV[0])) == NULL)
3006 {
3007 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3008 return ERROR_FAIL;
3009 }
3010
3011 xscale = target_to_xscale(target);
3012 retval = xscale_verify_pointer(CMD_CTX, xscale);
3013 if (retval != ERROR_OK)
3014 return retval;
3015
3016 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3017
3018 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3019 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3020 {
3021 xscale->handler_address = handler_address;
3022 }
3023 else
3024 {
3025 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3026 return ERROR_FAIL;
3027 }
3028
3029 return ERROR_OK;
3030 }
3031
3032 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3033 {
3034 struct target *target = NULL;
3035 struct xscale_common *xscale;
3036 int retval;
3037 uint32_t cache_clean_address;
3038
3039 if (CMD_ARGC < 2)
3040 {
3041 return ERROR_COMMAND_SYNTAX_ERROR;
3042 }
3043
3044 target = get_target(CMD_ARGV[0]);
3045 if (target == NULL)
3046 {
3047 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3048 return ERROR_FAIL;
3049 }
3050 xscale = target_to_xscale(target);
3051 retval = xscale_verify_pointer(CMD_CTX, xscale);
3052 if (retval != ERROR_OK)
3053 return retval;
3054
3055 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3056
3057 if (cache_clean_address & 0xffff)
3058 {
3059 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3060 }
3061 else
3062 {
3063 xscale->cache_clean_address = cache_clean_address;
3064 }
3065
3066 return ERROR_OK;
3067 }
3068
3069 COMMAND_HANDLER(xscale_handle_cache_info_command)
3070 {
3071 struct target *target = get_current_target(CMD_CTX);
3072 struct xscale_common *xscale = target_to_xscale(target);
3073 int retval;
3074
3075 retval = xscale_verify_pointer(CMD_CTX, xscale);
3076 if (retval != ERROR_OK)
3077 return retval;
3078
3079 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3080 }
3081
3082 static int xscale_virt2phys(struct target *target,
3083 uint32_t virtual, uint32_t *physical)
3084 {
3085 struct xscale_common *xscale = target_to_xscale(target);
3086 int type;
3087 uint32_t cb;
3088 int domain;
3089 uint32_t ap;
3090
3091 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3092 LOG_ERROR(xscale_not);
3093 return ERROR_TARGET_INVALID;
3094 }
3095
3096 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3097 if (type == -1)
3098 {
3099 return ret;
3100 }
3101 *physical = ret;
3102 return ERROR_OK;
3103 }
3104
3105 static int xscale_mmu(struct target *target, int *enabled)
3106 {
3107 struct xscale_common *xscale = target_to_xscale(target);
3108
3109 if (target->state != TARGET_HALTED)
3110 {
3111 LOG_ERROR("Target not halted");
3112 return ERROR_TARGET_INVALID;
3113 }
3114 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3115 return ERROR_OK;
3116 }
3117
3118 COMMAND_HANDLER(xscale_handle_mmu_command)
3119 {
3120 struct target *target = get_current_target(CMD_CTX);
3121 struct xscale_common *xscale = target_to_xscale(target);
3122 int retval;
3123
3124 retval = xscale_verify_pointer(CMD_CTX, xscale);
3125 if (retval != ERROR_OK)
3126 return retval;
3127
3128 if (target->state != TARGET_HALTED)
3129 {
3130 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3131 return ERROR_OK;
3132 }
3133
3134 if (CMD_ARGC >= 1)
3135 {
3136 bool enable;
3137 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3138 if (enable)
3139 xscale_enable_mmu_caches(target, 1, 0, 0);
3140 else
3141 xscale_disable_mmu_caches(target, 1, 0, 0);
3142 xscale->armv4_5_mmu.mmu_enabled = enable;
3143 }
3144
3145 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3146
3147 return ERROR_OK;
3148 }
3149
3150 COMMAND_HANDLER(xscale_handle_idcache_command)
3151 {
3152 struct target *target = get_current_target(CMD_CTX);
3153 struct xscale_common *xscale = target_to_xscale(target);
3154
3155 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3156 if (retval != ERROR_OK)
3157 return retval;
3158
3159 if (target->state != TARGET_HALTED)
3160 {
3161 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3162 return ERROR_OK;
3163 }
3164
3165 bool icache;
3166 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3167
3168 if (CMD_ARGC >= 1)
3169 {
3170 bool enable;
3171 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3172 		if (enable)
3173 			xscale_enable_mmu_caches(target, 0, icache ? 0 : 1, icache ? 1 : 0);
3174 		else
3175 			xscale_disable_mmu_caches(target, 0, icache ? 0 : 1, icache ? 1 : 0);
3176 if (icache)
3177 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3178 else
3179 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3180 }
3181
3182 bool enabled = icache ?
3183 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3184 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3185 const char *msg = enabled ? "enabled" : "disabled";
3186 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3187
3188 return ERROR_OK;
3189 }
3190
3191 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3192 {
3193 struct target *target = get_current_target(CMD_CTX);
3194 struct xscale_common *xscale = target_to_xscale(target);
3195 int retval;
3196
3197 retval = xscale_verify_pointer(CMD_CTX, xscale);
3198 if (retval != ERROR_OK)
3199 return retval;
3200
3201 if (CMD_ARGC < 1)
3202 {
3203 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3204 }
3205 else
3206 {
3207 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3208 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3209 xscale_write_dcsr(target, -1, -1);
3210 }
3211
3212 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3213
3214 return ERROR_OK;
3215 }
3216
3217
3218 COMMAND_HANDLER(xscale_handle_vector_table_command)
3219 {
3220 struct target *target = get_current_target(CMD_CTX);
3221 struct xscale_common *xscale = target_to_xscale(target);
3222 int err = 0;
3223 int retval;
3224
3225 retval = xscale_verify_pointer(CMD_CTX, xscale);
3226 if (retval != ERROR_OK)
3227 return retval;
3228
3229 if (CMD_ARGC == 0) /* print current settings */
3230 {
3231 int idx;
3232
3233 command_print(CMD_CTX, "active user-set static vectors:");
3234 for (idx = 1; idx < 8; idx++)
3235 if (xscale->static_low_vectors_set & (1 << idx))
3236 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3237 for (idx = 1; idx < 8; idx++)
3238 if (xscale->static_high_vectors_set & (1 << idx))
3239 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3240 return ERROR_OK;
3241 }
3242
3243 if (CMD_ARGC != 3)
3244 err = 1;
3245 else
3246 {
3247 int idx;
3248 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3249 uint32_t vec;
3250 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3251
3252 if (idx < 1 || idx >= 8)
3253 err = 1;
3254
3255 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3256 {
3257 xscale->static_low_vectors_set |= (1<<idx);
3258 xscale->static_low_vectors[idx] = vec;
3259 }
3260 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3261 {
3262 xscale->static_high_vectors_set |= (1<<idx);
3263 xscale->static_high_vectors[idx] = vec;
3264 }
3265 else
3266 err = 1;
3267 }
3268
3269 if (err)
3270 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3271
3272 return ERROR_OK;
3273 }
3274
3275
3276 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3277 {
3278 struct target *target = get_current_target(CMD_CTX);
3279 struct xscale_common *xscale = target_to_xscale(target);
3280 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
3281 uint32_t dcsr_value;
3282 int retval;
3283
3284 retval = xscale_verify_pointer(CMD_CTX, xscale);
3285 if (retval != ERROR_OK)
3286 return retval;
3287
3288 if (target->state != TARGET_HALTED)
3289 {
3290 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3291 return ERROR_OK;
3292 }
3293
3294 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3295 {
3296 struct xscale_trace_data *td, *next_td;
3297 xscale->trace.buffer_enabled = 1;
3298
3299 /* free old trace data */
3300 td = xscale->trace.data;
3301 while (td)
3302 {
3303 next_td = td->next;
3304
3305 if (td->entries)
3306 free(td->entries);
3307 free(td);
3308 td = next_td;
3309 }
3310 xscale->trace.data = NULL;
3311 }
3312 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3313 {
3314 xscale->trace.buffer_enabled = 0;
3315 }
3316
3317 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3318 {
3319 uint32_t fill = 1;
3320 if (CMD_ARGC >= 3)
3321 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3322 xscale->trace.buffer_fill = fill;
3323 }
3324 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3325 {
3326 xscale->trace.buffer_fill = -1;
3327 }
3328
3329 if (xscale->trace.buffer_enabled)
3330 {
3331 /* if we enable the trace buffer in fill-once
3332 * mode we know the address of the first instruction */
3333 xscale->trace.pc_ok = 1;
3334 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3335 }
3336 else
3337 {
3338 /* otherwise the address is unknown, and we have no known good PC */
3339 xscale->trace.pc_ok = 0;
3340 }
3341
3342 command_print(CMD_CTX, "trace buffer %s (%s)",
3343 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3344 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3345
3346 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3347 if (xscale->trace.buffer_fill >= 0)
3348 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3349 else
3350 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3351
3352 return ERROR_OK;
3353 }
3354
3355 COMMAND_HANDLER(xscale_handle_trace_image_command)
3356 {
3357 struct target *target = get_current_target(CMD_CTX);
3358 struct xscale_common *xscale = target_to_xscale(target);
3359 int retval;
3360
3361 if (CMD_ARGC < 1)
3362 {
3363 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3364 return ERROR_OK;
3365 }
3366
3367 retval = xscale_verify_pointer(CMD_CTX, xscale);
3368 if (retval != ERROR_OK)
3369 return retval;
3370
3371 if (xscale->trace.image)
3372 {
3373 image_close(xscale->trace.image);
3374 free(xscale->trace.image);
3375 command_print(CMD_CTX, "previously loaded image found and closed");
3376 }
3377
3378 xscale->trace.image = malloc(sizeof(struct image));
3379 xscale->trace.image->base_address_set = 0;
3380 xscale->trace.image->start_address_set = 0;
3381
3382 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3383 if (CMD_ARGC >= 2)
3384 {
3385 xscale->trace.image->base_address_set = 1;
3386 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3387 }
3388 else
3389 {
3390 xscale->trace.image->base_address_set = 0;
3391 }
3392
3393 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3394 {
3395 free(xscale->trace.image);
3396 xscale->trace.image = NULL;
3397 return ERROR_OK;
3398 }
3399
3400 return ERROR_OK;
3401 }
3402
3403 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3404 {
3405 struct target *target = get_current_target(CMD_CTX);
3406 struct xscale_common *xscale = target_to_xscale(target);
3407 struct xscale_trace_data *trace_data;
3408 struct fileio file;
3409 int retval;
3410
3411 retval = xscale_verify_pointer(CMD_CTX, xscale);
3412 if (retval != ERROR_OK)
3413 return retval;
3414
3415 if (target->state != TARGET_HALTED)
3416 {
3417 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3418 return ERROR_OK;
3419 }
3420
3421 if (CMD_ARGC < 1)
3422 {
3423 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3424 return ERROR_OK;
3425 }
3426
3427 trace_data = xscale->trace.data;
3428
3429 if (!trace_data)
3430 {
3431 command_print(CMD_CTX, "no trace data collected");
3432 return ERROR_OK;
3433 }
3434
3435 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3436 {
3437 return ERROR_OK;
3438 }
3439
3440 while (trace_data)
3441 {
3442 int i;
3443
3444 fileio_write_u32(&file, trace_data->chkpt0);
3445 fileio_write_u32(&file, trace_data->chkpt1);
3446 fileio_write_u32(&file, trace_data->last_instruction);
3447 fileio_write_u32(&file, trace_data->depth);
3448
3449 for (i = 0; i < trace_data->depth; i++)
3450 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3451
3452 trace_data = trace_data->next;
3453 }
3454
3455 fileio_close(&file);
3456
3457 return ERROR_OK;
3458 }
3459
3460 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3461 {
3462 struct target *target = get_current_target(CMD_CTX);
3463 struct xscale_common *xscale = target_to_xscale(target);
3464 int retval;
3465
3466 retval = xscale_verify_pointer(CMD_CTX, xscale);
3467 if (retval != ERROR_OK)
3468 return retval;
3469
3470 xscale_analyze_trace(target, CMD_CTX);
3471
3472 return ERROR_OK;
3473 }
3474
3475 COMMAND_HANDLER(xscale_handle_cp15)
3476 {
3477 struct target *target = get_current_target(CMD_CTX);
3478 struct xscale_common *xscale = target_to_xscale(target);
3479 int retval;
3480
3481 retval = xscale_verify_pointer(CMD_CTX, xscale);
3482 if (retval != ERROR_OK)
3483 return retval;
3484
3485 if (target->state != TARGET_HALTED)
3486 {
3487 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3488 return ERROR_OK;
3489 }
3490 uint32_t reg_no = 0;
3491 struct reg *reg = NULL;
3492 if (CMD_ARGC > 0)
3493 {
3494 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3495 /*translate from xscale cp15 register no to openocd register*/
3496 switch (reg_no)
3497 {
3498 case 0:
3499 reg_no = XSCALE_MAINID;
3500 break;
3501 case 1:
3502 reg_no = XSCALE_CTRL;
3503 break;
3504 case 2:
3505 reg_no = XSCALE_TTB;
3506 break;
3507 case 3:
3508 reg_no = XSCALE_DAC;
3509 break;
3510 case 5:
3511 reg_no = XSCALE_FSR;
3512 break;
3513 case 6:
3514 reg_no = XSCALE_FAR;
3515 break;
3516 case 13:
3517 reg_no = XSCALE_PID;
3518 break;
3519 case 15:
3520 reg_no = XSCALE_CPACCESS;
3521 break;
3522 default:
3523 command_print(CMD_CTX, "invalid register number");
3524 return ERROR_INVALID_ARGUMENTS;
3525 }
3526 reg = &xscale->reg_cache->reg_list[reg_no];
3527
3528 }
3529 if (CMD_ARGC == 1)
3530 {
3531 uint32_t value;
3532
3533 /* read cp15 control register */
3534 xscale_get_reg(reg);
3535 value = buf_get_u32(reg->value, 0, 32);
3536 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3537 }
3538 else if (CMD_ARGC == 2)
3539 {
3540 uint32_t value;
3541 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3542
3543 /* send CP write request (command 0x41) */
3544 xscale_send_u32(target, 0x41);
3545
3546 /* send CP register number */
3547 xscale_send_u32(target, reg_no);
3548
3549 /* send CP register value */
3550 xscale_send_u32(target, value);
3551
3552 /* execute cpwait to ensure outstanding operations complete */
3553 xscale_send_u32(target, 0x53);
3554 }
3555 else
3556 {
3557 command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
3558 }
3559
3560 return ERROR_OK;
3561 }
3562
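/* Example console usage of the commands registered below (values are
 * illustrative only):
 *
 *   xscale debug_handler 0 0xfe000800
 *   xscale vector_catch 0x01
 *   xscale trace_buffer enable fill 1
 *   xscale analyze_trace
 *   xscale cp15 1
 */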
3563 static int xscale_register_commands(struct command_context *cmd_ctx)
3564 {
3565 struct command *xscale_cmd;
3566
3567 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3568
3569 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3570 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3571
3572 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3573 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3574 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3575 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3576
3577 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
3578 register_command(cmd_ctx, xscale_cmd, "vector_table", xscale_handle_vector_table_command, COMMAND_EXEC, "<high|low> <index> <code> set static code for exception handler entry");
3579
3580 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable | disable> ['fill' [n]|'wrap']");
3581
3582 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3583 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3584 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3585 COMMAND_EXEC, "load image from <file> [base address]");
3586
3587 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3588
3589 armv4_5_register_commands(cmd_ctx);
3590
3591 return ERROR_OK;
3592 }
3593
3594 struct target_type xscale_target =
3595 {
3596 .name = "xscale",
3597
3598 .poll = xscale_poll,
3599 .arch_state = xscale_arch_state,
3600
3601 .target_request_data = NULL,
3602
3603 .halt = xscale_halt,
3604 .resume = xscale_resume,
3605 .step = xscale_step,
3606
3607 .assert_reset = xscale_assert_reset,
3608 .deassert_reset = xscale_deassert_reset,
3609 .soft_reset_halt = NULL,
3610
3611 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
3612
3613 .read_memory = xscale_read_memory,
3614 .write_memory = xscale_write_memory,
3615 .bulk_write_memory = xscale_bulk_write_memory,
3616
3617 .checksum_memory = arm_checksum_memory,
3618 .blank_check_memory = arm_blank_check_memory,
3619
3620 .run_algorithm = armv4_5_run_algorithm,
3621
3622 .add_breakpoint = xscale_add_breakpoint,
3623 .remove_breakpoint = xscale_remove_breakpoint,
3624 .add_watchpoint = xscale_add_watchpoint,
3625 .remove_watchpoint = xscale_remove_watchpoint,
3626
3627 .register_commands = xscale_register_commands,
3628 .target_create = xscale_target_create,
3629 .init_target = xscale_init_target,
3630
3631 .virt2phys = xscale_virt2phys,
3632 .mmu = xscale_mmu
3633 };
