ARM: rename some generic routines
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40
41
42 /*
43 * Important XScale documents available as of October 2009 include:
44 *
45 * Intel XScale® Core Developer’s Manual, January 2004
46 * Order Number: 273473-002
47 * This has a chapter detailing debug facilities, and punts some
48 * details to chip-specific microarchitecture documents.
49 *
50 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
51 * Document Number: 273539-005
52 * Less detailed than the developer's manual, but summarizes those
53 * missing details (for most XScales) and gives LOTS of notes about
54 * debugger/handler interaction issues. Presents a simpler reset
55 * and load-handler sequence than the arch doc. (Note, OpenOCD
56 * doesn't currently support "Hot-Debug" as defined there.)
57 *
58 * Chip-specific microarchitecture documents may also be useful.
59 */
60
61
62 /* forward declarations */
63 static int xscale_resume(struct target *, int current,
64 uint32_t address, int handle_breakpoints, int debug_execution);
65 static int xscale_debug_entry(struct target *);
66 static int xscale_restore_banked(struct target *);
67 static int xscale_get_reg(struct reg *reg);
68 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
69 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
70 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
71 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
72 static int xscale_read_trace(struct target *);
73
74
75 /* This XScale "debug handler" is loaded into the processor's
76 * mini-ICache, which is 2K of code writable only via JTAG.
77 *
78 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
79 * binary files cleanly. It's string oriented, and terminates them
80 * with a NUL character. Better would be to generate the constants
81 * and let other code decide names, scoping, and other housekeeping.
82 */
83 static /* unsigned const char xscale_debug_handler[] = ... */
84 #include "xscale_debug.h"
85
86 static char *const xscale_reg_list[] =
87 {
88 "XSCALE_MAINID", /* 0 */
89 "XSCALE_CACHETYPE",
90 "XSCALE_CTRL",
91 "XSCALE_AUXCTRL",
92 "XSCALE_TTB",
93 "XSCALE_DAC",
94 "XSCALE_FSR",
95 "XSCALE_FAR",
96 "XSCALE_PID",
97 "XSCALE_CPACCESS",
98 "XSCALE_IBCR0", /* 10 */
99 "XSCALE_IBCR1",
100 "XSCALE_DBR0",
101 "XSCALE_DBR1",
102 "XSCALE_DBCON",
103 "XSCALE_TBREG",
104 "XSCALE_CHKPT0",
105 "XSCALE_CHKPT1",
106 "XSCALE_DCSR",
107 "XSCALE_TX",
108 "XSCALE_RX", /* 20 */
109 "XSCALE_TXRXCTRL",
110 };
111
112 static const struct xscale_reg xscale_reg_arch_info[] =
113 {
114 {XSCALE_MAINID, NULL},
115 {XSCALE_CACHETYPE, NULL},
116 {XSCALE_CTRL, NULL},
117 {XSCALE_AUXCTRL, NULL},
118 {XSCALE_TTB, NULL},
119 {XSCALE_DAC, NULL},
120 {XSCALE_FSR, NULL},
121 {XSCALE_FAR, NULL},
122 {XSCALE_PID, NULL},
123 {XSCALE_CPACCESS, NULL},
124 {XSCALE_IBCR0, NULL},
125 {XSCALE_IBCR1, NULL},
126 {XSCALE_DBR0, NULL},
127 {XSCALE_DBR1, NULL},
128 {XSCALE_DBCON, NULL},
129 {XSCALE_TBREG, NULL},
130 {XSCALE_CHKPT0, NULL},
131 {XSCALE_CHKPT1, NULL},
132 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
133 {-1, NULL}, /* TX accessed via JTAG */
134 {-1, NULL}, /* RX accessed via JTAG */
135 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
136 };
137
138 /* convenience wrapper to access XScale specific registers */
139 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
140 {
141 uint8_t buf[4];
142
143 buf_set_u32(buf, 0, 32, value);
144
145 return xscale_set_reg(reg, buf);
146 }
147
148 static const char xscale_not[] = "target is not an XScale";
149
150 static int xscale_verify_pointer(struct command_context *cmd_ctx,
151 struct xscale_common *xscale)
152 {
153 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
154 command_print(cmd_ctx, xscale_not);
155 return ERROR_TARGET_INVALID;
156 }
157 return ERROR_OK;
158 }
159
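/* Queue an IR scan to select the given JTAG instruction, but only if it
 * differs from the instruction currently loaded in the TAP; redundant IR
 * scans are skipped to keep the JTAG queue short.
 */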
160 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
161 {
162 if (tap == NULL)
163 return ERROR_FAIL;
164
165 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
166 {
167 struct scan_field field;
168 uint8_t scratch[4];
169
170 memset(&field, 0, sizeof field);
171 field.tap = tap;
172 field.num_bits = tap->ir_length;
173 field.out_value = scratch;
174 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
175
176 jtag_add_ir_scan(1, &field, jtag_get_end_state());
177 }
178
179 return ERROR_OK;
180 }
181
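/* Read the DCSR through the SELDCSR JTAG data register into the register
 * cache. The scan is not a pure read: the hold_rst and external_debug_break
 * control bits are driven in field0 on every pass, and a second scan writes
 * the value just read back to the target.
 */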
182 static int xscale_read_dcsr(struct target *target)
183 {
184 struct xscale_common *xscale = target_to_xscale(target);
185 int retval;
186 struct scan_field fields[3];
187 uint8_t field0 = 0x0;
188 uint8_t field0_check_value = 0x2;
189 uint8_t field0_check_mask = 0x7;
190 uint8_t field2 = 0x0;
191 uint8_t field2_check_value = 0x0;
192 uint8_t field2_check_mask = 0x1;
193
194 jtag_set_end_state(TAP_DRPAUSE);
195 xscale_jtag_set_instr(target->tap,
196 XSCALE_SELDCSR << xscale->xscale_variant);
197
198 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
199 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
200
201 memset(&fields, 0, sizeof fields);
202
203 fields[0].tap = target->tap;
204 fields[0].num_bits = 3;
205 fields[0].out_value = &field0;
206 uint8_t tmp;
207 fields[0].in_value = &tmp;
208
209 fields[1].tap = target->tap;
210 fields[1].num_bits = 32;
211 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
212
213 fields[2].tap = target->tap;
214 fields[2].num_bits = 1;
215 fields[2].out_value = &field2;
216 uint8_t tmp2;
217 fields[2].in_value = &tmp2;
218
219 jtag_add_dr_scan(3, fields, jtag_get_end_state());
220
221 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
222 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
223
224 if ((retval = jtag_execute_queue()) != ERROR_OK)
225 {
226 LOG_ERROR("JTAG error while reading DCSR");
227 return retval;
228 }
229
230 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
231 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
232
233 /* write the register with the value we just read
234 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
235 */
236 field0_check_mask = 0x1;
237 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
238 fields[1].in_value = NULL;
239
240 jtag_set_end_state(TAP_IDLE);
241
242 jtag_add_dr_scan(3, fields, jtag_get_end_state());
243
244 /* DANGER!!! this must be here. It will make sure that the arguments
245 * to jtag_set_check_value() do not go out of scope! */
246 return jtag_execute_queue();
247 }
248
249
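/* JTAG callback: convert a 32-bit scan result from the little-endian scan
 * buffer into a host-order uint32_t, in place.
 */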
250 static void xscale_getbuf(jtag_callback_data_t arg)
251 {
252 uint8_t *in = (uint8_t *)arg;
253 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
254 }
255
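/* Collect num_words 32-bit words from the debug handler via the DBGTX data
 * register. Bit 0 of the 3-bit status field indicates whether the handler
 * had valid data for that scan; invalid words are dropped and re-read until
 * all requested words have arrived (or ~1000 empty rounds time out).
 */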
256 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
257 {
258 if (num_words == 0)
259 return ERROR_INVALID_ARGUMENTS;
260
261 struct xscale_common *xscale = target_to_xscale(target);
262 int retval = ERROR_OK;
263 tap_state_t path[3];
264 struct scan_field fields[3];
265 uint8_t *field0 = malloc(num_words * 1);
266 uint8_t field0_check_value = 0x2;
267 uint8_t field0_check_mask = 0x6;
268 uint32_t *field1 = malloc(num_words * 4);
269 uint8_t field2_check_value = 0x0;
270 uint8_t field2_check_mask = 0x1;
271 int words_done = 0;
272 int words_scheduled = 0;
273 int i;
274
275 path[0] = TAP_DRSELECT;
276 path[1] = TAP_DRCAPTURE;
277 path[2] = TAP_DRSHIFT;
278
279 memset(&fields, 0, sizeof fields);
280
281 fields[0].tap = target->tap;
282 fields[0].num_bits = 3;
283 fields[0].check_value = &field0_check_value;
284 fields[0].check_mask = &field0_check_mask;
285
286 fields[1].tap = target->tap;
287 fields[1].num_bits = 32;
288
289 fields[2].tap = target->tap;
290 fields[2].num_bits = 1;
291 fields[2].check_value = &field2_check_value;
292 fields[2].check_mask = &field2_check_mask;
293
294 jtag_set_end_state(TAP_IDLE);
295 xscale_jtag_set_instr(target->tap,
296 XSCALE_DBGTX << xscale->xscale_variant);
297 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
298
299 /* repeat until all words have been collected */
300 int attempts = 0;
301 while (words_done < num_words)
302 {
303 /* schedule reads */
304 words_scheduled = 0;
305 for (i = words_done; i < num_words; i++)
306 {
307 fields[0].in_value = &field0[i];
308
309 jtag_add_pathmove(3, path);
310
311 fields[1].in_value = (uint8_t *)(field1 + i);
312
313 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
314
315 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
316
317 words_scheduled++;
318 }
319
320 if ((retval = jtag_execute_queue()) != ERROR_OK)
321 {
322 LOG_ERROR("JTAG error while receiving data from debug handler");
323 break;
324 }
325
326 /* examine results */
327 for (i = words_done; i < num_words; i++)
328 {
329 if (!(field0[i] & 1))
330 {
331 /* move backwards if necessary */
332 int j;
333 for (j = i; j < num_words - 1; j++)
334 {
335 field0[j] = field0[j + 1];
336 field1[j] = field1[j + 1];
337 }
338 words_scheduled--;
339 }
340 }
341 if (words_scheduled == 0)
342 {
343 if (attempts++ == 1000)
344 {
345 LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
346 retval = ERROR_TARGET_TIMEOUT;
347 break;
348 }
349 }
350
351 words_done += words_scheduled;
352 }
353
354 for (i = 0; i < num_words; i++)
355 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
356
free(field0);
357 free(field1);
358
359 return retval;
360 }
361
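/* Poll the DBGTX register until the debug handler has placed a word in TX.
 * With 'consume' set, the Capture-DR to Shift-DR path clears TX_READY;
 * otherwise a detour through Pause-DR leaves it set. A non-consuming poll
 * with no data returns ERROR_TARGET_RESOURCE_NOT_AVAILABLE.
 */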
362 static int xscale_read_tx(struct target *target, int consume)
363 {
364 struct xscale_common *xscale = target_to_xscale(target);
365 tap_state_t path[3];
366 tap_state_t noconsume_path[6];
367 int retval;
368 struct timeval timeout, now;
369 struct scan_field fields[3];
370 uint8_t field0_in = 0x0;
371 uint8_t field0_check_value = 0x2;
372 uint8_t field0_check_mask = 0x6;
373 uint8_t field2_check_value = 0x0;
374 uint8_t field2_check_mask = 0x1;
375
376 jtag_set_end_state(TAP_IDLE);
377
378 xscale_jtag_set_instr(target->tap,
379 XSCALE_DBGTX << xscale->xscale_variant);
380
381 path[0] = TAP_DRSELECT;
382 path[1] = TAP_DRCAPTURE;
383 path[2] = TAP_DRSHIFT;
384
385 noconsume_path[0] = TAP_DRSELECT;
386 noconsume_path[1] = TAP_DRCAPTURE;
387 noconsume_path[2] = TAP_DREXIT1;
388 noconsume_path[3] = TAP_DRPAUSE;
389 noconsume_path[4] = TAP_DREXIT2;
390 noconsume_path[5] = TAP_DRSHIFT;
391
392 memset(&fields, 0, sizeof fields);
393
394 fields[0].tap = target->tap;
395 fields[0].num_bits = 3;
396 fields[0].in_value = &field0_in;
397
398 fields[1].tap = target->tap;
399 fields[1].num_bits = 32;
400 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
401
402 fields[2].tap = target->tap;
403 fields[2].num_bits = 1;
404 uint8_t tmp;
405 fields[2].in_value = &tmp;
406
407 gettimeofday(&timeout, NULL);
408 timeval_add_time(&timeout, 1, 0);
409
410 for (;;)
411 {
412 /* if we want to consume the register content (i.e. clear TX_READY),
413 * we have to go straight from Capture-DR to Shift-DR
414 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
415 */
416 if (consume)
417 jtag_add_pathmove(3, path);
418 else
419 {
420 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
421 }
422
423 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
424
425 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
426 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
427
428 if ((retval = jtag_execute_queue()) != ERROR_OK)
429 {
430 LOG_ERROR("JTAG error while reading TX");
431 return ERROR_TARGET_TIMEOUT;
432 }
433
434 gettimeofday(&now, NULL);
435 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
436 {
437 LOG_ERROR("time out reading TX register");
438 return ERROR_TARGET_TIMEOUT;
439 }
440 if (!((!(field0_in & 1)) && consume))
441 {
442 goto done;
443 }
444 if (debug_level >= 3)
445 {
446 LOG_DEBUG("waiting 100ms");
447 alive_sleep(100); /* avoid flooding the logs */
448 } else
449 {
450 keep_alive();
451 }
452 }
453 done:
454
455 if (!(field0_in & 1))
456 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
457
458 return ERROR_OK;
459 }
460
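/* Write the cached RX register value to the debug handler via DBGRX: poll
 * until the handler has consumed the previous word, then scan the new word
 * and finally set the rx_valid flag so the handler picks it up.
 */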
461 static int xscale_write_rx(struct target *target)
462 {
463 struct xscale_common *xscale = target_to_xscale(target);
464 int retval;
465 struct timeval timeout, now;
466 struct scan_field fields[3];
467 uint8_t field0_out = 0x0;
468 uint8_t field0_in = 0x0;
469 uint8_t field0_check_value = 0x2;
470 uint8_t field0_check_mask = 0x6;
471 uint8_t field2 = 0x0;
472 uint8_t field2_check_value = 0x0;
473 uint8_t field2_check_mask = 0x1;
474
475 jtag_set_end_state(TAP_IDLE);
476
477 xscale_jtag_set_instr(target->tap,
478 XSCALE_DBGRX << xscale->xscale_variant);
479
480 memset(&fields, 0, sizeof fields);
481
482 fields[0].tap = target->tap;
483 fields[0].num_bits = 3;
484 fields[0].out_value = &field0_out;
485 fields[0].in_value = &field0_in;
486
487 fields[1].tap = target->tap;
488 fields[1].num_bits = 32;
489 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
490
491 fields[2].tap = target->tap;
492 fields[2].num_bits = 1;
493 fields[2].out_value = &field2;
494 uint8_t tmp;
495 fields[2].in_value = &tmp;
496
497 gettimeofday(&timeout, NULL);
498 timeval_add_time(&timeout, 1, 0);
499
500 /* poll until rx_read is low */
501 LOG_DEBUG("polling RX");
502 for (;;)
503 {
504 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
505
506 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
507 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
508
509 if ((retval = jtag_execute_queue()) != ERROR_OK)
510 {
511 LOG_ERROR("JTAG error while writing RX");
512 return retval;
513 }
514
515 gettimeofday(&now, NULL);
516 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
517 {
518 LOG_ERROR("time out writing RX register");
519 return ERROR_TARGET_TIMEOUT;
520 }
521 if (!(field0_in & 1))
522 goto done;
523 if (debug_level >= 3)
524 {
525 LOG_DEBUG("waiting 100ms");
526 alive_sleep(100); /* avoid flooding the logs */
527 } else
528 {
529 keep_alive();
530 }
531 }
532 done:
533
534 /* set rx_valid */
535 field2 = 0x1;
536 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
537
538 if ((retval = jtag_execute_queue()) != ERROR_OK)
539 {
540 LOG_ERROR("JTAG error while writing RX");
541 return retval;
542 }
543
544 return ERROR_OK;
545 }
546
547 /* send count elements of size bytes each to the debug handler */
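/* Note: unlike xscale_write_rx(), this fast path does not poll the handshake
 * bit between words; it assumes the debug handler drains RX faster than the
 * JTAG interface can refill it.
 */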
548 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
549 {
550 struct xscale_common *xscale = target_to_xscale(target);
551 uint32_t t[3];
552 int bits[3];
553 int retval;
554 int done_count = 0;
555
556 jtag_set_end_state(TAP_IDLE);
557
558 xscale_jtag_set_instr(target->tap,
559 XSCALE_DBGRX << xscale->xscale_variant);
560
561 bits[0] = 3;
562 t[0] = 0;
563 bits[1] = 32;
564 t[2] = 1;
565 bits[2] = 1;
566 int endianness = target->endianness;
567 while (done_count++ < count)
568 {
569 switch (size)
570 {
571 case 4:
572 if (endianness == TARGET_LITTLE_ENDIAN)
573 {
574 t[1]=le_to_h_u32(buffer);
575 } else
576 {
577 t[1]=be_to_h_u32(buffer);
578 }
579 break;
580 case 2:
581 if (endianness == TARGET_LITTLE_ENDIAN)
582 {
583 t[1]=le_to_h_u16(buffer);
584 } else
585 {
586 t[1]=be_to_h_u16(buffer);
587 }
588 break;
589 case 1:
590 t[1]=buffer[0];
591 break;
592 default:
593 LOG_ERROR("BUG: size neither 4, 2 nor 1");
594 return ERROR_INVALID_ARGUMENTS;
595 }
596 jtag_add_dr_out(target->tap,
597 3,
598 bits,
599 t,
600 jtag_set_end_state(TAP_IDLE));
601 buffer += size;
602 }
603
604 if ((retval = jtag_execute_queue()) != ERROR_OK)
605 {
606 LOG_ERROR("JTAG error while sending data to debug handler");
607 return retval;
608 }
609
610 return ERROR_OK;
611 }
612
613 static int xscale_send_u32(struct target *target, uint32_t value)
614 {
615 struct xscale_common *xscale = target_to_xscale(target);
616
617 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
618 return xscale_write_rx(target);
619 }
620
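/* Write the cached DCSR value to the target via the SELDCSR JTAG register.
 * hold_rst and ext_dbg_brk update the corresponding control bits; passing -1
 * leaves the previously programmed value unchanged.
 */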
621 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
622 {
623 struct xscale_common *xscale = target_to_xscale(target);
624 int retval;
625 struct scan_field fields[3];
626 uint8_t field0 = 0x0;
627 uint8_t field0_check_value = 0x2;
628 uint8_t field0_check_mask = 0x7;
629 uint8_t field2 = 0x0;
630 uint8_t field2_check_value = 0x0;
631 uint8_t field2_check_mask = 0x1;
632
633 if (hold_rst != -1)
634 xscale->hold_rst = hold_rst;
635
636 if (ext_dbg_brk != -1)
637 xscale->external_debug_break = ext_dbg_brk;
638
639 jtag_set_end_state(TAP_IDLE);
640 xscale_jtag_set_instr(target->tap,
641 XSCALE_SELDCSR << xscale->xscale_variant);
642
643 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
644 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
645
646 memset(&fields, 0, sizeof fields);
647
648 fields[0].tap = target->tap;
649 fields[0].num_bits = 3;
650 fields[0].out_value = &field0;
651 uint8_t tmp;
652 fields[0].in_value = &tmp;
653
654 fields[1].tap = target->tap;
655 fields[1].num_bits = 32;
656 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
657
658 fields[2].tap = target->tap;
659 fields[2].num_bits = 1;
660 fields[2].out_value = &field2;
661 uint8_t tmp2;
662 fields[2].in_value = &tmp2;
663
664 jtag_add_dr_scan(3, fields, jtag_get_end_state());
665
666 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
667 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
668
669 if ((retval = jtag_execute_queue()) != ERROR_OK)
670 {
671 LOG_ERROR("JTAG error while writing DCSR");
672 return retval;
673 }
674
675 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
676 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
677
678 return ERROR_OK;
679 }
680
681 /* parity of a 32-bit word: returns 0 if the number of set bits is even, 1 if odd */
682 static unsigned int parity(unsigned int v)
683 {
684 // unsigned int ov = v;
685 v ^= v >> 16;
686 v ^= v >> 8;
687 v ^= v >> 4;
688 v &= 0xf;
689 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
690 return (0x6996 >> v) & 1;
691 }
692
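/* Load one 32-byte line into the mini-icache through the LDIC JTAG register:
 * a 6-bit command plus a 27-bit (cache-line aligned) virtual address, followed
 * by eight scans of one instruction word and its parity bit each.
 */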
693 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
694 {
695 struct xscale_common *xscale = target_to_xscale(target);
696 uint8_t packet[4];
697 uint8_t cmd;
698 int word;
699 struct scan_field fields[2];
700
701 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
702
703 /* LDIC into IR */
704 jtag_set_end_state(TAP_IDLE);
705 xscale_jtag_set_instr(target->tap,
706 XSCALE_LDIC << xscale->xscale_variant);
707
708 /* CMD is b011 to load a cacheline into the Mini ICache.
709 * Loading into the main ICache is deprecated, and unused.
710 * It's followed by three zero bits, and 27 address bits.
711 */
712 buf_set_u32(&cmd, 0, 6, 0x3);
713
714 /* virtual address of desired cache line */
715 buf_set_u32(packet, 0, 27, va >> 5);
716
717 memset(&fields, 0, sizeof fields);
718
719 fields[0].tap = target->tap;
720 fields[0].num_bits = 6;
721 fields[0].out_value = &cmd;
722
723 fields[1].tap = target->tap;
724 fields[1].num_bits = 27;
725 fields[1].out_value = packet;
726
727 jtag_add_dr_scan(2, fields, jtag_get_end_state());
728
729 /* rest of packet is a cacheline: 8 instructions, with parity */
730 fields[0].num_bits = 32;
731 fields[0].out_value = packet;
732
733 fields[1].num_bits = 1;
734 fields[1].out_value = &cmd;
735
736 for (word = 0; word < 8; word++)
737 {
738 buf_set_u32(packet, 0, 32, buffer[word]);
739
740 uint32_t value;
741 memcpy(&value, packet, sizeof(uint32_t));
742 cmd = parity(value);
743
744 jtag_add_dr_scan(2, fields, jtag_get_end_state());
745 }
746
747 return jtag_execute_queue();
748 }
749
750 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
751 {
752 struct xscale_common *xscale = target_to_xscale(target);
753 uint8_t packet[4];
754 uint8_t cmd;
755 struct scan_field fields[2];
756
757 jtag_set_end_state(TAP_IDLE);
758 xscale_jtag_set_instr(target->tap,
759 XSCALE_LDIC << xscale->xscale_variant);
760
761 /* CMD for invalidate IC line is b000, with bits [6:4] also b000 */
762 buf_set_u32(&cmd, 0, 6, 0x0);
763
764 /* virtual address of desired cache line */
765 buf_set_u32(packet, 0, 27, va >> 5);
766
767 memset(&fields, 0, sizeof fields);
768
769 fields[0].tap = target->tap;
770 fields[0].num_bits = 6;
771 fields[0].out_value = &cmd;
772
773 fields[1].tap = target->tap;
774 fields[1].num_bits = 27;
775 fields[1].out_value = packet;
776
777 jtag_add_dr_scan(2, fields, jtag_get_end_state());
778
779 return ERROR_OK;
780 }
781
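/* Rebuild the low (0x0) and high (0xffff0000) exception vector tables in the
 * mini-icache. Vectors 1..7 come from static overrides or are read back from
 * target memory; the reset vector (entry 0) is always replaced with a branch
 * into the debug handler at handler_address + 0x20.
 */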
782 static int xscale_update_vectors(struct target *target)
783 {
784 struct xscale_common *xscale = target_to_xscale(target);
785 int i;
786 int retval;
787
788 uint32_t low_reset_branch, high_reset_branch;
789
790 for (i = 1; i < 8; i++)
791 {
792 /* if there's a static vector specified for this exception, override */
793 if (xscale->static_high_vectors_set & (1 << i))
794 {
795 xscale->high_vectors[i] = xscale->static_high_vectors[i];
796 }
797 else
798 {
799 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
800 if (retval == ERROR_TARGET_TIMEOUT)
801 return retval;
802 if (retval != ERROR_OK)
803 {
804 /* Some of these reads will fail as part of normal execution */
805 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
806 }
807 }
808 }
809
810 for (i = 1; i < 8; i++)
811 {
812 if (xscale->static_low_vectors_set & (1 << i))
813 {
814 xscale->low_vectors[i] = xscale->static_low_vectors[i];
815 }
816 else
817 {
818 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
819 if (retval == ERROR_TARGET_TIMEOUT)
820 return retval;
821 if (retval != ERROR_OK)
822 {
823 /* Some of these reads will fail as part of normal execution */
824 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
825 }
826 }
827 }
828
829 /* calculate branches to debug handler */
830 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
831 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
832
833 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
834 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
835
836 /* invalidate and load exception vectors in mini i-cache */
837 xscale_invalidate_ic_line(target, 0x0);
838 xscale_invalidate_ic_line(target, 0xffff0000);
839
840 xscale_load_ic(target, 0x0, xscale->low_vectors);
841 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
842
843 return ERROR_OK;
844 }
845
846 static int xscale_arch_state(struct target *target)
847 {
848 struct xscale_common *xscale = target_to_xscale(target);
849 struct arm *armv4_5 = &xscale->armv4_5_common;
850
851 static const char *state[] =
852 {
853 "disabled", "enabled"
854 };
855
856 static const char *arch_dbg_reason[] =
857 {
858 "", "\n(processor reset)", "\n(trace buffer full)"
859 };
860
861 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
862 {
863 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
864 return ERROR_INVALID_ARGUMENTS;
865 }
866
867 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
868 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
869 "MMU: %s, D-Cache: %s, I-Cache: %s"
870 "%s",
871 arm_state_strings[armv4_5->core_state],
872 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
873 arm_mode_name(armv4_5->core_mode),
874 buf_get_u32(armv4_5->cpsr->value, 0, 32),
875 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
876 state[xscale->armv4_5_mmu.mmu_enabled],
877 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
878 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
879 arch_dbg_reason[xscale->arch_debug_reason]);
880
881 return ERROR_OK;
882 }
883
884 static int xscale_poll(struct target *target)
885 {
886 int retval = ERROR_OK;
887
888 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
889 {
890 enum target_state previous_state = target->state;
891 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
892 {
893
894 /* there's data to read from the tx register, we entered debug state */
895 target->state = TARGET_HALTED;
896
897 /* process debug entry, fetching current mode regs */
898 retval = xscale_debug_entry(target);
899 }
900 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
901 {
902 LOG_USER("error while polling TX register, reset CPU");
903 /* here we "lie" so GDB won't get stuck and a reset can be performed */
904 target->state = TARGET_HALTED;
905 }
906
907 /* debug_entry could have overwritten target state (i.e. immediate resume)
908 * don't signal event handlers in that case
909 */
910 if (target->state != TARGET_HALTED)
911 return ERROR_OK;
912
913 /* if target was running, signal that we halted
914 * otherwise we reentered from debug execution */
915 if (previous_state == TARGET_RUNNING)
916 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
917 else
918 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
919 }
920
921 return retval;
922 }
923
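/* Process entry into debug state: read back the register dump the debug
 * handler sends (r0, pc, r1-r7, cpsr, then the banked registers), decode the
 * method-of-entry bits from the DCSR to set the debug reason, apply the PC
 * fixup, and re-read the trace buffer if tracing is active.
 */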
924 static int xscale_debug_entry(struct target *target)
925 {
926 struct xscale_common *xscale = target_to_xscale(target);
927 struct arm *armv4_5 = &xscale->armv4_5_common;
928 uint32_t pc;
929 uint32_t buffer[10];
930 int i;
931 int retval;
932 uint32_t moe;
933
934 /* clear external dbg break (will be written on next DCSR read) */
935 xscale->external_debug_break = 0;
936 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
937 return retval;
938
939 /* get r0, pc, r1 to r7 and cpsr */
940 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
941 return retval;
942
943 /* move r0 from buffer to register cache */
944 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
945 armv4_5->core_cache->reg_list[0].dirty = 1;
946 armv4_5->core_cache->reg_list[0].valid = 1;
947 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
948
949 /* move pc from buffer to register cache */
950 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
951 armv4_5->core_cache->reg_list[15].dirty = 1;
952 armv4_5->core_cache->reg_list[15].valid = 1;
953 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
954
955 /* move data from buffer to register cache */
956 for (i = 1; i <= 7; i++)
957 {
958 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
959 armv4_5->core_cache->reg_list[i].dirty = 1;
960 armv4_5->core_cache->reg_list[i].valid = 1;
961 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
962 }
963
964 arm_set_cpsr(armv4_5, buffer[9]);
965 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
966
967 if (!is_arm_mode(armv4_5->core_mode))
968 {
969 target->state = TARGET_UNKNOWN;
970 LOG_ERROR("cpsr contains invalid mode value - communication failure");
971 return ERROR_TARGET_FAILURE;
972 }
973 LOG_DEBUG("target entered debug state in %s mode",
974 arm_mode_name(armv4_5->core_mode));
975
976 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
977 if (armv4_5->spsr) {
978 xscale_receive(target, buffer, 8);
979 buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
980 armv4_5->spsr->dirty = false;
981 armv4_5->spsr->valid = true;
982 }
983 else
984 {
985 /* r8 to r14, but no spsr */
986 xscale_receive(target, buffer, 7);
987 }
988
989 /* move data from buffer to right banked register in cache */
990 for (i = 8; i <= 14; i++)
991 {
992 struct reg *r = arm_reg_current(armv4_5, i);
993
994 buf_set_u32(r->value, 0, 32, buffer[i - 8]);
995 r->dirty = false;
996 r->valid = true;
997 }
998
999 /* examine debug reason */
1000 xscale_read_dcsr(target);
1001 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1002
1003 /* stored PC (for calculating fixup) */
1004 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1005
1006 switch (moe)
1007 {
1008 case 0x0: /* Processor reset */
1009 target->debug_reason = DBG_REASON_DBGRQ;
1010 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1011 pc -= 4;
1012 break;
1013 case 0x1: /* Instruction breakpoint hit */
1014 target->debug_reason = DBG_REASON_BREAKPOINT;
1015 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1016 pc -= 4;
1017 break;
1018 case 0x2: /* Data breakpoint hit */
1019 target->debug_reason = DBG_REASON_WATCHPOINT;
1020 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1021 pc -= 4;
1022 break;
1023 case 0x3: /* BKPT instruction executed */
1024 target->debug_reason = DBG_REASON_BREAKPOINT;
1025 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1026 pc -= 4;
1027 break;
1028 case 0x4: /* Ext. debug event */
1029 target->debug_reason = DBG_REASON_DBGRQ;
1030 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1031 pc -= 4;
1032 break;
1033 case 0x5: /* Vector trap occurred */
1034 target->debug_reason = DBG_REASON_BREAKPOINT;
1035 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1036 pc -= 4;
1037 break;
1038 case 0x6: /* Trace buffer full break */
1039 target->debug_reason = DBG_REASON_DBGRQ;
1040 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1041 pc -= 4;
1042 break;
1043 case 0x7: /* Reserved (may flag Hot-Debug support) */
1044 default:
1045 LOG_ERROR("Method of Entry is 'Reserved'");
1046 exit(-1);
1047 break;
1048 }
1049
1050 /* apply PC fixup */
1051 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1052
1053 /* on the first debug entry, identify cache type */
1054 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1055 {
1056 uint32_t cache_type_reg;
1057
1058 /* read cp15 cache type register */
1059 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1060 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1061
1062 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1063 }
1064
1065 /* examine MMU and Cache settings */
1066 /* read cp15 control register */
1067 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1068 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1069 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1070 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1071 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1072
1073 /* tracing enabled, read collected trace data */
1074 if (xscale->trace.buffer_enabled)
1075 {
1076 xscale_read_trace(target);
1077 xscale->trace.buffer_fill--;
1078
1079 /* resume if we're still collecting trace data */
1080 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1081 && (xscale->trace.buffer_fill > 0))
1082 {
1083 xscale_resume(target, 1, 0x0, 1, 0);
1084 }
1085 else
1086 {
1087 xscale->trace.buffer_enabled = 0;
1088 }
1089 }
1090
1091 return ERROR_OK;
1092 }
1093
1094 static int xscale_halt(struct target *target)
1095 {
1096 struct xscale_common *xscale = target_to_xscale(target);
1097
1098 LOG_DEBUG("target->state: %s",
1099 target_state_name(target));
1100
1101 if (target->state == TARGET_HALTED)
1102 {
1103 LOG_DEBUG("target was already halted");
1104 return ERROR_OK;
1105 }
1106 else if (target->state == TARGET_UNKNOWN)
1107 {
1108 /* this must not happen for an XScale target */
1109 LOG_ERROR("target was in unknown state when halt was requested");
1110 return ERROR_TARGET_INVALID;
1111 }
1112 else if (target->state == TARGET_RESET)
1113 {
1114 LOG_DEBUG("target->state == TARGET_RESET");
1115 }
1116 else
1117 {
1118 /* assert external dbg break */
1119 xscale->external_debug_break = 1;
1120 xscale_read_dcsr(target);
1121
1122 target->debug_reason = DBG_REASON_DBGRQ;
1123 }
1124
1125 return ERROR_OK;
1126 }
1127
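/* XScale has no hardware single-step; stepping is emulated by programming
 * IBCR0 as a hardware breakpoint on the address of the next instruction.
 */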
1128 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1129 {
1130 struct xscale_common *xscale = target_to_xscale(target);
1131 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1132 int retval;
1133
1134 if (xscale->ibcr0_used)
1135 {
1136 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1137
1138 if (ibcr0_bp)
1139 {
1140 xscale_unset_breakpoint(target, ibcr0_bp);
1141 }
1142 else
1143 {
1144 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1145 exit(-1);
1146 }
1147 }
1148
1149 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1150 return retval;
1151
1152 return ERROR_OK;
1153 }
1154
1155 static int xscale_disable_single_step(struct target *target)
1156 {
1157 struct xscale_common *xscale = target_to_xscale(target);
1158 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1159 int retval;
1160
1161 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1162 return retval;
1163
1164 return ERROR_OK;
1165 }
1166
1167 static void xscale_enable_watchpoints(struct target *target)
1168 {
1169 struct watchpoint *watchpoint = target->watchpoints;
1170
1171 while (watchpoint)
1172 {
1173 if (watchpoint->set == 0)
1174 xscale_set_watchpoint(target, watchpoint);
1175 watchpoint = watchpoint->next;
1176 }
1177 }
1178
1179 static void xscale_enable_breakpoints(struct target *target)
1180 {
1181 struct breakpoint *breakpoint = target->breakpoints;
1182
1183 /* set any pending breakpoints */
1184 while (breakpoint)
1185 {
1186 if (breakpoint->set == 0)
1187 xscale_set_breakpoint(target, breakpoint);
1188 breakpoint = breakpoint->next;
1189 }
1190 }
1191
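/* Resume protocol: after vectors and dirty banked registers are written back,
 * the debug handler is sent command 0x30 (resume) or 0x31 (resume with trace
 * buffer enabled, preceded by 0x62 to clean the buffer), followed by the CPSR,
 * r7 down to r0, and finally the PC.
 */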
1192 static int xscale_resume(struct target *target, int current,
1193 uint32_t address, int handle_breakpoints, int debug_execution)
1194 {
1195 struct xscale_common *xscale = target_to_xscale(target);
1196 struct arm *armv4_5 = &xscale->armv4_5_common;
1197 struct breakpoint *breakpoint = target->breakpoints;
1198 uint32_t current_pc;
1199 int retval;
1200 int i;
1201
1202 LOG_DEBUG("-");
1203
1204 if (target->state != TARGET_HALTED)
1205 {
1206 LOG_WARNING("target not halted");
1207 return ERROR_TARGET_NOT_HALTED;
1208 }
1209
1210 if (!debug_execution)
1211 {
1212 target_free_all_working_areas(target);
1213 }
1214
1215 /* update vector tables */
1216 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1217 return retval;
1218
1219 /* current = 1: continue on current pc, otherwise continue at <address> */
1220 if (!current)
1221 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1222
1223 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1224
1225 /* if we're at the reset vector, we have to simulate the branch */
1226 if (current_pc == 0x0)
1227 {
1228 arm_simulate_step(target, NULL);
1229 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1230 }
1231
1232 /* the front-end may request us not to handle breakpoints */
1233 if (handle_breakpoints)
1234 {
1235 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1236 {
1237 uint32_t next_pc;
1238
1239 /* there's a breakpoint at the current PC, we have to step over it */
1240 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1241 xscale_unset_breakpoint(target, breakpoint);
1242
1243 /* calculate PC of next instruction */
1244 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1245 {
1246 uint32_t current_opcode;
1247 target_read_u32(target, current_pc, &current_opcode);
1248 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1249 }
1250
1251 LOG_DEBUG("enable single-step");
1252 xscale_enable_single_step(target, next_pc);
1253
1254 /* restore banked registers */
1255 retval = xscale_restore_banked(target);
1256
1257 /* send resume request (command 0x30 or 0x31)
1258 * clean the trace buffer if it is to be enabled (0x62) */
1259 if (xscale->trace.buffer_enabled)
1260 {
1261 xscale_send_u32(target, 0x62);
1262 xscale_send_u32(target, 0x31);
1263 }
1264 else
1265 xscale_send_u32(target, 0x30);
1266
1267 /* send CPSR */
1268 xscale_send_u32(target,
1269 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1270 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1271 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1272
1273 for (i = 7; i >= 0; i--)
1274 {
1275 /* send register */
1276 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1277 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1278 }
1279
1280 /* send PC */
1281 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1282 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1283
1284 /* wait for and process debug entry */
1285 xscale_debug_entry(target);
1286
1287 LOG_DEBUG("disable single-step");
1288 xscale_disable_single_step(target);
1289
1290 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1291 xscale_set_breakpoint(target, breakpoint);
1292 }
1293 }
1294
1295 /* enable any pending breakpoints and watchpoints */
1296 xscale_enable_breakpoints(target);
1297 xscale_enable_watchpoints(target);
1298
1299 /* restore banked registers */
1300 retval = xscale_restore_banked(target);
1301
1302 /* send resume request (command 0x30 or 0x31)
1303 * clean the trace buffer if it is to be enabled (0x62) */
1304 if (xscale->trace.buffer_enabled)
1305 {
1306 xscale_send_u32(target, 0x62);
1307 xscale_send_u32(target, 0x31);
1308 }
1309 else
1310 xscale_send_u32(target, 0x30);
1311
1312 /* send CPSR */
1313 xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
1314 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1315 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1316
1317 for (i = 7; i >= 0; i--)
1318 {
1319 /* send register */
1320 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1321 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1322 }
1323
1324 /* send PC */
1325 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1326 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1327
1328 target->debug_reason = DBG_REASON_NOTHALTED;
1329
1330 if (!debug_execution)
1331 {
1332 /* registers are now invalid */
1333 register_cache_invalidate(armv4_5->core_cache);
1334 target->state = TARGET_RUNNING;
1335 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1336 }
1337 else
1338 {
1339 target->state = TARGET_DEBUG_RUNNING;
1340 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1341 }
1342
1343 LOG_DEBUG("target resumed");
1344
1345 return ERROR_OK;
1346 }
1347
1348 static int xscale_step_inner(struct target *target, int current,
1349 uint32_t address, int handle_breakpoints)
1350 {
1351 struct xscale_common *xscale = target_to_xscale(target);
1352 struct arm *armv4_5 = &xscale->armv4_5_common;
1353 uint32_t next_pc;
1354 int retval;
1355 int i;
1356
1357 target->debug_reason = DBG_REASON_SINGLESTEP;
1358
1359 /* calculate PC of next instruction */
1360 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1361 {
1362 uint32_t current_opcode, current_pc;
1363 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1364
1365 target_read_u32(target, current_pc, &current_opcode);
1366 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1367 return retval;
1368 }
1369
1370 LOG_DEBUG("enable single-step");
1371 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1372 return retval;
1373
1374 /* restore banked registers */
1375 if ((retval = xscale_restore_banked(target)) != ERROR_OK)
1376 return retval;
1377
1378 /* send resume request (command 0x30 or 0x31)
1379 * clean the trace buffer if it is to be enabled (0x62) */
1380 if (xscale->trace.buffer_enabled)
1381 {
1382 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1383 return retval;
1384 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1385 return retval;
1386 }
1387 else
1388 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1389 return retval;
1390
1391 /* send CPSR */
1392 retval = xscale_send_u32(target,
1393 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1394 if (retval != ERROR_OK)
1395 return retval;
1396 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1397 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1398
1399 for (i = 7; i >= 0; i--)
1400 {
1401 /* send register */
1402 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1403 return retval;
1404 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1405 }
1406
1407 /* send PC */
1408 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
1409 return retval;
1410 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1411
1412 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1413
1414 /* registers are now invalid */
1415 register_cache_invalidate(armv4_5->core_cache);
1416
1417 /* wait for and process debug entry */
1418 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1419 return retval;
1420
1421 LOG_DEBUG("disable single-step");
1422 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1423 return retval;
1424
1425 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1426
1427 return ERROR_OK;
1428 }
1429
1430 static int xscale_step(struct target *target, int current,
1431 uint32_t address, int handle_breakpoints)
1432 {
1433 struct arm *armv4_5 = target_to_arm(target);
1434 struct breakpoint *breakpoint = target->breakpoints;
1435
1436 uint32_t current_pc;
1437 int retval;
1438
1439 if (target->state != TARGET_HALTED)
1440 {
1441 LOG_WARNING("target not halted");
1442 return ERROR_TARGET_NOT_HALTED;
1443 }
1444
1445 /* current = 1: continue on current pc, otherwise continue at <address> */
1446 if (!current)
1447 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1448
1449 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1450
1451 /* if we're at the reset vector, we have to simulate the step */
1452 if (current_pc == 0x0)
1453 {
1454 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1455 return retval;
1456 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1457
1458 target->debug_reason = DBG_REASON_SINGLESTEP;
1459 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1460
1461 return ERROR_OK;
1462 }
1463
1464 /* the front-end may request us not to handle breakpoints */
1465 if (handle_breakpoints)
1466 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1467 {
1468 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1469 return retval;
1470 }
1471
1472 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1473
1474 if (breakpoint)
1475 {
1476 xscale_set_breakpoint(target, breakpoint);
1477 }
1478
1479 LOG_DEBUG("target stepped");
1480
1481 return ERROR_OK;
1482
1483 }
1484
1485 static int xscale_assert_reset(struct target *target)
1486 {
1487 struct xscale_common *xscale = target_to_xscale(target);
1488
1489 LOG_DEBUG("target->state: %s",
1490 target_state_name(target));
1491
1492 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1493 * end up in T-L-R, which would reset JTAG)
1494 */
1495 jtag_set_end_state(TAP_IDLE);
1496 xscale_jtag_set_instr(target->tap,
1497 XSCALE_SELDCSR << xscale->xscale_variant);
1498
1499 /* set Hold reset, Halt mode and Trap Reset */
1500 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1501 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1502 xscale_write_dcsr(target, 1, 0);
1503
1504 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1505 xscale_jtag_set_instr(target->tap, 0x7f);
1506 jtag_execute_queue();
1507
1508 /* assert reset */
1509 jtag_add_reset(0, 1);
1510
1511 /* sleep 1ms, to be sure we fulfill any requirements */
1512 jtag_add_sleep(1000);
1513 jtag_execute_queue();
1514
1515 target->state = TARGET_RESET;
1516
1517 if (target->reset_halt)
1518 {
1519 int retval;
1520 if ((retval = target_halt(target)) != ERROR_OK)
1521 return retval;
1522 }
1523
1524 return ERROR_OK;
1525 }
1526
1527 static int xscale_deassert_reset(struct target *target)
1528 {
1529 struct xscale_common *xscale = target_to_xscale(target);
1530 struct breakpoint *breakpoint = target->breakpoints;
1531
1532 LOG_DEBUG("-");
1533
1534 xscale->ibcr_available = 2;
1535 xscale->ibcr0_used = 0;
1536 xscale->ibcr1_used = 0;
1537
1538 xscale->dbr_available = 2;
1539 xscale->dbr0_used = 0;
1540 xscale->dbr1_used = 0;
1541
1542 /* mark all hardware breakpoints as unset */
1543 while (breakpoint)
1544 {
1545 if (breakpoint->type == BKPT_HARD)
1546 {
1547 breakpoint->set = 0;
1548 }
1549 breakpoint = breakpoint->next;
1550 }
1551
1552 register_cache_invalidate(xscale->armv4_5_common.core_cache);
1553
1554 /* FIXME mark hardware watchpoints as unset too. Also,
1555 * at least some of the XScale registers are invalid...
1556 */
1557
1558 /*
1559 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1560 * contents got invalidated. Safer to force that, so writing new
1561 * contents can't ever fail..
1562 */
1563 {
1564 uint32_t address;
1565 unsigned buf_cnt;
1566 const uint8_t *buffer = xscale_debug_handler;
1567 int retval;
1568
1569 /* release SRST */
1570 jtag_add_reset(0, 0);
1571
1572 /* wait 300ms; 150 and 100ms were not enough */
1573 jtag_add_sleep(300*1000);
1574
1575 jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
1576 jtag_execute_queue();
1577
1578 /* set Hold reset, Halt mode and Trap Reset */
1579 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1580 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1581 xscale_write_dcsr(target, 1, 0);
1582
1583 /* Load the debug handler into the mini-icache. Since
1584 * it's using halt mode (not monitor mode), it runs in
1585 * "Special Debug State" for access to registers, memory,
1586 * coprocessors, trace data, etc.
1587 */
1588 address = xscale->handler_address;
1589 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1590 binary_size > 0;
1591 binary_size -= buf_cnt, buffer += buf_cnt)
1592 {
1593 uint32_t cache_line[8];
1594 unsigned i;
1595
1596 buf_cnt = binary_size;
1597 if (buf_cnt > 32)
1598 buf_cnt = 32;
1599
1600 for (i = 0; i < buf_cnt; i += 4)
1601 {
1602 /* convert LE buffer to host-endian uint32_t */
1603 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1604 }
1605
1606 for (; i < 32; i += 4)
1607 {
1608 cache_line[i / 4] = 0xe1a08008;
1609 }
1610
1611 /* only load addresses other than the reset vectors */
1612 if ((address % 0x400) != 0x0)
1613 {
1614 retval = xscale_load_ic(target, address,
1615 cache_line);
1616 if (retval != ERROR_OK)
1617 return retval;
1618 }
1619
1620 address += buf_cnt;
1621 }
1622
1623 retval = xscale_load_ic(target, 0x0,
1624 xscale->low_vectors);
1625 if (retval != ERROR_OK)
1626 return retval;
1627 retval = xscale_load_ic(target, 0xffff0000,
1628 xscale->high_vectors);
1629 if (retval != ERROR_OK)
1630 return retval;
1631
1632 jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
1633
1634 jtag_add_sleep(100000);
1635
1636 /* set Hold reset, Halt mode and Trap Reset */
1637 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1638 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1639 xscale_write_dcsr(target, 1, 0);
1640
1641 /* clear Hold reset to let the target run (should enter debug handler) */
1642 xscale_write_dcsr(target, 0, 1);
1643 target->state = TARGET_RUNNING;
1644
1645 if (!target->reset_halt)
1646 {
1647 jtag_add_sleep(10000);
1648
1649 /* we should have entered debug now */
1650 xscale_debug_entry(target);
1651 target->state = TARGET_HALTED;
1652
1653 /* resume the target */
1654 xscale_resume(target, 1, 0x0, 1, 0);
1655 }
1656 }
1657
1658 return ERROR_OK;
1659 }
1660
1661 static int xscale_read_core_reg(struct target *target, struct reg *r,
1662 int num, enum arm_mode mode)
1663 {
1664 /** \todo add debug handler support for core register reads */
1665 LOG_ERROR("not implemented");
1666 return ERROR_OK;
1667 }
1668
1669 static int xscale_write_core_reg(struct target *target, struct reg *r,
1670 int num, enum arm_mode mode, uint32_t value)
1671 {
1672 /** \todo add debug handler support for core register writes */
1673 LOG_ERROR("not implemented");
1674 return ERROR_OK;
1675 }
1676
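/* Fetch the banked registers of every privileged mode whose cache entries are
 * not all valid: command 0x0 asks the debug handler for the bank selected by
 * the CPSR value sent next (r8-r14, plus the SPSR except in SYS mode).
 */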
1677 static int xscale_full_context(struct target *target)
1678 {
1679 struct arm *armv4_5 = target_to_arm(target);
1680
1681 uint32_t *buffer;
1682
1683 int i, j;
1684
1685 LOG_DEBUG("-");
1686
1687 if (target->state != TARGET_HALTED)
1688 {
1689 LOG_WARNING("target not halted");
1690 return ERROR_TARGET_NOT_HALTED;
1691 }
1692
1693 buffer = malloc(4 * 8);
1694
1695 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1696 * we can't enter User mode on an XScale (unpredictable),
1697 * but User shares registers with SYS
1698 */
1699 for (i = 1; i < 7; i++)
1700 {
1701 enum arm_mode mode = armv4_5_number_to_mode(i);
1702 bool valid = true;
1703 struct reg *r;
1704
1705 if (mode == ARM_MODE_USR)
1706 continue;
1707
1708 /* check if there are invalid registers in the current mode
1709 */
1710 for (j = 0; valid && j <= 16; j++)
1711 {
1712 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1713 mode, j).valid)
1714 valid = false;
1715 }
1716 if (valid)
1717 continue;
1718
1719 /* request banked registers */
1720 xscale_send_u32(target, 0x0);
1721
1722 /* send CPSR for desired bank mode */
1723 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1724
1725 /* get banked registers: r8 to r14; and SPSR
1726 * except in USR/SYS mode
1727 */
1728 if (mode != ARM_MODE_SYS) {
1729 /* SPSR */
1730 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1731 mode, 16);
1732
1733 xscale_receive(target, buffer, 8);
1734
1735 buf_set_u32(r->value, 0, 32, buffer[7]);
1736 r->dirty = false;
1737 r->valid = true;
1738 } else {
1739 xscale_receive(target, buffer, 7);
1740 }
1741
1742 /* move data from buffer to register cache */
1743 for (j = 8; j <= 14; j++)
1744 {
1745 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1746 mode, j);
1747
1748 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1749 r->dirty = false;
1750 r->valid = true;
1751 }
1752 }
1753
1754 free(buffer);
1755
1756 return ERROR_OK;
1757 }
1758
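/* Flush dirty banked registers back to the target before resuming: for each
 * mode with dirty entries, command 0x1 plus that mode's CPSR is sent, followed
 * by r8-r14 and (except in SYS mode) the SPSR.
 */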
1759 static int xscale_restore_banked(struct target *target)
1760 {
1761 struct arm *armv4_5 = target_to_arm(target);
1762
1763 int i, j;
1764
1765 if (target->state != TARGET_HALTED)
1766 {
1767 LOG_WARNING("target not halted");
1768 return ERROR_TARGET_NOT_HALTED;
1769 }
1770
1771 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1772 * and check if any banked registers need to be written. Ignore
1773 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1774 * an XScale (unpredictable), but they share all registers.
1775 */
1776 for (i = 1; i < 7; i++)
1777 {
1778 enum arm_mode mode = armv4_5_number_to_mode(i);
1779 struct reg *r;
1780
1781 if (mode == ARM_MODE_USR)
1782 continue;
1783
1784 /* check if there are dirty registers in this mode */
1785 for (j = 8; j <= 14; j++)
1786 {
1787 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1788 mode, j).dirty)
1789 goto dirty;
1790 }
1791
1792 /* if not USR/SYS, check if the SPSR needs to be written */
1793 if (mode != ARM_MODE_SYS)
1794 {
1795 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1796 mode, 16).dirty)
1797 goto dirty;
1798 }
1799
1800 /* there's nothing to flush for this mode */
1801 continue;
1802
1803 dirty:
1804 /* command 0x1: "send banked registers" */
1805 xscale_send_u32(target, 0x1);
1806
1807 /* send CPSR for desired mode */
1808 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1809
1810 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1811 * but this protocol doesn't understand that nuance.
1812 */
1813 for (j = 8; j <= 14; j++) {
1814 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1815 mode, j);
1816 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1817 r->dirty = false;
1818 }
1819
1820 /* send spsr if not in USR/SYS mode */
1821 if (mode != ARM_MODE_SYS) {
1822 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1823 mode, 16);
1824 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1825 r->dirty = false;
1826 }
1827 }
1828
1829 return ERROR_OK;
1830 }
1831
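/* Debug handler read protocol: command 0x1n (n = access size in bytes), then
 * the base address and word count. Data always comes back as 32-bit words in
 * host endianness; a Sticky Abort in the DCSR afterwards signals a data abort.
 */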
1832 static int xscale_read_memory(struct target *target, uint32_t address,
1833 uint32_t size, uint32_t count, uint8_t *buffer)
1834 {
1835 struct xscale_common *xscale = target_to_xscale(target);
1836 uint32_t *buf32;
1837 uint32_t i;
1838 int retval;
1839
1840 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1841
1842 if (target->state != TARGET_HALTED)
1843 {
1844 LOG_WARNING("target not halted");
1845 return ERROR_TARGET_NOT_HALTED;
1846 }
1847
1848 /* sanitize arguments */
1849 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1850 return ERROR_INVALID_ARGUMENTS;
1851
1852 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1853 return ERROR_TARGET_UNALIGNED_ACCESS;
1854
1855 /* send memory read request (command 0x1n, n: access size) */
1856 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1857 return retval;
1858
1859 /* send base address for read request */
1860 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1861 return retval;
1862
1863 /* send number of requested data words */
1864 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1865 return retval;
1866
1867 /* receive data from target (count times 32-bit words in host endianness) */
1868 buf32 = malloc(4 * count);
1869 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1870 {
free(buf32);
return retval;
}
1871
1872 /* extract data from host-endian buffer into byte stream */
1873 for (i = 0; i < count; i++)
1874 {
1875 switch (size)
1876 {
1877 case 4:
1878 target_buffer_set_u32(target, buffer, buf32[i]);
1879 buffer += 4;
1880 break;
1881 case 2:
1882 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1883 buffer += 2;
1884 break;
1885 case 1:
1886 *buffer++ = buf32[i] & 0xff;
1887 break;
1888 default:
1889 LOG_ERROR("invalid read size");
1890 return ERROR_INVALID_ARGUMENTS;
1891 }
1892 }
1893
1894 free(buf32);
1895
1896 /* examine DCSR, to see if Sticky Abort (SA) got set */
1897 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1898 return retval;
1899 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1900 {
1901 /* clear SA bit */
1902 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1903 return retval;
1904
1905 return ERROR_TARGET_DATA_ABORT;
1906 }
1907
1908 return ERROR_OK;
1909 }
1910
1911 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1912 uint32_t size, uint32_t count, uint8_t *buffer)
1913 {
1914 /** \todo: provide a non-stub implementation of this routine. */
1915 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1916 target_name(target), __func__);
1917 return ERROR_FAIL;
1918 }
1919
1920 static int xscale_write_memory(struct target *target, uint32_t address,
1921 uint32_t size, uint32_t count, uint8_t *buffer)
1922 {
1923 struct xscale_common *xscale = target_to_xscale(target);
1924 int retval;
1925
1926 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1927
1928 if (target->state != TARGET_HALTED)
1929 {
1930 LOG_WARNING("target not halted");
1931 return ERROR_TARGET_NOT_HALTED;
1932 }
1933
1934 /* sanitize arguments */
1935 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1936 return ERROR_INVALID_ARGUMENTS;
1937
1938 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1939 return ERROR_TARGET_UNALIGNED_ACCESS;
1940
1941 /* send memory write request (command 0x2n, n: access size) */
1942 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1943 return retval;
1944
1945 /* send base address for write request */
1946 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1947 return retval;
1948
1949 /* send number of requested data words to be written */
1950 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1951 return retval;
1952
1953 /* extract data from host-endian buffer into byte stream */
1954 #if 0
1955 for (i = 0; i < count; i++)
1956 {
1957 switch (size)
1958 {
1959 case 4:
1960 value = target_buffer_get_u32(target, buffer);
1961 xscale_send_u32(target, value);
1962 buffer += 4;
1963 break;
1964 case 2:
1965 value = target_buffer_get_u16(target, buffer);
1966 xscale_send_u32(target, value);
1967 buffer += 2;
1968 break;
1969 case 1:
1970 value = *buffer;
1971 xscale_send_u32(target, value);
1972 buffer += 1;
1973 break;
1974 default:
1975 LOG_ERROR("should never get here");
1976 exit(-1);
1977 }
1978 }
1979 #endif
1980 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1981 return retval;
1982
1983 /* examine DCSR, to see if Sticky Abort (SA) got set */
1984 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1985 return retval;
1986 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1987 {
1988 /* clear SA bit */
1989 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1990 return retval;
1991
1992 return ERROR_TARGET_DATA_ABORT;
1993 }
1994
1995 return ERROR_OK;
1996 }
1997
1998 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1999 uint32_t size, uint32_t count, uint8_t *buffer)
2000 {
2001 /** \todo: provide a non-stub implementation of this routine. */
2002 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
2003 target_name(target), __func__);
2004 return ERROR_FAIL;
2005 }
2006
2007 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
2008 uint32_t count, uint8_t *buffer)
2009 {
2010 return xscale_write_memory(target, address, 4, count, buffer);
2011 }
2012
2013 static uint32_t xscale_get_ttb(struct target *target)
2014 {
2015 struct xscale_common *xscale = target_to_xscale(target);
2016 uint32_t ttb;
2017
2018 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2019 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2020
2021 return ttb;
2022 }
2023
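/* The two helpers below toggle the MMU and cache enables in the CP15
 * control register (bit 0: MMU, bit 2: data/unified cache, bit 12:
 * instruction cache -- the standard ARMv4/v5 control register layout).
 * Before the data cache is disabled it is cleaned through debug handler
 * command 0x50 (using cache_clean_address) and invalidated with 0x51;
 * 0x52 invalidates the ICache and 0x53 issues a CPWAIT so the CP15
 * update takes effect before execution continues.
 */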
2024 static void xscale_disable_mmu_caches(struct target *target, int mmu,
2025 int d_u_cache, int i_cache)
2026 {
2027 struct xscale_common *xscale = target_to_xscale(target);
2028 uint32_t cp15_control;
2029
2030 /* read cp15 control register */
2031 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2032 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2033
2034 if (mmu)
2035 cp15_control &= ~0x1U;
2036
2037 if (d_u_cache)
2038 {
2039 /* clean DCache */
2040 xscale_send_u32(target, 0x50);
2041 xscale_send_u32(target, xscale->cache_clean_address);
2042
2043 /* invalidate DCache */
2044 xscale_send_u32(target, 0x51);
2045
2046 cp15_control &= ~0x4U;
2047 }
2048
2049 if (i_cache)
2050 {
2051 /* invalidate ICache */
2052 xscale_send_u32(target, 0x52);
2053 cp15_control &= ~0x1000U;
2054 }
2055
2056 /* write new cp15 control register */
2057 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2058
2059 /* execute cpwait to ensure outstanding operations complete */
2060 xscale_send_u32(target, 0x53);
2061 }
2062
2063 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2064 int d_u_cache, int i_cache)
2065 {
2066 struct xscale_common *xscale = target_to_xscale(target);
2067 uint32_t cp15_control;
2068
2069 /* read cp15 control register */
2070 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2071 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2072
2073 if (mmu)
2074 cp15_control |= 0x1U;
2075
2076 if (d_u_cache)
2077 cp15_control |= 0x4U;
2078
2079 if (i_cache)
2080 cp15_control |= 0x1000U;
2081
2082 /* write new cp15 control register */
2083 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2084
2085 /* execute cpwait to ensure outstanding operations complete */
2086 xscale_send_u32(target, 0x53);
2087 }
2088
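/* Hardware breakpoints use the two instruction breakpoint registers
 * IBCR0/IBCR1; the value programmed is the breakpoint address with bit 0
 * set, which serves as the IBCR enable bit in the XScale debug model.
 * Software breakpoints save the original opcode and patch in the ARM or
 * Thumb BKPT instruction instead.
 */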
2089 static int xscale_set_breakpoint(struct target *target,
2090 struct breakpoint *breakpoint)
2091 {
2092 int retval;
2093 struct xscale_common *xscale = target_to_xscale(target);
2094
2095 if (target->state != TARGET_HALTED)
2096 {
2097 LOG_WARNING("target not halted");
2098 return ERROR_TARGET_NOT_HALTED;
2099 }
2100
2101 if (breakpoint->set)
2102 {
2103 LOG_WARNING("breakpoint already set");
2104 return ERROR_OK;
2105 }
2106
2107 if (breakpoint->type == BKPT_HARD)
2108 {
2109 uint32_t value = breakpoint->address | 1;
2110 if (!xscale->ibcr0_used)
2111 {
2112 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2113 xscale->ibcr0_used = 1;
2114 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2115 }
2116 else if (!xscale->ibcr1_used)
2117 {
2118 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2119 xscale->ibcr1_used = 1;
2120 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2121 }
2122 else
2123 {
2124 LOG_ERROR("BUG: no hardware comparator available");
2125 return ERROR_OK;
2126 }
2127 }
2128 else if (breakpoint->type == BKPT_SOFT)
2129 {
2130 if (breakpoint->length == 4)
2131 {
2132 /* keep the original instruction in target endianness */
2133 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2134 {
2135 return retval;
2136 }
2137 /* write the breakpoint instruction in target endianness (xscale->arm_bkpt is host endian) */
2138 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2139 {
2140 return retval;
2141 }
2142 }
2143 else
2144 {
2145 /* keep the original instruction in target endianness */
2146 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2147 {
2148 return retval;
2149 }
2150 /* write the breakpoint instruction in target endianness (xscale->thumb_bkpt is host endian) */
2151 if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2152 {
2153 return retval;
2154 }
2155 }
2156 breakpoint->set = 1;
2157 }
2158
2159 return ERROR_OK;
2160 }
2161
2162 static int xscale_add_breakpoint(struct target *target,
2163 struct breakpoint *breakpoint)
2164 {
2165 struct xscale_common *xscale = target_to_xscale(target);
2166
2167 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2168 {
2169 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2170 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2171 }
2172
2173 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2174 {
2175 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2176 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2177 }
2178
2179 if (breakpoint->type == BKPT_HARD)
2180 {
2181 xscale->ibcr_available--;
2182 }
2183
2184 return ERROR_OK;
2185 }
2186
2187 static int xscale_unset_breakpoint(struct target *target,
2188 struct breakpoint *breakpoint)
2189 {
2190 int retval;
2191 struct xscale_common *xscale = target_to_xscale(target);
2192
2193 if (target->state != TARGET_HALTED)
2194 {
2195 LOG_WARNING("target not halted");
2196 return ERROR_TARGET_NOT_HALTED;
2197 }
2198
2199 if (!breakpoint->set)
2200 {
2201 LOG_WARNING("breakpoint not set");
2202 return ERROR_OK;
2203 }
2204
2205 if (breakpoint->type == BKPT_HARD)
2206 {
2207 if (breakpoint->set == 1)
2208 {
2209 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2210 xscale->ibcr0_used = 0;
2211 }
2212 else if (breakpoint->set == 2)
2213 {
2214 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2215 xscale->ibcr1_used = 0;
2216 }
2217 breakpoint->set = 0;
2218 }
2219 else
2220 {
2221 /* restore original instruction (kept in target endianness) */
2222 if (breakpoint->length == 4)
2223 {
2224 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2225 {
2226 return retval;
2227 }
2228 }
2229 else
2230 {
2231 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2232 {
2233 return retval;
2234 }
2235 }
2236 breakpoint->set = 0;
2237 }
2238
2239 return ERROR_OK;
2240 }
2241
2242 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2243 {
2244 struct xscale_common *xscale = target_to_xscale(target);
2245
2246 if (target->state != TARGET_HALTED)
2247 {
2248 LOG_WARNING("target not halted");
2249 return ERROR_TARGET_NOT_HALTED;
2250 }
2251
2252 if (breakpoint->set)
2253 {
2254 xscale_unset_breakpoint(target, breakpoint);
2255 }
2256
2257 if (breakpoint->type == BKPT_HARD)
2258 xscale->ibcr_available++;
2259
2260 return ERROR_OK;
2261 }
2262
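/* Watchpoints use the two data breakpoint registers DBR0/DBR1 together
 * with DBCON.  As implemented below, each DBR has a two-bit enable field
 * in DBCON (bits 1:0 for DBR0, bits 3:2 for DBR1) selecting the trapped
 * access type: 0x1 store, 0x2 any access, 0x3 load.  The length accepted
 * by xscale_add_watchpoint is not encoded into the comparator here.
 */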
2263 static int xscale_set_watchpoint(struct target *target,
2264 struct watchpoint *watchpoint)
2265 {
2266 struct xscale_common *xscale = target_to_xscale(target);
2267 uint8_t enable = 0;
2268 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2269 uint32_t dbcon_value;
2270
2271 if (target->state != TARGET_HALTED)
2272 {
2273 LOG_WARNING("target not halted");
2274 return ERROR_TARGET_NOT_HALTED;
2275 }
2276
2277 /* refresh DBCON from the target before modifying it */
 xscale_get_reg(dbcon);
 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2278
2279 switch (watchpoint->rw)
2280 {
2281 case WPT_READ:
2282 enable = 0x3;
2283 break;
2284 case WPT_ACCESS:
2285 enable = 0x2;
2286 break;
2287 case WPT_WRITE:
2288 enable = 0x1;
2289 break;
2290 default:
2291 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2292 }
2293
2294 if (!xscale->dbr0_used)
2295 {
2296 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2297 dbcon_value |= enable;
2298 xscale_set_reg_u32(dbcon, dbcon_value);
2299 watchpoint->set = 1;
2300 xscale->dbr0_used = 1;
2301 }
2302 else if (!xscale->dbr1_used)
2303 {
2304 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2305 dbcon_value |= enable << 2;
2306 xscale_set_reg_u32(dbcon, dbcon_value);
2307 watchpoint->set = 2;
2308 xscale->dbr1_used = 1;
2309 }
2310 else
2311 {
2312 LOG_ERROR("BUG: no hardware comparator available");
2313 return ERROR_OK;
2314 }
2315
2316 return ERROR_OK;
2317 }
2318
2319 static int xscale_add_watchpoint(struct target *target,
2320 struct watchpoint *watchpoint)
2321 {
2322 struct xscale_common *xscale = target_to_xscale(target);
2323
2324 if (xscale->dbr_available < 1)
2325 {
2326 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2327 }
2328
2329 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2330 {
2331 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2332 }
2333
2334 xscale->dbr_available--;
2335
2336 return ERROR_OK;
2337 }
2338
2339 static int xscale_unset_watchpoint(struct target *target,
2340 struct watchpoint *watchpoint)
2341 {
2342 struct xscale_common *xscale = target_to_xscale(target);
2343 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2344 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2345
2346 if (target->state != TARGET_HALTED)
2347 {
2348 LOG_WARNING("target not halted");
2349 return ERROR_TARGET_NOT_HALTED;
2350 }
2351
2352 if (!watchpoint->set)
2353 {
2354 LOG_WARNING("breakpoint not set");
2355 return ERROR_OK;
2356 }
2357
2358 if (watchpoint->set == 1)
2359 {
2360 dbcon_value &= ~0x3;
2361 xscale_set_reg_u32(dbcon, dbcon_value);
2362 xscale->dbr0_used = 0;
2363 }
2364 else if (watchpoint->set == 2)
2365 {
2366 dbcon_value &= ~0xc;
2367 xscale_set_reg_u32(dbcon, dbcon_value);
2368 xscale->dbr1_used = 0;
2369 }
2370 watchpoint->set = 0;
2371
2372 return ERROR_OK;
2373 }
2374
2375 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2376 {
2377 struct xscale_common *xscale = target_to_xscale(target);
2378
2379 if (target->state != TARGET_HALTED)
2380 {
2381 LOG_WARNING("target not halted");
2382 return ERROR_TARGET_NOT_HALTED;
2383 }
2384
2385 if (watchpoint->set)
2386 {
2387 xscale_unset_watchpoint(target, watchpoint);
2388 }
2389
2390 xscale->dbr_available++;
2391
2392 return ERROR_OK;
2393 }
2394
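/* Register access is split between two paths: DCSR, TX and RX have JTAG
 * data registers of their own and are accessed directly, while the
 * remaining debug and CP15 registers are transferred by the debug handler
 * using command 0x40 (read) or 0x41 (write), followed by the handler's
 * register number and, for writes, the new value.
 */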
2395 static int xscale_get_reg(struct reg *reg)
2396 {
2397 struct xscale_reg *arch_info = reg->arch_info;
2398 struct target *target = arch_info->target;
2399 struct xscale_common *xscale = target_to_xscale(target);
2400
2401 /* DCSR, TX and RX are accessible via JTAG */
2402 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2403 {
2404 return xscale_read_dcsr(arch_info->target);
2405 }
2406 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2407 {
2408 /* 1 = consume register content */
2409 return xscale_read_tx(arch_info->target, 1);
2410 }
2411 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2412 {
2413 /* can't read from RX register (host -> debug handler) */
2414 return ERROR_OK;
2415 }
2416 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2417 {
2418 /* can't (explicitly) read from TXRXCTRL register */
2419 return ERROR_OK;
2420 }
2421 else /* Other DBG registers have to be transferred by the debug handler */
2422 {
2423 /* send CP read request (command 0x40) */
2424 xscale_send_u32(target, 0x40);
2425
2426 /* send CP register number */
2427 xscale_send_u32(target, arch_info->dbg_handler_number);
2428
2429 /* read register value */
2430 xscale_read_tx(target, 1);
2431 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2432
2433 reg->dirty = 0;
2434 reg->valid = 1;
2435 }
2436
2437 return ERROR_OK;
2438 }
2439
2440 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2441 {
2442 struct xscale_reg *arch_info = reg->arch_info;
2443 struct target *target = arch_info->target;
2444 struct xscale_common *xscale = target_to_xscale(target);
2445 uint32_t value = buf_get_u32(buf, 0, 32);
2446
2447 /* DCSR, TX and RX are accessible via JTAG */
2448 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2449 {
2450 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2451 return xscale_write_dcsr(arch_info->target, -1, -1);
2452 }
2453 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2454 {
2455 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2456 return xscale_write_rx(arch_info->target);
2457 }
2458 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2459 {
2460 /* can't write to TX register (debug-handler -> host) */
2461 return ERROR_OK;
2462 }
2463 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2464 {
2465 /* can't (explicitly) write to TXRXCTRL register */
2466 return ERROR_OK;
2467 }
2468 else /* Other DBG registers have to be transferred by the debug handler */
2469 {
2470 /* send CP write request (command 0x41) */
2471 xscale_send_u32(target, 0x41);
2472
2473 /* send CP register number */
2474 xscale_send_u32(target, arch_info->dbg_handler_number);
2475
2476 /* send CP register value */
2477 xscale_send_u32(target, value);
2478 buf_set_u32(reg->value, 0, 32, value);
2479 }
2480
2481 return ERROR_OK;
2482 }
2483
2484 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2485 {
2486 struct xscale_common *xscale = target_to_xscale(target);
2487 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2488 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2489
2490 /* send CP write request (command 0x41) */
2491 xscale_send_u32(target, 0x41);
2492
2493 /* send CP register number */
2494 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2495
2496 /* send CP register value */
2497 xscale_send_u32(target, value);
2498 buf_set_u32(dcsr->value, 0, 32, value);
2499
2500 return ERROR_OK;
2501 }
2502
2503 static int xscale_read_trace(struct target *target)
2504 {
2505 struct xscale_common *xscale = target_to_xscale(target);
2506 struct arm *armv4_5 = &xscale->armv4_5_common;
2507 struct xscale_trace_data **trace_data_p;
2508
2509 /* 258 words from debug handler
2510 * 256 trace buffer entries
2511 * 2 checkpoint addresses
2512 */
2513 uint32_t trace_buffer[258];
2514 int is_address[256];
2515 int i, j;
2516
2517 if (target->state != TARGET_HALTED)
2518 {
2519 LOG_WARNING("target must be stopped to read trace data");
2520 return ERROR_TARGET_NOT_HALTED;
2521 }
2522
2523 /* send read trace buffer command (command 0x61) */
2524 xscale_send_u32(target, 0x61);
2525
2526 /* receive trace buffer content */
2527 xscale_receive(target, trace_buffer, 258);
2528
2529 /* parse buffer backwards to identify address entries */
2530 for (i = 255; i >= 0; i--)
2531 {
2532 is_address[i] = 0;
2533 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2534 ((trace_buffer[i] & 0xf0) == 0xd0))
2535 {
2536 /* the four entries preceding the branch message hold its target
 * address; stop before running off the start of the buffer */
 if (i > 0)
 is_address[--i] = 1;
 if (i > 0)
 is_address[--i] = 1;
 if (i > 0)
 is_address[--i] = 1;
 if (i > 0)
 is_address[--i] = 1;
2544 }
2545 }
2546
2547
2548 /* search first non-zero entry */
2549 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2550 ;
2551
2552 if (j == 256)
2553 {
2554 LOG_DEBUG("no trace data collected");
2555 return ERROR_XSCALE_NO_TRACE_DATA;
2556 }
2557
2558 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2559 ;
2560
2561 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2562 (*trace_data_p)->next = NULL;
2563 (*trace_data_p)->chkpt0 = trace_buffer[256];
2564 (*trace_data_p)->chkpt1 = trace_buffer[257];
2565 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2566 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2567 (*trace_data_p)->depth = 256 - j;
2568
2569 for (i = j; i < 256; i++)
2570 {
2571 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2572 if (is_address[i])
2573 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2574 else
2575 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2576 }
2577
2578 return ERROR_OK;
2579 }
2580
2581 static int xscale_read_instruction(struct target *target,
2582 struct arm_instruction *instruction)
2583 {
2584 struct xscale_common *xscale = target_to_xscale(target);
2585 int i;
2586 int section = -1;
2587 size_t size_read;
2588 uint32_t opcode;
2589 int retval;
2590
2591 if (!xscale->trace.image)
2592 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2593
2594 /* search for the section the current instruction belongs to */
2595 for (i = 0; i < xscale->trace.image->num_sections; i++)
2596 {
2597 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2598 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2599 {
2600 section = i;
2601 break;
2602 }
2603 }
2604
2605 if (section == -1)
2606 {
2607 /* current instruction couldn't be found in the image */
2608 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2609 }
2610
2611 if (xscale->trace.core_state == ARM_STATE_ARM)
2612 {
2613 uint8_t buf[4];
2614 if ((retval = image_read_section(xscale->trace.image, section,
2615 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2616 4, buf, &size_read)) != ERROR_OK)
2617 {
2618 LOG_ERROR("error while reading instruction: %i", retval);
2619 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2620 }
2621 opcode = target_buffer_get_u32(target, buf);
2622 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2623 }
2624 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2625 {
2626 uint8_t buf[2];
2627 if ((retval = image_read_section(xscale->trace.image, section,
2628 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2629 2, buf, &size_read)) != ERROR_OK)
2630 {
2631 LOG_ERROR("error while reading instruction: %i", retval);
2632 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2633 }
2634 opcode = target_buffer_get_u16(target, buf);
2635 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2636 }
2637 else
2638 {
2639 LOG_ERROR("BUG: unknown core state encountered");
2640 exit(-1);
2641 }
2642
2643 return ERROR_OK;
2644 }
2645
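/* The four trace entries immediately preceding an indirect branch message
 * hold the branch target, most significant byte first; the helper below
 * reassembles them.  For example, entries[i-4 .. i-1] of
 * { 0x78, 0x56, 0x34, 0x12 } yield a target address of 0x78563412.
 */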
2646 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2647 int i, uint32_t *target)
2648 {
2649 /* if there are fewer than four entries prior to the indirect branch message
2650 * we can't extract the address */
2651 if (i < 4)
2652 {
2653 return -1;
2654 }
2655
2656 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2657 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2658
2659 return 0;
2660 }
2661
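/* Trace message format as decoded below: the high nibble of an entry
 * selects the message type (0-7 exception, 8 direct branch, 9 indirect
 * branch, 12/13 checkpointed branches, 15 roll-over) and the low nibble
 * is the incremental count of instructions executed since the previous
 * message; each roll-over message adds another 16 to that count.
 */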
2662 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2663 {
2664 struct xscale_common *xscale = target_to_xscale(target);
2665 int next_pc_ok = 0;
2666 uint32_t next_pc = 0x0;
2667 struct xscale_trace_data *trace_data = xscale->trace.data;
2668 int retval;
2669
2670 while (trace_data)
2671 {
2672 int i, chkpt;
2673 int rollover;
2674 int branch;
2675 int exception;
2676 xscale->trace.core_state = ARM_STATE_ARM;
2677
2678 chkpt = 0;
2679 rollover = 0;
2680
2681 for (i = 0; i < trace_data->depth; i++)
2682 {
2683 next_pc_ok = 0;
2684 branch = 0;
2685 exception = 0;
2686
2687 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2688 continue;
2689
2690 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2691 {
2692 case 0: /* Exceptions */
2693 case 1:
2694 case 2:
2695 case 3:
2696 case 4:
2697 case 5:
2698 case 6:
2699 case 7:
2700 exception = (trace_data->entries[i].data & 0x70) >> 4;
2701 next_pc_ok = 1;
2702 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2703 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2704 break;
2705 case 8: /* Direct Branch */
2706 branch = 1;
2707 break;
2708 case 9: /* Indirect Branch */
2709 branch = 1;
2710 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2711 {
2712 next_pc_ok = 1;
2713 }
2714 break;
2715 case 13: /* Checkpointed Indirect Branch */
2716 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2717 {
2718 next_pc_ok = 1;
2719 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2720 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2721 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2722 }
2723 /* explicit fall-through */
2724 case 12: /* Checkpointed Direct Branch */
2725 branch = 1;
2726 if (chkpt == 0)
2727 {
2728 next_pc_ok = 1;
2729 next_pc = trace_data->chkpt0;
2730 chkpt++;
2731 }
2732 else if (chkpt == 1)
2733 {
2734 next_pc_ok = 1;
2735 next_pc = trace_data->chkpt1;
2736 chkpt++;
2737 }
2738 else
2739 {
2740 LOG_WARNING("more than two checkpointed branches encountered");
2741 }
2742 break;
2743 case 15: /* Roll-over */
2744 rollover++;
2745 continue;
2746 default: /* Reserved */
2747 command_print(cmd_ctx, "--- reserved trace message ---");
2748 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2749 return ERROR_OK;
2750 }
2751
2752 if (xscale->trace.pc_ok)
2753 {
2754 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2755 struct arm_instruction instruction;
2756
2757 if ((exception == 6) || (exception == 7))
2758 {
2759 /* IRQ or FIQ exception, no instruction executed */
2760 executed -= 1;
2761 }
2762
2763 while (executed-- >= 0)
2764 {
2765 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2766 {
2767 /* can't continue tracing with no image available */
2768 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2769 {
2770 return retval;
2771 }
2772 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2773 {
2774 /* TODO: handle incomplete images */
2775 }
2776 }
2777
2778 /* a precise abort on a load to the PC is included in the incremental
2779 * word count, other instructions causing data aborts are not included
2780 */
2781 if ((executed == 0) && (exception == 4)
2782 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2783 {
2784 if ((instruction.type == ARM_LDM)
2785 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2786 {
2787 executed--;
2788 }
2789 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2790 && (instruction.info.load_store.Rd != 15))
2791 {
2792 executed--;
2793 }
2794 }
2795
2796 /* only the last instruction executed
2797 * (the one that caused the control flow change)
2798 * could be a taken branch
2799 */
2800 if (((executed == -1) && (branch == 1)) &&
2801 (((instruction.type == ARM_B) ||
2802 (instruction.type == ARM_BL) ||
2803 (instruction.type == ARM_BLX)) &&
2804 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2805 {
2806 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2807 }
2808 else
2809 {
2810 xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2;
2811 }
2812 command_print(cmd_ctx, "%s", instruction.text);
2813 }
2814
2815 rollover = 0;
2816 }
2817
2818 if (next_pc_ok)
2819 {
2820 xscale->trace.current_pc = next_pc;
2821 xscale->trace.pc_ok = 1;
2822 }
2823 }
2824
2825 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2)
2826 {
2827 struct arm_instruction instruction;
2828 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2829 {
2830 /* can't continue tracing with no image available */
2831 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2832 {
2833 return retval;
2834 }
2835 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2836 {
2837 /* TODO: handle incomplete images */
2838 }
2839 }
2840 command_print(cmd_ctx, "%s", instruction.text);
2841 }
2842
2843 trace_data = trace_data->next;
2844 }
2845
2846 return ERROR_OK;
2847 }
2848
2849 static const struct reg_arch_type xscale_reg_type = {
2850 .get = xscale_get_reg,
2851 .set = xscale_set_reg,
2852 };
2853
2854 static void xscale_build_reg_cache(struct target *target)
2855 {
2856 struct xscale_common *xscale = target_to_xscale(target);
2857 struct arm *armv4_5 = &xscale->armv4_5_common;
2858 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2859 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2860 int i;
2861 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2862
2863 (*cache_p) = arm_build_reg_cache(target, armv4_5);
2864
2865 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2866 cache_p = &(*cache_p)->next;
2867
2868 /* fill in values for the xscale reg cache */
2869 (*cache_p)->name = "XScale registers";
2870 (*cache_p)->next = NULL;
2871 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2872 (*cache_p)->num_regs = num_regs;
2873
2874 for (i = 0; i < num_regs; i++)
2875 {
2876 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2877 (*cache_p)->reg_list[i].value = calloc(4, 1);
2878 (*cache_p)->reg_list[i].dirty = 0;
2879 (*cache_p)->reg_list[i].valid = 0;
2880 (*cache_p)->reg_list[i].size = 32;
2881 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2882 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2883 arch_info[i] = xscale_reg_arch_info[i];
2884 arch_info[i].target = target;
2885 }
2886
2887 xscale->reg_cache = (*cache_p);
2888 }
2889
2890 static int xscale_init_target(struct command_context *cmd_ctx,
2891 struct target *target)
2892 {
2893 xscale_build_reg_cache(target);
2894 return ERROR_OK;
2895 }
2896
2897 static int xscale_init_arch_info(struct target *target,
2898 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2899 {
2900 struct arm *armv4_5;
2901 uint32_t high_reset_branch, low_reset_branch;
2902 int i;
2903
2904 armv4_5 = &xscale->armv4_5_common;
2905
2906 /* store architecture specific data */
2907 xscale->common_magic = XSCALE_COMMON_MAGIC;
2908
2909 /* we don't really *need* a variant param ... */
2910 if (variant) {
2911 int ir_length = 0;
2912
2913 if (strcmp(variant, "pxa250") == 0
2914 || strcmp(variant, "pxa255") == 0
2915 || strcmp(variant, "pxa26x") == 0)
2916 ir_length = 5;
2917 else if (strcmp(variant, "pxa27x") == 0
2918 || strcmp(variant, "ixp42x") == 0
2919 || strcmp(variant, "ixp45x") == 0
2920 || strcmp(variant, "ixp46x") == 0)
2921 ir_length = 7;
2922 else if (strcmp(variant, "pxa3xx") == 0)
2923 ir_length = 11;
2924 else
2925 LOG_WARNING("%s: unrecognized variant %s",
2926 tap->dotted_name, variant);
2927
2928 if (ir_length && ir_length != tap->ir_length) {
2929 LOG_WARNING("%s: IR length for %s is %d; fixing",
2930 tap->dotted_name, variant, ir_length);
2931 tap->ir_length = ir_length;
2932 }
2933 }
2934
2935 /* PXA3xx shifts the JTAG instructions */
2936 if (tap->ir_length == 11)
2937 xscale->xscale_variant = XSCALE_PXA3XX;
2938 else
2939 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
2940
2941 /* the debug handler isn't installed (and thus not running) at this time */
2942 xscale->handler_address = 0xfe000800;
2943
2944 /* clear the vectors we keep locally for reference */
2945 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2946 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2947
2948 /* no user-specified vectors have been configured yet */
2949 xscale->static_low_vectors_set = 0x0;
2950 xscale->static_high_vectors_set = 0x0;
2951
2952 /* calculate branches to debug handler */
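	/* The reset vectors are patched with ARM B instructions whose encoded
	 * offset is (handler_address + 0x20 - vector_address - 8) >> 2, i.e.
	 * relative to the vector address plus the usual 8-byte ARM pipeline
	 * offset and counted in words; handler_address + 0x20 is the entry
	 * point branched to.
	 */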
2953 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2954 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2955
2956 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2957 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2958
2959 for (i = 1; i <= 7; i++)
2960 {
2961 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2962 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2963 }
2964
2965 /* 64kB aligned region used for DCache cleaning */
2966 xscale->cache_clean_address = 0xfffe0000;
2967
2968 xscale->hold_rst = 0;
2969 xscale->external_debug_break = 0;
2970
2971 xscale->ibcr_available = 2;
2972 xscale->ibcr0_used = 0;
2973 xscale->ibcr1_used = 0;
2974
2975 xscale->dbr_available = 2;
2976 xscale->dbr0_used = 0;
2977 xscale->dbr1_used = 0;
2978
2979 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2980 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2981
2982 xscale->vector_catch = 0x1;
2983
2984 xscale->trace.capture_status = TRACE_IDLE;
2985 xscale->trace.data = NULL;
2986 xscale->trace.image = NULL;
2987 xscale->trace.buffer_enabled = 0;
2988 xscale->trace.buffer_fill = 0;
2989
2990 /* prepare ARMv4/5 specific information */
2991 armv4_5->arch_info = xscale;
2992 armv4_5->read_core_reg = xscale_read_core_reg;
2993 armv4_5->write_core_reg = xscale_write_core_reg;
2994 armv4_5->full_context = xscale_full_context;
2995
2996 arm_init_arch_info(target, armv4_5);
2997
2998 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
2999 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3000 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3001 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3002 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3003 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3004 xscale->armv4_5_mmu.has_tiny_pages = 1;
3005 xscale->armv4_5_mmu.mmu_enabled = 0;
3006
3007 return ERROR_OK;
3008 }
3009
3010 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3011 {
3012 struct xscale_common *xscale;
3013
3014 if (sizeof xscale_debug_handler - 1 > 0x800) {
3015 LOG_ERROR("debug_handler.bin: larger than 2kb");
3016 return ERROR_FAIL;
3017 }
3018
3019 xscale = calloc(1, sizeof(*xscale));
3020 if (!xscale)
3021 return ERROR_FAIL;
3022
3023 return xscale_init_arch_info(target, xscale, target->tap,
3024 target->variant);
3025 }
3026
3027 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3028 {
3029 struct target *target = NULL;
3030 struct xscale_common *xscale;
3031 int retval;
3032 uint32_t handler_address;
3033
3034 if (CMD_ARGC < 2)
3035 {
3036 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3037 return ERROR_OK;
3038 }
3039
3040 if ((target = get_target(CMD_ARGV[0])) == NULL)
3041 {
3042 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3043 return ERROR_FAIL;
3044 }
3045
3046 xscale = target_to_xscale(target);
3047 retval = xscale_verify_pointer(CMD_CTX, xscale);
3048 if (retval != ERROR_OK)
3049 return retval;
3050
3051 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3052
3053 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3054 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3055 {
3056 xscale->handler_address = handler_address;
3057 }
3058 else
3059 {
3060 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3061 return ERROR_FAIL;
3062 }
3063
3064 return ERROR_OK;
3065 }
3066
3067 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3068 {
3069 struct target *target = NULL;
3070 struct xscale_common *xscale;
3071 int retval;
3072 uint32_t cache_clean_address;
3073
3074 if (CMD_ARGC < 2)
3075 {
3076 return ERROR_COMMAND_SYNTAX_ERROR;
3077 }
3078
3079 target = get_target(CMD_ARGV[0]);
3080 if (target == NULL)
3081 {
3082 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3083 return ERROR_FAIL;
3084 }
3085 xscale = target_to_xscale(target);
3086 retval = xscale_verify_pointer(CMD_CTX, xscale);
3087 if (retval != ERROR_OK)
3088 return retval;
3089
3090 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3091
3092 if (cache_clean_address & 0xffff)
3093 {
3094 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3095 }
3096 else
3097 {
3098 xscale->cache_clean_address = cache_clean_address;
3099 }
3100
3101 return ERROR_OK;
3102 }
3103
3104 COMMAND_HANDLER(xscale_handle_cache_info_command)
3105 {
3106 struct target *target = get_current_target(CMD_CTX);
3107 struct xscale_common *xscale = target_to_xscale(target);
3108 int retval;
3109
3110 retval = xscale_verify_pointer(CMD_CTX, xscale);
3111 if (retval != ERROR_OK)
3112 return retval;
3113
3114 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3115 }
3116
3117 static int xscale_virt2phys(struct target *target,
3118 uint32_t virtual, uint32_t *physical)
3119 {
3120 struct xscale_common *xscale = target_to_xscale(target);
3121 int type;
3122 uint32_t cb;
3123 int domain;
3124 uint32_t ap;
3125
3126 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3127 LOG_ERROR(xscale_not);
3128 return ERROR_TARGET_INVALID;
3129 }
3130
3131 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3132 if (type == -1)
3133 {
3134 return ret;
3135 }
3136 *physical = ret;
3137 return ERROR_OK;
3138 }
3139
3140 static int xscale_mmu(struct target *target, int *enabled)
3141 {
3142 struct xscale_common *xscale = target_to_xscale(target);
3143
3144 if (target->state != TARGET_HALTED)
3145 {
3146 LOG_ERROR("Target not halted");
3147 return ERROR_TARGET_INVALID;
3148 }
3149 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3150 return ERROR_OK;
3151 }
3152
3153 COMMAND_HANDLER(xscale_handle_mmu_command)
3154 {
3155 struct target *target = get_current_target(CMD_CTX);
3156 struct xscale_common *xscale = target_to_xscale(target);
3157 int retval;
3158
3159 retval = xscale_verify_pointer(CMD_CTX, xscale);
3160 if (retval != ERROR_OK)
3161 return retval;
3162
3163 if (target->state != TARGET_HALTED)
3164 {
3165 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3166 return ERROR_OK;
3167 }
3168
3169 if (CMD_ARGC >= 1)
3170 {
3171 bool enable;
3172 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3173 if (enable)
3174 xscale_enable_mmu_caches(target, 1, 0, 0);
3175 else
3176 xscale_disable_mmu_caches(target, 1, 0, 0);
3177 xscale->armv4_5_mmu.mmu_enabled = enable;
3178 }
3179
3180 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3181
3182 return ERROR_OK;
3183 }
3184
3185 COMMAND_HANDLER(xscale_handle_idcache_command)
3186 {
3187 struct target *target = get_current_target(CMD_CTX);
3188 struct xscale_common *xscale = target_to_xscale(target);
3189
3190 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3191 if (retval != ERROR_OK)
3192 return retval;
3193
3194 if (target->state != TARGET_HALTED)
3195 {
3196 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3197 return ERROR_OK;
3198 }
3199
3200 bool icache;
3201 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3202
3203 if (CMD_ARGC >= 1)
3204 {
3205 bool enable;
3206 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3207 if (enable)
3208 xscale_enable_mmu_caches(target, 0, !icache, icache);
3209 else
3210 xscale_disable_mmu_caches(target, 0, !icache, icache);
3211 if (icache)
3212 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3213 else
3214 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3215 }
3216
3217 bool enabled = icache ?
3218 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3219 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3220 const char *msg = enabled ? "enabled" : "disabled";
3221 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3222
3223 return ERROR_OK;
3224 }
3225
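/* The vector_catch mask is written into DCSR bits 23:16, which hold the
 * XScale per-vector trap enables; each bit set there makes the
 * corresponding exception enter debug mode instead of the installed
 * handler (see the DCSR description in the XScale core developer's
 * manual for the exact bit-to-vector assignment).
 */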
3226 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3227 {
3228 struct target *target = get_current_target(CMD_CTX);
3229 struct xscale_common *xscale = target_to_xscale(target);
3230 int retval;
3231
3232 retval = xscale_verify_pointer(CMD_CTX, xscale);
3233 if (retval != ERROR_OK)
3234 return retval;
3235
3236 if (CMD_ARGC < 1)
3237 {
3238 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3239 }
3240 else
3241 {
3242 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3243 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3244 xscale_write_dcsr(target, -1, -1);
3245 }
3246
3247 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3248
3249 return ERROR_OK;
3250 }
3251
3252
3253 COMMAND_HANDLER(xscale_handle_vector_table_command)
3254 {
3255 struct target *target = get_current_target(CMD_CTX);
3256 struct xscale_common *xscale = target_to_xscale(target);
3257 int err = 0;
3258 int retval;
3259
3260 retval = xscale_verify_pointer(CMD_CTX, xscale);
3261 if (retval != ERROR_OK)
3262 return retval;
3263
3264 if (CMD_ARGC == 0) /* print current settings */
3265 {
3266 int idx;
3267
3268 command_print(CMD_CTX, "active user-set static vectors:");
3269 for (idx = 1; idx < 8; idx++)
3270 if (xscale->static_low_vectors_set & (1 << idx))
3271 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3272 for (idx = 1; idx < 8; idx++)
3273 if (xscale->static_high_vectors_set & (1 << idx))
3274 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3275 return ERROR_OK;
3276 }
3277
3278 if (CMD_ARGC != 3)
3279 err = 1;
3280 else
3281 {
3282 int idx;
3283 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3284 uint32_t vec;
3285 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3286
3287 if (idx < 1 || idx >= 8)
3288 err = 1;
3289
3290 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3291 {
3292 xscale->static_low_vectors_set |= (1<<idx);
3293 xscale->static_low_vectors[idx] = vec;
3294 }
3295 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3296 {
3297 xscale->static_high_vectors_set |= (1<<idx);
3298 xscale->static_high_vectors[idx] = vec;
3299 }
3300 else
3301 err = 1;
3302 }
3303
3304 if (err)
3305 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3306
3307 return ERROR_OK;
3308 }
3309
3310
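/* A typical trace session with the handlers below might look like this
 * (illustrative only; image and output file names are up to the user):
 *
 *   xscale trace_buffer enable fill
 *   resume ; ... ; halt
 *   xscale trace_image firmware.elf
 *   xscale analyze_trace
 *   xscale dump_trace trace.bin
 */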
3311 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3312 {
3313 struct target *target = get_current_target(CMD_CTX);
3314 struct xscale_common *xscale = target_to_xscale(target);
3315 struct arm *armv4_5 = &xscale->armv4_5_common;
3316 uint32_t dcsr_value;
3317 int retval;
3318
3319 retval = xscale_verify_pointer(CMD_CTX, xscale);
3320 if (retval != ERROR_OK)
3321 return retval;
3322
3323 if (target->state != TARGET_HALTED)
3324 {
3325 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3326 return ERROR_OK;
3327 }
3328
3329 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3330 {
3331 struct xscale_trace_data *td, *next_td;
3332 xscale->trace.buffer_enabled = 1;
3333
3334 /* free old trace data */
3335 td = xscale->trace.data;
3336 while (td)
3337 {
3338 next_td = td->next;
3339
3340 if (td->entries)
3341 free(td->entries);
3342 free(td);
3343 td = next_td;
3344 }
3345 xscale->trace.data = NULL;
3346 }
3347 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3348 {
3349 xscale->trace.buffer_enabled = 0;
3350 }
3351
3352 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3353 {
3354 uint32_t fill = 1;
3355 if (CMD_ARGC >= 3)
3356 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3357 xscale->trace.buffer_fill = fill;
3358 }
3359 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3360 {
3361 xscale->trace.buffer_fill = -1;
3362 }
3363
3364 if (xscale->trace.buffer_enabled)
3365 {
3366 /* if we enable the trace buffer in fill-once
3367 * mode we know the address of the first instruction */
3368 xscale->trace.pc_ok = 1;
3369 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3370 }
3371 else
3372 {
3373 /* otherwise the address is unknown, and we have no known good PC */
3374 xscale->trace.pc_ok = 0;
3375 }
3376
3377 command_print(CMD_CTX, "trace buffer %s (%s)",
3378 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3379 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3380
3381 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3382 if (xscale->trace.buffer_fill >= 0)
3383 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3384 else
3385 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3386
3387 return ERROR_OK;
3388 }
3389
3390 COMMAND_HANDLER(xscale_handle_trace_image_command)
3391 {
3392 struct target *target = get_current_target(CMD_CTX);
3393 struct xscale_common *xscale = target_to_xscale(target);
3394 int retval;
3395
3396 if (CMD_ARGC < 1)
3397 {
3398 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3399 return ERROR_OK;
3400 }
3401
3402 retval = xscale_verify_pointer(CMD_CTX, xscale);
3403 if (retval != ERROR_OK)
3404 return retval;
3405
3406 if (xscale->trace.image)
3407 {
3408 image_close(xscale->trace.image);
3409 free(xscale->trace.image);
3410 command_print(CMD_CTX, "previously loaded image found and closed");
3411 }
3412
3413 xscale->trace.image = malloc(sizeof(struct image));
3414 xscale->trace.image->base_address_set = 0;
3415 xscale->trace.image->start_address_set = 0;
3416
3417 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3418 if (CMD_ARGC >= 2)
3419 {
3420 xscale->trace.image->base_address_set = 1;
3421 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3422 }
3423 else
3424 {
3425 xscale->trace.image->base_address_set = 0;
3426 }
3427
3428 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3429 {
3430 free(xscale->trace.image);
3431 xscale->trace.image = NULL;
3432 return ERROR_OK;
3433 }
3434
3435 return ERROR_OK;
3436 }
3437
3438 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3439 {
3440 struct target *target = get_current_target(CMD_CTX);
3441 struct xscale_common *xscale = target_to_xscale(target);
3442 struct xscale_trace_data *trace_data;
3443 struct fileio file;
3444 int retval;
3445
3446 retval = xscale_verify_pointer(CMD_CTX, xscale);
3447 if (retval != ERROR_OK)
3448 return retval;
3449
3450 if (target->state != TARGET_HALTED)
3451 {
3452 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3453 return ERROR_OK;
3454 }
3455
3456 if (CMD_ARGC < 1)
3457 {
3458 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3459 return ERROR_OK;
3460 }
3461
3462 trace_data = xscale->trace.data;
3463
3464 if (!trace_data)
3465 {
3466 command_print(CMD_CTX, "no trace data collected");
3467 return ERROR_OK;
3468 }
3469
3470 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3471 {
3472 return ERROR_OK;
3473 }
3474
3475 while (trace_data)
3476 {
3477 int i;
3478
3479 fileio_write_u32(&file, trace_data->chkpt0);
3480 fileio_write_u32(&file, trace_data->chkpt1);
3481 fileio_write_u32(&file, trace_data->last_instruction);
3482 fileio_write_u32(&file, trace_data->depth);
3483
3484 for (i = 0; i < trace_data->depth; i++)
3485 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3486
3487 trace_data = trace_data->next;
3488 }
3489
3490 fileio_close(&file);
3491
3492 return ERROR_OK;
3493 }
3494
3495 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3496 {
3497 struct target *target = get_current_target(CMD_CTX);
3498 struct xscale_common *xscale = target_to_xscale(target);
3499 int retval;
3500
3501 retval = xscale_verify_pointer(CMD_CTX, xscale);
3502 if (retval != ERROR_OK)
3503 return retval;
3504
3505 xscale_analyze_trace(target, CMD_CTX);
3506
3507 return ERROR_OK;
3508 }
3509
3510 COMMAND_HANDLER(xscale_handle_cp15)
3511 {
3512 struct target *target = get_current_target(CMD_CTX);
3513 struct xscale_common *xscale = target_to_xscale(target);
3514 int retval;
3515
3516 retval = xscale_verify_pointer(CMD_CTX, xscale);
3517 if (retval != ERROR_OK)
3518 return retval;
3519
3520 if (target->state != TARGET_HALTED)
3521 {
3522 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3523 return ERROR_OK;
3524 }
3525 uint32_t reg_no = 0;
3526 struct reg *reg = NULL;
3527 if (CMD_ARGC > 0)
3528 {
3529 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3530 /*translate from xscale cp15 register no to openocd register*/
3531 switch (reg_no)
3532 {
3533 case 0:
3534 reg_no = XSCALE_MAINID;
3535 break;
3536 case 1:
3537 reg_no = XSCALE_CTRL;
3538 break;
3539 case 2:
3540 reg_no = XSCALE_TTB;
3541 break;
3542 case 3:
3543 reg_no = XSCALE_DAC;
3544 break;
3545 case 5:
3546 reg_no = XSCALE_FSR;
3547 break;
3548 case 6:
3549 reg_no = XSCALE_FAR;
3550 break;
3551 case 13:
3552 reg_no = XSCALE_PID;
3553 break;
3554 case 15:
3555 reg_no = XSCALE_CPACCESS;
3556 break;
3557 default:
3558 command_print(CMD_CTX, "invalid register number");
3559 return ERROR_INVALID_ARGUMENTS;
3560 }
3561 reg = &xscale->reg_cache->reg_list[reg_no];
3562
3563 }
3564 if (CMD_ARGC == 1)
3565 {
3566 uint32_t value;
3567
3568 /* read cp15 control register */
3569 xscale_get_reg(reg);
3570 value = buf_get_u32(reg->value, 0, 32);
3571 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3572 }
3573 else if (CMD_ARGC == 2)
3574 {
3575 uint32_t value;
3576 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3577
3578 /* send CP write request (command 0x41) */
3579 xscale_send_u32(target, 0x41);
3580
3581 /* send CP register number */
3582 xscale_send_u32(target, reg_no);
3583
3584 /* send CP register value */
3585 xscale_send_u32(target, value);
3586
3587 /* execute cpwait to ensure outstanding operations complete */
3588 xscale_send_u32(target, 0x53);
3589 }
3590 else
3591 {
3592 command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
3593 }
3594
3595 return ERROR_OK;
3596 }
3597
3598 static const struct command_registration xscale_exec_command_handlers[] = {
3599 {
3600 .name = "cache_info",
3601 .handler = &xscale_handle_cache_info_command,
3602 .mode = COMMAND_EXEC,
3603 },
3604
3605 {
3606 .name = "mmu",
3607 .handler = &xscale_handle_mmu_command,
3608 .mode = COMMAND_EXEC,
3609 .usage = "[enable|disable]",
3610 .help = "enable or disable the MMU",
3611 },
3612 {
3613 .name = "icache",
3614 .handler = &xscale_handle_idcache_command,
3615 .mode = COMMAND_EXEC,
3616 .usage = "[enable|disable]",
3617 .help = "enable or disable the ICache",
3618 },
3619 {
3620 .name = "dcache",
3621 .handler = &xscale_handle_idcache_command,
3622 .mode = COMMAND_EXEC,
3623 .usage = "[enable|disable]",
3624 .help = "enable or disable the DCache",
3625 },
3626
3627 {
3628 .name = "vector_catch",
3629 .handler = &xscale_handle_vector_catch_command,
3630 .mode = COMMAND_EXEC,
3631 .help = "mask of vectors that should be caught",
3632 .usage = "[<mask>]",
3633 },
3634 {
3635 .name = "vector_table",
3636 .handler = &xscale_handle_vector_table_command,
3637 .mode = COMMAND_EXEC,
3638 .usage = "<high|low> <index> <code>",
3639 .help = "set static code for exception handler entry",
3640 },
3641
3642 {
3643 .name = "trace_buffer",
3644 .handler = &xscale_handle_trace_buffer_command,
3645 .mode = COMMAND_EXEC,
3646 .usage = "<enable | disable> [fill [n]|wrap]",
3647 },
3648 {
3649 .name = "dump_trace",
3650 .handler = &xscale_handle_dump_trace_command,
3651 .mode = COMMAND_EXEC,
3652 .help = "dump content of trace buffer to <file>",
3653 .usage = "<file>",
3654 },
3655 {
3656 .name = "analyze_trace",
3657 .handler = &xscale_handle_analyze_trace_buffer_command,
3658 .mode = COMMAND_EXEC,
3659 .help = "analyze content of trace buffer",
3660 },
3661 {
3662 .name = "trace_image",
3663 .handler = &xscale_handle_trace_image_command,
3664 .mode = COMMAND_EXEC,
3665 .help = "load image from <file> [base address]",
3666 .usage = "<file> [address] [type]",
3667 },
3668
3669 {
3670 .name = "cp15",
3671 .handler = &xscale_handle_cp15,
3672 .mode = COMMAND_EXEC,
3673 .help = "access coproc 15",
3674 .usage = "<register> [value]",
3675 },
3676 COMMAND_REGISTRATION_DONE
3677 };
3678 static const struct command_registration xscale_any_command_handlers[] = {
3679 {
3680 .name = "debug_handler",
3681 .handler = &xscale_handle_debug_handler_command,
3682 .mode = COMMAND_ANY,
3683 .usage = "<target#> <address>",
3684 },
3685 {
3686 .name = "cache_clean_address",
3687 .handler = &xscale_handle_cache_clean_address_command,
3688 .mode = COMMAND_ANY,
3689 },
3690 {
3691 .chain = xscale_exec_command_handlers,
3692 },
3693 COMMAND_REGISTRATION_DONE
3694 };
3695 static const struct command_registration xscale_command_handlers[] = {
3696 {
3697 .chain = arm_command_handlers,
3698 },
3699 {
3700 .name = "xscale",
3701 .mode = COMMAND_ANY,
3702 .help = "xscale command group",
3703 .chain = xscale_any_command_handlers,
3704 },
3705 COMMAND_REGISTRATION_DONE
3706 };
3707
3708 struct target_type xscale_target =
3709 {
3710 .name = "xscale",
3711
3712 .poll = xscale_poll,
3713 .arch_state = xscale_arch_state,
3714
3715 .target_request_data = NULL,
3716
3717 .halt = xscale_halt,
3718 .resume = xscale_resume,
3719 .step = xscale_step,
3720
3721 .assert_reset = xscale_assert_reset,
3722 .deassert_reset = xscale_deassert_reset,
3723 .soft_reset_halt = NULL,
3724
3725 .get_gdb_reg_list = arm_get_gdb_reg_list,
3726
3727 .read_memory = xscale_read_memory,
3728 .read_phys_memory = xscale_read_phys_memory,
3729 .write_memory = xscale_write_memory,
3730 .write_phys_memory = xscale_write_phys_memory,
3731 .bulk_write_memory = xscale_bulk_write_memory,
3732
3733 .checksum_memory = arm_checksum_memory,
3734 .blank_check_memory = arm_blank_check_memory,
3735
3736 .run_algorithm = armv4_5_run_algorithm,
3737
3738 .add_breakpoint = xscale_add_breakpoint,
3739 .remove_breakpoint = xscale_remove_breakpoint,
3740 .add_watchpoint = xscale_add_watchpoint,
3741 .remove_watchpoint = xscale_remove_watchpoint,
3742
3743 .commands = xscale_command_handlers,
3744 .target_create = xscale_target_create,
3745 .init_target = xscale_init_target,
3746
3747 .virt2phys = xscale_virt2phys,
3748 .mmu = xscale_mmu
3749 };
