1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
41
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
74
75
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
78 *
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
83 */
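/* The handler image included below (xscale_debug.h, produced by the
 * bin2char utility mentioned above) is written into the mini-ICache one
 * 32-byte cache line at a time by xscale_load_ic(), from
 * xscale_deassert_reset().
 */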
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
86
87 static char *const xscale_reg_list[] =
88 {
89 "XSCALE_MAINID", /* 0 */
90 "XSCALE_CACHETYPE",
91 "XSCALE_CTRL",
92 "XSCALE_AUXCTRL",
93 "XSCALE_TTB",
94 "XSCALE_DAC",
95 "XSCALE_FSR",
96 "XSCALE_FAR",
97 "XSCALE_PID",
98 "XSCALE_CPACCESS",
99 "XSCALE_IBCR0", /* 10 */
100 "XSCALE_IBCR1",
101 "XSCALE_DBR0",
102 "XSCALE_DBR1",
103 "XSCALE_DBCON",
104 "XSCALE_TBREG",
105 "XSCALE_CHKPT0",
106 "XSCALE_CHKPT1",
107 "XSCALE_DCSR",
108 "XSCALE_TX",
109 "XSCALE_RX", /* 20 */
110 "XSCALE_TXRXCTRL",
111 };
112
113 static const struct xscale_reg xscale_reg_arch_info[] =
114 {
115 {XSCALE_MAINID, NULL},
116 {XSCALE_CACHETYPE, NULL},
117 {XSCALE_CTRL, NULL},
118 {XSCALE_AUXCTRL, NULL},
119 {XSCALE_TTB, NULL},
120 {XSCALE_DAC, NULL},
121 {XSCALE_FSR, NULL},
122 {XSCALE_FAR, NULL},
123 {XSCALE_PID, NULL},
124 {XSCALE_CPACCESS, NULL},
125 {XSCALE_IBCR0, NULL},
126 {XSCALE_IBCR1, NULL},
127 {XSCALE_DBR0, NULL},
128 {XSCALE_DBR1, NULL},
129 {XSCALE_DBCON, NULL},
130 {XSCALE_TBREG, NULL},
131 {XSCALE_CHKPT0, NULL},
132 {XSCALE_CHKPT1, NULL},
133 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
134 {-1, NULL}, /* TX accessed via JTAG */
135 {-1, NULL}, /* RX accessed via JTAG */
136 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
137 };
138
139 /* convenience wrapper to access XScale specific registers */
140 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
141 {
142 uint8_t buf[4];
143
144 buf_set_u32(buf, 0, 32, value);
145
146 return xscale_set_reg(reg, buf);
147 }
148
149 static const char xscale_not[] = "target is not an XScale";
150
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
153 {
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
157 }
158 return ERROR_OK;
159 }
160
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
162 {
163 assert (tap != NULL);
164
165 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
166 {
167 struct scan_field field;
168 uint8_t scratch[4];
169
170 memset(&field, 0, sizeof field);
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(scratch, 0, field.num_bits, new_instr);
174
175 jtag_add_ir_scan(tap, &field, end_state);
176 }
177
178 return ERROR_OK;
179 }
180
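/* The DCSR scan chain, selected with SELDCSR, is 36 bits long: a 3-bit
 * control field (bit 1 = hold_rst, bit 2 = external_debug_break), the
 * 32-bit DCSR itself, and a trailing 1-bit field.  xscale_read_dcsr()
 * below captures the register and then scans the captured value back in,
 * so that whatever was shifted into the chain during the read does not
 * stick in DCSR.
 */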
181 static int xscale_read_dcsr(struct target *target)
182 {
183 struct xscale_common *xscale = target_to_xscale(target);
184 int retval;
185 struct scan_field fields[3];
186 uint8_t field0 = 0x0;
187 uint8_t field0_check_value = 0x2;
188 uint8_t field0_check_mask = 0x7;
189 uint8_t field2 = 0x0;
190 uint8_t field2_check_value = 0x0;
191 uint8_t field2_check_mask = 0x1;
192
193 xscale_jtag_set_instr(target->tap,
194 XSCALE_SELDCSR << xscale->xscale_variant,
195 TAP_DRPAUSE);
196
197 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
198 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
199
200 memset(&fields, 0, sizeof fields);
201
202 fields[0].num_bits = 3;
203 fields[0].out_value = &field0;
204 uint8_t tmp;
205 fields[0].in_value = &tmp;
206
207 fields[1].num_bits = 32;
208 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
209
210 fields[2].num_bits = 1;
211 fields[2].out_value = &field2;
212 uint8_t tmp2;
213 fields[2].in_value = &tmp2;
214
215 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
216
217 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
218 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
219
220 if ((retval = jtag_execute_queue()) != ERROR_OK)
221 {
222 LOG_ERROR("JTAG error while reading DCSR");
223 return retval;
224 }
225
226 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
227 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
228
229 /* write the register with the value we just read
230 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
231 */
232 field0_check_mask = 0x1;
233 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
234 fields[1].in_value = NULL;
235
236 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
237
238 /* DANGER!!! this must be here. It will make sure that the arguments
239 * to jtag_set_check_value() do not go out of scope! */
240 return jtag_execute_queue();
241 }
242
243
244 static void xscale_getbuf(jtag_callback_data_t arg)
245 {
246 uint8_t *in = (uint8_t *)arg;
247 *((uint32_t *)arg) = buf_get_u32(in, 0, 32);
248 }
249
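/* Collect num_words 32-bit words from the debug handler via the DBGTX
 * scan chain.  Each scan returns a 3-bit status (bit 0 set means TX held
 * valid data) plus one 32-bit word; words flagged as invalid are dropped,
 * the remaining results are compacted, and the missing words are
 * rescheduled until everything has arrived or 1000 empty rounds have
 * passed.
 */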
250 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
251 {
252 if (num_words == 0)
253 return ERROR_INVALID_ARGUMENTS;
254
255 struct xscale_common *xscale = target_to_xscale(target);
256 int retval = ERROR_OK;
257 tap_state_t path[3];
258 struct scan_field fields[3];
259 uint8_t *field0 = malloc(num_words * 1);
260 uint8_t field0_check_value = 0x2;
261 uint8_t field0_check_mask = 0x6;
262 uint32_t *field1 = malloc(num_words * 4);
263 uint8_t field2_check_value = 0x0;
264 uint8_t field2_check_mask = 0x1;
265 int words_done = 0;
266 int words_scheduled = 0;
267 int i;
268
269 path[0] = TAP_DRSELECT;
270 path[1] = TAP_DRCAPTURE;
271 path[2] = TAP_DRSHIFT;
272
273 memset(&fields, 0, sizeof fields);
274
275 fields[0].num_bits = 3;
276 fields[0].check_value = &field0_check_value;
277 fields[0].check_mask = &field0_check_mask;
278
279 fields[1].num_bits = 32;
280
281 fields[2].num_bits = 1;
282 fields[2].check_value = &field2_check_value;
283 fields[2].check_mask = &field2_check_mask;
284
285 xscale_jtag_set_instr(target->tap,
286 XSCALE_DBGTX << xscale->xscale_variant,
287 TAP_IDLE);
288 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
289
290 /* repeat until all words have been collected */
291 int attempts = 0;
292 while (words_done < num_words)
293 {
294 /* schedule reads */
295 words_scheduled = 0;
296 for (i = words_done; i < num_words; i++)
297 {
298 fields[0].in_value = &field0[i];
299
300 jtag_add_pathmove(3, path);
301
302 fields[1].in_value = (uint8_t *)(field1 + i);
303
304 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
305
306 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
307
308 words_scheduled++;
309 }
310
311 if ((retval = jtag_execute_queue()) != ERROR_OK)
312 {
313 LOG_ERROR("JTAG error while receiving data from debug handler");
314 break;
315 }
316
317 /* examine results */
318 for (i = words_done; i < num_words; i++)
319 {
320 if (!(field0[i] & 1))
321 {
322 /* move backwards if necessary */
323 int j;
324 for (j = i; j < num_words - 1; j++)
325 {
326 field0[j] = field0[j + 1];
327 field1[j] = field1[j + 1];
328 }
329 words_scheduled--;
330 }
331 }
332 if (words_scheduled == 0)
333 {
334 if (attempts++ == 1000)
335 {
336 LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
337 retval = ERROR_TARGET_TIMEOUT;
338 break;
339 }
340 }
341
342 words_done += words_scheduled;
343 }
344
345 for (i = 0; i < num_words; i++)
346 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
347
348 free(field0);
free(field1);
349
350 return retval;
351 }
352
353 static int xscale_read_tx(struct target *target, int consume)
354 {
355 struct xscale_common *xscale = target_to_xscale(target);
356 tap_state_t path[3];
357 tap_state_t noconsume_path[6];
358 int retval;
359 struct timeval timeout, now;
360 struct scan_field fields[3];
361 uint8_t field0_in = 0x0;
362 uint8_t field0_check_value = 0x2;
363 uint8_t field0_check_mask = 0x6;
364 uint8_t field2_check_value = 0x0;
365 uint8_t field2_check_mask = 0x1;
366
367 xscale_jtag_set_instr(target->tap,
368 XSCALE_DBGTX << xscale->xscale_variant,
369 TAP_IDLE);
370
371 path[0] = TAP_DRSELECT;
372 path[1] = TAP_DRCAPTURE;
373 path[2] = TAP_DRSHIFT;
374
375 noconsume_path[0] = TAP_DRSELECT;
376 noconsume_path[1] = TAP_DRCAPTURE;
377 noconsume_path[2] = TAP_DREXIT1;
378 noconsume_path[3] = TAP_DRPAUSE;
379 noconsume_path[4] = TAP_DREXIT2;
380 noconsume_path[5] = TAP_DRSHIFT;
381
382 memset(&fields, 0, sizeof fields);
383
384 fields[0].num_bits = 3;
385 fields[0].in_value = &field0_in;
386
387 fields[1].num_bits = 32;
388 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
389
390 fields[2].num_bits = 1;
391 uint8_t tmp;
392 fields[2].in_value = &tmp;
393
394 gettimeofday(&timeout, NULL);
395 timeval_add_time(&timeout, 1, 0);
396
397 for (;;)
398 {
399 /* if we want to consume the register content (i.e. clear TX_READY),
400 * we have to go straight from Capture-DR to Shift-DR
401 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
402 */
403 if (consume)
404 jtag_add_pathmove(3, path);
405 else
406 {
407 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
408 }
409
410 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
411
412 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
413 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
414
415 if ((retval = jtag_execute_queue()) != ERROR_OK)
416 {
417 LOG_ERROR("JTAG error while reading TX");
418 return ERROR_TARGET_TIMEOUT;
419 }
420
421 gettimeofday(&now, NULL);
422 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
423 {
424 LOG_ERROR("time out reading TX register");
425 return ERROR_TARGET_TIMEOUT;
426 }
427 if (!((!(field0_in & 1)) && consume))
428 {
429 goto done;
430 }
431 if (debug_level >= 3)
432 {
433 LOG_DEBUG("waiting 100ms");
434 alive_sleep(100); /* avoid flooding the logs */
435 } else
436 {
437 keep_alive();
438 }
439 }
440 done:
441
442 if (!(field0_in & 1))
443 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
444
445 return ERROR_OK;
446 }
447
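/* Hand one 32-bit word (already staged in the cached XSCALE_RX value) to
 * the debug handler: poll the DBGRX status until bit 0 (rx_read) is clear,
 * then scan again with the trailing 1-bit rx_valid flag set so the handler
 * picks the word up.
 */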
448 static int xscale_write_rx(struct target *target)
449 {
450 struct xscale_common *xscale = target_to_xscale(target);
451 int retval;
452 struct timeval timeout, now;
453 struct scan_field fields[3];
454 uint8_t field0_out = 0x0;
455 uint8_t field0_in = 0x0;
456 uint8_t field0_check_value = 0x2;
457 uint8_t field0_check_mask = 0x6;
458 uint8_t field2 = 0x0;
459 uint8_t field2_check_value = 0x0;
460 uint8_t field2_check_mask = 0x1;
461
462 xscale_jtag_set_instr(target->tap,
463 XSCALE_DBGRX << xscale->xscale_variant,
464 TAP_IDLE);
465
466 memset(&fields, 0, sizeof fields);
467
468 fields[0].num_bits = 3;
469 fields[0].out_value = &field0_out;
470 fields[0].in_value = &field0_in;
471
472 fields[1].num_bits = 32;
473 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
474
475 fields[2].num_bits = 1;
476 fields[2].out_value = &field2;
477 uint8_t tmp;
478 fields[2].in_value = &tmp;
479
480 gettimeofday(&timeout, NULL);
481 timeval_add_time(&timeout, 1, 0);
482
483 /* poll until rx_read is low */
484 LOG_DEBUG("polling RX");
485 for (;;)
486 {
487 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
488
489 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
490 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
491
492 if ((retval = jtag_execute_queue()) != ERROR_OK)
493 {
494 LOG_ERROR("JTAG error while writing RX");
495 return retval;
496 }
497
498 gettimeofday(&now, NULL);
499 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
500 {
501 LOG_ERROR("time out writing RX register");
502 return ERROR_TARGET_TIMEOUT;
503 }
504 if (!(field0_in & 1))
505 goto done;
506 if (debug_level >= 3)
507 {
508 LOG_DEBUG("waiting 100ms");
509 alive_sleep(100); /* avoid flooding the logs */
510 } else
511 {
512 keep_alive();
513 }
514 }
515 done:
516
517 /* set rx_valid */
518 field2 = 0x1;
519 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
520
521 if ((retval = jtag_execute_queue()) != ERROR_OK)
522 {
523 LOG_ERROR("JTAG error while writing RX");
524 return retval;
525 }
526
527 return ERROR_OK;
528 }
529
530 /* send count elements of size bytes each to the debug handler */
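/* Unlike xscale_write_rx(), this fast path does not poll the handshake bit
 * between words: it queues one DBGRX write per element with
 * jtag_add_dr_out(), apparently relying on the debug handler draining RX
 * quickly enough.  Elements are converted from target endianness to host
 * order before being shifted out.
 */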
531 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
532 {
533 struct xscale_common *xscale = target_to_xscale(target);
534 uint32_t t[3];
535 int bits[3];
536 int retval;
537 int done_count = 0;
538
539 xscale_jtag_set_instr(target->tap,
540 XSCALE_DBGRX << xscale->xscale_variant,
541 TAP_IDLE);
542
543 bits[0]=3;
544 t[0]=0;
545 bits[1]=32;
546 t[2]=1;
547 bits[2]=1;
548 int endianness = target->endianness;
549 while (done_count++ < count)
550 {
551 switch (size)
552 {
553 case 4:
554 if (endianness == TARGET_LITTLE_ENDIAN)
555 {
556 t[1]=le_to_h_u32(buffer);
557 } else
558 {
559 t[1]=be_to_h_u32(buffer);
560 }
561 break;
562 case 2:
563 if (endianness == TARGET_LITTLE_ENDIAN)
564 {
565 t[1]=le_to_h_u16(buffer);
566 } else
567 {
568 t[1]=be_to_h_u16(buffer);
569 }
570 break;
571 case 1:
572 t[1]=buffer[0];
573 break;
574 default:
575 LOG_ERROR("BUG: size neither 4, 2 nor 1");
576 return ERROR_INVALID_ARGUMENTS;
577 }
578 jtag_add_dr_out(target->tap,
579 3,
580 bits,
581 t,
582 TAP_IDLE);
583 buffer += size;
584 }
585
586 if ((retval = jtag_execute_queue()) != ERROR_OK)
587 {
588 LOG_ERROR("JTAG error while sending data to debug handler");
589 return retval;
590 }
591
592 return ERROR_OK;
593 }
594
595 static int xscale_send_u32(struct target *target, uint32_t value)
596 {
597 struct xscale_common *xscale = target_to_xscale(target);
598
599 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
600 return xscale_write_rx(target);
601 }
602
603 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
604 {
605 struct xscale_common *xscale = target_to_xscale(target);
606 int retval;
607 struct scan_field fields[3];
608 uint8_t field0 = 0x0;
609 uint8_t field0_check_value = 0x2;
610 uint8_t field0_check_mask = 0x7;
611 uint8_t field2 = 0x0;
612 uint8_t field2_check_value = 0x0;
613 uint8_t field2_check_mask = 0x1;
614
615 if (hold_rst != -1)
616 xscale->hold_rst = hold_rst;
617
618 if (ext_dbg_brk != -1)
619 xscale->external_debug_break = ext_dbg_brk;
620
621 xscale_jtag_set_instr(target->tap,
622 XSCALE_SELDCSR << xscale->xscale_variant,
623 TAP_IDLE);
624
625 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
626 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
627
628 memset(&fields, 0, sizeof fields);
629
630 fields[0].num_bits = 3;
631 fields[0].out_value = &field0;
632 uint8_t tmp;
633 fields[0].in_value = &tmp;
634
635 fields[1].num_bits = 32;
636 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
637
638 fields[2].num_bits = 1;
639 fields[2].out_value = &field2;
640 uint8_t tmp2;
641 fields[2].in_value = &tmp2;
642
643 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
644
645 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
646 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
647
648 if ((retval = jtag_execute_queue()) != ERROR_OK)
649 {
650 LOG_ERROR("JTAG error while writing DCSR");
651 return retval;
652 }
653
654 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
655 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
656
657 return ERROR_OK;
658 }
659
660 /* parity of the number of set bits: 0 if even, 1 if odd. For 32-bit words. */
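/* The XOR folds reduce the 32-bit word to a 4-bit value with the same
 * parity; 0x6996 then acts as a 16-entry lookup table (bit n of 0x6996 is
 * the parity of n), so e.g. parity(0x3) == 0 and parity(0x7) == 1.
 */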
661 static unsigned int parity (unsigned int v)
662 {
663 // unsigned int ov = v;
664 v ^= v >> 16;
665 v ^= v >> 8;
666 v ^= v >> 4;
667 v &= 0xf;
668 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
669 return (0x6996 >> v) & 1;
670 }
671
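/* Load one 32-byte line (8 ARM instructions) into the mini-ICache at the
 * given virtual address.  The LDIC packet is a 6-bit command plus the
 * 27-bit line address (va >> 5), followed by eight 33-bit scans, each
 * carrying an instruction word and its parity bit.  xscale_update_vectors()
 * and xscale_deassert_reset() use this to plant the exception vectors and
 * the debug handler.
 */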
672 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
673 {
674 struct xscale_common *xscale = target_to_xscale(target);
675 uint8_t packet[4];
676 uint8_t cmd;
677 int word;
678 struct scan_field fields[2];
679
680 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
681
682 /* LDIC into IR */
683 xscale_jtag_set_instr(target->tap,
684 XSCALE_LDIC << xscale->xscale_variant,
685 TAP_IDLE);
686
687 /* CMD is b011 to load a cacheline into the Mini ICache.
688 * Loading into the main ICache is deprecated, and unused.
689 * It's followed by three zero bits, and 27 address bits.
690 */
691 buf_set_u32(&cmd, 0, 6, 0x3);
692
693 /* virtual address of desired cache line */
694 buf_set_u32(packet, 0, 27, va >> 5);
695
696 memset(&fields, 0, sizeof fields);
697
698 fields[0].num_bits = 6;
699 fields[0].out_value = &cmd;
700
701 fields[1].num_bits = 27;
702 fields[1].out_value = packet;
703
704 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
705
706 /* rest of packet is a cacheline: 8 instructions, with parity */
707 fields[0].num_bits = 32;
708 fields[0].out_value = packet;
709
710 fields[1].num_bits = 1;
711 fields[1].out_value = &cmd;
712
713 for (word = 0; word < 8; word++)
714 {
715 buf_set_u32(packet, 0, 32, buffer[word]);
716
717 uint32_t value;
718 memcpy(&value, packet, sizeof(uint32_t));
719 cmd = parity(value);
720
721 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
722 }
723
724 return jtag_execute_queue();
725 }
726
727 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
728 {
729 struct xscale_common *xscale = target_to_xscale(target);
730 uint8_t packet[4];
731 uint8_t cmd;
732 struct scan_field fields[2];
733
734 xscale_jtag_set_instr(target->tap,
735 XSCALE_LDIC << xscale->xscale_variant,
736 TAP_IDLE);
737
738 /* CMD for invalidating an IC line is b000; bits [6:4] are b000 as well */
739 buf_set_u32(&cmd, 0, 6, 0x0);
740
741 /* virtual address of desired cache line */
742 buf_set_u32(packet, 0, 27, va >> 5);
743
744 memset(&fields, 0, sizeof fields);
745
746 fields[0].num_bits = 6;
747 fields[0].out_value = &cmd;
748
749 fields[1].num_bits = 27;
750 fields[1].out_value = packet;
751
752 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
753
754 return ERROR_OK;
755 }
756
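/* Rebuild the low (0x0) and high (0xffff0000) exception vector tables in
 * the mini-ICache.  Vectors 1..7 come from static overrides or from target
 * memory; vector 0 (reset) always becomes a branch into the debug handler
 * at handler_address + 0x20.  The branch offset follows the usual ARM
 * encoding, (destination - vector_address - 8) >> 2; e.g. a (hypothetical)
 * handler_address of 0x800 gives a low reset vector offset of
 * (0x820 - 0x0 - 0x8) >> 2 = 0x206.
 */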
757 static int xscale_update_vectors(struct target *target)
758 {
759 struct xscale_common *xscale = target_to_xscale(target);
760 int i;
761 int retval;
762
763 uint32_t low_reset_branch, high_reset_branch;
764
765 for (i = 1; i < 8; i++)
766 {
767 /* if there's a static vector specified for this exception, override */
768 if (xscale->static_high_vectors_set & (1 << i))
769 {
770 xscale->high_vectors[i] = xscale->static_high_vectors[i];
771 }
772 else
773 {
774 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
775 if (retval == ERROR_TARGET_TIMEOUT)
776 return retval;
777 if (retval != ERROR_OK)
778 {
779 /* Some of these reads will fail as part of normal execution */
780 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
781 }
782 }
783 }
784
785 for (i = 1; i < 8; i++)
786 {
787 if (xscale->static_low_vectors_set & (1 << i))
788 {
789 xscale->low_vectors[i] = xscale->static_low_vectors[i];
790 }
791 else
792 {
793 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
794 if (retval == ERROR_TARGET_TIMEOUT)
795 return retval;
796 if (retval != ERROR_OK)
797 {
798 /* Some of these reads will fail as part of normal execution */
799 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
800 }
801 }
802 }
803
804 /* calculate branches to debug handler */
805 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
806 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
807
808 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
809 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
810
811 /* invalidate and load exception vectors in mini i-cache */
812 xscale_invalidate_ic_line(target, 0x0);
813 xscale_invalidate_ic_line(target, 0xffff0000);
814
815 xscale_load_ic(target, 0x0, xscale->low_vectors);
816 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
817
818 return ERROR_OK;
819 }
820
821 static int xscale_arch_state(struct target *target)
822 {
823 struct xscale_common *xscale = target_to_xscale(target);
824 struct arm *armv4_5 = &xscale->armv4_5_common;
825
826 static const char *state[] =
827 {
828 "disabled", "enabled"
829 };
830
831 static const char *arch_dbg_reason[] =
832 {
833 "", "\n(processor reset)", "\n(trace buffer full)"
834 };
835
836 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
837 {
838 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
839 return ERROR_INVALID_ARGUMENTS;
840 }
841
842 arm_arch_state(target);
843 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
844 state[xscale->armv4_5_mmu.mmu_enabled],
845 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
846 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
847 arch_dbg_reason[xscale->arch_debug_reason]);
848
849 return ERROR_OK;
850 }
851
852 static int xscale_poll(struct target *target)
853 {
854 int retval = ERROR_OK;
855
856 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
857 {
858 enum target_state previous_state = target->state;
859 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
860 {
861
862 /* there's data to read from the tx register, we entered debug state */
863 target->state = TARGET_HALTED;
864
865 /* process debug entry, fetching current mode regs */
866 retval = xscale_debug_entry(target);
867 }
868 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
869 {
870 LOG_USER("error while polling TX register, reset CPU");
871 /* here we "lie" so GDB won't get stuck and a reset can be performed */
872 target->state = TARGET_HALTED;
873 }
874
875 /* debug_entry could have overwritten target state (i.e. immediate resume)
876 * don't signal event handlers in that case
877 */
878 if (target->state != TARGET_HALTED)
879 return ERROR_OK;
880
881 /* if target was running, signal that we halted
882 * otherwise we reentered from debug execution */
883 if (previous_state == TARGET_RUNNING)
884 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
885 else
886 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
887 }
888
889 return retval;
890 }
891
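/* Process entry into debug state.  The debug handler first sends r0, pc,
 * r1..r7 and cpsr (ten words), then r8..r14 of the current bank plus the
 * spsr where one exists.  The "method of entry" is then read back from
 * DCSR bits [4:2] to work out the debug reason and the PC fixup.
 */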
892 static int xscale_debug_entry(struct target *target)
893 {
894 struct xscale_common *xscale = target_to_xscale(target);
895 struct arm *armv4_5 = &xscale->armv4_5_common;
896 uint32_t pc;
897 uint32_t buffer[10];
898 unsigned i;
899 int retval;
900 uint32_t moe;
901
902 /* clear external dbg break (will be written on next DCSR read) */
903 xscale->external_debug_break = 0;
904 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
905 return retval;
906
907 /* get r0, pc, r1 to r7 and cpsr */
908 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
909 return retval;
910
911 /* move r0 from buffer to register cache */
912 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
913 armv4_5->core_cache->reg_list[0].dirty = 1;
914 armv4_5->core_cache->reg_list[0].valid = 1;
915 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
916
917 /* move pc from buffer to register cache */
918 buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
919 armv4_5->pc->dirty = 1;
920 armv4_5->pc->valid = 1;
921 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
922
923 /* move data from buffer to register cache */
924 for (i = 1; i <= 7; i++)
925 {
926 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
927 armv4_5->core_cache->reg_list[i].dirty = 1;
928 armv4_5->core_cache->reg_list[i].valid = 1;
929 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
930 }
931
932 arm_set_cpsr(armv4_5, buffer[9]);
933 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
934
935 if (!is_arm_mode(armv4_5->core_mode))
936 {
937 target->state = TARGET_UNKNOWN;
938 LOG_ERROR("cpsr contains invalid mode value - communication failure");
939 return ERROR_TARGET_FAILURE;
940 }
941 LOG_DEBUG("target entered debug state in %s mode",
942 arm_mode_name(armv4_5->core_mode));
943
944 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
945 if (armv4_5->spsr) {
946 xscale_receive(target, buffer, 8);
947 buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
948 armv4_5->spsr->dirty = false;
949 armv4_5->spsr->valid = true;
950 }
951 else
952 {
953 /* r8 to r14, but no spsr */
954 xscale_receive(target, buffer, 7);
955 }
956
957 /* move data from buffer to right banked register in cache */
958 for (i = 8; i <= 14; i++)
959 {
960 struct reg *r = arm_reg_current(armv4_5, i);
961
962 buf_set_u32(r->value, 0, 32, buffer[i - 8]);
963 r->dirty = false;
964 r->valid = true;
965 }
966
967 /* mark xscale regs invalid to ensure they are retrieved from the
968 * debug handler if requested */
969 for (i = 0; i < xscale->reg_cache->num_regs; i++)
970 xscale->reg_cache->reg_list[i].valid = 0;
971
972 /* examine debug reason */
973 xscale_read_dcsr(target);
974 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
975
976 /* stored PC (for calculating fixup) */
977 pc = buf_get_u32(armv4_5->pc->value, 0, 32);
978
979 switch (moe)
980 {
981 case 0x0: /* Processor reset */
982 target->debug_reason = DBG_REASON_DBGRQ;
983 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
984 pc -= 4;
985 break;
986 case 0x1: /* Instruction breakpoint hit */
987 target->debug_reason = DBG_REASON_BREAKPOINT;
988 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
989 pc -= 4;
990 break;
991 case 0x2: /* Data breakpoint hit */
992 target->debug_reason = DBG_REASON_WATCHPOINT;
993 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
994 pc -= 4;
995 break;
996 case 0x3: /* BKPT instruction executed */
997 target->debug_reason = DBG_REASON_BREAKPOINT;
998 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
999 pc -= 4;
1000 break;
1001 case 0x4: /* Ext. debug event */
1002 target->debug_reason = DBG_REASON_DBGRQ;
1003 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1004 pc -= 4;
1005 break;
1006 case 0x5: /* Vector trap occurred */
1007 target->debug_reason = DBG_REASON_BREAKPOINT;
1008 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1009 pc -= 4;
1010 break;
1011 case 0x6: /* Trace buffer full break */
1012 target->debug_reason = DBG_REASON_DBGRQ;
1013 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1014 pc -= 4;
1015 break;
1016 case 0x7: /* Reserved (may flag Hot-Debug support) */
1017 default:
1018 LOG_ERROR("Method of Entry is 'Reserved'");
1019 exit(-1);
1020 break;
1021 }
1022
1023 /* apply PC fixup */
1024 buf_set_u32(armv4_5->pc->value, 0, 32, pc);
1025
1026 /* on the first debug entry, identify cache type */
1027 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1028 {
1029 uint32_t cache_type_reg;
1030
1031 /* read cp15 cache type register */
1032 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1033 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1034
1035 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1036 }
1037
1038 /* examine MMU and Cache settings */
1039 /* read cp15 control register */
1040 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1041 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1042 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1043 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1044 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1045
1046 /* tracing enabled, read collected trace data */
1047 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
1048 {
1049 xscale_read_trace(target);
1050
1051 /* Resume if entered debug due to buffer fill and we're still collecting
1052 * trace data. Note that a debug exception due to trace buffer full
1053 * can only happen in fill mode. */
1054 if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1055 {
1056 if (--xscale->trace.fill_counter > 0)
1057 xscale_resume(target, 1, 0x0, 1, 0);
1058 }
1059 else /* entered debug for other reason; reset counter */
1060 xscale->trace.fill_counter = 0;
1061 }
1062
1063 return ERROR_OK;
1064 }
1065
1066 static int xscale_halt(struct target *target)
1067 {
1068 struct xscale_common *xscale = target_to_xscale(target);
1069
1070 LOG_DEBUG("target->state: %s",
1071 target_state_name(target));
1072
1073 if (target->state == TARGET_HALTED)
1074 {
1075 LOG_DEBUG("target was already halted");
1076 return ERROR_OK;
1077 }
1078 else if (target->state == TARGET_UNKNOWN)
1079 {
1080 /* this must not happen for an XScale target */
1081 LOG_ERROR("target was in unknown state when halt was requested");
1082 return ERROR_TARGET_INVALID;
1083 }
1084 else if (target->state == TARGET_RESET)
1085 {
1086 LOG_DEBUG("target->state == TARGET_RESET");
1087 }
1088 else
1089 {
1090 /* assert external dbg break */
1091 xscale->external_debug_break = 1;
1092 xscale_read_dcsr(target);
1093
1094 target->debug_reason = DBG_REASON_DBGRQ;
1095 }
1096
1097 return ERROR_OK;
1098 }
1099
1100 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1101 {
1102 struct xscale_common *xscale = target_to_xscale(target);
1103 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1104 int retval;
1105
1106 if (xscale->ibcr0_used)
1107 {
1108 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1109
1110 if (ibcr0_bp)
1111 {
1112 xscale_unset_breakpoint(target, ibcr0_bp);
1113 }
1114 else
1115 {
1116 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1117 exit(-1);
1118 }
1119 }
1120
1121 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1122 return retval;
1123
1124 return ERROR_OK;
1125 }
1126
1127 static int xscale_disable_single_step(struct target *target)
1128 {
1129 struct xscale_common *xscale = target_to_xscale(target);
1130 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1131 int retval;
1132
1133 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1134 return retval;
1135
1136 return ERROR_OK;
1137 }
1138
1139 static void xscale_enable_watchpoints(struct target *target)
1140 {
1141 struct watchpoint *watchpoint = target->watchpoints;
1142
1143 while (watchpoint)
1144 {
1145 if (watchpoint->set == 0)
1146 xscale_set_watchpoint(target, watchpoint);
1147 watchpoint = watchpoint->next;
1148 }
1149 }
1150
1151 static void xscale_enable_breakpoints(struct target *target)
1152 {
1153 struct breakpoint *breakpoint = target->breakpoints;
1154
1155 /* set any pending breakpoints */
1156 while (breakpoint)
1157 {
1158 if (breakpoint->set == 0)
1159 xscale_set_breakpoint(target, breakpoint);
1160 breakpoint = breakpoint->next;
1161 }
1162 }
1163
1164 static void xscale_free_trace_data(struct xscale_common *xscale)
1165 {
1166 struct xscale_trace_data *td = xscale->trace.data;
1167 while (td)
1168 {
1169 struct xscale_trace_data *next_td = td->next;
1170 if (td->entries)
1171 free(td->entries);
1172 free(td);
1173 td = next_td;
1174 }
1175 xscale->trace.data = NULL;
1176 }
1177
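/* Resume protocol, as used below and in xscale_step_inner(): send command
 * 0x30 (resume) or 0x31 (resume with trace collection, preceded by 0x62 to
 * clean the trace buffer), then CPSR, r7 down to r0, and finally the PC.
 * If a breakpoint sits on the current PC, it is temporarily removed and
 * stepped over first using IBCR0 before the real resume is issued.
 */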
1178 static int xscale_resume(struct target *target, int current,
1179 uint32_t address, int handle_breakpoints, int debug_execution)
1180 {
1181 struct xscale_common *xscale = target_to_xscale(target);
1182 struct arm *armv4_5 = &xscale->armv4_5_common;
1183 struct breakpoint *breakpoint = target->breakpoints;
1184 uint32_t current_pc;
1185 int retval;
1186 int i;
1187
1188 LOG_DEBUG("-");
1189
1190 if (target->state != TARGET_HALTED)
1191 {
1192 LOG_WARNING("target not halted");
1193 return ERROR_TARGET_NOT_HALTED;
1194 }
1195
1196 if (!debug_execution)
1197 {
1198 target_free_all_working_areas(target);
1199 }
1200
1201 /* update vector tables */
1202 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1203 return retval;
1204
1205 /* current = 1: continue on current pc, otherwise continue at <address> */
1206 if (!current)
1207 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1208
1209 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1210
1211 /* if we're at the reset vector, we have to simulate the branch */
1212 if (current_pc == 0x0)
1213 {
1214 arm_simulate_step(target, NULL);
1215 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1216 }
1217
1218 /* the front-end may request us not to handle breakpoints */
1219 if (handle_breakpoints)
1220 {
1221 breakpoint = breakpoint_find(target,
1222 buf_get_u32(armv4_5->pc->value, 0, 32));
1223 if (breakpoint != NULL)
1224 {
1225 uint32_t next_pc;
1226 enum trace_mode saved_trace_mode;
1227
1228 /* there's a breakpoint at the current PC, we have to step over it */
1229 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1230 xscale_unset_breakpoint(target, breakpoint);
1231
1232 /* calculate PC of next instruction */
1233 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1234 {
1235 uint32_t current_opcode;
1236 target_read_u32(target, current_pc, &current_opcode);
1237 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1238 }
1239
1240 LOG_DEBUG("enable single-step");
1241 xscale_enable_single_step(target, next_pc);
1242
1243 /* restore banked registers */
1244 retval = xscale_restore_banked(target);
1245
1246 /* send resume request */
1247 xscale_send_u32(target, 0x30);
1248
1249 /* send CPSR */
1250 xscale_send_u32(target,
1251 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1252 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1253 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1254
1255 for (i = 7; i >= 0; i--)
1256 {
1257 /* send register */
1258 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1259 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1260 }
1261
1262 /* send PC */
1263 xscale_send_u32(target,
1264 buf_get_u32(armv4_5->pc->value, 0, 32));
1265 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1266 buf_get_u32(armv4_5->pc->value, 0, 32));
1267
1268 /* disable trace data collection in xscale_debug_entry() */
1269 saved_trace_mode = xscale->trace.mode;
1270 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1271
1272 /* wait for and process debug entry */
1273 xscale_debug_entry(target);
1274
1275 /* re-enable trace buffer, if enabled previously */
1276 xscale->trace.mode = saved_trace_mode;
1277
1278 LOG_DEBUG("disable single-step");
1279 xscale_disable_single_step(target);
1280
1281 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1282 xscale_set_breakpoint(target, breakpoint);
1283 }
1284 }
1285
1286 /* enable any pending breakpoints and watchpoints */
1287 xscale_enable_breakpoints(target);
1288 xscale_enable_watchpoints(target);
1289
1290 /* restore banked registers */
1291 retval = xscale_restore_banked(target);
1292
1293 /* send resume request (command 0x30 or 0x31)
1294 * clean the trace buffer if it is to be enabled (0x62) */
1295 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
1296 {
1297 if (xscale->trace.mode == XSCALE_TRACE_FILL)
1298 {
1299 /* If trace enabled in fill mode and starting collection of new set
1300 * of buffers, initialize buffer counter and free previous buffers */
1301 if (xscale->trace.fill_counter == 0)
1302 {
1303 xscale->trace.fill_counter = xscale->trace.buffer_fill;
1304 xscale_free_trace_data(xscale);
1305 }
1306 }
1307 else /* wrap mode; free previous buffer */
1308 xscale_free_trace_data(xscale);
1309
1310 xscale_send_u32(target, 0x62);
1311 xscale_send_u32(target, 0x31);
1312 }
1313 else
1314 xscale_send_u32(target, 0x30);
1315
1316 /* send CPSR */
1317 xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
1318 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1319 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1320
1321 for (i = 7; i >= 0; i--)
1322 {
1323 /* send register */
1324 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1325 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1326 }
1327
1328 /* send PC */
1329 xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
1330 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1331 buf_get_u32(armv4_5->pc->value, 0, 32));
1332
1333 target->debug_reason = DBG_REASON_NOTHALTED;
1334
1335 if (!debug_execution)
1336 {
1337 /* registers are now invalid */
1338 register_cache_invalidate(armv4_5->core_cache);
1339 target->state = TARGET_RUNNING;
1340 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1341 }
1342 else
1343 {
1344 target->state = TARGET_DEBUG_RUNNING;
1345 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1346 }
1347
1348 LOG_DEBUG("target resumed");
1349
1350 return ERROR_OK;
1351 }
1352
1353 static int xscale_step_inner(struct target *target, int current,
1354 uint32_t address, int handle_breakpoints)
1355 {
1356 struct xscale_common *xscale = target_to_xscale(target);
1357 struct arm *armv4_5 = &xscale->armv4_5_common;
1358 uint32_t next_pc;
1359 int retval;
1360 int i;
1361
1362 target->debug_reason = DBG_REASON_SINGLESTEP;
1363
1364 /* calculate PC of next instruction */
1365 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1366 {
1367 uint32_t current_opcode, current_pc;
1368 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1369
1370 target_read_u32(target, current_pc, &current_opcode);
1371 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1372 return retval;
1373 }
1374
1375 LOG_DEBUG("enable single-step");
1376 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1377 return retval;
1378
1379 /* restore banked registers */
1380 if ((retval = xscale_restore_banked(target)) != ERROR_OK)
1381 return retval;
1382
1383 /* send resume request (command 0x30 or 0x31)
1384 * clean the trace buffer if it is to be enabled (0x62) */
1385 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
1386 {
1387 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1388 return retval;
1389 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1390 return retval;
1391 }
1392 else
1393 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1394 return retval;
1395
1396 /* send CPSR */
1397 retval = xscale_send_u32(target,
1398 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1399 if (retval != ERROR_OK)
1400 return retval;
1401 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1402 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1403
1404 for (i = 7; i >= 0; i--)
1405 {
1406 /* send register */
1407 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1408 return retval;
1409 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1410 }
1411
1412 /* send PC */
1413 retval = xscale_send_u32(target,
1414 buf_get_u32(armv4_5->pc->value, 0, 32));
1415 if (retval != ERROR_OK)
1416 return retval;
1417 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1418 buf_get_u32(armv4_5->pc->value, 0, 32));
1419
1420 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1421
1422 /* registers are now invalid */
1423 register_cache_invalidate(armv4_5->core_cache);
1424
1425 /* wait for and process debug entry */
1426 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1427 return retval;
1428
1429 LOG_DEBUG("disable single-step");
1430 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1431 return retval;
1432
1433 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1434
1435 return ERROR_OK;
1436 }
1437
1438 static int xscale_step(struct target *target, int current,
1439 uint32_t address, int handle_breakpoints)
1440 {
1441 struct arm *armv4_5 = target_to_arm(target);
1442 struct breakpoint *breakpoint = NULL;
1443
1444 uint32_t current_pc;
1445 int retval;
1446
1447 if (target->state != TARGET_HALTED)
1448 {
1449 LOG_WARNING("target not halted");
1450 return ERROR_TARGET_NOT_HALTED;
1451 }
1452
1453 /* current = 1: continue on current pc, otherwise continue at <address> */
1454 if (!current)
1455 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1456
1457 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1458
1459 /* if we're at the reset vector, we have to simulate the step */
1460 if (current_pc == 0x0)
1461 {
1462 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1463 return retval;
1464 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1465
1466 target->debug_reason = DBG_REASON_SINGLESTEP;
1467 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1468
1469 return ERROR_OK;
1470 }
1471
1472 /* the front-end may request us not to handle breakpoints */
1473 if (handle_breakpoints)
1474 breakpoint = breakpoint_find(target,
1475 buf_get_u32(armv4_5->pc->value, 0, 32));
1476 if (breakpoint != NULL) {
1477 retval = xscale_unset_breakpoint(target, breakpoint);
1478 if (retval != ERROR_OK)
1479 return retval;
1480 }
1481
1482 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1483
1484 if (breakpoint)
1485 {
1486 xscale_set_breakpoint(target, breakpoint);
1487 }
1488
1489 LOG_DEBUG("target stepped");
1490
1491 return ERROR_OK;
1492
1493 }
1494
1495 static int xscale_assert_reset(struct target *target)
1496 {
1497 struct xscale_common *xscale = target_to_xscale(target);
1498
1499 LOG_DEBUG("target->state: %s",
1500 target_state_name(target));
1501
1502 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1503 * end up in T-L-R, which would reset JTAG)
1504 */
1505 xscale_jtag_set_instr(target->tap,
1506 XSCALE_SELDCSR << xscale->xscale_variant,
1507 TAP_IDLE);
1508
1509 /* set Hold reset, Halt mode and Trap Reset */
1510 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1511 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1512 xscale_write_dcsr(target, 1, 0);
1513
1514 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1515 xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
1516 jtag_execute_queue();
1517
1518 /* assert reset */
1519 jtag_add_reset(0, 1);
1520
1521 /* sleep 1ms, to be sure we fulfill any requirements */
1522 jtag_add_sleep(1000);
1523 jtag_execute_queue();
1524
1525 target->state = TARGET_RESET;
1526
1527 if (target->reset_halt)
1528 {
1529 int retval;
1530 if ((retval = target_halt(target)) != ERROR_OK)
1531 return retval;
1532 }
1533
1534 return ERROR_OK;
1535 }
1536
1537 static int xscale_deassert_reset(struct target *target)
1538 {
1539 struct xscale_common *xscale = target_to_xscale(target);
1540 struct breakpoint *breakpoint = target->breakpoints;
1541
1542 LOG_DEBUG("-");
1543
1544 xscale->ibcr_available = 2;
1545 xscale->ibcr0_used = 0;
1546 xscale->ibcr1_used = 0;
1547
1548 xscale->dbr_available = 2;
1549 xscale->dbr0_used = 0;
1550 xscale->dbr1_used = 0;
1551
1552 /* mark all hardware breakpoints as unset */
1553 while (breakpoint)
1554 {
1555 if (breakpoint->type == BKPT_HARD)
1556 {
1557 breakpoint->set = 0;
1558 }
1559 breakpoint = breakpoint->next;
1560 }
1561
1562 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1563 xscale_free_trace_data(xscale);
1564
1565 register_cache_invalidate(xscale->armv4_5_common.core_cache);
1566
1567 /* FIXME mark hardware watchpoints as unset too. Also,
1568 * at least some of the XScale registers are invalid...
1569 */
1570
1571 /*
1572 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1573 * contents got invalidated. Safer to force that, so writing new
1574 * contents can't ever fail.
1575 */
1576 {
1577 uint32_t address;
1578 unsigned buf_cnt;
1579 const uint8_t *buffer = xscale_debug_handler;
1580 int retval;
1581
1582 /* release SRST */
1583 jtag_add_reset(0, 0);
1584
1585 /* wait 300ms; 150 and 100ms were not enough */
1586 jtag_add_sleep(300*1000);
1587
1588 jtag_add_runtest(2030, TAP_IDLE);
1589 jtag_execute_queue();
1590
1591 /* set Hold reset, Halt mode and Trap Reset */
1592 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1593 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1594 xscale_write_dcsr(target, 1, 0);
1595
1596 /* Load the debug handler into the mini-icache. Since
1597 * it's using halt mode (not monitor mode), it runs in
1598 * "Special Debug State" for access to registers, memory,
1599 * coprocessors, trace data, etc.
1600 */
1601 address = xscale->handler_address;
1602 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1603 binary_size > 0;
1604 binary_size -= buf_cnt, buffer += buf_cnt)
1605 {
1606 uint32_t cache_line[8];
1607 unsigned i;
1608
1609 buf_cnt = binary_size;
1610 if (buf_cnt > 32)
1611 buf_cnt = 32;
1612
1613 for (i = 0; i < buf_cnt; i += 4)
1614 {
1615 /* convert LE buffer to host-endian uint32_t */
1616 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1617 }
1618
1619 for (; i < 32; i += 4)
1620 {
1621 cache_line[i / 4] = 0xe1a08008;
1622 }
1623
1624 /* only load addresses other than the reset vectors */
1625 if ((address % 0x400) != 0x0)
1626 {
1627 retval = xscale_load_ic(target, address,
1628 cache_line);
1629 if (retval != ERROR_OK)
1630 return retval;
1631 }
1632
1633 address += buf_cnt;
1634 }
1635
1636 retval = xscale_load_ic(target, 0x0,
1637 xscale->low_vectors);
1638 if (retval != ERROR_OK)
1639 return retval;
1640 retval = xscale_load_ic(target, 0xffff0000,
1641 xscale->high_vectors);
1642 if (retval != ERROR_OK)
1643 return retval;
1644
1645 jtag_add_runtest(30, TAP_IDLE);
1646
1647 jtag_add_sleep(100000);
1648
1649 /* set Hold reset, Halt mode and Trap Reset */
1650 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1651 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1652 xscale_write_dcsr(target, 1, 0);
1653
1654 /* clear Hold reset to let the target run (should enter debug handler) */
1655 xscale_write_dcsr(target, 0, 1);
1656 target->state = TARGET_RUNNING;
1657
1658 if (!target->reset_halt)
1659 {
1660 jtag_add_sleep(10000);
1661
1662 /* we should have entered debug now */
1663 xscale_debug_entry(target);
1664 target->state = TARGET_HALTED;
1665
1666 /* resume the target */
1667 xscale_resume(target, 1, 0x0, 1, 0);
1668 }
1669 }
1670
1671 return ERROR_OK;
1672 }
1673
1674 static int xscale_read_core_reg(struct target *target, struct reg *r,
1675 int num, enum arm_mode mode)
1676 {
1677 /** \todo add debug handler support for core register reads */
1678 LOG_ERROR("not implemented");
1679 return ERROR_OK;
1680 }
1681
1682 static int xscale_write_core_reg(struct target *target, struct reg *r,
1683 int num, enum arm_mode mode, uint32_t value)
1684 {
1685 /** \todo add debug handler support for core register writes */
1686 LOG_ERROR("not implemented");
1687 return ERROR_OK;
1688 }
1689
1690 static int xscale_full_context(struct target *target)
1691 {
1692 struct arm *armv4_5 = target_to_arm(target);
1693
1694 uint32_t *buffer;
1695
1696 int i, j;
1697
1698 LOG_DEBUG("-");
1699
1700 if (target->state != TARGET_HALTED)
1701 {
1702 LOG_WARNING("target not halted");
1703 return ERROR_TARGET_NOT_HALTED;
1704 }
1705
1706 buffer = malloc(4 * 8);
1707
1708 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1709 * we can't enter User mode on an XScale (unpredictable),
1710 * but User shares registers with SYS
1711 */
1712 for (i = 1; i < 7; i++)
1713 {
1714 enum arm_mode mode = armv4_5_number_to_mode(i);
1715 bool valid = true;
1716 struct reg *r;
1717
1718 if (mode == ARM_MODE_USR)
1719 continue;
1720
1721 /* check if there are invalid registers in the current mode
1722 */
1723 for (j = 0; valid && j <= 16; j++)
1724 {
1725 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1726 mode, j).valid)
1727 valid = false;
1728 }
1729 if (valid)
1730 continue;
1731
1732 /* request banked registers */
1733 xscale_send_u32(target, 0x0);
1734
1735 /* send CPSR for desired bank mode */
1736 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1737
1738 /* get banked registers: r8 to r14; and SPSR
1739 * except in USR/SYS mode
1740 */
1741 if (mode != ARM_MODE_SYS) {
1742 /* SPSR */
1743 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1744 mode, 16);
1745
1746 xscale_receive(target, buffer, 8);
1747
1748 buf_set_u32(r->value, 0, 32, buffer[7]);
1749 r->dirty = false;
1750 r->valid = true;
1751 } else {
1752 xscale_receive(target, buffer, 7);
1753 }
1754
1755 /* move data from buffer to register cache */
1756 for (j = 8; j <= 14; j++)
1757 {
1758 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1759 mode, j);
1760
1761 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1762 r->dirty = false;
1763 r->valid = true;
1764 }
1765 }
1766
1767 free(buffer);
1768
1769 return ERROR_OK;
1770 }
1771
1772 static int xscale_restore_banked(struct target *target)
1773 {
1774 struct arm *armv4_5 = target_to_arm(target);
1775
1776 int i, j;
1777
1778 if (target->state != TARGET_HALTED)
1779 {
1780 LOG_WARNING("target not halted");
1781 return ERROR_TARGET_NOT_HALTED;
1782 }
1783
1784 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1785 * and check if any banked registers need to be written. Ignore
1786 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1787 * an XScale (unpredictable), but they share all registers.
1788 */
1789 for (i = 1; i < 7; i++)
1790 {
1791 enum arm_mode mode = armv4_5_number_to_mode(i);
1792 struct reg *r;
1793
1794 if (mode == ARM_MODE_USR)
1795 continue;
1796
1797 /* check if there are dirty registers in this mode */
1798 for (j = 8; j <= 14; j++)
1799 {
1800 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1801 mode, j).dirty)
1802 goto dirty;
1803 }
1804
1805 /* if not USR/SYS, check if the SPSR needs to be written */
1806 if (mode != ARM_MODE_SYS)
1807 {
1808 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1809 mode, 16).dirty)
1810 goto dirty;
1811 }
1812
1813 /* there's nothing to flush for this mode */
1814 continue;
1815
1816 dirty:
1817 /* command 0x1: "send banked registers" */
1818 xscale_send_u32(target, 0x1);
1819
1820 /* send CPSR for desired mode */
1821 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1822
1823 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1824 * but this protocol doesn't understand that nuance.
1825 */
1826 for (j = 8; j <= 14; j++) {
1827 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1828 mode, j);
1829 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1830 r->dirty = false;
1831 }
1832
1833 /* send spsr if not in USR/SYS mode */
1834 if (mode != ARM_MODE_SYS) {
1835 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1836 mode, 16);
1837 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1838 r->dirty = false;
1839 }
1840 }
1841
1842 return ERROR_OK;
1843 }
1844
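/* Memory read protocol: command 0x1n (n = access size in bytes), then the
 * base address and the element count.  The handler always replies with
 * count 32-bit words in host endianness, which are narrowed here as
 * needed; a Sticky Abort in DCSR (bit 5) afterwards signals a data abort,
 * and is cleared with command 0x60.
 */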
1845 static int xscale_read_memory(struct target *target, uint32_t address,
1846 uint32_t size, uint32_t count, uint8_t *buffer)
1847 {
1848 struct xscale_common *xscale = target_to_xscale(target);
1849 uint32_t *buf32;
1850 uint32_t i;
1851 int retval;
1852
1853 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1854
1855 if (target->state != TARGET_HALTED)
1856 {
1857 LOG_WARNING("target not halted");
1858 return ERROR_TARGET_NOT_HALTED;
1859 }
1860
1861 /* sanitize arguments */
1862 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1863 return ERROR_INVALID_ARGUMENTS;
1864
1865 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1866 return ERROR_TARGET_UNALIGNED_ACCESS;
1867
1868 /* send memory read request (command 0x1n, n: access size) */
1869 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1870 return retval;
1871
1872 /* send base address for read request */
1873 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1874 return retval;
1875
1876 /* send number of requested data words */
1877 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1878 return retval;
1879
1880 /* receive data from target (count times 32-bit words in host endianness) */
1881 buf32 = malloc(4 * count);
1882 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1883 return retval;
1884
1885 /* extract data from host-endian buffer into byte stream */
1886 for (i = 0; i < count; i++)
1887 {
1888 switch (size)
1889 {
1890 case 4:
1891 target_buffer_set_u32(target, buffer, buf32[i]);
1892 buffer += 4;
1893 break;
1894 case 2:
1895 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1896 buffer += 2;
1897 break;
1898 case 1:
1899 *buffer++ = buf32[i] & 0xff;
1900 break;
1901 default:
1902 LOG_ERROR("invalid read size");
1903 return ERROR_INVALID_ARGUMENTS;
1904 }
1905 }
1906
1907 free(buf32);
1908
1909 /* examine DCSR, to see if Sticky Abort (SA) got set */
1910 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1911 return retval;
1912 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1913 {
1914 /* clear SA bit */
1915 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1916 return retval;
1917
1918 return ERROR_TARGET_DATA_ABORT;
1919 }
1920
1921 return ERROR_OK;
1922 }
1923
1924 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1925 uint32_t size, uint32_t count, uint8_t *buffer)
1926 {
1927 struct xscale_common *xscale = target_to_xscale(target);
1928
1929 /* with MMU inactive, there are only physical addresses */
1930 if (!xscale->armv4_5_mmu.mmu_enabled)
1931 return xscale_read_memory(target, address, size, count, buffer);
1932
1933 /** \todo: provide a non-stub implementation of this routine. */
1934 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1935 target_name(target), __func__);
1936 return ERROR_FAIL;
1937 }
1938
1939 static int xscale_write_memory(struct target *target, uint32_t address,
1940 uint32_t size, uint32_t count, uint8_t *buffer)
1941 {
1942 struct xscale_common *xscale = target_to_xscale(target);
1943 int retval;
1944
1945 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1946
1947 if (target->state != TARGET_HALTED)
1948 {
1949 LOG_WARNING("target not halted");
1950 return ERROR_TARGET_NOT_HALTED;
1951 }
1952
1953 /* sanitize arguments */
1954 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1955 return ERROR_INVALID_ARGUMENTS;
1956
1957 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1958 return ERROR_TARGET_UNALIGNED_ACCESS;
1959
1960 /* send memory write request (command 0x2n, n: access size) */
1961 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1962 return retval;
1963
1964 /* send base address for write request */
1965 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1966 return retval;
1967
1968 /* send number of data words to be written */
1969 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1970 return retval;
1971
1972 /* extract data from host-endian buffer into byte stream */
1973 #if 0
1974 for (i = 0; i < count; i++)
1975 {
1976 switch (size)
1977 {
1978 case 4:
1979 value = target_buffer_get_u32(target, buffer);
1980 xscale_send_u32(target, value);
1981 buffer += 4;
1982 break;
1983 case 2:
1984 value = target_buffer_get_u16(target, buffer);
1985 xscale_send_u32(target, value);
1986 buffer += 2;
1987 break;
1988 case 1:
1989 value = *buffer;
1990 xscale_send_u32(target, value);
1991 buffer += 1;
1992 break;
1993 default:
1994 LOG_ERROR("should never get here");
1995 exit(-1);
1996 }
1997 }
1998 #endif
1999 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
2000 return retval;
2001
2002 /* examine DCSR, to see if Sticky Abort (SA) got set */
2003 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
2004 return retval;
2005 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2006 {
2007 /* clear SA bit */
2008 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
2009 return retval;
2010
2011 LOG_ERROR("data abort writing memory");
2012 return ERROR_TARGET_DATA_ABORT;
2013 }
2014
2015 return ERROR_OK;
2016 }
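
/* For reference, the debug-handler command bytes driven from this file,
 * collected from the xscale_send_u32() calls above and below.  The enum and
 * its names are purely illustrative; they are not part of any public API.
 */
#if 0
enum xscale_dbg_handler_cmd {
	DBGH_READ_MEM	= 0x10,	/* ORed with access size 1, 2 or 4 */
	DBGH_WRITE_MEM	= 0x20,	/* ORed with access size 1, 2 or 4 */
	DBGH_CP_READ	= 0x40,	/* followed by register number */
	DBGH_CP_WRITE	= 0x41,	/* followed by register number and value */
	DBGH_CLEAN_D	= 0x50,	/* followed by cache_clean_address */
	DBGH_INVAL_D	= 0x51,
	DBGH_INVAL_I	= 0x52,	/* also flushes fetch buffers */
	DBGH_CPWAIT	= 0x53,
	DBGH_CLEAR_SA	= 0x60,	/* clear DCSR sticky abort */
	DBGH_READ_TRACE	= 0x61,
};
#endif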
2017
2018 static int xscale_write_phys_memory(struct target *target, uint32_t address,
2019 uint32_t size, uint32_t count, uint8_t *buffer)
2020 {
2021 struct xscale_common *xscale = target_to_xscale(target);
2022
2023 /* with MMU inactive, there are only physical addresses */
2024 if (!xscale->armv4_5_mmu.mmu_enabled)
2025 return xscale_write_memory(target, address, size, count, buffer);
2026
2027 /** \todo: provide a non-stub implementation of this routine. */
2028 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
2029 target_name(target), __func__);
2030 return ERROR_FAIL;
2031 }
2032
2033 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
2034 uint32_t count, uint8_t *buffer)
2035 {
2036 return xscale_write_memory(target, address, 4, count, buffer);
2037 }
2038
2039 static int xscale_get_ttb(struct target *target, uint32_t *result)
2040 {
2041 struct xscale_common *xscale = target_to_xscale(target);
2042 uint32_t ttb;
2043 int retval;
2044
2045 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2046 if (retval != ERROR_OK)
2047 return retval;
2048 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2049
2050 *result = ttb;
2051
2052 return ERROR_OK;
2053 }
2054
2055 static int xscale_disable_mmu_caches(struct target *target, int mmu,
2056 int d_u_cache, int i_cache)
2057 {
2058 struct xscale_common *xscale = target_to_xscale(target);
2059 uint32_t cp15_control;
2060 int retval;
2061
2062 /* read cp15 control register */
2063 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2064 if (retval != ERROR_OK)
2065 return retval;
2066 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2067
2068 if (mmu)
2069 cp15_control &= ~0x1U;
2070
2071 if (d_u_cache)
2072 {
2073 /* clean DCache */
2074 retval = xscale_send_u32(target, 0x50);
2075 if (retval != ERROR_OK)
2076 return retval;
2077 retval = xscale_send_u32(target, xscale->cache_clean_address);
2078 if (retval != ERROR_OK)
2079 return retval;
2080
2081 /* invalidate DCache */
2082 retval = xscale_send_u32(target, 0x51);
2083 if (retval != ERROR_OK)
2084 return retval;
2085
2086 cp15_control &= ~0x4U;
2087 }
2088
2089 if (i_cache)
2090 {
2091 /* invalidate ICache */
2092 retval = xscale_send_u32(target, 0x52);
2093 if (retval != ERROR_OK)
2094 return retval;
2095 cp15_control &= ~0x1000U;
2096 }
2097
2098 /* write new cp15 control register */
2099 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2100 if (retval != ERROR_OK)
2101 return retval;
2102
2103 /* execute cpwait to ensure outstanding operations complete */
2104 retval = xscale_send_u32(target, 0x53);
2105 return retval;
2106 }
2107
2108 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2109 int d_u_cache, int i_cache)
2110 {
2111 struct xscale_common *xscale = target_to_xscale(target);
2112 uint32_t cp15_control;
2113 int retval;
2114
2115 /* read cp15 control register */
2116 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2117 if (retval != ERROR_OK)
2118 return retval;
2119 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2120
2121 if (mmu)
2122 cp15_control |= 0x1U;
2123
2124 if (d_u_cache)
2125 cp15_control |= 0x4U;
2126
2127 if (i_cache)
2128 cp15_control |= 0x1000U;
2129
2130 /* write new cp15 control register */
2131 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2132 if (retval != ERROR_OK)
2133 return retval;
2134
2135 /* execute cpwait to ensure outstanding operations complete */
2136 retval = xscale_send_u32(target, 0x53);
2137 return retval;
2138 }
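
/* Sketch of the CP15 control register bits toggled by the two helpers above.
 * The macro names are illustrative; the values are simply the masks already
 * used in the code (0x1, 0x4, 0x1000).
 */
#if 0
#define XSCALE_CP15_CTRL_MMU_EN		(1 << 0)	/* MMU enable */
#define XSCALE_CP15_CTRL_DCACHE_EN	(1 << 2)	/* data/unified cache enable */
#define XSCALE_CP15_CTRL_ICACHE_EN	(1 << 12)	/* instruction cache enable */
#endif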
2139
2140 static int xscale_set_breakpoint(struct target *target,
2141 struct breakpoint *breakpoint)
2142 {
2143 int retval;
2144 struct xscale_common *xscale = target_to_xscale(target);
2145
2146 if (target->state != TARGET_HALTED)
2147 {
2148 LOG_WARNING("target not halted");
2149 return ERROR_TARGET_NOT_HALTED;
2150 }
2151
2152 if (breakpoint->set)
2153 {
2154 LOG_WARNING("breakpoint already set");
2155 return ERROR_OK;
2156 }
2157
2158 if (breakpoint->type == BKPT_HARD)
2159 {
2160 uint32_t value = breakpoint->address | 1;
2161 if (!xscale->ibcr0_used)
2162 {
2163 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2164 xscale->ibcr0_used = 1;
2165 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2166 }
2167 else if (!xscale->ibcr1_used)
2168 {
2169 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2170 xscale->ibcr1_used = 1;
2171 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2172 }
2173 else
2174 { /* bug: availability previously verified in xscale_add_breakpoint() */
2175 LOG_ERROR("BUG: no hardware comparator available");
2176 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2177 }
2178 }
2179 else if (breakpoint->type == BKPT_SOFT)
2180 {
2181 if (breakpoint->length == 4)
2182 {
2183 /* keep the original instruction in target endianness */
2184 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2185 {
2186 return retval;
2187 }
2188 /* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2189 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2190 {
2191 return retval;
2192 }
2193 }
2194 else
2195 {
2196 /* keep the original instruction in target endianness */
2197 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2198 {
2199 return retval;
2200 }
2201 /* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2202 if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2203 {
2204 return retval;
2205 }
2206 }
2207 breakpoint->set = 1;
2208
2209 xscale_send_u32(target, 0x50); /* clean dcache */
2210 xscale_send_u32(target, xscale->cache_clean_address);
2211 xscale_send_u32(target, 0x51); /* invalidate dcache */
2212 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2213 }
2214
2215 return ERROR_OK;
2216 }
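
/* Minimal sketch of the IBCR value programmed above: the breakpoint address
 * with bit 0 set as the comparator enable.  The helper is hypothetical and
 * only restates what xscale_set_breakpoint() does for BKPT_HARD.
 */
#if 0
static inline uint32_t xscale_example_ibcr(uint32_t address)
{
	return address | 1;	/* bit 0 enables the comparator */
}
#endif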
2217
2218 static int xscale_add_breakpoint(struct target *target,
2219 struct breakpoint *breakpoint)
2220 {
2221 struct xscale_common *xscale = target_to_xscale(target);
2222
2223 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2224 {
2225 LOG_ERROR("no breakpoint unit available for hardware breakpoint");
2226 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2227 }
2228
2229 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2230 {
2231 LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2232 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2233 }
2234
2235 if (breakpoint->type == BKPT_HARD)
2236 {
2237 xscale->ibcr_available--;
2238 }
2239
2240 return xscale_set_breakpoint(target, breakpoint);
2241 }
2242
2243 static int xscale_unset_breakpoint(struct target *target,
2244 struct breakpoint *breakpoint)
2245 {
2246 int retval;
2247 struct xscale_common *xscale = target_to_xscale(target);
2248
2249 if (target->state != TARGET_HALTED)
2250 {
2251 LOG_WARNING("target not halted");
2252 return ERROR_TARGET_NOT_HALTED;
2253 }
2254
2255 if (!breakpoint->set)
2256 {
2257 LOG_WARNING("breakpoint not set");
2258 return ERROR_OK;
2259 }
2260
2261 if (breakpoint->type == BKPT_HARD)
2262 {
2263 if (breakpoint->set == 1)
2264 {
2265 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2266 xscale->ibcr0_used = 0;
2267 }
2268 else if (breakpoint->set == 2)
2269 {
2270 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2271 xscale->ibcr1_used = 0;
2272 }
2273 breakpoint->set = 0;
2274 }
2275 else
2276 {
2277 /* restore original instruction (kept in target endianness) */
2278 if (breakpoint->length == 4)
2279 {
2280 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2281 {
2282 return retval;
2283 }
2284 }
2285 else
2286 {
2287 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2288 {
2289 return retval;
2290 }
2291 }
2292 breakpoint->set = 0;
2293
2294 xscale_send_u32(target, 0x50); /* clean dcache */
2295 xscale_send_u32(target, xscale->cache_clean_address);
2296 xscale_send_u32(target, 0x51); /* invalidate dcache */
2297 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2298 }
2299
2300 return ERROR_OK;
2301 }
2302
2303 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2304 {
2305 struct xscale_common *xscale = target_to_xscale(target);
2306
2307 if (target->state != TARGET_HALTED)
2308 {
2309 LOG_ERROR("target not halted");
2310 return ERROR_TARGET_NOT_HALTED;
2311 }
2312
2313 if (breakpoint->set)
2314 {
2315 xscale_unset_breakpoint(target, breakpoint);
2316 }
2317
2318 if (breakpoint->type == BKPT_HARD)
2319 xscale->ibcr_available++;
2320
2321 return ERROR_OK;
2322 }
2323
2324 static int xscale_set_watchpoint(struct target *target,
2325 struct watchpoint *watchpoint)
2326 {
2327 struct xscale_common *xscale = target_to_xscale(target);
2328 uint32_t enable = 0;
2329 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2330 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2331
2332 if (target->state != TARGET_HALTED)
2333 {
2334 LOG_ERROR("target not halted");
2335 return ERROR_TARGET_NOT_HALTED;
2336 }
2337
2338 switch (watchpoint->rw)
2339 {
2340 case WPT_READ:
2341 enable = 0x3;
2342 break;
2343 case WPT_ACCESS:
2344 enable = 0x2;
2345 break;
2346 case WPT_WRITE:
2347 enable = 0x1;
2348 break;
2349 default:
2350 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2351 }
2352
2353 /* For a watchpoint spanning more than one word, both DBR registers must
2354 be used, with the second serving as a mask. */
2355 if (watchpoint->length > 4)
2356 {
2357 if (xscale->dbr0_used || xscale->dbr1_used)
2358 {
2359 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2360 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2361 }
2362
2363 /* Write mask value to DBR1, based on the length argument.
2364 * Address bits ignored by the comparator are those set in mask. */
2365 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2366 watchpoint->length - 1);
2367 xscale->dbr1_used = 1;
2368 enable |= 0x100; /* DBCON[M] */
2369 }
2370
2371 if (!xscale->dbr0_used)
2372 {
2373 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2374 dbcon_value |= enable;
2375 xscale_set_reg_u32(dbcon, dbcon_value);
2376 watchpoint->set = 1;
2377 xscale->dbr0_used = 1;
2378 }
2379 else if (!xscale->dbr1_used)
2380 {
2381 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2382 dbcon_value |= enable << 2;
2383 xscale_set_reg_u32(dbcon, dbcon_value);
2384 watchpoint->set = 2;
2385 xscale->dbr1_used = 1;
2386 }
2387 else
2388 {
2389 LOG_ERROR("BUG: no hardware comparator available");
2390 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2391 }
2392
2393 return ERROR_OK;
2394 }
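
/* Sketch of the DBCON layout as used above; the macro is illustrative only.
 *   bits [1:0]  DBR0 mode: 1 = write, 2 = access, 3 = read, 0 = disabled
 *   bits [3:2]  DBR1 mode, same encoding
 *   bit  [8]    "M": DBR1 holds an address mask for DBR0 (multi-word watchpoint)
 * For example, an 8-byte watchpoint programs DBR1 = 0x7 (length - 1) and sets M.
 */
#if 0
#define XSCALE_DBCON_VAL(dbr0_mode, dbr1_mode, mask_mode) \
	(((dbr0_mode) & 0x3) | (((dbr1_mode) & 0x3) << 2) | ((mask_mode) ? 0x100 : 0))
#endif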
2395
2396 static int xscale_add_watchpoint(struct target *target,
2397 struct watchpoint *watchpoint)
2398 {
2399 struct xscale_common *xscale = target_to_xscale(target);
2400
2401 if (xscale->dbr_available < 1)
2402 {
2403 LOG_ERROR("no more watchpoint registers available");
2404 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2405 }
2406
2407 if (watchpoint->value)
2408 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2409
2410 /* check that length is a power of two */
2411 for (uint32_t len = watchpoint->length; len != 1; len /= 2)
2412 {
2413 if (len % 2)
2414 {
2415 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2416 return ERROR_COMMAND_ARGUMENT_INVALID;
2417 }
2418 }
2419
2420 if (watchpoint->length == 4) /* single word watchpoint */
2421 {
2422 xscale->dbr_available--; /* one DBR reg used */
2423 return ERROR_OK;
2424 }
2425
2426 /* watchpoints across multiple words require both DBR registers */
2427 if (xscale->dbr_available < 2)
2428 {
2429 LOG_ERROR("insufficient watchpoint registers available");
2430 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2431 }
2432
2433 if (watchpoint->length > watchpoint->address)
2434 {
2435 LOG_ERROR("xscale does not support watchpoints with length "
2436 "greater than address");
2437 return ERROR_COMMAND_ARGUMENT_INVALID;
2438 }
2439
2440 xscale->dbr_available = 0;
2441 return ERROR_OK;
2442 }
2443
2444 static int xscale_unset_watchpoint(struct target *target,
2445 struct watchpoint *watchpoint)
2446 {
2447 struct xscale_common *xscale = target_to_xscale(target);
2448 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2449 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2450
2451 if (target->state != TARGET_HALTED)
2452 {
2453 LOG_WARNING("target not halted");
2454 return ERROR_TARGET_NOT_HALTED;
2455 }
2456
2457 if (!watchpoint->set)
2458 {
2459 LOG_WARNING("breakpoint not set");
2460 return ERROR_OK;
2461 }
2462
2463 if (watchpoint->set == 1)
2464 {
2465 if (watchpoint->length > 4)
2466 {
2467 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2468 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2469 }
2470 else
2471 dbcon_value &= ~0x3;
2472
2473 xscale_set_reg_u32(dbcon, dbcon_value);
2474 xscale->dbr0_used = 0;
2475 }
2476 else if (watchpoint->set == 2)
2477 {
2478 dbcon_value &= ~0xc;
2479 xscale_set_reg_u32(dbcon, dbcon_value);
2480 xscale->dbr1_used = 0;
2481 }
2482 watchpoint->set = 0;
2483
2484 return ERROR_OK;
2485 }
2486
2487 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2488 {
2489 struct xscale_common *xscale = target_to_xscale(target);
2490
2491 if (target->state != TARGET_HALTED)
2492 {
2493 LOG_ERROR("target not halted");
2494 return ERROR_TARGET_NOT_HALTED;
2495 }
2496
2497 if (watchpoint->set)
2498 {
2499 xscale_unset_watchpoint(target, watchpoint);
2500 }
2501
2502 if (watchpoint->length > 4)
2503 xscale->dbr_available++; /* both DBR regs now available */
2504
2505 xscale->dbr_available++;
2506
2507 return ERROR_OK;
2508 }
2509
2510 static int xscale_get_reg(struct reg *reg)
2511 {
2512 struct xscale_reg *arch_info = reg->arch_info;
2513 struct target *target = arch_info->target;
2514 struct xscale_common *xscale = target_to_xscale(target);
2515
2516 /* DCSR, TX and RX are accessible via JTAG */
2517 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2518 {
2519 return xscale_read_dcsr(arch_info->target);
2520 }
2521 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2522 {
2523 /* 1 = consume register content */
2524 return xscale_read_tx(arch_info->target, 1);
2525 }
2526 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2527 {
2528 /* can't read from RX register (host -> debug handler) */
2529 return ERROR_OK;
2530 }
2531 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2532 {
2533 /* can't (explicitly) read from TXRXCTRL register */
2534 return ERROR_OK;
2535 }
2536 else /* Other DBG registers have to be transferred by the debug handler */
2537 {
2538 /* send CP read request (command 0x40) */
2539 xscale_send_u32(target, 0x40);
2540
2541 /* send CP register number */
2542 xscale_send_u32(target, arch_info->dbg_handler_number);
2543
2544 /* read register value */
2545 xscale_read_tx(target, 1);
2546 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2547
2548 reg->dirty = 0;
2549 reg->valid = 1;
2550 }
2551
2552 return ERROR_OK;
2553 }
2554
2555 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2556 {
2557 struct xscale_reg *arch_info = reg->arch_info;
2558 struct target *target = arch_info->target;
2559 struct xscale_common *xscale = target_to_xscale(target);
2560 uint32_t value = buf_get_u32(buf, 0, 32);
2561
2562 /* DCSR, TX and RX are accessible via JTAG */
2563 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2564 {
2565 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2566 return xscale_write_dcsr(arch_info->target, -1, -1);
2567 }
2568 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2569 {
2570 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2571 return xscale_write_rx(arch_info->target);
2572 }
2573 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2574 {
2575 /* can't write to TX register (debug-handler -> host) */
2576 return ERROR_OK;
2577 }
2578 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2579 {
2580 /* can't (explicitly) write to TXRXCTRL register */
2581 return ERROR_OK;
2582 }
2583 else /* Other DBG registers have to be transferred by the debug handler */
2584 {
2585 /* send CP write request (command 0x41) */
2586 xscale_send_u32(target, 0x41);
2587
2588 /* send CP register number */
2589 xscale_send_u32(target, arch_info->dbg_handler_number);
2590
2591 /* send CP register value */
2592 xscale_send_u32(target, value);
2593 buf_set_u32(reg->value, 0, 32, value);
2594 }
2595
2596 return ERROR_OK;
2597 }
2598
2599 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2600 {
2601 struct xscale_common *xscale = target_to_xscale(target);
2602 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2603 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2604
2605 /* send CP write request (command 0x41) */
2606 xscale_send_u32(target, 0x41);
2607
2608 /* send CP register number */
2609 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2610
2611 /* send CP register value */
2612 xscale_send_u32(target, value);
2613 buf_set_u32(dcsr->value, 0, 32, value);
2614
2615 return ERROR_OK;
2616 }
2617
2618 static int xscale_read_trace(struct target *target)
2619 {
2620 struct xscale_common *xscale = target_to_xscale(target);
2621 struct arm *armv4_5 = &xscale->armv4_5_common;
2622 struct xscale_trace_data **trace_data_p;
2623
2624 /* 258 words from debug handler
2625 * 256 trace buffer entries
2626 * 2 checkpoint addresses
2627 */
2628 uint32_t trace_buffer[258];
2629 int is_address[256];
2630 int i, j;
2631 unsigned int num_checkpoints = 0;
2632
2633 if (target->state != TARGET_HALTED)
2634 {
2635 LOG_WARNING("target must be stopped to read trace data");
2636 return ERROR_TARGET_NOT_HALTED;
2637 }
2638
2639 /* send read trace buffer command (command 0x61) */
2640 xscale_send_u32(target, 0x61);
2641
2642 /* receive trace buffer content */
2643 xscale_receive(target, trace_buffer, 258);
2644
2645 /* parse buffer backwards to identify address entries */
2646 for (i = 255; i >= 0; i--)
2647 {
2648 /* also count number of checkpointed entries */
2649 if ((trace_buffer[i] & 0xe0) == 0xc0)
2650 num_checkpoints++;
2651
2652 is_address[i] = 0;
2653 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2654 ((trace_buffer[i] & 0xf0) == 0xd0))
2655 {
2656 if (i > 0)
2657 is_address[--i] = 1;
2658 if (i > 0)
2659 is_address[--i] = 1;
2660 if (i > 0)
2661 is_address[--i] = 1;
2662 if (i > 0)
2663 is_address[--i] = 1;
2664 }
2665 }
2666
2667
2668 /* search first non-zero entry that is not part of an address */
2669 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2670 ;
2671
2672 if (j == 256)
2673 {
2674 LOG_DEBUG("no trace data collected");
2675 return ERROR_XSCALE_NO_TRACE_DATA;
2676 }
2677
2678 /* account for possible partial address at buffer start (wrap mode only) */
2679 if (is_address[0])
2680 { /* first entry is address; complete set of 4? */
2681 i = 1;
2682 while (i < 4)
2683 if (!is_address[i++])
2684 break;
2685 if (i < 4)
2686 j += i; /* partial address; can't use it */
2687 }
2688
2689 /* if first valid entry is indirect branch, can't use that either (no address) */
2690 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2691 j++;
2692
2693 /* walk linked list to terminating entry */
2694 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2695 ;
2696
2697 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2698 (*trace_data_p)->next = NULL;
2699 (*trace_data_p)->chkpt0 = trace_buffer[256];
2700 (*trace_data_p)->chkpt1 = trace_buffer[257];
2701 (*trace_data_p)->last_instruction =
2702 buf_get_u32(armv4_5->pc->value, 0, 32);
2703 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2704 (*trace_data_p)->depth = 256 - j;
2705 (*trace_data_p)->num_checkpoints = num_checkpoints;
2706
2707 for (i = j; i < 256; i++)
2708 {
2709 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2710 if (is_address[i])
2711 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2712 else
2713 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2714 }
2715
2716 return ERROR_OK;
2717 }
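
/* Wire format received above, as implied by the parsing code: 258 words, of
 * which [0..255] are trace buffer entries and [256..257] the two checkpoint
 * registers.  An entry whose upper nybble is 0x9 or 0xd (indirect branch) is
 * preceded by four entries holding the branch target, with the entry
 * immediately before the message byte carrying the least significant byte
 * (see xscale_branch_address() below).
 */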
2718
2719 static int xscale_read_instruction(struct target *target, uint32_t pc,
2720 struct arm_instruction *instruction)
2721 {
2722 struct xscale_common *const xscale = target_to_xscale(target);
2723 int i;
2724 int section = -1;
2725 size_t size_read;
2726 uint32_t opcode;
2727 int retval;
2728
2729 if (!xscale->trace.image)
2730 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2731
2732 /* search for the section the current instruction belongs to */
2733 for (i = 0; i < xscale->trace.image->num_sections; i++)
2734 {
2735 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2736 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > pc))
2737 {
2738 section = i;
2739 break;
2740 }
2741 }
2742
2743 if (section == -1)
2744 {
2745 /* current instruction couldn't be found in the image */
2746 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2747 }
2748
2749 if (xscale->trace.core_state == ARM_STATE_ARM)
2750 {
2751 uint8_t buf[4];
2752 if ((retval = image_read_section(xscale->trace.image, section,
2753 pc - xscale->trace.image->sections[section].base_address,
2754 4, buf, &size_read)) != ERROR_OK)
2755 {
2756 LOG_ERROR("error while reading instruction");
2757 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2758 }
2759 opcode = target_buffer_get_u32(target, buf);
2760 arm_evaluate_opcode(opcode, pc, instruction);
2761 }
2762 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2763 {
2764 uint8_t buf[2];
2765 if ((retval = image_read_section(xscale->trace.image, section,
2766 pc - xscale->trace.image->sections[section].base_address,
2767 2, buf, &size_read)) != ERROR_OK)
2768 {
2769 LOG_ERROR("error while reading instruction");
2770 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2771 }
2772 opcode = target_buffer_get_u16(target, buf);
2773 thumb_evaluate_opcode(opcode, pc, instruction);
2774 }
2775 else
2776 {
2777 LOG_ERROR("BUG: unknown core state encountered");
2778 exit(-1);
2779 }
2780
2781 return ERROR_OK;
2782 }
2783
2784 /* Extract address encoded into trace data.
2785 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2786 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2787 int i, uint32_t *target)
2788 {
2789 /* if there are less than four entries prior to the indirect branch message
2790 * we can't extract the address */
2791 if (i < 4)
2792 *target = 0;
2793 else
2794 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2795 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2796 }
2797
2798 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2799 struct arm_instruction *instruction,
2800 struct command_context *cmd_ctx)
2801 {
2802 int retval = xscale_read_instruction(target, pc, instruction);
2803 if (retval == ERROR_OK)
2804 command_print(cmd_ctx, "%s", instruction->text);
2805 else
2806 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2807 }
2808
2809 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2810 {
2811 struct xscale_common *xscale = target_to_xscale(target);
2812 struct xscale_trace_data *trace_data = xscale->trace.data;
2813 int i, retval;
2814 uint32_t breakpoint_pc;
2815 struct arm_instruction instruction;
2816 uint32_t current_pc = 0; /* initialized when address determined */
2817
2818 if (!xscale->trace.image)
2819 LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
2820
2821 /* loop for each trace buffer that was loaded from target */
2822 while (trace_data)
2823 {
2824 int chkpt = 0; /* incremented as checkpointed entries found */
2825 int j;
2826
2827 /* FIXME: set this to correct mode when trace buffer is first enabled */
2828 xscale->trace.core_state = ARM_STATE_ARM;
2829
2830 /* loop for each entry in this trace buffer */
2831 for (i = 0; i < trace_data->depth; i++)
2832 {
2833 int exception = 0;
2834 uint32_t chkpt_reg = 0x0;
2835 uint32_t branch_target = 0;
2836 int count;
2837
2838 /* trace entry type is upper nybble of 'message byte' */
2839 int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
2840
2841 /* Target addresses of indirect branches are written into the buffer
2842 * before the message byte representing the branch; skip past them. */
2843 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2844 continue;
2845
2846 switch (trace_msg_type)
2847 {
2848 case 0: /* Exceptions */
2849 case 1:
2850 case 2:
2851 case 3:
2852 case 4:
2853 case 5:
2854 case 6:
2855 case 7:
2856 exception = (trace_data->entries[i].data & 0x70) >> 4;
2857
2858 /* FIXME: vector table may be at ffff0000 */
2859 branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
2860 break;
2861
2862 case 8: /* Direct Branch */
2863 break;
2864
2865 case 9: /* Indirect Branch */
2866 xscale_branch_address(trace_data, i, &branch_target);
2867 break;
2868
2869 case 13: /* Checkpointed Indirect Branch */
2870 xscale_branch_address(trace_data, i, &branch_target);
2871 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2872 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2873 else
2874 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2875
2876 chkpt++;
2877 break;
2878
2879 case 12: /* Checkpointed Direct Branch */
2880 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2881 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2882 else
2883 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2884
2885 /* if no current_pc, checkpoint will be starting point */
2886 if (current_pc == 0)
2887 branch_target = chkpt_reg;
2888
2889 chkpt++;
2890 break;
2891
2892 case 15: /* Roll-over */
2893 break;
2894
2895 default: /* Reserved */
2896 LOG_WARNING("trace is suspect: invalid trace message byte");
2897 continue;
2898
2899 }
2900
2901 /* If we don't have the current_pc yet, but we did get the branch target
2902 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
2903 * then we can start displaying instructions at the next iteration, with
2904 * branch_target as the starting point.
2905 */
2906 if (current_pc == 0)
2907 {
2908 current_pc = branch_target; /* remains 0 unless branch_target obtained */
2909 continue;
2910 }
2911
2912 /* We have current_pc. Read and display the instructions from the image.
2913 * First, display count instructions (lower nybble of message byte). */
2914 count = trace_data->entries[i].data & 0x0f;
2915 for (j = 0; j < count; j++)
2916 {
2917 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2918 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2919 }
2920
2921 /* An additional instruction is implicitly added to count for
2922 * rollover and some exceptions: undef, swi, prefetch abort. */
2923 if ((trace_msg_type == 15) || (exception > 0 && exception < 4))
2924 {
2925 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2926 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2927 }
2928
2929 if (trace_msg_type == 15) /* rollover */
2930 continue;
2931
2932 if (exception)
2933 {
2934 command_print(cmd_ctx, "--- exception %i ---", exception);
2935 continue;
2936 }
2937
2938 /* not exception or rollover; next instruction is a branch and is
2939 * not included in the count */
2940 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2941
2942 /* for direct branches, extract branch destination from instruction */
2943 if ((trace_msg_type == 8) || (trace_msg_type == 12))
2944 {
2945 retval = xscale_read_instruction(target, current_pc, &instruction);
2946 if (retval == ERROR_OK)
2947 current_pc = instruction.info.b_bl_bx_blx.target_address;
2948 else
2949 current_pc = 0; /* branch destination unknown */
2950
2951 /* direct branch w/ checkpoint; can also get from checkpoint reg */
2952 if (trace_msg_type == 12)
2953 {
2954 if (current_pc == 0)
2955 current_pc = chkpt_reg;
2956 else if (current_pc != chkpt_reg) /* sanity check */
2957 LOG_WARNING("trace is suspect: checkpoint register "
2958 "inconsistent with adddress from image");
2959 }
2960
2961 if (current_pc == 0)
2962 command_print(cmd_ctx, "address unknown");
2963
2964 continue;
2965 }
2966
2967 /* indirect branch; the branch destination was read from trace buffer */
2968 if ((trace_msg_type == 9) || (trace_msg_type == 13))
2969 {
2970 current_pc = branch_target;
2971
2972 /* sanity check (checkpoint reg is redundant) */
2973 if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
2974 LOG_WARNING("trace is suspect: checkpoint register "
2975 "inconsistent with address from trace buffer");
2976 }
2977
2978 } /* END: for (i = 0; i < trace_data->depth; i++) */
2979
2980 breakpoint_pc = trace_data->last_instruction; /* used below */
2981 trace_data = trace_data->next;
2982
2983 } /* END: while (trace_data) */
2984
2985 /* Finally... display all instructions up to the value of the pc when the
2986 * debug break occurred (saved when trace data was collected from target).
2987 * This is necessary because the trace only records execution branches and 16
2988 * consecutive instructions (rollovers), so last few typically missed.
2989 */
2990 if (current_pc == 0)
2991 return ERROR_OK; /* current_pc was never found */
2992
2993 /* how many instructions remaining? */
2994 int gap_count = (breakpoint_pc - current_pc) /
2995 (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
2996
2997 /* should never be negative or over 16, but verify */
2998 if (gap_count < 0 || gap_count > 16)
2999 {
3000 LOG_WARNING("trace is suspect: excessive gap at end of trace");
3001 return ERROR_OK; /* bail; large number or negative value no good */
3002 }
3003
3004 /* display remaining instructions */
3005 for (i = 0; i < gap_count; i++)
3006 {
3007 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
3008 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
3009 }
3010
3011 return ERROR_OK;
3012 }
3013
3014 static const struct reg_arch_type xscale_reg_type = {
3015 .get = xscale_get_reg,
3016 .set = xscale_set_reg,
3017 };
3018
3019 static void xscale_build_reg_cache(struct target *target)
3020 {
3021 struct xscale_common *xscale = target_to_xscale(target);
3022 struct arm *armv4_5 = &xscale->armv4_5_common;
3023 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
3024 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
3025 int i;
3026 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
3027
3028 (*cache_p) = arm_build_reg_cache(target, armv4_5);
3029
3030 (*cache_p)->next = malloc(sizeof(struct reg_cache));
3031 cache_p = &(*cache_p)->next;
3032
3033 /* fill in values for the xscale reg cache */
3034 (*cache_p)->name = "XScale registers";
3035 (*cache_p)->next = NULL;
3036 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
3037 (*cache_p)->num_regs = num_regs;
3038
3039 for (i = 0; i < num_regs; i++)
3040 {
3041 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
3042 (*cache_p)->reg_list[i].value = calloc(4, 1);
3043 (*cache_p)->reg_list[i].dirty = 0;
3044 (*cache_p)->reg_list[i].valid = 0;
3045 (*cache_p)->reg_list[i].size = 32;
3046 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
3047 (*cache_p)->reg_list[i].type = &xscale_reg_type;
3048 arch_info[i] = xscale_reg_arch_info[i];
3049 arch_info[i].target = target;
3050 }
3051
3052 xscale->reg_cache = (*cache_p);
3053 }
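
/* Illustrative only: how a cached XScale register is accessed through the
 * reg_arch_type hooks installed above.  Reading dispatches to xscale_get_reg(),
 * writing to xscale_set_reg(); the surrounding variables are assumed to exist
 * in the caller.
 */
#if 0
	struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
	dcsr->type->get(dcsr);				/* calls xscale_get_reg() */
	uint32_t dcsr_value = buf_get_u32(dcsr->value, 0, 32);
#endif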
3054
3055 static int xscale_init_target(struct command_context *cmd_ctx,
3056 struct target *target)
3057 {
3058 xscale_build_reg_cache(target);
3059 return ERROR_OK;
3060 }
3061
3062 static int xscale_init_arch_info(struct target *target,
3063 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
3064 {
3065 struct arm *armv4_5;
3066 uint32_t high_reset_branch, low_reset_branch;
3067 int i;
3068
3069 armv4_5 = &xscale->armv4_5_common;
3070
3071 /* store architecture specific data */
3072 xscale->common_magic = XSCALE_COMMON_MAGIC;
3073
3074 /* we don't really *need* a variant param ... */
3075 if (variant) {
3076 int ir_length = 0;
3077
3078 if (strcmp(variant, "pxa250") == 0
3079 || strcmp(variant, "pxa255") == 0
3080 || strcmp(variant, "pxa26x") == 0)
3081 ir_length = 5;
3082 else if (strcmp(variant, "pxa27x") == 0
3083 || strcmp(variant, "ixp42x") == 0
3084 || strcmp(variant, "ixp45x") == 0
3085 || strcmp(variant, "ixp46x") == 0)
3086 ir_length = 7;
3087 else if (strcmp(variant, "pxa3xx") == 0)
3088 ir_length = 11;
3089 else
3090 LOG_WARNING("%s: unrecognized variant %s",
3091 tap->dotted_name, variant);
3092
3093 if (ir_length && ir_length != tap->ir_length) {
3094 LOG_WARNING("%s: IR length for %s is %d; fixing",
3095 tap->dotted_name, variant, ir_length);
3096 tap->ir_length = ir_length;
3097 }
3098 }
3099
3100 /* PXA3xx shifts the JTAG instructions */
3101 if (tap->ir_length == 11)
3102 xscale->xscale_variant = XSCALE_PXA3XX;
3103 else
3104 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
3105
3106 /* the debug handler isn't installed (and thus not running) at this time */
3107 xscale->handler_address = 0xfe000800;
3108
3109 /* clear the vectors we keep locally for reference */
3110 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3111 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3112
3113 /* no user-specified vectors have been configured yet */
3114 xscale->static_low_vectors_set = 0x0;
3115 xscale->static_high_vectors_set = 0x0;
3116
3117 /* calculate branches to debug handler */
3118 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3119 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
3120
3121 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3122 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3123
3124 for (i = 1; i <= 7; i++)
3125 {
3126 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3127 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3128 }
3129
3130 /* 64kB aligned region used for DCache cleaning */
3131 xscale->cache_clean_address = 0xfffe0000;
3132
3133 xscale->hold_rst = 0;
3134 xscale->external_debug_break = 0;
3135
3136 xscale->ibcr_available = 2;
3137 xscale->ibcr0_used = 0;