arm: add error propagation to generic get_ttb fn
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
41
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
74
75
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
78 *
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
83 */
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
86
/* Names of the XScale-specific (mostly cp15 debug/control) registers.
 * Index order must match both the XSCALE_* enum values and
 * xscale_reg_arch_info[] below. */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID",		/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",			/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",			/* 20 */
	"XSCALE_TXRXCTRL",
};
112
/* Per-register architecture info, parallel to xscale_reg_list[].
 * The first field is the register number used by the debug handler;
 * -1 marks registers that are not handler-accessible and are instead
 * reached directly over JTAG (see the trailing comments). */
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL},	/* DCSR accessed via JTAG or SW */
	{-1, NULL},	/* TX accessed via JTAG */
	{-1, NULL},	/* RX accessed via JTAG */
	{-1, NULL},	/* TXRXCTRL implicit access via JTAG */
};
138
/* Convenience wrapper: pack a host-order 32-bit value into a register
 * buffer and write it through the generic xscale_set_reg() path. */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t packed[4];

	buf_set_u32(packed, 0, 32, value);
	return xscale_set_reg(reg, packed);
}
148
149 static const char xscale_not[] = "target is not an XScale";
150
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
153 {
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
157 }
158 return ERROR_OK;
159 }
160
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
162 {
163 if (tap == NULL)
164 return ERROR_FAIL;
165
166 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
167 {
168 struct scan_field field;
169 uint8_t scratch[4];
170
171 memset(&field, 0, sizeof field);
172 field.num_bits = tap->ir_length;
173 field.out_value = scratch;
174 buf_set_u32(scratch, 0, field.num_bits, new_instr);
175
176 jtag_add_ir_scan(tap, &field, end_state);
177 }
178
179 return ERROR_OK;
180 }
181
/* Read the Debug Control and Status Register (DCSR) over JTAG into the
 * register cache, then write the same value back so hold_rst and
 * external_debug_break (carried in the 3 control bits) take effect.
 * Returns an OpenOCD error code. */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	/* SELDCSR instruction; opcode shifted per silicon variant */
	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant,
			TAP_DRPAUSE);

	/* control bits: bit1 = hold reset, bit2 = external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	/* 32-bit DCSR payload captured straight into the register cache */
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* verify the captured handshake bits match the expected pattern */
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
243
244
245 static void xscale_getbuf(jtag_callback_data_t arg)
246 {
247 uint8_t *in = (uint8_t *)arg;
248 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
249 }
250
251 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
252 {
253 if (num_words == 0)
254 return ERROR_INVALID_ARGUMENTS;
255
256 struct xscale_common *xscale = target_to_xscale(target);
257 int retval = ERROR_OK;
258 tap_state_t path[3];
259 struct scan_field fields[3];
260 uint8_t *field0 = malloc(num_words * 1);
261 uint8_t field0_check_value = 0x2;
262 uint8_t field0_check_mask = 0x6;
263 uint32_t *field1 = malloc(num_words * 4);
264 uint8_t field2_check_value = 0x0;
265 uint8_t field2_check_mask = 0x1;
266 int words_done = 0;
267 int words_scheduled = 0;
268 int i;
269
270 path[0] = TAP_DRSELECT;
271 path[1] = TAP_DRCAPTURE;
272 path[2] = TAP_DRSHIFT;
273
274 memset(&fields, 0, sizeof fields);
275
276 fields[0].num_bits = 3;
277 fields[0].check_value = &field0_check_value;
278 fields[0].check_mask = &field0_check_mask;
279
280 fields[1].num_bits = 32;
281
282 fields[2].num_bits = 1;
283 fields[2].check_value = &field2_check_value;
284 fields[2].check_mask = &field2_check_mask;
285
286 xscale_jtag_set_instr(target->tap,
287 XSCALE_DBGTX << xscale->xscale_variant,
288 TAP_IDLE);
289 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
290
291 /* repeat until all words have been collected */
292 int attempts = 0;
293 while (words_done < num_words)
294 {
295 /* schedule reads */
296 words_scheduled = 0;
297 for (i = words_done; i < num_words; i++)
298 {
299 fields[0].in_value = &field0[i];
300
301 jtag_add_pathmove(3, path);
302
303 fields[1].in_value = (uint8_t *)(field1 + i);
304
305 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
306
307 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
308
309 words_scheduled++;
310 }
311
312 if ((retval = jtag_execute_queue()) != ERROR_OK)
313 {
314 LOG_ERROR("JTAG error while receiving data from debug handler");
315 break;
316 }
317
318 /* examine results */
319 for (i = words_done; i < num_words; i++)
320 {
321 if (!(field0[0] & 1))
322 {
323 /* move backwards if necessary */
324 int j;
325 for (j = i; j < num_words - 1; j++)
326 {
327 field0[j] = field0[j + 1];
328 field1[j] = field1[j + 1];
329 }
330 words_scheduled--;
331 }
332 }
333 if (words_scheduled == 0)
334 {
335 if (attempts++==1000)
336 {
337 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
338 retval = ERROR_TARGET_TIMEOUT;
339 break;
340 }
341 }
342
343 words_done += words_scheduled;
344 }
345
346 for (i = 0; i < num_words; i++)
347 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
348
349 free(field1);
350
351 return retval;
352 }
353
/* Read the debug handler's TX register once, polling (with a 1 second
 * timeout) until its "valid" handshake bit is set when consume != 0.
 *
 * consume != 0: go Capture-DR -> Shift-DR, which clears TX_READY so the
 *   handler can send the next word.
 * consume == 0: detour through Exit1/Pause/Exit2 so TX is sampled without
 *   acknowledging it (used by poll to peek for debug entry).
 *
 * Returns ERROR_OK when valid data was captured into the XSCALE_TX cache
 * entry, ERROR_TARGET_RESOURCE_NOT_AVAILABLE when peeking found no data,
 * ERROR_TARGET_TIMEOUT on timeout or JTAG failure. */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGTX << xscale->xscale_variant,
			TAP_IDLE);

	/* consuming path: straight into Shift-DR */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: pause before shifting, leaves TX_READY intact */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		/* NOTE(review): a JTAG failure is reported as TIMEOUT rather than
		 * the underlying retval — presumably so callers treat it the same
		 * as an unresponsive handler; confirm before changing */
		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* only retry when consuming and the valid bit is still clear */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* peeked and found nothing to read */
	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
448
/* Write the cached XSCALE_RX value to the debug handler's RX register.
 * Polls (1 second timeout) until the handler has drained the previous
 * word (handshake bit low), then performs a final scan with the
 * "rx_valid" flag set so the handler picks up the new word.
 * Returns ERROR_OK, ERROR_TARGET_TIMEOUT, or a JTAG error code. */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant,
			TAP_IDLE);

	memset(&fields, 0, sizeof fields);

	/* 3 handshake bits out/in, 32 data bits out, 1 "rx_valid" flag bit */
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* handler has consumed the previous word -> proceed */
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
530
531 /* send count elements of size byte to the debug handler */
532 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
533 {
534 struct xscale_common *xscale = target_to_xscale(target);
535 uint32_t t[3];
536 int bits[3];
537 int retval;
538 int done_count = 0;
539
540 xscale_jtag_set_instr(target->tap,
541 XSCALE_DBGRX << xscale->xscale_variant,
542 TAP_IDLE);
543
544 bits[0]=3;
545 t[0]=0;
546 bits[1]=32;
547 t[2]=1;
548 bits[2]=1;
549 int endianness = target->endianness;
550 while (done_count++ < count)
551 {
552 switch (size)
553 {
554 case 4:
555 if (endianness == TARGET_LITTLE_ENDIAN)
556 {
557 t[1]=le_to_h_u32(buffer);
558 } else
559 {
560 t[1]=be_to_h_u32(buffer);
561 }
562 break;
563 case 2:
564 if (endianness == TARGET_LITTLE_ENDIAN)
565 {
566 t[1]=le_to_h_u16(buffer);
567 } else
568 {
569 t[1]=be_to_h_u16(buffer);
570 }
571 break;
572 case 1:
573 t[1]=buffer[0];
574 break;
575 default:
576 LOG_ERROR("BUG: size neither 4, 2 nor 1");
577 return ERROR_INVALID_ARGUMENTS;
578 }
579 jtag_add_dr_out(target->tap,
580 3,
581 bits,
582 t,
583 TAP_IDLE);
584 buffer += size;
585 }
586
587 if ((retval = jtag_execute_queue()) != ERROR_OK)
588 {
589 LOG_ERROR("JTAG error while sending data to debug handler");
590 return retval;
591 }
592
593 return ERROR_OK;
594 }
595
596 static int xscale_send_u32(struct target *target, uint32_t value)
597 {
598 struct xscale_common *xscale = target_to_xscale(target);
599
600 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
601 return xscale_write_rx(target);
602 }
603
/* Write the cached DCSR value to the target over JTAG.  hold_rst and
 * ext_dbg_brk update the corresponding sticky flags when not -1; both
 * are carried in the 3 control bits alongside the 32-bit DCSR payload.
 * Returns an OpenOCD error code. */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	/* -1 means "leave the current setting unchanged" */
	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant,
			TAP_IDLE);

	/* control bits: bit1 = hold reset, bit2 = external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	/* verify the captured handshake bits match the expected pattern */
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* cache now matches the hardware */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
660
/* Parity of a 32-bit word: 0 if the number of set bits is even, 1 if odd.
 * Folds the word down to its low nibble by successive XORs, then uses
 * 0x6996 (the parity table of all 16 nibble values) as a lookup.
 * (Removed long-dead commented-out debug code.) */
static unsigned int parity(unsigned int v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v &= 0xf;
	return (0x6996 >> v) & 1;
}
672
/* Load one cache line (8 instruction words from buffer[]) into the
 * mini instruction cache at virtual address va using the LDIC JTAG
 * instruction.  The line is sent as a command+address packet followed
 * by eight word+parity packets.  Returns a JTAG error code. */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	xscale_jtag_set_instr(target->tap,
			XSCALE_LDIC << xscale->xscale_variant,
			TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* memcpy (not a pointer cast) to read the packed word back
		 * without violating strict aliasing */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
727
728 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
729 {
730 struct xscale_common *xscale = target_to_xscale(target);
731 uint8_t packet[4];
732 uint8_t cmd;
733 struct scan_field fields[2];
734
735 xscale_jtag_set_instr(target->tap,
736 XSCALE_LDIC << xscale->xscale_variant,
737 TAP_IDLE);
738
739 /* CMD for invalidate IC line b000, bits [6:4] b000 */
740 buf_set_u32(&cmd, 0, 6, 0x0);
741
742 /* virtual address of desired cache line */
743 buf_set_u32(packet, 0, 27, va >> 5);
744
745 memset(&fields, 0, sizeof fields);
746
747 fields[0].num_bits = 6;
748 fields[0].out_value = &cmd;
749
750 fields[1].num_bits = 27;
751 fields[1].out_value = packet;
752
753 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
754
755 return ERROR_OK;
756 }
757
758 static int xscale_update_vectors(struct target *target)
759 {
760 struct xscale_common *xscale = target_to_xscale(target);
761 int i;
762 int retval;
763
764 uint32_t low_reset_branch, high_reset_branch;
765
766 for (i = 1; i < 8; i++)
767 {
768 /* if there's a static vector specified for this exception, override */
769 if (xscale->static_high_vectors_set & (1 << i))
770 {
771 xscale->high_vectors[i] = xscale->static_high_vectors[i];
772 }
773 else
774 {
775 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
776 if (retval == ERROR_TARGET_TIMEOUT)
777 return retval;
778 if (retval != ERROR_OK)
779 {
780 /* Some of these reads will fail as part of normal execution */
781 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
782 }
783 }
784 }
785
786 for (i = 1; i < 8; i++)
787 {
788 if (xscale->static_low_vectors_set & (1 << i))
789 {
790 xscale->low_vectors[i] = xscale->static_low_vectors[i];
791 }
792 else
793 {
794 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
795 if (retval == ERROR_TARGET_TIMEOUT)
796 return retval;
797 if (retval != ERROR_OK)
798 {
799 /* Some of these reads will fail as part of normal execution */
800 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
801 }
802 }
803 }
804
805 /* calculate branches to debug handler */
806 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
807 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
808
809 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
810 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
811
812 /* invalidate and load exception vectors in mini i-cache */
813 xscale_invalidate_ic_line(target, 0x0);
814 xscale_invalidate_ic_line(target, 0xffff0000);
815
816 xscale_load_ic(target, 0x0, xscale->low_vectors);
817 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
818
819 return ERROR_OK;
820 }
821
822 static int xscale_arch_state(struct target *target)
823 {
824 struct xscale_common *xscale = target_to_xscale(target);
825 struct arm *armv4_5 = &xscale->armv4_5_common;
826
827 static const char *state[] =
828 {
829 "disabled", "enabled"
830 };
831
832 static const char *arch_dbg_reason[] =
833 {
834 "", "\n(processor reset)", "\n(trace buffer full)"
835 };
836
837 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
838 {
839 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
840 return ERROR_INVALID_ARGUMENTS;
841 }
842
843 arm_arch_state(target);
844 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
845 state[xscale->armv4_5_mmu.mmu_enabled],
846 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
847 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
848 arch_dbg_reason[xscale->arch_debug_reason]);
849
850 return ERROR_OK;
851 }
852
/* Periodic poll: peek (non-consuming) at the debug handler's TX register;
 * readable data means the core entered debug state, so process the debug
 * entry and fire the appropriate halt event. */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be perfomed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
892
/* Handle entry into debug state: drain the register dump the debug
 * handler sends (r0, pc, r1-r7, cpsr, then banked r8-r14 [+spsr]) into
 * the register cache, decode the DCSR "method of entry" into a debug
 * reason, apply the PC fixup, refresh MMU/cache status, and collect
 * trace data if tracing is active. */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
	armv4_5->pc->dirty = 1;
	armv4_5->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(armv4_5, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* a garbage CPSR means the wire protocol is out of sync */
	if (!is_arm_mode(armv4_5->core_mode))
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
			arm_mode_name(armv4_5->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	/* NOTE(review): these xscale_receive() retvals are ignored — a
	 * failure here would leave stale banked registers; confirm intent */
	if (armv4_5->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
		armv4_5->spsr->dirty = false;
		armv4_5->spsr->valid = true;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++)
	{
		struct reg *r = arm_reg_current(armv4_5, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* examine debug reason: DCSR bits [4:2] hold the method of entry */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			/* NOTE(review): exit() terminates the whole OpenOCD
			 * process here — consider returning an error instead */
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}
1062
1063 static int xscale_halt(struct target *target)
1064 {
1065 struct xscale_common *xscale = target_to_xscale(target);
1066
1067 LOG_DEBUG("target->state: %s",
1068 target_state_name(target));
1069
1070 if (target->state == TARGET_HALTED)
1071 {
1072 LOG_DEBUG("target was already halted");
1073 return ERROR_OK;
1074 }
1075 else if (target->state == TARGET_UNKNOWN)
1076 {
1077 /* this must not happen for a xscale target */
1078 LOG_ERROR("target was in unknown state when halt was requested");
1079 return ERROR_TARGET_INVALID;
1080 }
1081 else if (target->state == TARGET_RESET)
1082 {
1083 LOG_DEBUG("target->state == TARGET_RESET");
1084 }
1085 else
1086 {
1087 /* assert external dbg break */
1088 xscale->external_debug_break = 1;
1089 xscale_read_dcsr(target);
1090
1091 target->debug_reason = DBG_REASON_DBGRQ;
1092 }
1093
1094 return ERROR_OK;
1095 }
1096
1097 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1098 {
1099 struct xscale_common *xscale = target_to_xscale(target);
1100 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1101 int retval;
1102
1103 if (xscale->ibcr0_used)
1104 {
1105 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1106
1107 if (ibcr0_bp)
1108 {
1109 xscale_unset_breakpoint(target, ibcr0_bp);
1110 }
1111 else
1112 {
1113 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1114 exit(-1);
1115 }
1116 }
1117
1118 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1119 return retval;
1120
1121 return ERROR_OK;
1122 }
1123
1124 static int xscale_disable_single_step(struct target *target)
1125 {
1126 struct xscale_common *xscale = target_to_xscale(target);
1127 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1128 int retval;
1129
1130 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1131 return retval;
1132
1133 return ERROR_OK;
1134 }
1135
1136 static void xscale_enable_watchpoints(struct target *target)
1137 {
1138 struct watchpoint *watchpoint = target->watchpoints;
1139
1140 while (watchpoint)
1141 {
1142 if (watchpoint->set == 0)
1143 xscale_set_watchpoint(target, watchpoint);
1144 watchpoint = watchpoint->next;
1145 }
1146 }
1147
1148 static void xscale_enable_breakpoints(struct target *target)
1149 {
1150 struct breakpoint *breakpoint = target->breakpoints;
1151
1152 /* set any pending breakpoints */
1153 while (breakpoint)
1154 {
1155 if (breakpoint->set == 0)
1156 xscale_set_breakpoint(target, breakpoint);
1157 breakpoint = breakpoint->next;
1158 }
1159 }
1160
/* Resume execution (standard target resume() contract).
 *
 * current == 1: resume at the current PC; otherwise resume at <address>.
 * handle_breakpoints != 0: if a breakpoint sits at the resume PC, it is
 *     removed, the core is single-stepped past it, and it is re-armed
 *     before the real resume.
 * debug_execution != 0: resume without invalidating the register cache
 *     or reporting TARGET_RUNNING (used for internal algorithm runs).
 *
 * The resume itself is performed by downloading CPSR, r0..r7 and PC to
 * the debug handler after a resume command (0x30/0x31).
 *
 * NOTE(review): most xscale_send_u32()/xscale_restore_banked()/
 * xscale_debug_entry() return codes on this path are ignored, so a
 * failed register download would go unnoticed.
 */
static int xscale_resume(struct target *target, int current,
	uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct breakpoint *breakpoint = target->breakpoints;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->pc->value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		breakpoint = breakpoint_find(target,
			buf_get_u32(armv4_5->pc->value, 0, 32));
		if (breakpoint != NULL)
		{
			uint32_t next_pc;
			int saved_trace_buffer_enabled;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			}

			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			retval = xscale_restore_banked(target);

			/* send resume request */
			xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));

			/* download r7..r0 (the debug handler expects this order) */
			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target,
				buf_get_u32(armv4_5->pc->value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->pc->value, 0, 32));

			/* disable trace data collection in xscale_debug_entry() */
			saved_trace_buffer_enabled = xscale->trace.buffer_enabled;
			xscale->trace.buffer_enabled = 0;

			/* wait for and process debug entry */
			xscale_debug_entry(target);

			/* re-enable trace buffer, if enabled previously */
			xscale->trace.buffer_enabled = saved_trace_buffer_enabled;

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			/* re-arm the breakpoint we stepped over */
			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	retval = xscale_restore_banked(target);

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(armv4_5->cpsr->value, 0, 32));

	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
		buf_get_u32(armv4_5->pc->value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		register_cache_invalidate(armv4_5->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
1322
/* Core of single-step: the caller (xscale_step) has already removed any
 * breakpoint at the current PC.
 *
 * Computes the next PC by software simulation, arms IBCR0 as a one-shot
 * hardware breakpoint there, downloads CPSR/r0..r7/PC behind a resume
 * command, waits for the debug re-entry and disarms the step breakpoint.
 *
 * NOTE(review): current, address and handle_breakpoints are unused here;
 * they were consumed by the caller.
 */
static int xscale_step_inner(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_banked(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
		if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
			return retval;

	/* send CPSR */
	retval = xscale_send_u32(target,
		buf_get_u32(armv4_5->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* download r7..r0 in the order the debug handler expects */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	retval = xscale_send_u32(target,
		buf_get_u32(armv4_5->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
		buf_get_u32(armv4_5->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1407
1408 static int xscale_step(struct target *target, int current,
1409 uint32_t address, int handle_breakpoints)
1410 {
1411 struct arm *armv4_5 = target_to_arm(target);
1412 struct breakpoint *breakpoint = NULL;
1413
1414 uint32_t current_pc;
1415 int retval;
1416
1417 if (target->state != TARGET_HALTED)
1418 {
1419 LOG_WARNING("target not halted");
1420 return ERROR_TARGET_NOT_HALTED;
1421 }
1422
1423 /* current = 1: continue on current pc, otherwise continue at <address> */
1424 if (!current)
1425 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1426
1427 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1428
1429 /* if we're at the reset vector, we have to simulate the step */
1430 if (current_pc == 0x0)
1431 {
1432 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1433 return retval;
1434 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1435
1436 target->debug_reason = DBG_REASON_SINGLESTEP;
1437 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1438
1439 return ERROR_OK;
1440 }
1441
1442 /* the front-end may request us not to handle breakpoints */
1443 if (handle_breakpoints)
1444 breakpoint = breakpoint_find(target,
1445 buf_get_u32(armv4_5->pc->value, 0, 32));
1446 if (breakpoint != NULL) {
1447 retval = xscale_unset_breakpoint(target, breakpoint);
1448 if (retval != ERROR_OK)
1449 return retval;
1450 }
1451
1452 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1453
1454 if (breakpoint)
1455 {
1456 xscale_set_breakpoint(target, breakpoint);
1457 }
1458
1459 LOG_DEBUG("target stepped");
1460
1461 return ERROR_OK;
1462
1463 }
1464
/* Assert SRST with the core set up for halt-mode debug.
 *
 * DCSR is programmed (Hold Reset + Trap Reset) *before* SRST is pulled,
 * so that when reset is released in xscale_deassert_reset() the core
 * traps into the debug handler instead of running user code.
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	/* honor reset-halt: request a halt while reset is still asserted */
	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1506
/* Release SRST and reload the debug handler into the mini-icache.
 *
 * All hardware breakpoint/watchpoint bookkeeping is cleared (the reset
 * wiped the comparator registers), the register cache is invalidated,
 * then the debug handler image and the low/high vector cache lines are
 * downloaded 32 bytes at a time via xscale_load_ic().  Finally Hold
 * Reset is cleared so the core starts and traps into the handler.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	register_cache_invalidate(xscale->armv4_5_common.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		/* NOTE(review): the "- 1" presumably drops a trailing NUL from
		 * the generated handler image — confirm against its generator */
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			/* one mini-icache line is 32 bytes (8 words) */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad the remainder of the line with a constant filler word */
			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		/* download the low and high exception vector cache lines */
		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1640
/* Stub: halt-mode reads of individual core registers are not supported
 * by this driver yet.
 * NOTE(review): returns ERROR_OK despite doing nothing, so callers
 * cannot tell that the register was not actually read.
 */
static int xscale_read_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1648
/* Stub: halt-mode writes of individual core registers are not supported
 * by this driver yet.
 * NOTE(review): returns ERROR_OK despite doing nothing, so callers
 * cannot tell that the register was not actually written.
 */
static int xscale_write_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode, uint32_t value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1656
1657 static int xscale_full_context(struct target *target)
1658 {
1659 struct arm *armv4_5 = target_to_arm(target);
1660
1661 uint32_t *buffer;
1662
1663 int i, j;
1664
1665 LOG_DEBUG("-");
1666
1667 if (target->state != TARGET_HALTED)
1668 {
1669 LOG_WARNING("target not halted");
1670 return ERROR_TARGET_NOT_HALTED;
1671 }
1672
1673 buffer = malloc(4 * 8);
1674
1675 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1676 * we can't enter User mode on an XScale (unpredictable),
1677 * but User shares registers with SYS
1678 */
1679 for (i = 1; i < 7; i++)
1680 {
1681 enum arm_mode mode = armv4_5_number_to_mode(i);
1682 bool valid = true;
1683 struct reg *r;
1684
1685 if (mode == ARM_MODE_USR)
1686 continue;
1687
1688 /* check if there are invalid registers in the current mode
1689 */
1690 for (j = 0; valid && j <= 16; j++)
1691 {
1692 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1693 mode, j).valid)
1694 valid = false;
1695 }
1696 if (valid)
1697 continue;
1698
1699 /* request banked registers */
1700 xscale_send_u32(target, 0x0);
1701
1702 /* send CPSR for desired bank mode */
1703 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1704
1705 /* get banked registers: r8 to r14; and SPSR
1706 * except in USR/SYS mode
1707 */
1708 if (mode != ARM_MODE_SYS) {
1709 /* SPSR */
1710 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1711 mode, 16);
1712
1713 xscale_receive(target, buffer, 8);
1714
1715 buf_set_u32(r->value, 0, 32, buffer[7]);
1716 r->dirty = false;
1717 r->valid = true;
1718 } else {
1719 xscale_receive(target, buffer, 7);
1720 }
1721
1722 /* move data from buffer to register cache */
1723 for (j = 8; j <= 14; j++)
1724 {
1725 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1726 mode, j);
1727
1728 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1729 r->dirty = false;
1730 r->valid = true;
1731 }
1732 }
1733
1734 free(buffer);
1735
1736 return ERROR_OK;
1737 }
1738
/* Write back any dirty banked registers (r8..r14 and SPSR per mode)
 * to the target via debug handler command 0x1.
 *
 * USR is skipped (unenterable on XScale, shares registers with SYS);
 * SYS has no SPSR.  A mode is flushed only if at least one of its
 * banked registers is dirty, but the protocol then requires sending
 * the whole r8..r14 (+SPSR) set.
 *
 * NOTE(review): xscale_send_u32() return codes are ignored here.
 */
static int xscale_restore_banked(struct target *target)
{
	struct arm *armv4_5 = target_to_arm(target);

	int i, j;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written. Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++)
	{
		enum arm_mode mode = armv4_5_number_to_mode(i);
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j).dirty)
				goto dirty;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16).dirty)
				goto dirty;
		}

		/* there's nothing to flush for this mode */
		continue;

dirty:
		/* command 0x1: "send banked registers" */
		xscale_send_u32(target, 0x1);

		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}

		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}
	}

	return ERROR_OK;
}
1811
1812 static int xscale_read_memory(struct target *target, uint32_t address,
1813 uint32_t size, uint32_t count, uint8_t *buffer)
1814 {
1815 struct xscale_common *xscale = target_to_xscale(target);
1816 uint32_t *buf32;
1817 uint32_t i;
1818 int retval;
1819
1820 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1821
1822 if (target->state != TARGET_HALTED)
1823 {
1824 LOG_WARNING("target not halted");
1825 return ERROR_TARGET_NOT_HALTED;
1826 }
1827
1828 /* sanitize arguments */
1829 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1830 return ERROR_INVALID_ARGUMENTS;
1831
1832 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1833 return ERROR_TARGET_UNALIGNED_ACCESS;
1834
1835 /* send memory read request (command 0x1n, n: access size) */
1836 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1837 return retval;
1838
1839 /* send base address for read request */
1840 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1841 return retval;
1842
1843 /* send number of requested data words */
1844 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1845 return retval;
1846
1847 /* receive data from target (count times 32-bit words in host endianness) */
1848 buf32 = malloc(4 * count);
1849 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1850 return retval;
1851
1852 /* extract data from host-endian buffer into byte stream */
1853 for (i = 0; i < count; i++)
1854 {
1855 switch (size)
1856 {
1857 case 4:
1858 target_buffer_set_u32(target, buffer, buf32[i]);
1859 buffer += 4;
1860 break;
1861 case 2:
1862 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1863 buffer += 2;
1864 break;
1865 case 1:
1866 *buffer++ = buf32[i] & 0xff;
1867 break;
1868 default:
1869 LOG_ERROR("invalid read size");
1870 return ERROR_INVALID_ARGUMENTS;
1871 }
1872 }
1873
1874 free(buf32);
1875
1876 /* examine DCSR, to see if Sticky Abort (SA) got set */
1877 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1878 return retval;
1879 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1880 {
1881 /* clear SA bit */
1882 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1883 return retval;
1884
1885 return ERROR_TARGET_DATA_ABORT;
1886 }
1887
1888 return ERROR_OK;
1889 }
1890
1891 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1892 uint32_t size, uint32_t count, uint8_t *buffer)
1893 {
1894 struct xscale_common *xscale = target_to_xscale(target);
1895
1896 /* with MMU inactive, there are only physical addresses */
1897 if (!xscale->armv4_5_mmu.mmu_enabled)
1898 return xscale_read_memory(target, address, size, count, buffer);
1899
1900 /** \todo: provide a non-stub implementation of this routine. */
1901 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1902 target_name(target), __func__);
1903 return ERROR_FAIL;
1904 }
1905
1906 static int xscale_write_memory(struct target *target, uint32_t address,
1907 uint32_t size, uint32_t count, uint8_t *buffer)
1908 {
1909 struct xscale_common *xscale = target_to_xscale(target);
1910 int retval;
1911
1912 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1913
1914 if (target->state != TARGET_HALTED)
1915 {
1916 LOG_WARNING("target not halted");
1917 return ERROR_TARGET_NOT_HALTED;
1918 }
1919
1920 /* sanitize arguments */
1921 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1922 return ERROR_INVALID_ARGUMENTS;
1923
1924 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1925 return ERROR_TARGET_UNALIGNED_ACCESS;
1926
1927 /* send memory write request (command 0x2n, n: access size) */
1928 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1929 return retval;
1930
1931 /* send base address for read request */
1932 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1933 return retval;
1934
1935 /* send number of requested data words to be written*/
1936 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1937 return retval;
1938
1939 /* extract data from host-endian buffer into byte stream */
1940 #if 0
1941 for (i = 0; i < count; i++)
1942 {
1943 switch (size)
1944 {
1945 case 4:
1946 value = target_buffer_get_u32(target, buffer);
1947 xscale_send_u32(target, value);
1948 buffer += 4;
1949 break;
1950 case 2:
1951 value = target_buffer_get_u16(target, buffer);
1952 xscale_send_u32(target, value);
1953 buffer += 2;
1954 break;
1955 case 1:
1956 value = *buffer;
1957 xscale_send_u32(target, value);
1958 buffer += 1;
1959 break;
1960 default:
1961 LOG_ERROR("should never get here");
1962 exit(-1);
1963 }
1964 }
1965 #endif
1966 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1967 return retval;
1968
1969 /* examine DCSR, to see if Sticky Abort (SA) got set */
1970 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1971 return retval;
1972 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1973 {
1974 /* clear SA bit */
1975 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1976 return retval;
1977
1978 return ERROR_TARGET_DATA_ABORT;
1979 }
1980
1981 return ERROR_OK;
1982 }
1983
1984 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1985 uint32_t size, uint32_t count, uint8_t *buffer)
1986 {
1987 struct xscale_common *xscale = target_to_xscale(target);
1988
1989 /* with MMU inactive, there are only physical addresses */
1990 if (!xscale->armv4_5_mmu.mmu_enabled)
1991 return xscale_read_memory(target, address, size, count, buffer);
1992
1993 /** \todo: provide a non-stub implementation of this routine. */
1994 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1995 target_name(target), __func__);
1996 return ERROR_FAIL;
1997 }
1998
/* Bulk download: this driver has no faster path, so simply forward to
 * xscale_write_memory() using 32-bit word accesses. */
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
	uint32_t count, uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
2004
2005 static int xscale_get_ttb(struct target *target, uint32_t *result)
2006 {
2007 struct xscale_common *xscale = target_to_xscale(target);
2008 uint32_t ttb;
2009 int retval;
2010
2011 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2012 if (retval != ERROR_OK)
2013 return retval;
2014 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2015
2016 *result = ttb;
2017
2018 return ERROR_OK;
2019 }
2020
2021 static void xscale_disable_mmu_caches(struct target *target, int mmu,
2022 int d_u_cache, int i_cache)
2023 {
2024 struct xscale_common *xscale = target_to_xscale(target);
2025 uint32_t cp15_control;
2026
2027 /* read cp15 control register */
2028 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2029 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2030
2031 if (mmu)
2032 cp15_control &= ~0x1U;
2033
2034 if (d_u_cache)
2035 {
2036 /* clean DCache */
2037 xscale_send_u32(target, 0x50);
2038 xscale_send_u32(target, xscale->cache_clean_address);
2039
2040 /* invalidate DCache */
2041 xscale_send_u32(target, 0x51);
2042
2043 cp15_control &= ~0x4U;
2044 }
2045
2046 if (i_cache)
2047 {
2048 /* invalidate ICache */
2049 xscale_send_u32(target, 0x52);
2050 cp15_control &= ~0x1000U;
2051 }
2052
2053 /* write new cp15 control register */
2054 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2055
2056 /* execute cpwait to ensure outstanding operations complete */
2057 xscale_send_u32(target, 0x53);
2058 }
2059
2060 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2061 int d_u_cache, int i_cache)
2062 {
2063 struct xscale_common *xscale = target_to_xscale(target);
2064 uint32_t cp15_control;
2065
2066 /* read cp15 control register */
2067 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2068 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2069
2070 if (mmu)
2071 cp15_control |= 0x1U;
2072
2073 if (d_u_cache)
2074 cp15_control |= 0x4U;
2075
2076 if (i_cache)
2077 cp15_control |= 0x1000U;
2078
2079 /* write new cp15 control register */
2080 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2081
2082 /* execute cpwait to ensure outstanding operations complete */
2083 xscale_send_u32(target, 0x53);
2084 }
2085
2086 static int xscale_set_breakpoint(struct target *target,
2087 struct breakpoint *breakpoint)
2088 {
2089 int retval;
2090 struct xscale_common *xscale = target_to_xscale(target);
2091
2092 if (target->state != TARGET_HALTED)
2093 {
2094 LOG_WARNING("target not halted");
2095 return ERROR_TARGET_NOT_HALTED;
2096 }
2097
2098 if (breakpoint->set)
2099 {
2100 LOG_WARNING("breakpoint already set");
2101 return ERROR_OK;
2102 }
2103
2104 if (breakpoint->type == BKPT_HARD)
2105 {
2106 uint32_t value = breakpoint->address | 1;
2107 if (!xscale->ibcr0_used)
2108 {
2109 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2110 xscale->ibcr0_used = 1;
2111 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2112 }
2113 else if (!xscale->ibcr1_used)
2114 {
2115 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2116 xscale->ibcr1_used = 1;
2117 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2118 }
2119 else
2120 {
2121 LOG_ERROR("BUG: no hardware comparator available");
2122 return ERROR_OK;
2123 }
2124 }
2125 else if (breakpoint->type == BKPT_SOFT)
2126 {
2127 if (breakpoint->length == 4)
2128 {
2129 /* keep the original instruction in target endianness */
2130 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2131 {
2132 return retval;
2133 }
2134 /* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2135 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2136 {
2137 return retval;
2138 }
2139 }
2140 else
2141 {
2142 /* keep the original instruction in target endianness */
2143 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2144 {
2145 return retval;
2146 }
2147 /* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2148 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2149 {
2150 return retval;
2151 }
2152 }
2153 breakpoint->set = 1;
2154
2155 xscale_send_u32(target, 0x50); /* clean dcache */
2156 xscale_send_u32(target, xscale->cache_clean_address);
2157 xscale_send_u32(target, 0x51); /* invalidate dcache */
2158 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2159 }
2160
2161 return ERROR_OK;
2162 }
2163
2164 static int xscale_add_breakpoint(struct target *target,
2165 struct breakpoint *breakpoint)
2166 {
2167 struct xscale_common *xscale = target_to_xscale(target);
2168
2169 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2170 {
2171 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2172 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2173 }
2174
2175 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2176 {
2177 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2178 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2179 }
2180
2181 if (breakpoint->type == BKPT_HARD)
2182 {
2183 xscale->ibcr_available--;
2184 }
2185
2186 return ERROR_OK;
2187 }
2188
2189 static int xscale_unset_breakpoint(struct target *target,
2190 struct breakpoint *breakpoint)
2191 {
2192 int retval;
2193 struct xscale_common *xscale = target_to_xscale(target);
2194
2195 if (target->state != TARGET_HALTED)
2196 {
2197 LOG_WARNING("target not halted");
2198 return ERROR_TARGET_NOT_HALTED;
2199 }
2200
2201 if (!breakpoint->set)
2202 {
2203 LOG_WARNING("breakpoint not set");
2204 return ERROR_OK;
2205 }
2206
2207 if (breakpoint->type == BKPT_HARD)
2208 {
2209 if (breakpoint->set == 1)
2210 {
2211 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2212 xscale->ibcr0_used = 0;
2213 }
2214 else if (breakpoint->set == 2)
2215 {
2216 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2217 xscale->ibcr1_used = 0;
2218 }
2219 breakpoint->set = 0;
2220 }
2221 else
2222 {
2223 /* restore original instruction (kept in target endianness) */
2224 if (breakpoint->length == 4)
2225 {
2226 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2227 {
2228 return retval;
2229 }
2230 }
2231 else
2232 {
2233 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2234 {
2235 return retval;
2236 }
2237 }
2238 breakpoint->set = 0;
2239
2240 xscale_send_u32(target, 0x50); /* clean dcache */
2241 xscale_send_u32(target, xscale->cache_clean_address);
2242 xscale_send_u32(target, 0x51); /* invalidate dcache */
2243 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2244 }
2245
2246 return ERROR_OK;
2247 }
2248
2249 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2250 {
2251 struct xscale_common *xscale = target_to_xscale(target);
2252
2253 if (target->state != TARGET_HALTED)
2254 {
2255 LOG_WARNING("target not halted");
2256 return ERROR_TARGET_NOT_HALTED;
2257 }
2258
2259 if (breakpoint->set)
2260 {
2261 xscale_unset_breakpoint(target, breakpoint);
2262 }
2263
2264 if (breakpoint->type == BKPT_HARD)
2265 xscale->ibcr_available++;
2266
2267 return ERROR_OK;
2268 }
2269
2270 static int xscale_set_watchpoint(struct target *target,
2271 struct watchpoint *watchpoint)
2272 {
2273 struct xscale_common *xscale = target_to_xscale(target);
2274 uint32_t enable = 0;
2275 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2276 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2277
2278 if (target->state != TARGET_HALTED)
2279 {
2280 LOG_WARNING("target not halted");
2281 return ERROR_TARGET_NOT_HALTED;
2282 }
2283
2284 switch (watchpoint->rw)
2285 {
2286 case WPT_READ:
2287 enable = 0x3;
2288 break;
2289 case WPT_ACCESS:
2290 enable = 0x2;
2291 break;
2292 case WPT_WRITE:
2293 enable = 0x1;
2294 break;
2295 default:
2296 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2297 }
2298
2299 /* For watchpoint across more than one word, both DBR registers must
2300 be enlisted, with the second used as a mask. */
2301 if (watchpoint->length > 4)
2302 {
2303 if (xscale->dbr0_used || xscale->dbr1_used)
2304 {
2305 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2306 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2307 }
2308
2309 /* Write mask value to DBR1, based on the length argument.
2310 * Address bits ignored by the comparator are those set in mask. */
2311 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2312 watchpoint->length - 1);
2313 xscale->dbr1_used = 1;
2314 enable |= 0x100; /* DBCON[M] */
2315 }
2316
2317 if (!xscale->dbr0_used)
2318 {
2319 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2320 dbcon_value |= enable;
2321 xscale_set_reg_u32(dbcon, dbcon_value);
2322 watchpoint->set = 1;
2323 xscale->dbr0_used = 1;
2324 }
2325 else if (!xscale->dbr1_used)
2326 {
2327 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2328 dbcon_value |= enable << 2;
2329 xscale_set_reg_u32(dbcon, dbcon_value);
2330 watchpoint->set = 2;
2331 xscale->dbr1_used = 1;
2332 }
2333 else
2334 {
2335 LOG_ERROR("BUG: no hardware comparator available");
2336 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2337 }
2338
2339 return ERROR_OK;
2340 }
2341
2342 static int xscale_add_watchpoint(struct target *target,
2343 struct watchpoint *watchpoint)
2344 {
2345 struct xscale_common *xscale = target_to_xscale(target);
2346
2347 if (xscale->dbr_available < 1)
2348 {
2349 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2350 }
2351
2352 if (watchpoint->value)
2353 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2354
2355 /* check that length is a power of two */
2356 for (uint32_t len = watchpoint->length; len != 1; len /= 2)
2357 {
2358 if (len % 2)
2359 {
2360 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2361 return ERROR_COMMAND_ARGUMENT_INVALID;
2362 }
2363 }
2364
2365 if (watchpoint->length == 4) /* single word watchpoint */
2366 {
2367 xscale->dbr_available--; /* one DBR reg used */
2368 return ERROR_OK;
2369 }
2370
2371 /* watchpoints across multiple words require both DBR registers */
2372 if (xscale->dbr_available < 2)
2373 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2374
2375 xscale->dbr_available = 0;
2376 return ERROR_OK;
2377 }
2378
2379 static int xscale_unset_watchpoint(struct target *target,
2380 struct watchpoint *watchpoint)
2381 {
2382 struct xscale_common *xscale = target_to_xscale(target);
2383 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2384 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2385
2386 if (target->state != TARGET_HALTED)
2387 {
2388 LOG_WARNING("target not halted");
2389 return ERROR_TARGET_NOT_HALTED;
2390 }
2391
2392 if (!watchpoint->set)
2393 {
2394 LOG_WARNING("breakpoint not set");
2395 return ERROR_OK;
2396 }
2397
2398 if (watchpoint->set == 1)
2399 {
2400 if (watchpoint->length > 4)
2401 {
2402 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2403 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2404 }
2405 else
2406 dbcon_value &= ~0x3;
2407
2408 xscale_set_reg_u32(dbcon, dbcon_value);
2409 xscale->dbr0_used = 0;
2410 }
2411 else if (watchpoint->set == 2)
2412 {
2413 dbcon_value &= ~0xc;
2414 xscale_set_reg_u32(dbcon, dbcon_value);
2415 xscale->dbr1_used = 0;
2416 }
2417 watchpoint->set = 0;
2418
2419 return ERROR_OK;
2420 }
2421
2422 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2423 {
2424 struct xscale_common *xscale = target_to_xscale(target);
2425
2426 if (target->state != TARGET_HALTED)
2427 {
2428 LOG_WARNING("target not halted");
2429 return ERROR_TARGET_NOT_HALTED;
2430 }
2431
2432 if (watchpoint->set)
2433 {
2434 xscale_unset_watchpoint(target, watchpoint);
2435 }
2436
2437 if (watchpoint->length > 4)
2438 xscale->dbr_available++; /* both DBR regs now available */
2439
2440 xscale->dbr_available++;
2441
2442 return ERROR_OK;
2443 }
2444
2445 static int xscale_get_reg(struct reg *reg)
2446 {
2447 struct xscale_reg *arch_info = reg->arch_info;
2448 struct target *target = arch_info->target;
2449 struct xscale_common *xscale = target_to_xscale(target);
2450
2451 /* DCSR, TX and RX are accessible via JTAG */
2452 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2453 {
2454 return xscale_read_dcsr(arch_info->target);
2455 }
2456 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2457 {
2458 /* 1 = consume register content */
2459 return xscale_read_tx(arch_info->target, 1);
2460 }
2461 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2462 {
2463 /* can't read from RX register (host -> debug handler) */
2464 return ERROR_OK;
2465 }
2466 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2467 {
2468 /* can't (explicitly) read from TXRXCTRL register */
2469 return ERROR_OK;
2470 }
2471 else /* Other DBG registers have to be transfered by the debug handler */
2472 {
2473 /* send CP read request (command 0x40) */
2474 xscale_send_u32(target, 0x40);
2475
2476 /* send CP register number */
2477 xscale_send_u32(target, arch_info->dbg_handler_number);
2478
2479 /* read register value */
2480 xscale_read_tx(target, 1);
2481 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2482
2483 reg->dirty = 0;
2484 reg->valid = 1;
2485 }
2486
2487 return ERROR_OK;
2488 }
2489
2490 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2491 {
2492 struct xscale_reg *arch_info = reg->arch_info;
2493 struct target *target = arch_info->target;
2494 struct xscale_common *xscale = target_to_xscale(target);
2495 uint32_t value = buf_get_u32(buf, 0, 32);
2496
2497 /* DCSR, TX and RX are accessible via JTAG */
2498 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2499 {
2500 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2501 return xscale_write_dcsr(arch_info->target, -1, -1);
2502 }
2503 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2504 {
2505 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2506 return xscale_write_rx(arch_info->target);
2507 }
2508 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2509 {
2510 /* can't write to TX register (debug-handler -> host) */
2511 return ERROR_OK;
2512 }
2513 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2514 {
2515 /* can't (explicitly) write to TXRXCTRL register */
2516 return ERROR_OK;
2517 }
2518 else /* Other DBG registers have to be transfered by the debug handler */
2519 {
2520 /* send CP write request (command 0x41) */
2521 xscale_send_u32(target, 0x41);
2522
2523 /* send CP register number */
2524 xscale_send_u32(target, arch_info->dbg_handler_number);
2525
2526 /* send CP register value */
2527 xscale_send_u32(target, value);
2528 buf_set_u32(reg->value, 0, 32, value);
2529 }
2530
2531 return ERROR_OK;
2532 }
2533
2534 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2535 {
2536 struct xscale_common *xscale = target_to_xscale(target);
2537 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2538 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2539
2540 /* send CP write request (command 0x41) */
2541 xscale_send_u32(target, 0x41);
2542
2543 /* send CP register number */
2544 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2545
2546 /* send CP register value */
2547 xscale_send_u32(target, value);
2548 buf_set_u32(dcsr->value, 0, 32, value);
2549
2550 return ERROR_OK;
2551 }
2552
2553 static int xscale_read_trace(struct target *target)
2554 {
2555 struct xscale_common *xscale = target_to_xscale(target);
2556 struct arm *armv4_5 = &xscale->armv4_5_common;
2557 struct xscale_trace_data **trace_data_p;
2558
2559 /* 258 words from debug handler
2560 * 256 trace buffer entries
2561 * 2 checkpoint addresses
2562 */
2563 uint32_t trace_buffer[258];
2564 int is_address[256];
2565 int i, j;
2566 unsigned int num_checkpoints = 0;
2567
2568 if (target->state != TARGET_HALTED)
2569 {
2570 LOG_WARNING("target must be stopped to read trace data");
2571 return ERROR_TARGET_NOT_HALTED;
2572 }
2573
2574 /* send read trace buffer command (command 0x61) */
2575 xscale_send_u32(target, 0x61);
2576
2577 /* receive trace buffer content */
2578 xscale_receive(target, trace_buffer, 258);
2579
2580 /* parse buffer backwards to identify address entries */
2581 for (i = 255; i >= 0; i--)
2582 {
2583 /* also count number of checkpointed entries */
2584 if ((trace_buffer[i] & 0xe0) == 0xc0)
2585 num_checkpoints++;
2586
2587 is_address[i] = 0;
2588 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2589 ((trace_buffer[i] & 0xf0) == 0xd0))
2590 {
2591 if (i > 0)
2592 is_address[--i] = 1;
2593 if (i > 0)
2594 is_address[--i] = 1;
2595 if (i > 0)
2596 is_address[--i] = 1;
2597 if (i > 0)
2598 is_address[--i] = 1;
2599 }
2600 }
2601
2602
2603 /* search first non-zero entry that is not part of an address */
2604 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2605 ;
2606
2607 if (j == 256)
2608 {
2609 LOG_DEBUG("no trace data collected");
2610 return ERROR_XSCALE_NO_TRACE_DATA;
2611 }
2612
2613 /* account for possible partial address at buffer start (wrap mode only) */
2614 if (is_address[0])
2615 { /* first entry is address; complete set of 4? */
2616 i = 1;
2617 while (i < 4)
2618 if (!is_address[i++])
2619 break;
2620 if (i < 4)
2621 j += i; /* partial address; can't use it */
2622 }
2623
2624 /* if first valid entry is indirect branch, can't use that either (no address) */
2625 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2626 j++;
2627
2628 /* walk linked list to terminating entry */
2629 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2630 ;
2631
2632 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2633 (*trace_data_p)->next = NULL;
2634 (*trace_data_p)->chkpt0 = trace_buffer[256];
2635 (*trace_data_p)->chkpt1 = trace_buffer[257];
2636 (*trace_data_p)->last_instruction =
2637 buf_get_u32(armv4_5->pc->value, 0, 32);
2638 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2639 (*trace_data_p)->depth = 256 - j;
2640 (*trace_data_p)->num_checkpoints = num_checkpoints;
2641
2642 for (i = j; i < 256; i++)
2643 {
2644 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2645 if (is_address[i])
2646 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2647 else
2648 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2649 }
2650
2651 return ERROR_OK;
2652 }
2653
2654 static int xscale_read_instruction(struct target *target, uint32_t pc,
2655 struct arm_instruction *instruction)
2656 {
2657 struct xscale_common *const xscale = target_to_xscale(target);
2658 int i;
2659 int section = -1;
2660 size_t size_read;
2661 uint32_t opcode;
2662 int retval;
2663
2664 if (!xscale->trace.image)
2665 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2666
2667 /* search for the section the current instruction belongs to */
2668 for (i = 0; i < xscale->trace.image->num_sections; i++)
2669 {
2670 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2671 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > pc))
2672 {
2673 section = i;
2674 break;
2675 }
2676 }
2677
2678 if (section == -1)
2679 {
2680 /* current instruction couldn't be found in the image */
2681 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2682 }
2683
2684 if (xscale->trace.core_state == ARM_STATE_ARM)
2685 {
2686 uint8_t buf[4];
2687 if ((retval = image_read_section(xscale->trace.image, section,
2688 pc - xscale->trace.image->sections[section].base_address,
2689 4, buf, &size_read)) != ERROR_OK)
2690 {
2691 LOG_ERROR("error while reading instruction: %i", retval);
2692 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2693 }
2694 opcode = target_buffer_get_u32(target, buf);
2695 arm_evaluate_opcode(opcode, pc, instruction);
2696 }
2697 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2698 {
2699 uint8_t buf[2];
2700 if ((retval = image_read_section(xscale->trace.image, section,
2701 pc - xscale->trace.image->sections[section].base_address,
2702 2, buf, &size_read)) != ERROR_OK)
2703 {
2704 LOG_ERROR("error while reading instruction: %i", retval);
2705 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2706 }
2707 opcode = target_buffer_get_u16(target, buf);
2708 thumb_evaluate_opcode(opcode, pc, instruction);
2709 }
2710 else
2711 {
2712 LOG_ERROR("BUG: unknown core state encountered");
2713 exit(-1);
2714 }
2715
2716 return ERROR_OK;
2717 }
2718
2719 /* Extract address encoded into trace data.
2720 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2721 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2722 int i, uint32_t *target)
2723 {
2724 /* if there are less than four entries prior to the indirect branch message
2725 * we can't extract the address */
2726 if (i < 4)
2727 *target = 0;
2728 else
2729 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2730 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2731 }
2732
2733 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2734 struct arm_instruction *instruction,
2735 struct command_context *cmd_ctx)
2736 {
2737 int retval = xscale_read_instruction(target, pc, instruction);
2738 if (retval == ERROR_OK)
2739 command_print(cmd_ctx, "%s", instruction->text);
2740 else
2741 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2742 }
2743
2744 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2745 {
2746 struct xscale_common *xscale = target_to_xscale(target);
2747 struct xscale_trace_data *trace_data = xscale->trace.data;
2748 int i, retval;
2749 uint32_t breakpoint_pc;
2750 struct arm_instruction instruction;
2751 uint32_t current_pc = 0; /* initialized when address determined */
2752
2753 if (!xscale->trace.image)
2754 LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
2755
2756 /* loop for each trace buffer that was loaded from target */
2757 while (trace_data)
2758 {
2759 int chkpt = 0; /* incremented as checkpointed entries found */
2760 int j;
2761
2762 /* FIXME: set this to correct mode when trace buffer is first enabled */
2763 xscale->trace.core_state = ARM_STATE_ARM;
2764
2765 /* loop for each entry in this trace buffer */
2766 for (i = 0; i < trace_data->depth; i++)
2767 {
2768 int exception = 0;
2769 uint32_t chkpt_reg = 0x0;
2770 uint32_t branch_target = 0;
2771 int count;
2772
2773 /* trace entry type is upper nybble of 'message byte' */
2774 int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
2775
2776 /* Target addresses of indirect branches are written into buffer
2777 * before the message byte representing the branch. Skip past it */
2778 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2779 continue;
2780
2781 switch (trace_msg_type)
2782 {
2783 case 0: /* Exceptions */
2784 case 1:
2785 case 2:
2786 case 3:
2787 case 4:
2788 case 5:
2789 case 6:
2790 case 7:
2791 exception = (trace_data->entries[i].data & 0x70) >> 4;
2792
2793 /* FIXME: vector table may be at ffff0000 */
2794 branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
2795 break;
2796
2797 case 8: /* Direct Branch */
2798 break;
2799
2800 case 9: /* Indirect Branch */
2801 xscale_branch_address(trace_data, i, &branch_target);
2802 break;
2803
2804 case 13: /* Checkpointed Indirect Branch */
2805 xscale_branch_address(trace_data, i, &branch_target);
2806 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2807 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2808 else
2809 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2810
2811 chkpt++;
2812 break;
2813
2814 case 12: /* Checkpointed Direct Branch */
2815 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2816 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2817 else
2818 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2819
2820 /* if no current_pc, checkpoint will be starting point */
2821 if (current_pc == 0)
2822 branch_target = chkpt_reg;
2823
2824 chkpt++;
2825 break;
2826
2827 case 15: /* Roll-over */
2828 break;
2829
2830 default: /* Reserved */
2831 LOG_WARNING("trace is suspect: invalid trace message byte");
2832 continue;
2833
2834 }
2835
2836 /* If we don't have the current_pc yet, but we did get the branch target
2837 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
2838 * then we can start displaying instructions at the next iteration, with
2839 * branch_target as the starting point.
2840 */
2841 if (current_pc == 0)
2842 {
2843 current_pc = branch_target; /* remains 0 unless branch_target obtained */
2844 continue;
2845 }
2846
2847 /* We have current_pc. Read and display the instructions from the image.
2848 * First, display count instructions (lower nybble of message byte). */
2849 count = trace_data->entries[i].data & 0x0f;
2850 for (j = 0; j < count; j++)
2851 {
2852 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2853 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2854 }
2855
2856 /* An additional instruction is implicitly added to count for
2857 * rollover and some exceptions: undef, swi, prefetch abort. */
2858 if ((trace_msg_type == 15) || (exception > 0 && exception < 4))
2859 {
2860 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2861 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2862 }
2863
2864 if (trace_msg_type == 15) /* rollover */
2865 continue;
2866
2867 if (exception)
2868 {
2869 command_print(cmd_ctx, "--- exception %i ---", exception);
2870 continue;
2871 }
2872
2873 /* not exception or rollover; next instruction is a branch and is
2874 * not included in the count */
2875 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2876
2877 /* for direct branches, extract branch destination from instruction */
2878 if ((trace_msg_type == 8) || (trace_msg_type == 12))
2879 {
2880 retval = xscale_read_instruction(target, current_pc, &instruction);
2881 if (retval == ERROR_OK)
2882 current_pc = instruction.info.b_bl_bx_blx.target_address;
2883 else
2884 current_pc = 0; /* branch destination unknown */
2885
2886 /* direct branch w/ checkpoint; can also get from checkpoint reg */
2887 if (trace_msg_type == 12)
2888 {
2889 if (current_pc == 0)
2890 current_pc = chkpt_reg;
2891 else if (current_pc != chkpt_reg) /* sanity check */
2892 LOG_WARNING("trace is suspect: checkpoint register "
2893 "inconsistent with adddress from image");
2894 }
2895
2896 if (current_pc == 0)
2897 command_print(cmd_ctx, "address unknown");
2898
2899 continue;
2900 }
2901
2902 /* indirect branch; the branch destination was read from trace buffer */
2903 if ((trace_msg_type == 9) || (trace_msg_type == 13))
2904 {
2905 current_pc = branch_target;
2906
2907 /* sanity check (checkpoint reg is redundant) */
2908 if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
2909 LOG_WARNING("trace is suspect: checkpoint register "
2910 "inconsistent with address from trace buffer");
2911 }
2912
2913 } /* END: for (i = 0; i < trace_data->depth; i++) */
2914
2915 breakpoint_pc = trace_data->last_instruction; /* used below */
2916 trace_data = trace_data->next;
2917
2918 } /* END: while (trace_data) */
2919
2920 /* Finally... display all instructions up to the value of the pc when the
2921 * debug break occurred (saved when trace data was collected from target).
2922 * This is necessary because the trace only records execution branches and 16
2923 * consecutive instructions (rollovers), so last few typically missed.
2924 */
2925 if (current_pc == 0)
2926 return ERROR_OK; /* current_pc was never found */
2927
2928 /* how many instructions remaining? */
2929 int gap_count = (breakpoint_pc - current_pc) /
2930 (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
2931
2932 /* should never be negative or over 16, but verify */
2933 if (gap_count < 0 || gap_count > 16)
2934 {
2935 LOG_WARNING("trace is suspect: excessive gap at end of trace");
2936 return ERROR_OK; /* bail; large number or negative value no good */
2937 }
2938
2939 /* display remaining instructions */
2940 for (i = 0; i < gap_count; i++)
2941 {
2942 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2943 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2944 }
2945
2946 return ERROR_OK;
2947 }
2948
/* Accessors used by the register-cache framework for the XScale
 * debug registers built in xscale_build_reg_cache() below. */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2953
2954 static void xscale_build_reg_cache(struct target *target)
2955 {
2956 struct xscale_common *xscale = target_to_xscale(target);
2957 struct arm *armv4_5 = &xscale->armv4_5_common;
2958 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2959 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2960 int i;
2961 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2962
2963 (*cache_p) = arm_build_reg_cache(target, armv4_5);
2964
2965 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2966 cache_p = &(*cache_p)->next;
2967
2968 /* fill in values for the xscale reg cache */
2969 (*cache_p)->name = "XScale registers";
2970 (*cache_p)->next = NULL;
2971 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2972 (*cache_p)->num_regs = num_regs;
2973
2974 for (i = 0; i < num_regs; i++)
2975 {
2976 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2977 (*cache_p)->reg_list[i].value = calloc(4, 1);
2978 (*cache_p)->reg_list[i].dirty = 0;
2979 (*cache_p)->reg_list[i].valid = 0;
2980 (*cache_p)->reg_list[i].size = 32;
2981 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2982 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2983 arch_info[i] = xscale_reg_arch_info[i];
2984 arch_info[i].target = target;
2985 }
2986
2987 xscale->reg_cache = (*cache_p);
2988 }
2989
/* Target-framework init hook: build the register caches for this
 * target.  Called once after target creation. */
static int xscale_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2996
/* One-time initialization of the XScale-specific target state:
 * derives the JTAG IR length from the variant name, seeds the debug
 * handler address and exception vectors, and wires the ARMv4/5 core
 * and MMU callbacks to the XScale implementations. */
static int xscale_init_arch_info(struct target *target,
		struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specific data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* a variant param ... */
	if (variant) {
		int ir_length = 0;

		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else if (strcmp(variant, "pxa3xx") == 0)
			ir_length = 11;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		/* the variant name wins over a mismatching configured IR length */
		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* PXA3xx shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	/* reset vector branches into the debug handler ... */
	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* ... all other vectors branch to themselves (infinite loop) */
	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction-breakpoint and two data-breakpoint comparators */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
			target_name(target));

	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	xscale->trace.capture_status = TRACE_IDLE;
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.buffer_enabled = 0;
	xscale->trace.buffer_fill = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	arm_init_arch_info(target, armv4_5);

	/* MMU/cache callbacks used by the generic ARMv4/5 MMU code */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
3112
3113 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3114 {
3115 struct xscale_common *xscale;
3116
3117 if (sizeof xscale_debug_handler - 1 > 0x800) {
3118 LOG_ERROR("debug_handler.bin: larger than 2kb");
3119 return ERROR_FAIL;
3120 }
3121
3122 xscale = calloc(1, sizeof(*xscale));
3123 if (!xscale)
3124 return ERROR_FAIL;
3125
3126 return xscale_init_arch_info(target, xscale, target->tap,
3127 target->variant);
3128 }
3129
3130 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3131 {
3132 struct target *target = NULL;
3133 struct xscale_common *xscale;
3134 int retval;
3135 uint32_t handler_address;
3136
3137 if (CMD_ARGC < 2)
3138 {
3139 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3140 return ERROR_OK;
3141 }
3142
3143 if ((target = get_target(CMD_ARGV[0])) == NULL)
3144 {
3145 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3146 return ERROR_FAIL;
3147 }
3148
3149 xscale = target_to_xscale(target);
3150 retval = xscale_verify_pointer(CMD_CTX, xscale);
3151 if (retval != ERROR_OK)
3152 return retval;
3153
3154 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3155
3156 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3157 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3158 {
3159 xscale->handler_address = handler_address;
3160 }
3161 else
3162 {
3163 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3164 return ERROR_FAIL;
3165 }
3166
3167 return ERROR_OK;
3168 }
3169
3170 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3171 {
3172 struct target *target = NULL;
3173 struct xscale_common *xscale;
3174 int retval;
3175 uint32_t cache_clean_address;
3176
3177 if (CMD_ARGC < 2)
3178 {
3179 return ERROR_COMMAND_SYNTAX_ERROR;
3180 }
3181
3182 target = get_target(CMD_ARGV[0]);
3183 if (target == NULL)
3184 {
3185 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3186 return ERROR_FAIL;
3187 }
3188 xscale = target_to_xscale(target);
3189 retval = xscale_verify_pointer(CMD_CTX, xscale);
3190 if (retval != ERROR_OK)
3191 return retval;
3192
3193 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3194
3195 if (cache_clean_address & 0xffff)
3196 {
3197 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3198 }
3199 else
3200 {
3201 xscale->cache_clean_address = cache_clean_address;
3202 }
3203
3204 return ERROR_OK;
3205 }
3206
3207 COMMAND_HANDLER(xscale_handle_cache_info_command)
3208 {
3209 struct target *target = get_current_target(CMD_CTX);
3210 struct xscale_common *xscale = target_to_xscale(target);
3211 int retval;
3212
3213 retval = xscale_verify_pointer(CMD_CTX, xscale);
3214 if (retval != ERROR_OK)
3215 return retval;
3216
3217 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3218 }
3219
3220 static int xscale_virt2phys(struct target *target,
3221 uint32_t virtual, uint32_t *physical)
3222 {
3223 struct xscale_common *xscale = target_to_xscale(target);
3224 uint32_t cb;
3225
3226 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3227 LOG_ERROR(xscale_not);
3228 return ERROR_TARGET_INVALID;
3229 }
3230
3231 uint32_t ret;
3232 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3233 virtual, &cb, &ret);
3234 if (retval != ERROR_OK)
3235 return retval;
3236 *physical = ret;
3237 return ERROR_OK;
3238 }
3239
3240 static int xscale_mmu(struct target *target, int *enabled)
3241 {
3242 struct xscale_common *xscale = target_to_xscale(target);
3243
3244 if (target->state != TARGET_HALTED)
3245 {
3246 LOG_ERROR("Target not halted");
3247 return ERROR_TARGET_INVALID;
3248 }
3249 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3250 return ERROR_OK;
3251 }
3252
3253 COMMAND_HANDLER(xscale_handle_mmu_command)
3254 {
3255 struct target *target = get_current_target(CMD_CTX);
3256 struct xscale_common *xscale = target_to_xscale(target);
3257 int retval;
3258
3259 retval = xscale_verify_pointer(CMD_CTX, xscale);
3260 if (retval != ERROR_OK)
3261 return retval;
3262
3263 if (target->state != TARGET_HALTED)
3264 {
3265 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3266 return ERROR_OK;
3267 }
3268
3269 if (CMD_ARGC >= 1)
3270 {
3271 bool enable;
3272 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3273 if (enable)
3274 xscale_enable_mmu_caches(target, 1, 0, 0);
3275 else
3276 xscale_disable_mmu_caches(target, 1, 0, 0);
3277 xscale->armv4_5_mmu.mmu_enabled = enable;
3278 }
3279
3280 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3281
3282 return ERROR_OK;
3283 }
3284
3285 COMMAND_HANDLER(xscale_handle_idcache_command)
3286 {
3287 struct target *target = get_current_target(CMD_CTX);
3288 struct xscale_common *xscale = target_to_xscale(target);
3289
3290 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3291 if (retval != ERROR_OK)
3292 return retval;
3293
3294 if (target->state != TARGET_HALTED)
3295 {
3296 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3297 return ERROR_OK;
3298 }
3299
3300 bool icache = false;
3301 if (strcmp(CMD_NAME, "icache") == 0)
3302 icache = true;
3303 if (CMD_ARGC >= 1)
3304 {
3305 bool enable;
3306 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3307 if (icache) {
3308 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3309 if (enable)
3310 xscale_enable_mmu_caches(target, 0, 0, 1);
3311 else
3312 xscale_disable_mmu_caches(target, 0, 0, 1);
3313 } else {
3314 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3315 if (enable)
3316 xscale_enable_mmu_caches(target, 0, 1, 0);
3317 else
3318 xscale_disable_mmu_caches(target, 0, 1, 0);
3319 }
3320 }
3321
3322 bool enabled = icache ?
3323 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3324 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3325 const char *msg = enabled ? "enabled" : "disabled";
3326 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3327
3328 return ERROR_OK;
3329 }
3330
3331 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3332 {
3333 struct target *target = get_current_target(CMD_CTX);
3334 struct xscale_common *xscale = target_to_xscale(target);
3335 int retval;
3336
3337 retval = xscale_verify_pointer(CMD_CTX, xscale);
3338 if (retval != ERROR_OK)
3339 return retval;
3340
3341 if (CMD_ARGC < 1)
3342 {
3343 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3344 }
3345 else
3346 {
3347 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3348 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3349 xscale_write_dcsr(target, -1, -1);
3350 }
3351
3352 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3353
3354 return ERROR_OK;
3355 }
3356
3357
3358 COMMAND_HANDLER(xscale_handle_vector_table_command)
3359 {
3360 struct target *target = get_current_target(CMD_CTX);
3361 struct xscale_common *xscale = target_to_xscale(target);
3362 int err = 0;
3363 int retval;
3364
3365 retval = xscale_verify_pointer(CMD_CTX, xscale);
3366 if (retval != ERROR_OK)
3367 return retval;
3368
3369 if (CMD_ARGC == 0) /* print current settings */
3370 {
3371 int idx;
3372
3373 command_print(CMD_CTX, "active user-set static vectors:");
3374 for (idx = 1; idx < 8; idx++)
3375 if (xscale->static_low_vectors_set & (1 << idx))
3376 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3377 for (idx = 1; idx < 8; idx++)
3378 if (xscale->static_high_vectors_set & (1 << idx))
3379 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3380 return ERROR_OK;
3381 }
3382
3383 if (CMD_ARGC != 3)
3384 err = 1;
3385 else
3386 {
3387 int idx;
3388 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3389 uint32_t vec;
3390 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3391
3392 if (idx < 1 || idx >= 8)
3393 err = 1;
3394
3395 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3396 {
3397 xscale->static_low_vectors_set |= (1<<idx);
3398 xscale->static_low_vectors[idx] = vec;
3399 }
3400 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3401 {
3402 xscale->static_high_vectors_set |= (1<<idx);
3403 xscale->static_high_vectors[idx] = vec;
3404 }
3405 else
3406 err = 1;
3407 }
3408
3409 if (err)
3410 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3411
3412 return ERROR_OK;
3413 }
3414
3415
3416 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3417 {
3418 struct target *target = get_current_target(CMD_CTX);
3419 struct xscale_common *xscale = target_to_xscale(target);
3420 uint32_t dcsr_value;
3421 int retval;
3422
3423 retval = xscale_verify_pointer(CMD_CTX, xscale);
3424 if (retval != ERROR_OK)
3425 return retval;
3426
3427 if (target->state != TARGET_HALTED)
3428 {
3429 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3430 return ERROR_OK;
3431 }
3432
3433 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3434 {
3435 struct xscale_trace_data *td, *next_td;
3436 xscale->trace.buffer_enabled = 1;
3437
3438 /* free old trace data */
3439 td = xscale->trace.data;
3440 while (td)
3441 {
3442 next_td = td->next;
3443
3444 if (td->entries)
3445 free(td->entries);
3446 free(td);
3447 td = next_td;
3448 }
3449 xscale->trace.data = NULL;
3450 }
3451 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3452 {
3453 xscale->trace.buffer_enabled = 0;
3454 }
3455
3456 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3457 {
3458 uint32_t fill = 1;
3459 if (CMD_ARGC >= 3)
3460 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3461 xscale->trace.buffer_fill = fill;
3462 }
3463 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3464 {
3465 xscale->trace.buffer_fill = -1;
3466 }
3467
3468 command_print(CMD_CTX, "trace buffer %s (%s)",
3469 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3470 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3471
3472 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3473 if (xscale->trace.buffer_fill >= 0)
3474 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3475 else
3476 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3477
3478 return ERROR_OK;
3479 }
3480
3481 COMMAND_HANDLER(xscale_handle_trace_image_command)
3482 {
3483 struct target *target = get_current_target(CMD_CTX);
3484 struct xscale_common *xscale = target_to_xscale(target);
3485 int retval;
3486
3487 if (CMD_ARGC < 1)
3488 {
3489 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3490 return ERROR_OK;
3491 }
3492
3493 retval = xscale_verify_pointer(CMD_CTX, xscale);
3494 if (retval != ERROR_OK)
3495 return retval;
3496
3497 if (xscale->trace.image)
3498 {
3499 image_close(xscale->trace.image);
3500 free(xscale->trace.image);
3501 command_print(CMD_CTX, "previously loaded image found and closed");
3502 }
3503
3504 xscale->trace.image = malloc(sizeof(struct image));
3505 xscale->trace.image->base_address_set = 0;
3506 xscale->trace.image->start_address_set = 0;
3507
3508 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3509 if (CMD_ARGC >= 2)
3510 {
3511 xscale->trace.image->base_address_set = 1;
3512 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3513 }
3514 else
3515 {
3516 xscale->trace.image->base_address_set = 0;
3517 }
3518
3519 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3520 {
3521 free(xscale->trace.image);
3522 xscale->trace.image = NULL;
3523 return ERROR_OK;
3524 }
3525
3526 return ERROR_OK;
3527 }
3528
3529 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3530 {
3531 struct target *target = get_current_target(CMD_CTX);
3532 struct xscale_common *xscale = target_to_xscale(target);
3533 struct xscale_trace_data *trace_data;
3534 struct fileio file;
3535 int retval;
3536
3537 retval = xscale_verify_pointer(CMD_CTX, xscale);
3538 if (retval != ERROR_OK)
3539 return retval;
3540
3541 if (target->state != TARGET_HALTED)
3542 {
3543 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3544 return ERROR_OK;
3545 }
3546
3547 if (CMD_ARGC < 1)
3548 {
3549 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3550 return ERROR_OK;
3551 }
3552
3553 trace_data = xscale->trace.data;
3554
3555 if (!trace_data)
3556 {
3557 command_print(CMD_CTX, "no trace data collected");
3558 return ERROR_OK;
3559 }
3560
3561 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3562 {
3563 return ERROR_OK;
3564 }
3565
3566 while (trace_data)
3567 {
3568 int i;
3569
3570 fileio_write_u32(&file, trace_data->chkpt0);
3571 fileio_write_u32(&file, trace_data->chkpt1);
3572 fileio_write_u32(&file, trace_data->last_instruction);
3573 fileio_write_u32(&file, trace_data->depth);
3574
3575 for (i = 0; i < trace_data->depth; i++)
3576 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3577
3578 trace_data = trace_data->next;
3579 }
3580
3581 fileio_close(&file);
3582
3583 return ERROR_OK;
3584 }
3585
3586 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3587 {
3588 struct target *target = get_current_target(CMD_CTX);
3589 struct xscale_common *xscale = target_to_xscale(target);
3590 int retval;
3591
3592 retval = xscale_verify_pointer(CMD_CTX, xscale);
3593 if (retval != ERROR_OK)
3594 return retval;
3595
3596 xscale_analyze_trace(target, CMD_CTX);
3597
3598 return ERROR_OK;
3599 }
3600
/*
 * "xscale cp15 register [value]"
 *
 * Read or write one of the cp15 registers mirrored in the OpenOCD
 * register cache.  With one argument the register is read and printed;
 * with two it is written via the debug handler's CP-write protocol.
 * Requires a halted target.
 */
COMMAND_HANDLER(xscale_handle_cp15)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}
	uint32_t reg_no = 0;
	struct reg *reg = NULL;
	if (CMD_ARGC > 0)
	{
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
		/* translate from xscale cp15 register number to openocd register */
		switch (reg_no)
		{
		case 0:
			reg_no = XSCALE_MAINID;
			break;
		case 1:
			reg_no = XSCALE_CTRL;
			break;
		case 2:
			reg_no = XSCALE_TTB;
			break;
		case 3:
			reg_no = XSCALE_DAC;
			break;
		case 5:
			reg_no = XSCALE_FSR;
			break;
		case 6:
			reg_no = XSCALE_FAR;
			break;
		case 13:
			reg_no = XSCALE_PID;
			break;
		case 15:
			reg_no = XSCALE_CPACCESS;
			break;
		default:
			/* cp15 registers without a cache entry here (4, 7..12, 14) */
			command_print(CMD_CTX, "invalid register number");
			return ERROR_INVALID_ARGUMENTS;
		}
		reg = &xscale->reg_cache->reg_list[reg_no];

	}
	if (CMD_ARGC == 1)
	{
		uint32_t value;

		/* read the selected cp15 register through the register cache */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
	}
	else if (CMD_ARGC == 2)
	{
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);

		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);
	}
	else
	{
		command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
	}

	return ERROR_OK;
}
3688
/* EXEC-mode subcommands of the "xscale" command group (most require a
 * halted target; each handler enforces that itself). */
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		/* note: same handler as "icache"; it dispatches on CMD_NAME */
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display 8-bit mask of vectors "
			"that should trigger debug entry",
		.usage = "[mask]",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' number|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
		.usage = "",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};
/* "xscale" command group: commands usable in any mode, plus the
 * EXEC-mode commands chained in from xscale_exec_command_handlers. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "target address",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level registrations: generic ARM commands plus the "xscale"
 * command group defined above. */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3805
/* target_type callback table for the XScale core; the debug interface
 * contract for each hook is defined in target_type.h */
struct target_type xscale_target =
{
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* memory access, both virtual (via MMU) and physical */
	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	/* generic ARM implementations suffice for these */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time the sign-in will be linked to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)