1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include "time_support.h"
37 #include "register.h"
38 #include "image.h"
39
40
41 /*
42 * Important XScale documents available as of October 2009 include:
43 *
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
48 *
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
56 *
57 * Chip-specific microarchitecture documents may also be useful.
58 */
59
60
61 /* forward declarations */
62 static int xscale_resume(struct target *, int current,
63 uint32_t address, int handle_breakpoints, int debug_execution);
64 static int xscale_debug_entry(struct target *);
65 static int xscale_restore_context(struct target *);
66 static int xscale_get_reg(struct reg *reg);
67 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
68 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
69 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
70 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_read_trace(struct target *);
72
73
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
76 *
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
81 */
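/* (Explanatory note: the handler image below is pushed into the mini-ICache
 * one 32-byte cache line at a time through the LDIC JTAG instruction; see
 * xscale_load_ic() and xscale_deassert_reset() later in this file.)
 */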
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
84
85 static char *const xscale_reg_list[] =
86 {
87 "XSCALE_MAINID", /* 0 */
88 "XSCALE_CACHETYPE",
89 "XSCALE_CTRL",
90 "XSCALE_AUXCTRL",
91 "XSCALE_TTB",
92 "XSCALE_DAC",
93 "XSCALE_FSR",
94 "XSCALE_FAR",
95 "XSCALE_PID",
96 "XSCALE_CPACCESS",
97 "XSCALE_IBCR0", /* 10 */
98 "XSCALE_IBCR1",
99 "XSCALE_DBR0",
100 "XSCALE_DBR1",
101 "XSCALE_DBCON",
102 "XSCALE_TBREG",
103 "XSCALE_CHKPT0",
104 "XSCALE_CHKPT1",
105 "XSCALE_DCSR",
106 "XSCALE_TX",
107 "XSCALE_RX", /* 20 */
108 "XSCALE_TXRXCTRL",
109 };
110
111 static const struct xscale_reg xscale_reg_arch_info[] =
112 {
113 {XSCALE_MAINID, NULL},
114 {XSCALE_CACHETYPE, NULL},
115 {XSCALE_CTRL, NULL},
116 {XSCALE_AUXCTRL, NULL},
117 {XSCALE_TTB, NULL},
118 {XSCALE_DAC, NULL},
119 {XSCALE_FSR, NULL},
120 {XSCALE_FAR, NULL},
121 {XSCALE_PID, NULL},
122 {XSCALE_CPACCESS, NULL},
123 {XSCALE_IBCR0, NULL},
124 {XSCALE_IBCR1, NULL},
125 {XSCALE_DBR0, NULL},
126 {XSCALE_DBR1, NULL},
127 {XSCALE_DBCON, NULL},
128 {XSCALE_TBREG, NULL},
129 {XSCALE_CHKPT0, NULL},
130 {XSCALE_CHKPT1, NULL},
131 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
132 {-1, NULL}, /* TX accessed via JTAG */
133 {-1, NULL}, /* RX accessed via JTAG */
134 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
135 };
136
137 /* convenience wrapper to access XScale specific registers */
138 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
139 {
140 uint8_t buf[4];
141
142 buf_set_u32(buf, 0, 32, value);
143
144 return xscale_set_reg(reg, buf);
145 }
146
147 static const char xscale_not[] = "target is not an XScale";
148
149 static int xscale_verify_pointer(struct command_context *cmd_ctx,
150 struct xscale_common *xscale)
151 {
152 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
153 command_print(cmd_ctx, xscale_not);
154 return ERROR_TARGET_INVALID;
155 }
156 return ERROR_OK;
157 }
158
159 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
160 {
161 if (tap == NULL)
162 return ERROR_FAIL;
163
164 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
165 {
166 struct scan_field field;
167 uint8_t scratch[4];
168
169 memset(&field, 0, sizeof field);
170 field.tap = tap;
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
174
175 jtag_add_ir_scan(1, &field, jtag_get_end_state());
176 }
177
178 return ERROR_OK;
179 }
180
181 static int xscale_read_dcsr(struct target *target)
182 {
183 struct xscale_common *xscale = target_to_xscale(target);
184 int retval;
185 struct scan_field fields[3];
186 uint8_t field0 = 0x0;
187 uint8_t field0_check_value = 0x2;
188 uint8_t field0_check_mask = 0x7;
189 uint8_t field2 = 0x0;
190 uint8_t field2_check_value = 0x0;
191 uint8_t field2_check_mask = 0x1;
192
193 jtag_set_end_state(TAP_DRPAUSE);
194 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
195
196 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
197 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
198
199 memset(&fields, 0, sizeof fields);
200
201 fields[0].tap = target->tap;
202 fields[0].num_bits = 3;
203 fields[0].out_value = &field0;
204 uint8_t tmp;
205 fields[0].in_value = &tmp;
206
207 fields[1].tap = target->tap;
208 fields[1].num_bits = 32;
209 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
210
211 fields[2].tap = target->tap;
212 fields[2].num_bits = 1;
213 fields[2].out_value = &field2;
214 uint8_t tmp2;
215 fields[2].in_value = &tmp2;
216
217 jtag_add_dr_scan(3, fields, jtag_get_end_state());
218
219 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
220 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
221
222 if ((retval = jtag_execute_queue()) != ERROR_OK)
223 {
224 LOG_ERROR("JTAG error while reading DCSR");
225 return retval;
226 }
227
228 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
229 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
230
231 /* write the register with the value we just read
 232         * (on this second pass, only the first bit of field0 is guaranteed to be 0)
233 */
234 field0_check_mask = 0x1;
235 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
236 fields[1].in_value = NULL;
237
238 jtag_set_end_state(TAP_IDLE);
239
240 jtag_add_dr_scan(3, fields, jtag_get_end_state());
241
 242        /* DANGER!!! this must be here. It makes sure that the arguments
 243         * to jtag_check_value_mask() do not go out of scope before the queue is executed! */
244 return jtag_execute_queue();
245 }
246
247
248 static void xscale_getbuf(jtag_callback_data_t arg)
249 {
250 uint8_t *in = (uint8_t *)arg;
251 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
252 }
253
254 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
255 {
256 if (num_words == 0)
257 return ERROR_INVALID_ARGUMENTS;
258
259 int retval = ERROR_OK;
260 tap_state_t path[3];
261 struct scan_field fields[3];
262 uint8_t *field0 = malloc(num_words * 1);
263 uint8_t field0_check_value = 0x2;
264 uint8_t field0_check_mask = 0x6;
265 uint32_t *field1 = malloc(num_words * 4);
266 uint8_t field2_check_value = 0x0;
267 uint8_t field2_check_mask = 0x1;
268 int words_done = 0;
269 int words_scheduled = 0;
270 int i;
271
272 path[0] = TAP_DRSELECT;
273 path[1] = TAP_DRCAPTURE;
274 path[2] = TAP_DRSHIFT;
275
276 memset(&fields, 0, sizeof fields);
277
278 fields[0].tap = target->tap;
279 fields[0].num_bits = 3;
280 fields[0].check_value = &field0_check_value;
281 fields[0].check_mask = &field0_check_mask;
282
283 fields[1].tap = target->tap;
284 fields[1].num_bits = 32;
285
286 fields[2].tap = target->tap;
287 fields[2].num_bits = 1;
288 fields[2].check_value = &field2_check_value;
289 fields[2].check_mask = &field2_check_mask;
290
291 jtag_set_end_state(TAP_IDLE);
292 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
293 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
294
295 /* repeat until all words have been collected */
296 int attempts = 0;
297 while (words_done < num_words)
298 {
299 /* schedule reads */
300 words_scheduled = 0;
301 for (i = words_done; i < num_words; i++)
302 {
303 fields[0].in_value = &field0[i];
304
305 jtag_add_pathmove(3, path);
306
307 fields[1].in_value = (uint8_t *)(field1 + i);
308
309 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
310
311 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
312
313 words_scheduled++;
314 }
315
316 if ((retval = jtag_execute_queue()) != ERROR_OK)
317 {
318 LOG_ERROR("JTAG error while receiving data from debug handler");
319 break;
320 }
321
322 /* examine results */
323 for (i = words_done; i < num_words; i++)
324 {
                                /* bit 0 clear means the debug handler had no data ready for this slot; discard it */
 325                         if (!(field0[i] & 1))
326 {
327 /* move backwards if necessary */
328 int j;
329 for (j = i; j < num_words - 1; j++)
330 {
331 field0[j] = field0[j + 1];
332 field1[j] = field1[j + 1];
333 }
334 words_scheduled--;
335 }
336 }
337 if (words_scheduled == 0)
338 {
 339                        if (attempts++ == 1000)
 340                        {
 341                                LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
342 retval = ERROR_TARGET_TIMEOUT;
343 break;
344 }
345 }
346
347 words_done += words_scheduled;
348 }
349
350 for (i = 0; i < num_words; i++)
351 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
352
 353        free(field1);
        free(field0);
354
355 return retval;
356 }
357
358 static int xscale_read_tx(struct target *target, int consume)
359 {
360 struct xscale_common *xscale = target_to_xscale(target);
361 tap_state_t path[3];
362 tap_state_t noconsume_path[6];
363 int retval;
364 struct timeval timeout, now;
365 struct scan_field fields[3];
366 uint8_t field0_in = 0x0;
367 uint8_t field0_check_value = 0x2;
368 uint8_t field0_check_mask = 0x6;
369 uint8_t field2_check_value = 0x0;
370 uint8_t field2_check_mask = 0x1;
371
372 jtag_set_end_state(TAP_IDLE);
373
374 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
375
376 path[0] = TAP_DRSELECT;
377 path[1] = TAP_DRCAPTURE;
378 path[2] = TAP_DRSHIFT;
379
380 noconsume_path[0] = TAP_DRSELECT;
381 noconsume_path[1] = TAP_DRCAPTURE;
382 noconsume_path[2] = TAP_DREXIT1;
383 noconsume_path[3] = TAP_DRPAUSE;
384 noconsume_path[4] = TAP_DREXIT2;
385 noconsume_path[5] = TAP_DRSHIFT;
386
387 memset(&fields, 0, sizeof fields);
388
389 fields[0].tap = target->tap;
390 fields[0].num_bits = 3;
391 fields[0].in_value = &field0_in;
392
393 fields[1].tap = target->tap;
394 fields[1].num_bits = 32;
395 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
396
397 fields[2].tap = target->tap;
398 fields[2].num_bits = 1;
399 uint8_t tmp;
400 fields[2].in_value = &tmp;
401
402 gettimeofday(&timeout, NULL);
403 timeval_add_time(&timeout, 1, 0);
404
405 for (;;)
406 {
407 /* if we want to consume the register content (i.e. clear TX_READY),
408 * we have to go straight from Capture-DR to Shift-DR
409 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
410 */
411 if (consume)
412 jtag_add_pathmove(3, path);
413 else
414 {
415 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
416 }
417
418 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
419
420 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
421 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
422
423 if ((retval = jtag_execute_queue()) != ERROR_OK)
424 {
425 LOG_ERROR("JTAG error while reading TX");
426 return ERROR_TARGET_TIMEOUT;
427 }
428
429 gettimeofday(&now, NULL);
 430                if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
431 {
432 LOG_ERROR("time out reading TX register");
433 return ERROR_TARGET_TIMEOUT;
434 }
435 if (!((!(field0_in & 1)) && consume))
436 {
437 goto done;
438 }
439 if (debug_level >= 3)
440 {
441 LOG_DEBUG("waiting 100ms");
442 alive_sleep(100); /* avoid flooding the logs */
443 } else
444 {
445 keep_alive();
446 }
447 }
448 done:
449
450 if (!(field0_in & 1))
451 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
452
453 return ERROR_OK;
454 }
455
456 static int xscale_write_rx(struct target *target)
457 {
458 struct xscale_common *xscale = target_to_xscale(target);
459 int retval;
460 struct timeval timeout, now;
461 struct scan_field fields[3];
462 uint8_t field0_out = 0x0;
463 uint8_t field0_in = 0x0;
464 uint8_t field0_check_value = 0x2;
465 uint8_t field0_check_mask = 0x6;
466 uint8_t field2 = 0x0;
467 uint8_t field2_check_value = 0x0;
468 uint8_t field2_check_mask = 0x1;
469
470 jtag_set_end_state(TAP_IDLE);
471
472 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
473
474 memset(&fields, 0, sizeof fields);
475
476 fields[0].tap = target->tap;
477 fields[0].num_bits = 3;
478 fields[0].out_value = &field0_out;
479 fields[0].in_value = &field0_in;
480
481 fields[1].tap = target->tap;
482 fields[1].num_bits = 32;
483 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
484
485 fields[2].tap = target->tap;
486 fields[2].num_bits = 1;
487 fields[2].out_value = &field2;
488 uint8_t tmp;
489 fields[2].in_value = &tmp;
490
491 gettimeofday(&timeout, NULL);
492 timeval_add_time(&timeout, 1, 0);
493
 494        /* poll until the debug handler has consumed the previous RX word (low bit reads back 0) */
495 LOG_DEBUG("polling RX");
496 for (;;)
497 {
498 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
499
500 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
501 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
502
503 if ((retval = jtag_execute_queue()) != ERROR_OK)
504 {
505 LOG_ERROR("JTAG error while writing RX");
506 return retval;
507 }
508
509 gettimeofday(&now, NULL);
 510                if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
511 {
512 LOG_ERROR("time out writing RX register");
513 return ERROR_TARGET_TIMEOUT;
514 }
515 if (!(field0_in & 1))
516 goto done;
517 if (debug_level >= 3)
518 {
519 LOG_DEBUG("waiting 100ms");
520 alive_sleep(100); /* avoid flooding the logs */
521 } else
522 {
523 keep_alive();
524 }
525 }
526 done:
527
528 /* set rx_valid */
529 field2 = 0x1;
530 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
531
532 if ((retval = jtag_execute_queue()) != ERROR_OK)
533 {
534 LOG_ERROR("JTAG error while writing RX");
535 return retval;
536 }
537
538 return ERROR_OK;
539 }
540
 541 /* send count elements of size bytes each to the debug handler */
542 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
543 {
544 uint32_t t[3];
545 int bits[3];
546 int retval;
547 int done_count = 0;
548
549 jtag_set_end_state(TAP_IDLE);
550
551 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
552
 553        bits[0] = 3;
 554        t[0] = 0;
 555        bits[1] = 32;
 556        t[2] = 1;
 557        bits[2] = 1;
558 int endianness = target->endianness;
559 while (done_count++ < count)
560 {
561 switch (size)
562 {
563 case 4:
564 if (endianness == TARGET_LITTLE_ENDIAN)
565 {
566 t[1]=le_to_h_u32(buffer);
567 } else
568 {
569 t[1]=be_to_h_u32(buffer);
570 }
571 break;
572 case 2:
573 if (endianness == TARGET_LITTLE_ENDIAN)
574 {
575 t[1]=le_to_h_u16(buffer);
576 } else
577 {
578 t[1]=be_to_h_u16(buffer);
579 }
580 break;
581 case 1:
582 t[1]=buffer[0];
583 break;
584 default:
585 LOG_ERROR("BUG: size neither 4, 2 nor 1");
586 return ERROR_INVALID_ARGUMENTS;
587 }
588 jtag_add_dr_out(target->tap,
589 3,
590 bits,
591 t,
592 jtag_set_end_state(TAP_IDLE));
593 buffer += size;
594 }
595
596 if ((retval = jtag_execute_queue()) != ERROR_OK)
597 {
598 LOG_ERROR("JTAG error while sending data to debug handler");
599 return retval;
600 }
601
602 return ERROR_OK;
603 }
604
605 static int xscale_send_u32(struct target *target, uint32_t value)
606 {
607 struct xscale_common *xscale = target_to_xscale(target);
608
609 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
610 return xscale_write_rx(target);
611 }
612
613 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
614 {
615 struct xscale_common *xscale = target_to_xscale(target);
616 int retval;
617 struct scan_field fields[3];
618 uint8_t field0 = 0x0;
619 uint8_t field0_check_value = 0x2;
620 uint8_t field0_check_mask = 0x7;
621 uint8_t field2 = 0x0;
622 uint8_t field2_check_value = 0x0;
623 uint8_t field2_check_mask = 0x1;
624
625 if (hold_rst != -1)
626 xscale->hold_rst = hold_rst;
627
628 if (ext_dbg_brk != -1)
629 xscale->external_debug_break = ext_dbg_brk;
630
631 jtag_set_end_state(TAP_IDLE);
632 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
633
634 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
635 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
636
637 memset(&fields, 0, sizeof fields);
638
639 fields[0].tap = target->tap;
640 fields[0].num_bits = 3;
641 fields[0].out_value = &field0;
642 uint8_t tmp;
643 fields[0].in_value = &tmp;
644
645 fields[1].tap = target->tap;
646 fields[1].num_bits = 32;
647 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
648
649 fields[2].tap = target->tap;
650 fields[2].num_bits = 1;
651 fields[2].out_value = &field2;
652 uint8_t tmp2;
653 fields[2].in_value = &tmp2;
654
655 jtag_add_dr_scan(3, fields, jtag_get_end_state());
656
657 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
658 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
659
660 if ((retval = jtag_execute_queue()) != ERROR_OK)
661 {
662 LOG_ERROR("JTAG error while writing DCSR");
663 return retval;
664 }
665
666 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
667 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
668
669 return ERROR_OK;
670 }
671
 672 /* parity of a 32-bit word: 0 if the number of set bits is even, 1 if odd */
 673 static unsigned int parity(unsigned int v)
674 {
675 // unsigned int ov = v;
676 v ^= v >> 16;
677 v ^= v >> 8;
678 v ^= v >> 4;
679 v &= 0xf;
680 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
681 return (0x6996 >> v) & 1;
682 }
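/* (Explanatory note: the three XOR folds above compress the parity of all 32
 * bits into the low nibble, and 0x6996 = 0b0110100110010110 is a 16-entry
 * lookup table whose bit n is the parity of n.  Example: v = 0x9 has two bits
 * set, and bit 9 of 0x6996 is 0, i.e. even parity.)
 */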
683
684 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
685 {
686 uint8_t packet[4];
687 uint8_t cmd;
688 int word;
689 struct scan_field fields[2];
690
691 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
692
693 /* LDIC into IR */
694 jtag_set_end_state(TAP_IDLE);
695 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
696
697 /* CMD is b011 to load a cacheline into the Mini ICache.
698 * Loading into the main ICache is deprecated, and unused.
699 * It's followed by three zero bits, and 27 address bits.
700 */
701 buf_set_u32(&cmd, 0, 6, 0x3);
702
703 /* virtual address of desired cache line */
704 buf_set_u32(packet, 0, 27, va >> 5);
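        /* (note: a mini-ICache line is 8 words / 32 bytes, so only address
         * bits [31:5] are sent) */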
705
706 memset(&fields, 0, sizeof fields);
707
708 fields[0].tap = target->tap;
709 fields[0].num_bits = 6;
710 fields[0].out_value = &cmd;
711
712 fields[1].tap = target->tap;
713 fields[1].num_bits = 27;
714 fields[1].out_value = packet;
715
716 jtag_add_dr_scan(2, fields, jtag_get_end_state());
717
718 /* rest of packet is a cacheline: 8 instructions, with parity */
719 fields[0].num_bits = 32;
720 fields[0].out_value = packet;
721
722 fields[1].num_bits = 1;
723 fields[1].out_value = &cmd;
724
725 for (word = 0; word < 8; word++)
726 {
727 buf_set_u32(packet, 0, 32, buffer[word]);
728
729 uint32_t value;
730 memcpy(&value, packet, sizeof(uint32_t));
731 cmd = parity(value);
732
733 jtag_add_dr_scan(2, fields, jtag_get_end_state());
734 }
735
736 return jtag_execute_queue();
737 }
738
739 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
740 {
741 uint8_t packet[4];
742 uint8_t cmd;
743 struct scan_field fields[2];
744
745 jtag_set_end_state(TAP_IDLE);
746 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
747
748 /* CMD for invalidate IC line b000, bits [6:4] b000 */
749 buf_set_u32(&cmd, 0, 6, 0x0);
750
751 /* virtual address of desired cache line */
752 buf_set_u32(packet, 0, 27, va >> 5);
753
754 memset(&fields, 0, sizeof fields);
755
756 fields[0].tap = target->tap;
757 fields[0].num_bits = 6;
758 fields[0].out_value = &cmd;
759
760 fields[1].tap = target->tap;
761 fields[1].num_bits = 27;
762 fields[1].out_value = packet;
763
764 jtag_add_dr_scan(2, fields, jtag_get_end_state());
765
766 return ERROR_OK;
767 }
768
769 static int xscale_update_vectors(struct target *target)
770 {
771 struct xscale_common *xscale = target_to_xscale(target);
772 int i;
773 int retval;
774
775 uint32_t low_reset_branch, high_reset_branch;
776
777 for (i = 1; i < 8; i++)
778 {
779 /* if there's a static vector specified for this exception, override */
780 if (xscale->static_high_vectors_set & (1 << i))
781 {
782 xscale->high_vectors[i] = xscale->static_high_vectors[i];
783 }
784 else
785 {
786 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
787 if (retval == ERROR_TARGET_TIMEOUT)
788 return retval;
789 if (retval != ERROR_OK)
790 {
791 /* Some of these reads will fail as part of normal execution */
792 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
793 }
794 }
795 }
796
797 for (i = 1; i < 8; i++)
798 {
799 if (xscale->static_low_vectors_set & (1 << i))
800 {
801 xscale->low_vectors[i] = xscale->static_low_vectors[i];
802 }
803 else
804 {
805 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
806 if (retval == ERROR_TARGET_TIMEOUT)
807 return retval;
808 if (retval != ERROR_OK)
809 {
810 /* Some of these reads will fail as part of normal execution */
811 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
812 }
813 }
814 }
815
816 /* calculate branches to debug handler */
817 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
818 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
819
820 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
821 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
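        /* (Explanatory note: an ARM B instruction encodes a signed word offset
         * relative to PC + 8, hence the "- 0x8" and ">> 2" above; the reset
         * vectors branch to an entry point 0x20 bytes into the debug handler.) */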
822
823 /* invalidate and load exception vectors in mini i-cache */
824 xscale_invalidate_ic_line(target, 0x0);
825 xscale_invalidate_ic_line(target, 0xffff0000);
826
827 xscale_load_ic(target, 0x0, xscale->low_vectors);
828 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
829
830 return ERROR_OK;
831 }
832
833 static int xscale_arch_state(struct target *target)
834 {
835 struct xscale_common *xscale = target_to_xscale(target);
836 struct arm *armv4_5 = &xscale->armv4_5_common;
837
838 static const char *state[] =
839 {
840 "disabled", "enabled"
841 };
842
843 static const char *arch_dbg_reason[] =
844 {
845 "", "\n(processor reset)", "\n(trace buffer full)"
846 };
847
848 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
849 {
850 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
851 return ERROR_INVALID_ARGUMENTS;
852 }
853
854 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
855 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
856 "MMU: %s, D-Cache: %s, I-Cache: %s"
857 "%s",
858 armv4_5_state_strings[armv4_5->core_state],
859 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
860 arm_mode_name(armv4_5->core_mode),
861 buf_get_u32(armv4_5->cpsr->value, 0, 32),
862 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
863 state[xscale->armv4_5_mmu.mmu_enabled],
864 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
865 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
866 arch_dbg_reason[xscale->arch_debug_reason]);
867
868 return ERROR_OK;
869 }
870
871 static int xscale_poll(struct target *target)
872 {
873 int retval = ERROR_OK;
874
875 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
876 {
877 enum target_state previous_state = target->state;
878 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
879 {
880
881 /* there's data to read from the tx register, we entered debug state */
882 target->state = TARGET_HALTED;
883
884 /* process debug entry, fetching current mode regs */
885 retval = xscale_debug_entry(target);
886 }
887 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
888 {
889 LOG_USER("error while polling TX register, reset CPU");
 890                        /* here we "lie" so GDB won't get stuck and a reset can be performed */
891 target->state = TARGET_HALTED;
892 }
893
894 /* debug_entry could have overwritten target state (i.e. immediate resume)
895 * don't signal event handlers in that case
896 */
897 if (target->state != TARGET_HALTED)
898 return ERROR_OK;
899
900 /* if target was running, signal that we halted
901 * otherwise we reentered from debug execution */
902 if (previous_state == TARGET_RUNNING)
903 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
904 else
905 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
906 }
907
908 return retval;
909 }
910
911 static int xscale_debug_entry(struct target *target)
912 {
913 struct xscale_common *xscale = target_to_xscale(target);
914 struct arm *armv4_5 = &xscale->armv4_5_common;
915 uint32_t pc;
916 uint32_t buffer[10];
917 int i;
918 int retval;
919 uint32_t moe;
920
921 /* clear external dbg break (will be written on next DCSR read) */
922 xscale->external_debug_break = 0;
923 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
924 return retval;
925
926 /* get r0, pc, r1 to r7 and cpsr */
927 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
928 return retval;
929
930 /* move r0 from buffer to register cache */
931 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
932 armv4_5->core_cache->reg_list[0].dirty = 1;
933 armv4_5->core_cache->reg_list[0].valid = 1;
934 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
935
936 /* move pc from buffer to register cache */
937 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
938 armv4_5->core_cache->reg_list[15].dirty = 1;
939 armv4_5->core_cache->reg_list[15].valid = 1;
940 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
941
942 /* move data from buffer to register cache */
943 for (i = 1; i <= 7; i++)
944 {
945 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
946 armv4_5->core_cache->reg_list[i].dirty = 1;
947 armv4_5->core_cache->reg_list[i].valid = 1;
948 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
949 }
950
951 arm_set_cpsr(armv4_5, buffer[9]);
952 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
953
954 if (!is_arm_mode(armv4_5->core_mode))
955 {
956 target->state = TARGET_UNKNOWN;
957 LOG_ERROR("cpsr contains invalid mode value - communication failure");
958 return ERROR_TARGET_FAILURE;
959 }
960 LOG_DEBUG("target entered debug state in %s mode",
961 arm_mode_name(armv4_5->core_mode));
962
963 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
964 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
965 {
966 xscale_receive(target, buffer, 8);
967 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
968 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
969 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
970 }
971 else
972 {
973 /* r8 to r14, but no spsr */
974 xscale_receive(target, buffer, 7);
975 }
976
977 /* move data from buffer to register cache */
978 for (i = 8; i <= 14; i++)
979 {
980 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
981 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
982 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
983 }
984
985 /* examine debug reason */
986 xscale_read_dcsr(target);
987 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
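        /* (note: MOE, the "method of entry" field, is DCSR bits [4:2]; it is
         * decoded in the switch below) */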
988
989 /* stored PC (for calculating fixup) */
990 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
991
992 switch (moe)
993 {
994 case 0x0: /* Processor reset */
995 target->debug_reason = DBG_REASON_DBGRQ;
996 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
997 pc -= 4;
998 break;
999 case 0x1: /* Instruction breakpoint hit */
1000 target->debug_reason = DBG_REASON_BREAKPOINT;
1001 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1002 pc -= 4;
1003 break;
1004 case 0x2: /* Data breakpoint hit */
1005 target->debug_reason = DBG_REASON_WATCHPOINT;
1006 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1007 pc -= 4;
1008 break;
1009 case 0x3: /* BKPT instruction executed */
1010 target->debug_reason = DBG_REASON_BREAKPOINT;
1011 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1012 pc -= 4;
1013 break;
1014 case 0x4: /* Ext. debug event */
1015 target->debug_reason = DBG_REASON_DBGRQ;
1016 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1017 pc -= 4;
1018 break;
 1019                case 0x5: /* Vector trap occurred */
1020 target->debug_reason = DBG_REASON_BREAKPOINT;
1021 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1022 pc -= 4;
1023 break;
1024 case 0x6: /* Trace buffer full break */
1025 target->debug_reason = DBG_REASON_DBGRQ;
1026 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1027 pc -= 4;
1028 break;
1029 case 0x7: /* Reserved (may flag Hot-Debug support) */
1030 default:
1031 LOG_ERROR("Method of Entry is 'Reserved'");
1032 exit(-1);
1033 break;
1034 }
1035
1036 /* apply PC fixup */
1037 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1038
1039 /* on the first debug entry, identify cache type */
1040 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1041 {
1042 uint32_t cache_type_reg;
1043
1044 /* read cp15 cache type register */
1045 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1046 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1047
1048 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1049 }
1050
1051 /* examine MMU and Cache settings */
1052 /* read cp15 control register */
1053 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1054 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1055 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1056 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1057 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1058
1059 /* tracing enabled, read collected trace data */
1060 if (xscale->trace.buffer_enabled)
1061 {
1062 xscale_read_trace(target);
1063 xscale->trace.buffer_fill--;
1064
1065 /* resume if we're still collecting trace data */
1066 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1067 && (xscale->trace.buffer_fill > 0))
1068 {
1069 xscale_resume(target, 1, 0x0, 1, 0);
1070 }
1071 else
1072 {
1073 xscale->trace.buffer_enabled = 0;
1074 }
1075 }
1076
1077 return ERROR_OK;
1078 }
1079
1080 static int xscale_halt(struct target *target)
1081 {
1082 struct xscale_common *xscale = target_to_xscale(target);
1083
1084 LOG_DEBUG("target->state: %s",
1085 target_state_name(target));
1086
1087 if (target->state == TARGET_HALTED)
1088 {
1089 LOG_DEBUG("target was already halted");
1090 return ERROR_OK;
1091 }
1092 else if (target->state == TARGET_UNKNOWN)
1093 {
 1094                /* this must not happen for an XScale target */
1095 LOG_ERROR("target was in unknown state when halt was requested");
1096 return ERROR_TARGET_INVALID;
1097 }
1098 else if (target->state == TARGET_RESET)
1099 {
1100 LOG_DEBUG("target->state == TARGET_RESET");
1101 }
1102 else
1103 {
1104 /* assert external dbg break */
1105 xscale->external_debug_break = 1;
1106 xscale_read_dcsr(target);
1107
1108 target->debug_reason = DBG_REASON_DBGRQ;
1109 }
1110
1111 return ERROR_OK;
1112 }
1113
1114 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1115 {
1116 struct xscale_common *xscale = target_to_xscale(target);
1117 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1118 int retval;
1119
1120 if (xscale->ibcr0_used)
1121 {
1122 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1123
1124 if (ibcr0_bp)
1125 {
1126 xscale_unset_breakpoint(target, ibcr0_bp);
1127 }
1128 else
1129 {
1130 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1131 exit(-1);
1132 }
1133 }
1134
1135 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1136 return retval;
1137
1138 return ERROR_OK;
1139 }
1140
1141 static int xscale_disable_single_step(struct target *target)
1142 {
1143 struct xscale_common *xscale = target_to_xscale(target);
1144 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1145 int retval;
1146
1147 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1148 return retval;
1149
1150 return ERROR_OK;
1151 }
1152
1153 static void xscale_enable_watchpoints(struct target *target)
1154 {
1155 struct watchpoint *watchpoint = target->watchpoints;
1156
1157 while (watchpoint)
1158 {
1159 if (watchpoint->set == 0)
1160 xscale_set_watchpoint(target, watchpoint);
1161 watchpoint = watchpoint->next;
1162 }
1163 }
1164
1165 static void xscale_enable_breakpoints(struct target *target)
1166 {
1167 struct breakpoint *breakpoint = target->breakpoints;
1168
1169 /* set any pending breakpoints */
1170 while (breakpoint)
1171 {
1172 if (breakpoint->set == 0)
1173 xscale_set_breakpoint(target, breakpoint);
1174 breakpoint = breakpoint->next;
1175 }
1176 }
1177
1178 static int xscale_resume(struct target *target, int current,
1179 uint32_t address, int handle_breakpoints, int debug_execution)
1180 {
1181 struct xscale_common *xscale = target_to_xscale(target);
1182 struct arm *armv4_5 = &xscale->armv4_5_common;
1183 struct breakpoint *breakpoint = target->breakpoints;
1184 uint32_t current_pc;
1185 int retval;
1186 int i;
1187
1188 LOG_DEBUG("-");
1189
1190 if (target->state != TARGET_HALTED)
1191 {
1192 LOG_WARNING("target not halted");
1193 return ERROR_TARGET_NOT_HALTED;
1194 }
1195
1196 if (!debug_execution)
1197 {
1198 target_free_all_working_areas(target);
1199 }
1200
1201 /* update vector tables */
1202 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1203 return retval;
1204
1205 /* current = 1: continue on current pc, otherwise continue at <address> */
1206 if (!current)
1207 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1208
1209 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1210
1211 /* if we're at the reset vector, we have to simulate the branch */
1212 if (current_pc == 0x0)
1213 {
1214 arm_simulate_step(target, NULL);
1215 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1216 }
1217
1218 /* the front-end may request us not to handle breakpoints */
1219 if (handle_breakpoints)
1220 {
1221 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1222 {
1223 uint32_t next_pc;
1224
1225 /* there's a breakpoint at the current PC, we have to step over it */
1226 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1227 xscale_unset_breakpoint(target, breakpoint);
1228
1229 /* calculate PC of next instruction */
1230 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1231 {
1232 uint32_t current_opcode;
1233 target_read_u32(target, current_pc, &current_opcode);
1234 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1235 }
1236
1237 LOG_DEBUG("enable single-step");
1238 xscale_enable_single_step(target, next_pc);
1239
1240 /* restore banked registers */
1241 xscale_restore_context(target);
1242
1243 /* send resume request (command 0x30 or 0x31)
1244 * clean the trace buffer if it is to be enabled (0x62) */
1245 if (xscale->trace.buffer_enabled)
1246 {
1247 xscale_send_u32(target, 0x62);
1248 xscale_send_u32(target, 0x31);
1249 }
1250 else
1251 xscale_send_u32(target, 0x30);
1252
1253 /* send CPSR */
1254 xscale_send_u32(target,
1255 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1256 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1257 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1258
1259 for (i = 7; i >= 0; i--)
1260 {
1261 /* send register */
1262 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1263 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1264 }
1265
1266 /* send PC */
1267 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1268 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1269
1270 /* wait for and process debug entry */
1271 xscale_debug_entry(target);
1272
1273 LOG_DEBUG("disable single-step");
1274 xscale_disable_single_step(target);
1275
1276 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1277 xscale_set_breakpoint(target, breakpoint);
1278 }
1279 }
1280
1281 /* enable any pending breakpoints and watchpoints */
1282 xscale_enable_breakpoints(target);
1283 xscale_enable_watchpoints(target);
1284
1285 /* restore banked registers */
1286 xscale_restore_context(target);
1287
1288 /* send resume request (command 0x30 or 0x31)
1289 * clean the trace buffer if it is to be enabled (0x62) */
1290 if (xscale->trace.buffer_enabled)
1291 {
1292 xscale_send_u32(target, 0x62);
1293 xscale_send_u32(target, 0x31);
1294 }
1295 else
1296 xscale_send_u32(target, 0x30);
1297
1298 /* send CPSR */
1299 xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
1300 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1301 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1302
1303 for (i = 7; i >= 0; i--)
1304 {
1305 /* send register */
1306 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1307 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1308 }
1309
1310 /* send PC */
1311 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1312 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1313
1314 target->debug_reason = DBG_REASON_NOTHALTED;
1315
1316 if (!debug_execution)
1317 {
1318 /* registers are now invalid */
1319 register_cache_invalidate(armv4_5->core_cache);
1320 target->state = TARGET_RUNNING;
1321 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1322 }
1323 else
1324 {
1325 target->state = TARGET_DEBUG_RUNNING;
1326 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1327 }
1328
1329 LOG_DEBUG("target resumed");
1330
1331 return ERROR_OK;
1332 }
1333
1334 static int xscale_step_inner(struct target *target, int current,
1335 uint32_t address, int handle_breakpoints)
1336 {
1337 struct xscale_common *xscale = target_to_xscale(target);
1338 struct arm *armv4_5 = &xscale->armv4_5_common;
1339 uint32_t next_pc;
1340 int retval;
1341 int i;
1342
1343 target->debug_reason = DBG_REASON_SINGLESTEP;
1344
1345 /* calculate PC of next instruction */
1346 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1347 {
1348 uint32_t current_opcode, current_pc;
1349 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1350
1351 target_read_u32(target, current_pc, &current_opcode);
1352 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1353 return retval;
1354 }
1355
1356 LOG_DEBUG("enable single-step");
1357 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1358 return retval;
1359
1360 /* restore banked registers */
1361 if ((retval = xscale_restore_context(target)) != ERROR_OK)
1362 return retval;
1363
1364 /* send resume request (command 0x30 or 0x31)
1365 * clean the trace buffer if it is to be enabled (0x62) */
1366 if (xscale->trace.buffer_enabled)
1367 {
1368 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1369 return retval;
1370 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1371 return retval;
1372 }
1373 else
1374 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1375 return retval;
1376
1377 /* send CPSR */
1378 retval = xscale_send_u32(target,
1379 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1380 if (retval != ERROR_OK)
1381 return retval;
1382 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1383 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1384
1385 for (i = 7; i >= 0; i--)
1386 {
1387 /* send register */
1388 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1389 return retval;
1390 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1391 }
1392
1393 /* send PC */
1394 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
1395 return retval;
1396 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1397
1398 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1399
1400 /* registers are now invalid */
1401 register_cache_invalidate(armv4_5->core_cache);
1402
1403 /* wait for and process debug entry */
1404 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1405 return retval;
1406
1407 LOG_DEBUG("disable single-step");
1408 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1409 return retval;
1410
1411 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1412
1413 return ERROR_OK;
1414 }
1415
1416 static int xscale_step(struct target *target, int current,
1417 uint32_t address, int handle_breakpoints)
1418 {
1419 struct arm *armv4_5 = target_to_armv4_5(target);
1420 struct breakpoint *breakpoint = target->breakpoints;
1421
1422 uint32_t current_pc;
1423 int retval;
1424
1425 if (target->state != TARGET_HALTED)
1426 {
1427 LOG_WARNING("target not halted");
1428 return ERROR_TARGET_NOT_HALTED;
1429 }
1430
1431 /* current = 1: continue on current pc, otherwise continue at <address> */
1432 if (!current)
1433 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1434
1435 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1436
1437 /* if we're at the reset vector, we have to simulate the step */
1438 if (current_pc == 0x0)
1439 {
1440 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1441 return retval;
1442 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1443
1444 target->debug_reason = DBG_REASON_SINGLESTEP;
1445 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1446
1447 return ERROR_OK;
1448 }
1449
1450 /* the front-end may request us not to handle breakpoints */
1451 if (handle_breakpoints)
1452 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1453 {
1454 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1455 return retval;
1456 }
1457
1458 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1459
1460 if (breakpoint)
1461 {
1462 xscale_set_breakpoint(target, breakpoint);
1463 }
1464
1465 LOG_DEBUG("target stepped");
1466
1467 return ERROR_OK;
1468
1469 }
1470
1471 static int xscale_assert_reset(struct target *target)
1472 {
1473 struct xscale_common *xscale = target_to_xscale(target);
1474
1475 LOG_DEBUG("target->state: %s",
1476 target_state_name(target));
1477
1478 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
 1479         * end up in T-L-R (Test-Logic-Reset), which would reset JTAG)
1480 */
1481 jtag_set_end_state(TAP_IDLE);
1482 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
1483
1484 /* set Hold reset, Halt mode and Trap Reset */
1485 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1486 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1487 xscale_write_dcsr(target, 1, 0);
1488
1489 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1490 xscale_jtag_set_instr(target->tap, 0x7f);
1491 jtag_execute_queue();
1492
1493 /* assert reset */
1494 jtag_add_reset(0, 1);
1495
1496 /* sleep 1ms, to be sure we fulfill any requirements */
1497 jtag_add_sleep(1000);
1498 jtag_execute_queue();
1499
1500 target->state = TARGET_RESET;
1501
1502 if (target->reset_halt)
1503 {
1504 int retval;
1505 if ((retval = target_halt(target)) != ERROR_OK)
1506 return retval;
1507 }
1508
1509 return ERROR_OK;
1510 }
1511
1512 static int xscale_deassert_reset(struct target *target)
1513 {
1514 struct xscale_common *xscale = target_to_xscale(target);
1515 struct breakpoint *breakpoint = target->breakpoints;
1516
1517 LOG_DEBUG("-");
1518
1519 xscale->ibcr_available = 2;
1520 xscale->ibcr0_used = 0;
1521 xscale->ibcr1_used = 0;
1522
1523 xscale->dbr_available = 2;
1524 xscale->dbr0_used = 0;
1525 xscale->dbr1_used = 0;
1526
1527 /* mark all hardware breakpoints as unset */
1528 while (breakpoint)
1529 {
1530 if (breakpoint->type == BKPT_HARD)
1531 {
1532 breakpoint->set = 0;
1533 }
1534 breakpoint = breakpoint->next;
1535 }
1536
1537 register_cache_invalidate(xscale->armv4_5_common.core_cache);
1538
 1539        /* FIXME mark hardware watchpoints as unset too. Also,
1540 * at least some of the XScale registers are invalid...
1541 */
1542
1543 /*
1544 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1545 * contents got invalidated. Safer to force that, so writing new
1546 * contents can't ever fail..
1547 */
1548 {
1549 uint32_t address;
1550 unsigned buf_cnt;
1551 const uint8_t *buffer = xscale_debug_handler;
1552 int retval;
1553
1554 /* release SRST */
1555 jtag_add_reset(0, 0);
1556
1557 /* wait 300ms; 150 and 100ms were not enough */
1558 jtag_add_sleep(300*1000);
1559
1560 jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
1561 jtag_execute_queue();
1562
1563 /* set Hold reset, Halt mode and Trap Reset */
1564 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1565 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1566 xscale_write_dcsr(target, 1, 0);
1567
1568 /* Load the debug handler into the mini-icache. Since
1569 * it's using halt mode (not monitor mode), it runs in
1570 * "Special Debug State" for access to registers, memory,
1571 * coprocessors, trace data, etc.
1572 */
1573 address = xscale->handler_address;
1574 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1575 binary_size > 0;
1576 binary_size -= buf_cnt, buffer += buf_cnt)
1577 {
1578 uint32_t cache_line[8];
1579 unsigned i;
1580
1581 buf_cnt = binary_size;
1582 if (buf_cnt > 32)
1583 buf_cnt = 32;
1584
1585 for (i = 0; i < buf_cnt; i += 4)
1586 {
1587 /* convert LE buffer to host-endian uint32_t */
1588 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1589 }
1590
1591 for (; i < 32; i += 4)
1592 {
1593 cache_line[i / 4] = 0xe1a08008;
1594 }
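                        /* (note: the 0xe1a08008 filler above decodes as
                         * "mov r8, r8", a no-op padding partial lines out to
                         * a full 8-word cache line) */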
1595
1596 /* only load addresses other than the reset vectors */
1597 if ((address % 0x400) != 0x0)
1598 {
1599 retval = xscale_load_ic(target, address,
1600 cache_line);
1601 if (retval != ERROR_OK)
1602 return retval;
1603 }
1604
1605 address += buf_cnt;
 1606                }
1607
1608 retval = xscale_load_ic(target, 0x0,
1609 xscale->low_vectors);
1610 if (retval != ERROR_OK)
1611 return retval;
1612 retval = xscale_load_ic(target, 0xffff0000,
1613 xscale->high_vectors);
1614 if (retval != ERROR_OK)
1615 return retval;
1616
1617 jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
1618
1619 jtag_add_sleep(100000);
1620
1621 /* set Hold reset, Halt mode and Trap Reset */
1622 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1623 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1624 xscale_write_dcsr(target, 1, 0);
1625
1626 /* clear Hold reset to let the target run (should enter debug handler) */
1627 xscale_write_dcsr(target, 0, 1);
1628 target->state = TARGET_RUNNING;
1629
1630 if (!target->reset_halt)
1631 {
1632 jtag_add_sleep(10000);
1633
1634 /* we should have entered debug now */
1635 xscale_debug_entry(target);
1636 target->state = TARGET_HALTED;
1637
1638 /* resume the target */
1639 xscale_resume(target, 1, 0x0, 1, 0);
1640 }
1641 }
1642
1643 return ERROR_OK;
1644 }
1645
1646 static int xscale_read_core_reg(struct target *target, struct reg *r,
1647 int num, enum armv4_5_mode mode)
1648 {
1649 /** \todo add debug handler support for core register reads */
1650 LOG_ERROR("not implemented");
1651 return ERROR_OK;
1652 }
1653
1654 static int xscale_write_core_reg(struct target *target, struct reg *r,
1655 int num, enum armv4_5_mode mode, uint32_t value)
1656 {
1657 /** \todo add debug handler support for core register writes */
1658 LOG_ERROR("not implemented");
1659 return ERROR_OK;
1660 }
1661
1662 static int xscale_full_context(struct target *target)
1663 {
1664 struct arm *armv4_5 = target_to_armv4_5(target);
1665
1666 uint32_t *buffer;
1667
1668 int i, j;
1669
1670 LOG_DEBUG("-");
1671
1672 if (target->state != TARGET_HALTED)
1673 {
1674 LOG_WARNING("target not halted");
1675 return ERROR_TARGET_NOT_HALTED;
1676 }
1677
1678 buffer = malloc(4 * 8);
1679
1680 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1681 * we can't enter User mode on an XScale (unpredictable),
1682 * but User shares registers with SYS
1683 */
1684 for (i = 1; i < 7; i++)
1685 {
1686 int valid = 1;
1687
1688 /* check if there are invalid registers in the current mode
1689 */
1690 for (j = 0; j <= 16; j++)
1691 {
1692 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1693 valid = 0;
1694 }
1695
1696 if (!valid)
1697 {
1698 uint32_t tmp_cpsr;
1699
1700 /* request banked registers */
1701 xscale_send_u32(target, 0x0);
1702
1703 tmp_cpsr = 0x0;
1704 tmp_cpsr |= armv4_5_number_to_mode(i);
1705 tmp_cpsr |= 0xc0; /* I/F bits */
1706
1707 /* send CPSR for desired mode */
1708 xscale_send_u32(target, tmp_cpsr);
1709
1710 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1711 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1712 {
1713 xscale_receive(target, buffer, 8);
 1714                                buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1715 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1716 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1717 }
1718 else
1719 {
1720 xscale_receive(target, buffer, 7);
1721 }
1722
1723 /* move data from buffer to register cache */
1724 for (j = 8; j <= 14; j++)
1725 {
1726 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1727 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1728 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1729 }
1730 }
1731 }
1732
1733 free(buffer);
1734
1735 return ERROR_OK;
1736 }
1737
1738 static int xscale_restore_context(struct target *target)
1739 {
1740 struct arm *armv4_5 = target_to_armv4_5(target);
1741
1742 int i, j;
1743
1744 if (target->state != TARGET_HALTED)
1745 {
1746 LOG_WARNING("target not halted");
1747 return ERROR_TARGET_NOT_HALTED;
1748 }
1749
1750 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1751 * we can't enter User mode on an XScale (unpredictable),
1752 * but User shares registers with SYS
1753 */
1754 for (i = 1; i < 7; i++)
1755 {
1756 int dirty = 0;
1757
 1758                /* check if there are dirty registers in the current mode
1759 */
1760 for (j = 8; j <= 14; j++)
1761 {
1762 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1763 dirty = 1;
1764 }
1765
1766 /* if not USR/SYS, check if the SPSR needs to be written */
1767 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1768 {
1769 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1770 dirty = 1;
1771 }
1772
1773 if (dirty)
1774 {
1775 uint32_t tmp_cpsr;
1776
1777 /* send banked registers */
1778 xscale_send_u32(target, 0x1);
1779
1780 tmp_cpsr = 0x0;
1781 tmp_cpsr |= armv4_5_number_to_mode(i);
1782 tmp_cpsr |= 0xc0; /* I/F bits */
1783
1784 /* send CPSR for desired mode */
1785 xscale_send_u32(target, tmp_cpsr);
1786
1787 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1788 for (j = 8; j <= 14; j++)
1789 {
 1790                                        xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1791 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1792 }
1793
1794 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1795 {
 1796                                        xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1797 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1798 }
1799 }
1800 }
1801
1802 return ERROR_OK;
1803 }
1804
1805 static int xscale_read_memory(struct target *target, uint32_t address,
1806 uint32_t size, uint32_t count, uint8_t *buffer)
1807 {
1808 struct xscale_common *xscale = target_to_xscale(target);
1809 uint32_t *buf32;
1810 uint32_t i;
1811 int retval;
1812
1813 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1814
1815 if (target->state != TARGET_HALTED)
1816 {
1817 LOG_WARNING("target not halted");
1818 return ERROR_TARGET_NOT_HALTED;
1819 }
1820
1821 /* sanitize arguments */
1822 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1823 return ERROR_INVALID_ARGUMENTS;
1824
1825 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1826 return ERROR_TARGET_UNALIGNED_ACCESS;
1827
1828 /* send memory read request (command 0x1n, n: access size) */
1829 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1830 return retval;
1831
1832 /* send base address for read request */
1833 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1834 return retval;
1835
1836 /* send number of requested data words */
1837 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1838 return retval;
1839
1840 /* receive data from target (count times 32-bit words in host endianness) */
1841 buf32 = malloc(4 * count);
1842 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1843 return retval;
1844
1845 /* extract data from host-endian buffer into byte stream */
1846 for (i = 0; i < count; i++)
1847 {
1848 switch (size)
1849 {
1850 case 4:
1851 target_buffer_set_u32(target, buffer, buf32[i]);
1852 buffer += 4;
1853 break;
1854 case 2:
1855 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1856 buffer += 2;
1857 break;
1858 case 1:
1859 *buffer++ = buf32[i] & 0xff;
1860 break;
1861 default:
1862 LOG_ERROR("invalid read size");
1863 return ERROR_INVALID_ARGUMENTS;
1864 }
1865 }
1866
1867 free(buf32);
1868
1869 /* examine DCSR, to see if Sticky Abort (SA) got set */
1870 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1871 return retval;
1872 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1873 {
1874 /* clear SA bit */
1875 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1876 return retval;
1877
1878 return ERROR_TARGET_DATA_ABORT;
1879 }
1880
1881 return ERROR_OK;
1882 }
1883
1884 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1885 uint32_t size, uint32_t count, uint8_t *buffer)
1886 {
1887 /** \todo: provide a non-stub implementation of this routine. */
1888 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1889 target_name(target), __func__);
1890 return ERROR_FAIL;
1891 }
1892
1893 static int xscale_write_memory(struct target *target, uint32_t address,
1894 uint32_t size, uint32_t count, uint8_t *buffer)
1895 {
1896 struct xscale_common *xscale = target_to_xscale(target);
1897 int retval;
1898
1899 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1900
1901 if (target->state != TARGET_HALTED)
1902 {
1903 LOG_WARNING("target not halted");
1904 return ERROR_TARGET_NOT_HALTED;
1905 }
1906
1907 /* sanitize arguments */
1908 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1909 return ERROR_INVALID_ARGUMENTS;
1910
1911 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1912 return ERROR_TARGET_UNALIGNED_ACCESS;
1913
1914 /* send memory write request (command 0x2n, n: access size) */
1915 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1916 return retval;
1917
1918 /* send base address for write request */
1919 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1920 return retval;
1921
1922 /* send number of data words to be written */
1923 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1924 return retval;
1925
1926 /* extract data from host-endian buffer into byte stream */
1927 #if 0
1928 for (i = 0; i < count; i++)
1929 {
1930 switch (size)
1931 {
1932 case 4:
1933 value = target_buffer_get_u32(target, buffer);
1934 xscale_send_u32(target, value);
1935 buffer += 4;
1936 break;
1937 case 2:
1938 value = target_buffer_get_u16(target, buffer);
1939 xscale_send_u32(target, value);
1940 buffer += 2;
1941 break;
1942 case 1:
1943 value = *buffer;
1944 xscale_send_u32(target, value);
1945 buffer += 1;
1946 break;
1947 default:
1948 LOG_ERROR("should never get here");
1949 exit(-1);
1950 }
1951 }
1952 #endif
1953 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1954 return retval;
1955
1956 /* examine DCSR, to see if Sticky Abort (SA) got set */
1957 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1958 return retval;
1959 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1960 {
1961 /* clear SA bit */
1962 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1963 return retval;
1964
1965 return ERROR_TARGET_DATA_ABORT;
1966 }
1967
1968 return ERROR_OK;
1969 }
1970
1971 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1972 uint32_t size, uint32_t count, uint8_t *buffer)
1973 {
1974 /** \todo: provide a non-stub implementation of this routine. */
1975 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1976 target_name(target), __func__);
1977 return ERROR_FAIL;
1978 }
1979
1980 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
1981 uint32_t count, uint8_t *buffer)
1982 {
1983 return xscale_write_memory(target, address, 4, count, buffer);
1984 }
1985
1986 static uint32_t xscale_get_ttb(struct target *target)
1987 {
1988 struct xscale_common *xscale = target_to_xscale(target);
1989 uint32_t ttb;
1990
1991 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1992 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1993
1994 return ttb;
1995 }
1996
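/* The two routines below toggle the MMU and cache enables through the CP15
 * control register held in the XSCALE_CTRL cache entry: per the masks used
 * here, bit 0 gates the MMU, bit 2 the data/unified cache and bit 12 the
 * instruction cache.  Before a cache is disabled it is cleaned/invalidated
 * with debug handler commands 0x50..0x52, and command 0x53 (cpwait) makes
 * sure the CP15 update has taken effect.
 */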
1997 static void xscale_disable_mmu_caches(struct target *target, int mmu,
1998 int d_u_cache, int i_cache)
1999 {
2000 struct xscale_common *xscale = target_to_xscale(target);
2001 uint32_t cp15_control;
2002
2003 /* read cp15 control register */
2004 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2005 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2006
2007 if (mmu)
2008 cp15_control &= ~0x1U;
2009
2010 if (d_u_cache)
2011 {
2012 /* clean DCache */
2013 xscale_send_u32(target, 0x50);
2014 xscale_send_u32(target, xscale->cache_clean_address);
2015
2016 /* invalidate DCache */
2017 xscale_send_u32(target, 0x51);
2018
2019 cp15_control &= ~0x4U;
2020 }
2021
2022 if (i_cache)
2023 {
2024 /* invalidate ICache */
2025 xscale_send_u32(target, 0x52);
2026 cp15_control &= ~0x1000U;
2027 }
2028
2029 /* write new cp15 control register */
2030 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2031
2032 /* execute cpwait to ensure outstanding operations complete */
2033 xscale_send_u32(target, 0x53);
2034 }
2035
2036 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2037 int d_u_cache, int i_cache)
2038 {
2039 struct xscale_common *xscale = target_to_xscale(target);
2040 uint32_t cp15_control;
2041
2042 /* read cp15 control register */
2043 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2044 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2045
2046 if (mmu)
2047 cp15_control |= 0x1U;
2048
2049 if (d_u_cache)
2050 cp15_control |= 0x4U;
2051
2052 if (i_cache)
2053 cp15_control |= 0x1000U;
2054
2055 /* write new cp15 control register */
2056 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2057
2058 /* execute cpwait to ensure outstanding operations complete */
2059 xscale_send_u32(target, 0x53);
2060 }
2061
2062 static int xscale_set_breakpoint(struct target *target,
2063 struct breakpoint *breakpoint)
2064 {
2065 int retval;
2066 struct xscale_common *xscale = target_to_xscale(target);
2067
2068 if (target->state != TARGET_HALTED)
2069 {
2070 LOG_WARNING("target not halted");
2071 return ERROR_TARGET_NOT_HALTED;
2072 }
2073
2074 if (breakpoint->set)
2075 {
2076 LOG_WARNING("breakpoint already set");
2077 return ERROR_OK;
2078 }
2079
2080 if (breakpoint->type == BKPT_HARD)
2081 {
2082 uint32_t value = breakpoint->address | 1;
2083 if (!xscale->ibcr0_used)
2084 {
2085 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2086 xscale->ibcr0_used = 1;
2087 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2088 }
2089 else if (!xscale->ibcr1_used)
2090 {
2091 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2092 xscale->ibcr1_used = 1;
2093 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2094 }
2095 else
2096 {
2097 LOG_ERROR("BUG: no hardware comparator available");
2098 return ERROR_OK;
2099 }
2100 }
2101 else if (breakpoint->type == BKPT_SOFT)
2102 {
2103 if (breakpoint->length == 4)
2104 {
2105 /* keep the original instruction in target endianness */
2106 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2107 {
2108 return retval;
2109 }
2110 /* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
2111 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2112 {
2113 return retval;
2114 }
2115 }
2116 else
2117 {
2118 /* keep the original instruction in target endianness */
2119 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2120 {
2121 return retval;
2122 }
2123 /* write the bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
2124 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2125 {
2126 return retval;
2127 }
2128 }
2129 breakpoint->set = 1;
2130 }
2131
2132 return ERROR_OK;
2133 }
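/* Note on the bookkeeping above: a hardware breakpoint claims one of the two
 * instruction breakpoint registers (IBCR0/IBCR1), with bit 0 acting as the
 * enable flag (hence "address | 1"); breakpoint->set records which comparator
 * was used (1 or 2) so xscale_unset_breakpoint() can clear the right one.
 * A software breakpoint saves the original opcode and overwrites it with the
 * ARM or Thumb BKPT pattern set up in xscale_init_arch_info().
 */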
2134
2135 static int xscale_add_breakpoint(struct target *target,
2136 struct breakpoint *breakpoint)
2137 {
2138 struct xscale_common *xscale = target_to_xscale(target);
2139
2140 if (target->state != TARGET_HALTED)
2141 {
2142 LOG_WARNING("target not halted");
2143 return ERROR_TARGET_NOT_HALTED;
2144 }
2145
2146 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2147 {
2148 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2149 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2150 }
2151
2152 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2153 {
2154 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes in length are supported");
2155 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2156 }
2157
2158 if (breakpoint->type == BKPT_HARD)
2159 {
2160 xscale->ibcr_available--;
2161 }
2162
2163 return ERROR_OK;
2164 }
2165
2166 static int xscale_unset_breakpoint(struct target *target,
2167 struct breakpoint *breakpoint)
2168 {
2169 int retval;
2170 struct xscale_common *xscale = target_to_xscale(target);
2171
2172 if (target->state != TARGET_HALTED)
2173 {
2174 LOG_WARNING("target not halted");
2175 return ERROR_TARGET_NOT_HALTED;
2176 }
2177
2178 if (!breakpoint->set)
2179 {
2180 LOG_WARNING("breakpoint not set");
2181 return ERROR_OK;
2182 }
2183
2184 if (breakpoint->type == BKPT_HARD)
2185 {
2186 if (breakpoint->set == 1)
2187 {
2188 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2189 xscale->ibcr0_used = 0;
2190 }
2191 else if (breakpoint->set == 2)
2192 {
2193 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2194 xscale->ibcr1_used = 0;
2195 }
2196 breakpoint->set = 0;
2197 }
2198 else
2199 {
2200 /* restore original instruction (kept in target endianness) */
2201 if (breakpoint->length == 4)
2202 {
2203 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2204 {
2205 return retval;
2206 }
2207 }
2208 else
2209 {
2210 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2211 {
2212 return retval;
2213 }
2214 }
2215 breakpoint->set = 0;
2216 }
2217
2218 return ERROR_OK;
2219 }
2220
2221 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2222 {
2223 struct xscale_common *xscale = target_to_xscale(target);
2224
2225 if (target->state != TARGET_HALTED)
2226 {
2227 LOG_WARNING("target not halted");
2228 return ERROR_TARGET_NOT_HALTED;
2229 }
2230
2231 if (breakpoint->set)
2232 {
2233 xscale_unset_breakpoint(target, breakpoint);
2234 }
2235
2236 if (breakpoint->type == BKPT_HARD)
2237 xscale->ibcr_available++;
2238
2239 return ERROR_OK;
2240 }
2241
2242 static int xscale_set_watchpoint(struct target *target,
2243 struct watchpoint *watchpoint)
2244 {
2245 struct xscale_common *xscale = target_to_xscale(target);
2246 uint8_t enable = 0;
2247 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2248 uint32_t dbcon_value;
2249
2250 if (target->state != TARGET_HALTED)
2251 {
2252 LOG_WARNING("target not halted");
2253 return ERROR_TARGET_NOT_HALTED;
2254 }
2255
2256 xscale_get_reg(dbcon);
2257 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2258 switch (watchpoint->rw)
2259 {
2260 case WPT_READ:
2261 enable = 0x3;
2262 break;
2263 case WPT_ACCESS:
2264 enable = 0x2;
2265 break;
2266 case WPT_WRITE:
2267 enable = 0x1;
2268 break;
2269 default:
2270 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2271 }
2272
2273 if (!xscale->dbr0_used)
2274 {
2275 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2276 dbcon_value |= enable;
2277 xscale_set_reg_u32(dbcon, dbcon_value);
2278 watchpoint->set = 1;
2279 xscale->dbr0_used = 1;
2280 }
2281 else if (!xscale->dbr1_used)
2282 {
2283 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2284 dbcon_value |= enable << 2;
2285 xscale_set_reg_u32(dbcon, dbcon_value);
2286 watchpoint->set = 2;
2287 xscale->dbr1_used = 1;
2288 }
2289 else
2290 {
2291 LOG_ERROR("BUG: no hardware comparator available");
2292 return ERROR_OK;
2293 }
2294
2295 return ERROR_OK;
2296 }
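/* Watchpoints share the single DBCON control register: the low two bits
 * qualify DBR0 and the next two bits DBR1 (hence "enable << 2" above).  Per
 * the switch on watchpoint->rw, 0x1 selects writes, 0x3 reads and 0x2 any
 * access.  watchpoint->set again records which comparator (1 or 2) was
 * claimed, so xscale_unset_watchpoint() can release it later.
 */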
2297
2298 static int xscale_add_watchpoint(struct target *target,
2299 struct watchpoint *watchpoint)
2300 {
2301 struct xscale_common *xscale = target_to_xscale(target);
2302
2303 if (target->state != TARGET_HALTED)
2304 {
2305 LOG_WARNING("target not halted");
2306 return ERROR_TARGET_NOT_HALTED;
2307 }
2308
2309 if (xscale->dbr_available < 1)
2310 {
2311 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2312 }
2313
2314 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2315 {
2316 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2317 }
2318
2319 xscale->dbr_available--;
2320
2321 return ERROR_OK;
2322 }
2323
2324 static int xscale_unset_watchpoint(struct target *target,
2325 struct watchpoint *watchpoint)
2326 {
2327 struct xscale_common *xscale = target_to_xscale(target);
2328 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2329 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2330
2331 if (target->state != TARGET_HALTED)
2332 {
2333 LOG_WARNING("target not halted");
2334 return ERROR_TARGET_NOT_HALTED;
2335 }
2336
2337 if (!watchpoint->set)
2338 {
2339 LOG_WARNING("watchpoint not set");
2340 return ERROR_OK;
2341 }
2342
2343 if (watchpoint->set == 1)
2344 {
2345 dbcon_value &= ~0x3;
2346 xscale_set_reg_u32(dbcon, dbcon_value);
2347 xscale->dbr0_used = 0;
2348 }
2349 else if (watchpoint->set == 2)
2350 {
2351 dbcon_value &= ~0xc;
2352 xscale_set_reg_u32(dbcon, dbcon_value);
2353 xscale->dbr1_used = 0;
2354 }
2355 watchpoint->set = 0;
2356
2357 return ERROR_OK;
2358 }
2359
2360 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2361 {
2362 struct xscale_common *xscale = target_to_xscale(target);
2363
2364 if (target->state != TARGET_HALTED)
2365 {
2366 LOG_WARNING("target not halted");
2367 return ERROR_TARGET_NOT_HALTED;
2368 }
2369
2370 if (watchpoint->set)
2371 {
2372 xscale_unset_watchpoint(target, watchpoint);
2373 }
2374
2375 xscale->dbr_available++;
2376
2377 return ERROR_OK;
2378 }
2379
2380 static int xscale_get_reg(struct reg *reg)
2381 {
2382 struct xscale_reg *arch_info = reg->arch_info;
2383 struct target *target = arch_info->target;
2384 struct xscale_common *xscale = target_to_xscale(target);
2385
2386 /* DCSR, TX and RX are accessible via JTAG */
2387 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2388 {
2389 return xscale_read_dcsr(arch_info->target);
2390 }
2391 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2392 {
2393 /* 1 = consume register content */
2394 return xscale_read_tx(arch_info->target, 1);
2395 }
2396 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2397 {
2398 /* can't read from RX register (host -> debug handler) */
2399 return ERROR_OK;
2400 }
2401 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2402 {
2403 /* can't (explicitly) read from TXRXCTRL register */
2404 return ERROR_OK;
2405 }
2406 else /* Other DBG registers have to be transferred by the debug handler */
2407 {
2408 /* send CP read request (command 0x40) */
2409 xscale_send_u32(target, 0x40);
2410
2411 /* send CP register number */
2412 xscale_send_u32(target, arch_info->dbg_handler_number);
2413
2414 /* read register value */
2415 xscale_read_tx(target, 1);
2416 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2417
2418 reg->dirty = 0;
2419 reg->valid = 1;
2420 }
2421
2422 return ERROR_OK;
2423 }
2424
2425 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2426 {
2427 struct xscale_reg *arch_info = reg->arch_info;
2428 struct target *target = arch_info->target;
2429 struct xscale_common *xscale = target_to_xscale(target);
2430 uint32_t value = buf_get_u32(buf, 0, 32);
2431
2432 /* DCSR, TX and RX are accessible via JTAG */
2433 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2434 {
2435 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2436 return xscale_write_dcsr(arch_info->target, -1, -1);
2437 }
2438 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2439 {
2440 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2441 return xscale_write_rx(arch_info->target);
2442 }
2443 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2444 {
2445 /* can't write to TX register (debug-handler -> host) */
2446 return ERROR_OK;
2447 }
2448 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2449 {
2450 /* can't (explicitly) write to TXRXCTRL register */
2451 return ERROR_OK;
2452 }
2453 else /* Other DBG registers have to be transferred by the debug handler */
2454 {
2455 /* send CP write request (command 0x41) */
2456 xscale_send_u32(target, 0x41);
2457
2458 /* send CP register number */
2459 xscale_send_u32(target, arch_info->dbg_handler_number);
2460
2461 /* send CP register value */
2462 xscale_send_u32(target, value);
2463 buf_set_u32(reg->value, 0, 32, value);
2464 }
2465
2466 return ERROR_OK;
2467 }
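/* Registers other than DCSR, TX and RX are not directly reachable over JTAG,
 * so xscale_get_reg()/xscale_set_reg() tunnel them through the debug handler:
 * command 0x40 requests a read and 0x41 a write, followed by the handler's
 * register index (dbg_handler_number) and, for writes, the new value.
 */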
2468
2469 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2470 {
2471 struct xscale_common *xscale = target_to_xscale(target);
2472 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2473 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2474
2475 /* send CP write request (command 0x41) */
2476 xscale_send_u32(target, 0x41);
2477
2478 /* send CP register number */
2479 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2480
2481 /* send CP register value */
2482 xscale_send_u32(target, value);
2483 buf_set_u32(dcsr->value, 0, 32, value);
2484
2485 return ERROR_OK;
2486 }
2487
2488 static int xscale_read_trace(struct target *target)
2489 {
2490 struct xscale_common *xscale = target_to_xscale(target);
2491 struct arm *armv4_5 = &xscale->armv4_5_common;
2492 struct xscale_trace_data **trace_data_p;
2493
2494 /* 258 words from debug handler
2495 * 256 trace buffer entries
2496 * 2 checkpoint addresses
2497 */
2498 uint32_t trace_buffer[258];
2499 int is_address[256];
2500 int i, j;
2501
2502 if (target->state != TARGET_HALTED)
2503 {
2504 LOG_WARNING("target must be stopped to read trace data");
2505 return ERROR_TARGET_NOT_HALTED;
2506 }
2507
2508 /* send read trace buffer command (command 0x61) */
2509 xscale_send_u32(target, 0x61);
2510
2511 /* receive trace buffer content */
2512 xscale_receive(target, trace_buffer, 258);
2513
2514 /* parse buffer backwards to identify address entries */
2515 for (i = 255; i >= 0; i--)
2516 {
2517 is_address[i] = 0;
2518 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2519 ((trace_buffer[i] & 0xf0) == 0xd0))
2520 {
2521 if (i > 0)
2522 is_address[--i] = 1;
2523 if (i > 0)
2524 is_address[--i] = 1;
2525 if (i > 0)
2526 is_address[--i] = 1;
2527 if (i > 0)
2528 is_address[--i] = 1;
2529 }
2530 }
2531
2532
2533 /* search first non-zero entry */
2534 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2535 ;
2536
2537 if (j == 256)
2538 {
2539 LOG_DEBUG("no trace data collected");
2540 return ERROR_XSCALE_NO_TRACE_DATA;
2541 }
2542
2543 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2544 ;
2545
2546 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2547 (*trace_data_p)->next = NULL;
2548 (*trace_data_p)->chkpt0 = trace_buffer[256];
2549 (*trace_data_p)->chkpt1 = trace_buffer[257];
2550 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2551 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2552 (*trace_data_p)->depth = 256 - j;
2553
2554 for (i = j; i < 256; i++)
2555 {
2556 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2557 if (is_address[i])
2558 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2559 else
2560 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2561 }
2562
2563 return ERROR_OK;
2564 }
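/* The 258 words fetched above are the raw trace buffer: 256 one-byte trace
 * entries followed by the two checkpoint registers (chkpt0/chkpt1).  Entries
 * whose high nibble is 0x9 or 0xd are indirect-branch messages, and the four
 * entries preceding such a message carry the branch target address, which is
 * why the backwards pass tags them as XSCALE_TRACE_ADDRESS rather than
 * XSCALE_TRACE_MESSAGE.
 */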
2565
2566 static int xscale_read_instruction(struct target *target,
2567 struct arm_instruction *instruction)
2568 {
2569 struct xscale_common *xscale = target_to_xscale(target);
2570 int i;
2571 int section = -1;
2572 size_t size_read;
2573 uint32_t opcode;
2574 int retval;
2575
2576 if (!xscale->trace.image)
2577 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2578
2579 /* search for the section the current instruction belongs to */
2580 for (i = 0; i < xscale->trace.image->num_sections; i++)
2581 {
2582 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2583 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2584 {
2585 section = i;
2586 break;
2587 }
2588 }
2589
2590 if (section == -1)
2591 {
2592 /* current instruction couldn't be found in the image */
2593 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2594 }
2595
2596 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2597 {
2598 uint8_t buf[4];
2599 if ((retval = image_read_section(xscale->trace.image, section,
2600 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2601 4, buf, &size_read)) != ERROR_OK)
2602 {
2603 LOG_ERROR("error while reading instruction: %i", retval);
2604 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2605 }
2606 opcode = target_buffer_get_u32(target, buf);
2607 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2608 }
2609 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2610 {
2611 uint8_t buf[2];
2612 if ((retval = image_read_section(xscale->trace.image, section,
2613 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2614 2, buf, &size_read)) != ERROR_OK)
2615 {
2616 LOG_ERROR("error while reading instruction: %i", retval);
2617 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2618 }
2619 opcode = target_buffer_get_u16(target, buf);
2620 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2621 }
2622 else
2623 {
2624 LOG_ERROR("BUG: unknown core state encountered");
2625 exit(-1);
2626 }
2627
2628 return ERROR_OK;
2629 }
2630
2631 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2632 int i, uint32_t *target)
2633 {
2634 /* if there are less than four entries prior to the indirect branch message
2635 * we can't extract the address */
2636 if (i < 4)
2637 {
2638 return -1;
2639 }
2640
2641 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2642 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2643
2644 return 0;
2645 }
2646
2647 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2648 {
2649 struct xscale_common *xscale = target_to_xscale(target);
2650 int next_pc_ok = 0;
2651 uint32_t next_pc = 0x0;
2652 struct xscale_trace_data *trace_data = xscale->trace.data;
2653 int retval;
2654
2655 while (trace_data)
2656 {
2657 int i, chkpt;
2658 int rollover;
2659 int branch;
2660 int exception;
2661 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2662
2663 chkpt = 0;
2664 rollover = 0;
2665
2666 for (i = 0; i < trace_data->depth; i++)
2667 {
2668 next_pc_ok = 0;
2669 branch = 0;
2670 exception = 0;
2671
2672 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2673 continue;
2674
2675 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2676 {
2677 case 0: /* Exceptions */
2678 case 1:
2679 case 2:
2680 case 3:
2681 case 4:
2682 case 5:
2683 case 6:
2684 case 7:
2685 exception = (trace_data->entries[i].data & 0x70) >> 4;
2686 next_pc_ok = 1;
2687 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2688 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2689 break;
2690 case 8: /* Direct Branch */
2691 branch = 1;
2692 break;
2693 case 9: /* Indirect Branch */
2694 branch = 1;
2695 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2696 {
2697 next_pc_ok = 1;
2698 }
2699 break;
2700 case 13: /* Checkpointed Indirect Branch */
2701 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2702 {
2703 next_pc_ok = 1;
2704 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2705 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2706 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2707 }
2708 /* explicit fall-through */
2709 case 12: /* Checkpointed Direct Branch */
2710 branch = 1;
2711 if (chkpt == 0)
2712 {
2713 next_pc_ok = 1;
2714 next_pc = trace_data->chkpt0;
2715 chkpt++;
2716 }
2717 else if (chkpt == 1)
2718 {
2719 next_pc_ok = 1;
2720 next_pc = trace_data->chkpt1;
2721 chkpt++;
2722 }
2723 else
2724 {
2725 LOG_WARNING("more than two checkpointed branches encountered");
2726 }
2727 break;
2728 case 15: /* Roll-over */
2729 rollover++;
2730 continue;
2731 default: /* Reserved */
2732 command_print(cmd_ctx, "--- reserved trace message ---");
2733 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2734 return ERROR_OK;
2735 }
2736
2737 if (xscale->trace.pc_ok)
2738 {
2739 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2740 struct arm_instruction instruction;
2741
2742 if ((exception == 6) || (exception == 7))
2743 {
2744 /* IRQ or FIQ exception, no instruction executed */
2745 executed -= 1;
2746 }
2747
2748 while (executed-- >= 0)
2749 {
2750 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2751 {
2752 /* can't continue tracing with no image available */
2753 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2754 {
2755 return retval;
2756 }
2757 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2758 {
2759 /* TODO: handle incomplete images */
2760 }
2761 }
2762
2763 /* a precise abort on a load to the PC is included in the incremental
2764 * word count, other instructions causing data aborts are not included
2765 */
2766 if ((executed == 0) && (exception == 4)
2767 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2768 {
2769 if ((instruction.type == ARM_LDM)
2770 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2771 {
2772 executed--;
2773 }
2774 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2775 && (instruction.info.load_store.Rd != 15))
2776 {
2777 executed--;
2778 }
2779 }
2780
2781 /* only the last instruction executed
2782 * (the one that caused the control flow change)
2783 * could be a taken branch
2784 */
2785 if (((executed == -1) && (branch == 1)) &&
2786 (((instruction.type == ARM_B) ||
2787 (instruction.type == ARM_BL) ||
2788 (instruction.type == ARM_BLX)) &&
2789 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2790 {
2791 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2792 }
2793 else
2794 {
2795 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2796 }
2797 command_print(cmd_ctx, "%s", instruction.text);
2798 }
2799
2800 rollover = 0;
2801 }
2802
2803 if (next_pc_ok)
2804 {
2805 xscale->trace.current_pc = next_pc;
2806 xscale->trace.pc_ok = 1;
2807 }
2808 }
2809
2810 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2811 {
2812 struct arm_instruction instruction;
2813 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2814 {
2815 /* can't continue tracing with no image available */
2816 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2817 {
2818 return retval;
2819 }
2820 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2821 {
2822 /* TODO: handle incomplete images */
2823 }
2824 }
2825 command_print(cmd_ctx, "%s", instruction.text);
2826 }
2827
2828 trace_data = trace_data->next;
2829 }
2830
2831 return ERROR_OK;
2832 }
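/* Trace entries are decoded above as <message nibble><count nibble>: the high
 * nibble selects the message type (0-7 exception, 8/9 direct/indirect branch,
 * 12/13 their checkpointed variants, 15 roll-over) and the low nibble gives
 * the number of instructions executed since the previous message, with each
 * roll-over adding another 16.  The instruction text itself is recovered by
 * reading opcodes back out of the image loaded with "xscale trace_image".
 */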
2833
2834 static const struct reg_arch_type xscale_reg_type = {
2835 .get = xscale_get_reg,
2836 .set = xscale_set_reg,
2837 };
2838
2839 static void xscale_build_reg_cache(struct target *target)
2840 {
2841 struct xscale_common *xscale = target_to_xscale(target);
2842 struct arm *armv4_5 = &xscale->armv4_5_common;
2843 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2844 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2845 int i;
2846 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2847
2848 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2849
2850 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2851 cache_p = &(*cache_p)->next;
2852
2853 /* fill in values for the xscale reg cache */
2854 (*cache_p)->name = "XScale registers";
2855 (*cache_p)->next = NULL;
2856 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2857 (*cache_p)->num_regs = num_regs;
2858
2859 for (i = 0; i < num_regs; i++)
2860 {
2861 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2862 (*cache_p)->reg_list[i].value = calloc(4, 1);
2863 (*cache_p)->reg_list[i].dirty = 0;
2864 (*cache_p)->reg_list[i].valid = 0;
2865 (*cache_p)->reg_list[i].size = 32;
2866 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2867 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2868 arch_info[i] = xscale_reg_arch_info[i];
2869 arch_info[i].target = target;
2870 }
2871
2872 xscale->reg_cache = (*cache_p);
2873 }
2874
2875 static int xscale_init_target(struct command_context *cmd_ctx,
2876 struct target *target)
2877 {
2878 xscale_build_reg_cache(target);
2879 return ERROR_OK;
2880 }
2881
2882 static int xscale_init_arch_info(struct target *target,
2883 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2884 {
2885 struct arm *armv4_5;
2886 uint32_t high_reset_branch, low_reset_branch;
2887 int i;
2888
2889 armv4_5 = &xscale->armv4_5_common;
2890
2891 /* store architecture specific data (none so far) */
2892 xscale->common_magic = XSCALE_COMMON_MAGIC;
2893
2894 /* we don't really *need* variant info ... */
2895 if (variant) {
2896 int ir_length = 0;
2897
2898 if (strcmp(variant, "pxa250") == 0
2899 || strcmp(variant, "pxa255") == 0
2900 || strcmp(variant, "pxa26x") == 0)
2901 ir_length = 5;
2902 else if (strcmp(variant, "pxa27x") == 0
2903 || strcmp(variant, "ixp42x") == 0
2904 || strcmp(variant, "ixp45x") == 0
2905 || strcmp(variant, "ixp46x") == 0)
2906 ir_length = 7;
2907 else
2908 LOG_WARNING("%s: unrecognized variant %s",
2909 tap->dotted_name, variant);
2910
2911 if (ir_length && ir_length != tap->ir_length) {
2912 LOG_WARNING("%s: IR length for %s is %d; fixing",
2913 tap->dotted_name, variant, ir_length);
2914 tap->ir_length = ir_length;
2915 }
2916 }
2917
2918 /* the debug handler isn't installed (and thus not running) at this time */
2919 xscale->handler_address = 0xfe000800;
2920
2921 /* clear the vectors we keep locally for reference */
2922 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2923 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2924
2925 /* no user-specified vectors have been configured yet */
2926 xscale->static_low_vectors_set = 0x0;
2927 xscale->static_high_vectors_set = 0x0;
2928
2929 /* calculate branches to debug handler */
2930 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2931 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2932
2933 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2934 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2935
2936 for (i = 1; i <= 7; i++)
2937 {
2938 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2939 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2940 }
2941
2942 /* 64kB aligned region used for DCache cleaning */
2943 xscale->cache_clean_address = 0xfffe0000;
2944
2945 xscale->hold_rst = 0;
2946 xscale->external_debug_break = 0;
2947
2948 xscale->ibcr_available = 2;
2949 xscale->ibcr0_used = 0;
2950 xscale->ibcr1_used = 0;
2951
2952 xscale->dbr_available = 2;
2953 xscale->dbr0_used = 0;
2954 xscale->dbr1_used = 0;
2955
2956 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2957 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2958
2959 xscale->vector_catch = 0x1;
2960
2961 xscale->trace.capture_status = TRACE_IDLE;
2962 xscale->trace.data = NULL;
2963 xscale->trace.image = NULL;
2964 xscale->trace.buffer_enabled = 0;
2965 xscale->trace.buffer_fill = 0;
2966
2967 /* prepare ARMv4/5 specific information */
2968 armv4_5->arch_info = xscale;
2969 armv4_5->read_core_reg = xscale_read_core_reg;
2970 armv4_5->write_core_reg = xscale_write_core_reg;
2971 armv4_5->full_context = xscale_full_context;
2972
2973 armv4_5_init_arch_info(target, armv4_5);
2974
2975 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
2976 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
2977 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
2978 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
2979 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
2980 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
2981 xscale->armv4_5_mmu.has_tiny_pages = 1;
2982 xscale->armv4_5_mmu.mmu_enabled = 0;
2983
2984 return ERROR_OK;
2985 }
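/* Vector setup above: both the low (0x0) and high (0xffff0000) reset vectors
 * are pre-computed as branches into the debug handler at handler_address + 0x20,
 * while vectors 1..7 default to what is effectively a branch-to-self
 * (offset 0xfffffe) until the user overrides them with "xscale vector_table".
 */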
2986
2987 static int xscale_target_create(struct target *target, Jim_Interp *interp)
2988 {
2989 struct xscale_common *xscale;
2990
2991 if (sizeof xscale_debug_handler - 1 > 0x800) {
2992 LOG_ERROR("debug_handler.bin: larger than 2kb");
2993 return ERROR_FAIL;
2994 }
2995
2996 xscale = calloc(1, sizeof(*xscale));
2997 if (!xscale)
2998 return ERROR_FAIL;
2999
3000 return xscale_init_arch_info(target, xscale, target->tap,
3001 target->variant);
3002 }
3003
3004 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3005 {
3006 struct target *target = NULL;
3007 struct xscale_common *xscale;
3008 int retval;
3009 uint32_t handler_address;
3010
3011 if (CMD_ARGC < 2)
3012 {
3013 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3014 return ERROR_OK;
3015 }
3016
3017 if ((target = get_target(CMD_ARGV[0])) == NULL)
3018 {
3019 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3020 return ERROR_FAIL;
3021 }
3022
3023 xscale = target_to_xscale(target);
3024 retval = xscale_verify_pointer(CMD_CTX, xscale);
3025 if (retval != ERROR_OK)
3026 return retval;
3027
3028 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3029
3030 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3031 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3032 {
3033 xscale->handler_address = handler_address;
3034 }
3035 else
3036 {
3037 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3038 return ERROR_FAIL;
3039 }
3040
3041 return ERROR_OK;
3042 }
3043
3044 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3045 {
3046 struct target *target = NULL;
3047 struct xscale_common *xscale;
3048 int retval;
3049 uint32_t cache_clean_address;
3050
3051 if (CMD_ARGC < 2)
3052 {
3053 return ERROR_COMMAND_SYNTAX_ERROR;
3054 }
3055
3056 target = get_target(CMD_ARGV[0]);
3057 if (target == NULL)
3058 {
3059 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3060 return ERROR_FAIL;
3061 }
3062 xscale = target_to_xscale(target);
3063 retval = xscale_verify_pointer(CMD_CTX, xscale);
3064 if (retval != ERROR_OK)
3065 return retval;
3066
3067 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3068
3069 if (cache_clean_address & 0xffff)
3070 {
3071 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3072 }
3073 else
3074 {
3075 xscale->cache_clean_address = cache_clean_address;
3076 }
3077
3078 return ERROR_OK;
3079 }
3080
3081 COMMAND_HANDLER(xscale_handle_cache_info_command)
3082 {
3083 struct target *target = get_current_target(CMD_CTX);
3084 struct xscale_common *xscale = target_to_xscale(target);
3085 int retval;
3086
3087 retval = xscale_verify_pointer(CMD_CTX, xscale);
3088 if (retval != ERROR_OK)
3089 return retval;
3090
3091 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3092 }
3093
3094 static int xscale_virt2phys(struct target *target,
3095 uint32_t virtual, uint32_t *physical)
3096 {
3097 struct xscale_common *xscale = target_to_xscale(target);
3098 int type;
3099 uint32_t cb;
3100 int domain;
3101 uint32_t ap;
3102
3103 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3104 LOG_ERROR(xscale_not);
3105 return ERROR_TARGET_INVALID;
3106 }
3107
3108 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3109 if (type == -1)
3110 {
3111 return ret;
3112 }
3113 *physical = ret;
3114 return ERROR_OK;
3115 }
3116
3117 static int xscale_mmu(struct target *target, int *enabled)
3118 {
3119 struct xscale_common *xscale = target_to_xscale(target);
3120
3121 if (target->state != TARGET_HALTED)
3122 {
3123 LOG_ERROR("Target not halted");
3124 return ERROR_TARGET_INVALID;
3125 }
3126 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3127 return ERROR_OK;
3128 }
3129
3130 COMMAND_HANDLER(xscale_handle_mmu_command)
3131 {
3132 struct target *target = get_current_target(CMD_CTX);
3133 struct xscale_common *xscale = target_to_xscale(target);
3134 int retval;
3135
3136 retval = xscale_verify_pointer(CMD_CTX, xscale);
3137 if (retval != ERROR_OK)
3138 return retval;
3139
3140 if (target->state != TARGET_HALTED)
3141 {
3142 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3143 return ERROR_OK;
3144 }
3145
3146 if (CMD_ARGC >= 1)
3147 {
3148 bool enable;
3149 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3150 if (enable)
3151 xscale_enable_mmu_caches(target, 1, 0, 0);
3152 else
3153 xscale_disable_mmu_caches(target, 1, 0, 0);
3154 xscale->armv4_5_mmu.mmu_enabled = enable;
3155 }
3156
3157 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3158
3159 return ERROR_OK;
3160 }
3161
3162 COMMAND_HANDLER(xscale_handle_idcache_command)
3163 {
3164 struct target *target = get_current_target(CMD_CTX);
3165 struct xscale_common *xscale = target_to_xscale(target);
3166
3167 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3168 if (retval != ERROR_OK)
3169 return retval;
3170
3171 if (target->state != TARGET_HALTED)
3172 {
3173 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3174 return ERROR_OK;
3175 }
3176
3177 bool icache;
3178 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3179
3180 if (CMD_ARGC >= 1)
3181 {
3182 bool enable;
3183 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3184 if (enable)
3185 xscale_enable_mmu_caches(target, 0, !icache, icache);
3186 else
3187 xscale_disable_mmu_caches(target, 0, !icache, icache);
3188 if (icache)
3189 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3190 else
3191 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3192 }
3193
3194 bool enabled = icache ?
3195 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3196 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3197 const char *msg = enabled ? "enabled" : "disabled";
3198 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3199
3200 return ERROR_OK;
3201 }
3202
3203 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3204 {
3205 struct target *target = get_current_target(CMD_CTX);
3206 struct xscale_common *xscale = target_to_xscale(target);
3207 int retval;
3208
3209 retval = xscale_verify_pointer(CMD_CTX, xscale);
3210 if (retval != ERROR_OK)
3211 return retval;
3212
3213 if (CMD_ARGC < 1)
3214 {
3215 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3216 }
3217 else
3218 {
3219 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3220 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3221 xscale_write_dcsr(target, -1, -1);
3222 }
3223
3224 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3225
3226 return ERROR_OK;
3227 }
3228
3229
3230 COMMAND_HANDLER(xscale_handle_vector_table_command)
3231 {
3232 struct target *target = get_current_target(CMD_CTX);
3233 struct xscale_common *xscale = target_to_xscale(target);
3234 int err = 0;
3235 int retval;
3236
3237 retval = xscale_verify_pointer(CMD_CTX, xscale);
3238 if (retval != ERROR_OK)
3239 return retval;
3240
3241 if (CMD_ARGC == 0) /* print current settings */
3242 {
3243 int idx;
3244
3245 command_print(CMD_CTX, "active user-set static vectors:");
3246 for (idx = 1; idx < 8; idx++)
3247 if (xscale->static_low_vectors_set & (1 << idx))
3248 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3249 for (idx = 1; idx < 8; idx++)
3250 if (xscale->static_high_vectors_set & (1 << idx))
3251 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3252 return ERROR_OK;
3253 }
3254
3255 if (CMD_ARGC != 3)
3256 err = 1;
3257 else
3258 {
3259 int idx;
3260 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3261 uint32_t vec;
3262 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3263
3264 if (idx < 1 || idx >= 8)
3265 err = 1;
3266
3267 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3268 {
3269 xscale->static_low_vectors_set |= (1<<idx);
3270 xscale->static_low_vectors[idx] = vec;
3271 }
3272 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3273 {
3274 xscale->static_high_vectors_set |= (1<<idx);
3275 xscale->static_high_vectors[idx] = vec;
3276 }
3277 else
3278 err = 1;
3279 }
3280
3281 if (err)
3282 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3283
3284 return ERROR_OK;
3285 }
3286
3287
3288 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3289 {
3290 struct target *target = get_current_target(CMD_CTX);
3291 struct xscale_common *xscale = target_to_xscale(target);
3292 struct arm *armv4_5 = &xscale->armv4_5_common;
3293 uint32_t dcsr_value;
3294 int retval;
3295
3296 retval = xscale_verify_pointer(CMD_CTX, xscale);
3297 if (retval != ERROR_OK)
3298 return retval;
3299
3300 if (target->state != TARGET_HALTED)
3301 {
3302 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3303 return ERROR_OK;
3304 }
3305
3306 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3307 {
3308 struct xscale_trace_data *td, *next_td;
3309 xscale->trace.buffer_enabled = 1;
3310
3311 /* free old trace data */
3312 td = xscale->trace.data;
3313 while (td)
3314 {
3315 next_td = td->next;
3316
3317 if (td->entries)
3318 free(td->entries);
3319 free(td);
3320 td = next_td;
3321 }
3322 xscale->trace.data = NULL;
3323 }
3324 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3325 {
3326 xscale->trace.buffer_enabled = 0;
3327 }
3328
3329 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3330 {
3331 uint32_t fill = 1;
3332 if (CMD_ARGC >= 3)
3333 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3334 xscale->trace.buffer_fill = fill;
3335 }
3336 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3337 {
3338 xscale->trace.buffer_fill = -1;
3339 }
3340
3341 if (xscale->trace.buffer_enabled)
3342 {
3343 /* if we enable the trace buffer in fill-once
3344 * mode we know the address of the first instruction */
3345 xscale->trace.pc_ok = 1;
3346 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3347 }
3348 else
3349 {
3350 /* otherwise the address is unknown, and we have no known good PC */
3351 xscale->trace.pc_ok = 0;
3352 }
3353
3354 command_print(CMD_CTX, "trace buffer %s (%s)",
3355 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3356 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3357
3358 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3359 if (xscale->trace.buffer_fill >= 0)
3360 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3361 else
3362 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3363
3364 return ERROR_OK;
3365 }
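/* Within this handler, xscale->trace.buffer_fill > 0 means fill-once mode
 * (optionally repeated "fill" count times) and -1 means wrap mode.  The low
 * two DCSR bits are rewritten accordingly: cleared for wrap, with bit 1 set
 * for fill-once, and the update goes through xscale_write_dcsr_sw(), i.e.
 * via the debug handler rather than straight over JTAG.
 */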
3366
3367 COMMAND_HANDLER(xscale_handle_trace_image_command)
3368 {
3369 struct target *target = get_current_target(CMD_CTX);
3370 struct xscale_common *xscale = target_to_xscale(target);
3371 int retval;
3372
3373 if (CMD_ARGC < 1)
3374 {
3375 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3376 return ERROR_OK;
3377 }
3378
3379 retval = xscale_verify_pointer(CMD_CTX, xscale);
3380 if (retval != ERROR_OK)
3381 return retval;
3382
3383 if (xscale->trace.image)
3384 {
3385 image_close(xscale->trace.image);
3386 free(xscale->trace.image);
3387 command_print(CMD_CTX, "previously loaded image found and closed");
3388 }
3389
3390 xscale->trace.image = malloc(sizeof(struct image));
3391 xscale->trace.image->base_address_set = 0;
3392 xscale->trace.image->start_address_set = 0;
3393
3394 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3395 if (CMD_ARGC >= 2)
3396 {
3397 xscale->trace.image->base_address_set = 1;
3398 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3399 }
3400 else
3401 {
3402 xscale->trace.image->base_address_set = 0;
3403 }
3404
3405 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3406 {
3407 free(xscale->trace.image);
3408 xscale->trace.image = NULL;
3409 return ERROR_OK;
3410 }
3411
3412 return ERROR_OK;
3413 }
3414
3415 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3416 {
3417 struct target *target = get_current_target(CMD_CTX);
3418 struct xscale_common *xscale = target_to_xscale(target);
3419 struct xscale_trace_data *trace_data;
3420 struct fileio file;
3421 int retval;
3422
3423 retval = xscale_verify_pointer(CMD_CTX, xscale);
3424 if (retval != ERROR_OK)
3425 return retval;
3426
3427 if (target->state != TARGET_HALTED)
3428 {
3429 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3430 return ERROR_OK;
3431 }
3432
3433 if (CMD_ARGC < 1)
3434 {
3435 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3436 return ERROR_OK;
3437 }
3438
3439 trace_data = xscale->trace.data;
3440
3441 if (!trace_data)
3442 {
3443 command_print(CMD_CTX, "no trace data collected");
3444 return ERROR_OK;
3445 }
3446
3447 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3448 {
3449 return ERROR_OK;
3450 }
3451
3452 while (trace_data)
3453 {
3454 int i;
3455
3456 fileio_write_u32(&file, trace_data->chkpt0);
3457 fileio_write_u32(&file, trace_data->chkpt1);
3458 fileio_write_u32(&file, trace_data->last_instruction);
3459 fileio_write_u32(&file, trace_data->depth);
3460
3461 for (i = 0; i < trace_data->depth; i++)
3462 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3463
3464 trace_data = trace_data->next;
3465 }
3466
3467 fileio_close(&file);
3468
3469 return ERROR_OK;
3470 }
3471
3472 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3473 {
3474 struct target *target = get_current_target(CMD_CTX);
3475 struct xscale_common *xscale = target_to_xscale(target);
3476 int retval;
3477
3478 retval = xscale_verify_pointer(CMD_CTX, xscale);
3479 if (retval != ERROR_OK)
3480 return retval;
3481
3482 xscale_analyze_trace(target, CMD_CTX);
3483
3484 return ERROR_OK;
3485 }
3486
3487 COMMAND_HANDLER(xscale_handle_cp15)
3488 {
3489 struct target *target = get_current_target(CMD_CTX);
3490 struct xscale_common *xscale = target_to_xscale(target);
3491 int retval;
3492
3493 retval = xscale_verify_pointer(CMD_CTX, xscale);
3494 if (retval != ERROR_OK)
3495 return retval;
3496
3497 if (target->state != TARGET_HALTED)
3498 {
3499 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3500 return ERROR_OK;
3501 }
3502 uint32_t reg_no = 0;
3503 struct reg *reg = NULL;
3504 if (CMD_ARGC > 0)
3505 {
3506 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3507 /*translate from xscale cp15 register no to openocd register*/
3508 switch (reg_no)
3509 {
3510 case 0:
3511 reg_no = XSCALE_MAINID;
3512 break;
3513 case 1:
3514 reg_no = XSCALE_CTRL;
3515 break;
3516 case 2:
3517 reg_no = XSCALE_TTB;
3518 break;
3519 case 3:
3520 reg_no = XSCALE_DAC;
3521 break;
3522 case 5:
3523 reg_no = XSCALE_FSR;
3524 break;
3525 case 6:
3526 reg_no = XSCALE_FAR;
3527 break;
3528 case 13:
3529 reg_no = XSCALE_PID;
3530 break;
3531 case 15:
3532 reg_no = XSCALE_CPACCESS;
3533 break;
3534 default:
3535 command_print(CMD_CTX, "invalid register number");
3536 return ERROR_INVALID_ARGUMENTS;
3537 }
3538 reg = &xscale->reg_cache->reg_list[reg_no];
3539
3540 }
3541 if (CMD_ARGC == 1)
3542 {
3543 uint32_t value;
3544
3545 /* read cp15 control register */
3546 xscale_get_reg(reg);
3547 value = buf_get_u32(reg->value, 0, 32);
3548 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3549 }
3550 else if (CMD_ARGC == 2)
3551 {
3552 uint32_t value;
3553 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3554
3555 /* send CP write request (command 0x41) */
3556 xscale_send_u32(target, 0x41);
3557
3558 /* send CP register number */
3559 xscale_send_u32(target, reg_no);
3560
3561 /* send CP register value */
3562 xscale_send_u32(target, value);
3563
3564 /* execute cpwait to ensure outstanding operations complete */
3565 xscale_send_u32(target, 0x53);
3566 }
3567 else
3568 {
3569 command_print(CMD_CTX, "usage: cp15 <register> [value]");
3570 }
3571
3572 return ERROR_OK;
3573 }
3574
3575 static const struct command_registration xscale_exec_command_handlers[] = {
3576 {
3577 .name = "cache_info",
3578 .handler = &xscale_handle_cache_info_command,
3579 .mode = COMMAND_EXEC,
3580 },
3581
3582 {
3583 .name = "mmu",
3584 .handler = &xscale_handle_mmu_command,
3585 .mode = COMMAND_EXEC,
3586 .usage = "[enable|disable]",
3587 .help = "enable or disable the MMU",
3588 },
3589 {
3590 .name = "icache",
3591 .handler = &xscale_handle_idcache_command,
3592 .mode = COMMAND_EXEC,
3593 .usage = "[enable|disable]",
3594 .help = "enable or disable the ICache",
3595 },
3596 {
3597 .name = "dcache",
3598 .handler = &xscale_handle_idcache_command,
3599 .mode = COMMAND_EXEC,
3600 .usage = "[enable|disable]",
3601 .help = "enable or disable the DCache",
3602 },
3603
3604 {
3605 .name = "vector_catch",
3606 .handler = &xscale_handle_vector_catch_command,
3607 .mode = COMMAND_EXEC,
3608 .help = "mask of vectors that should be caught",
3609 .usage = "[<mask>]",
3610 },
3611 {
3612 .name = "vector_table",
3613 .handler = &xscale_handle_vector_table_command,
3614 .mode = COMMAND_EXEC,
3615 .usage = "<high|low> <index> <code>",
3616 .help = "set static code for exception handler entry",
3617 },
3618
3619 {
3620 .name = "trace_buffer",
3621 .handler = &xscale_handle_trace_buffer_command,
3622 .mode = COMMAND_EXEC,
3623 .usage = "<enable | disable> [fill [n]|wrap]",
3624 },
3625 {
3626 .name = "dump_trace",
3627 .handler = &xscale_handle_dump_trace_command,
3628 .mode = COMMAND_EXEC,
3629 .help = "dump content of trace buffer to <file>",
3630 .usage = "<file>",
3631 },
3632 {
3633 .name = "analyze_trace",
3634 .handler = &xscale_handle_analyze_trace_buffer_command,
3635 .mode = COMMAND_EXEC,
3636 .help = "analyze content of trace buffer",
3637 },
3638 {
3639 .name = "trace_image",
3640 .handler = &xscale_handle_trace_image_command,
3641 .mode = COMMAND_EXEC,
3642 .help = "load image from <file> [base address]",
3643 .usage = "<file> [address] [type]",
3644 },
3645
3646 {
3647 .name = "cp15",
3648 .handler = &xscale_handle_cp15,
3649 .mode = COMMAND_EXEC,
3650 .help = "access coproc 15",
3651 .usage = "<register> [value]",
3652 },
3653 COMMAND_REGISTRATION_DONE
3654 };
3655 static const struct command_registration xscale_any_command_handlers[] = {
3656 {
3657 .name = "debug_handler",
3658 .handler = &xscale_handle_debug_handler_command,
3659 .mode = COMMAND_ANY,
3660 .usage = "<target#> <address>",
3661 },
3662 {
3663 .name = "cache_clean_address",
3664 .handler = &xscale_handle_cache_clean_address_command,
3665 .mode = COMMAND_ANY,
3666 },
3667 {
3668 .chain = xscale_exec_command_handlers,
3669 },
3670 COMMAND_REGISTRATION_DONE
3671 };
3672 static const struct command_registration xscale_command_handlers[] = {
3673 {
3674 .chain = arm_command_handlers,
3675 },
3676 {
3677 .name = "xscale",
3678 .mode = COMMAND_ANY,
3679 .help = "xscale command group",
3680 .chain = xscale_any_command_handlers,
3681 },
3682 COMMAND_REGISTRATION_DONE
3683 };
3684
3685 struct target_type xscale_target =
3686 {
3687 .name = "xscale",
3688
3689 .poll = xscale_poll,
3690 .arch_state = xscale_arch_state,
3691
3692 .target_request_data = NULL,
3693
3694 .halt = xscale_halt,
3695 .resume = xscale_resume,
3696 .step = xscale_step,
3697
3698 .assert_reset = xscale_assert_reset,
3699 .deassert_reset = xscale_deassert_reset,
3700 .soft_reset_halt = NULL,
3701
3702 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
3703
3704 .read_memory = xscale_read_memory,
3705 .read_phys_memory = xscale_read_phys_memory,
3706 .write_memory = xscale_write_memory,
3707 .write_phys_memory = xscale_write_phys_memory,
3708 .bulk_write_memory = xscale_bulk_write_memory,
3709
3710 .checksum_memory = arm_checksum_memory,
3711 .blank_check_memory = arm_blank_check_memory,
3712
3713 .run_algorithm = armv4_5_run_algorithm,
3714
3715 .add_breakpoint = xscale_add_breakpoint,
3716 .remove_breakpoint = xscale_remove_breakpoint,
3717 .add_watchpoint = xscale_add_watchpoint,
3718 .remove_watchpoint = xscale_remove_watchpoint,
3719
3720 .commands = xscale_command_handlers,
3721 .target_create = xscale_target_create,
3722 .init_target = xscale_init_target,
3723
3724 .virt2phys = xscale_virt2phys,
3725 .mmu = xscale_mmu
3726 };
