ARM: pass 'struct reg *' to register r/w routines
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include "time_support.h"
37 #include "register.h"
38 #include "image.h"
39
40
41 /*
42 * Important XScale documents available as of October 2009 include:
43 *
44 * Intel XScale® Core Developer's Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
48 *
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
56 *
57 * Chip-specific microarchitecture documents may also be useful.
58 */
59
60
61 /* forward declarations */
62 static int xscale_resume(struct target *, int current,
63 uint32_t address, int handle_breakpoints, int debug_execution);
64 static int xscale_debug_entry(struct target *);
65 static int xscale_restore_context(struct target *);
66 static int xscale_get_reg(struct reg *reg);
67 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
68 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
69 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
70 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_read_trace(struct target *);
72
73
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
76 *
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
81 */
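/* (In this file the handler image is streamed into the mini-ICache one
 * 32-byte cache line at a time via xscale_load_ic(); see
 * xscale_deassert_reset() below.)
 */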
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
84
85 static char *const xscale_reg_list[] =
86 {
87 "XSCALE_MAINID", /* 0 */
88 "XSCALE_CACHETYPE",
89 "XSCALE_CTRL",
90 "XSCALE_AUXCTRL",
91 "XSCALE_TTB",
92 "XSCALE_DAC",
93 "XSCALE_FSR",
94 "XSCALE_FAR",
95 "XSCALE_PID",
96 "XSCALE_CPACCESS",
97 "XSCALE_IBCR0", /* 10 */
98 "XSCALE_IBCR1",
99 "XSCALE_DBR0",
100 "XSCALE_DBR1",
101 "XSCALE_DBCON",
102 "XSCALE_TBREG",
103 "XSCALE_CHKPT0",
104 "XSCALE_CHKPT1",
105 "XSCALE_DCSR",
106 "XSCALE_TX",
107 "XSCALE_RX", /* 20 */
108 "XSCALE_TXRXCTRL",
109 };
110
111 static const struct xscale_reg xscale_reg_arch_info[] =
112 {
113 {XSCALE_MAINID, NULL},
114 {XSCALE_CACHETYPE, NULL},
115 {XSCALE_CTRL, NULL},
116 {XSCALE_AUXCTRL, NULL},
117 {XSCALE_TTB, NULL},
118 {XSCALE_DAC, NULL},
119 {XSCALE_FSR, NULL},
120 {XSCALE_FAR, NULL},
121 {XSCALE_PID, NULL},
122 {XSCALE_CPACCESS, NULL},
123 {XSCALE_IBCR0, NULL},
124 {XSCALE_IBCR1, NULL},
125 {XSCALE_DBR0, NULL},
126 {XSCALE_DBR1, NULL},
127 {XSCALE_DBCON, NULL},
128 {XSCALE_TBREG, NULL},
129 {XSCALE_CHKPT0, NULL},
130 {XSCALE_CHKPT1, NULL},
131 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
132 {-1, NULL}, /* TX accessed via JTAG */
133 {-1, NULL}, /* RX accessed via JTAG */
134 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
135 };
136
137 /* convenience wrapper to access XScale specific registers */
138 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
139 {
140 uint8_t buf[4];
141
142 buf_set_u32(buf, 0, 32, value);
143
144 return xscale_set_reg(reg, buf);
145 }
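/* Example use: xscale_enable_single_step() below arms the IBCR0 hardware
 * breakpoint with xscale_set_reg_u32(ibcr0, next_pc | 0x1).
 */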
146
147 static const char xscale_not[] = "target is not an XScale";
148
149 static int xscale_verify_pointer(struct command_context *cmd_ctx,
150 struct xscale_common *xscale)
151 {
152 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
153 command_print(cmd_ctx, xscale_not);
154 return ERROR_TARGET_INVALID;
155 }
156 return ERROR_OK;
157 }
158
159 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
160 {
161 if (tap == NULL)
162 return ERROR_FAIL;
163
164 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
165 {
166 struct scan_field field;
167 uint8_t scratch[4];
168
169 memset(&field, 0, sizeof field);
170 field.tap = tap;
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
174
175 jtag_add_ir_scan(1, &field, jtag_get_end_state());
176 }
177
178 return ERROR_OK;
179 }
180
181 static int xscale_read_dcsr(struct target *target)
182 {
183 struct xscale_common *xscale = target_to_xscale(target);
184 int retval;
185 struct scan_field fields[3];
186 uint8_t field0 = 0x0;
187 uint8_t field0_check_value = 0x2;
188 uint8_t field0_check_mask = 0x7;
189 uint8_t field2 = 0x0;
190 uint8_t field2_check_value = 0x0;
191 uint8_t field2_check_mask = 0x1;
192
193 jtag_set_end_state(TAP_DRPAUSE);
194 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
195
196 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
197 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
198
199 memset(&fields, 0, sizeof fields);
200
201 fields[0].tap = target->tap;
202 fields[0].num_bits = 3;
203 fields[0].out_value = &field0;
204 uint8_t tmp;
205 fields[0].in_value = &tmp;
206
207 fields[1].tap = target->tap;
208 fields[1].num_bits = 32;
209 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
210
211 fields[2].tap = target->tap;
212 fields[2].num_bits = 1;
213 fields[2].out_value = &field2;
214 uint8_t tmp2;
215 fields[2].in_value = &tmp2;
216
217 jtag_add_dr_scan(3, fields, jtag_get_end_state());
218
219 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
220 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
221
222 if ((retval = jtag_execute_queue()) != ERROR_OK)
223 {
224 LOG_ERROR("JTAG error while reading DCSR");
225 return retval;
226 }
227
228 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
229 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
230
231 /* write the register with the value we just read
232 * on this second pass, only the first bit of field0 is guaranteed to be 0
233 */
234 field0_check_mask = 0x1;
235 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
236 fields[1].in_value = NULL;
237
238 jtag_set_end_state(TAP_IDLE);
239
240 jtag_add_dr_scan(3, fields, jtag_get_end_state());
241
242 /* DANGER!!! this must be here. It will make sure that the arguments
243 * to jtag_set_check_value() do not go out of scope! */
244 return jtag_execute_queue();
245 }
246
247
248 static void xscale_getbuf(jtag_callback_data_t arg)
249 {
250 uint8_t *in = (uint8_t *)arg;
251 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
252 }
253
254 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
255 {
256 if (num_words == 0)
257 return ERROR_INVALID_ARGUMENTS;
258
259 int retval = ERROR_OK;
260 tap_state_t path[3];
261 struct scan_field fields[3];
262 uint8_t *field0 = malloc(num_words * 1);
263 uint8_t field0_check_value = 0x2;
264 uint8_t field0_check_mask = 0x6;
265 uint32_t *field1 = malloc(num_words * 4);
266 uint8_t field2_check_value = 0x0;
267 uint8_t field2_check_mask = 0x1;
268 int words_done = 0;
269 int words_scheduled = 0;
270 int i;
271
272 path[0] = TAP_DRSELECT;
273 path[1] = TAP_DRCAPTURE;
274 path[2] = TAP_DRSHIFT;
275
276 memset(&fields, 0, sizeof fields);
277
278 fields[0].tap = target->tap;
279 fields[0].num_bits = 3;
280 fields[0].check_value = &field0_check_value;
281 fields[0].check_mask = &field0_check_mask;
282
283 fields[1].tap = target->tap;
284 fields[1].num_bits = 32;
285
286 fields[2].tap = target->tap;
287 fields[2].num_bits = 1;
288 fields[2].check_value = &field2_check_value;
289 fields[2].check_mask = &field2_check_mask;
290
291 jtag_set_end_state(TAP_IDLE);
292 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
293 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
294
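/* Each DR scan returns a 3-bit status field (bit 0 set when the debug
 * handler had a valid TX word), the 32-bit data word, and one trailing
 * bit; words captured while bit 0 was clear are discarded and re-read
 * in the loop below.
 */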
295 /* repeat until all words have been collected */
296 int attempts = 0;
297 while (words_done < num_words)
298 {
299 /* schedule reads */
300 words_scheduled = 0;
301 for (i = words_done; i < num_words; i++)
302 {
303 fields[0].in_value = &field0[i];
304
305 jtag_add_pathmove(3, path);
306
307 fields[1].in_value = (uint8_t *)(field1 + i);
308
309 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
310
311 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
312
313 words_scheduled++;
314 }
315
316 if ((retval = jtag_execute_queue()) != ERROR_OK)
317 {
318 LOG_ERROR("JTAG error while receiving data from debug handler");
319 break;
320 }
321
322 /* examine results */
323 for (i = words_done; i < num_words; i++)
324 {
325 if (!(field0[i] & 1))
326 {
327 /* move backwards if necessary */
328 int j;
329 for (j = i; j < num_words - 1; j++)
330 {
331 field0[j] = field0[j + 1];
332 field1[j] = field1[j + 1];
333 }
334 words_scheduled--;
335 }
336 }
337 if (words_scheduled == 0)
338 {
339 if (attempts++ == 1000)
340 {
341 LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
342 retval = ERROR_TARGET_TIMEOUT;
343 break;
344 }
345 }
346
347 words_done += words_scheduled;
348 }
349
350 for (i = 0; i < num_words; i++)
351 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
352
 free(field0);
353 free(field1);
354
355 return retval;
356 }
357
358 static int xscale_read_tx(struct target *target, int consume)
359 {
360 struct xscale_common *xscale = target_to_xscale(target);
361 tap_state_t path[3];
362 tap_state_t noconsume_path[6];
363 int retval;
364 struct timeval timeout, now;
365 struct scan_field fields[3];
366 uint8_t field0_in = 0x0;
367 uint8_t field0_check_value = 0x2;
368 uint8_t field0_check_mask = 0x6;
369 uint8_t field2_check_value = 0x0;
370 uint8_t field2_check_mask = 0x1;
371
372 jtag_set_end_state(TAP_IDLE);
373
374 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
375
376 path[0] = TAP_DRSELECT;
377 path[1] = TAP_DRCAPTURE;
378 path[2] = TAP_DRSHIFT;
379
380 noconsume_path[0] = TAP_DRSELECT;
381 noconsume_path[1] = TAP_DRCAPTURE;
382 noconsume_path[2] = TAP_DREXIT1;
383 noconsume_path[3] = TAP_DRPAUSE;
384 noconsume_path[4] = TAP_DREXIT2;
385 noconsume_path[5] = TAP_DRSHIFT;
386
387 memset(&fields, 0, sizeof fields);
388
389 fields[0].tap = target->tap;
390 fields[0].num_bits = 3;
391 fields[0].in_value = &field0_in;
392
393 fields[1].tap = target->tap;
394 fields[1].num_bits = 32;
395 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
396
397 fields[2].tap = target->tap;
398 fields[2].num_bits = 1;
399 uint8_t tmp;
400 fields[2].in_value = &tmp;
401
402 gettimeofday(&timeout, NULL);
403 timeval_add_time(&timeout, 1, 0);
404
405 for (;;)
406 {
407 /* if we want to consume the register content (i.e. clear TX_READY),
408 * we have to go straight from Capture-DR to Shift-DR
409 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
410 */
411 if (consume)
412 jtag_add_pathmove(3, path);
413 else
414 {
415 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
416 }
417
418 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
419
420 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
421 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
422
423 if ((retval = jtag_execute_queue()) != ERROR_OK)
424 {
425 LOG_ERROR("JTAG error while reading TX");
426 return ERROR_TARGET_TIMEOUT;
427 }
428
429 gettimeofday(&now, NULL);
430 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
431 {
432 LOG_ERROR("time out reading TX register");
433 return ERROR_TARGET_TIMEOUT;
434 }
435 if (!((!(field0_in & 1)) && consume))
436 {
437 goto done;
438 }
439 if (debug_level >= 3)
440 {
441 LOG_DEBUG("waiting 100ms");
442 alive_sleep(100); /* avoid flooding the logs */
443 } else
444 {
445 keep_alive();
446 }
447 }
448 done:
449
450 if (!(field0_in & 1))
451 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
452
453 return ERROR_OK;
454 }
455
456 static int xscale_write_rx(struct target *target)
457 {
458 struct xscale_common *xscale = target_to_xscale(target);
459 int retval;
460 struct timeval timeout, now;
461 struct scan_field fields[3];
462 uint8_t field0_out = 0x0;
463 uint8_t field0_in = 0x0;
464 uint8_t field0_check_value = 0x2;
465 uint8_t field0_check_mask = 0x6;
466 uint8_t field2 = 0x0;
467 uint8_t field2_check_value = 0x0;
468 uint8_t field2_check_mask = 0x1;
469
470 jtag_set_end_state(TAP_IDLE);
471
472 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
473
474 memset(&fields, 0, sizeof fields);
475
476 fields[0].tap = target->tap;
477 fields[0].num_bits = 3;
478 fields[0].out_value = &field0_out;
479 fields[0].in_value = &field0_in;
480
481 fields[1].tap = target->tap;
482 fields[1].num_bits = 32;
483 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
484
485 fields[2].tap = target->tap;
486 fields[2].num_bits = 1;
487 fields[2].out_value = &field2;
488 uint8_t tmp;
489 fields[2].in_value = &tmp;
490
491 gettimeofday(&timeout, NULL);
492 timeval_add_time(&timeout, 1, 0);
493
494 /* poll until rx_read is low */
495 LOG_DEBUG("polling RX");
496 for (;;)
497 {
498 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
499
500 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
501 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
502
503 if ((retval = jtag_execute_queue()) != ERROR_OK)
504 {
505 LOG_ERROR("JTAG error while writing RX");
506 return retval;
507 }
508
509 gettimeofday(&now, NULL);
510 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
511 {
512 LOG_ERROR("time out writing RX register");
513 return ERROR_TARGET_TIMEOUT;
514 }
515 if (!(field0_in & 1))
516 goto done;
517 if (debug_level >= 3)
518 {
519 LOG_DEBUG("waiting 100ms");
520 alive_sleep(100); /* avoid flooding the logs */
521 } else
522 {
523 keep_alive();
524 }
525 }
526 done:
527
528 /* set rx_valid */
529 field2 = 0x1;
530 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
531
532 if ((retval = jtag_execute_queue()) != ERROR_OK)
533 {
534 LOG_ERROR("JTAG error while writing RX");
535 return retval;
536 }
537
538 return ERROR_OK;
539 }
540
541 /* send count elements, each of the given size in bytes, to the debug handler */
542 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
543 {
544 uint32_t t[3];
545 int bits[3];
546 int retval;
547 int done_count = 0;
548
549 jtag_set_end_state(TAP_IDLE);
550
551 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
552
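/* Each word goes out as one DR scan with three fields: a 3-bit field
 * driven as 0, the 32-bit data word, and a 1-bit "rx valid" flag driven
 * as 1 so the debug handler consumes the word.
 */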
553 bits[0]=3;
554 t[0]=0;
555 bits[1]=32;
556 t[2]=1;
557 bits[2]=1;
558 int endianness = target->endianness;
559 while (done_count++ < count)
560 {
561 switch (size)
562 {
563 case 4:
564 if (endianness == TARGET_LITTLE_ENDIAN)
565 {
566 t[1]=le_to_h_u32(buffer);
567 } else
568 {
569 t[1]=be_to_h_u32(buffer);
570 }
571 break;
572 case 2:
573 if (endianness == TARGET_LITTLE_ENDIAN)
574 {
575 t[1]=le_to_h_u16(buffer);
576 } else
577 {
578 t[1]=be_to_h_u16(buffer);
579 }
580 break;
581 case 1:
582 t[1]=buffer[0];
583 break;
584 default:
585 LOG_ERROR("BUG: size neither 4, 2 nor 1");
586 return ERROR_INVALID_ARGUMENTS;
587 }
588 jtag_add_dr_out(target->tap,
589 3,
590 bits,
591 t,
592 jtag_set_end_state(TAP_IDLE));
593 buffer += size;
594 }
595
596 if ((retval = jtag_execute_queue()) != ERROR_OK)
597 {
598 LOG_ERROR("JTAG error while sending data to debug handler");
599 return retval;
600 }
601
602 return ERROR_OK;
603 }
604
605 static int xscale_send_u32(struct target *target, uint32_t value)
606 {
607 struct xscale_common *xscale = target_to_xscale(target);
608
609 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
610 return xscale_write_rx(target);
611 }
612
613 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
614 {
615 struct xscale_common *xscale = target_to_xscale(target);
616 int retval;
617 struct scan_field fields[3];
618 uint8_t field0 = 0x0;
619 uint8_t field0_check_value = 0x2;
620 uint8_t field0_check_mask = 0x7;
621 uint8_t field2 = 0x0;
622 uint8_t field2_check_value = 0x0;
623 uint8_t field2_check_mask = 0x1;
624
625 if (hold_rst != -1)
626 xscale->hold_rst = hold_rst;
627
628 if (ext_dbg_brk != -1)
629 xscale->external_debug_break = ext_dbg_brk;
630
631 jtag_set_end_state(TAP_IDLE);
632 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
633
634 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
635 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
636
637 memset(&fields, 0, sizeof fields);
638
639 fields[0].tap = target->tap;
640 fields[0].num_bits = 3;
641 fields[0].out_value = &field0;
642 uint8_t tmp;
643 fields[0].in_value = &tmp;
644
645 fields[1].tap = target->tap;
646 fields[1].num_bits = 32;
647 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
648
649 fields[2].tap = target->tap;
650 fields[2].num_bits = 1;
651 fields[2].out_value = &field2;
652 uint8_t tmp2;
653 fields[2].in_value = &tmp2;
654
655 jtag_add_dr_scan(3, fields, jtag_get_end_state());
656
657 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
658 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
659
660 if ((retval = jtag_execute_queue()) != ERROR_OK)
661 {
662 LOG_ERROR("JTAG error while writing DCSR");
663 return retval;
664 }
665
666 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
667 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
668
669 return ERROR_OK;
670 }
671
672 /* parity of a 32-bit word: returns 0 if the number of set bits is even, 1 if odd */
673 static unsigned int parity (unsigned int v)
674 {
675 // unsigned int ov = v;
676 v ^= v >> 16;
677 v ^= v >> 8;
678 v ^= v >> 4;
679 v &= 0xf;
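/* 0x6996 is a 16-entry parity table packed into one constant: bit n of
 * 0x6996 holds the parity of the nibble value n. */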
680 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
681 return (0x6996 >> v) & 1;
682 }
683
684 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
685 {
686 uint8_t packet[4];
687 uint8_t cmd;
688 int word;
689 struct scan_field fields[2];
690
691 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
692
693 /* LDIC into IR */
694 jtag_set_end_state(TAP_IDLE);
695 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
696
697 /* CMD is b011 to load a cacheline into the Mini ICache.
698 * Loading into the main ICache is deprecated, and unused.
699 * It's followed by three zero bits, and 27 address bits.
700 */
701 buf_set_u32(&cmd, 0, 6, 0x3);
702
703 /* virtual address of desired cache line */
704 buf_set_u32(packet, 0, 27, va >> 5);
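/* the low 5 bits select a byte within the 32-byte line and are dropped */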
705
706 memset(&fields, 0, sizeof fields);
707
708 fields[0].tap = target->tap;
709 fields[0].num_bits = 6;
710 fields[0].out_value = &cmd;
711
712 fields[1].tap = target->tap;
713 fields[1].num_bits = 27;
714 fields[1].out_value = packet;
715
716 jtag_add_dr_scan(2, fields, jtag_get_end_state());
717
718 /* rest of packet is a cacheline: 8 instructions, with parity */
719 fields[0].num_bits = 32;
720 fields[0].out_value = packet;
721
722 fields[1].num_bits = 1;
723 fields[1].out_value = &cmd;
724
725 for (word = 0; word < 8; word++)
726 {
727 buf_set_u32(packet, 0, 32, buffer[word]);
728
729 uint32_t value;
730 memcpy(&value, packet, sizeof(uint32_t));
731 cmd = parity(value);
732
733 jtag_add_dr_scan(2, fields, jtag_get_end_state());
734 }
735
736 return jtag_execute_queue();
737 }
738
739 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
740 {
741 uint8_t packet[4];
742 uint8_t cmd;
743 struct scan_field fields[2];
744
745 jtag_set_end_state(TAP_IDLE);
746 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
747
748 /* CMD for invalidate IC line b000, bits [6:4] b000 */
749 buf_set_u32(&cmd, 0, 6, 0x0);
750
751 /* virtual address of desired cache line */
752 buf_set_u32(packet, 0, 27, va >> 5);
753
754 memset(&fields, 0, sizeof fields);
755
756 fields[0].tap = target->tap;
757 fields[0].num_bits = 6;
758 fields[0].out_value = &cmd;
759
760 fields[1].tap = target->tap;
761 fields[1].num_bits = 27;
762 fields[1].out_value = packet;
763
764 jtag_add_dr_scan(2, fields, jtag_get_end_state());
765
766 return ERROR_OK;
767 }
768
769 static int xscale_update_vectors(struct target *target)
770 {
771 struct xscale_common *xscale = target_to_xscale(target);
772 int i;
773 int retval;
774
775 uint32_t low_reset_branch, high_reset_branch;
776
777 for (i = 1; i < 8; i++)
778 {
779 /* if there's a static vector specified for this exception, override */
780 if (xscale->static_high_vectors_set & (1 << i))
781 {
782 xscale->high_vectors[i] = xscale->static_high_vectors[i];
783 }
784 else
785 {
786 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
787 if (retval == ERROR_TARGET_TIMEOUT)
788 return retval;
789 if (retval != ERROR_OK)
790 {
791 /* Some of these reads will fail as part of normal execution */
792 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
793 }
794 }
795 }
796
797 for (i = 1; i < 8; i++)
798 {
799 if (xscale->static_low_vectors_set & (1 << i))
800 {
801 xscale->low_vectors[i] = xscale->static_low_vectors[i];
802 }
803 else
804 {
805 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
806 if (retval == ERROR_TARGET_TIMEOUT)
807 return retval;
808 if (retval != ERROR_OK)
809 {
810 /* Some of these reads will fail as part of normal execution */
811 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
812 }
813 }
814 }
815
816 /* calculate branches to debug handler */
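/* An ARM B instruction encodes a signed 24-bit word offset relative to
 * the branch address plus 8 (pipeline prefetch), hence the subtraction
 * of the vector base and 8 and the shift right by 2.
 */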
817 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
818 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
819
820 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
821 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
822
823 /* invalidate and load exception vectors in mini i-cache */
824 xscale_invalidate_ic_line(target, 0x0);
825 xscale_invalidate_ic_line(target, 0xffff0000);
826
827 xscale_load_ic(target, 0x0, xscale->low_vectors);
828 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
829
830 return ERROR_OK;
831 }
832
833 static int xscale_arch_state(struct target *target)
834 {
835 struct xscale_common *xscale = target_to_xscale(target);
836 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
837
838 static const char *state[] =
839 {
840 "disabled", "enabled"
841 };
842
843 static const char *arch_dbg_reason[] =
844 {
845 "", "\n(processor reset)", "\n(trace buffer full)"
846 };
847
848 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
849 {
850 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
851 return ERROR_INVALID_ARGUMENTS;
852 }
853
854 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
855 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
856 "MMU: %s, D-Cache: %s, I-Cache: %s"
857 "%s",
858 armv4_5_state_strings[armv4_5->core_state],
859 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
860 arm_mode_name(armv4_5->core_mode),
861 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
862 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
863 state[xscale->armv4_5_mmu.mmu_enabled],
864 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
865 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
866 arch_dbg_reason[xscale->arch_debug_reason]);
867
868 return ERROR_OK;
869 }
870
871 static int xscale_poll(struct target *target)
872 {
873 int retval = ERROR_OK;
874
875 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
876 {
877 enum target_state previous_state = target->state;
878 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
879 {
880
881 /* there's data to read from the TX register, so we have entered debug state */
882 target->state = TARGET_HALTED;
883
884 /* process debug entry, fetching current mode regs */
885 retval = xscale_debug_entry(target);
886 }
887 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
888 {
889 LOG_USER("error while polling TX register, reset CPU");
890 /* here we "lie" so GDB won't get stuck and a reset can be performed */
891 target->state = TARGET_HALTED;
892 }
893
894 /* debug_entry could have overwritten target state (i.e. immediate resume)
895 * don't signal event handlers in that case
896 */
897 if (target->state != TARGET_HALTED)
898 return ERROR_OK;
899
900 /* if target was running, signal that we halted
901 * otherwise we reentered from debug execution */
902 if (previous_state == TARGET_RUNNING)
903 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
904 else
905 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
906 }
907
908 return retval;
909 }
910
911 static int xscale_debug_entry(struct target *target)
912 {
913 struct xscale_common *xscale = target_to_xscale(target);
914 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
915 uint32_t pc;
916 uint32_t buffer[10];
917 int i;
918 int retval;
919 uint32_t moe;
920
921 /* clear external dbg break (will be written on next DCSR read) */
922 xscale->external_debug_break = 0;
923 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
924 return retval;
925
926 /* get r0, pc, r1 to r7 and cpsr */
927 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
928 return retval;
929
930 /* move r0 from buffer to register cache */
931 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
932 armv4_5->core_cache->reg_list[0].dirty = 1;
933 armv4_5->core_cache->reg_list[0].valid = 1;
934 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
935
936 /* move pc from buffer to register cache */
937 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
938 armv4_5->core_cache->reg_list[15].dirty = 1;
939 armv4_5->core_cache->reg_list[15].valid = 1;
940 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
941
942 /* move data from buffer to register cache */
943 for (i = 1; i <= 7; i++)
944 {
945 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
946 armv4_5->core_cache->reg_list[i].dirty = 1;
947 armv4_5->core_cache->reg_list[i].valid = 1;
948 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
949 }
950
951 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
952 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
953 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
954 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
955
956 armv4_5->core_mode = buffer[9] & 0x1f;
957 if (!is_arm_mode(armv4_5->core_mode))
958 {
959 target->state = TARGET_UNKNOWN;
960 LOG_ERROR("cpsr contains invalid mode value - communication failure");
961 return ERROR_TARGET_FAILURE;
962 }
963 LOG_DEBUG("target entered debug state in %s mode",
964 arm_mode_name(armv4_5->core_mode));
965
966 if (buffer[9] & 0x20)
967 armv4_5->core_state = ARMV4_5_STATE_THUMB;
968 else
969 armv4_5->core_state = ARMV4_5_STATE_ARM;
970
971
972 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
973 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
974 {
975 xscale_receive(target, buffer, 8);
976 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
977 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
978 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
979 }
980 else
981 {
982 /* r8 to r14, but no spsr */
983 xscale_receive(target, buffer, 7);
984 }
985
986 /* move data from buffer to register cache */
987 for (i = 8; i <= 14; i++)
988 {
989 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
990 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
991 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
992 }
993
994 /* examine debug reason */
995 xscale_read_dcsr(target);
996 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
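/* MOE ("method of entry") is DCSR bits [4:2], per the XScale core manual;
 * it is decoded in the switch statement below. */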
997
998 /* stored PC (for calculating fixup) */
999 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1000
1001 switch (moe)
1002 {
1003 case 0x0: /* Processor reset */
1004 target->debug_reason = DBG_REASON_DBGRQ;
1005 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1006 pc -= 4;
1007 break;
1008 case 0x1: /* Instruction breakpoint hit */
1009 target->debug_reason = DBG_REASON_BREAKPOINT;
1010 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1011 pc -= 4;
1012 break;
1013 case 0x2: /* Data breakpoint hit */
1014 target->debug_reason = DBG_REASON_WATCHPOINT;
1015 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1016 pc -= 4;
1017 break;
1018 case 0x3: /* BKPT instruction executed */
1019 target->debug_reason = DBG_REASON_BREAKPOINT;
1020 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1021 pc -= 4;
1022 break;
1023 case 0x4: /* Ext. debug event */
1024 target->debug_reason = DBG_REASON_DBGRQ;
1025 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1026 pc -= 4;
1027 break;
1028 case 0x5: /* Vector trap occurred */
1029 target->debug_reason = DBG_REASON_BREAKPOINT;
1030 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1031 pc -= 4;
1032 break;
1033 case 0x6: /* Trace buffer full break */
1034 target->debug_reason = DBG_REASON_DBGRQ;
1035 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1036 pc -= 4;
1037 break;
1038 case 0x7: /* Reserved (may flag Hot-Debug support) */
1039 default:
1040 LOG_ERROR("Method of Entry is 'Reserved'");
1041 exit(-1);
1042 break;
1043 }
1044
1045 /* apply PC fixup */
1046 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1047
1048 /* on the first debug entry, identify cache type */
1049 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1050 {
1051 uint32_t cache_type_reg;
1052
1053 /* read cp15 cache type register */
1054 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1055 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1056
1057 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1058 }
1059
1060 /* examine MMU and Cache settings */
1061 /* read cp15 control register */
1062 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1063 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1064 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1065 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1066 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1067
1068 /* tracing enabled, read collected trace data */
1069 if (xscale->trace.buffer_enabled)
1070 {
1071 xscale_read_trace(target);
1072 xscale->trace.buffer_fill--;
1073
1074 /* resume if we're still collecting trace data */
1075 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1076 && (xscale->trace.buffer_fill > 0))
1077 {
1078 xscale_resume(target, 1, 0x0, 1, 0);
1079 }
1080 else
1081 {
1082 xscale->trace.buffer_enabled = 0;
1083 }
1084 }
1085
1086 return ERROR_OK;
1087 }
1088
1089 static int xscale_halt(struct target *target)
1090 {
1091 struct xscale_common *xscale = target_to_xscale(target);
1092
1093 LOG_DEBUG("target->state: %s",
1094 target_state_name(target));
1095
1096 if (target->state == TARGET_HALTED)
1097 {
1098 LOG_DEBUG("target was already halted");
1099 return ERROR_OK;
1100 }
1101 else if (target->state == TARGET_UNKNOWN)
1102 {
1103 /* this must not happen for an XScale target */
1104 LOG_ERROR("target was in unknown state when halt was requested");
1105 return ERROR_TARGET_INVALID;
1106 }
1107 else if (target->state == TARGET_RESET)
1108 {
1109 LOG_DEBUG("target->state == TARGET_RESET");
1110 }
1111 else
1112 {
1113 /* assert external dbg break */
1114 xscale->external_debug_break = 1;
1115 xscale_read_dcsr(target);
1116
1117 target->debug_reason = DBG_REASON_DBGRQ;
1118 }
1119
1120 return ERROR_OK;
1121 }
1122
1123 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1124 {
1125 struct xscale_common *xscale = target_to_xscale(target);
1126 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1127 int retval;
1128
1129 if (xscale->ibcr0_used)
1130 {
1131 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1132
1133 if (ibcr0_bp)
1134 {
1135 xscale_unset_breakpoint(target, ibcr0_bp);
1136 }
1137 else
1138 {
1139 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1140 exit(-1);
1141 }
1142 }
1143
1144 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1145 return retval;
1146
1147 return ERROR_OK;
1148 }
1149
1150 static int xscale_disable_single_step(struct target *target)
1151 {
1152 struct xscale_common *xscale = target_to_xscale(target);
1153 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1154 int retval;
1155
1156 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1157 return retval;
1158
1159 return ERROR_OK;
1160 }
1161
1162 static void xscale_enable_watchpoints(struct target *target)
1163 {
1164 struct watchpoint *watchpoint = target->watchpoints;
1165
1166 while (watchpoint)
1167 {
1168 if (watchpoint->set == 0)
1169 xscale_set_watchpoint(target, watchpoint);
1170 watchpoint = watchpoint->next;
1171 }
1172 }
1173
1174 static void xscale_enable_breakpoints(struct target *target)
1175 {
1176 struct breakpoint *breakpoint = target->breakpoints;
1177
1178 /* set any pending breakpoints */
1179 while (breakpoint)
1180 {
1181 if (breakpoint->set == 0)
1182 xscale_set_breakpoint(target, breakpoint);
1183 breakpoint = breakpoint->next;
1184 }
1185 }
1186
1187 static int xscale_resume(struct target *target, int current,
1188 uint32_t address, int handle_breakpoints, int debug_execution)
1189 {
1190 struct xscale_common *xscale = target_to_xscale(target);
1191 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
1192 struct breakpoint *breakpoint = target->breakpoints;
1193 uint32_t current_pc;
1194 int retval;
1195 int i;
1196
1197 LOG_DEBUG("-");
1198
1199 if (target->state != TARGET_HALTED)
1200 {
1201 LOG_WARNING("target not halted");
1202 return ERROR_TARGET_NOT_HALTED;
1203 }
1204
1205 if (!debug_execution)
1206 {
1207 target_free_all_working_areas(target);
1208 }
1209
1210 /* update vector tables */
1211 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1212 return retval;
1213
1214 /* current = 1: continue on current pc, otherwise continue at <address> */
1215 if (!current)
1216 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1217
1218 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1219
1220 /* if we're at the reset vector, we have to simulate the branch */
1221 if (current_pc == 0x0)
1222 {
1223 arm_simulate_step(target, NULL);
1224 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1225 }
1226
1227 /* the front-end may request us not to handle breakpoints */
1228 if (handle_breakpoints)
1229 {
1230 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1231 {
1232 uint32_t next_pc;
1233
1234 /* there's a breakpoint at the current PC, we have to step over it */
1235 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1236 xscale_unset_breakpoint(target, breakpoint);
1237
1238 /* calculate PC of next instruction */
1239 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1240 {
1241 uint32_t current_opcode;
1242 target_read_u32(target, current_pc, &current_opcode);
1243 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1244 }
1245
1246 LOG_DEBUG("enable single-step");
1247 xscale_enable_single_step(target, next_pc);
1248
1249 /* restore banked registers */
1250 xscale_restore_context(target);
1251
1252 /* send resume request (command 0x30 or 0x31)
1253 * clean the trace buffer if it is to be enabled (0x62) */
1254 if (xscale->trace.buffer_enabled)
1255 {
1256 xscale_send_u32(target, 0x62);
1257 xscale_send_u32(target, 0x31);
1258 }
1259 else
1260 xscale_send_u32(target, 0x30);
1261
1262 /* send CPSR */
1263 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1264 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1265
1266 for (i = 7; i >= 0; i--)
1267 {
1268 /* send register */
1269 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1270 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1271 }
1272
1273 /* send PC */
1274 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1275 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1276
1277 /* wait for and process debug entry */
1278 xscale_debug_entry(target);
1279
1280 LOG_DEBUG("disable single-step");
1281 xscale_disable_single_step(target);
1282
1283 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1284 xscale_set_breakpoint(target, breakpoint);
1285 }
1286 }
1287
1288 /* enable any pending breakpoints and watchpoints */
1289 xscale_enable_breakpoints(target);
1290 xscale_enable_watchpoints(target);
1291
1292 /* restore banked registers */
1293 xscale_restore_context(target);
1294
1295 /* send resume request (command 0x30 or 0x31)
1296 * clean the trace buffer if it is to be enabled (0x62) */
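/* The handler then expects the CPSR, r7 down to r0, and finally the PC,
 * each pushed through the RX register by xscale_send_u32() below. */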
1297 if (xscale->trace.buffer_enabled)
1298 {
1299 xscale_send_u32(target, 0x62);
1300 xscale_send_u32(target, 0x31);
1301 }
1302 else
1303 xscale_send_u32(target, 0x30);
1304
1305 /* send CPSR */
1306 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1307 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1308
1309 for (i = 7; i >= 0; i--)
1310 {
1311 /* send register */
1312 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1313 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1314 }
1315
1316 /* send PC */
1317 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1318 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1319
1320 target->debug_reason = DBG_REASON_NOTHALTED;
1321
1322 if (!debug_execution)
1323 {
1324 /* registers are now invalid */
1325 register_cache_invalidate(armv4_5->core_cache);
1326 target->state = TARGET_RUNNING;
1327 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1328 }
1329 else
1330 {
1331 target->state = TARGET_DEBUG_RUNNING;
1332 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1333 }
1334
1335 LOG_DEBUG("target resumed");
1336
1337 return ERROR_OK;
1338 }
1339
1340 static int xscale_step_inner(struct target *target, int current,
1341 uint32_t address, int handle_breakpoints)
1342 {
1343 struct xscale_common *xscale = target_to_xscale(target);
1344 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
1345 uint32_t next_pc;
1346 int retval;
1347 int i;
1348
1349 target->debug_reason = DBG_REASON_SINGLESTEP;
1350
1351 /* calculate PC of next instruction */
1352 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1353 {
1354 uint32_t current_opcode, current_pc;
1355 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1356
1357 target_read_u32(target, current_pc, &current_opcode);
1358 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1359 return retval;
1360 }
1361
1362 LOG_DEBUG("enable single-step");
1363 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1364 return retval;
1365
1366 /* restore banked registers */
1367 if ((retval = xscale_restore_context(target)) != ERROR_OK)
1368 return retval;
1369
1370 /* send resume request (command 0x30 or 0x31)
1371 * clean the trace buffer if it is to be enabled (0x62) */
1372 if (xscale->trace.buffer_enabled)
1373 {
1374 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1375 return retval;
1376 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1377 return retval;
1378 }
1379 else
1380 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1381 return retval;
1382
1383 /* send CPSR */
1384 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32))) != ERROR_OK)
1385 return retval;
1386 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1387
1388 for (i = 7; i >= 0; i--)
1389 {
1390 /* send register */
1391 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1392 return retval;
1393 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1394 }
1395
1396 /* send PC */
1397 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
1398 return retval;
1399 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1400
1401 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1402
1403 /* registers are now invalid */
1404 register_cache_invalidate(armv4_5->core_cache);
1405
1406 /* wait for and process debug entry */
1407 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1408 return retval;
1409
1410 LOG_DEBUG("disable single-step");
1411 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1412 return retval;
1413
1414 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1415
1416 return ERROR_OK;
1417 }
1418
1419 static int xscale_step(struct target *target, int current,
1420 uint32_t address, int handle_breakpoints)
1421 {
1422 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1423 struct breakpoint *breakpoint = target->breakpoints;
1424
1425 uint32_t current_pc;
1426 int retval;
1427
1428 if (target->state != TARGET_HALTED)
1429 {
1430 LOG_WARNING("target not halted");
1431 return ERROR_TARGET_NOT_HALTED;
1432 }
1433
1434 /* current = 1: continue on current pc, otherwise continue at <address> */
1435 if (!current)
1436 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1437
1438 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1439
1440 /* if we're at the reset vector, we have to simulate the step */
1441 if (current_pc == 0x0)
1442 {
1443 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1444 return retval;
1445 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1446
1447 target->debug_reason = DBG_REASON_SINGLESTEP;
1448 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1449
1450 return ERROR_OK;
1451 }
1452
1453 /* the front-end may request us not to handle breakpoints */
1454 if (handle_breakpoints)
1455 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1456 {
1457 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1458 return retval;
1459 }
1460
1461 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1462
1463 if (breakpoint)
1464 {
1465 xscale_set_breakpoint(target, breakpoint);
1466 }
1467
1468 LOG_DEBUG("target stepped");
1469
1470 return ERROR_OK;
1471
1472 }
1473
1474 static int xscale_assert_reset(struct target *target)
1475 {
1476 struct xscale_common *xscale = target_to_xscale(target);
1477
1478 LOG_DEBUG("target->state: %s",
1479 target_state_name(target));
1480
1481 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1482 * end up in T-L-R, which would reset JTAG)
1483 */
1484 jtag_set_end_state(TAP_IDLE);
1485 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
1486
1487 /* set Hold reset, Halt mode and Trap Reset */
1488 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1489 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1490 xscale_write_dcsr(target, 1, 0);
1491
1492 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1493 xscale_jtag_set_instr(target->tap, 0x7f);
1494 jtag_execute_queue();
1495
1496 /* assert reset */
1497 jtag_add_reset(0, 1);
1498
1499 /* sleep 1ms, to be sure we fulfill any requirements */
1500 jtag_add_sleep(1000);
1501 jtag_execute_queue();
1502
1503 target->state = TARGET_RESET;
1504
1505 if (target->reset_halt)
1506 {
1507 int retval;
1508 if ((retval = target_halt(target)) != ERROR_OK)
1509 return retval;
1510 }
1511
1512 return ERROR_OK;
1513 }
1514
1515 static int xscale_deassert_reset(struct target *target)
1516 {
1517 struct xscale_common *xscale = target_to_xscale(target);
1518 struct breakpoint *breakpoint = target->breakpoints;
1519
1520 LOG_DEBUG("-");
1521
1522 xscale->ibcr_available = 2;
1523 xscale->ibcr0_used = 0;
1524 xscale->ibcr1_used = 0;
1525
1526 xscale->dbr_available = 2;
1527 xscale->dbr0_used = 0;
1528 xscale->dbr1_used = 0;
1529
1530 /* mark all hardware breakpoints as unset */
1531 while (breakpoint)
1532 {
1533 if (breakpoint->type == BKPT_HARD)
1534 {
1535 breakpoint->set = 0;
1536 }
1537 breakpoint = breakpoint->next;
1538 }
1539
1540 register_cache_invalidate(xscale->armv4_5_common.core_cache);
1541
1542 /* FIXME mark hardware watchpoints as unset too. Also,
1543 * at least some of the XScale registers are invalid...
1544 */
1545
1546 /*
1547 * REVISIT: *assumes* we had an SRST+TRST reset so the mini-icache
1548 * contents got invalidated. Safer to force that, so writing new
1549 * contents can't ever fail.
1550 */
1551 {
1552 uint32_t address;
1553 unsigned buf_cnt;
1554 const uint8_t *buffer = xscale_debug_handler;
1555 int retval;
1556
1557 /* release SRST */
1558 jtag_add_reset(0, 0);
1559
1560 /* wait 300ms; 150 and 100ms were not enough */
1561 jtag_add_sleep(300*1000);
1562
1563 jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
1564 jtag_execute_queue();
1565
1566 /* set Hold reset, Halt mode and Trap Reset */
1567 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1568 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1569 xscale_write_dcsr(target, 1, 0);
1570
1571 /* Load the debug handler into the mini-icache. Since
1572 * it's using halt mode (not monitor mode), it runs in
1573 * "Special Debug State" for access to registers, memory,
1574 * coprocessors, trace data, etc.
1575 */
1576 address = xscale->handler_address;
1577 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1578 binary_size > 0;
1579 binary_size -= buf_cnt, buffer += buf_cnt)
1580 {
1581 uint32_t cache_line[8];
1582 unsigned i;
1583
1584 buf_cnt = binary_size;
1585 if (buf_cnt > 32)
1586 buf_cnt = 32;
1587
1588 for (i = 0; i < buf_cnt; i += 4)
1589 {
1590 /* convert LE buffer to host-endian uint32_t */
1591 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1592 }
1593
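/* pad the rest of the cache line with "mov r8, r8", a harmless NOP */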
1594 for (; i < 32; i += 4)
1595 {
1596 cache_line[i / 4] = 0xe1a08008;
1597 }
1598
1599 /* only load addresses other than the reset vectors */
1600 if ((address % 0x400) != 0x0)
1601 {
1602 retval = xscale_load_ic(target, address,
1603 cache_line);
1604 if (retval != ERROR_OK)
1605 return retval;
1606 }
1607
1608 address += buf_cnt;
1609 }
1610
1611 retval = xscale_load_ic(target, 0x0,
1612 xscale->low_vectors);
1613 if (retval != ERROR_OK)
1614 return retval;
1615 retval = xscale_load_ic(target, 0xffff0000,
1616 xscale->high_vectors);
1617 if (retval != ERROR_OK)
1618 return retval;
1619
1620 jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
1621
1622 jtag_add_sleep(100000);
1623
1624 /* set Hold reset, Halt mode and Trap Reset */
1625 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1626 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1627 xscale_write_dcsr(target, 1, 0);
1628
1629 /* clear Hold reset to let the target run (should enter debug handler) */
1630 xscale_write_dcsr(target, 0, 1);
1631 target->state = TARGET_RUNNING;
1632
1633 if (!target->reset_halt)
1634 {
1635 jtag_add_sleep(10000);
1636
1637 /* we should have entered debug now */
1638 xscale_debug_entry(target);
1639 target->state = TARGET_HALTED;
1640
1641 /* resume the target */
1642 xscale_resume(target, 1, 0x0, 1, 0);
1643 }
1644 }
1645
1646 return ERROR_OK;
1647 }
1648
1649 static int xscale_read_core_reg(struct target *target, struct reg *r,
1650 int num, enum armv4_5_mode mode)
1651 {
1652 /** \todo add debug handler support for core register reads */
1653 LOG_ERROR("not implemented");
1654 return ERROR_OK;
1655 }
1656
1657 static int xscale_write_core_reg(struct target *target, struct reg *r,
1658 int num, enum armv4_5_mode mode, uint32_t value)
1659 {
1660 /** \todo add debug handler support for core register writes */
1661 LOG_ERROR("not implemented");
1662 return ERROR_OK;
1663 }
1664
1665 static int xscale_full_context(struct target *target)
1666 {
1667 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1668
1669 uint32_t *buffer;
1670
1671 int i, j;
1672
1673 LOG_DEBUG("-");
1674
1675 if (target->state != TARGET_HALTED)
1676 {
1677 LOG_WARNING("target not halted");
1678 return ERROR_TARGET_NOT_HALTED;
1679 }
1680
1681 buffer = malloc(4 * 8);
1682
1683 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1684 * we can't enter User mode on an XScale (unpredictable),
1685 * but User shares registers with SYS
1686 */
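/* Protocol: command 0x0 asks the debug handler for banked registers;
 * a CPSR value selecting the mode (with I/F masked) follows, and the
 * handler replies with r8-r14 plus, for exception modes, the SPSR.
 */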
1687 for (i = 1; i < 7; i++)
1688 {
1689 int valid = 1;
1690
1691 /* check if there are any invalid registers in this mode
1692 */
1693 for (j = 0; j <= 16; j++)
1694 {
1695 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1696 valid = 0;
1697 }
1698
1699 if (!valid)
1700 {
1701 uint32_t tmp_cpsr;
1702
1703 /* request banked registers */
1704 xscale_send_u32(target, 0x0);
1705
1706 tmp_cpsr = 0x0;
1707 tmp_cpsr |= armv4_5_number_to_mode(i);
1708 tmp_cpsr |= 0xc0; /* I/F bits */
1709
1710 /* send CPSR for desired mode */
1711 xscale_send_u32(target, tmp_cpsr);
1712
1713 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1714 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1715 {
1716 xscale_receive(target, buffer, 8);
1717 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1718 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1719 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1720 }
1721 else
1722 {
1723 xscale_receive(target, buffer, 7);
1724 }
1725
1726 /* move data from buffer to register cache */
1727 for (j = 8; j <= 14; j++)
1728 {
1729 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1730 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1731 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1732 }
1733 }
1734 }
1735
1736 free(buffer);
1737
1738 return ERROR_OK;
1739 }
1740
1741 static int xscale_restore_context(struct target *target)
1742 {
1743 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1744
1745 int i, j;
1746
1747 if (target->state != TARGET_HALTED)
1748 {
1749 LOG_WARNING("target not halted");
1750 return ERROR_TARGET_NOT_HALTED;
1751 }
1752
1753 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1754 * we can't enter User mode on an XScale (unpredictable),
1755 * but User shares registers with SYS
1756 */
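/* Protocol: command 0x1 tells the debug handler to accept banked
 * registers; a CPSR value selecting the mode follows, then r8-r14 and,
 * for exception modes, the SPSR.
 */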
1757 for (i = 1; i < 7; i++)
1758 {
1759 int dirty = 0;
1760
1761 /* check if there are any dirty registers in this mode
1762 */
1763 for (j = 8; j <= 14; j++)
1764 {
1765 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1766 dirty = 1;
1767 }
1768
1769 /* if not USR/SYS, check if the SPSR needs to be written */
1770 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1771 {
1772 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1773 dirty = 1;
1774 }
1775
1776 if (dirty)
1777 {
1778 uint32_t tmp_cpsr;
1779
1780 /* send banked registers */
1781 xscale_send_u32(target, 0x1);
1782
1783 tmp_cpsr = 0x0;
1784 tmp_cpsr |= armv4_5_number_to_mode(i);
1785 tmp_cpsr |= 0xc0; /* I/F bits */
1786
1787 /* send CPSR for desired mode */
1788 xscale_send_u32(target, tmp_cpsr);
1789
1790 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1791 for (j = 8; j <= 14; j++)
1792 {
1793 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1794 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1795 }
1796
1797 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1798 {
1799 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1800 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1801 }
1802 }
1803 }
1804
1805 return ERROR_OK;
1806 }
1807
1808 static int xscale_read_memory(struct target *target, uint32_t address,
1809 uint32_t size, uint32_t count, uint8_t *buffer)
1810 {
1811 struct xscale_common *xscale = target_to_xscale(target);
1812 uint32_t *buf32;
1813 uint32_t i;
1814 int retval;
1815
1816 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1817
1818 if (target->state != TARGET_HALTED)
1819 {
1820 LOG_WARNING("target not halted");
1821 return ERROR_TARGET_NOT_HALTED;
1822 }
1823
1824 /* sanitize arguments */
1825 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1826 return ERROR_INVALID_ARGUMENTS;
1827
1828 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1829 return ERROR_TARGET_UNALIGNED_ACCESS;
1830
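/* Read protocol: command 0x1n (n = access size in bytes), then the base
 * address and the element count; the handler streams back one 32-bit
 * word per element, which is unpacked below.
 */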
1831 /* send memory read request (command 0x1n, n: access size) */
1832 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1833 return retval;
1834
1835 /* send base address for read request */
1836 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1837 return retval;
1838
1839 /* send number of requested data words */
1840 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1841 return retval;
1842
1843 /* receive data from target (count times 32-bit words in host endianness) */
1844 buf32 = malloc(4 * count);
1845 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1846 {
 free(buf32);
 return retval;
 }
1847
1848 /* extract data from host-endian buffer into byte stream */
1849 for (i = 0; i < count; i++)
1850 {
1851 switch (size)
1852 {
1853 case 4:
1854 target_buffer_set_u32(target, buffer, buf32[i]);
1855 buffer += 4;
1856 break;
1857 case 2:
1858 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1859 buffer += 2;
1860 break;
1861 case 1:
1862 *buffer++ = buf32[i] & 0xff;
1863 break;
1864 default:
1865 LOG_ERROR("invalid read size");
1866 free(buf32); return ERROR_INVALID_ARGUMENTS;
1867 }
1868 }
1869
1870 free(buf32);
1871
1872 /* examine DCSR, to see if Sticky Abort (SA) got set */
1873 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1874 return retval;
1875 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1876 {
1877 /* clear SA bit */
1878 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1879 return retval;
1880
1881 return ERROR_TARGET_DATA_ABORT;
1882 }
1883
1884 return ERROR_OK;
1885 }
1886
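/* Target memory writes mirror the read protocol: command 0x20 | size, base
 * address and word count are sent to the debug handler, followed by the raw
 * data via xscale_send(); the Sticky Abort bit in DCSR is then checked and
 * cleared (command 0x60) if the write faulted.
 */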
1887 static int xscale_write_memory(struct target *target, uint32_t address,
1888 uint32_t size, uint32_t count, uint8_t *buffer)
1889 {
1890 struct xscale_common *xscale = target_to_xscale(target);
1891 int retval;
1892
1893 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1894
1895 if (target->state != TARGET_HALTED)
1896 {
1897 LOG_WARNING("target not halted");
1898 return ERROR_TARGET_NOT_HALTED;
1899 }
1900
1901 /* sanitize arguments */
1902 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1903 return ERROR_INVALID_ARGUMENTS;
1904
1905 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1906 return ERROR_TARGET_UNALIGNED_ACCESS;
1907
1908 /* send memory write request (command 0x2n, n: access size) */
1909 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1910 return retval;
1911
1912 /* send base address for write request */
1913 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1914 return retval;
1915
1916 /* send number of requested data words to be written*/
1917 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1918 return retval;
1919
1920 /* extract data from host-endian buffer into byte stream */
1921 #if 0
1922 for (i = 0; i < count; i++)
1923 {
1924 switch (size)
1925 {
1926 case 4:
1927 value = target_buffer_get_u32(target, buffer);
1928 xscale_send_u32(target, value);
1929 buffer += 4;
1930 break;
1931 case 2:
1932 value = target_buffer_get_u16(target, buffer);
1933 xscale_send_u32(target, value);
1934 buffer += 2;
1935 break;
1936 case 1:
1937 value = *buffer;
1938 xscale_send_u32(target, value);
1939 buffer += 1;
1940 break;
1941 default:
1942 LOG_ERROR("should never get here");
1943 exit(-1);
1944 }
1945 }
1946 #endif
1947 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1948 return retval;
1949
1950 /* examine DCSR, to see if Sticky Abort (SA) got set */
1951 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1952 return retval;
1953 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1954 {
1955 /* clear SA bit */
1956 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1957 return retval;
1958
1959 return ERROR_TARGET_DATA_ABORT;
1960 }
1961
1962 return ERROR_OK;
1963 }
1964
1965 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
1966 uint32_t count, uint8_t *buffer)
1967 {
1968 return xscale_write_memory(target, address, 4, count, buffer);
1969 }
1970
1971 static uint32_t xscale_get_ttb(struct target *target)
1972 {
1973 struct xscale_common *xscale = target_to_xscale(target);
1974 uint32_t ttb;
1975
1976 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1977 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1978
1979 return ttb;
1980 }
1981
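/* The cache/MMU helpers below operate on the CP15 control register held in
 * the XScale register cache: bit 0 enables the MMU, bit 2 the data/unified
 * cache and bit 12 the instruction cache. Debug handler commands 0x50/0x51
 * clean and invalidate the DCache (using cache_clean_address), 0x52
 * invalidates the ICache, and 0x53 issues a CPWAIT.
 */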
1982 static void xscale_disable_mmu_caches(struct target *target, int mmu,
1983 int d_u_cache, int i_cache)
1984 {
1985 struct xscale_common *xscale = target_to_xscale(target);
1986 uint32_t cp15_control;
1987
1988 /* read cp15 control register */
1989 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1990 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1991
1992 if (mmu)
1993 cp15_control &= ~0x1U;
1994
1995 if (d_u_cache)
1996 {
1997 /* clean DCache */
1998 xscale_send_u32(target, 0x50);
1999 xscale_send_u32(target, xscale->cache_clean_address);
2000
2001 /* invalidate DCache */
2002 xscale_send_u32(target, 0x51);
2003
2004 cp15_control &= ~0x4U;
2005 }
2006
2007 if (i_cache)
2008 {
2009 /* invalidate ICache */
2010 xscale_send_u32(target, 0x52);
2011 cp15_control &= ~0x1000U;
2012 }
2013
2014 /* write new cp15 control register */
2015 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2016
2017 /* execute cpwait to ensure outstanding operations complete */
2018 xscale_send_u32(target, 0x53);
2019 }
2020
2021 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2022 int d_u_cache, int i_cache)
2023 {
2024 struct xscale_common *xscale = target_to_xscale(target);
2025 uint32_t cp15_control;
2026
2027 /* read cp15 control register */
2028 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2029 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2030
2031 if (mmu)
2032 cp15_control |= 0x1U;
2033
2034 if (d_u_cache)
2035 cp15_control |= 0x4U;
2036
2037 if (i_cache)
2038 cp15_control |= 0x1000U;
2039
2040 /* write new cp15 control register */
2041 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2042
2043 /* execute cpwait to ensure outstanding operations complete */
2044 xscale_send_u32(target, 0x53);
2045 }
2046
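/* Hardware breakpoints use the two instruction breakpoint registers IBCR0 and
 * IBCR1; writing the breakpoint address with bit 0 set enables the comparator.
 * Software breakpoints save the original opcode and patch target memory with
 * an ARM or Thumb BKPT instruction instead.
 */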
2047 static int xscale_set_breakpoint(struct target *target,
2048 struct breakpoint *breakpoint)
2049 {
2050 int retval;
2051 struct xscale_common *xscale = target_to_xscale(target);
2052
2053 if (target->state != TARGET_HALTED)
2054 {
2055 LOG_WARNING("target not halted");
2056 return ERROR_TARGET_NOT_HALTED;
2057 }
2058
2059 if (breakpoint->set)
2060 {
2061 LOG_WARNING("breakpoint already set");
2062 return ERROR_OK;
2063 }
2064
2065 if (breakpoint->type == BKPT_HARD)
2066 {
2067 uint32_t value = breakpoint->address | 1;
2068 if (!xscale->ibcr0_used)
2069 {
2070 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2071 xscale->ibcr0_used = 1;
2072 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2073 }
2074 else if (!xscale->ibcr1_used)
2075 {
2076 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2077 xscale->ibcr1_used = 1;
2078 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2079 }
2080 else
2081 {
2082 LOG_ERROR("BUG: no hardware comparator available");
2083 return ERROR_OK;
2084 }
2085 }
2086 else if (breakpoint->type == BKPT_SOFT)
2087 {
2088 if (breakpoint->length == 4)
2089 {
2090 /* keep the original instruction in target endianness */
2091 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2092 {
2093 return retval;
2094 }
2095 /* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
2096 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2097 {
2098 return retval;
2099 }
2100 }
2101 else
2102 {
2103 /* keep the original instruction in target endianness */
2104 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2105 {
2106 return retval;
2107 }
2108 /* write the bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
2109 if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2110 {
2111 return retval;
2112 }
2113 }
2114 breakpoint->set = 1;
2115 }
2116
2117 return ERROR_OK;
2118 }
2119
2120 static int xscale_add_breakpoint(struct target *target,
2121 struct breakpoint *breakpoint)
2122 {
2123 struct xscale_common *xscale = target_to_xscale(target);
2124
2125 if (target->state != TARGET_HALTED)
2126 {
2127 LOG_WARNING("target not halted");
2128 return ERROR_TARGET_NOT_HALTED;
2129 }
2130
2131 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2132 {
2133 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2134 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2135 }
2136
2137 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2138 {
2139 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2140 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2141 }
2142
2143 if (breakpoint->type == BKPT_HARD)
2144 {
2145 xscale->ibcr_available--;
2146 }
2147
2148 return ERROR_OK;
2149 }
2150
2151 static int xscale_unset_breakpoint(struct target *target,
2152 struct breakpoint *breakpoint)
2153 {
2154 int retval;
2155 struct xscale_common *xscale = target_to_xscale(target);
2156
2157 if (target->state != TARGET_HALTED)
2158 {
2159 LOG_WARNING("target not halted");
2160 return ERROR_TARGET_NOT_HALTED;
2161 }
2162
2163 if (!breakpoint->set)
2164 {
2165 LOG_WARNING("breakpoint not set");
2166 return ERROR_OK;
2167 }
2168
2169 if (breakpoint->type == BKPT_HARD)
2170 {
2171 if (breakpoint->set == 1)
2172 {
2173 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2174 xscale->ibcr0_used = 0;
2175 }
2176 else if (breakpoint->set == 2)
2177 {
2178 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2179 xscale->ibcr1_used = 0;
2180 }
2181 breakpoint->set = 0;
2182 }
2183 else
2184 {
2185 /* restore original instruction (kept in target endianness) */
2186 if (breakpoint->length == 4)
2187 {
2188 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2189 {
2190 return retval;
2191 }
2192 }
2193 else
2194 {
2195 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2196 {
2197 return retval;
2198 }
2199 }
2200 breakpoint->set = 0;
2201 }
2202
2203 return ERROR_OK;
2204 }
2205
2206 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2207 {
2208 struct xscale_common *xscale = target_to_xscale(target);
2209
2210 if (target->state != TARGET_HALTED)
2211 {
2212 LOG_WARNING("target not halted");
2213 return ERROR_TARGET_NOT_HALTED;
2214 }
2215
2216 if (breakpoint->set)
2217 {
2218 xscale_unset_breakpoint(target, breakpoint);
2219 }
2220
2221 if (breakpoint->type == BKPT_HARD)
2222 xscale->ibcr_available++;
2223
2224 return ERROR_OK;
2225 }
2226
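/* Watchpoints use the two data breakpoint address registers DBR0 and DBR1;
 * DBCON holds a two-bit enable/type field per register, so DBR1's field is
 * shifted left by two when programmed.
 */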
2227 static int xscale_set_watchpoint(struct target *target,
2228 struct watchpoint *watchpoint)
2229 {
2230 struct xscale_common *xscale = target_to_xscale(target);
2231 uint8_t enable = 0;
2232 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2233 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2234
2235 if (target->state != TARGET_HALTED)
2236 {
2237 LOG_WARNING("target not halted");
2238 return ERROR_TARGET_NOT_HALTED;
2239 }
2240
2241 xscale_get_reg(dbcon);
2242
2243 switch (watchpoint->rw)
2244 {
2245 case WPT_READ:
2246 enable = 0x3;
2247 break;
2248 case WPT_ACCESS:
2249 enable = 0x2;
2250 break;
2251 case WPT_WRITE:
2252 enable = 0x1;
2253 break;
2254 default:
2255 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2256 }
2257
2258 if (!xscale->dbr0_used)
2259 {
2260 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2261 dbcon_value |= enable;
2262 xscale_set_reg_u32(dbcon, dbcon_value);
2263 watchpoint->set = 1;
2264 xscale->dbr0_used = 1;
2265 }
2266 else if (!xscale->dbr1_used)
2267 {
2268 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2269 dbcon_value |= enable << 2;
2270 xscale_set_reg_u32(dbcon, dbcon_value);
2271 watchpoint->set = 2;
2272 xscale->dbr1_used = 1;
2273 }
2274 else
2275 {
2276 LOG_ERROR("BUG: no hardware comparator available");
2277 return ERROR_OK;
2278 }
2279
2280 return ERROR_OK;
2281 }
2282
2283 static int xscale_add_watchpoint(struct target *target,
2284 struct watchpoint *watchpoint)
2285 {
2286 struct xscale_common *xscale = target_to_xscale(target);
2287
2288 if (target->state != TARGET_HALTED)
2289 {
2290 LOG_WARNING("target not halted");
2291 return ERROR_TARGET_NOT_HALTED;
2292 }
2293
2294 if (xscale->dbr_available < 1)
2295 {
2296 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2297 }
2298
2299 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2300 {
2301 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2302 }
2303
2304 xscale->dbr_available--;
2305
2306 return ERROR_OK;
2307 }
2308
2309 static int xscale_unset_watchpoint(struct target *target,
2310 struct watchpoint *watchpoint)
2311 {
2312 struct xscale_common *xscale = target_to_xscale(target);
2313 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2314 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2315
2316 if (target->state != TARGET_HALTED)
2317 {
2318 LOG_WARNING("target not halted");
2319 return ERROR_TARGET_NOT_HALTED;
2320 }
2321
2322 if (!watchpoint->set)
2323 {
2324 LOG_WARNING("breakpoint not set");
2325 return ERROR_OK;
2326 }
2327
2328 if (watchpoint->set == 1)
2329 {
2330 dbcon_value &= ~0x3;
2331 xscale_set_reg_u32(dbcon, dbcon_value);
2332 xscale->dbr0_used = 0;
2333 }
2334 else if (watchpoint->set == 2)
2335 {
2336 dbcon_value &= ~0xc;
2337 xscale_set_reg_u32(dbcon, dbcon_value);
2338 xscale->dbr1_used = 0;
2339 }
2340 watchpoint->set = 0;
2341
2342 return ERROR_OK;
2343 }
2344
2345 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2346 {
2347 struct xscale_common *xscale = target_to_xscale(target);
2348
2349 if (target->state != TARGET_HALTED)
2350 {
2351 LOG_WARNING("target not halted");
2352 return ERROR_TARGET_NOT_HALTED;
2353 }
2354
2355 if (watchpoint->set)
2356 {
2357 xscale_unset_watchpoint(target, watchpoint);
2358 }
2359
2360 xscale->dbr_available++;
2361
2362 return ERROR_OK;
2363 }
2364
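/* Register accessors: DCSR, TX and RX are reachable directly over JTAG, while
 * the remaining debug registers are proxied through the debug handler with
 * command 0x40 (read) or 0x41 (write), followed by the handler's register
 * number and, for writes, the new value.
 */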
2365 static int xscale_get_reg(struct reg *reg)
2366 {
2367 struct xscale_reg *arch_info = reg->arch_info;
2368 struct target *target = arch_info->target;
2369 struct xscale_common *xscale = target_to_xscale(target);
2370
2371 /* DCSR, TX and RX are accessible via JTAG */
2372 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2373 {
2374 return xscale_read_dcsr(arch_info->target);
2375 }
2376 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2377 {
2378 /* 1 = consume register content */
2379 return xscale_read_tx(arch_info->target, 1);
2380 }
2381 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2382 {
2383 /* can't read from RX register (host -> debug handler) */
2384 return ERROR_OK;
2385 }
2386 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2387 {
2388 /* can't (explicitly) read from TXRXCTRL register */
2389 return ERROR_OK;
2390 }
2391 else /* Other DBG registers have to be transferred by the debug handler */
2392 {
2393 /* send CP read request (command 0x40) */
2394 xscale_send_u32(target, 0x40);
2395
2396 /* send CP register number */
2397 xscale_send_u32(target, arch_info->dbg_handler_number);
2398
2399 /* read register value */
2400 xscale_read_tx(target, 1);
2401 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2402
2403 reg->dirty = 0;
2404 reg->valid = 1;
2405 }
2406
2407 return ERROR_OK;
2408 }
2409
2410 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2411 {
2412 struct xscale_reg *arch_info = reg->arch_info;
2413 struct target *target = arch_info->target;
2414 struct xscale_common *xscale = target_to_xscale(target);
2415 uint32_t value = buf_get_u32(buf, 0, 32);
2416
2417 /* DCSR, TX and RX are accessible via JTAG */
2418 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2419 {
2420 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2421 return xscale_write_dcsr(arch_info->target, -1, -1);
2422 }
2423 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2424 {
2425 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2426 return xscale_write_rx(arch_info->target);
2427 }
2428 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2429 {
2430 /* can't write to TX register (debug-handler -> host) */
2431 return ERROR_OK;
2432 }
2433 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2434 {
2435 /* can't (explicitly) write to TXRXCTRL register */
2436 return ERROR_OK;
2437 }
2438 else /* Other DBG registers have to be transferred by the debug handler */
2439 {
2440 /* send CP write request (command 0x41) */
2441 xscale_send_u32(target, 0x41);
2442
2443 /* send CP register number */
2444 xscale_send_u32(target, arch_info->dbg_handler_number);
2445
2446 /* send CP register value */
2447 xscale_send_u32(target, value);
2448 buf_set_u32(reg->value, 0, 32, value);
2449 }
2450
2451 return ERROR_OK;
2452 }
2453
2454 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2455 {
2456 struct xscale_common *xscale = target_to_xscale(target);
2457 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2458 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2459
2460 /* send CP write request (command 0x41) */
2461 xscale_send_u32(target, 0x41);
2462
2463 /* send CP register number */
2464 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2465
2466 /* send CP register value */
2467 xscale_send_u32(target, value);
2468 buf_set_u32(dcsr->value, 0, 32, value);
2469
2470 return ERROR_OK;
2471 }
2472
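/* Trace readout: command 0x61 makes the debug handler return 258 words, the
 * 256 trace buffer entries followed by the two checkpoint registers. The
 * buffer is scanned backwards so that the four bytes preceding an indirect
 * branch message (0x9n/0xDn) can be flagged as target address bytes rather
 * than trace messages.
 */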
2473 static int xscale_read_trace(struct target *target)
2474 {
2475 struct xscale_common *xscale = target_to_xscale(target);
2476 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2477 struct xscale_trace_data **trace_data_p;
2478
2479 /* 258 words from debug handler
2480 * 256 trace buffer entries
2481 * 2 checkpoint addresses
2482 */
2483 uint32_t trace_buffer[258];
2484 int is_address[256];
2485 int i, j;
2486
2487 if (target->state != TARGET_HALTED)
2488 {
2489 LOG_WARNING("target must be stopped to read trace data");
2490 return ERROR_TARGET_NOT_HALTED;
2491 }
2492
2493 /* send read trace buffer command (command 0x61) */
2494 xscale_send_u32(target, 0x61);
2495
2496 /* receive trace buffer content */
2497 xscale_receive(target, trace_buffer, 258);
2498
2499 /* parse buffer backwards to identify address entries */
2500 for (i = 255; i >= 0; i--)
2501 {
2502 is_address[i] = 0;
2503 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2504 ((trace_buffer[i] & 0xf0) == 0xd0))
2505 {
2506 if (i >= 3)
2507 is_address[--i] = 1;
2508 if (i >= 2)
2509 is_address[--i] = 1;
2510 if (i >= 1)
2511 is_address[--i] = 1;
2512 if (i > 0)
2513 is_address[--i] = 1;
2514 }
2515 }
2516
2517
2518 /* search first non-zero entry */
2519 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2520 ;
2521
2522 if (j == 256)
2523 {
2524 LOG_DEBUG("no trace data collected");
2525 return ERROR_XSCALE_NO_TRACE_DATA;
2526 }
2527
2528 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2529 ;
2530
2531 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2532 (*trace_data_p)->next = NULL;
2533 (*trace_data_p)->chkpt0 = trace_buffer[256];
2534 (*trace_data_p)->chkpt1 = trace_buffer[257];
2535 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2536 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2537 (*trace_data_p)->depth = 256 - j;
2538
2539 for (i = j; i < 256; i++)
2540 {
2541 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2542 if (is_address[i])
2543 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2544 else
2545 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2546 }
2547
2548 return ERROR_OK;
2549 }
2550
2551 static int xscale_read_instruction(struct target *target,
2552 struct arm_instruction *instruction)
2553 {
2554 struct xscale_common *xscale = target_to_xscale(target);
2555 int i;
2556 int section = -1;
2557 size_t size_read;
2558 uint32_t opcode;
2559 int retval;
2560
2561 if (!xscale->trace.image)
2562 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2563
2564 /* search for the section the current instruction belongs to */
2565 for (i = 0; i < xscale->trace.image->num_sections; i++)
2566 {
2567 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2568 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2569 {
2570 section = i;
2571 break;
2572 }
2573 }
2574
2575 if (section == -1)
2576 {
2577 /* current instruction couldn't be found in the image */
2578 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2579 }
2580
2581 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2582 {
2583 uint8_t buf[4];
2584 if ((retval = image_read_section(xscale->trace.image, section,
2585 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2586 4, buf, &size_read)) != ERROR_OK)
2587 {
2588 LOG_ERROR("error while reading instruction: %i", retval);
2589 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2590 }
2591 opcode = target_buffer_get_u32(target, buf);
2592 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2593 }
2594 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2595 {
2596 uint8_t buf[2];
2597 if ((retval = image_read_section(xscale->trace.image, section,
2598 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2599 2, buf, &size_read)) != ERROR_OK)
2600 {
2601 LOG_ERROR("error while reading instruction: %i", retval);
2602 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2603 }
2604 opcode = target_buffer_get_u16(target, buf);
2605 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2606 }
2607 else
2608 {
2609 LOG_ERROR("BUG: unknown core state encountered");
2610 exit(-1);
2611 }
2612
2613 return ERROR_OK;
2614 }
2615
2616 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2617 int i, uint32_t *target)
2618 {
2619 /* if there are fewer than four entries prior to the indirect branch message
2620 * we can't extract the address */
2621 if (i < 4)
2622 {
2623 return -1;
2624 }
2625
2626 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2627 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2628
2629 return 0;
2630 }
2631
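/* Trace reconstruction: each message byte carries the message type in the
 * high nibble (0-7 exception, 8/9 direct/indirect branch, 12/13 checkpointed
 * branch, 15 roll-over) and an incremental instruction count in the low
 * nibble. Instructions executed between control-flow changes are re-read
 * from the loaded trace image to regenerate the instruction stream.
 */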
2632 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2633 {
2634 struct xscale_common *xscale = target_to_xscale(target);
2635 int next_pc_ok = 0;
2636 uint32_t next_pc = 0x0;
2637 struct xscale_trace_data *trace_data = xscale->trace.data;
2638 int retval;
2639
2640 while (trace_data)
2641 {
2642 int i, chkpt;
2643 int rollover;
2644 int branch;
2645 int exception;
2646 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2647
2648 chkpt = 0;
2649 rollover = 0;
2650
2651 for (i = 0; i < trace_data->depth; i++)
2652 {
2653 next_pc_ok = 0;
2654 branch = 0;
2655 exception = 0;
2656
2657 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2658 continue;
2659
2660 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2661 {
2662 case 0: /* Exceptions */
2663 case 1:
2664 case 2:
2665 case 3:
2666 case 4:
2667 case 5:
2668 case 6:
2669 case 7:
2670 exception = (trace_data->entries[i].data & 0x70) >> 4;
2671 next_pc_ok = 1;
2672 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2673 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2674 break;
2675 case 8: /* Direct Branch */
2676 branch = 1;
2677 break;
2678 case 9: /* Indirect Branch */
2679 branch = 1;
2680 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2681 {
2682 next_pc_ok = 1;
2683 }
2684 break;
2685 case 13: /* Checkpointed Indirect Branch */
2686 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2687 {
2688 next_pc_ok = 1;
2689 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2690 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2691 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2692 }
2693 /* explicit fall-through */
2694 case 12: /* Checkpointed Direct Branch */
2695 branch = 1;
2696 if (chkpt == 0)
2697 {
2698 next_pc_ok = 1;
2699 next_pc = trace_data->chkpt0;
2700 chkpt++;
2701 }
2702 else if (chkpt == 1)
2703 {
2704 next_pc_ok = 1;
2705 next_pc = trace_data->chkpt1;
2706 chkpt++;
2707 }
2708 else
2709 {
2710 LOG_WARNING("more than two checkpointed branches encountered");
2711 }
2712 break;
2713 case 15: /* Roll-over */
2714 rollover++;
2715 continue;
2716 default: /* Reserved */
2717 command_print(cmd_ctx, "--- reserved trace message ---");
2718 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2719 return ERROR_OK;
2720 }
2721
2722 if (xscale->trace.pc_ok)
2723 {
2724 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2725 struct arm_instruction instruction;
2726
2727 if ((exception == 6) || (exception == 7))
2728 {
2729 /* IRQ or FIQ exception, no instruction executed */
2730 executed -= 1;
2731 }
2732
2733 while (executed-- >= 0)
2734 {
2735 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2736 {
2737 /* can't continue tracing with no image available */
2738 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2739 {
2740 return retval;
2741 }
2742 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2743 {
2744 /* TODO: handle incomplete images */
2745 }
2746 }
2747
2748 /* a precise abort on a load to the PC is included in the incremental
2749 * word count, other instructions causing data aborts are not included
2750 */
2751 if ((executed == 0) && (exception == 4)
2752 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2753 {
2754 if ((instruction.type == ARM_LDM)
2755 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2756 {
2757 executed--;
2758 }
2759 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2760 && (instruction.info.load_store.Rd != 15))
2761 {
2762 executed--;
2763 }
2764 }
2765
2766 /* only the last instruction executed
2767 * (the one that caused the control flow change)
2768 * could be a taken branch
2769 */
2770 if (((executed == -1) && (branch == 1)) &&
2771 (((instruction.type == ARM_B) ||
2772 (instruction.type == ARM_BL) ||
2773 (instruction.type == ARM_BLX)) &&
2774 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2775 {
2776 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2777 }
2778 else
2779 {
2780 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2781 }
2782 command_print(cmd_ctx, "%s", instruction.text);
2783 }
2784
2785 rollover = 0;
2786 }
2787
2788 if (next_pc_ok)
2789 {
2790 xscale->trace.current_pc = next_pc;
2791 xscale->trace.pc_ok = 1;
2792 }
2793 }
2794
2795 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2796 {
2797 struct arm_instruction instruction;
2798 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2799 {
2800 /* can't continue tracing with no image available */
2801 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2802 {
2803 return retval;
2804 }
2805 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2806 {
2807 /* TODO: handle incomplete images */
2808 }
2809 }
2810 command_print(cmd_ctx, "%s", instruction.text);
2811 }
2812
2813 trace_data = trace_data->next;
2814 }
2815
2816 return ERROR_OK;
2817 }
2818
2819 static const struct reg_arch_type xscale_reg_type = {
2820 .get = xscale_get_reg,
2821 .set = xscale_set_reg,
2822 };
2823
2824 static void xscale_build_reg_cache(struct target *target)
2825 {
2826 struct xscale_common *xscale = target_to_xscale(target);
2827 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2828 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2829 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2830 int i;
2831 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(struct xscale_reg);
2832
2833 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2834
2835 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2836 cache_p = &(*cache_p)->next;
2837
2838 /* fill in values for the xscale reg cache */
2839 (*cache_p)->name = "XScale registers";
2840 (*cache_p)->next = NULL;
2841 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2842 (*cache_p)->num_regs = num_regs;
2843
2844 for (i = 0; i < num_regs; i++)
2845 {
2846 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2847 (*cache_p)->reg_list[i].value = calloc(4, 1);
2848 (*cache_p)->reg_list[i].dirty = 0;
2849 (*cache_p)->reg_list[i].valid = 0;
2850 (*cache_p)->reg_list[i].size = 32;
2851 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2852 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2853 arch_info[i] = xscale_reg_arch_info[i];
2854 arch_info[i].target = target;
2855 }
2856
2857 xscale->reg_cache = (*cache_p);
2858 }
2859
2860 static int xscale_init_target(struct command_context *cmd_ctx,
2861 struct target *target)
2862 {
2863 xscale_build_reg_cache(target);
2864 return ERROR_OK;
2865 }
2866
2867 static int xscale_init_arch_info(struct target *target,
2868 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2869 {
2870 struct arm *armv4_5;
2871 uint32_t high_reset_branch, low_reset_branch;
2872 int i;
2873
2874 armv4_5 = &xscale->armv4_5_common;
2875
2876 /* store architecture specific data (none so far) */
2877 xscale->common_magic = XSCALE_COMMON_MAGIC;
2878
2879 /* we don't really *need* variant info ... */
2880 if (variant) {
2881 int ir_length = 0;
2882
2883 if (strcmp(variant, "pxa250") == 0
2884 || strcmp(variant, "pxa255") == 0
2885 || strcmp(variant, "pxa26x") == 0)
2886 ir_length = 5;
2887 else if (strcmp(variant, "pxa27x") == 0
2888 || strcmp(variant, "ixp42x") == 0
2889 || strcmp(variant, "ixp45x") == 0
2890 || strcmp(variant, "ixp46x") == 0)
2891 ir_length = 7;
2892 else
2893 LOG_WARNING("%s: unrecognized variant %s",
2894 tap->dotted_name, variant);
2895
2896 if (ir_length && ir_length != tap->ir_length) {
2897 LOG_WARNING("%s: IR length for %s is %d; fixing",
2898 tap->dotted_name, variant, ir_length);
2899 tap->ir_length = ir_length;
2900 }
2901 }
2902
2903 /* the debug handler isn't installed (and thus not running) at this time */
2904 xscale->handler_address = 0xfe000800;
2905
2906 /* clear the vectors we keep locally for reference */
2907 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2908 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2909
2910 /* no user-specified vectors have been configured yet */
2911 xscale->static_low_vectors_set = 0x0;
2912 xscale->static_high_vectors_set = 0x0;
2913
2914 /* calculate branches to debug handler */
2915 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2916 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2917
2918 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2919 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2920
2921 for (i = 1; i <= 7; i++)
2922 {
2923 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2924 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2925 }
2926
2927 /* 64kB aligned region used for DCache cleaning */
2928 xscale->cache_clean_address = 0xfffe0000;
2929
2930 xscale->hold_rst = 0;
2931 xscale->external_debug_break = 0;
2932
2933 xscale->ibcr_available = 2;
2934 xscale->ibcr0_used = 0;
2935 xscale->ibcr1_used = 0;
2936
2937 xscale->dbr_available = 2;
2938 xscale->dbr0_used = 0;
2939 xscale->dbr1_used = 0;
2940
2941 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2942 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2943
2944 xscale->vector_catch = 0x1;
2945
2946 xscale->trace.capture_status = TRACE_IDLE;
2947 xscale->trace.data = NULL;
2948 xscale->trace.image = NULL;
2949 xscale->trace.buffer_enabled = 0;
2950 xscale->trace.buffer_fill = 0;
2951
2952 /* prepare ARMv4/5 specific information */
2953 armv4_5->arch_info = xscale;
2954 armv4_5->read_core_reg = xscale_read_core_reg;
2955 armv4_5->write_core_reg = xscale_write_core_reg;
2956 armv4_5->full_context = xscale_full_context;
2957
2958 armv4_5_init_arch_info(target, armv4_5);
2959
2960 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
2961 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
2962 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
2963 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
2964 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
2965 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
2966 xscale->armv4_5_mmu.has_tiny_pages = 1;
2967 xscale->armv4_5_mmu.mmu_enabled = 0;
2968
2969 return ERROR_OK;
2970 }
2971
2972 static int xscale_target_create(struct target *target, Jim_Interp *interp)
2973 {
2974 struct xscale_common *xscale;
2975
2976 if (sizeof xscale_debug_handler - 1 > 0x800) {
2977 LOG_ERROR("debug_handler.bin: larger than 2kb");
2978 return ERROR_FAIL;
2979 }
2980
2981 xscale = calloc(1, sizeof(*xscale));
2982 if (!xscale)
2983 return ERROR_FAIL;
2984
2985 return xscale_init_arch_info(target, xscale, target->tap,
2986 target->variant);
2987 }
2988
2989 COMMAND_HANDLER(xscale_handle_debug_handler_command)
2990 {
2991 struct target *target = NULL;
2992 struct xscale_common *xscale;
2993 int retval;
2994 uint32_t handler_address;
2995
2996 if (CMD_ARGC < 2)
2997 {
2998 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
2999 return ERROR_OK;
3000 }
3001
3002 if ((target = get_target(CMD_ARGV[0])) == NULL)
3003 {
3004 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3005 return ERROR_FAIL;
3006 }
3007
3008 xscale = target_to_xscale(target);
3009 retval = xscale_verify_pointer(CMD_CTX, xscale);
3010 if (retval != ERROR_OK)
3011 return retval;
3012
3013 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3014
3015 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3016 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3017 {
3018 xscale->handler_address = handler_address;
3019 }
3020 else
3021 {
3022 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3023 return ERROR_FAIL;
3024 }
3025
3026 return ERROR_OK;
3027 }
3028
3029 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3030 {
3031 struct target *target = NULL;
3032 struct xscale_common *xscale;
3033 int retval;
3034 uint32_t cache_clean_address;
3035
3036 if (CMD_ARGC < 2)
3037 {
3038 return ERROR_COMMAND_SYNTAX_ERROR;
3039 }
3040
3041 target = get_target(CMD_ARGV[0]);
3042 if (target == NULL)
3043 {
3044 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3045 return ERROR_FAIL;
3046 }
3047 xscale = target_to_xscale(target);
3048 retval = xscale_verify_pointer(CMD_CTX, xscale);
3049 if (retval != ERROR_OK)
3050 return retval;
3051
3052 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3053
3054 if (cache_clean_address & 0xffff)
3055 {
3056 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3057 }
3058 else
3059 {
3060 xscale->cache_clean_address = cache_clean_address;
3061 }
3062
3063 return ERROR_OK;
3064 }
3065
3066 COMMAND_HANDLER(xscale_handle_cache_info_command)
3067 {
3068 struct target *target = get_current_target(CMD_CTX);
3069 struct xscale_common *xscale = target_to_xscale(target);
3070 int retval;
3071
3072 retval = xscale_verify_pointer(CMD_CTX, xscale);
3073 if (retval != ERROR_OK)
3074 return retval;
3075
3076 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3077 }
3078
3079 static int xscale_virt2phys(struct target *target,
3080 uint32_t virtual, uint32_t *physical)
3081 {
3082 struct xscale_common *xscale = target_to_xscale(target);
3083 int type;
3084 uint32_t cb;
3085 int domain;
3086 uint32_t ap;
3087
3088 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3089 LOG_ERROR(xscale_not);
3090 return ERROR_TARGET_INVALID;
3091 }
3092
3093 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3094 if (type == -1)
3095 {
3096 return ret;
3097 }
3098 *physical = ret;
3099 return ERROR_OK;
3100 }
3101
3102 static int xscale_mmu(struct target *target, int *enabled)
3103 {
3104 struct xscale_common *xscale = target_to_xscale(target);
3105
3106 if (target->state != TARGET_HALTED)
3107 {
3108 LOG_ERROR("Target not halted");
3109 return ERROR_TARGET_INVALID;
3110 }
3111 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3112 return ERROR_OK;
3113 }
3114
3115 COMMAND_HANDLER(xscale_handle_mmu_command)
3116 {
3117 struct target *target = get_current_target(CMD_CTX);
3118 struct xscale_common *xscale = target_to_xscale(target);
3119 int retval;
3120
3121 retval = xscale_verify_pointer(CMD_CTX, xscale);
3122 if (retval != ERROR_OK)
3123 return retval;
3124
3125 if (target->state != TARGET_HALTED)
3126 {
3127 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3128 return ERROR_OK;
3129 }
3130
3131 if (CMD_ARGC >= 1)
3132 {
3133 bool enable;
3134 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3135 if (enable)
3136 xscale_enable_mmu_caches(target, 1, 0, 0);
3137 else
3138 xscale_disable_mmu_caches(target, 1, 0, 0);
3139 xscale->armv4_5_mmu.mmu_enabled = enable;
3140 }
3141
3142 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3143
3144 return ERROR_OK;
3145 }
3146
3147 COMMAND_HANDLER(xscale_handle_idcache_command)
3148 {
3149 struct target *target = get_current_target(CMD_CTX);
3150 struct xscale_common *xscale = target_to_xscale(target);
3151
3152 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3153 if (retval != ERROR_OK)
3154 return retval;
3155
3156 if (target->state != TARGET_HALTED)
3157 {
3158 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3159 return ERROR_OK;
3160 }
3161
3162 bool icache;
3163 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3164
3165 if (CMD_ARGC >= 1)
3166 {
3167 bool enable;
3168 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3169 if (enable)
3170 xscale_enable_mmu_caches(target, 0, icache ? 0 : 1, icache ? 1 : 0);
3171 else
3172 xscale_disable_mmu_caches(target, 0, icache ? 0 : 1, icache ? 1 : 0);
3173 if (icache)
3174 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3175 else
3176 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3177 }
3178
3179 bool enabled = icache ?
3180 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3181 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3182 const char *msg = enabled ? "enabled" : "disabled";
3183 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3184
3185 return ERROR_OK;
3186 }
3187
3188 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3189 {
3190 struct target *target = get_current_target(CMD_CTX);
3191 struct xscale_common *xscale = target_to_xscale(target);
3192 int retval;
3193
3194 retval = xscale_verify_pointer(CMD_CTX, xscale);
3195 if (retval != ERROR_OK)
3196 return retval;
3197
3198 if (CMD_ARGC < 1)
3199 {
3200 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3201 }
3202 else
3203 {
3204 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3205 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3206 xscale_write_dcsr(target, -1, -1);
3207 }
3208
3209 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3210
3211 return ERROR_OK;
3212 }
3213
3214
3215 COMMAND_HANDLER(xscale_handle_vector_table_command)
3216 {
3217 struct target *target = get_current_target(CMD_CTX);
3218 struct xscale_common *xscale = target_to_xscale(target);
3219 int err = 0;
3220 int retval;
3221
3222 retval = xscale_verify_pointer(CMD_CTX, xscale);
3223 if (retval != ERROR_OK)
3224 return retval;
3225
3226 if (CMD_ARGC == 0) /* print current settings */
3227 {
3228 int idx;
3229
3230 command_print(CMD_CTX, "active user-set static vectors:");
3231 for (idx = 1; idx < 8; idx++)
3232 if (xscale->static_low_vectors_set & (1 << idx))
3233 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3234 for (idx = 1; idx < 8; idx++)
3235 if (xscale->static_high_vectors_set & (1 << idx))
3236 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3237 return ERROR_OK;
3238 }
3239
3240 if (CMD_ARGC != 3)
3241 err = 1;
3242 else
3243 {
3244 int idx;
3245 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3246 uint32_t vec;
3247 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3248
3249 if (idx < 1 || idx >= 8)
3250 err = 1;
3251
3252 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3253 {
3254 xscale->static_low_vectors_set |= (1<<idx);
3255 xscale->static_low_vectors[idx] = vec;
3256 }
3257 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3258 {
3259 xscale->static_high_vectors_set |= (1<<idx);
3260 xscale->static_high_vectors[idx] = vec;
3261 }
3262 else
3263 err = 1;
3264 }
3265
3266 if (err)
3267 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3268
3269 return ERROR_OK;
3270 }
3271
3272
3273 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3274 {
3275 struct target *target = get_current_target(CMD_CTX);
3276 struct xscale_common *xscale = target_to_xscale(target);
3277 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
3278 uint32_t dcsr_value;
3279 int retval;
3280
3281 retval = xscale_verify_pointer(CMD_CTX, xscale);
3282 if (retval != ERROR_OK)
3283 return retval;
3284
3285 if (target->state != TARGET_HALTED)
3286 {
3287 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3288 return ERROR_OK;
3289 }
3290
3291 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3292 {
3293 struct xscale_trace_data *td, *next_td;
3294 xscale->trace.buffer_enabled = 1;
3295
3296 /* free old trace data */
3297 td = xscale->trace.data;
3298 while (td)
3299 {
3300 next_td = td->next;
3301
3302 if (td->entries)
3303 free(td->entries);
3304 free(td);
3305 td = next_td;
3306 }
3307 xscale->trace.data = NULL;
3308 }
3309 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3310 {
3311 xscale->trace.buffer_enabled = 0;
3312 }
3313
3314 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3315 {
3316 uint32_t fill = 1;
3317 if (CMD_ARGC >= 3)
3318 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3319 xscale->trace.buffer_fill = fill;
3320 }
3321 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3322 {
3323 xscale->trace.buffer_fill = -1;
3324 }
3325
3326 if (xscale->trace.buffer_enabled)
3327 {
3328 /* if we enable the trace buffer in fill-once
3329 * mode we know the address of the first instruction */
3330 xscale->trace.pc_ok = 1;
3331 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3332 }
3333 else
3334 {
3335 /* otherwise the address is unknown, and we have no known good PC */
3336 xscale->trace.pc_ok = 0;
3337 }
3338
3339 command_print(CMD_CTX, "trace buffer %s (%s)",
3340 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3341 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3342
3343 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3344 if (xscale->trace.buffer_fill >= 0)
3345 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3346 else
3347 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3348
3349 return ERROR_OK;
3350 }
3351
3352 COMMAND_HANDLER(xscale_handle_trace_image_command)
3353 {
3354 struct target *target = get_current_target(CMD_CTX);
3355 struct xscale_common *xscale = target_to_xscale(target);
3356 int retval;
3357
3358 if (CMD_ARGC < 1)
3359 {
3360 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3361 return ERROR_OK;
3362 }
3363
3364 retval = xscale_verify_pointer(CMD_CTX, xscale);
3365 if (retval != ERROR_OK)
3366 return retval;
3367
3368 if (xscale->trace.image)
3369 {
3370 image_close(xscale->trace.image);
3371 free(xscale->trace.image);
3372 command_print(CMD_CTX, "previously loaded image found and closed");
3373 }
3374
3375 xscale->trace.image = malloc(sizeof(struct image));
3376 xscale->trace.image->base_address_set = 0;
3377 xscale->trace.image->start_address_set = 0;
3378
3379 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3380 if (CMD_ARGC >= 2)
3381 {
3382 xscale->trace.image->base_address_set = 1;
3383 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3384 }
3385 else
3386 {
3387 xscale->trace.image->base_address_set = 0;
3388 }
3389
3390 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3391 {
3392 free(xscale->trace.image);
3393 xscale->trace.image = NULL;
3394 return ERROR_OK;
3395 }
3396
3397 return ERROR_OK;
3398 }
3399
3400 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3401 {
3402 struct target *target = get_current_target(CMD_CTX);
3403 struct xscale_common *xscale = target_to_xscale(target);
3404 struct xscale_trace_data *trace_data;
3405 struct fileio file;
3406 int retval;
3407
3408 retval = xscale_verify_pointer(CMD_CTX, xscale);
3409 if (retval != ERROR_OK)
3410 return retval;
3411
3412 if (target->state != TARGET_HALTED)
3413 {
3414 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3415 return ERROR_OK;
3416 }
3417
3418 if (CMD_ARGC < 1)
3419 {
3420 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3421 return ERROR_OK;
3422 }
3423
3424 trace_data = xscale->trace.data;
3425
3426 if (!trace_data)
3427 {
3428 command_print(CMD_CTX, "no trace data collected");
3429 return ERROR_OK;
3430 }
3431
3432 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3433 {
3434 return ERROR_OK;
3435 }
3436
3437 while (trace_data)
3438 {
3439 int i;
3440
3441 fileio_write_u32(&file, trace_data->chkpt0);
3442 fileio_write_u32(&file, trace_data->chkpt1);
3443 fileio_write_u32(&file, trace_data->last_instruction);
3444 fileio_write_u32(&file, trace_data->depth);
3445
3446 for (i = 0; i < trace_data->depth; i++)
3447 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3448
3449 trace_data = trace_data->next;
3450 }
3451
3452 fileio_close(&file);
3453
3454 return ERROR_OK;
3455 }
3456
3457 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3458 {
3459 struct target *target = get_current_target(CMD_CTX);
3460 struct xscale_common *xscale = target_to_xscale(target);
3461 int retval;
3462
3463 retval = xscale_verify_pointer(CMD_CTX, xscale);
3464 if (retval != ERROR_OK)
3465 return retval;
3466
3467 xscale_analyze_trace(target, CMD_CTX);
3468
3469 return ERROR_OK;
3470 }
3471
3472 COMMAND_HANDLER(xscale_handle_cp15)
3473 {
3474 struct target *target = get_current_target(CMD_CTX);
3475 struct xscale_common *xscale = target_to_xscale(target);
3476 int retval;
3477
3478 retval = xscale_verify_pointer(CMD_CTX, xscale);
3479 if (retval != ERROR_OK)
3480 return retval;
3481
3482 if (target->state != TARGET_HALTED)
3483 {
3484 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3485 return ERROR_OK;
3486 }
3487 uint32_t reg_no = 0;
3488 struct reg *reg = NULL;
3489 if (CMD_ARGC > 0)
3490 {
3491 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3492 /*translate from xscale cp15 register no to openocd register*/
3493 switch (reg_no)
3494 {
3495 case 0:
3496 reg_no = XSCALE_MAINID;
3497 break;
3498 case 1:
3499 reg_no = XSCALE_CTRL;
3500 break;
3501 case 2:
3502 reg_no = XSCALE_TTB;
3503 break;
3504 case 3:
3505 reg_no = XSCALE_DAC;
3506 break;
3507 case 5:
3508 reg_no = XSCALE_FSR;
3509 break;
3510 case 6:
3511 reg_no = XSCALE_FAR;
3512 break;
3513 case 13:
3514 reg_no = XSCALE_PID;
3515 break;
3516 case 15:
3517 reg_no = XSCALE_CPACCESS;
3518 break;
3519 default:
3520 command_print(CMD_CTX, "invalid register number");
3521 return ERROR_INVALID_ARGUMENTS;
3522 }
3523 reg = &xscale->reg_cache->reg_list[reg_no];
3524
3525 }
3526 if (CMD_ARGC == 1)
3527 {
3528 uint32_t value;
3529
3530 /* read cp15 control register */
3531 xscale_get_reg(reg);
3532 value = buf_get_u32(reg->value, 0, 32);
3533 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3534 }
3535 else if (CMD_ARGC == 2)
3536 {
3537 uint32_t value;
3538 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3539
3540 /* send CP write request (command 0x41) */
3541 xscale_send_u32(target, 0x41);
3542
3543 /* send CP register number */
3544 xscale_send_u32(target, reg_no);
3545
3546 /* send CP register value */
3547 xscale_send_u32(target, value);
3548
3549 /* execute cpwait to ensure outstanding operations complete */
3550 xscale_send_u32(target, 0x53);
3551 }
3552 else
3553 {
3554 command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
3555 }
3556
3557 return ERROR_OK;
3558 }
3559
3560 static int xscale_register_commands(struct command_context *cmd_ctx)
3561 {
3562 struct command *xscale_cmd;
3563
3564 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3565
3566 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3567 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3568
3569 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3570 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3571 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3572 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3573
3574 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
3575 register_command(cmd_ctx, xscale_cmd, "vector_table", xscale_handle_vector_table_command, COMMAND_EXEC, "<high|low> <index> <code> set static code for exception handler entry");
3576
3577 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable | disable> ['fill' [n]|'wrap']");
3578
3579 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3580 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3581 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3582 COMMAND_EXEC, "load image from <file> [base address]");
3583
3584 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3585
3586 armv4_5_register_commands(cmd_ctx);
3587
3588 return ERROR_OK;
3589 }
3590
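/* Typical command sequence from an OpenOCD configuration (a sketch only; the
 * target name "xscale0" is made up for illustration):
 *
 *   xscale debug_handler xscale0 0xfe000800
 *   xscale vector_catch 0x01
 *   xscale trace_buffer enable fill 1
 *   xscale analyze_trace
 */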
3591 struct target_type xscale_target =
3592 {
3593 .name = "xscale",
3594
3595 .poll = xscale_poll,
3596 .arch_state = xscale_arch_state,
3597
3598 .target_request_data = NULL,
3599
3600 .halt = xscale_halt,
3601 .resume = xscale_resume,
3602 .step = xscale_step,
3603
3604 .assert_reset = xscale_assert_reset,
3605 .deassert_reset = xscale_deassert_reset,
3606 .soft_reset_halt = NULL,
3607
3608 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
3609
3610 .read_memory = xscale_read_memory,
3611 .write_memory = xscale_write_memory,
3612 .bulk_write_memory = xscale_bulk_write_memory,
3613
3614 .checksum_memory = arm_checksum_memory,
3615 .blank_check_memory = arm_blank_check_memory,
3616
3617 .run_algorithm = armv4_5_run_algorithm,
3618
3619 .add_breakpoint = xscale_add_breakpoint,
3620 .remove_breakpoint = xscale_remove_breakpoint,
3621 .add_watchpoint = xscale_add_watchpoint,
3622 .remove_watchpoint = xscale_remove_watchpoint,
3623
3624 .register_commands = xscale_register_commands,
3625 .target_create = xscale_target_create,
3626 .init_target = xscale_init_target,
3627
3628 .virt2phys = xscale_virt2phys,
3629 .mmu = xscale_mmu
3630 };
