1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
41
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
74
75
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
78 *
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
83 */
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
86
87 static char *const xscale_reg_list[] =
88 {
89 "XSCALE_MAINID", /* 0 */
90 "XSCALE_CACHETYPE",
91 "XSCALE_CTRL",
92 "XSCALE_AUXCTRL",
93 "XSCALE_TTB",
94 "XSCALE_DAC",
95 "XSCALE_FSR",
96 "XSCALE_FAR",
97 "XSCALE_PID",
98 "XSCALE_CPACCESS",
99 "XSCALE_IBCR0", /* 10 */
100 "XSCALE_IBCR1",
101 "XSCALE_DBR0",
102 "XSCALE_DBR1",
103 "XSCALE_DBCON",
104 "XSCALE_TBREG",
105 "XSCALE_CHKPT0",
106 "XSCALE_CHKPT1",
107 "XSCALE_DCSR",
108 "XSCALE_TX",
109 "XSCALE_RX", /* 20 */
110 "XSCALE_TXRXCTRL",
111 };
112
113 static const struct xscale_reg xscale_reg_arch_info[] =
114 {
115 {XSCALE_MAINID, NULL},
116 {XSCALE_CACHETYPE, NULL},
117 {XSCALE_CTRL, NULL},
118 {XSCALE_AUXCTRL, NULL},
119 {XSCALE_TTB, NULL},
120 {XSCALE_DAC, NULL},
121 {XSCALE_FSR, NULL},
122 {XSCALE_FAR, NULL},
123 {XSCALE_PID, NULL},
124 {XSCALE_CPACCESS, NULL},
125 {XSCALE_IBCR0, NULL},
126 {XSCALE_IBCR1, NULL},
127 {XSCALE_DBR0, NULL},
128 {XSCALE_DBR1, NULL},
129 {XSCALE_DBCON, NULL},
130 {XSCALE_TBREG, NULL},
131 {XSCALE_CHKPT0, NULL},
132 {XSCALE_CHKPT1, NULL},
133 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
134 {-1, NULL}, /* TX accessed via JTAG */
135 {-1, NULL}, /* RX accessed via JTAG */
136 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
137 };
138
139 /* convenience wrapper to access XScale specific registers */
140 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
141 {
142 uint8_t buf[4];
143
144 buf_set_u32(buf, 0, 32, value);
145
146 return xscale_set_reg(reg, buf);
147 }
148
149 static const char xscale_not[] = "target is not an XScale";
150
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
153 {
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
157 }
158 return ERROR_OK;
159 }
160
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
162 {
163 if (tap == NULL)
164 return ERROR_FAIL;
165
166 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
167 {
168 struct scan_field field;
169 uint8_t scratch[4];
170
171 memset(&field, 0, sizeof field);
172 field.tap = tap;
173 field.num_bits = tap->ir_length;
174 field.out_value = scratch;
175 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
176
177 jtag_add_ir_scan(1, &field, jtag_get_end_state());
178 }
179
180 return ERROR_OK;
181 }
182
183 static int xscale_read_dcsr(struct target *target)
184 {
185 struct xscale_common *xscale = target_to_xscale(target);
186 int retval;
187 struct scan_field fields[3];
188 uint8_t field0 = 0x0;
189 uint8_t field0_check_value = 0x2;
190 uint8_t field0_check_mask = 0x7;
191 uint8_t field2 = 0x0;
192 uint8_t field2_check_value = 0x0;
193 uint8_t field2_check_mask = 0x1;
194
195 jtag_set_end_state(TAP_DRPAUSE);
196 xscale_jtag_set_instr(target->tap,
197 XSCALE_SELDCSR << xscale->xscale_variant);
198
199 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
200 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
201
202 memset(&fields, 0, sizeof fields);
203
204 fields[0].tap = target->tap;
205 fields[0].num_bits = 3;
206 fields[0].out_value = &field0;
207 uint8_t tmp;
208 fields[0].in_value = &tmp;
209
210 fields[1].tap = target->tap;
211 fields[1].num_bits = 32;
212 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
213
214 fields[2].tap = target->tap;
215 fields[2].num_bits = 1;
216 fields[2].out_value = &field2;
217 uint8_t tmp2;
218 fields[2].in_value = &tmp2;
219
220 jtag_add_dr_scan(3, fields, jtag_get_end_state());
221
222 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
223 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
224
225 if ((retval = jtag_execute_queue()) != ERROR_OK)
226 {
227 LOG_ERROR("JTAG error while reading DCSR");
228 return retval;
229 }
230
231 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
232 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
233
234 /* write the register with the value we just read
235 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
236 */
237 field0_check_mask = 0x1;
238 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
239 fields[1].in_value = NULL;
240
241 jtag_set_end_state(TAP_IDLE);
242
243 jtag_add_dr_scan(3, fields, jtag_get_end_state());
244
245 /* DANGER!!! this must be here. It makes sure that the arguments
246 * passed to jtag_check_value_mask() do not go out of scope before the queue is executed! */
247 return jtag_execute_queue();
248 }
249
250
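/* JTAG callback: convert the 32-bit scan capture in 'arg' from little-endian
 * buffer format into a host-order uint32_t, in place */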
251 static void xscale_getbuf(jtag_callback_data_t arg)
252 {
253 uint8_t *in = (uint8_t *)arg;
254 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
255 }
256
257 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
258 {
259 if (num_words == 0)
260 return ERROR_INVALID_ARGUMENTS;
261
262 struct xscale_common *xscale = target_to_xscale(target);
263 int retval = ERROR_OK;
264 tap_state_t path[3];
265 struct scan_field fields[3];
266 uint8_t *field0 = malloc(num_words * 1);
267 uint8_t field0_check_value = 0x2;
268 uint8_t field0_check_mask = 0x6;
269 uint32_t *field1 = malloc(num_words * 4);
270 uint8_t field2_check_value = 0x0;
271 uint8_t field2_check_mask = 0x1;
272 int words_done = 0;
273 int words_scheduled = 0;
274 int i;
275
276 path[0] = TAP_DRSELECT;
277 path[1] = TAP_DRCAPTURE;
278 path[2] = TAP_DRSHIFT;
279
280 memset(&fields, 0, sizeof fields);
281
282 fields[0].tap = target->tap;
283 fields[0].num_bits = 3;
284 fields[0].check_value = &field0_check_value;
285 fields[0].check_mask = &field0_check_mask;
286
287 fields[1].tap = target->tap;
288 fields[1].num_bits = 32;
289
290 fields[2].tap = target->tap;
291 fields[2].num_bits = 1;
292 fields[2].check_value = &field2_check_value;
293 fields[2].check_mask = &field2_check_mask;
294
295 jtag_set_end_state(TAP_IDLE);
296 xscale_jtag_set_instr(target->tap,
297 XSCALE_DBGTX << xscale->xscale_variant);
298 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
299
300 /* repeat until all words have been collected */
301 int attempts = 0;
302 while (words_done < num_words)
303 {
304 /* schedule reads */
305 words_scheduled = 0;
306 for (i = words_done; i < num_words; i++)
307 {
308 fields[0].in_value = &field0[i];
309
310 jtag_add_pathmove(3, path);
311
312 fields[1].in_value = (uint8_t *)(field1 + i);
313
314 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
315
316 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
317
318 words_scheduled++;
319 }
320
321 if ((retval = jtag_execute_queue()) != ERROR_OK)
322 {
323 LOG_ERROR("JTAG error while receiving data from debug handler");
324 break;
325 }
326
327 /* examine results */
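/* bit 0 of the 3-bit flag field is the handler's TX-valid flag; words captured
 * while it was clear carry no data and must be re-read */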
328 for (i = words_done; i < num_words; i++)
329 {
330 if (!(field0[i] & 1))
331 {
332 /* move backwards if necessary */
333 int j;
334 for (j = i; j < num_words - 1; j++)
335 {
336 field0[j] = field0[j + 1];
337 field1[j] = field1[j + 1];
338 }
339 words_scheduled--;
340 }
341 }
342 if (words_scheduled == 0)
343 {
344 if (attempts++==1000)
345 {
346 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
347 retval = ERROR_TARGET_TIMEOUT;
348 break;
349 }
350 }
351
352 words_done += words_scheduled;
353 }
354
355 for (i = 0; i < num_words; i++)
356 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
357
358 free(field1);
free(field0);
359
360 return retval;
361 }
362
363 static int xscale_read_tx(struct target *target, int consume)
364 {
365 struct xscale_common *xscale = target_to_xscale(target);
366 tap_state_t path[3];
367 tap_state_t noconsume_path[6];
368 int retval;
369 struct timeval timeout, now;
370 struct scan_field fields[3];
371 uint8_t field0_in = 0x0;
372 uint8_t field0_check_value = 0x2;
373 uint8_t field0_check_mask = 0x6;
374 uint8_t field2_check_value = 0x0;
375 uint8_t field2_check_mask = 0x1;
376
377 jtag_set_end_state(TAP_IDLE);
378
379 xscale_jtag_set_instr(target->tap,
380 XSCALE_DBGTX << xscale->xscale_variant);
381
382 path[0] = TAP_DRSELECT;
383 path[1] = TAP_DRCAPTURE;
384 path[2] = TAP_DRSHIFT;
385
386 noconsume_path[0] = TAP_DRSELECT;
387 noconsume_path[1] = TAP_DRCAPTURE;
388 noconsume_path[2] = TAP_DREXIT1;
389 noconsume_path[3] = TAP_DRPAUSE;
390 noconsume_path[4] = TAP_DREXIT2;
391 noconsume_path[5] = TAP_DRSHIFT;
392
393 memset(&fields, 0, sizeof fields);
394
395 fields[0].tap = target->tap;
396 fields[0].num_bits = 3;
397 fields[0].in_value = &field0_in;
398
399 fields[1].tap = target->tap;
400 fields[1].num_bits = 32;
401 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
402
403 fields[2].tap = target->tap;
404 fields[2].num_bits = 1;
405 uint8_t tmp;
406 fields[2].in_value = &tmp;
407
408 gettimeofday(&timeout, NULL);
409 timeval_add_time(&timeout, 1, 0);
410
411 for (;;)
412 {
413 /* if we want to consume the register content (i.e. clear TX_READY),
414 * we have to go straight from Capture-DR to Shift-DR
415 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
416 */
417 if (consume)
418 jtag_add_pathmove(3, path);
419 else
420 {
421 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
422 }
423
424 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
425
426 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
427 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
428
429 if ((retval = jtag_execute_queue()) != ERROR_OK)
430 {
431 LOG_ERROR("JTAG error while reading TX");
432 return ERROR_TARGET_TIMEOUT;
433 }
434
435 gettimeofday(&now, NULL);
436 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
437 {
438 LOG_ERROR("time out reading TX register");
439 return ERROR_TARGET_TIMEOUT;
440 }
441 if (!((!(field0_in & 1)) && consume))
442 {
443 goto done;
444 }
445 if (debug_level >= 3)
446 {
447 LOG_DEBUG("waiting 100ms");
448 alive_sleep(100); /* avoid flooding the logs */
449 } else
450 {
451 keep_alive();
452 }
453 }
454 done:
455
456 if (!(field0_in & 1))
457 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
458
459 return ERROR_OK;
460 }
461
462 static int xscale_write_rx(struct target *target)
463 {
464 struct xscale_common *xscale = target_to_xscale(target);
465 int retval;
466 struct timeval timeout, now;
467 struct scan_field fields[3];
468 uint8_t field0_out = 0x0;
469 uint8_t field0_in = 0x0;
470 uint8_t field0_check_value = 0x2;
471 uint8_t field0_check_mask = 0x6;
472 uint8_t field2 = 0x0;
473 uint8_t field2_check_value = 0x0;
474 uint8_t field2_check_mask = 0x1;
475
476 jtag_set_end_state(TAP_IDLE);
477
478 xscale_jtag_set_instr(target->tap,
479 XSCALE_DBGRX << xscale->xscale_variant);
480
481 memset(&fields, 0, sizeof fields);
482
483 fields[0].tap = target->tap;
484 fields[0].num_bits = 3;
485 fields[0].out_value = &field0_out;
486 fields[0].in_value = &field0_in;
487
488 fields[1].tap = target->tap;
489 fields[1].num_bits = 32;
490 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
491
492 fields[2].tap = target->tap;
493 fields[2].num_bits = 1;
494 fields[2].out_value = &field2;
495 uint8_t tmp;
496 fields[2].in_value = &tmp;
497
498 gettimeofday(&timeout, NULL);
499 timeval_add_time(&timeout, 1, 0);
500
501 /* poll until rx_read is low */
502 LOG_DEBUG("polling RX");
503 for (;;)
504 {
505 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
506
507 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
508 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
509
510 if ((retval = jtag_execute_queue()) != ERROR_OK)
511 {
512 LOG_ERROR("JTAG error while writing RX");
513 return retval;
514 }
515
516 gettimeofday(&now, NULL);
517 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
518 {
519 LOG_ERROR("time out writing RX register");
520 return ERROR_TARGET_TIMEOUT;
521 }
522 if (!(field0_in & 1))
523 goto done;
524 if (debug_level >= 3)
525 {
526 LOG_DEBUG("waiting 100ms");
527 alive_sleep(100); /* avoid flooding the logs */
528 } else
529 {
530 keep_alive();
531 }
532 }
533 done:
534
535 /* set rx_valid */
536 field2 = 0x1;
537 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
538
539 if ((retval = jtag_execute_queue()) != ERROR_OK)
540 {
541 LOG_ERROR("JTAG error while writing RX");
542 return retval;
543 }
544
545 return ERROR_OK;
546 }
547
548 /* send count elements of size bytes each to the debug handler */
549 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
550 {
551 struct xscale_common *xscale = target_to_xscale(target);
552 uint32_t t[3];
553 int bits[3];
554 int retval;
555 int done_count = 0;
556
557 jtag_set_end_state(TAP_IDLE);
558
559 xscale_jtag_set_instr(target->tap,
560 XSCALE_DBGRX << xscale->xscale_variant);
561
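/* DBGRX data register layout: a 3-bit flag field, the 32-bit data word, and a
 * trailing bit written as 1 to flag the RX word as valid (cf. xscale_write_rx) */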
562 bits[0]=3;
563 t[0]=0;
564 bits[1]=32;
565 t[2]=1;
566 bits[2]=1;
567 int endianness = target->endianness;
568 while (done_count++ < count)
569 {
570 switch (size)
571 {
572 case 4:
573 if (endianness == TARGET_LITTLE_ENDIAN)
574 {
575 t[1]=le_to_h_u32(buffer);
576 } else
577 {
578 t[1]=be_to_h_u32(buffer);
579 }
580 break;
581 case 2:
582 if (endianness == TARGET_LITTLE_ENDIAN)
583 {
584 t[1]=le_to_h_u16(buffer);
585 } else
586 {
587 t[1]=be_to_h_u16(buffer);
588 }
589 break;
590 case 1:
591 t[1]=buffer[0];
592 break;
593 default:
594 LOG_ERROR("BUG: size neither 4, 2 nor 1");
595 return ERROR_INVALID_ARGUMENTS;
596 }
597 jtag_add_dr_out(target->tap,
598 3,
599 bits,
600 t,
601 jtag_set_end_state(TAP_IDLE));
602 buffer += size;
603 }
604
605 if ((retval = jtag_execute_queue()) != ERROR_OK)
606 {
607 LOG_ERROR("JTAG error while sending data to debug handler");
608 return retval;
609 }
610
611 return ERROR_OK;
612 }
613
614 static int xscale_send_u32(struct target *target, uint32_t value)
615 {
616 struct xscale_common *xscale = target_to_xscale(target);
617
618 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
619 return xscale_write_rx(target);
620 }
621
622 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
623 {
624 struct xscale_common *xscale = target_to_xscale(target);
625 int retval;
626 struct scan_field fields[3];
627 uint8_t field0 = 0x0;
628 uint8_t field0_check_value = 0x2;
629 uint8_t field0_check_mask = 0x7;
630 uint8_t field2 = 0x0;
631 uint8_t field2_check_value = 0x0;
632 uint8_t field2_check_mask = 0x1;
633
634 if (hold_rst != -1)
635 xscale->hold_rst = hold_rst;
636
637 if (ext_dbg_brk != -1)
638 xscale->external_debug_break = ext_dbg_brk;
639
640 jtag_set_end_state(TAP_IDLE);
641 xscale_jtag_set_instr(target->tap,
642 XSCALE_SELDCSR << xscale->xscale_variant);
643
644 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
645 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
646
647 memset(&fields, 0, sizeof fields);
648
649 fields[0].tap = target->tap;
650 fields[0].num_bits = 3;
651 fields[0].out_value = &field0;
652 uint8_t tmp;
653 fields[0].in_value = &tmp;
654
655 fields[1].tap = target->tap;
656 fields[1].num_bits = 32;
657 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
658
659 fields[2].tap = target->tap;
660 fields[2].num_bits = 1;
661 fields[2].out_value = &field2;
662 uint8_t tmp2;
663 fields[2].in_value = &tmp2;
664
665 jtag_add_dr_scan(3, fields, jtag_get_end_state());
666
667 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
668 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
669
670 if ((retval = jtag_execute_queue()) != ERROR_OK)
671 {
672 LOG_ERROR("JTAG error while writing DCSR");
673 return retval;
674 }
675
676 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
677 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
678
679 return ERROR_OK;
680 }
681
682 /* parity of a 32-bit word: 0 if the number of set bits is even, 1 if odd */
683 static unsigned int parity (unsigned int v)
684 {
685 // unsigned int ov = v;
686 v ^= v >> 16;
687 v ^= v >> 8;
688 v ^= v >> 4;
689 v &= 0xf;
690 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
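/* 0x6996 is a 16-entry lookup table: bit n holds the parity of the nibble value n */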
691 return (0x6996 >> v) & 1;
692 }
693
694 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
695 {
696 struct xscale_common *xscale = target_to_xscale(target);
697 uint8_t packet[4];
698 uint8_t cmd;
699 int word;
700 struct scan_field fields[2];
701
702 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
703
704 /* LDIC into IR */
705 jtag_set_end_state(TAP_IDLE);
706 xscale_jtag_set_instr(target->tap,
707 XSCALE_LDIC << xscale->xscale_variant);
708
709 /* CMD is b011 to load a cacheline into the Mini ICache.
710 * Loading into the main ICache is deprecated, and unused.
711 * It's followed by three zero bits, and 27 address bits.
712 */
713 buf_set_u32(&cmd, 0, 6, 0x3);
714
715 /* virtual address of desired cache line */
716 buf_set_u32(packet, 0, 27, va >> 5);
717
718 memset(&fields, 0, sizeof fields);
719
720 fields[0].tap = target->tap;
721 fields[0].num_bits = 6;
722 fields[0].out_value = &cmd;
723
724 fields[1].tap = target->tap;
725 fields[1].num_bits = 27;
726 fields[1].out_value = packet;
727
728 jtag_add_dr_scan(2, fields, jtag_get_end_state());
729
730 /* rest of packet is a cacheline: 8 instructions, with parity */
731 fields[0].num_bits = 32;
732 fields[0].out_value = packet;
733
734 fields[1].num_bits = 1;
735 fields[1].out_value = &cmd;
736
737 for (word = 0; word < 8; word++)
738 {
739 buf_set_u32(packet, 0, 32, buffer[word]);
740
741 uint32_t value;
742 memcpy(&value, packet, sizeof(uint32_t));
743 cmd = parity(value);
744
745 jtag_add_dr_scan(2, fields, jtag_get_end_state());
746 }
747
748 return jtag_execute_queue();
749 }
750
751 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
752 {
753 struct xscale_common *xscale = target_to_xscale(target);
754 uint8_t packet[4];
755 uint8_t cmd;
756 struct scan_field fields[2];
757
758 jtag_set_end_state(TAP_IDLE);
759 xscale_jtag_set_instr(target->tap,
760 XSCALE_LDIC << xscale->xscale_variant);
761
762 /* CMD for invalidate IC line b000, bits [6:4] b000 */
763 buf_set_u32(&cmd, 0, 6, 0x0);
764
765 /* virtual address of desired cache line */
766 buf_set_u32(packet, 0, 27, va >> 5);
767
768 memset(&fields, 0, sizeof fields);
769
770 fields[0].tap = target->tap;
771 fields[0].num_bits = 6;
772 fields[0].out_value = &cmd;
773
774 fields[1].tap = target->tap;
775 fields[1].num_bits = 27;
776 fields[1].out_value = packet;
777
778 jtag_add_dr_scan(2, fields, jtag_get_end_state());
779
780 return ERROR_OK;
781 }
782
783 static int xscale_update_vectors(struct target *target)
784 {
785 struct xscale_common *xscale = target_to_xscale(target);
786 int i;
787 int retval;
788
789 uint32_t low_reset_branch, high_reset_branch;
790
791 for (i = 1; i < 8; i++)
792 {
793 /* if there's a static vector specified for this exception, override */
794 if (xscale->static_high_vectors_set & (1 << i))
795 {
796 xscale->high_vectors[i] = xscale->static_high_vectors[i];
797 }
798 else
799 {
800 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
801 if (retval == ERROR_TARGET_TIMEOUT)
802 return retval;
803 if (retval != ERROR_OK)
804 {
805 /* Some of these reads will fail as part of normal execution */
806 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
807 }
808 }
809 }
810
811 for (i = 1; i < 8; i++)
812 {
813 if (xscale->static_low_vectors_set & (1 << i))
814 {
815 xscale->low_vectors[i] = xscale->static_low_vectors[i];
816 }
817 else
818 {
819 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
820 if (retval == ERROR_TARGET_TIMEOUT)
821 return retval;
822 if (retval != ERROR_OK)
823 {
824 /* Some of these reads will fail as part of normal execution */
825 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
826 }
827 }
828 }
829
830 /* calculate branches to debug handler */
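/* an ARM B instruction encodes (destination - vector address - 8) >> 2: the -8 accounts
 * for the pipeline (PC reads as the instruction address + 8), and the >> 2 converts the
 * byte offset into the word offset stored in the 24-bit branch field */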
831 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
832 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
833
834 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
835 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
836
837 /* invalidate and load exception vectors in mini i-cache */
838 xscale_invalidate_ic_line(target, 0x0);
839 xscale_invalidate_ic_line(target, 0xffff0000);
840
841 xscale_load_ic(target, 0x0, xscale->low_vectors);
842 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
843
844 return ERROR_OK;
845 }
846
847 static int xscale_arch_state(struct target *target)
848 {
849 struct xscale_common *xscale = target_to_xscale(target);
850 struct arm *armv4_5 = &xscale->armv4_5_common;
851
852 static const char *state[] =
853 {
854 "disabled", "enabled"
855 };
856
857 static const char *arch_dbg_reason[] =
858 {
859 "", "\n(processor reset)", "\n(trace buffer full)"
860 };
861
862 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
863 {
864 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
865 return ERROR_INVALID_ARGUMENTS;
866 }
867
868 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
869 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
870 "MMU: %s, D-Cache: %s, I-Cache: %s"
871 "%s",
872 arm_state_strings[armv4_5->core_state],
873 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
874 arm_mode_name(armv4_5->core_mode),
875 buf_get_u32(armv4_5->cpsr->value, 0, 32),
876 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
877 state[xscale->armv4_5_mmu.mmu_enabled],
878 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
879 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
880 arch_dbg_reason[xscale->arch_debug_reason]);
881
882 return ERROR_OK;
883 }
884
885 static int xscale_poll(struct target *target)
886 {
887 int retval = ERROR_OK;
888
889 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
890 {
891 enum target_state previous_state = target->state;
892 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
893 {
894
895 /* there's data to read from the tx register, we entered debug state */
896 target->state = TARGET_HALTED;
897
898 /* process debug entry, fetching current mode regs */
899 retval = xscale_debug_entry(target);
900 }
901 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
902 {
903 LOG_USER("error while polling TX register, reset CPU");
904 /* here we "lie" so GDB won't get stuck and a reset can be performed */
905 target->state = TARGET_HALTED;
906 }
907
908 /* debug_entry could have overwritten target state (i.e. immediate resume)
909 * don't signal event handlers in that case
910 */
911 if (target->state != TARGET_HALTED)
912 return ERROR_OK;
913
914 /* if target was running, signal that we halted
915 * otherwise we reentered from debug execution */
916 if (previous_state == TARGET_RUNNING)
917 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
918 else
919 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
920 }
921
922 return retval;
923 }
924
925 static int xscale_debug_entry(struct target *target)
926 {
927 struct xscale_common *xscale = target_to_xscale(target);
928 struct arm *armv4_5 = &xscale->armv4_5_common;
929 uint32_t pc;
930 uint32_t buffer[10];
931 int i;
932 int retval;
933 uint32_t moe;
934
935 /* clear external dbg break (will be written on next DCSR read) */
936 xscale->external_debug_break = 0;
937 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
938 return retval;
939
940 /* get r0, pc, r1 to r7 and cpsr */
941 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
942 return retval;
943
944 /* move r0 from buffer to register cache */
945 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
946 armv4_5->core_cache->reg_list[0].dirty = 1;
947 armv4_5->core_cache->reg_list[0].valid = 1;
948 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
949
950 /* move pc from buffer to register cache */
951 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
952 armv4_5->core_cache->reg_list[15].dirty = 1;
953 armv4_5->core_cache->reg_list[15].valid = 1;
954 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
955
956 /* move data from buffer to register cache */
957 for (i = 1; i <= 7; i++)
958 {
959 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
960 armv4_5->core_cache->reg_list[i].dirty = 1;
961 armv4_5->core_cache->reg_list[i].valid = 1;
962 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
963 }
964
965 arm_set_cpsr(armv4_5, buffer[9]);
966 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
967
968 if (!is_arm_mode(armv4_5->core_mode))
969 {
970 target->state = TARGET_UNKNOWN;
971 LOG_ERROR("cpsr contains invalid mode value - communication failure");
972 return ERROR_TARGET_FAILURE;
973 }
974 LOG_DEBUG("target entered debug state in %s mode",
975 arm_mode_name(armv4_5->core_mode));
976
977 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
978 if (armv4_5->spsr) {
979 xscale_receive(target, buffer, 8);
980 buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
981 armv4_5->spsr->dirty = false;
982 armv4_5->spsr->valid = true;
983 }
984 else
985 {
986 /* r8 to r14, but no spsr */
987 xscale_receive(target, buffer, 7);
988 }
989
990 /* move data from buffer to right banked register in cache */
991 for (i = 8; i <= 14; i++)
992 {
993 struct reg *r = arm_reg_current(armv4_5, i);
994
995 buf_set_u32(r->value, 0, 32, buffer[i - 8]);
996 r->dirty = false;
997 r->valid = true;
998 }
999
1000 /* examine debug reason */
1001 xscale_read_dcsr(target);
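/* MOE ("method of entry") is DCSR bits [4:2] and encodes why debug state was entered */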
1002 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1003
1004 /* stored PC (for calculating fixup) */
1005 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1006
1007 switch (moe)
1008 {
1009 case 0x0: /* Processor reset */
1010 target->debug_reason = DBG_REASON_DBGRQ;
1011 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1012 pc -= 4;
1013 break;
1014 case 0x1: /* Instruction breakpoint hit */
1015 target->debug_reason = DBG_REASON_BREAKPOINT;
1016 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1017 pc -= 4;
1018 break;
1019 case 0x2: /* Data breakpoint hit */
1020 target->debug_reason = DBG_REASON_WATCHPOINT;
1021 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1022 pc -= 4;
1023 break;
1024 case 0x3: /* BKPT instruction executed */
1025 target->debug_reason = DBG_REASON_BREAKPOINT;
1026 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1027 pc -= 4;
1028 break;
1029 case 0x4: /* Ext. debug event */
1030 target->debug_reason = DBG_REASON_DBGRQ;
1031 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1032 pc -= 4;
1033 break;
1034 case 0x5: /* Vector trap occurred */
1035 target->debug_reason = DBG_REASON_BREAKPOINT;
1036 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1037 pc -= 4;
1038 break;
1039 case 0x6: /* Trace buffer full break */
1040 target->debug_reason = DBG_REASON_DBGRQ;
1041 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1042 pc -= 4;
1043 break;
1044 case 0x7: /* Reserved (may flag Hot-Debug support) */
1045 default:
1046 LOG_ERROR("Method of Entry is 'Reserved'");
1047 exit(-1);
1048 break;
1049 }
1050
1051 /* apply PC fixup */
1052 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1053
1054 /* on the first debug entry, identify cache type */
1055 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1056 {
1057 uint32_t cache_type_reg;
1058
1059 /* read cp15 cache type register */
1060 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1061 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1062
1063 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1064 }
1065
1066 /* examine MMU and Cache settings */
1067 /* read cp15 control register */
1068 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1069 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1070 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1071 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1072 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1073
1074 /* tracing enabled, read collected trace data */
1075 if (xscale->trace.buffer_enabled)
1076 {
1077 xscale_read_trace(target);
1078 xscale->trace.buffer_fill--;
1079
1080 /* resume if we're still collecting trace data */
1081 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1082 && (xscale->trace.buffer_fill > 0))
1083 {
1084 xscale_resume(target, 1, 0x0, 1, 0);
1085 }
1086 else
1087 {
1088 xscale->trace.buffer_enabled = 0;
1089 }
1090 }
1091
1092 return ERROR_OK;
1093 }
1094
1095 static int xscale_halt(struct target *target)
1096 {
1097 struct xscale_common *xscale = target_to_xscale(target);
1098
1099 LOG_DEBUG("target->state: %s",
1100 target_state_name(target));
1101
1102 if (target->state == TARGET_HALTED)
1103 {
1104 LOG_DEBUG("target was already halted");
1105 return ERROR_OK;
1106 }
1107 else if (target->state == TARGET_UNKNOWN)
1108 {
1109 /* this must not happen for an XScale target */
1110 LOG_ERROR("target was in unknown state when halt was requested");
1111 return ERROR_TARGET_INVALID;
1112 }
1113 else if (target->state == TARGET_RESET)
1114 {
1115 LOG_DEBUG("target->state == TARGET_RESET");
1116 }
1117 else
1118 {
1119 /* assert external dbg break */
1120 xscale->external_debug_break = 1;
1121 xscale_read_dcsr(target);
1122
1123 target->debug_reason = DBG_REASON_DBGRQ;
1124 }
1125
1126 return ERROR_OK;
1127 }
1128
1129 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1130 {
1131 struct xscale_common *xscale = target_to_xscale(target);
1132 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1133 int retval;
1134
1135 if (xscale->ibcr0_used)
1136 {
1137 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1138
1139 if (ibcr0_bp)
1140 {
1141 xscale_unset_breakpoint(target, ibcr0_bp);
1142 }
1143 else
1144 {
1145 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1146 exit(-1);
1147 }
1148 }
1149
1150 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1151 return retval;
1152
1153 return ERROR_OK;
1154 }
1155
1156 static int xscale_disable_single_step(struct target *target)
1157 {
1158 struct xscale_common *xscale = target_to_xscale(target);
1159 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1160 int retval;
1161
1162 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1163 return retval;
1164
1165 return ERROR_OK;
1166 }
1167
1168 static void xscale_enable_watchpoints(struct target *target)
1169 {
1170 struct watchpoint *watchpoint = target->watchpoints;
1171
1172 while (watchpoint)
1173 {
1174 if (watchpoint->set == 0)
1175 xscale_set_watchpoint(target, watchpoint);
1176 watchpoint = watchpoint->next;
1177 }
1178 }
1179
1180 static void xscale_enable_breakpoints(struct target *target)
1181 {
1182 struct breakpoint *breakpoint = target->breakpoints;
1183
1184 /* set any pending breakpoints */
1185 while (breakpoint)
1186 {
1187 if (breakpoint->set == 0)
1188 xscale_set_breakpoint(target, breakpoint);
1189 breakpoint = breakpoint->next;
1190 }
1191 }
1192
1193 static int xscale_resume(struct target *target, int current,
1194 uint32_t address, int handle_breakpoints, int debug_execution)
1195 {
1196 struct xscale_common *xscale = target_to_xscale(target);
1197 struct arm *armv4_5 = &xscale->armv4_5_common;
1198 struct breakpoint *breakpoint = target->breakpoints;
1199 uint32_t current_pc;
1200 int retval;
1201 int i;
1202
1203 LOG_DEBUG("-");
1204
1205 if (target->state != TARGET_HALTED)
1206 {
1207 LOG_WARNING("target not halted");
1208 return ERROR_TARGET_NOT_HALTED;
1209 }
1210
1211 if (!debug_execution)
1212 {
1213 target_free_all_working_areas(target);
1214 }
1215
1216 /* update vector tables */
1217 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1218 return retval;
1219
1220 /* current = 1: continue on current pc, otherwise continue at <address> */
1221 if (!current)
1222 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1223
1224 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1225
1226 /* if we're at the reset vector, we have to simulate the branch */
1227 if (current_pc == 0x0)
1228 {
1229 arm_simulate_step(target, NULL);
1230 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1231 }
1232
1233 /* the front-end may request us not to handle breakpoints */
1234 if (handle_breakpoints)
1235 {
1236 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1237 {
1238 uint32_t next_pc;
1239
1240 /* there's a breakpoint at the current PC, we have to step over it */
1241 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1242 xscale_unset_breakpoint(target, breakpoint);
1243
1244 /* calculate PC of next instruction */
1245 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1246 {
1247 uint32_t current_opcode;
1248 target_read_u32(target, current_pc, &current_opcode);
1249 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1250 }
1251
1252 LOG_DEBUG("enable single-step");
1253 xscale_enable_single_step(target, next_pc);
1254
1255 /* restore banked registers */
1256 retval = xscale_restore_banked(target);
1257
1258 /* send resume request (command 0x30 or 0x31)
1259 * clean the trace buffer if it is to be enabled (0x62) */
1260 if (xscale->trace.buffer_enabled)
1261 {
1262 xscale_send_u32(target, 0x62);
1263 xscale_send_u32(target, 0x31);
1264 }
1265 else
1266 xscale_send_u32(target, 0x30);
1267
1268 /* send CPSR */
1269 xscale_send_u32(target,
1270 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1271 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1272 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1273
1274 for (i = 7; i >= 0; i--)
1275 {
1276 /* send register */
1277 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1278 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1279 }
1280
1281 /* send PC */
1282 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1283 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1284
1285 /* wait for and process debug entry */
1286 xscale_debug_entry(target);
1287
1288 LOG_DEBUG("disable single-step");
1289 xscale_disable_single_step(target);
1290
1291 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1292 xscale_set_breakpoint(target, breakpoint);
1293 }
1294 }
1295
1296 /* enable any pending breakpoints and watchpoints */
1297 xscale_enable_breakpoints(target);
1298 xscale_enable_watchpoints(target);
1299
1300 /* restore banked registers */
1301 retval = xscale_restore_banked(target);
1302
1303 /* send resume request (command 0x30 or 0x31)
1304 * clean the trace buffer if it is to be enabled (0x62) */
1305 if (xscale->trace.buffer_enabled)
1306 {
1307 xscale_send_u32(target, 0x62);
1308 xscale_send_u32(target, 0x31);
1309 }
1310 else
1311 xscale_send_u32(target, 0x30);
1312
1313 /* send CPSR */
1314 xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
1315 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1316 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1317
1318 for (i = 7; i >= 0; i--)
1319 {
1320 /* send register */
1321 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1322 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1323 }
1324
1325 /* send PC */
1326 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1327 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1328
1329 target->debug_reason = DBG_REASON_NOTHALTED;
1330
1331 if (!debug_execution)
1332 {
1333 /* registers are now invalid */
1334 register_cache_invalidate(armv4_5->core_cache);
1335 target->state = TARGET_RUNNING;
1336 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1337 }
1338 else
1339 {
1340 target->state = TARGET_DEBUG_RUNNING;
1341 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1342 }
1343
1344 LOG_DEBUG("target resumed");
1345
1346 return ERROR_OK;
1347 }
1348
1349 static int xscale_step_inner(struct target *target, int current,
1350 uint32_t address, int handle_breakpoints)
1351 {
1352 struct xscale_common *xscale = target_to_xscale(target);
1353 struct arm *armv4_5 = &xscale->armv4_5_common;
1354 uint32_t next_pc;
1355 int retval;
1356 int i;
1357
1358 target->debug_reason = DBG_REASON_SINGLESTEP;
1359
1360 /* calculate PC of next instruction */
1361 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1362 {
1363 uint32_t current_opcode, current_pc;
1364 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1365
1366 target_read_u32(target, current_pc, &current_opcode);
1367 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1368 return retval;
1369 }
1370
1371 LOG_DEBUG("enable single-step");
1372 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1373 return retval;
1374
1375 /* restore banked registers */
1376 if ((retval = xscale_restore_banked(target)) != ERROR_OK)
1377 return retval;
1378
1379 /* send resume request (command 0x30 or 0x31)
1380 * clean the trace buffer if it is to be enabled (0x62) */
1381 if (xscale->trace.buffer_enabled)
1382 {
1383 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1384 return retval;
1385 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1386 return retval;
1387 }
1388 else
1389 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1390 return retval;
1391
1392 /* send CPSR */
1393 retval = xscale_send_u32(target,
1394 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1395 if (retval != ERROR_OK)
1396 return retval;
1397 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1398 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1399
1400 for (i = 7; i >= 0; i--)
1401 {
1402 /* send register */
1403 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1404 return retval;
1405 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1406 }
1407
1408 /* send PC */
1409 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
1410 return retval;
1411 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1412
1413 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1414
1415 /* registers are now invalid */
1416 register_cache_invalidate(armv4_5->core_cache);
1417
1418 /* wait for and process debug entry */
1419 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1420 return retval;
1421
1422 LOG_DEBUG("disable single-step");
1423 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1424 return retval;
1425
1426 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1427
1428 return ERROR_OK;
1429 }
1430
1431 static int xscale_step(struct target *target, int current,
1432 uint32_t address, int handle_breakpoints)
1433 {
1434 struct arm *armv4_5 = target_to_arm(target);
1435 struct breakpoint *breakpoint = target->breakpoints;
1436
1437 uint32_t current_pc;
1438 int retval;
1439
1440 if (target->state != TARGET_HALTED)
1441 {
1442 LOG_WARNING("target not halted");
1443 return ERROR_TARGET_NOT_HALTED;
1444 }
1445
1446 /* current = 1: continue on current pc, otherwise continue at <address> */
1447 if (!current)
1448 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1449
1450 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1451
1452 /* if we're at the reset vector, we have to simulate the step */
1453 if (current_pc == 0x0)
1454 {
1455 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1456 return retval;
1457 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1458
1459 target->debug_reason = DBG_REASON_SINGLESTEP;
1460 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1461
1462 return ERROR_OK;
1463 }
1464
1465 /* the front-end may request us not to handle breakpoints */
1466 if (handle_breakpoints)
1467 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1468 {
1469 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1470 return retval;
1471 }
1472
1473 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1474
1475 if (breakpoint)
1476 {
1477 xscale_set_breakpoint(target, breakpoint);
1478 }
1479
1480 LOG_DEBUG("target stepped");
1481
1482 return ERROR_OK;
1483
1484 }
1485
1486 static int xscale_assert_reset(struct target *target)
1487 {
1488 struct xscale_common *xscale = target_to_xscale(target);
1489
1490 LOG_DEBUG("target->state: %s",
1491 target_state_name(target));
1492
1493 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1494 * end up in T-L-R, which would reset JTAG)
1495 */
1496 jtag_set_end_state(TAP_IDLE);
1497 xscale_jtag_set_instr(target->tap,
1498 XSCALE_SELDCSR << xscale->xscale_variant);
1499
1500 /* set Hold reset, Halt mode and Trap Reset */
1501 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1502 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1503 xscale_write_dcsr(target, 1, 0);
1504
1505 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1506 xscale_jtag_set_instr(target->tap, 0x7f);
1507 jtag_execute_queue();
1508
1509 /* assert reset */
1510 jtag_add_reset(0, 1);
1511
1512 /* sleep 1ms, to be sure we fulfill any requirements */
1513 jtag_add_sleep(1000);
1514 jtag_execute_queue();
1515
1516 target->state = TARGET_RESET;
1517
1518 if (target->reset_halt)
1519 {
1520 int retval;
1521 if ((retval = target_halt(target)) != ERROR_OK)
1522 return retval;
1523 }
1524
1525 return ERROR_OK;
1526 }
1527
1528 static int xscale_deassert_reset(struct target *target)
1529 {
1530 struct xscale_common *xscale = target_to_xscale(target);
1531 struct breakpoint *breakpoint = target->breakpoints;
1532
1533 LOG_DEBUG("-");
1534
1535 xscale->ibcr_available = 2;
1536 xscale->ibcr0_used = 0;
1537 xscale->ibcr1_used = 0;
1538
1539 xscale->dbr_available = 2;
1540 xscale->dbr0_used = 0;
1541 xscale->dbr1_used = 0;
1542
1543 /* mark all hardware breakpoints as unset */
1544 while (breakpoint)
1545 {
1546 if (breakpoint->type == BKPT_HARD)
1547 {
1548 breakpoint->set = 0;
1549 }
1550 breakpoint = breakpoint->next;
1551 }
1552
1553 register_cache_invalidate(xscale->armv4_5_common.core_cache);
1554
1555 /* FIXME mark hardware watchpoints as unset too. Also,
1556 * at least some of the XScale registers are invalid...
1557 */
1558
1559 /*
1560 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1561 * contents got invalidated. Safer to force that, so writing new
1562 * contents can't ever fail..
1563 */
1564 {
1565 uint32_t address;
1566 unsigned buf_cnt;
1567 const uint8_t *buffer = xscale_debug_handler;
1568 int retval;
1569
1570 /* release SRST */
1571 jtag_add_reset(0, 0);
1572
1573 /* wait 300ms; 150 and 100ms were not enough */
1574 jtag_add_sleep(300*1000);
1575
1576 jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
1577 jtag_execute_queue();
1578
1579 /* set Hold reset, Halt mode and Trap Reset */
1580 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1581 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1582 xscale_write_dcsr(target, 1, 0);
1583
1584 /* Load the debug handler into the mini-icache. Since
1585 * it's using halt mode (not monitor mode), it runs in
1586 * "Special Debug State" for access to registers, memory,
1587 * coprocessors, trace data, etc.
1588 */
1589 address = xscale->handler_address;
1590 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1591 binary_size > 0;
1592 binary_size -= buf_cnt, buffer += buf_cnt)
1593 {
1594 uint32_t cache_line[8];
1595 unsigned i;
1596
1597 buf_cnt = binary_size;
1598 if (buf_cnt > 32)
1599 buf_cnt = 32;
1600
1601 for (i = 0; i < buf_cnt; i += 4)
1602 {
1603 /* convert LE buffer to host-endian uint32_t */
1604 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1605 }
1606
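/* pad the rest of the cache line with 0xe1a08008, i.e. "mov r8, r8" (a NOP) */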
1607 for (; i < 32; i += 4)
1608 {
1609 cache_line[i / 4] = 0xe1a08008;
1610 }
1611
1612 /* only load addresses other than the reset vectors */
1613 if ((address % 0x400) != 0x0)
1614 {
1615 retval = xscale_load_ic(target, address,
1616 cache_line);
1617 if (retval != ERROR_OK)
1618 return retval;
1619 }
1620
1621 address += buf_cnt;
1622 };
1623
1624 retval = xscale_load_ic(target, 0x0,
1625 xscale->low_vectors);
1626 if (retval != ERROR_OK)
1627 return retval;
1628 retval = xscale_load_ic(target, 0xffff0000,
1629 xscale->high_vectors);
1630 if (retval != ERROR_OK)
1631 return retval;
1632
1633 jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
1634
1635 jtag_add_sleep(100000);
1636
1637 /* set Hold reset, Halt mode and Trap Reset */
1638 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1639 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1640 xscale_write_dcsr(target, 1, 0);
1641
1642 /* clear Hold reset to let the target run (should enter debug handler) */
1643 xscale_write_dcsr(target, 0, 1);
1644 target->state = TARGET_RUNNING;
1645
1646 if (!target->reset_halt)
1647 {
1648 jtag_add_sleep(10000);
1649
1650 /* we should have entered debug now */
1651 xscale_debug_entry(target);
1652 target->state = TARGET_HALTED;
1653
1654 /* resume the target */
1655 xscale_resume(target, 1, 0x0, 1, 0);
1656 }
1657 }
1658
1659 return ERROR_OK;
1660 }
1661
1662 static int xscale_read_core_reg(struct target *target, struct reg *r,
1663 int num, enum arm_mode mode)
1664 {
1665 /** \todo add debug handler support for core register reads */
1666 LOG_ERROR("not implemented");
1667 return ERROR_OK;
1668 }
1669
1670 static int xscale_write_core_reg(struct target *target, struct reg *r,
1671 int num, enum arm_mode mode, uint32_t value)
1672 {
1673 /** \todo add debug handler support for core register writes */
1674 LOG_ERROR("not implemented");
1675 return ERROR_OK;
1676 }
1677
1678 static int xscale_full_context(struct target *target)
1679 {
1680 struct arm *armv4_5 = target_to_arm(target);
1681
1682 uint32_t *buffer;
1683
1684 int i, j;
1685
1686 LOG_DEBUG("-");
1687
1688 if (target->state != TARGET_HALTED)
1689 {
1690 LOG_WARNING("target not halted");
1691 return ERROR_TARGET_NOT_HALTED;
1692 }
1693
1694 buffer = malloc(4 * 8);
1695
1696 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1697 * we can't enter User mode on an XScale (unpredictable),
1698 * but User shares registers with SYS
1699 */
1700 for (i = 1; i < 7; i++)
1701 {
1702 enum arm_mode mode = armv4_5_number_to_mode(i);
1703 bool valid = true;
1704 struct reg *r;
1705
1706 if (mode == ARM_MODE_USR)
1707 continue;
1708
1709 /* check if there are invalid registers in the current mode
1710 */
1711 for (j = 0; valid && j <= 16; j++)
1712 {
1713 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1714 mode, j).valid)
1715 valid = false;
1716 }
1717 if (valid)
1718 continue;
1719
1720 /* request banked registers */
1721 xscale_send_u32(target, 0x0);
1722
1723 /* send CPSR for desired bank mode */
1724 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1725
1726 /* get banked registers: r8 to r14; and SPSR
1727 * except in USR/SYS mode
1728 */
1729 if (mode != ARM_MODE_SYS) {
1730 /* SPSR */
1731 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1732 mode, 16);
1733
1734 xscale_receive(target, buffer, 8);
1735
1736 buf_set_u32(r->value, 0, 32, buffer[7]);
1737 r->dirty = false;
1738 r->valid = true;
1739 } else {
1740 xscale_receive(target, buffer, 7);
1741 }
1742
1743 /* move data from buffer to register cache */
1744 for (j = 8; j <= 14; j++)
1745 {
1746 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1747 mode, j);
1748
1749 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1750 r->dirty = false;
1751 r->valid = true;
1752 }
1753 }
1754
1755 free(buffer);
1756
1757 return ERROR_OK;
1758 }
1759
1760 static int xscale_restore_banked(struct target *target)
1761 {
1762 struct arm *armv4_5 = target_to_arm(target);
1763
1764 int i, j;
1765
1766 if (target->state != TARGET_HALTED)
1767 {
1768 LOG_WARNING("target not halted");
1769 return ERROR_TARGET_NOT_HALTED;
1770 }
1771
1772 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1773 * and check if any banked registers need to be written. Ignore
1774 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1775 * an XScale (unpredictable), but they share all registers.
1776 */
1777 for (i = 1; i < 7; i++)
1778 {
1779 enum arm_mode mode = armv4_5_number_to_mode(i);
1780 struct reg *r;
1781
1782 if (mode == ARM_MODE_USR)
1783 continue;
1784
1785 /* check if there are dirty registers in this mode */
1786 for (j = 8; j <= 14; j++)
1787 {
1788 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1789 mode, j).dirty)
1790 goto dirty;
1791 }
1792
1793 /* if not USR/SYS, check if the SPSR needs to be written */
1794 if (mode != ARM_MODE_SYS)
1795 {
1796 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1797 mode, 16).dirty)
1798 goto dirty;
1799 }
1800
1801 /* there's nothing to flush for this mode */
1802 continue;
1803
1804 dirty:
1805 /* command 0x1: "send banked registers" */
1806 xscale_send_u32(target, 0x1);
1807
1808 /* send CPSR for desired mode */
1809 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1810
1811 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1812 * but this protocol doesn't understand that nuance.
1813 */
1814 for (j = 8; j <= 14; j++) {
1815 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1816 mode, j);
1817 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1818 r->dirty = false;
1819 }
1820
1821 /* send spsr if not in USR/SYS mode */
1822 if (mode != ARM_MODE_SYS) {
1823 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1824 mode, 16);
1825 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1826 r->dirty = false;
1827 }
1828 }
1829
1830 return ERROR_OK;
1831 }
1832
1833 static int xscale_read_memory(struct target *target, uint32_t address,
1834 uint32_t size, uint32_t count, uint8_t *buffer)
1835 {
1836 struct xscale_common *xscale = target_to_xscale(target);
1837 uint32_t *buf32;
1838 uint32_t i;
1839 int retval;
1840
1841 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1842
1843 if (target->state != TARGET_HALTED)
1844 {
1845 LOG_WARNING("target not halted");
1846 return ERROR_TARGET_NOT_HALTED;
1847 }
1848
1849 /* sanitize arguments */
1850 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1851 return ERROR_INVALID_ARGUMENTS;
1852
1853 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1854 return ERROR_TARGET_UNALIGNED_ACCESS;
1855
1856 /* send memory read request (command 0x1n, n: access size) */
1857 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1858 return retval;
1859
1860 /* send base address for read request */
1861 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1862 return retval;
1863
1864 /* send number of requested data words */
1865 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1866 return retval;
1867
1868 /* receive data from target (count 32-bit words in host endianness) */
1869 buf32 = malloc(4 * count);
1870 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1871 return retval;
1872
1873 /* extract data from host-endian buffer into byte stream */
1874 for (i = 0; i < count; i++)
1875 {
1876 switch (size)
1877 {
1878 case 4:
1879 target_buffer_set_u32(target, buffer, buf32[i]);
1880 buffer += 4;
1881 break;
1882 case 2:
1883 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1884 buffer += 2;
1885 break;
1886 case 1:
1887 *buffer++ = buf32[i] & 0xff;
1888 break;
1889 default:
1890 LOG_ERROR("invalid read size");
1891 return ERROR_INVALID_ARGUMENTS;
1892 }
1893 }
1894
1895 free(buf32);
1896
1897 /* examine DCSR, to see if Sticky Abort (SA) got set */
1898 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1899 return retval;
1900 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1901 {
1902 /* clear SA bit */
1903 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1904 return retval;
1905
1906 return ERROR_TARGET_DATA_ABORT;
1907 }
1908
1909 return ERROR_OK;
1910 }
1911
1912 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1913 uint32_t size, uint32_t count, uint8_t *buffer)
1914 {
1915 /** \todo: provide a non-stub implementation of this routine. */
1916 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1917 target_name(target), __func__);
1918 return ERROR_FAIL;
1919 }
1920
1921 static int xscale_write_memory(struct target *target, uint32_t address,
1922 uint32_t size, uint32_t count, uint8_t *buffer)
1923 {
1924 struct xscale_common *xscale = target_to_xscale(target);
1925 int retval;
1926
1927 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1928
1929 if (target->state != TARGET_HALTED)
1930 {
1931 LOG_WARNING("target not halted");
1932 return ERROR_TARGET_NOT_HALTED;
1933 }
1934
1935 /* sanitize arguments */
1936 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1937 return ERROR_INVALID_ARGUMENTS;
1938
1939 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1940 return ERROR_TARGET_UNALIGNED_ACCESS;
1941
1942 /* send memory write request (command 0x2n, n: access size) */
1943 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1944 return retval;
1945
1946 /* send base address for write request */
1947 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1948 return retval;
1949
1950 /* send number of requested data words to be written */
1951 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1952 return retval;
1953
1954 /* extract data from host-endian buffer into byte stream */
1955 #if 0
1956 for (i = 0; i < count; i++)
1957 {
1958 switch (size)
1959 {
1960 case 4:
1961 value = target_buffer_get_u32(target, buffer);
1962 xscale_send_u32(target, value);
1963 buffer += 4;
1964 break;
1965 case 2:
1966 value = target_buffer_get_u16(target, buffer);
1967 xscale_send_u32(target, value);
1968 buffer += 2;
1969 break;
1970 case 1:
1971 value = *buffer;
1972 xscale_send_u32(target, value);
1973 buffer += 1;
1974 break;
1975 default:
1976 LOG_ERROR("should never get here");
1977 exit(-1);
1978 }
1979 }
1980 #endif
1981 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1982 return retval;
1983
1984 /* examine DCSR, to see if Sticky Abort (SA) got set */
1985 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1986 return retval;
1987 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1988 {
1989 /* clear SA bit */
1990 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1991 return retval;
1992
1993 return ERROR_TARGET_DATA_ABORT;
1994 }
1995
1996 return ERROR_OK;
1997 }
1998
1999 static int xscale_write_phys_memory(struct target *target, uint32_t address,
2000 uint32_t size, uint32_t count, uint8_t *buffer)
2001 {
2002 /** \todo: provide a non-stub implementation of this routine. */
2003 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
2004 target_name(target), __func__);
2005 return ERROR_FAIL;
2006 }
2007
2008 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
2009 uint32_t count, uint8_t *buffer)
2010 {
2011 return xscale_write_memory(target, address, 4, count, buffer);
2012 }
2013
2014 static uint32_t xscale_get_ttb(struct target *target)
2015 {
2016 struct xscale_common *xscale = target_to_xscale(target);
2017 uint32_t ttb;
2018
2019 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2020 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2021
2022 return ttb;
2023 }
2024
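/* The MMU/cache helpers below work on the CP15 control register through the
 * register cache (XSCALE_CTRL).  The bits touched are the usual ARM ones:
 * bit 0 = MMU enable, bit 2 = data/unified cache enable, bit 12 = instruction
 * cache enable.  Cache maintenance goes through the debug handler:
 * 0x50 = clean DCache (using cache_clean_address), 0x51 = invalidate DCache,
 * 0x52 = invalidate ICache, 0x53 = CPWAIT.
 */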
2025 static void xscale_disable_mmu_caches(struct target *target, int mmu,
2026 int d_u_cache, int i_cache)
2027 {
2028 struct xscale_common *xscale = target_to_xscale(target);
2029 uint32_t cp15_control;
2030
2031 /* read cp15 control register */
2032 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2033 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2034
2035 if (mmu)
2036 cp15_control &= ~0x1U;
2037
2038 if (d_u_cache)
2039 {
2040 /* clean DCache */
2041 xscale_send_u32(target, 0x50);
2042 xscale_send_u32(target, xscale->cache_clean_address);
2043
2044 /* invalidate DCache */
2045 xscale_send_u32(target, 0x51);
2046
2047 cp15_control &= ~0x4U;
2048 }
2049
2050 if (i_cache)
2051 {
2052 /* invalidate ICache */
2053 xscale_send_u32(target, 0x52);
2054 cp15_control &= ~0x1000U;
2055 }
2056
2057 /* write new cp15 control register */
2058 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2059
2060 /* execute cpwait to ensure outstanding operations complete */
2061 xscale_send_u32(target, 0x53);
2062 }
2063
2064 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2065 int d_u_cache, int i_cache)
2066 {
2067 struct xscale_common *xscale = target_to_xscale(target);
2068 uint32_t cp15_control;
2069
2070 /* read cp15 control register */
2071 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2072 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2073
2074 if (mmu)
2075 cp15_control |= 0x1U;
2076
2077 if (d_u_cache)
2078 cp15_control |= 0x4U;
2079
2080 if (i_cache)
2081 cp15_control |= 0x1000U;
2082
2083 /* write new cp15 control register */
2084 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2085
2086 /* execute cpwait to ensure outstanding operations complete */
2087 xscale_send_u32(target, 0x53);
2088 }
2089
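/* Hardware breakpoints use the two instruction breakpoint registers
 * IBCR0/IBCR1; bit 0 of the value written (address | 1) is the enable bit.
 * Software breakpoints patch the instruction stream with xscale->arm_bkpt or
 * xscale->thumb_bkpt and keep the original opcode in breakpoint->orig_instr.
 */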
2090 static int xscale_set_breakpoint(struct target *target,
2091 struct breakpoint *breakpoint)
2092 {
2093 int retval;
2094 struct xscale_common *xscale = target_to_xscale(target);
2095
2096 if (target->state != TARGET_HALTED)
2097 {
2098 LOG_WARNING("target not halted");
2099 return ERROR_TARGET_NOT_HALTED;
2100 }
2101
2102 if (breakpoint->set)
2103 {
2104 LOG_WARNING("breakpoint already set");
2105 return ERROR_OK;
2106 }
2107
2108 if (breakpoint->type == BKPT_HARD)
2109 {
2110 uint32_t value = breakpoint->address | 1;
2111 if (!xscale->ibcr0_used)
2112 {
2113 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2114 xscale->ibcr0_used = 1;
2115 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2116 }
2117 else if (!xscale->ibcr1_used)
2118 {
2119 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2120 xscale->ibcr1_used = 1;
2121 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2122 }
2123 else
2124 {
2125 LOG_ERROR("BUG: no hardware comparator available");
2126 return ERROR_OK;
2127 }
2128 }
2129 else if (breakpoint->type == BKPT_SOFT)
2130 {
2131 if (breakpoint->length == 4)
2132 {
2133 /* keep the original instruction in target endianness */
2134 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2135 {
2136 return retval;
2137 }
2138 /* write the ARM breakpoint instruction in target endianness (xscale->arm_bkpt is host endian) */
2139 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2140 {
2141 return retval;
2142 }
2143 }
2144 else
2145 {
2146 /* keep the original instruction in target endianness */
2147 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2148 {
2149 return retval;
2150 }
2151 /* write the Thumb breakpoint instruction in target endianness (xscale->thumb_bkpt is host endian) */
2152 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2153 {
2154 return retval;
2155 }
2156 }
2157 breakpoint->set = 1;
2158 }
2159
2160 return ERROR_OK;
2161 }
2162
2163 static int xscale_add_breakpoint(struct target *target,
2164 struct breakpoint *breakpoint)
2165 {
2166 struct xscale_common *xscale = target_to_xscale(target);
2167
2168 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2169 {
2170 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2171 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2172 }
2173
2174 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2175 {
2176 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2177 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2178 }
2179
2180 if (breakpoint->type == BKPT_HARD)
2181 {
2182 xscale->ibcr_available--;
2183 }
2184
2185 return ERROR_OK;
2186 }
2187
2188 static int xscale_unset_breakpoint(struct target *target,
2189 struct breakpoint *breakpoint)
2190 {
2191 int retval;
2192 struct xscale_common *xscale = target_to_xscale(target);
2193
2194 if (target->state != TARGET_HALTED)
2195 {
2196 LOG_WARNING("target not halted");
2197 return ERROR_TARGET_NOT_HALTED;
2198 }
2199
2200 if (!breakpoint->set)
2201 {
2202 LOG_WARNING("breakpoint not set");
2203 return ERROR_OK;
2204 }
2205
2206 if (breakpoint->type == BKPT_HARD)
2207 {
2208 if (breakpoint->set == 1)
2209 {
2210 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2211 xscale->ibcr0_used = 0;
2212 }
2213 else if (breakpoint->set == 2)
2214 {
2215 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2216 xscale->ibcr1_used = 0;
2217 }
2218 breakpoint->set = 0;
2219 }
2220 else
2221 {
2222 /* restore original instruction (kept in target endianness) */
2223 if (breakpoint->length == 4)
2224 {
2225 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2226 {
2227 return retval;
2228 }
2229 }
2230 else
2231 {
2232 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2233 {
2234 return retval;
2235 }
2236 }
2237 breakpoint->set = 0;
2238 }
2239
2240 return ERROR_OK;
2241 }
2242
2243 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2244 {
2245 struct xscale_common *xscale = target_to_xscale(target);
2246
2247 if (target->state != TARGET_HALTED)
2248 {
2249 LOG_WARNING("target not halted");
2250 return ERROR_TARGET_NOT_HALTED;
2251 }
2252
2253 if (breakpoint->set)
2254 {
2255 xscale_unset_breakpoint(target, breakpoint);
2256 }
2257
2258 if (breakpoint->type == BKPT_HARD)
2259 xscale->ibcr_available++;
2260
2261 return ERROR_OK;
2262 }
2263
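/* Data watchpoints use the two data breakpoint registers DBR0/DBR1, enabled
 * and qualified through DBCON: bits [1:0] control DBR0 and bits [3:2] control
 * DBR1, with the per-comparator encoding used below (0x1 = store only,
 * 0x2 = any access, 0x3 = load only, 0x0 = disabled).
 */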
2264 static int xscale_set_watchpoint(struct target *target,
2265 struct watchpoint *watchpoint)
2266 {
2267 struct xscale_common *xscale = target_to_xscale(target);
2268 uint8_t enable = 0;
2269 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2270 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2271
2272 if (target->state != TARGET_HALTED)
2273 {
2274 LOG_WARNING("target not halted");
2275 return ERROR_TARGET_NOT_HALTED;
2276 }
2277
2278 xscale_get_reg(dbcon);
2279
2280 switch (watchpoint->rw)
2281 {
2282 case WPT_READ:
2283 enable = 0x3;
2284 break;
2285 case WPT_ACCESS:
2286 enable = 0x2;
2287 break;
2288 case WPT_WRITE:
2289 enable = 0x1;
2290 break;
2291 default:
2292 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2293 }
2294
2295 if (!xscale->dbr0_used)
2296 {
2297 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2298 dbcon_value |= enable;
2299 xscale_set_reg_u32(dbcon, dbcon_value);
2300 watchpoint->set = 1;
2301 xscale->dbr0_used = 1;
2302 }
2303 else if (!xscale->dbr1_used)
2304 {
2305 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2306 dbcon_value |= enable << 2;
2307 xscale_set_reg_u32(dbcon, dbcon_value);
2308 watchpoint->set = 2;
2309 xscale->dbr1_used = 1;
2310 }
2311 else
2312 {
2313 LOG_ERROR("BUG: no hardware comparator available");
2314 return ERROR_OK;
2315 }
2316
2317 return ERROR_OK;
2318 }
2319
2320 static int xscale_add_watchpoint(struct target *target,
2321 struct watchpoint *watchpoint)
2322 {
2323 struct xscale_common *xscale = target_to_xscale(target);
2324
2325 if (xscale->dbr_available < 1)
2326 {
2327 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2328 }
2329
2330 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2331 {
2332 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2333 }
2334
2335 xscale->dbr_available--;
2336
2337 return ERROR_OK;
2338 }
2339
2340 static int xscale_unset_watchpoint(struct target *target,
2341 struct watchpoint *watchpoint)
2342 {
2343 struct xscale_common *xscale = target_to_xscale(target);
2344 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2345 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2346
2347 if (target->state != TARGET_HALTED)
2348 {
2349 LOG_WARNING("target not halted");
2350 return ERROR_TARGET_NOT_HALTED;
2351 }
2352
2353 if (!watchpoint->set)
2354 {
2355 LOG_WARNING("watchpoint not set");
2356 return ERROR_OK;
2357 }
2358
2359 if (watchpoint->set == 1)
2360 {
2361 dbcon_value &= ~0x3;
2362 xscale_set_reg_u32(dbcon, dbcon_value);
2363 xscale->dbr0_used = 0;
2364 }
2365 else if (watchpoint->set == 2)
2366 {
2367 dbcon_value &= ~0xc;
2368 xscale_set_reg_u32(dbcon, dbcon_value);
2369 xscale->dbr1_used = 0;
2370 }
2371 watchpoint->set = 0;
2372
2373 return ERROR_OK;
2374 }
2375
2376 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2377 {
2378 struct xscale_common *xscale = target_to_xscale(target);
2379
2380 if (target->state != TARGET_HALTED)
2381 {
2382 LOG_WARNING("target not halted");
2383 return ERROR_TARGET_NOT_HALTED;
2384 }
2385
2386 if (watchpoint->set)
2387 {
2388 xscale_unset_watchpoint(target, watchpoint);
2389 }
2390
2391 xscale->dbr_available++;
2392
2393 return ERROR_OK;
2394 }
2395
2396 static int xscale_get_reg(struct reg *reg)
2397 {
2398 struct xscale_reg *arch_info = reg->arch_info;
2399 struct target *target = arch_info->target;
2400 struct xscale_common *xscale = target_to_xscale(target);
2401
2402 /* DCSR, TX and RX are accessible via JTAG */
2403 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2404 {
2405 return xscale_read_dcsr(arch_info->target);
2406 }
2407 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2408 {
2409 /* 1 = consume register content */
2410 return xscale_read_tx(arch_info->target, 1);
2411 }
2412 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2413 {
2414 /* can't read from RX register (host -> debug handler) */
2415 return ERROR_OK;
2416 }
2417 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2418 {
2419 /* can't (explicitly) read from TXRXCTRL register */
2420 return ERROR_OK;
2421 }
2422 else /* Other DBG registers have to be transfered by the debug handler */
2423 {
2424 /* send CP read request (command 0x40) */
2425 xscale_send_u32(target, 0x40);
2426
2427 /* send CP register number */
2428 xscale_send_u32(target, arch_info->dbg_handler_number);
2429
2430 /* read register value */
2431 xscale_read_tx(target, 1);
2432 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2433
2434 reg->dirty = 0;
2435 reg->valid = 1;
2436 }
2437
2438 return ERROR_OK;
2439 }
2440
2441 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2442 {
2443 struct xscale_reg *arch_info = reg->arch_info;
2444 struct target *target = arch_info->target;
2445 struct xscale_common *xscale = target_to_xscale(target);
2446 uint32_t value = buf_get_u32(buf, 0, 32);
2447
2448 /* DCSR, TX and RX are accessible via JTAG */
2449 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2450 {
2451 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2452 return xscale_write_dcsr(arch_info->target, -1, -1);
2453 }
2454 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2455 {
2456 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2457 return xscale_write_rx(arch_info->target);
2458 }
2459 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2460 {
2461 /* can't write to TX register (debug-handler -> host) */
2462 return ERROR_OK;
2463 }
2464 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2465 {
2466 /* can't (explicitly) write to TXRXCTRL register */
2467 return ERROR_OK;
2468 }
2469 else /* Other DBG registers have to be transfered by the debug handler */
2470 {
2471 /* send CP write request (command 0x41) */
2472 xscale_send_u32(target, 0x41);
2473
2474 /* send CP register number */
2475 xscale_send_u32(target, arch_info->dbg_handler_number);
2476
2477 /* send CP register value */
2478 xscale_send_u32(target, value);
2479 buf_set_u32(reg->value, 0, 32, value);
2480 }
2481
2482 return ERROR_OK;
2483 }
2484
2485 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2486 {
2487 struct xscale_common *xscale = target_to_xscale(target);
2488 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2489 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2490
2491 /* send CP write request (command 0x41) */
2492 xscale_send_u32(target, 0x41);
2493
2494 /* send CP register number */
2495 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2496
2497 /* send CP register value */
2498 xscale_send_u32(target, value);
2499 buf_set_u32(dcsr->value, 0, 32, value);
2500
2501 return ERROR_OK;
2502 }
2503
2504 static int xscale_read_trace(struct target *target)
2505 {
2506 struct xscale_common *xscale = target_to_xscale(target);
2507 struct arm *armv4_5 = &xscale->armv4_5_common;
2508 struct xscale_trace_data **trace_data_p;
2509
2510 /* 258 words from debug handler
2511 * 256 trace buffer entries
2512 * 2 checkpoint addresses
2513 */
2514 uint32_t trace_buffer[258];
2515 int is_address[256];
2516 int i, j;
2517
2518 if (target->state != TARGET_HALTED)
2519 {
2520 LOG_WARNING("target must be stopped to read trace data");
2521 return ERROR_TARGET_NOT_HALTED;
2522 }
2523
2524 /* send read trace buffer command (command 0x61) */
2525 xscale_send_u32(target, 0x61);
2526
2527 /* receive trace buffer content */
2528 xscale_receive(target, trace_buffer, 258);
2529
2530 /* parse buffer backwards to identify address entries */
2531 for (i = 255; i >= 0; i--)
2532 {
2533 is_address[i] = 0;
2534 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2535 ((trace_buffer[i] & 0xf0) == 0xd0))
2536 {
2537 if (i >= 3)
2538 is_address[--i] = 1;
2539 if (i >= 2)
2540 is_address[--i] = 1;
2541 if (i >= 1)
2542 is_address[--i] = 1;
2543 if (i >= 1) /* don't index is_address[-1] */
2544 is_address[--i] = 1;
2545 }
2546 }
2547
2548
2549 /* search first non-zero entry */
2550 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2551 ;
2552
2553 if (j == 256)
2554 {
2555 LOG_DEBUG("no trace data collected");
2556 return ERROR_XSCALE_NO_TRACE_DATA;
2557 }
2558
2559 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2560 ;
2561
2562 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2563 (*trace_data_p)->next = NULL;
2564 (*trace_data_p)->chkpt0 = trace_buffer[256];
2565 (*trace_data_p)->chkpt1 = trace_buffer[257];
2566 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2567 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2568 (*trace_data_p)->depth = 256 - j;
2569
2570 for (i = j; i < 256; i++)
2571 {
2572 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2573 if (is_address[i])
2574 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2575 else
2576 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2577 }
2578
2579 return ERROR_OK;
2580 }
2581
2582 static int xscale_read_instruction(struct target *target,
2583 struct arm_instruction *instruction)
2584 {
2585 struct xscale_common *xscale = target_to_xscale(target);
2586 int i;
2587 int section = -1;
2588 size_t size_read;
2589 uint32_t opcode;
2590 int retval;
2591
2592 if (!xscale->trace.image)
2593 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2594
2595 /* search for the section the current instruction belongs to */
2596 for (i = 0; i < xscale->trace.image->num_sections; i++)
2597 {
2598 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2599 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2600 {
2601 section = i;
2602 break;
2603 }
2604 }
2605
2606 if (section == -1)
2607 {
2608 /* current instruction couldn't be found in the image */
2609 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2610 }
2611
2612 if (xscale->trace.core_state == ARM_STATE_ARM)
2613 {
2614 uint8_t buf[4];
2615 if ((retval = image_read_section(xscale->trace.image, section,
2616 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2617 4, buf, &size_read)) != ERROR_OK)
2618 {
2619 LOG_ERROR("error while reading instruction: %i", retval);
2620 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2621 }
2622 opcode = target_buffer_get_u32(target, buf);
2623 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2624 }
2625 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2626 {
2627 uint8_t buf[2];
2628 if ((retval = image_read_section(xscale->trace.image, section,
2629 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2630 2, buf, &size_read)) != ERROR_OK)
2631 {
2632 LOG_ERROR("error while reading instruction: %i", retval);
2633 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2634 }
2635 opcode = target_buffer_get_u16(target, buf);
2636 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2637 }
2638 else
2639 {
2640 LOG_ERROR("BUG: unknown core state encountered");
2641 exit(-1);
2642 }
2643
2644 return ERROR_OK;
2645 }
2646
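/* An indirect branch message is preceded in the trace buffer by four address
 * bytes; the entry directly before the message holds the least significant
 * byte, so the 32-bit target is reassembled as below.
 */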
2647 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2648 int i, uint32_t *target)
2649 {
2650 /* if there are less than four entries prior to the indirect branch message
2651 * we can't extract the address */
2652 if (i < 4)
2653 {
2654 return -1;
2655 }
2656
2657 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2658 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2659
2660 return 0;
2661 }
2662
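/* Trace buffer entries are one byte each: the high nibble identifies the
 * message type (0..7 exception vectors, 8 direct branch, 9 indirect branch,
 * 12/13 their checkpointed variants, 15 roll-over) and the low nibble counts
 * instructions executed since the previous message; roll-over messages extend
 * that count by 16, as handled in the loop below.
 */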
2663 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2664 {
2665 struct xscale_common *xscale = target_to_xscale(target);
2666 int next_pc_ok = 0;
2667 uint32_t next_pc = 0x0;
2668 struct xscale_trace_data *trace_data = xscale->trace.data;
2669 int retval;
2670
2671 while (trace_data)
2672 {
2673 int i, chkpt;
2674 int rollover;
2675 int branch;
2676 int exception;
2677 xscale->trace.core_state = ARM_STATE_ARM;
2678
2679 chkpt = 0;
2680 rollover = 0;
2681
2682 for (i = 0; i < trace_data->depth; i++)
2683 {
2684 next_pc_ok = 0;
2685 branch = 0;
2686 exception = 0;
2687
2688 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2689 continue;
2690
2691 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2692 {
2693 case 0: /* Exceptions */
2694 case 1:
2695 case 2:
2696 case 3:
2697 case 4:
2698 case 5:
2699 case 6:
2700 case 7:
2701 exception = (trace_data->entries[i].data & 0x70) >> 4;
2702 next_pc_ok = 1;
2703 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2704 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2705 break;
2706 case 8: /* Direct Branch */
2707 branch = 1;
2708 break;
2709 case 9: /* Indirect Branch */
2710 branch = 1;
2711 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2712 {
2713 next_pc_ok = 1;
2714 }
2715 break;
2716 case 13: /* Checkpointed Indirect Branch */
2717 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2718 {
2719 next_pc_ok = 1;
2720 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2721 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2722 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2723 }
2724 /* explicit fall-through */
2725 case 12: /* Checkpointed Direct Branch */
2726 branch = 1;
2727 if (chkpt == 0)
2728 {
2729 next_pc_ok = 1;
2730 next_pc = trace_data->chkpt0;
2731 chkpt++;
2732 }
2733 else if (chkpt == 1)
2734 {
2735 next_pc_ok = 1;
2736 next_pc = trace_data->chkpt1;
2737 chkpt++;
2738 }
2739 else
2740 {
2741 LOG_WARNING("more than two checkpointed branches encountered");
2742 }
2743 break;
2744 case 15: /* Roll-over */
2745 rollover++;
2746 continue;
2747 default: /* Reserved */
2748 command_print(cmd_ctx, "--- reserved trace message ---");
2749 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2750 return ERROR_OK;
2751 }
2752
2753 if (xscale->trace.pc_ok)
2754 {
2755 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2756 struct arm_instruction instruction;
2757
2758 if ((exception == 6) || (exception == 7))
2759 {
2760 /* IRQ or FIQ exception, no instruction executed */
2761 executed -= 1;
2762 }
2763
2764 while (executed-- >= 0)
2765 {
2766 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2767 {
2768 /* can't continue tracing with no image available */
2769 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2770 {
2771 return retval;
2772 }
2773 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2774 {
2775 /* TODO: handle incomplete images */
2776 }
2777 }
2778
2779 /* a precise abort on a load to the PC is included in the incremental
2780 * word count, other instructions causing data aborts are not included
2781 */
2782 if ((executed == 0) && (exception == 4)
2783 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2784 {
2785 if ((instruction.type == ARM_LDM)
2786 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2787 {
2788 executed--;
2789 }
2790 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2791 && (instruction.info.load_store.Rd != 15))
2792 {
2793 executed--;
2794 }
2795 }
2796
2797 /* only the last instruction executed
2798 * (the one that caused the control flow change)
2799 * could be a taken branch
2800 */
2801 if (((executed == -1) && (branch == 1)) &&
2802 (((instruction.type == ARM_B) ||
2803 (instruction.type == ARM_BL) ||
2804 (instruction.type == ARM_BLX)) &&
2805 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2806 {
2807 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2808 }
2809 else
2810 {
2811 xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2;
2812 }
2813 command_print(cmd_ctx, "%s", instruction.text);
2814 }
2815
2816 rollover = 0;
2817 }
2818
2819 if (next_pc_ok)
2820 {
2821 xscale->trace.current_pc = next_pc;
2822 xscale->trace.pc_ok = 1;
2823 }
2824 }
2825
2826 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2)
2827 {
2828 struct arm_instruction instruction;
2829 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2830 {
2831 /* can't continue tracing with no image available */
2832 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2833 {
2834 return retval;
2835 }
2836 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2837 {
2838 /* TODO: handle incomplete images */
2839 }
2840 }
2841 command_print(cmd_ctx, "%s", instruction.text);
2842 }
2843
2844 trace_data = trace_data->next;
2845 }
2846
2847 return ERROR_OK;
2848 }
2849
2850 static const struct reg_arch_type xscale_reg_type = {
2851 .get = xscale_get_reg,
2852 .set = xscale_set_reg,
2853 };
2854
2855 static void xscale_build_reg_cache(struct target *target)
2856 {
2857 struct xscale_common *xscale = target_to_xscale(target);
2858 struct arm *armv4_5 = &xscale->armv4_5_common;
2859 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2860 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2861 int i;
2862 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2863
2864 (*cache_p) = arm_build_reg_cache(target, armv4_5);
2865
2866 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2867 cache_p = &(*cache_p)->next;
2868
2869 /* fill in values for the xscale reg cache */
2870 (*cache_p)->name = "XScale registers";
2871 (*cache_p)->next = NULL;
2872 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2873 (*cache_p)->num_regs = num_regs;
2874
2875 for (i = 0; i < num_regs; i++)
2876 {
2877 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2878 (*cache_p)->reg_list[i].value = calloc(4, 1);
2879 (*cache_p)->reg_list[i].dirty = 0;
2880 (*cache_p)->reg_list[i].valid = 0;
2881 (*cache_p)->reg_list[i].size = 32;
2882 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2883 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2884 arch_info[i] = xscale_reg_arch_info[i];
2885 arch_info[i].target = target;
2886 }
2887
2888 xscale->reg_cache = (*cache_p);
2889 }
2890
2891 static int xscale_init_target(struct command_context *cmd_ctx,
2892 struct target *target)
2893 {
2894 xscale_build_reg_cache(target);
2895 return ERROR_OK;
2896 }
2897
2898 static int xscale_init_arch_info(struct target *target,
2899 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2900 {
2901 struct arm *armv4_5;
2902 uint32_t high_reset_branch, low_reset_branch;
2903 int i;
2904
2905 armv4_5 = &xscale->armv4_5_common;
2906
2907 /* store architecture specific data */
2908 xscale->common_magic = XSCALE_COMMON_MAGIC;
2909
2910 /* we don't really *need* a variant param ... */
2911 if (variant) {
2912 int ir_length = 0;
2913
2914 if (strcmp(variant, "pxa250") == 0
2915 || strcmp(variant, "pxa255") == 0
2916 || strcmp(variant, "pxa26x") == 0)
2917 ir_length = 5;
2918 else if (strcmp(variant, "pxa27x") == 0
2919 || strcmp(variant, "ixp42x") == 0
2920 || strcmp(variant, "ixp45x") == 0
2921 || strcmp(variant, "ixp46x") == 0)
2922 ir_length = 7;
2923 else if (strcmp(variant, "pxa3xx") == 0)
2924 ir_length = 11;
2925 else
2926 LOG_WARNING("%s: unrecognized variant %s",
2927 tap->dotted_name, variant);
2928
2929 if (ir_length && ir_length != tap->ir_length) {
2930 LOG_WARNING("%s: IR length for %s is %d; fixing",
2931 tap->dotted_name, variant, ir_length);
2932 tap->ir_length = ir_length;
2933 }
2934 }
2935
2936 /* PXA3xx shifts the JTAG instructions */
2937 if (tap->ir_length == 11)
2938 xscale->xscale_variant = XSCALE_PXA3XX;
2939 else
2940 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
2941
2942 /* the debug handler isn't installed (and thus not running) at this time */
2943 xscale->handler_address = 0xfe000800;
2944
2945 /* clear the vectors we keep locally for reference */
2946 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2947 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2948
2949 /* no user-specified vectors have been configured yet */
2950 xscale->static_low_vectors_set = 0x0;
2951 xscale->static_high_vectors_set = 0x0;
2952
2953 /* calculate branches to debug handler */
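/* The reset vectors get an ARM "B" to handler_address + 0x20; the encoded
 * offset is (destination - vector_address - 8) >> 2, since the PC reads
 * eight bytes ahead and B offsets are counted in words.
 */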
2954 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2955 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2956
2957 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2958 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2959
2960 for (i = 1; i <= 7; i++)
2961 {
2962 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2963 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2964 }
2965
2966 /* 64kB aligned region used for DCache cleaning */
2967 xscale->cache_clean_address = 0xfffe0000;
2968
2969 xscale->hold_rst = 0;
2970 xscale->external_debug_break = 0;
2971
2972 xscale->ibcr_available = 2;
2973 xscale->ibcr0_used = 0;
2974 xscale->ibcr1_used = 0;
2975
2976 xscale->dbr_available = 2;
2977 xscale->dbr0_used = 0;
2978 xscale->dbr1_used = 0;
2979
2980 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2981 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2982
2983 xscale->vector_catch = 0x1;
2984
2985 xscale->trace.capture_status = TRACE_IDLE;
2986 xscale->trace.data = NULL;
2987 xscale->trace.image = NULL;
2988 xscale->trace.buffer_enabled = 0;
2989 xscale->trace.buffer_fill = 0;
2990
2991 /* prepare ARMv4/5 specific information */
2992 armv4_5->arch_info = xscale;
2993 armv4_5->read_core_reg = xscale_read_core_reg;
2994 armv4_5->write_core_reg = xscale_write_core_reg;
2995 armv4_5->full_context = xscale_full_context;
2996
2997 arm_init_arch_info(target, armv4_5);
2998
2999 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3000 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3001 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3002 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3003 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3004 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3005 xscale->armv4_5_mmu.has_tiny_pages = 1;
3006 xscale->armv4_5_mmu.mmu_enabled = 0;
3007
3008 return ERROR_OK;
3009 }
3010
3011 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3012 {
3013 struct xscale_common *xscale;
3014
3015 if (sizeof xscale_debug_handler - 1 > 0x800) {
3016 LOG_ERROR("debug_handler.bin: larger than 2kb");
3017 return ERROR_FAIL;
3018 }
3019
3020 xscale = calloc(1, sizeof(*xscale));
3021 if (!xscale)
3022 return ERROR_FAIL;
3023
3024 return xscale_init_arch_info(target, xscale, target->tap,
3025 target->variant);
3026 }
3027
3028 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3029 {
3030 struct target *target = NULL;
3031 struct xscale_common *xscale;
3032 int retval;
3033 uint32_t handler_address;
3034
3035 if (CMD_ARGC < 2)
3036 {
3037 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3038 return ERROR_OK;
3039 }
3040
3041 if ((target = get_target(CMD_ARGV[0])) == NULL)
3042 {
3043 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3044 return ERROR_FAIL;
3045 }
3046
3047 xscale = target_to_xscale(target);
3048 retval = xscale_verify_pointer(CMD_CTX, xscale);
3049 if (retval != ERROR_OK)
3050 return retval;
3051
3052 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3053
3054 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3055 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3056 {
3057 xscale->handler_address = handler_address;
3058 }
3059 else
3060 {
3061 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3062 return ERROR_FAIL;
3063 }
3064
3065 return ERROR_OK;
3066 }
3067
3068 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3069 {
3070 struct target *target = NULL;
3071 struct xscale_common *xscale;
3072 int retval;
3073 uint32_t cache_clean_address;
3074
3075 if (CMD_ARGC < 2)
3076 {
3077 return ERROR_COMMAND_SYNTAX_ERROR;
3078 }
3079
3080 target = get_target(CMD_ARGV[0]);
3081 if (target == NULL)
3082 {
3083 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3084 return ERROR_FAIL;
3085 }
3086 xscale = target_to_xscale(target);
3087 retval = xscale_verify_pointer(CMD_CTX, xscale);
3088 if (retval != ERROR_OK)
3089 return retval;
3090
3091 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3092
3093 if (cache_clean_address & 0xffff)
3094 {
3095 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3096 }
3097 else
3098 {
3099 xscale->cache_clean_address = cache_clean_address;
3100 }
3101
3102 return ERROR_OK;
3103 }
3104
3105 COMMAND_HANDLER(xscale_handle_cache_info_command)
3106 {
3107 struct target *target = get_current_target(CMD_CTX);
3108 struct xscale_common *xscale = target_to_xscale(target);
3109 int retval;
3110
3111 retval = xscale_verify_pointer(CMD_CTX, xscale);
3112 if (retval != ERROR_OK)
3113 return retval;
3114
3115 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3116 }
3117
3118 static int xscale_virt2phys(struct target *target,
3119 uint32_t virtual, uint32_t *physical)
3120 {
3121 struct xscale_common *xscale = target_to_xscale(target);
3122 int type;
3123 uint32_t cb;
3124 int domain;
3125 uint32_t ap;
3126
3127 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3128 LOG_ERROR(xscale_not);
3129 return ERROR_TARGET_INVALID;
3130 }
3131
3132 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3133 if (type == -1)
3134 {
3135 return ret;
3136 }
3137 *physical = ret;
3138 return ERROR_OK;
3139 }
3140
3141 static int xscale_mmu(struct target *target, int *enabled)
3142 {
3143 struct xscale_common *xscale = target_to_xscale(target);
3144
3145 if (target->state != TARGET_HALTED)
3146 {
3147 LOG_ERROR("Target not halted");
3148 return ERROR_TARGET_INVALID;
3149 }
3150 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3151 return ERROR_OK;
3152 }
3153
3154 COMMAND_HANDLER(xscale_handle_mmu_command)
3155 {
3156 struct target *target = get_current_target(CMD_CTX);
3157 struct xscale_common *xscale = target_to_xscale(target);
3158 int retval;
3159
3160 retval = xscale_verify_pointer(CMD_CTX, xscale);
3161 if (retval != ERROR_OK)
3162 return retval;
3163
3164 if (target->state != TARGET_HALTED)
3165 {
3166 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3167 return ERROR_OK;
3168 }
3169
3170 if (CMD_ARGC >= 1)
3171 {
3172 bool enable;
3173 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3174 if (enable)
3175 xscale_enable_mmu_caches(target, 1, 0, 0);
3176 else
3177 xscale_disable_mmu_caches(target, 1, 0, 0);
3178 xscale->armv4_5_mmu.mmu_enabled = enable;
3179 }
3180
3181 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3182
3183 return ERROR_OK;
3184 }
3185
3186 COMMAND_HANDLER(xscale_handle_idcache_command)
3187 {
3188 struct target *target = get_current_target(CMD_CTX);
3189 struct xscale_common *xscale = target_to_xscale(target);
3190
3191 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3192 if (retval != ERROR_OK)
3193 return retval;
3194
3195 if (target->state != TARGET_HALTED)
3196 {
3197 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3198 return ERROR_OK;
3199 }
3200
3201 bool icache;
3202 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3203
3204 if (CMD_ARGC >= 1)
3205 {
3206 bool enable;
3207 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3208 if (enable)
3209 xscale_enable_mmu_caches(target, 0, icache ? 0 : 1, icache ? 1 : 0);
3210 else
3211 xscale_disable_mmu_caches(target, 0, icache ? 0 : 1, icache ? 1 : 0);
3212 if (icache)
3213 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3214 else
3215 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3216 }
3217
3218 bool enabled = icache ?
3219 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3220 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3221 const char *msg = enabled ? "enabled" : "disabled";
3222 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3223
3224 return ERROR_OK;
3225 }
3226
3227 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3228 {
3229 struct target *target = get_current_target(CMD_CTX);
3230 struct xscale_common *xscale = target_to_xscale(target);
3231 int retval;
3232
3233 retval = xscale_verify_pointer(CMD_CTX, xscale);
3234 if (retval != ERROR_OK)
3235 return retval;
3236
3237 if (CMD_ARGC < 1)
3238 {
3239 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3240 }
3241 else
3242 {
3243 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3244 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3245 xscale_write_dcsr(target, -1, -1);
3246 }
3247
3248 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3249
3250 return ERROR_OK;
3251 }
3252
3253
3254 COMMAND_HANDLER(xscale_handle_vector_table_command)
3255 {
3256 struct target *target = get_current_target(CMD_CTX);
3257 struct xscale_common *xscale = target_to_xscale(target);
3258 int err = 0;
3259 int retval;
3260
3261 retval = xscale_verify_pointer(CMD_CTX, xscale);
3262 if (retval != ERROR_OK)
3263 return retval;
3264
3265 if (CMD_ARGC == 0) /* print current settings */
3266 {
3267 int idx;
3268
3269 command_print(CMD_CTX, "active user-set static vectors:");
3270 for (idx = 1; idx < 8; idx++)
3271 if (xscale->static_low_vectors_set & (1 << idx))
3272 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3273 for (idx = 1; idx < 8; idx++)
3274 if (xscale->static_high_vectors_set & (1 << idx))
3275 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3276 return ERROR_OK;
3277 }
3278
3279 if (CMD_ARGC != 3)
3280 err = 1;
3281 else
3282 {
3283 int idx;
3284 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3285 uint32_t vec;
3286 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3287
3288 if (idx < 1 || idx >= 8)
3289 err = 1;
3290
3291 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3292 {
3293 xscale->static_low_vectors_set |= (1<<idx);
3294 xscale->static_low_vectors[idx] = vec;
3295 }
3296 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3297 {
3298 xscale->static_high_vectors_set |= (1<<idx);
3299 xscale->static_high_vectors[idx] = vec;
3300 }
3301 else
3302 err = 1;
3303 }
3304
3305 if (err)
3306 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3307
3308 return ERROR_OK;
3309 }
3310
3311
3312 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3313 {
3314 struct target *target = get_current_target(CMD_CTX);
3315 struct xscale_common *xscale = target_to_xscale(target);
3316 struct arm *armv4_5 = &xscale->armv4_5_common;
3317 uint32_t dcsr_value;
3318 int retval;
3319
3320 retval = xscale_verify_pointer(CMD_CTX, xscale);
3321 if (retval != ERROR_OK)
3322 return retval;
3323
3324 if (target->state != TARGET_HALTED)
3325 {
3326 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3327 return ERROR_OK;
3328 }
3329
3330 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3331 {
3332 struct xscale_trace_data *td, *next_td;
3333 xscale->trace.buffer_enabled = 1;
3334
3335 /* free old trace data */
3336 td = xscale->trace.data;
3337 while (td)
3338 {
3339 next_td = td->next;
3340
3341 if (td->entries)
3342 free(td->entries);
3343 free(td);
3344 td = next_td;
3345 }
3346 xscale->trace.data = NULL;
3347 }
3348 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3349 {
3350 xscale->trace.buffer_enabled = 0;
3351 }
3352
3353 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3354 {
3355 uint32_t fill = 1;
3356 if (CMD_ARGC >= 3)
3357 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3358 xscale->trace.buffer_fill = fill;
3359 }
3360 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3361 {
3362 xscale->trace.buffer_fill = -1;
3363 }
3364
3365 if (xscale->trace.buffer_enabled)
3366 {
3367 /* if we enable the trace buffer in fill-once
3368 * mode we know the address of the first instruction */
3369 xscale->trace.pc_ok = 1;
3370 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3371 }
3372 else
3373 {
3374 /* otherwise the address is unknown, and we have no known good PC */
3375 xscale->trace.pc_ok = 0;
3376 }
3377
3378 command_print(CMD_CTX, "trace buffer %s (%s)",
3379 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3380 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3381
3382 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3383 if (xscale->trace.buffer_fill >= 0)
3384 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3385 else
3386 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3387
3388 return ERROR_OK;
3389 }
3390
3391 COMMAND_HANDLER(xscale_handle_trace_image_command)
3392 {
3393 struct target *target = get_current_target(CMD_CTX);
3394 struct xscale_common *xscale = target_to_xscale(target);
3395 int retval;
3396
3397 if (CMD_ARGC < 1)
3398 {
3399 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3400 return ERROR_OK;
3401 }
3402
3403 retval = xscale_verify_pointer(CMD_CTX, xscale);
3404 if (retval != ERROR_OK)
3405 return retval;
3406
3407 if (xscale->trace.image)
3408 {
3409 image_close(xscale->trace.image);
3410 free(xscale->trace.image);
3411 command_print(CMD_CTX, "previously loaded image found and closed");
3412 }
3413
3414 xscale->trace.image = malloc(sizeof(struct image));
3415 xscale->trace.image->base_address_set = 0;
3416 xscale->trace.image->start_address_set = 0;
3417
3418 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3419 if (CMD_ARGC >= 2)
3420 {
3421 xscale->trace.image->base_address_set = 1;
3422 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3423 }
3424 else
3425 {
3426 xscale->trace.image->base_address_set = 0;
3427 }
3428
3429 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3430 {
3431 free(xscale->trace.image);
3432 xscale->trace.image = NULL;
3433 return ERROR_OK;
3434 }
3435
3436 return ERROR_OK;
3437 }
3438
3439 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3440 {
3441 struct target *target = get_current_target(CMD_CTX);
3442 struct xscale_common *xscale = target_to_xscale(target);
3443 struct xscale_trace_data *trace_data;
3444 struct fileio file;
3445 int retval;
3446
3447 retval = xscale_verify_pointer(CMD_CTX, xscale);
3448 if (retval != ERROR_OK)
3449 return retval;
3450
3451 if (target->state != TARGET_HALTED)
3452 {
3453 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3454 return ERROR_OK;
3455 }
3456
3457 if (CMD_ARGC < 1)
3458 {
3459 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3460 return ERROR_OK;
3461 }
3462
3463 trace_data = xscale->trace.data;
3464
3465 if (!trace_data)
3466 {
3467 command_print(CMD_CTX, "no trace data collected");
3468 return ERROR_OK;
3469 }
3470
3471 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3472 {
3473 return ERROR_OK;
3474 }
3475
3476 while (trace_data)
3477 {
3478 int i;
3479
3480 fileio_write_u32(&file, trace_data->chkpt0);
3481 fileio_write_u32(&file, trace_data->chkpt1);
3482 fileio_write_u32(&file, trace_data->last_instruction);
3483 fileio_write_u32(&file, trace_data->depth);
3484
3485 for (i = 0; i < trace_data->depth; i++)
3486 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3487
3488 trace_data = trace_data->next;
3489 }
3490
3491 fileio_close(&file);
3492
3493 return ERROR_OK;
3494 }
3495
3496 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3497 {
3498 struct target *target = get_current_target(CMD_CTX);
3499 struct xscale_common *xscale = target_to_xscale(target);
3500 int retval;
3501
3502 retval = xscale_verify_pointer(CMD_CTX, xscale);
3503 if (retval != ERROR_OK)
3504 return retval;
3505
3506 xscale_analyze_trace(target, CMD_CTX);
3507
3508 return ERROR_OK;
3509 }
3510
3511 COMMAND_HANDLER(xscale_handle_cp15)
3512 {
3513 struct target *target = get_current_target(CMD_CTX);
3514 struct xscale_common *xscale = target_to_xscale(target);
3515 int retval;
3516
3517 retval = xscale_verify_pointer(CMD_CTX, xscale);
3518 if (retval != ERROR_OK)
3519 return retval;
3520
3521 if (target->state != TARGET_HALTED)
3522 {
3523 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3524 return ERROR_OK;
3525 }
3526 uint32_t reg_no = 0;
3527 struct reg *reg = NULL;
3528 if (CMD_ARGC > 0)
3529 {
3530 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3531 /*translate from xscale cp15 register no to openocd register*/
3532 switch (reg_no)
3533 {
3534 case 0:
3535 reg_no = XSCALE_MAINID;
3536 break;
3537 case 1:
3538 reg_no = XSCALE_CTRL;
3539 break;
3540 case 2:
3541 reg_no = XSCALE_TTB;
3542 break;
3543 case 3:
3544 reg_no = XSCALE_DAC;
3545 break;
3546 case 5:
3547 reg_no = XSCALE_FSR;
3548 break;
3549 case 6:
3550 reg_no = XSCALE_FAR;
3551 break;
3552 case 13:
3553 reg_no = XSCALE_PID;
3554 break;
3555 case 15:
3556 reg_no = XSCALE_CPACCESS;
3557 break;
3558 default:
3559 command_print(CMD_CTX, "invalid register number");
3560 return ERROR_INVALID_ARGUMENTS;
3561 }
3562 reg = &xscale->reg_cache->reg_list[reg_no];
3563
3564 }
3565 if (CMD_ARGC == 1)
3566 {
3567 uint32_t value;
3568
3569 /* read cp15 control register */
3570 xscale_get_reg(reg);
3571 value = buf_get_u32(reg->value, 0, 32);
3572 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3573 }
3574 else if (CMD_ARGC == 2)
3575 {
3576 uint32_t value;
3577 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3578
3579 /* send CP write request (command 0x41) */
3580 xscale_send_u32(target, 0x41);
3581
3582 /* send CP register number */
3583 xscale_send_u32(target, reg_no);
3584
3585 /* send CP register value */
3586 xscale_send_u32(target, value);
3587
3588 /* execute cpwait to ensure outstanding operations complete */
3589 xscale_send_u32(target, 0x53);
3590 }
3591 else
3592 {
3593 command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
3594 }
3595
3596 return ERROR_OK;
3597 }
3598
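/* Typical use from a target configuration script or the telnet console
 * (illustrative invocations; arguments follow the .usage strings registered
 * below, file names are placeholders):
 *
 *   xscale debug_handler <target#> 0xfe000800
 *   xscale vector_catch 0x01
 *   xscale trace_buffer enable fill
 *   xscale dump_trace trace.bin
 *   xscale cp15 1
 */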
3599 static const struct command_registration xscale_exec_command_handlers[] = {
3600 {
3601 .name = "cache_info",
3602 .handler = &xscale_handle_cache_info_command,
3603 .mode = COMMAND_EXEC,
3604 },
3605
3606 {
3607 .name = "mmu",
3608 .handler = &xscale_handle_mmu_command,
3609 .mode = COMMAND_EXEC,
3610 .usage = "[enable|disable]",
3611 .help = "enable or disable the MMU",
3612 },
3613 {
3614 .name = "icache",
3615 .handler = &xscale_handle_idcache_command,
3616 .mode = COMMAND_EXEC,
3617 .usage = "[enable|disable]",
3618 .help = "enable or disable the ICache",
3619 },
3620 {
3621 .name = "dcache",
3622 .handler = &xscale_handle_idcache_command,
3623 .mode = COMMAND_EXEC,
3624 .usage = "[enable|disable]",
3625 .help = "enable or disable the DCache",
3626 },
3627
3628 {
3629 .name = "vector_catch",
3630 .handler = &xscale_handle_vector_catch_command,
3631 .mode = COMMAND_EXEC,
3632 .help = "mask of vectors that should be caught",
3633 .usage = "[<mask>]",
3634 },
3635 {
3636 .name = "vector_table",
3637 .handler = &xscale_handle_vector_table_command,
3638 .mode = COMMAND_EXEC,
3639 .usage = "<high|low> <index> <code>",
3640 .help = "set static code for exception handler entry",
3641 },
3642
3643 {
3644 .name = "trace_buffer",
3645 .handler = &xscale_handle_trace_buffer_command,
3646 .mode = COMMAND_EXEC,
3647 .usage = "<enable | disable> [fill [n]|wrap]",
3648 },
3649 {
3650 .name = "dump_trace",
3651 .handler = &xscale_handle_dump_trace_command,
3652 .mode = COMMAND_EXEC,
3653 .help = "dump content of trace buffer to <file>",
3654 .usage = "<file>",
3655 },
3656 {
3657 .name = "analyze_trace",
3658 .handler = &xscale_handle_analyze_trace_buffer_command,
3659 .mode = COMMAND_EXEC,
3660 .help = "analyze content of trace buffer",
3661 },
3662 {
3663 .name = "trace_image",
3664 .handler = &xscale_handle_trace_image_command,
3665 .mode = COMMAND_EXEC,
3666 .help = "load image from <file> [base address]",
3667 .usage = "<file> [address] [type]",
3668 },
3669
3670 {
3671 .name = "cp15",
3672 .handler = &xscale_handle_cp15,
3673 .mode = COMMAND_EXEC,
3674 .help = "access coproc 15",
3675 .usage = "<register> [value]",
3676 },
3677 COMMAND_REGISTRATION_DONE
3678 };
3679 static const struct command_registration xscale_any_command_handlers[] = {
3680 {
3681 .name = "debug_handler",
3682 .handler = &xscale_handle_debug_handler_command,
3683 .mode = COMMAND_ANY,
3684 .usage = "<target#> <address>",
3685 },
3686 {
3687 .name = "cache_clean_address",
3688 .handler = &xscale_handle_cache_clean_address_command,
3689 .mode = COMMAND_ANY,
3690 },
3691 {
3692 .chain = xscale_exec_command_handlers,
3693 },
3694 COMMAND_REGISTRATION_DONE
3695 };
3696 static const struct command_registration xscale_command_handlers[] = {
3697 {
3698 .chain = arm_command_handlers,
3699 },
3700 {
3701 .name = "xscale",
3702 .mode = COMMAND_ANY,
3703 .help = "xscale command group",
3704 .chain = xscale_any_command_handlers,
3705 },
3706 COMMAND_REGISTRATION_DONE
3707 };
3708
3709 struct target_type xscale_target =
3710 {
3711 .name = "xscale",
3712
3713 .poll = xscale_poll,
3714 .arch_state = xscale_arch_state,
3715
3716 .target_request_data = NULL,
3717
3718 .halt = xscale_halt,
3719 .resume = xscale_resume,
3720 .step = xscale_step,
3721
3722 .assert_reset = xscale_assert_reset,
3723 .deassert_reset = xscale_deassert_reset,
3724 .soft_reset_halt = NULL,
3725
3726 .get_gdb_reg_list = arm_get_gdb_reg_list,
3727
3728 .read_memory = xscale_read_memory,
3729 .read_phys_memory = xscale_read_phys_memory,
3730 .write_memory = xscale_write_memory,
3731 .write_phys_memory = xscale_write_phys_memory,
3732 .bulk_write_memory = xscale_bulk_write_memory,
3733
3734 .checksum_memory = arm_checksum_memory,
3735 .blank_check_memory = arm_blank_check_memory,
3736
3737 .run_algorithm = armv4_5_run_algorithm,
3738
3739 .add_breakpoint = xscale_add_breakpoint,
3740 .remove_breakpoint = xscale_remove_breakpoint,
3741 .add_watchpoint = xscale_add_watchpoint,
3742 .remove_watchpoint = xscale_remove_watchpoint,
3743
3744 .commands = xscale_command_handlers,
3745 .target_create = xscale_target_create,
3746 .init_target = xscale_init_target,
3747
3748 .virt2phys = xscale_virt2phys,
3749 .mmu = xscale_mmu
3750 };
