cleanup: rename armv4_5 to arm for readability
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
41
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
74
75
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
78 *
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
83 */
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
86
87 static char *const xscale_reg_list[] =
88 {
89 "XSCALE_MAINID", /* 0 */
90 "XSCALE_CACHETYPE",
91 "XSCALE_CTRL",
92 "XSCALE_AUXCTRL",
93 "XSCALE_TTB",
94 "XSCALE_DAC",
95 "XSCALE_FSR",
96 "XSCALE_FAR",
97 "XSCALE_PID",
98 "XSCALE_CPACCESS",
99 "XSCALE_IBCR0", /* 10 */
100 "XSCALE_IBCR1",
101 "XSCALE_DBR0",
102 "XSCALE_DBR1",
103 "XSCALE_DBCON",
104 "XSCALE_TBREG",
105 "XSCALE_CHKPT0",
106 "XSCALE_CHKPT1",
107 "XSCALE_DCSR",
108 "XSCALE_TX",
109 "XSCALE_RX", /* 20 */
110 "XSCALE_TXRXCTRL",
111 };
112
113 static const struct xscale_reg xscale_reg_arch_info[] =
114 {
115 {XSCALE_MAINID, NULL},
116 {XSCALE_CACHETYPE, NULL},
117 {XSCALE_CTRL, NULL},
118 {XSCALE_AUXCTRL, NULL},
119 {XSCALE_TTB, NULL},
120 {XSCALE_DAC, NULL},
121 {XSCALE_FSR, NULL},
122 {XSCALE_FAR, NULL},
123 {XSCALE_PID, NULL},
124 {XSCALE_CPACCESS, NULL},
125 {XSCALE_IBCR0, NULL},
126 {XSCALE_IBCR1, NULL},
127 {XSCALE_DBR0, NULL},
128 {XSCALE_DBR1, NULL},
129 {XSCALE_DBCON, NULL},
130 {XSCALE_TBREG, NULL},
131 {XSCALE_CHKPT0, NULL},
132 {XSCALE_CHKPT1, NULL},
133 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
134 {-1, NULL}, /* TX accessed via JTAG */
135 {-1, NULL}, /* RX accessed via JTAG */
136 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
137 };
138
139 /* convenience wrapper to access XScale specific registers */
140 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
141 {
142 uint8_t buf[4];
143
144 buf_set_u32(buf, 0, 32, value);
145
146 return xscale_set_reg(reg, buf);
147 }
148
149 static const char xscale_not[] = "target is not an XScale";
150
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
153 {
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
157 }
158 return ERROR_OK;
159 }
160
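/* Queue an IR scan selecting new_instr, but only when it differs from the
 * instruction currently latched in the TAP's IR; the scan ends in end_state. */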
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
162 {
163 assert (tap != NULL);
164
165 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
166 {
167 struct scan_field field;
168 uint8_t scratch[4];
169
170 memset(&field, 0, sizeof field);
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(scratch, 0, field.num_bits, new_instr);
174
175 jtag_add_ir_scan(tap, &field, end_state);
176 }
177
178 return ERROR_OK;
179 }
180
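/* The SELDCSR data register is scanned as three fields: a 3-bit control
 * field (carrying hold_rst and external_debug_break), the 32-bit DCSR
 * itself, and a trailing 1-bit field. xscale_write_dcsr() below uses the
 * same layout. */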
181 static int xscale_read_dcsr(struct target *target)
182 {
183 struct xscale_common *xscale = target_to_xscale(target);
184 int retval;
185 struct scan_field fields[3];
186 uint8_t field0 = 0x0;
187 uint8_t field0_check_value = 0x2;
188 uint8_t field0_check_mask = 0x7;
189 uint8_t field2 = 0x0;
190 uint8_t field2_check_value = 0x0;
191 uint8_t field2_check_mask = 0x1;
192
193 xscale_jtag_set_instr(target->tap,
194 XSCALE_SELDCSR << xscale->xscale_variant,
195 TAP_DRPAUSE);
196
197 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
198 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
199
200 memset(&fields, 0, sizeof fields);
201
202 fields[0].num_bits = 3;
203 fields[0].out_value = &field0;
204 uint8_t tmp;
205 fields[0].in_value = &tmp;
206
207 fields[1].num_bits = 32;
208 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
209
210 fields[2].num_bits = 1;
211 fields[2].out_value = &field2;
212 uint8_t tmp2;
213 fields[2].in_value = &tmp2;
214
215 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
216
217 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
218 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
219
220 if ((retval = jtag_execute_queue()) != ERROR_OK)
221 {
222 LOG_ERROR("JTAG error while reading DCSR");
223 return retval;
224 }
225
226 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
227 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
228
 229 /* write the register with the value we just read
 230 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
 231 */
232 field0_check_mask = 0x1;
233 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
234 fields[1].in_value = NULL;
235
236 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
237
 238 /* DANGER!!! this must be here. It makes sure that the arguments
 239 * to jtag_check_value_mask() do not go out of scope! */
240 return jtag_execute_queue();
241 }
242
243
244 static void xscale_getbuf(jtag_callback_data_t arg)
245 {
246 uint8_t *in = (uint8_t *)arg;
247 *((uint32_t *)arg) = buf_get_u32(in, 0, 32);
248 }
249
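/* Read num_words 32-bit words from the debug handler via the DBGTX data
 * register. Bit 0 of the 3-bit status field flags whether the scanned word
 * was valid; words that come back without the valid bit set are retried on
 * the next pass of the loop below. */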
250 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
251 {
252 if (num_words == 0)
253 return ERROR_COMMAND_SYNTAX_ERROR;
254
255 struct xscale_common *xscale = target_to_xscale(target);
256 int retval = ERROR_OK;
257 tap_state_t path[3];
258 struct scan_field fields[3];
259 uint8_t *field0 = malloc(num_words * 1);
260 uint8_t field0_check_value = 0x2;
261 uint8_t field0_check_mask = 0x6;
262 uint32_t *field1 = malloc(num_words * 4);
263 uint8_t field2_check_value = 0x0;
264 uint8_t field2_check_mask = 0x1;
265 int words_done = 0;
266 int words_scheduled = 0;
267 int i;
268
269 path[0] = TAP_DRSELECT;
270 path[1] = TAP_DRCAPTURE;
271 path[2] = TAP_DRSHIFT;
272
273 memset(&fields, 0, sizeof fields);
274
275 fields[0].num_bits = 3;
276 uint8_t tmp;
277 fields[0].in_value = &tmp;
278 fields[0].check_value = &field0_check_value;
279 fields[0].check_mask = &field0_check_mask;
280
281 fields[1].num_bits = 32;
282
283 fields[2].num_bits = 1;
284 uint8_t tmp2;
285 fields[2].in_value = &tmp2;
286 fields[2].check_value = &field2_check_value;
287 fields[2].check_mask = &field2_check_mask;
288
289 xscale_jtag_set_instr(target->tap,
290 XSCALE_DBGTX << xscale->xscale_variant,
291 TAP_IDLE);
292 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
293
294 /* repeat until all words have been collected */
295 int attempts = 0;
296 while (words_done < num_words)
297 {
298 /* schedule reads */
299 words_scheduled = 0;
300 for (i = words_done; i < num_words; i++)
301 {
302 fields[0].in_value = &field0[i];
303
304 jtag_add_pathmove(3, path);
305
306 fields[1].in_value = (uint8_t *)(field1 + i);
307
308 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
309
310 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
311
312 words_scheduled++;
313 }
314
315 if ((retval = jtag_execute_queue()) != ERROR_OK)
316 {
317 LOG_ERROR("JTAG error while receiving data from debug handler");
318 break;
319 }
320
321 /* examine results */
322 for (i = words_done; i < num_words; i++)
323 {
324 if (!(field0[i] & 1))
325 {
326 /* move backwards if necessary */
327 int j;
328 for (j = i; j < num_words - 1; j++)
329 {
330 field0[j] = field0[j + 1];
331 field1[j] = field1[j + 1];
332 }
333 words_scheduled--;
334 }
335 }
336 if (words_scheduled == 0)
337 {
 338 if (attempts++ == 1000)
 339 {
 340 LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
341 retval = ERROR_TARGET_TIMEOUT;
342 break;
343 }
344 }
345
346 words_done += words_scheduled;
347 }
348
349 for (i = 0; i < num_words; i++)
350 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
351
 352 free(field0);
 free(field1);
353
354 return retval;
355 }
356
357 static int xscale_read_tx(struct target *target, int consume)
358 {
359 struct xscale_common *xscale = target_to_xscale(target);
360 tap_state_t path[3];
361 tap_state_t noconsume_path[6];
362 int retval;
363 struct timeval timeout, now;
364 struct scan_field fields[3];
365 uint8_t field0_in = 0x0;
366 uint8_t field0_check_value = 0x2;
367 uint8_t field0_check_mask = 0x6;
368 uint8_t field2_check_value = 0x0;
369 uint8_t field2_check_mask = 0x1;
370
371 xscale_jtag_set_instr(target->tap,
372 XSCALE_DBGTX << xscale->xscale_variant,
373 TAP_IDLE);
374
375 path[0] = TAP_DRSELECT;
376 path[1] = TAP_DRCAPTURE;
377 path[2] = TAP_DRSHIFT;
378
379 noconsume_path[0] = TAP_DRSELECT;
380 noconsume_path[1] = TAP_DRCAPTURE;
381 noconsume_path[2] = TAP_DREXIT1;
382 noconsume_path[3] = TAP_DRPAUSE;
383 noconsume_path[4] = TAP_DREXIT2;
384 noconsume_path[5] = TAP_DRSHIFT;
385
386 memset(&fields, 0, sizeof fields);
387
388 fields[0].num_bits = 3;
389 fields[0].in_value = &field0_in;
390
391 fields[1].num_bits = 32;
392 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
393
394 fields[2].num_bits = 1;
395 uint8_t tmp;
396 fields[2].in_value = &tmp;
397
398 gettimeofday(&timeout, NULL);
399 timeval_add_time(&timeout, 1, 0);
400
401 for (;;)
402 {
403 /* if we want to consume the register content (i.e. clear TX_READY),
404 * we have to go straight from Capture-DR to Shift-DR
405 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
406 */
407 if (consume)
408 jtag_add_pathmove(3, path);
409 else
410 {
411 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
412 }
413
414 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
415
416 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
417 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
418
419 if ((retval = jtag_execute_queue()) != ERROR_OK)
420 {
421 LOG_ERROR("JTAG error while reading TX");
422 return ERROR_TARGET_TIMEOUT;
423 }
424
425 gettimeofday(&now, NULL);
 426 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
427 {
428 LOG_ERROR("time out reading TX register");
429 return ERROR_TARGET_TIMEOUT;
430 }
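/* bit 0 of the status field flags valid TX data: leave the loop as soon
 * as data is available, or right after the first scan when not consuming */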
431 if (!((!(field0_in & 1)) && consume))
432 {
433 goto done;
434 }
435 if (debug_level >= 3)
436 {
437 LOG_DEBUG("waiting 100ms");
438 alive_sleep(100); /* avoid flooding the logs */
439 } else
440 {
441 keep_alive();
442 }
443 }
444 done:
445
446 if (!(field0_in & 1))
447 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
448
449 return ERROR_OK;
450 }
451
452 static int xscale_write_rx(struct target *target)
453 {
454 struct xscale_common *xscale = target_to_xscale(target);
455 int retval;
456 struct timeval timeout, now;
457 struct scan_field fields[3];
458 uint8_t field0_out = 0x0;
459 uint8_t field0_in = 0x0;
460 uint8_t field0_check_value = 0x2;
461 uint8_t field0_check_mask = 0x6;
462 uint8_t field2 = 0x0;
463 uint8_t field2_check_value = 0x0;
464 uint8_t field2_check_mask = 0x1;
465
466 xscale_jtag_set_instr(target->tap,
467 XSCALE_DBGRX << xscale->xscale_variant,
468 TAP_IDLE);
469
470 memset(&fields, 0, sizeof fields);
471
472 fields[0].num_bits = 3;
473 fields[0].out_value = &field0_out;
474 fields[0].in_value = &field0_in;
475
476 fields[1].num_bits = 32;
477 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
478
479 fields[2].num_bits = 1;
480 fields[2].out_value = &field2;
481 uint8_t tmp;
482 fields[2].in_value = &tmp;
483
484 gettimeofday(&timeout, NULL);
485 timeval_add_time(&timeout, 1, 0);
486
487 /* poll until rx_read is low */
488 LOG_DEBUG("polling RX");
489 for (;;)
490 {
491 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
492
493 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
494 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
495
496 if ((retval = jtag_execute_queue()) != ERROR_OK)
497 {
498 LOG_ERROR("JTAG error while writing RX");
499 return retval;
500 }
501
502 gettimeofday(&now, NULL);
 503 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
504 {
505 LOG_ERROR("time out writing RX register");
506 return ERROR_TARGET_TIMEOUT;
507 }
508 if (!(field0_in & 1))
509 goto done;
510 if (debug_level >= 3)
511 {
512 LOG_DEBUG("waiting 100ms");
513 alive_sleep(100); /* avoid flooding the logs */
514 } else
515 {
516 keep_alive();
517 }
518 }
519 done:
520
521 /* set rx_valid */
522 field2 = 0x1;
523 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
524
525 if ((retval = jtag_execute_queue()) != ERROR_OK)
526 {
527 LOG_ERROR("JTAG error while writing RX");
528 return retval;
529 }
530
531 return ERROR_OK;
532 }
533
 534 /* send count elements, each "size" bytes wide, to the debug handler */
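/* Note: unlike xscale_write_rx(), this path does not poll the RX handshake
 * bit between words; it streams the data straight through DBGRX and relies
 * on the debug handler draining the register quickly enough. */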
535 static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
536 {
537 struct xscale_common *xscale = target_to_xscale(target);
538 uint32_t t[3];
539 int bits[3];
540 int retval;
541 int done_count = 0;
542
543 xscale_jtag_set_instr(target->tap,
544 XSCALE_DBGRX << xscale->xscale_variant,
545 TAP_IDLE);
546
547 bits[0]=3;
548 t[0]=0;
549 bits[1]=32;
550 t[2]=1;
551 bits[2]=1;
552 int endianness = target->endianness;
553 while (done_count++ < count)
554 {
555 switch (size)
556 {
557 case 4:
558 if (endianness == TARGET_LITTLE_ENDIAN)
559 {
560 t[1]=le_to_h_u32(buffer);
561 } else
562 {
563 t[1]=be_to_h_u32(buffer);
564 }
565 break;
566 case 2:
567 if (endianness == TARGET_LITTLE_ENDIAN)
568 {
569 t[1]=le_to_h_u16(buffer);
570 } else
571 {
572 t[1]=be_to_h_u16(buffer);
573 }
574 break;
575 case 1:
576 t[1]=buffer[0];
577 break;
578 default:
579 LOG_ERROR("BUG: size neither 4, 2 nor 1");
580 return ERROR_COMMAND_SYNTAX_ERROR;
581 }
582 jtag_add_dr_out(target->tap,
583 3,
584 bits,
585 t,
586 TAP_IDLE);
587 buffer += size;
588 }
589
590 if ((retval = jtag_execute_queue()) != ERROR_OK)
591 {
592 LOG_ERROR("JTAG error while sending data to debug handler");
593 return retval;
594 }
595
596 return ERROR_OK;
597 }
598
599 static int xscale_send_u32(struct target *target, uint32_t value)
600 {
601 struct xscale_common *xscale = target_to_xscale(target);
602
603 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
604 return xscale_write_rx(target);
605 }
606
607 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
608 {
609 struct xscale_common *xscale = target_to_xscale(target);
610 int retval;
611 struct scan_field fields[3];
612 uint8_t field0 = 0x0;
613 uint8_t field0_check_value = 0x2;
614 uint8_t field0_check_mask = 0x7;
615 uint8_t field2 = 0x0;
616 uint8_t field2_check_value = 0x0;
617 uint8_t field2_check_mask = 0x1;
618
619 if (hold_rst != -1)
620 xscale->hold_rst = hold_rst;
621
622 if (ext_dbg_brk != -1)
623 xscale->external_debug_break = ext_dbg_brk;
624
625 xscale_jtag_set_instr(target->tap,
626 XSCALE_SELDCSR << xscale->xscale_variant,
627 TAP_IDLE);
628
629 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
630 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
631
632 memset(&fields, 0, sizeof fields);
633
634 fields[0].num_bits = 3;
635 fields[0].out_value = &field0;
636 uint8_t tmp;
637 fields[0].in_value = &tmp;
638
639 fields[1].num_bits = 32;
640 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
641
642 fields[2].num_bits = 1;
643 fields[2].out_value = &field2;
644 uint8_t tmp2;
645 fields[2].in_value = &tmp2;
646
647 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
648
649 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
650 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
651
652 if ((retval = jtag_execute_queue()) != ERROR_OK)
653 {
654 LOG_ERROR("JTAG error while writing DCSR");
655 return retval;
656 }
657
658 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
659 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
660
661 return ERROR_OK;
662 }
663
 664 /* parity of a 32-bit word: 0 if the number of set bits is even, 1 if odd */
665 static unsigned int parity (unsigned int v)
666 {
667 // unsigned int ov = v;
668 v ^= v >> 16;
669 v ^= v >> 8;
670 v ^= v >> 4;
671 v &= 0xf;
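/* v now holds the XOR-fold of all eight nibbles; 0x6996 is a 16-entry
 * parity lookup table (bit n holds the parity of n), so the shift below
 * picks out the result */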
672 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
673 return (0x6996 >> v) & 1;
674 }
675
676 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
677 {
678 struct xscale_common *xscale = target_to_xscale(target);
679 uint8_t packet[4];
680 uint8_t cmd;
681 int word;
682 struct scan_field fields[2];
683
684 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
685
686 /* LDIC into IR */
687 xscale_jtag_set_instr(target->tap,
688 XSCALE_LDIC << xscale->xscale_variant,
689 TAP_IDLE);
690
691 /* CMD is b011 to load a cacheline into the Mini ICache.
692 * Loading into the main ICache is deprecated, and unused.
693 * It's followed by three zero bits, and 27 address bits.
694 */
695 buf_set_u32(&cmd, 0, 6, 0x3);
696
697 /* virtual address of desired cache line */
698 buf_set_u32(packet, 0, 27, va >> 5);
699
700 memset(&fields, 0, sizeof fields);
701
702 fields[0].num_bits = 6;
703 fields[0].out_value = &cmd;
704
705 fields[1].num_bits = 27;
706 fields[1].out_value = packet;
707
708 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
709
710 /* rest of packet is a cacheline: 8 instructions, with parity */
711 fields[0].num_bits = 32;
712 fields[0].out_value = packet;
713
714 fields[1].num_bits = 1;
715 fields[1].out_value = &cmd;
716
717 for (word = 0; word < 8; word++)
718 {
719 buf_set_u32(packet, 0, 32, buffer[word]);
720
721 uint32_t value;
722 memcpy(&value, packet, sizeof(uint32_t));
723 cmd = parity(value);
724
725 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
726 }
727
728 return jtag_execute_queue();
729 }
730
731 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
732 {
733 struct xscale_common *xscale = target_to_xscale(target);
734 uint8_t packet[4];
735 uint8_t cmd;
736 struct scan_field fields[2];
737
738 xscale_jtag_set_instr(target->tap,
739 XSCALE_LDIC << xscale->xscale_variant,
740 TAP_IDLE);
741
742 /* CMD for invalidate IC line b000, bits [6:4] b000 */
743 buf_set_u32(&cmd, 0, 6, 0x0);
744
745 /* virtual address of desired cache line */
746 buf_set_u32(packet, 0, 27, va >> 5);
747
748 memset(&fields, 0, sizeof fields);
749
750 fields[0].num_bits = 6;
751 fields[0].out_value = &cmd;
752
753 fields[1].num_bits = 27;
754 fields[1].out_value = packet;
755
756 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
757
758 return ERROR_OK;
759 }
760
761 static int xscale_update_vectors(struct target *target)
762 {
763 struct xscale_common *xscale = target_to_xscale(target);
764 int i;
765 int retval;
766
767 uint32_t low_reset_branch, high_reset_branch;
768
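/* For each exception vector (entries 1..7): use the user-supplied static
 * vector if one was configured, otherwise read the current vector from
 * target memory, falling back to a branch-to-self (offset 0xfffffe, i.e.
 * "b .") when the read fails. */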
769 for (i = 1; i < 8; i++)
770 {
771 /* if there's a static vector specified for this exception, override */
772 if (xscale->static_high_vectors_set & (1 << i))
773 {
774 xscale->high_vectors[i] = xscale->static_high_vectors[i];
775 }
776 else
777 {
778 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
779 if (retval == ERROR_TARGET_TIMEOUT)
780 return retval;
781 if (retval != ERROR_OK)
782 {
783 /* Some of these reads will fail as part of normal execution */
784 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
785 }
786 }
787 }
788
789 for (i = 1; i < 8; i++)
790 {
791 if (xscale->static_low_vectors_set & (1 << i))
792 {
793 xscale->low_vectors[i] = xscale->static_low_vectors[i];
794 }
795 else
796 {
797 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
798 if (retval == ERROR_TARGET_TIMEOUT)
799 return retval;
800 if (retval != ERROR_OK)
801 {
802 /* Some of these reads will fail as part of normal execution */
803 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
804 }
805 }
806 }
807
808 /* calculate branches to debug handler */
809 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
810 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
811
812 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
813 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
814
815 /* invalidate and load exception vectors in mini i-cache */
816 xscale_invalidate_ic_line(target, 0x0);
817 xscale_invalidate_ic_line(target, 0xffff0000);
818
819 xscale_load_ic(target, 0x0, xscale->low_vectors);
820 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
821
822 return ERROR_OK;
823 }
824
825 static int xscale_arch_state(struct target *target)
826 {
827 struct xscale_common *xscale = target_to_xscale(target);
828 struct arm *arm = &xscale->arm;
829
830 static const char *state[] =
831 {
832 "disabled", "enabled"
833 };
834
835 static const char *arch_dbg_reason[] =
836 {
837 "", "\n(processor reset)", "\n(trace buffer full)"
838 };
839
840 if (arm->common_magic != ARM_COMMON_MAGIC)
841 {
842 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
843 return ERROR_COMMAND_SYNTAX_ERROR;
844 }
845
846 arm_arch_state(target);
847 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
848 state[xscale->armv4_5_mmu.mmu_enabled],
849 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
850 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
851 arch_dbg_reason[xscale->arch_debug_reason]);
852
853 return ERROR_OK;
854 }
855
856 static int xscale_poll(struct target *target)
857 {
858 int retval = ERROR_OK;
859
860 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
861 {
862 enum target_state previous_state = target->state;
863 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
864 {
865
 866 /* there's data to read from the TX register; we've entered debug state */
867 target->state = TARGET_HALTED;
868
869 /* process debug entry, fetching current mode regs */
870 retval = xscale_debug_entry(target);
871 }
872 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
873 {
874 LOG_USER("error while polling TX register, reset CPU");
 875 /* here we "lie" so GDB won't get stuck and a reset can be performed */
876 target->state = TARGET_HALTED;
877 }
878
879 /* debug_entry could have overwritten target state (i.e. immediate resume)
880 * don't signal event handlers in that case
881 */
882 if (target->state != TARGET_HALTED)
883 return ERROR_OK;
884
885 /* if target was running, signal that we halted
886 * otherwise we reentered from debug execution */
887 if (previous_state == TARGET_RUNNING)
888 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
889 else
890 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
891 }
892
893 return retval;
894 }
895
896 static int xscale_debug_entry(struct target *target)
897 {
898 struct xscale_common *xscale = target_to_xscale(target);
899 struct arm *arm = &xscale->arm;
900 uint32_t pc;
901 uint32_t buffer[10];
902 unsigned i;
903 int retval;
904 uint32_t moe;
905
906 /* clear external dbg break (will be written on next DCSR read) */
907 xscale->external_debug_break = 0;
908 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
909 return retval;
910
911 /* get r0, pc, r1 to r7 and cpsr */
912 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
913 return retval;
914
915 /* move r0 from buffer to register cache */
916 buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32, buffer[0]);
917 arm->core_cache->reg_list[0].dirty = 1;
918 arm->core_cache->reg_list[0].valid = 1;
919 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
920
921 /* move pc from buffer to register cache */
922 buf_set_u32(arm->pc->value, 0, 32, buffer[1]);
923 arm->pc->dirty = 1;
924 arm->pc->valid = 1;
925 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
926
927 /* move data from buffer to register cache */
928 for (i = 1; i <= 7; i++)
929 {
930 buf_set_u32(arm->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
931 arm->core_cache->reg_list[i].dirty = 1;
932 arm->core_cache->reg_list[i].valid = 1;
933 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
934 }
935
936 arm_set_cpsr(arm, buffer[9]);
937 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
938
939 if (!is_arm_mode(arm->core_mode))
940 {
941 target->state = TARGET_UNKNOWN;
942 LOG_ERROR("cpsr contains invalid mode value - communication failure");
943 return ERROR_TARGET_FAILURE;
944 }
945 LOG_DEBUG("target entered debug state in %s mode",
946 arm_mode_name(arm->core_mode));
947
948 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
949 if (arm->spsr) {
950 xscale_receive(target, buffer, 8);
951 buf_set_u32(arm->spsr->value, 0, 32, buffer[7]);
952 arm->spsr->dirty = false;
953 arm->spsr->valid = true;
954 }
955 else
956 {
957 /* r8 to r14, but no spsr */
958 xscale_receive(target, buffer, 7);
959 }
960
961 /* move data from buffer to right banked register in cache */
962 for (i = 8; i <= 14; i++)
963 {
964 struct reg *r = arm_reg_current(arm, i);
965
966 buf_set_u32(r->value, 0, 32, buffer[i - 8]);
967 r->dirty = false;
968 r->valid = true;
969 }
970
971 /* mark xscale regs invalid to ensure they are retrieved from the
972 * debug handler if requested */
973 for (i = 0; i < xscale->reg_cache->num_regs; i++)
974 xscale->reg_cache->reg_list[i].valid = 0;
975
976 /* examine debug reason */
977 xscale_read_dcsr(target);
978 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
979
980 /* stored PC (for calculating fixup) */
981 pc = buf_get_u32(arm->pc->value, 0, 32);
982
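/* decode the DCSR "method of entry" field (moe, bits [4:2] read above)
 * to work out why the core stopped */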
983 switch (moe)
984 {
985 case 0x0: /* Processor reset */
986 target->debug_reason = DBG_REASON_DBGRQ;
987 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
988 pc -= 4;
989 break;
990 case 0x1: /* Instruction breakpoint hit */
991 target->debug_reason = DBG_REASON_BREAKPOINT;
992 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
993 pc -= 4;
994 break;
995 case 0x2: /* Data breakpoint hit */
996 target->debug_reason = DBG_REASON_WATCHPOINT;
997 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
998 pc -= 4;
999 break;
1000 case 0x3: /* BKPT instruction executed */
1001 target->debug_reason = DBG_REASON_BREAKPOINT;
1002 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1003 pc -= 4;
1004 break;
1005 case 0x4: /* Ext. debug event */
1006 target->debug_reason = DBG_REASON_DBGRQ;
1007 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1008 pc -= 4;
1009 break;
 1010 case 0x5: /* Vector trap occurred */
1011 target->debug_reason = DBG_REASON_BREAKPOINT;
1012 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1013 pc -= 4;
1014 break;
1015 case 0x6: /* Trace buffer full break */
1016 target->debug_reason = DBG_REASON_DBGRQ;
1017 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1018 pc -= 4;
1019 break;
1020 case 0x7: /* Reserved (may flag Hot-Debug support) */
1021 default:
1022 LOG_ERROR("Method of Entry is 'Reserved'");
1023 exit(-1);
1024 break;
1025 }
1026
1027 /* apply PC fixup */
1028 buf_set_u32(arm->pc->value, 0, 32, pc);
1029
1030 /* on the first debug entry, identify cache type */
1031 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1032 {
1033 uint32_t cache_type_reg;
1034
1035 /* read cp15 cache type register */
1036 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1037 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1038
1039 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1040 }
1041
1042 /* examine MMU and Cache settings */
1043 /* read cp15 control register */
1044 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1045 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1046 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1047 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1048 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1049
1050 /* tracing enabled, read collected trace data */
1051 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
1052 {
1053 xscale_read_trace(target);
1054
1055 /* Resume if entered debug due to buffer fill and we're still collecting
1056 * trace data. Note that a debug exception due to trace buffer full
1057 * can only happen in fill mode. */
1058 if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1059 {
1060 if (--xscale->trace.fill_counter > 0)
1061 xscale_resume(target, 1, 0x0, 1, 0);
1062 }
1063 else /* entered debug for other reason; reset counter */
1064 xscale->trace.fill_counter = 0;
1065 }
1066
1067 return ERROR_OK;
1068 }
1069
1070 static int xscale_halt(struct target *target)
1071 {
1072 struct xscale_common *xscale = target_to_xscale(target);
1073
1074 LOG_DEBUG("target->state: %s",
1075 target_state_name(target));
1076
1077 if (target->state == TARGET_HALTED)
1078 {
1079 LOG_DEBUG("target was already halted");
1080 return ERROR_OK;
1081 }
1082 else if (target->state == TARGET_UNKNOWN)
1083 {
 1084 /* this must not happen for an XScale target */
1085 LOG_ERROR("target was in unknown state when halt was requested");
1086 return ERROR_TARGET_INVALID;
1087 }
1088 else if (target->state == TARGET_RESET)
1089 {
1090 LOG_DEBUG("target->state == TARGET_RESET");
1091 }
1092 else
1093 {
1094 /* assert external dbg break */
1095 xscale->external_debug_break = 1;
1096 xscale_read_dcsr(target);
1097
1098 target->debug_reason = DBG_REASON_DBGRQ;
1099 }
1100
1101 return ERROR_OK;
1102 }
1103
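/* Single-step by programming hardware instruction breakpoint 0 (IBCR0)
 * with the address of the next instruction; bit 0 of IBCR0 is the enable
 * bit, so the core halts when it is about to execute that instruction. */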
1104 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1105 {
1106 struct xscale_common *xscale = target_to_xscale(target);
1107 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1108 int retval;
1109
1110 if (xscale->ibcr0_used)
1111 {
1112 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1113
1114 if (ibcr0_bp)
1115 {
1116 xscale_unset_breakpoint(target, ibcr0_bp);
1117 }
1118 else
1119 {
1120 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1121 exit(-1);
1122 }
1123 }
1124
1125 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1126 return retval;
1127
1128 return ERROR_OK;
1129 }
1130
1131 static int xscale_disable_single_step(struct target *target)
1132 {
1133 struct xscale_common *xscale = target_to_xscale(target);
1134 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1135 int retval;
1136
1137 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1138 return retval;
1139
1140 return ERROR_OK;
1141 }
1142
1143 static void xscale_enable_watchpoints(struct target *target)
1144 {
1145 struct watchpoint *watchpoint = target->watchpoints;
1146
1147 while (watchpoint)
1148 {
1149 if (watchpoint->set == 0)
1150 xscale_set_watchpoint(target, watchpoint);
1151 watchpoint = watchpoint->next;
1152 }
1153 }
1154
1155 static void xscale_enable_breakpoints(struct target *target)
1156 {
1157 struct breakpoint *breakpoint = target->breakpoints;
1158
1159 /* set any pending breakpoints */
1160 while (breakpoint)
1161 {
1162 if (breakpoint->set == 0)
1163 xscale_set_breakpoint(target, breakpoint);
1164 breakpoint = breakpoint->next;
1165 }
1166 }
1167
1168 static void xscale_free_trace_data(struct xscale_common *xscale)
1169 {
1170 struct xscale_trace_data *td = xscale->trace.data;
1171 while (td)
1172 {
1173 struct xscale_trace_data *next_td = td->next;
1174 if (td->entries)
1175 free(td->entries);
1176 free(td);
1177 td = next_td;
1178 }
1179 xscale->trace.data = NULL;
1180 }
1181
1182 static int xscale_resume(struct target *target, int current,
1183 uint32_t address, int handle_breakpoints, int debug_execution)
1184 {
1185 struct xscale_common *xscale = target_to_xscale(target);
1186 struct arm *arm = &xscale->arm;
1187 uint32_t current_pc;
1188 int retval;
1189 int i;
1190
1191 LOG_DEBUG("-");
1192
1193 if (target->state != TARGET_HALTED)
1194 {
1195 LOG_WARNING("target not halted");
1196 return ERROR_TARGET_NOT_HALTED;
1197 }
1198
1199 if (!debug_execution)
1200 {
1201 target_free_all_working_areas(target);
1202 }
1203
1204 /* update vector tables */
1205 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1206 return retval;
1207
1208 /* current = 1: continue on current pc, otherwise continue at <address> */
1209 if (!current)
1210 buf_set_u32(arm->pc->value, 0, 32, address);
1211
1212 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1213
1214 /* if we're at the reset vector, we have to simulate the branch */
1215 if (current_pc == 0x0)
1216 {
1217 arm_simulate_step(target, NULL);
1218 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1219 }
1220
1221 /* the front-end may request us not to handle breakpoints */
1222 if (handle_breakpoints)
1223 {
1224 struct breakpoint *breakpoint;
1225 breakpoint = breakpoint_find(target,
1226 buf_get_u32(arm->pc->value, 0, 32));
1227 if (breakpoint != NULL)
1228 {
1229 uint32_t next_pc;
1230 enum trace_mode saved_trace_mode;
1231
1232 /* there's a breakpoint at the current PC, we have to step over it */
1233 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1234 xscale_unset_breakpoint(target, breakpoint);
1235
1236 /* calculate PC of next instruction */
1237 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1238 {
1239 uint32_t current_opcode;
1240 target_read_u32(target, current_pc, &current_opcode);
1241 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1242 }
1243
1244 LOG_DEBUG("enable single-step");
1245 xscale_enable_single_step(target, next_pc);
1246
1247 /* restore banked registers */
1248 retval = xscale_restore_banked(target);
1249 if (retval != ERROR_OK)
1250 return retval;
1251
1252 /* send resume request */
1253 xscale_send_u32(target, 0x30);
1254
1255 /* send CPSR */
1256 xscale_send_u32(target,
1257 buf_get_u32(arm->cpsr->value, 0, 32));
1258 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1259 buf_get_u32(arm->cpsr->value, 0, 32));
1260
1261 for (i = 7; i >= 0; i--)
1262 {
1263 /* send register */
1264 xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1265 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1266 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1267 }
1268
1269 /* send PC */
1270 xscale_send_u32(target,
1271 buf_get_u32(arm->pc->value, 0, 32));
1272 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1273 buf_get_u32(arm->pc->value, 0, 32));
1274
1275 /* disable trace data collection in xscale_debug_entry() */
1276 saved_trace_mode = xscale->trace.mode;
1277 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1278
1279 /* wait for and process debug entry */
1280 xscale_debug_entry(target);
1281
1282 /* re-enable trace buffer, if enabled previously */
1283 xscale->trace.mode = saved_trace_mode;
1284
1285 LOG_DEBUG("disable single-step");
1286 xscale_disable_single_step(target);
1287
1288 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1289 xscale_set_breakpoint(target, breakpoint);
1290 }
1291 }
1292
1293 /* enable any pending breakpoints and watchpoints */
1294 xscale_enable_breakpoints(target);
1295 xscale_enable_watchpoints(target);
1296
1297 /* restore banked registers */
1298 retval = xscale_restore_banked(target);
1299 if (retval != ERROR_OK)
1300 return retval;
1301
1302 /* send resume request (command 0x30 or 0x31)
1303 * clean the trace buffer if it is to be enabled (0x62) */
1304 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
1305 {
1306 if (xscale->trace.mode == XSCALE_TRACE_FILL)
1307 {
1308 /* If trace enabled in fill mode and starting collection of new set
1309 * of buffers, initialize buffer counter and free previous buffers */
1310 if (xscale->trace.fill_counter == 0)
1311 {
1312 xscale->trace.fill_counter = xscale->trace.buffer_fill;
1313 xscale_free_trace_data(xscale);
1314 }
1315 }
1316 else /* wrap mode; free previous buffer */
1317 xscale_free_trace_data(xscale);
1318
1319 xscale_send_u32(target, 0x62);
1320 xscale_send_u32(target, 0x31);
1321 }
1322 else
1323 xscale_send_u32(target, 0x30);
1324
1325 /* send CPSR */
1326 xscale_send_u32(target, buf_get_u32(arm->cpsr->value, 0, 32));
1327 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1328 buf_get_u32(arm->cpsr->value, 0, 32));
1329
1330 for (i = 7; i >= 0; i--)
1331 {
1332 /* send register */
1333 xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1334 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1335 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1336 }
1337
1338 /* send PC */
1339 xscale_send_u32(target, buf_get_u32(arm->pc->value, 0, 32));
1340 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1341 buf_get_u32(arm->pc->value, 0, 32));
1342
1343 target->debug_reason = DBG_REASON_NOTHALTED;
1344
1345 if (!debug_execution)
1346 {
1347 /* registers are now invalid */
1348 register_cache_invalidate(arm->core_cache);
1349 target->state = TARGET_RUNNING;
1350 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1351 }
1352 else
1353 {
1354 target->state = TARGET_DEBUG_RUNNING;
1355 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1356 }
1357
1358 LOG_DEBUG("target resumed");
1359
1360 return ERROR_OK;
1361 }
1362
1363 static int xscale_step_inner(struct target *target, int current,
1364 uint32_t address, int handle_breakpoints)
1365 {
1366 struct xscale_common *xscale = target_to_xscale(target);
1367 struct arm *arm = &xscale->arm;
1368 uint32_t next_pc;
1369 int retval;
1370 int i;
1371
1372 target->debug_reason = DBG_REASON_SINGLESTEP;
1373
1374 /* calculate PC of next instruction */
1375 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1376 {
1377 uint32_t current_opcode, current_pc;
1378 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1379
1380 target_read_u32(target, current_pc, &current_opcode);
1381 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1382 return retval;
1383 }
1384
1385 LOG_DEBUG("enable single-step");
1386 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1387 return retval;
1388
1389 /* restore banked registers */
1390 if ((retval = xscale_restore_banked(target)) != ERROR_OK)
1391 return retval;
1392
1393 /* send resume request (command 0x30 or 0x31)
1394 * clean the trace buffer if it is to be enabled (0x62) */
1395 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
1396 {
1397 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1398 return retval;
1399 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1400 return retval;
1401 }
1402 else
1403 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1404 return retval;
1405
1406 /* send CPSR */
1407 retval = xscale_send_u32(target,
1408 buf_get_u32(arm->cpsr->value, 0, 32));
1409 if (retval != ERROR_OK)
1410 return retval;
1411 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1412 buf_get_u32(arm->cpsr->value, 0, 32));
1413
1414 for (i = 7; i >= 0; i--) {
1415 /* send register */
1416 retval = xscale_send_u32(target,
1417 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1418 if (retval != ERROR_OK)
1419 return retval;
1420 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i,
1421 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1422 }
1423
1424 /* send PC */
1425 retval = xscale_send_u32(target,
1426 buf_get_u32(arm->pc->value, 0, 32));
1427 if (retval != ERROR_OK)
1428 return retval;
1429 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1430 buf_get_u32(arm->pc->value, 0, 32));
1431
1432 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1433
1434 /* registers are now invalid */
1435 register_cache_invalidate(arm->core_cache);
1436
1437 /* wait for and process debug entry */
1438 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1439 return retval;
1440
1441 LOG_DEBUG("disable single-step");
1442 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1443 return retval;
1444
1445 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1446
1447 return ERROR_OK;
1448 }
1449
1450 static int xscale_step(struct target *target, int current,
1451 uint32_t address, int handle_breakpoints)
1452 {
1453 struct arm *arm = target_to_arm(target);
1454 struct breakpoint *breakpoint = NULL;
1455
1456 uint32_t current_pc;
1457 int retval;
1458
1459 if (target->state != TARGET_HALTED)
1460 {
1461 LOG_WARNING("target not halted");
1462 return ERROR_TARGET_NOT_HALTED;
1463 }
1464
1465 /* current = 1: continue on current pc, otherwise continue at <address> */
1466 if (!current)
1467 buf_set_u32(arm->pc->value, 0, 32, address);
1468
1469 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1470
1471 /* if we're at the reset vector, we have to simulate the step */
1472 if (current_pc == 0x0)
1473 {
1474 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1475 return retval;
1476 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1477 LOG_DEBUG("current pc %" PRIx32, current_pc);
1478
1479 target->debug_reason = DBG_REASON_SINGLESTEP;
1480 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1481
1482 return ERROR_OK;
1483 }
1484
1485 /* the front-end may request us not to handle breakpoints */
1486 if (handle_breakpoints)
1487 breakpoint = breakpoint_find(target,
1488 buf_get_u32(arm->pc->value, 0, 32));
1489 if (breakpoint != NULL) {
1490 retval = xscale_unset_breakpoint(target, breakpoint);
1491 if (retval != ERROR_OK)
1492 return retval;
1493 }
1494
1495 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1496 if (retval != ERROR_OK)
1497 return retval;
1498
1499 if (breakpoint)
1500 {
1501 xscale_set_breakpoint(target, breakpoint);
1502 }
1503
1504 LOG_DEBUG("target stepped");
1505
1506 return ERROR_OK;
1507
1508 }
1509
1510 static int xscale_assert_reset(struct target *target)
1511 {
1512 struct xscale_common *xscale = target_to_xscale(target);
1513
1514 LOG_DEBUG("target->state: %s",
1515 target_state_name(target));
1516
 1517 /* select DCSR instruction (set end state to R-T-I to ensure we don't
 1518 * end up in T-L-R, which would reset JTAG)
 1519 */
1520 xscale_jtag_set_instr(target->tap,
1521 XSCALE_SELDCSR << xscale->xscale_variant,
1522 TAP_IDLE);
1523
1524 /* set Hold reset, Halt mode and Trap Reset */
1525 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1526 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1527 xscale_write_dcsr(target, 1, 0);
1528
1529 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1530 xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
1531 jtag_execute_queue();
1532
1533 /* assert reset */
1534 jtag_add_reset(0, 1);
1535
1536 /* sleep 1ms, to be sure we fulfill any requirements */
1537 jtag_add_sleep(1000);
1538 jtag_execute_queue();
1539
1540 target->state = TARGET_RESET;
1541
1542 if (target->reset_halt)
1543 {
1544 int retval;
1545 if ((retval = target_halt(target)) != ERROR_OK)
1546 return retval;
1547 }
1548
1549 return ERROR_OK;
1550 }
1551
1552 static int xscale_deassert_reset(struct target *target)
1553 {
1554 struct xscale_common *xscale = target_to_xscale(target);
1555 struct breakpoint *breakpoint = target->breakpoints;
1556
1557 LOG_DEBUG("-");
1558
1559 xscale->ibcr_available = 2;
1560 xscale->ibcr0_used = 0;
1561 xscale->ibcr1_used = 0;
1562
1563 xscale->dbr_available = 2;
1564 xscale->dbr0_used = 0;
1565 xscale->dbr1_used = 0;
1566
1567 /* mark all hardware breakpoints as unset */
1568 while (breakpoint)
1569 {
1570 if (breakpoint->type == BKPT_HARD)
1571 {
1572 breakpoint->set = 0;
1573 }
1574 breakpoint = breakpoint->next;
1575 }
1576
1577 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1578 xscale_free_trace_data(xscale);
1579
1580 register_cache_invalidate(xscale->arm.core_cache);
1581
 1582 /* FIXME: mark hardware watchpoints as unset too. Also,
1583 * at least some of the XScale registers are invalid...
1584 */
1585
1586 /*
 1587 * REVISIT: *assumes* we had an SRST+TRST reset so the mini-icache
 1588 * contents got invalidated. Safer to force that, so writing new
 1589 * contents can't ever fail.
1590 */
1591 {
1592 uint32_t address;
1593 unsigned buf_cnt;
1594 const uint8_t *buffer = xscale_debug_handler;
1595 int retval;
1596
1597 /* release SRST */
1598 jtag_add_reset(0, 0);
1599
1600 /* wait 300ms; 150 and 100ms were not enough */
1601 jtag_add_sleep(300*1000);
1602
1603 jtag_add_runtest(2030, TAP_IDLE);
1604 jtag_execute_queue();
1605
1606 /* set Hold reset, Halt mode and Trap Reset */
1607 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1608 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1609 xscale_write_dcsr(target, 1, 0);
1610
1611 /* Load the debug handler into the mini-icache. Since
1612 * it's using halt mode (not monitor mode), it runs in
1613 * "Special Debug State" for access to registers, memory,
1614 * coprocessors, trace data, etc.
1615 */
1616 address = xscale->handler_address;
1617 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1618 binary_size > 0;
1619 binary_size -= buf_cnt, buffer += buf_cnt)
1620 {
1621 uint32_t cache_line[8];
1622 unsigned i;
1623
1624 buf_cnt = binary_size;
1625 if (buf_cnt > 32)
1626 buf_cnt = 32;
1627
1628 for (i = 0; i < buf_cnt; i += 4)
1629 {
1630 /* convert LE buffer to host-endian uint32_t */
1631 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1632 }
1633
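/* pad the rest of the cache line with 0xe1a08008 ("mov r8, r8", an ARM NOP) */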
1634 for (; i < 32; i += 4)
1635 {
1636 cache_line[i / 4] = 0xe1a08008;
1637 }
1638
1639 /* only load addresses other than the reset vectors */
1640 if ((address % 0x400) != 0x0)
1641 {
1642 retval = xscale_load_ic(target, address,
1643 cache_line);
1644 if (retval != ERROR_OK)
1645 return retval;
1646 }
1647
1648 address += buf_cnt;
 1649 }
1650
1651 retval = xscale_load_ic(target, 0x0,
1652 xscale->low_vectors);
1653 if (retval != ERROR_OK)
1654 return retval;
1655 retval = xscale_load_ic(target, 0xffff0000,
1656 xscale->high_vectors);
1657 if (retval != ERROR_OK)
1658 return retval;
1659
1660 jtag_add_runtest(30, TAP_IDLE);
1661
1662 jtag_add_sleep(100000);
1663
1664 /* set Hold reset, Halt mode and Trap Reset */
1665 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1666 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1667 xscale_write_dcsr(target, 1, 0);
1668
1669 /* clear Hold reset to let the target run (should enter debug handler) */
1670 xscale_write_dcsr(target, 0, 1);
1671 target->state = TARGET_RUNNING;
1672
1673 if (!target->reset_halt)
1674 {
1675 jtag_add_sleep(10000);
1676
1677 /* we should have entered debug now */
1678 xscale_debug_entry(target);
1679 target->state = TARGET_HALTED;
1680
1681 /* resume the target */
1682 xscale_resume(target, 1, 0x0, 1, 0);
1683 }
1684 }
1685
1686 return ERROR_OK;
1687 }
1688
1689 static int xscale_read_core_reg(struct target *target, struct reg *r,
1690 int num, enum arm_mode mode)
1691 {
1692 /** \todo add debug handler support for core register reads */
1693 LOG_ERROR("not implemented");
1694 return ERROR_OK;
1695 }
1696
1697 static int xscale_write_core_reg(struct target *target, struct reg *r,
1698 int num, enum arm_mode mode, uint32_t value)
1699 {
1700 /** \todo add debug handler support for core register writes */
1701 LOG_ERROR("not implemented");
1702 return ERROR_OK;
1703 }
1704
1705 static int xscale_full_context(struct target *target)
1706 {
1707 struct arm *arm = target_to_arm(target);
1708
1709 uint32_t *buffer;
1710
1711 int i, j;
1712
1713 LOG_DEBUG("-");
1714
1715 if (target->state != TARGET_HALTED)
1716 {
1717 LOG_WARNING("target not halted");
1718 return ERROR_TARGET_NOT_HALTED;
1719 }
1720
1721 buffer = malloc(4 * 8);
1722
1723 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1724 * we can't enter User mode on an XScale (unpredictable),
1725 * but User shares registers with SYS
1726 */
1727 for (i = 1; i < 7; i++)
1728 {
1729 enum arm_mode mode = armv4_5_number_to_mode(i);
1730 bool valid = true;
1731 struct reg *r;
1732
1733 if (mode == ARM_MODE_USR)
1734 continue;
1735
1736 /* check if there are invalid registers in the current mode
1737 */
1738 for (j = 0; valid && j <= 16; j++)
1739 {
1740 if (!ARMV4_5_CORE_REG_MODE(arm->core_cache,
1741 mode, j).valid)
1742 valid = false;
1743 }
1744 if (valid)
1745 continue;
1746
1747 /* request banked registers */
1748 xscale_send_u32(target, 0x0);
1749
1750 /* send CPSR for desired bank mode */
1751 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1752
1753 /* get banked registers: r8 to r14; and SPSR
1754 * except in USR/SYS mode
1755 */
1756 if (mode != ARM_MODE_SYS) {
1757 /* SPSR */
1758 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1759 mode, 16);
1760
1761 xscale_receive(target, buffer, 8);
1762
1763 buf_set_u32(r->value, 0, 32, buffer[7]);
1764 r->dirty = false;
1765 r->valid = true;
1766 } else {
1767 xscale_receive(target, buffer, 7);
1768 }
1769
1770 /* move data from buffer to register cache */
1771 for (j = 8; j <= 14; j++)
1772 {
1773 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1774 mode, j);
1775
1776 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1777 r->dirty = false;
1778 r->valid = true;
1779 }
1780 }
1781
1782 free(buffer);
1783
1784 return ERROR_OK;
1785 }
1786
1787 static int xscale_restore_banked(struct target *target)
1788 {
1789 struct arm *arm = target_to_arm(target);
1790
1791 int i, j;
1792
1793 if (target->state != TARGET_HALTED)
1794 {
1795 LOG_WARNING("target not halted");
1796 return ERROR_TARGET_NOT_HALTED;
1797 }
1798
1799 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1800 * and check if any banked registers need to be written. Ignore
1801 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1802 * an XScale (unpredictable), but they share all registers.
1803 */
1804 for (i = 1; i < 7; i++)
1805 {
1806 enum arm_mode mode = armv4_5_number_to_mode(i);
1807 struct reg *r;
1808
1809 if (mode == ARM_MODE_USR)
1810 continue;
1811
1812 /* check if there are dirty registers in this mode */
1813 for (j = 8; j <= 14; j++)
1814 {
1815 if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
1816 mode, j).dirty)
1817 goto dirty;
1818 }
1819
1820 /* if not USR/SYS, check if the SPSR needs to be written */
1821 if (mode != ARM_MODE_SYS)
1822 {
1823 if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
1824 mode, 16).dirty)
1825 goto dirty;
1826 }
1827
1828 /* there's nothing to flush for this mode */
1829 continue;
1830
1831 dirty:
1832 /* command 0x1: "send banked registers" */
1833 xscale_send_u32(target, 0x1);
1834
1835 /* send CPSR for desired mode */
1836 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1837
1838 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1839 * but this protocol doesn't understand that nuance.
1840 */
1841 for (j = 8; j <= 14; j++) {
1842 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1843 mode, j);
1844 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1845 r->dirty = false;
1846 }
1847
1848 /* send spsr if not in USR/SYS mode */
1849 if (mode != ARM_MODE_SYS) {
1850 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1851 mode, 16);
1852 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1853 r->dirty = false;
1854 }
1855 }
1856
1857 return ERROR_OK;
1858 }
1859
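/* Debug-handler memory access protocol: command byte 0x1n requests a read
 * (0x2n a write), where n is the access size in bytes; the command is
 * followed by the base address and the element count, and the data itself
 * is always transferred as 32-bit words, one per element. */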
1860 static int xscale_read_memory(struct target *target, uint32_t address,
1861 uint32_t size, uint32_t count, uint8_t *buffer)
1862 {
1863 struct xscale_common *xscale = target_to_xscale(target);
1864 uint32_t *buf32;
1865 uint32_t i;
1866 int retval;
1867
1868 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1869
1870 if (target->state != TARGET_HALTED)
1871 {
1872 LOG_WARNING("target not halted");
1873 return ERROR_TARGET_NOT_HALTED;
1874 }
1875
1876 /* sanitize arguments */
1877 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1878 return ERROR_COMMAND_SYNTAX_ERROR;
1879
1880 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1881 return ERROR_TARGET_UNALIGNED_ACCESS;
1882
1883 /* send memory read request (command 0x1n, n: access size) */
1884 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1885 return retval;
1886
1887 /* send base address for read request */
1888 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1889 return retval;
1890
1891 /* send number of requested data words */
1892 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1893 return retval;
1894
1895 /* receive data from target (count times 32-bit words in host endianness) */
1896 buf32 = malloc(4 * count);
 1897 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
 1898 {
 free(buf32);
 return retval;
 }
1899
1900 /* extract data from host-endian buffer into byte stream */
1901 for (i = 0; i < count; i++)
1902 {
1903 switch (size)
1904 {
1905 case 4:
1906 target_buffer_set_u32(target, buffer, buf32[i]);
1907 buffer += 4;
1908 break;
1909 case 2:
1910 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1911 buffer += 2;
1912 break;
1913 case 1:
1914 *buffer++ = buf32[i] & 0xff;
1915 break;
1916 default:
1917 LOG_ERROR("invalid read size");
1918 return ERROR_COMMAND_SYNTAX_ERROR;
1919 }
1920 }
1921
1922 free(buf32);
1923
1924 /* examine DCSR, to see if Sticky Abort (SA) got set */
1925 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1926 return retval;
1927 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1928 {
1929 /* clear SA bit */
1930 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1931 return retval;
1932
1933 return ERROR_TARGET_DATA_ABORT;
1934 }
1935
1936 return ERROR_OK;
1937 }
1938
1939 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1940 uint32_t size, uint32_t count, uint8_t *buffer)
1941 {
1942 struct xscale_common *xscale = target_to_xscale(target);
1943
1944 /* with MMU inactive, there are only physical addresses */
1945 if (!xscale->armv4_5_mmu.mmu_enabled)
1946 return xscale_read_memory(target, address, size, count, buffer);
1947
1948 /** \todo: provide a non-stub implementation of this routine. */
1949 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1950 target_name(target), __func__);
1951 return ERROR_FAIL;
1952 }
1953
1954 static int xscale_write_memory(struct target *target, uint32_t address,
1955 uint32_t size, uint32_t count, const uint8_t *buffer)
1956 {
1957 struct xscale_common *xscale = target_to_xscale(target);
1958 int retval;
1959
1960 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1961
1962 if (target->state != TARGET_HALTED)
1963 {
1964 LOG_WARNING("target not halted");
1965 return ERROR_TARGET_NOT_HALTED;
1966 }
1967
1968 /* sanitize arguments */
1969 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1970 return ERROR_COMMAND_SYNTAX_ERROR;
1971
1972 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1973 return ERROR_TARGET_UNALIGNED_ACCESS;
1974
1975 /* send memory write request (command 0x2n, n: access size) */
1976 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1977 return retval;
1978
1979 /* send base address for write request */
1980 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1981 return retval;
1982
1983 /* send number of data words to be written */
1984 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1985 return retval;
1986
1987 /* extract data from host-endian buffer into byte stream */
1988 #if 0
1989 for (i = 0; i < count; i++)
1990 {
1991 switch (size)
1992 {
1993 case 4:
1994 value = target_buffer_get_u32(target, buffer);
1995 xscale_send_u32(target, value);
1996 buffer += 4;
1997 break;
1998 case 2:
1999 value = target_buffer_get_u16(target, buffer);
2000 xscale_send_u32(target, value);
2001 buffer += 2;
2002 break;
2003 case 1:
2004 value = *buffer;
2005 xscale_send_u32(target, value);
2006 buffer += 1;
2007 break;
2008 default:
2009 LOG_ERROR("should never get here");
2010 exit(-1);
2011 }
2012 }
2013 #endif
2014 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
2015 return retval;
2016
2017 /* examine DCSR, to see if Sticky Abort (SA) got set */
2018 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
2019 return retval;
2020 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2021 {
2022 /* clear SA bit */
2023 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
2024 return retval;
2025
2026 LOG_ERROR("data abort writing memory");
2027 return ERROR_TARGET_DATA_ABORT;
2028 }
2029
2030 return ERROR_OK;
2031 }
2032
2033 static int xscale_write_phys_memory(struct target *target, uint32_t address,
2034 uint32_t size, uint32_t count, const uint8_t *buffer)
2035 {
2036 struct xscale_common *xscale = target_to_xscale(target);
2037
2038 /* with MMU inactive, there are only physical addresses */
2039 if (!xscale->armv4_5_mmu.mmu_enabled)
2040 return xscale_write_memory(target, address, size, count, buffer);
2041
2042 /** \todo: provide a non-stub implementation of this routine. */
2043 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
2044 target_name(target), __func__);
2045 return ERROR_FAIL;
2046 }
2047
2048 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
2049 uint32_t count, const uint8_t *buffer)
2050 {
2051 return xscale_write_memory(target, address, 4, count, buffer);
2052 }
2053
2054 static int xscale_get_ttb(struct target *target, uint32_t *result)
2055 {
2056 struct xscale_common *xscale = target_to_xscale(target);
2057 uint32_t ttb;
2058 int retval;
2059
2060 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2061 if (retval != ERROR_OK)
2062 return retval;
2063 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2064
2065 *result = ttb;
2066
2067 return ERROR_OK;
2068 }
2069
2070 static int xscale_disable_mmu_caches(struct target *target, int mmu,
2071 int d_u_cache, int i_cache)
2072 {
2073 struct xscale_common *xscale = target_to_xscale(target);
2074 uint32_t cp15_control;
2075 int retval;
2076
2077 /* read cp15 control register */
2078 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2079 if (retval != ERROR_OK)
2080 return retval;
2081 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
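/* CP15 control bits used here: bit 0 = MMU enable, bit 2 = D/unified cache enable,
 * bit 12 = I-cache enable */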
2082
2083 if (mmu)
2084 cp15_control &= ~0x1U;
2085
2086 if (d_u_cache)
2087 {
2088 /* clean DCache */
2089 retval = xscale_send_u32(target, 0x50);
2090 if (retval != ERROR_OK)
2091 return retval;
2092 retval = xscale_send_u32(target, xscale->cache_clean_address);
2093 if (retval != ERROR_OK)
2094 return retval;
2095
2096 /* invalidate DCache */
2097 retval = xscale_send_u32(target, 0x51);
2098 if (retval != ERROR_OK)
2099 return retval;
2100
2101 cp15_control &= ~0x4U;
2102 }
2103
2104 if (i_cache)
2105 {
2106 /* invalidate ICache */
2107 retval = xscale_send_u32(target, 0x52);
2108 if (retval != ERROR_OK)
2109 return retval;
2110 cp15_control &= ~0x1000U;
2111 }
2112
2113 /* write new cp15 control register */
2114 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2115 if (retval != ERROR_OK)
2116 return retval;
2117
2118 /* execute cpwait to ensure outstanding operations complete */
2119 retval = xscale_send_u32(target, 0x53);
2120 return retval;
2121 }
2122
2123 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2124 int d_u_cache, int i_cache)
2125 {
2126 struct xscale_common *xscale = target_to_xscale(target);
2127 uint32_t cp15_control;
2128 int retval;
2129
2130 /* read cp15 control register */
2131 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2132 if (retval != ERROR_OK)
2133 return retval;
2134 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2135
2136 if (mmu)
2137 cp15_control |= 0x1U;
2138
2139 if (d_u_cache)
2140 cp15_control |= 0x4U;
2141
2142 if (i_cache)
2143 cp15_control |= 0x1000U;
2144
2145 /* write new cp15 control register */
2146 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2147 if (retval != ERROR_OK)
2148 return retval;
2149
2150 /* execute cpwait to ensure outstanding operations complete */
2151 retval = xscale_send_u32(target, 0x53);
2152 return retval;
2153 }
2154
2155 static int xscale_set_breakpoint(struct target *target,
2156 struct breakpoint *breakpoint)
2157 {
2158 int retval;
2159 struct xscale_common *xscale = target_to_xscale(target);
2160
2161 if (target->state != TARGET_HALTED)
2162 {
2163 LOG_WARNING("target not halted");
2164 return ERROR_TARGET_NOT_HALTED;
2165 }
2166
2167 if (breakpoint->set)
2168 {
2169 LOG_WARNING("breakpoint already set");
2170 return ERROR_OK;
2171 }
2172
2173 if (breakpoint->type == BKPT_HARD)
2174 {
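/* IBCR0/IBCR1 hold the breakpoint address, with bit 0 serving as the enable flag */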
2175 uint32_t value = breakpoint->address | 1;
2176 if (!xscale->ibcr0_used)
2177 {
2178 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2179 xscale->ibcr0_used = 1;
2180 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2181 }
2182 else if (!xscale->ibcr1_used)
2183 {
2184 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2185 xscale->ibcr1_used = 1;
2186 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2187 }
2188 else
2189 { /* bug: availability previously verified in xscale_add_breakpoint() */
2190 LOG_ERROR("BUG: no hardware comparator available");
2191 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2192 }
2193 }
2194 else if (breakpoint->type == BKPT_SOFT)
2195 {
2196 if (breakpoint->length == 4)
2197 {
2198 /* keep the original instruction in target endianness */
2199 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2200 {
2201 return retval;
2202 }
2203 /* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
2204 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2205 {
2206 return retval;
2207 }
2208 }
2209 else
2210 {
2211 /* keep the original instruction in target endianness */
2212 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2213 {
2214 return retval;
2215 }
2216 /* write the bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
2217 if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2218 {
2219 return retval;
2220 }
2221 }
2222 breakpoint->set = 1;
2223
2224 xscale_send_u32(target, 0x50); /* clean dcache */
2225 xscale_send_u32(target, xscale->cache_clean_address);
2226 xscale_send_u32(target, 0x51); /* invalidate dcache */
2227 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2228 }
2229
2230 return ERROR_OK;
2231 }
2232
2233 static int xscale_add_breakpoint(struct target *target,
2234 struct breakpoint *breakpoint)
2235 {
2236 struct xscale_common *xscale = target_to_xscale(target);
2237
2238 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2239 {
2240 LOG_ERROR("no breakpoint unit available for hardware breakpoint");
2241 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2242 }
2243
2244 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2245 {
2246 LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2247 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2248 }
2249
2250 if (breakpoint->type == BKPT_HARD)
2251 {
2252 xscale->ibcr_available--;
2253 }
2254
2255 return xscale_set_breakpoint(target, breakpoint);
2256 }
2257
2258 static int xscale_unset_breakpoint(struct target *target,
2259 struct breakpoint *breakpoint)
2260 {
2261 int retval;
2262 struct xscale_common *xscale = target_to_xscale(target);
2263
2264 if (target->state != TARGET_HALTED)
2265 {
2266 LOG_WARNING("target not halted");
2267 return ERROR_TARGET_NOT_HALTED;
2268 }
2269
2270 if (!breakpoint->set)
2271 {
2272 LOG_WARNING("breakpoint not set");
2273 return ERROR_OK;
2274 }
2275
2276 if (breakpoint->type == BKPT_HARD)
2277 {
2278 if (breakpoint->set == 1)
2279 {
2280 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2281 xscale->ibcr0_used = 0;
2282 }
2283 else if (breakpoint->set == 2)
2284 {
2285 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2286 xscale->ibcr1_used = 0;
2287 }
2288 breakpoint->set = 0;
2289 }
2290 else
2291 {
2292 /* restore original instruction (kept in target endianness) */
2293 if (breakpoint->length == 4)
2294 {
2295 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2296 {
2297 return retval;
2298 }
2299 }
2300 else
2301 {
2302 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2303 {
2304 return retval;
2305 }
2306 }
2307 breakpoint->set = 0;
2308
2309 xscale_send_u32(target, 0x50); /* clean dcache */
2310 xscale_send_u32(target, xscale->cache_clean_address);
2311 xscale_send_u32(target, 0x51); /* invalidate dcache */
2312 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2313 }
2314
2315 return ERROR_OK;
2316 }
2317
2318 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2319 {
2320 struct xscale_common *xscale = target_to_xscale(target);
2321
2322 if (target->state != TARGET_HALTED)
2323 {
2324 LOG_ERROR("target not halted");
2325 return ERROR_TARGET_NOT_HALTED;
2326 }
2327
2328 if (breakpoint->set)
2329 {
2330 xscale_unset_breakpoint(target, breakpoint);
2331 }
2332
2333 if (breakpoint->type == BKPT_HARD)
2334 xscale->ibcr_available++;
2335
2336 return ERROR_OK;
2337 }
2338
2339 static int xscale_set_watchpoint(struct target *target,
2340 struct watchpoint *watchpoint)
2341 {
2342 struct xscale_common *xscale = target_to_xscale(target);
2343 uint32_t enable = 0;
2344 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2345 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2346
2347 if (target->state != TARGET_HALTED)
2348 {
2349 LOG_ERROR("target not halted");
2350 return ERROR_TARGET_NOT_HALTED;
2351 }
2352
2353 switch (watchpoint->rw)
2354 {
2355 case WPT_READ:
2356 enable = 0x3;
2357 break;
2358 case WPT_ACCESS:
2359 enable = 0x2;
2360 break;
2361 case WPT_WRITE:
2362 enable = 0x1;
2363 break;
2364 default:
2365 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2366 }
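
/* DBCON layout as used below: bits [1:0] select the mode for DBR0 (0x1 store,
 * 0x2 any access, 0x3 load), bits [3:2] do the same for DBR1, and bit 8 (M)
 * pairs DBR1 as an address mask for DBR0. For example, a write watchpoint
 * spanning two words placed on DBR0 yields DBCON = 0x101. */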
2367
2368 /* For a watchpoint spanning more than one word, both DBR registers must
2369 be used, with the second serving as a mask. */
2370 if (watchpoint->length > 4)
2371 {
2372 if (xscale->dbr0_used || xscale->dbr1_used)
2373 {
2374 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2375 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2376 }
2377
2378 /* Write mask value to DBR1, based on the length argument.
2379 * Address bits ignored by the comparator are those set in mask. */
2380 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2381 watchpoint->length - 1);
2382 xscale->dbr1_used = 1;
2383 enable |= 0x100; /* DBCON[M] */
2384 }
2385
2386 if (!xscale->dbr0_used)
2387 {
2388 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2389 dbcon_value |= enable;
2390 xscale_set_reg_u32(dbcon, dbcon_value);
2391 watchpoint->set = 1;
2392 xscale->dbr0_used = 1;
2393 }
2394 else if (!xscale->dbr1_used)
2395 {
2396 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2397 dbcon_value |= enable << 2;
2398 xscale_set_reg_u32(dbcon, dbcon_value);
2399 watchpoint->set = 2;
2400 xscale->dbr1_used = 1;
2401 }
2402 else
2403 {
2404 LOG_ERROR("BUG: no hardware comparator available");
2405 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2406 }
2407
2408 return ERROR_OK;
2409 }
2410
2411 static int xscale_add_watchpoint(struct target *target,
2412 struct watchpoint *watchpoint)
2413 {
2414 struct xscale_common *xscale = target_to_xscale(target);
2415
2416 if (xscale->dbr_available < 1)
2417 {
2418 LOG_ERROR("no more watchpoint registers available");
2419 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2420 }
2421
2422 if (watchpoint->value)
2423 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2424
2425 /* check that length is a power of two */
2426 for (uint32_t len = watchpoint->length; len != 1; len /= 2)
2427 {
2428 if (len % 2)
2429 {
2430 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2431 return ERROR_COMMAND_ARGUMENT_INVALID;
2432 }
2433 }
2434
2435 if (watchpoint->length == 4) /* single word watchpoint */
2436 {
2437 xscale->dbr_available--; /* one DBR reg used */
2438 return ERROR_OK;
2439 }
2440
2441 /* watchpoints across multiple words require both DBR registers */
2442 if (xscale->dbr_available < 2)
2443 {
2444 LOG_ERROR("insufficient watchpoint registers available");
2445 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2446 }
2447
2448 if (watchpoint->length > watchpoint->address)
2449 {
2450 LOG_ERROR("xscale does not support watchpoints with length "
2451 "greater than address");
2452 return ERROR_COMMAND_ARGUMENT_INVALID;
2453 }
2454
2455 xscale->dbr_available = 0;
2456 return ERROR_OK;
2457 }
2458
2459 static int xscale_unset_watchpoint(struct target *target,
2460 struct watchpoint *watchpoint)
2461 {
2462 struct xscale_common *xscale = target_to_xscale(target);
2463 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2464 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2465
2466 if (target->state != TARGET_HALTED)
2467 {
2468 LOG_WARNING("target not halted");
2469 return ERROR_TARGET_NOT_HALTED;
2470 }
2471
2472 if (!watchpoint->set)
2473 {
2474 LOG_WARNING("breakpoint not set");
2475 return ERROR_OK;
2476 }
2477
2478 if (watchpoint->set == 1)
2479 {
2480 if (watchpoint->length > 4)
2481 {
2482 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2483 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2484 }
2485 else
2486 dbcon_value &= ~0x3;
2487
2488 xscale_set_reg_u32(dbcon, dbcon_value);
2489 xscale->dbr0_used = 0;
2490 }
2491 else if (watchpoint->set == 2)
2492 {
2493 dbcon_value &= ~0xc;
2494 xscale_set_reg_u32(dbcon, dbcon_value);
2495 xscale->dbr1_used = 0;
2496 }
2497 watchpoint->set = 0;
2498
2499 return ERROR_OK;
2500 }
2501
2502 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2503 {
2504 struct xscale_common *xscale = target_to_xscale(target);
2505
2506 if (target->state != TARGET_HALTED)
2507 {
2508 LOG_ERROR("target not halted");
2509 return ERROR_TARGET_NOT_HALTED;
2510 }
2511
2512 if (watchpoint->set)
2513 {
2514 xscale_unset_watchpoint(target, watchpoint);
2515 }
2516
2517 if (watchpoint->length > 4)
2518 xscale->dbr_available++; /* both DBR regs now available */
2519
2520 xscale->dbr_available++;
2521
2522 return ERROR_OK;
2523 }
2524
2525 static int xscale_get_reg(struct reg *reg)
2526 {
2527 struct xscale_reg *arch_info = reg->arch_info;
2528 struct target *target = arch_info->target;
2529 struct xscale_common *xscale = target_to_xscale(target);
2530
2531 /* DCSR, TX and RX are accessible via JTAG */
2532 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2533 {
2534 return xscale_read_dcsr(arch_info->target);
2535 }
2536 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2537 {
2538 /* 1 = consume register content */
2539 return xscale_read_tx(arch_info->target, 1);
2540 }
2541 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2542 {
2543 /* can't read from RX register (host -> debug handler) */
2544 return ERROR_OK;
2545 }
2546 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2547 {
2548 /* can't (explicitly) read from TXRXCTRL register */
2549 return ERROR_OK;
2550 }
2551 else /* Other DBG registers have to be transferred by the debug handler */
2552 {
2553 /* send CP read request (command 0x40) */
2554 xscale_send_u32(target, 0x40);
2555
2556 /* send CP register number */
2557 xscale_send_u32(target, arch_info->dbg_handler_number);
2558
2559 /* read register value */
2560 xscale_read_tx(target, 1);
2561 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2562
2563 reg->dirty = 0;
2564 reg->valid = 1;
2565 }
2566
2567 return ERROR_OK;
2568 }
2569
2570 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2571 {
2572 struct xscale_reg *arch_info = reg->arch_info;
2573 struct target *target = arch_info->target;
2574 struct xscale_common *xscale = target_to_xscale(target);
2575 uint32_t value = buf_get_u32(buf, 0, 32);
2576
2577 /* DCSR, TX and RX are accessible via JTAG */
2578 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2579 {
2580 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2581 return xscale_write_dcsr(arch_info->target, -1, -1);
2582 }
2583 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2584 {
2585 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2586 return xscale_write_rx(arch_info->target);
2587 }
2588 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2589 {
2590 /* can't write to TX register (debug-handler -> host) */
2591 return ERROR_OK;
2592 }
2593 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2594 {
2595 /* can't (explicitly) write to TXRXCTRL register */
2596 return ERROR_OK;
2597 }
2598 else /* Other DBG registers have to be transferred by the debug handler */
2599 {
2600 /* send CP write request (command 0x41) */
2601 xscale_send_u32(target, 0x41);
2602
2603 /* send CP register number */
2604 xscale_send_u32(target, arch_info->dbg_handler_number);
2605
2606 /* send CP register value */
2607 xscale_send_u32(target, value);
2608 buf_set_u32(reg->value, 0, 32, value);
2609 }
2610
2611 return ERROR_OK;
2612 }
2613
2614 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2615 {
2616 struct xscale_common *xscale = target_to_xscale(target);
2617 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2618 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2619
2620 /* send CP write request (command 0x41) */
2621 xscale_send_u32(target, 0x41);
2622
2623 /* send CP register number */
2624 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2625
2626 /* send CP register value */
2627 xscale_send_u32(target, value);
2628 buf_set_u32(dcsr->value, 0, 32, value);
2629
2630 return ERROR_OK;
2631 }
2632
2633 static int xscale_read_trace(struct target *target)
2634 {
2635 struct xscale_common *xscale = target_to_xscale(target);
2636 struct arm *arm = &xscale->arm;
2637 struct xscale_trace_data **trace_data_p;
2638
2639 /* 258 words from debug handler
2640 * 256 trace buffer entries
2641 * 2 checkpoint addresses
2642 */
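/* Each trace entry carries one 'message byte': the upper nybble gives the
 * message type, the lower nybble an instruction count; indirect-branch
 * messages (0x9n/0xDn) are preceded in the buffer by the four bytes of the
 * branch target address. */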
2643 uint32_t trace_buffer[258];
2644 int is_address[256];
2645 int i, j;
2646 unsigned int num_checkpoints = 0;
2647
2648 if (target->state != TARGET_HALTED)
2649 {
2650 LOG_WARNING("target must be stopped to read trace data");
2651 return ERROR_TARGET_NOT_HALTED;
2652 }
2653
2654 /* send read trace buffer command (command 0x61) */
2655 xscale_send_u32(target, 0x61);
2656
2657 /* receive trace buffer content */
2658 xscale_receive(target, trace_buffer, 258);
2659
2660 /* parse buffer backwards to identify address entries */
2661 for (i = 255; i >= 0; i--)
2662 {
2663 /* also count number of checkpointed entries */
2664 if ((trace_buffer[i] & 0xe0) == 0xc0)
2665 num_checkpoints++;
2666
2667 is_address[i] = 0;
2668 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2669 ((trace_buffer[i] & 0xf0) == 0xd0))
2670 {
2671 if (i > 0)
2672 is_address[--i] = 1;
2673 if (i > 0)
2674 is_address[--i] = 1;
2675 if (i > 0)
2676 is_address[--i] = 1;
2677 if (i > 0)
2678 is_address[--i] = 1;
2679 }
2680 }
2681
2682
2683 /* search first non-zero entry that is not part of an address */
2684 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2685 ;
2686
2687 if (j == 256)
2688 {
2689 LOG_DEBUG("no trace data collected");
2690 return ERROR_XSCALE_NO_TRACE_DATA;
2691 }
2692
2693 /* account for possible partial address at buffer start (wrap mode only) */
2694 if (is_address[0])
2695 { /* first entry is address; complete set of 4? */
2696 i = 1;
2697 while (i < 4)
2698 if (!is_address[i++])
2699 break;
2700 if (i < 4)
2701 j += i; /* partial address; can't use it */
2702 }
2703
2704 /* if first valid entry is indirect branch, can't use that either (no address) */
2705 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2706 j++;
2707
2708 /* walk linked list to terminating entry */
2709 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2710 ;
2711
2712 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2713 (*trace_data_p)->next = NULL;
2714 (*trace_data_p)->chkpt0 = trace_buffer[256];
2715 (*trace_data_p)->chkpt1 = trace_buffer[257];
2716 (*trace_data_p)->last_instruction =
2717 buf_get_u32(arm->pc->value, 0, 32);
2718 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2719 (*trace_data_p)->depth = 256 - j;
2720 (*trace_data_p)->num_checkpoints = num_checkpoints;
2721
2722 for (i = j; i < 256; i++)
2723 {
2724 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2725 if (is_address[i])
2726 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2727 else
2728 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2729 }
2730
2731 return ERROR_OK;
2732 }
2733
2734 static int xscale_read_instruction(struct target *target, uint32_t pc,
2735 struct arm_instruction *instruction)
2736 {
2737 struct xscale_common *const xscale = target_to_xscale(target);
2738 int i;
2739 int section = -1;
2740 size_t size_read;
2741 uint32_t opcode;
2742 int retval;
2743
2744 if (!xscale->trace.image)
2745 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2746
2747 /* search for the section the current instruction belongs to */
2748 for (i = 0; i < xscale->trace.image->num_sections; i++)
2749 {
2750 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2751 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > pc))
2752 {
2753 section = i;
2754 break;
2755 }
2756 }
2757
2758 if (section == -1)
2759 {
2760 /* current instruction couldn't be found in the image */
2761 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2762 }
2763
2764 if (xscale->trace.core_state == ARM_STATE_ARM)
2765 {
2766 uint8_t buf[4];
2767 if ((retval = image_read_section(xscale->trace.image, section,
2768 pc - xscale->trace.image->sections[section].base_address,
2769 4, buf, &size_read)) != ERROR_OK)
2770 {
2771 LOG_ERROR("error while reading instruction");
2772 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2773 }
2774 opcode = target_buffer_get_u32(target, buf);
2775 arm_evaluate_opcode(opcode, pc, instruction);
2776 }
2777 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2778 {
2779 uint8_t buf[2];
2780 if ((retval = image_read_section(xscale->trace.image, section,
2781 pc - xscale->trace.image->sections[section].base_address,
2782 2, buf, &size_read)) != ERROR_OK)
2783 {
2784 LOG_ERROR("error while reading instruction");
2785 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2786 }
2787 opcode = target_buffer_get_u16(target, buf);
2788 thumb_evaluate_opcode(opcode, pc, instruction);
2789 }
2790 else
2791 {
2792 LOG_ERROR("BUG: unknown core state encountered");
2793 exit(-1);
2794 }
2795
2796 return ERROR_OK;
2797 }
2798
2799 /* Extract address encoded into trace data.
2800 * Write result to address referenced by argument 'target', or 0 if incomplete. */
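/* For example (hypothetical buffer contents): entries [i-4 .. i-1] holding
 * 0xA0, 0x00, 0x80, 0x04 reconstruct to the branch target 0xA0008004. */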
2801 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2802 int i, uint32_t *target)
2803 {
2804 /* if there are fewer than four entries prior to the indirect branch message
2805 * we can't extract the address */
2806 if (i < 4)
2807 *target = 0;
2808 else
2809 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2810 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2811 }
2812
2813 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2814 struct arm_instruction *instruction,
2815 struct command_context *cmd_ctx)
2816 {
2817 int retval = xscale_read_instruction(target, pc, instruction);
2818 if (retval == ERROR_OK)
2819 command_print(cmd_ctx, "%s", instruction->text);
2820 else
2821 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2822 }
2823
2824 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2825 {
2826 struct xscale_common *xscale = target_to_xscale(target);
2827 struct xscale_trace_data *trace_data = xscale->trace.data;
2828 int i, retval;
2829 uint32_t breakpoint_pc;
2830 struct arm_instruction instruction;
2831 uint32_t current_pc = 0; /* initialized when address determined */
2832
2833 if (!xscale->trace.image)
2834 LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
2835
2836 /* loop for each trace buffer that was loaded from target */
2837 while (trace_data)
2838 {
2839 int chkpt = 0; /* incremented as checkpointed entries found */
2840 int j;
2841
2842 /* FIXME: set this to correct mode when trace buffer is first enabled */
2843 xscale->trace.core_state = ARM_STATE_ARM;
2844
2845 /* loop for each entry in this trace buffer */
2846 for (i = 0; i < trace_data->depth; i++)
2847 {
2848 int exception = 0;
2849 uint32_t chkpt_reg = 0x0;
2850 uint32_t branch_target = 0;
2851 int count;
2852
2853 /* trace entry type is upper nybble of 'message byte' */
2854 int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
2855
2856 /* Target addresses of indirect branches are written into buffer
2857 * before the message byte representing the branch. Skip past it */
2858 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2859 continue;
2860
2861 switch (trace_msg_type)
2862 {
2863 case 0: /* Exceptions */
2864 case 1:
2865 case 2:
2866 case 3:
2867 case 4:
2868 case 5:
2869 case 6:
2870 case 7:
2871 exception = (trace_data->entries[i].data & 0x70) >> 4;
2872
2873 /* FIXME: vector table may be at ffff0000 */
2874 branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
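/* (equivalent to exception number * 4, i.e. the low exception vector address) */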
2875 break;
2876
2877 case 8: /* Direct Branch */
2878 break;
2879
2880 case 9: /* Indirect Branch */
2881 xscale_branch_address(trace_data, i, &branch_target);
2882 break;
2883
2884 case 13: /* Checkpointed Indirect Branch */
2885 xscale_branch_address(trace_data, i, &branch_target);
2886 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2887 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2888 else
2889 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2890
2891 chkpt++;
2892 break;
2893
2894 case 12: /* Checkpointed Direct Branch */
2895 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2896 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2897 else
2898 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2899
2900 /* if no current_pc, checkpoint will be starting point */
2901 if (current_pc == 0)
2902 branch_target = chkpt_reg;
2903
2904 chkpt++;
2905 break;
2906
2907 case 15: /* Roll-over */
2908 break;
2909
2910 default: /* Reserved */
2911 LOG_WARNING("trace is suspect: invalid trace message byte");
2912 continue;
2913
2914 }
2915
2916 /* If we don't have the current_pc yet, but we did get the branch target
2917 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
2918 * then we can start displaying instructions at the next iteration, with
2919 * branch_target as the starting point.
2920 */
2921 if (current_pc == 0)
2922 {
2923 current_pc = branch_target; /* remains 0 unless branch_target obtained */
2924 continue;
2925 }
2926
2927 /* We have current_pc. Read and display the instructions from the image.
2928 * First, display count instructions (lower nybble of message byte). */
2929 count = trace_data->entries[i].data & 0x0f;
2930 for (j = 0; j < count; j++)
2931 {
2932 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2933 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2934 }
2935
2936 /* An additional instruction is implicitly added to count for
2937 * rollover and some exceptions: undef, swi, prefetch abort. */
2938 if ((trace_msg_type == 15) || (exception > 0 && exception < 4))
2939 {
2940 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2941 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2942 }
2943
2944 if (trace_msg_type == 15) /* rollover */
2945 continue;
2946
2947 if (exception)
2948 {
2949 command_print(cmd_ctx, "--- exception %i ---", exception);
2950 continue;
2951 }
2952
2953 /* not exception or rollover; next instruction is a branch and is
2954 * not included in the count */
2955 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2956
2957 /* for direct branches, extract branch destination from instruction */
2958 if ((trace_msg_type == 8) || (trace_msg_type == 12))
2959 {
2960 retval = xscale_read_instruction(target, current_pc, &instruction);
2961 if (retval == ERROR_OK)
2962 current_pc = instruction.info.b_bl_bx_blx.target_address;
2963 else
2964 current_pc = 0; /* branch destination unknown */
2965
2966 /* direct branch w/ checkpoint; can also get from checkpoint reg */
2967 if (trace_msg_type == 12)
2968 {
2969 if (current_pc == 0)
2970 current_pc = chkpt_reg;
2971 else if (current_pc != chkpt_reg) /* sanity check */
2972 LOG_WARNING("trace is suspect: checkpoint register "
2973 "inconsistent with adddress from image");
2974 }
2975
2976 if (current_pc == 0)
2977 command_print(cmd_ctx, "address unknown");
2978
2979 continue;
2980 }
2981
2982 /* indirect branch; the branch destination was read from trace buffer */
2983 if ((trace_msg_type == 9) || (trace_msg_type == 13))
2984 {
2985 current_pc = branch_target;
2986
2987 /* sanity check (checkpoint reg is redundant) */
2988 if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
2989 LOG_WARNING("trace is suspect: checkpoint register "
2990 "inconsistent with address from trace buffer");
2991 }
2992
2993 } /* END: for (i = 0; i < trace_data->depth; i++) */
2994
2995 breakpoint_pc = trace_data->last_instruction; /* used below */
2996 trace_data = trace_data->next;
2997
2998 } /* END: while (trace_data) */
2999
3000 /* Finally... display all instructions up to the value of the pc when the
3001 * debug break occurred (saved when trace data was collected from target).
3002 * This is necessary because the trace only records execution branches and 16
3003 * consecutive instructions (rollovers), so the last few are typically missed.
3004 */
3005 if (current_pc == 0)
3006 return ERROR_OK; /* current_pc was never found */
3007
3008 /* how many instructions remaining? */
3009 int gap_count = (breakpoint_pc - current_pc) /
3010 (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
3011
3012 /* should never be negative or over 16, but verify */
3013 if (gap_count < 0 || gap_count > 16)
3014 {
3015 LOG_WARNING("trace is suspect: excessive gap at end of trace");
3016 return ERROR_OK; /* bail; large number or negative value no good */
3017 }
3018
3019 /* display remaining instructions */
3020 for (i = 0; i < gap_count; i++)
3021 {
3022 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
3023 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
3024 }
3025
3026 return ERROR_OK;
3027 }
3028
3029 static const struct reg_arch_type xscale_reg_type = {
3030 .get = xscale_get_reg,
3031 .set = xscale_set_reg,
3032 };
3033
3034 static void xscale_build_reg_cache(struct target *target)
3035 {
3036 struct xscale_common *xscale = target_to_xscale(target);
3037 struct arm *arm = &xscale->arm;
3038 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
3039 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
3040 int i;
3041 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
3042
3043 (*cache_p) = arm_build_reg_cache(target, arm);
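/* the generic ARM core register cache comes first; the XScale-specific
 * debug and CP15 registers are chained behind it below */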
3044
3045 (*cache_p)->next = malloc(sizeof(struct reg_cache));
3046 cache_p = &(*cache_p)->next;
3047
3048 /* fill in values for the xscale reg cache */
3049 (*cache_p)->name = "XScale registers";
3050 (*cache_p)->next = NULL;
3051 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
3052 (*cache_p)->num_regs = num_regs;
3053
3054 for (i = 0; i < num_regs; i++)
3055 {
3056 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
3057 (*cache_p)->reg_list[i].value = calloc(4, 1);
3058 (*cache_p)->reg_list[i].dirty = 0;
3059 (*cache_p)->reg_list[i].valid = 0;
3060 (*cache_p)->reg_list[i].size = 32;
3061 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
3062 (*cache_p)->reg_list[i].type = &xscale_reg_type;
3063 arch_info[i] = xscale_reg_arch_info[i];
3064 arch_info[i].target = target;
3065 }
3066
3067 xscale->reg_cache = (*cache_p);
3068 }
3069
3070 static int xscale_init_target(struct command_context *cmd_ctx,
3071 struct target *target)
3072 {
3073 xscale_build_reg_cache(target);
3074 return ERROR_OK;
3075 }
3076
3077 static int xscale_init_arch_info(struct target *target,
3078 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
3079 {
3080 struct arm *arm;
3081 uint32_t high_reset_branch, low_reset_branch;
3082 int i;
3083
3084 arm = &xscale->arm;
3085
3086 /* store architecture specific data */
3087 xscale->common_magic = XSCALE_COMMON_MAGIC;
3088
3089 /* we don't really *need* a variant param ... */
3090 if (variant) {
3091 int ir_length = 0;
3092
3093 if (strcmp(variant, "pxa250") == 0
3094 || strcmp(variant, "pxa255") == 0
3095 || strcmp(variant, "pxa26x") == 0)
3096 ir_length = 5;
3097 else if (strcmp(variant, "pxa27x") == 0
3098 || strcmp(variant, "ixp42x") == 0
3099 || strcmp(variant, "ixp45x") == 0
3100 || strcmp(variant, "ixp46x") == 0)
3101 ir_length = 7;
3102 else if (strcmp(variant, "pxa3xx") == 0)
3103 ir_length = 11;
3104 else
3105 LOG_WARNING("%s: unrecognized variant %s",
3106 tap->dotted_name, variant);
3107
3108 if (ir_length && ir_length != tap->ir_length) {
3109 LOG_WARNING("%s: IR length for %s is %d; fixing",
3110 tap->dotted_name, variant, ir_length);
3111 tap->ir_length = ir_length;
3112 }
3113 }
3114
3115 /* PXA3xx shifts the JTAG instructions */
3116 if (tap->ir_length == 11)
3117 xscale->xscale_variant = XSCALE_PXA3XX;
3118 else
3119 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
3120
3121 /* the debug handler isn't installed (and thus not running) at this time */
3122 xscale->handler_address = 0xfe000800;
3123
3124 /* clear the vectors we keep locally for reference */
3125 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3126 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3127
3128 /* no user-specified vectors have been configured yet */
3129 xscale->static_low_vectors_set = 0x0;
3130 xscale->static_high_vectors_set = 0x0;
3131
3132 /* calculate branches to debug handler */
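/* ARM B offset field = (destination - vector_address - 8) >> 2; per the
 * calculation below, the handler's reset entry lies 0x20 past handler_address */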
3133 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3134 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
3135
3136 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3137 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3138
3139 for (i = 1; i <= 7; i++)
3140 {
3141 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3142 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3143 }
3144
3145 /* 64kB aligned region used for DCache cleaning */
3146 xscale->cache_clean_address = 0xfffe0000;
3147
3148 xscale->hold_rst = 0;
3149 xscale->external_debug_break = 0;
3150
3151 xscale->ibcr_available = 2;
3152 xscale->ibcr0_used = 0;
3153 xscale->ibcr1_used = 0;
3154
3155 xscale->dbr_available = 2;
3156 xscale->dbr0_used = 0;
3157 xscale->dbr1_used = 0;
3158
3159 LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
3160 target_name(target));
3161
3162 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3163 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3164
3165 xscale->vector_catch = 0x1;
3166
3167 xscale->trace.data = NULL;
3168 xscale->trace.image = NULL;
3169 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3170 xscale->trace.buffer_fill = 0;
3171 xscale->trace.fill_counter = 0;
3172
3173 /* prepare ARMv4/5 specific information */
3174 arm->arch_info = xscale;
3175 arm->read_core_reg = xscale_read_core_reg;
3176 arm->write_core_reg = xscale_write_core_reg;
3177 arm->full_context = xscale_full_context;
3178
3179 arm_init_arch_info(target, arm);
3180
3181 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3182 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3183 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3184 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3185 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3186 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3187 xscale->armv4_5_mmu.has_tiny_pages = 1;
3188 xscale->armv4_5_mmu.mmu_enabled = 0;
3189
3190 return ERROR_OK;
3191 }
3192
3193 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3194 {
3195 struct xscale_common *xscale;
3196
3197 if (sizeof xscale_debug_handler - 1 > 0x800) {
3198 LOG_ERROR("debug_handler.bin: larger than 2kb");
3199 return ERROR_FAIL;
3200 }
3201
3202 xscale = calloc(1, sizeof(*xscale));
3203 if (!xscale)
3204 return ERROR_FAIL;
3205
3206 return xscale_init_arch_info(target, xscale, target->tap,
3207 target->variant);
3208 }
3209
3210 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3211 {
3212 struct target *target = NULL;
3213 struct xscale_common *xscale;
3214 int retval;
3215 uint32_t handler_address;
3216
3217 if (CMD_ARGC < 2)
3218 {
3219 return ERROR_COMMAND_SYNTAX_ERROR;
3220 }
3221
3222 if ((target = get_target(CMD_ARGV[0])) == NULL)
3223 {
3224 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3225 return ERROR_FAIL;
3226 }
3227
3228 xscale = target_to_xscale(target);
3229 retval = xscale_verify_pointer(CMD_CTX, xscale);
3230 if (retval != ERROR_OK)
3231 return retval;
3232
3233 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3234
3235 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3236 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3237 {
3238 xscale->handler_address = handler_address;
3239 }
3240 else
3241 {
3242 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3243 return ERROR_FAIL;
3244 }
3245
3246 return ERROR_OK;
3247 }
3248
3249 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3250 {
3251 struct target *target = NULL;
3252 struct xscale_common *xscale;
3253 int retval;
3254 uint32_t cache_clean_address;
3255
3256 if (CMD_ARGC < 2)
3257 {
3258 return ERROR_COMMAND_SYNTAX_ERROR;
3259 }
3260
3261 target = get_target(CMD_ARGV[0]);
3262 if (target == NULL)
3263 {
3264 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3265 return ERROR_FAIL;
3266 }
3267 xscale = target_to_xscale(target);
3268 retval = xscale_verify_pointer(CMD_CTX, xscale);
3269 if (retval != ERROR_OK)
3270 return retval;
3271
3272 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3273
3274 if (cache_clean_address & 0xffff)
3275 {
3276 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3277 }
3278 else
3279 {
3280 xscale->cache_clean_address = cache_clean_address;
3281 }
3282
3283 return ERROR_OK;
3284 }
3285
3286 COMMAND_HANDLER(xscale_handle_cache_info_command)
3287 {
3288 struct target *target = get_current_target(CMD_CTX);
3289 struct xscale_common *xscale = target_to_xscale(target);
3290 int retval;
3291
3292 retval = xscale_verify_pointer(CMD_CTX, xscale);
3293 if (retval != ERROR_OK)
3294 return retval;
3295
3296 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3297 }
3298
3299 static int xscale_virt2phys(struct target *target,
3300 uint32_t virtual, uint32_t *physical)
3301 {
3302 struct xscale_common *xscale = target_to_xscale(target);
3303 uint32_t cb;
3304
3305 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3306 LOG_ERROR(xscale_not);
3307 return ERROR_TARGET_INVALID;
3308 }
3309
3310 uint32_t ret;
3311 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3312 virtual, &cb, &ret);
3313 if (retval != ERROR_OK)
3314 return retval;
3315 *physical = ret;
3316 return ERROR_OK;
3317 }
3318
3319 static int xscale_mmu(struct target *target, int *enabled)
3320 {
3321 struct xscale_common *xscale = target_to_xscale(target);
3322
3323 if (target->state != TARGET_HALTED)
3324 {
3325 LOG_ERROR("Target not halted");
3326 return ERROR_TARGET_INVALID;
3327 }
3328 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3329 return ERROR_OK;
3330 }
3331
3332 COMMAND_HANDLER(xscale_handle_mmu_command)
3333 {
3334 struct target *target = get_current_target(CMD_CTX);
3335 struct xscale_common *xscale = target_to_xscale(target);
3336 int retval;
3337
3338 retval = xscale_verify_pointer(CMD_CTX, xscale);
3339 if (retval != ERROR_OK)
3340 return retval;
3341
3342 if (target->state != TARGET_HALTED)
3343 {
3344 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3345 return ERROR_OK;
3346 }
3347
3348 if (CMD_ARGC >= 1)
3349 {
3350 bool enable;
3351 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3352 if (enable)
3353 xscale_enable_mmu_caches(target, 1, 0, 0);
3354 else
3355 xscale_disable_mmu_caches(target, 1, 0, 0);
3356 xscale->armv4_5_mmu.mmu_enabled = enable;
3357 }
3358
3359 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3360
3361 return ERROR_OK;
3362 }
3363
3364 COMMAND_HANDLER(xscale_handle_idcache_command)
3365 {
3366 struct target *target = get_current_target(CMD_CTX);
3367 struct xscale_common *xscale = target_to_xscale(target);
3368
3369 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3370 if (retval != ERROR_OK)
3371 return retval;
3372
3373 if (target->state != TARGET_HALTED)
3374 {
3375 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3376 return ERROR_OK;
3377 }
3378
3379 bool icache = false;
3380 if (strcmp(CMD_NAME, "icache") == 0)
3381 icache = true;
3382 if (CMD_ARGC >= 1)
3383 {
3384 bool enable;
3385 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3386 if (icache) {
3387 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3388 if (enable)
3389 xscale_enable_mmu_caches(target, 0, 0, 1);
3390 else
3391 xscale_disable_mmu_caches(target, 0, 0, 1);
3392 } else {
3393 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3394 if (enable)
3395 xscale_enable_mmu_caches(target, 0, 1, 0);
3396 else
3397 xscale_disable_mmu_caches(target, 0, 1, 0);
3398 }
3399 }
3400
3401 bool enabled = icache ?
3402 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3403 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3404 const char *msg = enabled ? "enabled" : "disabled";
3405 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3406
3407 return ERROR_OK;
3408 }
3409
3410 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3411 {
3412 struct target *target = get_current_target(CMD_CTX);
3413 struct xscale_common *xscale = target_to_xscale(target);
3414 int retval;
3415
3416 retval = xscale_verify_pointer(CMD_CTX, xscale);
3417 if (retval != ERROR_OK)
3418 return retval;
3419
3420 if (CMD_ARGC < 1)
3421 {
3422 return ERROR_COMMAND_SYNTAX_ERROR;
3423 }
3424 else
3425 {
3426 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3427 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
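/* (the 8-bit vector catch mask occupies DCSR bits [23:16]) */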
3428 xscale_write_dcsr(target, -1, -1);
3429 }
3430
3431 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3432
3433 return ERROR_OK;
3434 }
3435
3436
3437 COMMAND_HANDLER(xscale_handle_vector_table_command)
3438 {
3439 struct target *target = get_current_target(CMD_CTX);
3440 struct xscale_common *xscale = target_to_xscale(target);
3441 int err = 0;
3442 int retval;
3443
3444 retval = xscale_verify_pointer(CMD_CTX, xscale);
3445 if (retval != ERROR_OK)
3446 return retval;
3447
3448 if (CMD_ARGC == 0) /* print current settings */
3449 {
3450 int idx;
3451
3452 command_print(CMD_CTX, "active user-set static vectors:");
3453 for (idx = 1; idx < 8; idx++)
3454 if (xscale->static_low_vectors_set & (1 << idx))
3455 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3456 for (idx = 1; idx < 8; idx++)
3457 if (xscale->static_high_vectors_set & (1 << idx))
3458 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3459 return ERROR_OK;
3460 }
3461
3462 if (CMD_ARGC != 3)
3463 err = 1;
3464 else
3465 {
3466 int idx;
3467 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3468 uint32_t vec;
3469 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3470
3471 if (idx < 1 || idx >= 8)
3472 err = 1;
3473
3474 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3475 {
3476 xscale->static_low_vectors_set |= (1<<idx);
3477 xscale->static_low_vectors[idx] = vec;
3478 }
3479 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3480 {
3481 xscale->static_high_vectors_set |= (1<<idx);
3482 xscale->static_high_vectors[idx] = vec;
3483 }
3484 else
3485 err = 1;
3486 }
3487
3488 if (err)
3489 return ERROR_COMMAND_SYNTAX_ERROR;
3490
3491 return ERROR_OK;
3492 }
3493
3494
3495 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3496 {
3497 struct target *target = get_current_target(CMD_CTX);
3498 struct xscale_common *xscale = target_to_xscale(target);
3499 uint32_t dcsr_value;
3500 int retval;
3501
3502 retval = xscale_verify_pointer(CMD_CTX, xscale);
3503 if (retval != ERROR_OK)
3504 return retval;
3505
3506 if (target->state != TARGET_HALTED)
3507 {
3508 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3509 return ERROR_OK;
3510 }
3511
3512 if (CMD_ARGC >= 1)
3513 {
3514 if (strcmp("enable", CMD_ARGV[0]) == 0)
3515 xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
3516 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3517 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3518 else
3519 return ERROR_COMMAND_SYNTAX_ERROR;
3520 }
3521
3522 if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED)
3523 {
3524 if (strcmp("fill", CMD_ARGV[1]) == 0)
3525 {
3526 int buffcount = 1; /* default */
3527 if (CMD_ARGC >= 3)
3528 COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
3529 if (buffcount < 1) /* invalid */
3530 {
3531 command_print(CMD_CTX, "fill buffer count must be > 0");
3532 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3533 return ERROR_COMMAND_SYNTAX_ERROR;
3534 }
3535 xscale->trace.buffer_fill = buffcount;
3536 xscale->trace.mode = XSCALE_TRACE_FILL;
3537 }
3538 else if (strcmp("wrap", CMD_ARGV[1]) == 0)
3539 xscale->trace.mode = XSCALE_TRACE_WRAP;
3540 else
3541 {
3542 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3543 return ERROR_COMMAND_SYNTAX_ERROR;
3544 }
3545 }
3546
3547 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
3548 {
3549 char fill_string[24]; /* "fill " plus up to ten digits and a NUL */
3550 sprintf(fill_string, "fill %" PRId32, xscale->trace.buffer_fill);
3551 command_print(CMD_CTX, "trace buffer enabled (%s)",
3552 (xscale->trace.mode == XSCALE_TRACE_FILL)
3553 ? fill_string : "wrap");
3554 }
3555 else
3556 command_print(CMD_CTX, "trace buffer disabled");
3557
3558 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
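/* DCSR bits [1:0] select the trace buffer mode: bit 1 is set below for fill
 * mode, both bits are cleared for wrap mode */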
3559 if (xscale->trace.mode == XSCALE_TRACE_FILL)
3560 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3561 else
3562 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3563
3564 return ERROR_OK;
3565 }
3566
3567 COMMAND_HANDLER(xscale_handle_trace_image_command)
3568 {
3569 struct target *target = get_current_target(CMD_CTX);
3570 struct xscale_common *xscale = target_to_xscale(target);
3571 int retval;
3572
3573 if (CMD_ARGC < 1)
3574 {
3575 return ERROR_COMMAND_SYNTAX_ERROR;
3576 }
3577
3578 retval = xscale_verify_pointer(CMD_CTX, xscale);
3579 if (retval != ERROR_OK)
3580 return retval;
3581
3582 if (xscale->trace.image)
3583 {
3584 image_close(xscale->trace.image);
3585 free(xscale->trace.image);
3586 command_print(CMD_CTX, "previously loaded image found and closed");
3587 }
3588
3589 xscale->trace.image = malloc(sizeof(struct image));
3590 xscale->trace.image->base_address_set = 0;
3591 xscale->trace.image->start_address_set = 0;
3592
3593 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3594 if (CMD_ARGC >= 2)
3595 {
3596 xscale->trace.image->base_address_set = 1;
3597 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3598 }
3599 else
3600 {
3601 xscale->trace.image->base_address_set = 0;
3602 }
3603
3604 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3605 {
3606 free(xscale->trace.image);
3607 xscale->trace.image = NULL;
3608 return ERROR_OK;
3609 }
3610
3611 return ERROR_OK;
3612 }
3613
3614 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3615 {
3616 struct target *target = get_current_target(CMD_CTX);
3617 struct xscale_common *xscale = target_to_xscale(target);
3618 struct xscale_trace_data *trace_data;
3619 struct fileio file;
3620 int retval;
3621
3622 retval = xscale_verify_pointer(CMD_CTX, xscale);
3623 if (retval != ERROR_OK)
3624 return retval;
3625
3626 if (target->state != TARGET_HALTED)
3627 {
3628 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3629 return ERROR_OK;
3630 }
3631
3632 if (CMD_ARGC < 1)
3633 {
3634 return ERROR_COMMAND_SYNTAX_ERROR;
3635 }
3636
3637 trace_data = xscale->trace.data;
3638
3639 if (!trace_data)
3640 {
3641 command_print(CMD_CTX, "no trace data collected");
3642 return ERROR_OK;
3643 }
3644
3645 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3646 {
3647 return ERROR_OK;
3648 }
3649
3650 while (trace_data)
3651 {
3652 int i;
3653
3654 fileio_write_u32(&file, trace_data->chkpt0);
3655 fileio_write_u32(&file, trace_data->chkpt1);
3656 fileio_write_u32(&file, trace_data->last_instruction);
3657 fileio_write_u32(&file, trace_data->depth);
3658
3659 for (i = 0; i < trace_data->depth; i++)
3660 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3661
3662 trace_data = trace_data->next;
3663 }
3664
3665 fileio_close(&file);
3666
3667 return ERROR_OK;
3668 }
3669
3670 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3671 {
3672 struct target *target = get_current_target(CMD_CTX);
3673 struct xscale_common *xscale = target_to_xscale(target);
3674 int retval;
3675
3676 retval = xscale_verify_pointer(CMD_CTX, xscale);
3677 if (retval != ERROR_OK)
3678 return retval;
3679
3680 xscale_analyze_trace(target, CMD_CTX);
3681
3682 return ERROR_OK;
3683 }
3684
3685 COMMAND_HANDLER(xscale_handle_cp15)
3686 {
3687 struct target *target = get_current_target(CMD_CTX);
3688 struct xscale_common *xscale = target_to_xscale(target);
3689 int retval;
3690
3691 retval = xscale_verify_pointer(CMD_CTX, xscale);
3692 if (retval != ERROR_OK)
3693 return retval;
3694
3695 if (target->state != TARGET_HALTED)
3696 {
3697 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3698 return ERROR_OK;
3699 }
3700 uint32_t reg_no = 0;
3701 struct reg *reg = NULL;
3702 if (CMD_ARGC > 0)
3703 {
3704 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3705 /*translate from xscale cp15 register no to openocd register*/
3706 switch (reg_no)
3707 {
3708 case 0:
3709 reg_no = XSCALE_MAINID;
3710 break;
3711 case 1:
3712 reg_no = XSCALE_CTRL;
3713 break;
3714 case 2:
3715 reg_no = XSCALE_TTB;
3716 break;
3717 case 3:
3718 reg_no = XSCALE_DAC;
3719 break;
3720 case 5:
3721 reg_no = XSCALE_FSR;
3722 break;
3723 case 6:
3724 reg_no = XSCALE_FAR;
3725 break;
3726 case 13:
3727 reg_no = XSCALE_PID;
3728 break;
3729 case 15:
3730 reg_no = XSCALE_CPACCESS;
3731 break;
3732 default:
3733 command_print(CMD_CTX, "invalid register number");
3734 return ERROR_COMMAND_SYNTAX_ERROR;
3735 }
3736 reg = &xscale->reg_cache->reg_list[reg_no];
3737
3738 }
3739 if (CMD_ARGC == 1)
3740 {
3741 uint32_t value;
3742
3743 /* read cp15 control register */
3744 xscale_get_reg(reg);
3745 value = buf_get_u32(reg->value, 0, 32);
3746 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3747 }
3748 else if (CMD_ARGC == 2)
3749 {
3750 uint32_t value;
3751 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3752
3753 /* send CP write request (command 0x41) */
3754 xscale_send_u32(target, 0x41);
3755
3756 /* send CP register number */
3757 xscale_send_u32(target, reg_no);
3758
3759 /* send CP register value */
3760 xscale_send_u32(target, value);
3761
3762 /* execute cpwait to ensure outstanding operations complete */
3763 xscale_send_u32(target, 0x53);
3764 }
3765 else
3766 {
3767 return ERROR_COMMAND_SYNTAX_ERROR;
3768 }
3769
3770 return ERROR_OK;
3771 }
3772
3773 static const struct command_registration xscale_exec_command_handlers[] = {
3774 {
3775 .name = "cache_info",
3776 .handler = xscale_handle_cache_info_command,
3777 .mode = COMMAND_EXEC,
3778 .help = "display information about CPU caches",
3779 },
3780 {
3781 .name = "mmu",
3782 .handler = xscale_handle_mmu_command,
3783 .mode = COMMAND_EXEC,
3784 .help = "enable or disable the MMU",
3785 .usage = "['enable'|'disable']",
3786 },
3787 {
3788 .name = "icache",
3789 .handler = xscale_handle_idcache_command,
3790 .mode = COMMAND_EXEC,
3791 .help = "display ICache state, optionally enabling or "
3792 "disabling it",
3793 .usage = "['enable'|'disable']",
3794 },
3795 {
3796 .name = "dcache",
3797 .handler = xscale_handle_idcache_command,
3798 .mode = COMMAND_EXEC,
3799 .help = "display DCache state, optionally enabling or "
3800 "disabling it",
3801 .usage = "['enable'|'disable']",
3802 },
3803 {
3804 .name = "vector_catch",
3805 .handler = xscale_handle_vector_catch_command,
3806 .mode = COMMAND_EXEC,
3807 .help = "set or display 8-bit mask of vectors "
3808 "that should trigger debug entry",
3809 .usage = "[mask]",
3810 },
3811 {
3812 .name = "vector_table",
3813 .handler = xscale_handle_vector_table_command,
3814 .mode = COMMAND_EXEC,
3815 .help = "set vector table entry in mini-ICache, "
3816 "or display current tables",
3817 .usage = "[('high'|'low') index code]",
3818 },
3819 {
3820 .name = "trace_buffer",
3821 .handler = xscale_handle_trace_buffer_command,
3822 .mode = COMMAND_EXEC,
3823 .help = "display trace buffer status, enable or disable "
3824 "tracing, and optionally reconfigure trace mode",
3825 .usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
3826 },
3827 {
3828 .name = "dump_trace",
3829 .handler = xscale_handle_dump_trace_command,
3830 .mode = COMMAND_EXEC,
3831 .help = "dump content of trace buffer to file",
3832 .usage = "filename",
3833 },
3834 {
3835 .name = "analyze_trace",
3836 .handler = xscale_handle_analyze_trace_buffer_command,
3837 .mode = COMMAND_EXEC,
3838 .help = "analyze content of trace buffer",
3839 .usage = "",
3840 },
3841 {
3842 .name = "trace_image",
3843 .handler = xscale_handle_trace_image_command,
3844 .mode = COMMAND_EXEC,
3845 .help = "load image from file to address (default 0)",
3846 .usage = "filename [offset [filetype]]",
3847 },
3848 {
3849 .name = "cp15",
3850 .handler = xscale_handle_cp15,
3851 .mode = COMMAND_EXEC,
3852 .help = "Read or write coprocessor 15 register.",
3853 .usage = "register [value]",
3854 },
3855 COMMAND_REGISTRATION_DONE
3856 };
3857 static const struct command_registration xscale_any_command_handlers[] = {
3858 {
3859 .name = "debug_handler",
3860 .handler = xscale_handle_debug_handler_command,
3861 .mode = COMMAND_ANY,
3862 .help = "Change address used for debug handler.",
3863 .usage = "<target> <address>",
3864 },
3865 {
3866 .name = "cache_clean_address",
3867 .handler = xscale_handle_cache_clean_address_command,
3868 .mode = COMMAND_ANY,
3869 .help = "Change address used for cleaning data cache.",
3870 .usage = "address",
3871 },
3872 {
3873 .chain = xscale_exec_command_handlers,
3874 },
3875 COMMAND_REGISTRATION_DONE
3876 };
3877 static const struct command_registration xscale_command_handlers[] = {
3878 {
3879 .chain = arm_command_handlers,
3880 },
3881 {
3882 .name = "xscale",
3883 .mode = COMMAND_ANY,
3884 .help = "xscale command group",
3885 .usage = "",
3886 .chain = xscale_any_command_handlers,
3887 },
3888 COMMAND_REGISTRATION_DONE
3889 };
3890
3891 struct target_type xscale_target =
3892 {
3893 .name = "xscale",
3894
3895 .poll = xscale_poll,
3896 .arch_state = xscale_arch_state,
3897
3898 .target_request_data = NULL,
3899
3900 .halt = xscale_halt,
3901 .resume = xscale_resume,
3902 .step = xscale_step,
3903
3904 .assert_reset = xscale_assert_reset,
3905 .deassert_reset = xscale_deassert_reset,
3906 .soft_reset_halt = NULL,
3907
3908 /* REVISIT on some cores, allow exporting iwmmxt registers ... */
3909 .get_gdb_reg_list = arm_get_gdb_reg_list,
3910
3911 .read_memory = xscale_read_memory,
3912 .read_phys_memory = xscale_read_phys_memory,
3913 .write_memory = xscale_write_memory,
3914 .write_phys_memory = xscale_write_phys_memory,
3915 .bulk_write_memory = xscale_bulk_write_memory,
3916
3917 .checksum_memory = arm_checksum_memory,
3918 .blank_check_memory = arm_blank_check_memory,
3919
3920 .run_algorithm = armv4_5_run_algorithm,
3921
3922 .add_breakpoint = xscale_add_breakpoint,
3923 .remove_breakpoint = xscale_remove_breakpoint,
3924 .add_watchpoint = xscale_add_watchpoint,
3925 .remove_watchpoint = xscale_remove_watchpoint,
3926
3927 .commands = xscale_command_handlers,
3928 .target_create = xscale_target_create,
3929 .init_target = xscale_init_target,
3930
3931 .virt2phys = xscale_virt2phys,
3932 .mmu = xscale_mmu
3933 };
