jtag: stop using sharp corner of JTAG API
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
41
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
74
75
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
78 *
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
83 */
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
86
/* Display names for the XScale debug-unit registers exposed through the
 * target register cache.  Order must match xscale_reg_arch_info[] below
 * and the XSCALE_* register index enum. */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID", /* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0", /* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX", /* 20 */
	"XSCALE_TXRXCTRL",
};
112
/* Per-register architecture info, parallel to xscale_reg_list[].  The
 * first member is the CP15/debug register number used when the register
 * is accessed through the debug handler; -1 marks registers that are
 * reached purely through dedicated JTAG data registers instead. */
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
	{-1, NULL}, /* TX accessed via JTAG */
	{-1, NULL}, /* RX accessed via JTAG */
	{-1, NULL}, /* TXRXCTRL implicit access via JTAG */
};
138
139 /* convenience wrapper to access XScale specific registers */
/* Convenience wrapper: store a host-order 32-bit value into an XScale
 * register through the generic xscale_set_reg() path. */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t raw[4];

	buf_set_u32(raw, 0, 32, value);
	return xscale_set_reg(reg, raw);
}
148
149 static const char xscale_not[] = "target is not an XScale";
150
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
153 {
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
157 }
158 return ERROR_OK;
159 }
160
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
162 {
163 assert (tap != NULL);
164
165 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
166 {
167 struct scan_field field;
168 uint8_t scratch[4];
169
170 memset(&field, 0, sizeof field);
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(scratch, 0, field.num_bits, new_instr);
174
175 jtag_add_ir_scan(tap, &field, end_state);
176 }
177
178 return ERROR_OK;
179 }
180
/* Read the Debug Control and Status Register via the SELDCSR JTAG
 * instruction, refreshing the cached XSCALE_DCSR register.
 *
 * The 36-bit data register splits into three scan fields:
 *   fields[0] (3 bits)  out: bit1 = hold_rst, bit2 = external_debug_break;
 *                       captured value checked against 0b010 (mask 0x7)
 *   fields[1] (32 bits) in: the DCSR contents, captured into the cache
 *   fields[2] (1 bit)   captured value checked to be 0
 *
 * Because the capture scan also shifts data INTO the chip, a second
 * scan writes the just-read value straight back.
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant,
			TAP_DRPAUSE);

	/* hold_rst / external_debug_break ride along in the 3-bit field */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
242
243
/* JTAG completion callback: convert the raw scan bytes at *arg into a
 * host-order uint32_t, in place.  The same address is both the source
 * (scan-order bytes) and the destination (host word). */
static void xscale_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;
	*((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}
249
250 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
251 {
252 if (num_words == 0)
253 return ERROR_INVALID_ARGUMENTS;
254
255 struct xscale_common *xscale = target_to_xscale(target);
256 int retval = ERROR_OK;
257 tap_state_t path[3];
258 struct scan_field fields[3];
259 uint8_t *field0 = malloc(num_words * 1);
260 uint8_t field0_check_value = 0x2;
261 uint8_t field0_check_mask = 0x6;
262 uint32_t *field1 = malloc(num_words * 4);
263 uint8_t field2_check_value = 0x0;
264 uint8_t field2_check_mask = 0x1;
265 int words_done = 0;
266 int words_scheduled = 0;
267 int i;
268
269 path[0] = TAP_DRSELECT;
270 path[1] = TAP_DRCAPTURE;
271 path[2] = TAP_DRSHIFT;
272
273 memset(&fields, 0, sizeof fields);
274
275 fields[0].num_bits = 3;
276 uint8_t tmp;
277 fields[0].in_value = &tmp;
278 fields[0].check_value = &field0_check_value;
279 fields[0].check_mask = &field0_check_mask;
280
281 fields[1].num_bits = 32;
282
283 fields[2].num_bits = 1;
284 uint8_t tmp2;
285 fields[2].in_value = &tmp2;
286 fields[2].check_value = &field2_check_value;
287 fields[2].check_mask = &field2_check_mask;
288
289 xscale_jtag_set_instr(target->tap,
290 XSCALE_DBGTX << xscale->xscale_variant,
291 TAP_IDLE);
292 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
293
294 /* repeat until all words have been collected */
295 int attempts = 0;
296 while (words_done < num_words)
297 {
298 /* schedule reads */
299 words_scheduled = 0;
300 for (i = words_done; i < num_words; i++)
301 {
302 fields[0].in_value = &field0[i];
303
304 jtag_add_pathmove(3, path);
305
306 fields[1].in_value = (uint8_t *)(field1 + i);
307
308 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
309
310 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
311
312 words_scheduled++;
313 }
314
315 if ((retval = jtag_execute_queue()) != ERROR_OK)
316 {
317 LOG_ERROR("JTAG error while receiving data from debug handler");
318 break;
319 }
320
321 /* examine results */
322 for (i = words_done; i < num_words; i++)
323 {
324 if (!(field0[i] & 1))
325 {
326 /* move backwards if necessary */
327 int j;
328 for (j = i; j < num_words - 1; j++)
329 {
330 field0[j] = field0[j + 1];
331 field1[j] = field1[j + 1];
332 }
333 words_scheduled--;
334 }
335 }
336 if (words_scheduled == 0)
337 {
338 if (attempts++==1000)
339 {
340 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
341 retval = ERROR_TARGET_TIMEOUT;
342 break;
343 }
344 }
345
346 words_done += words_scheduled;
347 }
348
349 for (i = 0; i < num_words; i++)
350 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
351
352 free(field1);
353
354 return retval;
355 }
356
/* Poll the debug handler's TX register (DBGTX) until it holds valid
 * data or roughly one second has elapsed.
 *
 * consume != 0: take Capture-DR -> Shift-DR, which clears TX_READY on
 * the target (the data is consumed).  consume == 0: detour through
 * Exit1-DR/Pause-DR so the content is sampled without being consumed.
 *
 * On success the 32-bit payload lands in the cached XSCALE_TX register.
 * Returns ERROR_OK; ERROR_TARGET_RESOURCE_NOT_AVAILABLE when a
 * non-consuming peek found no valid data; ERROR_TARGET_TIMEOUT on
 * timeout or JTAG error.
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGTX << xscale->xscale_variant,
			TAP_IDLE);

	/* consuming path: straight into Shift-DR */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: pause before shifting */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* leave the loop once data is valid, or immediately when peeking */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
451
/* Hand the word staged in the cached XSCALE_RX register to the debug
 * handler via the DBGRX JTAG data register.
 *
 * First polls (up to ~1s) until the handler has drained the previous
 * word (status bit 0 clears), scanning the data out each iteration;
 * then performs one final scan with the rx_valid flag set so the
 * handler accepts the new word.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant,
			TAP_IDLE);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
533
534 /* send count elements of size byte to the debug handler */
535 static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
536 {
537 struct xscale_common *xscale = target_to_xscale(target);
538 uint32_t t[3];
539 int bits[3];
540 int retval;
541 int done_count = 0;
542
543 xscale_jtag_set_instr(target->tap,
544 XSCALE_DBGRX << xscale->xscale_variant,
545 TAP_IDLE);
546
547 bits[0]=3;
548 t[0]=0;
549 bits[1]=32;
550 t[2]=1;
551 bits[2]=1;
552 int endianness = target->endianness;
553 while (done_count++ < count)
554 {
555 switch (size)
556 {
557 case 4:
558 if (endianness == TARGET_LITTLE_ENDIAN)
559 {
560 t[1]=le_to_h_u32(buffer);
561 } else
562 {
563 t[1]=be_to_h_u32(buffer);
564 }
565 break;
566 case 2:
567 if (endianness == TARGET_LITTLE_ENDIAN)
568 {
569 t[1]=le_to_h_u16(buffer);
570 } else
571 {
572 t[1]=be_to_h_u16(buffer);
573 }
574 break;
575 case 1:
576 t[1]=buffer[0];
577 break;
578 default:
579 LOG_ERROR("BUG: size neither 4, 2 nor 1");
580 return ERROR_INVALID_ARGUMENTS;
581 }
582 jtag_add_dr_out(target->tap,
583 3,
584 bits,
585 t,
586 TAP_IDLE);
587 buffer += size;
588 }
589
590 if ((retval = jtag_execute_queue()) != ERROR_OK)
591 {
592 LOG_ERROR("JTAG error while sending data to debug handler");
593 return retval;
594 }
595
596 return ERROR_OK;
597 }
598
599 static int xscale_send_u32(struct target *target, uint32_t value)
600 {
601 struct xscale_common *xscale = target_to_xscale(target);
602
603 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
604 return xscale_write_rx(target);
605 }
606
/* Write the cached XSCALE_DCSR value to the chip via SELDCSR, and
 * optionally update the hold_rst / external_debug_break flags first
 * (pass -1 to leave either flag unchanged).  The 3-bit status field
 * carries those flags out; captured status is checked against 0b010.
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant,
			TAP_IDLE);

	/* flags ride along in the 3-bit out field */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
663
/* Parity of a 32-bit word: 0 if the number of set bits is even,
 * 1 if odd.
 *
 * The word is XOR-folded down to its low nibble, then 0x6996 serves
 * as a 16-entry lookup table (bit i of 0x6996 is the parity of i).
 * Dead commented-out debug code removed.
 */
static unsigned int parity (unsigned int v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v &= 0xf;
	return (0x6996 >> v) & 1;
}
675
/* Load one 8-word cache line (buffer[0..7]) into the mini ICache at
 * virtual address va, using the LDIC JTAG instruction.
 *
 * First scan: 6-bit command b000011 (load Mini ICache line) plus the
 * 27-bit line address (va >> 5).  Then eight 33-bit scans follow, one
 * per instruction word, each carrying the word and its parity bit.
 * Note that the fields[] structs are re-purposed between the two scan
 * layouts (cmd/addr vs. word/parity).
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	xscale_jtag_set_instr(target->tap,
			XSCALE_LDIC << xscale->xscale_variant,
			TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* recover the host-order word to compute its parity bit */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
730
731 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
732 {
733 struct xscale_common *xscale = target_to_xscale(target);
734 uint8_t packet[4];
735 uint8_t cmd;
736 struct scan_field fields[2];
737
738 xscale_jtag_set_instr(target->tap,
739 XSCALE_LDIC << xscale->xscale_variant,
740 TAP_IDLE);
741
742 /* CMD for invalidate IC line b000, bits [6:4] b000 */
743 buf_set_u32(&cmd, 0, 6, 0x0);
744
745 /* virtual address of desired cache line */
746 buf_set_u32(packet, 0, 27, va >> 5);
747
748 memset(&fields, 0, sizeof fields);
749
750 fields[0].num_bits = 6;
751 fields[0].out_value = &cmd;
752
753 fields[1].num_bits = 27;
754 fields[1].out_value = packet;
755
756 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
757
758 return ERROR_OK;
759 }
760
761 static int xscale_update_vectors(struct target *target)
762 {
763 struct xscale_common *xscale = target_to_xscale(target);
764 int i;
765 int retval;
766
767 uint32_t low_reset_branch, high_reset_branch;
768
769 for (i = 1; i < 8; i++)
770 {
771 /* if there's a static vector specified for this exception, override */
772 if (xscale->static_high_vectors_set & (1 << i))
773 {
774 xscale->high_vectors[i] = xscale->static_high_vectors[i];
775 }
776 else
777 {
778 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
779 if (retval == ERROR_TARGET_TIMEOUT)
780 return retval;
781 if (retval != ERROR_OK)
782 {
783 /* Some of these reads will fail as part of normal execution */
784 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
785 }
786 }
787 }
788
789 for (i = 1; i < 8; i++)
790 {
791 if (xscale->static_low_vectors_set & (1 << i))
792 {
793 xscale->low_vectors[i] = xscale->static_low_vectors[i];
794 }
795 else
796 {
797 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
798 if (retval == ERROR_TARGET_TIMEOUT)
799 return retval;
800 if (retval != ERROR_OK)
801 {
802 /* Some of these reads will fail as part of normal execution */
803 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
804 }
805 }
806 }
807
808 /* calculate branches to debug handler */
809 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
810 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
811
812 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
813 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
814
815 /* invalidate and load exception vectors in mini i-cache */
816 xscale_invalidate_ic_line(target, 0x0);
817 xscale_invalidate_ic_line(target, 0xffff0000);
818
819 xscale_load_ic(target, 0x0, xscale->low_vectors);
820 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
821
822 return ERROR_OK;
823 }
824
825 static int xscale_arch_state(struct target *target)
826 {
827 struct xscale_common *xscale = target_to_xscale(target);
828 struct arm *armv4_5 = &xscale->armv4_5_common;
829
830 static const char *state[] =
831 {
832 "disabled", "enabled"
833 };
834
835 static const char *arch_dbg_reason[] =
836 {
837 "", "\n(processor reset)", "\n(trace buffer full)"
838 };
839
840 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
841 {
842 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
843 return ERROR_INVALID_ARGUMENTS;
844 }
845
846 arm_arch_state(target);
847 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
848 state[xscale->armv4_5_mmu.mmu_enabled],
849 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
850 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
851 arch_dbg_reason[xscale->arch_debug_reason]);
852
853 return ERROR_OK;
854 }
855
/* Periodic poll: detect debug entry by peeking (non-consuming) at the
 * TX register while the target is running.  Valid TX data means the
 * debug handler is active, so the target has halted. */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be perfomed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
895
/* Handle entry into debug state: unload the register snapshot pushed
 * by the on-chip debug handler, classify the Method-of-Entry from
 * DCSR, fix up the stored PC, and refresh cached MMU/cache settings.
 *
 * The handler first sends r0, pc, r1-r7 and cpsr (10 words), then the
 * banked r8-r14 plus spsr for the entered mode (8 words, or 7 in
 * USR/SYS mode which has no spsr).
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	unsigned i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
	armv4_5->pc->dirty = 1;
	armv4_5->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(armv4_5, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* an invalid mode in cpsr means the register unload went wrong */
	if (!is_arm_mode(armv4_5->core_mode))
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
			arm_mode_name(armv4_5->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (armv4_5->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
		armv4_5->spsr->dirty = false;
		armv4_5->spsr->valid = true;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++)
	{
		struct reg *r = arm_reg_current(armv4_5, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* mark xscale regs invalid to ensure they are retrieved from the
	 * debug handler if requested */
	for (i = 0; i < xscale->reg_cache->num_regs; i++)
		xscale->reg_cache->reg_list[i].valid = 0;

	/* examine debug reason: Method of Entry field, DCSR bits [4:2] */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
	{
		xscale_read_trace(target);

		/* Resume if entered debug due to buffer fill and we're still collecting
		 * trace data. Note that a debug exception due to trace buffer full
		 * can only happen in fill mode. */
		if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
		{
			if (--xscale->trace.fill_counter > 0)
				xscale_resume(target, 1, 0x0, 1, 0);
		}
		else /* entered debug for other reason; reset counter */
			xscale->trace.fill_counter = 0;
	}

	return ERROR_OK;
}
1069
1070 static int xscale_halt(struct target *target)
1071 {
1072 struct xscale_common *xscale = target_to_xscale(target);
1073
1074 LOG_DEBUG("target->state: %s",
1075 target_state_name(target));
1076
1077 if (target->state == TARGET_HALTED)
1078 {
1079 LOG_DEBUG("target was already halted");
1080 return ERROR_OK;
1081 }
1082 else if (target->state == TARGET_UNKNOWN)
1083 {
1084 /* this must not happen for a xscale target */
1085 LOG_ERROR("target was in unknown state when halt was requested");
1086 return ERROR_TARGET_INVALID;
1087 }
1088 else if (target->state == TARGET_RESET)
1089 {
1090 LOG_DEBUG("target->state == TARGET_RESET");
1091 }
1092 else
1093 {
1094 /* assert external dbg break */
1095 xscale->external_debug_break = 1;
1096 xscale_read_dcsr(target);
1097
1098 target->debug_reason = DBG_REASON_DBGRQ;
1099 }
1100
1101 return ERROR_OK;
1102 }
1103
1104 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1105 {
1106 struct xscale_common *xscale = target_to_xscale(target);
1107 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1108 int retval;
1109
1110 if (xscale->ibcr0_used)
1111 {
1112 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1113
1114 if (ibcr0_bp)
1115 {
1116 xscale_unset_breakpoint(target, ibcr0_bp);
1117 }
1118 else
1119 {
1120 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1121 exit(-1);
1122 }
1123 }
1124
1125 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1126 return retval;
1127
1128 return ERROR_OK;
1129 }
1130
1131 static int xscale_disable_single_step(struct target *target)
1132 {
1133 struct xscale_common *xscale = target_to_xscale(target);
1134 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1135 int retval;
1136
1137 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1138 return retval;
1139
1140 return ERROR_OK;
1141 }
1142
1143 static void xscale_enable_watchpoints(struct target *target)
1144 {
1145 struct watchpoint *watchpoint = target->watchpoints;
1146
1147 while (watchpoint)
1148 {
1149 if (watchpoint->set == 0)
1150 xscale_set_watchpoint(target, watchpoint);
1151 watchpoint = watchpoint->next;
1152 }
1153 }
1154
1155 static void xscale_enable_breakpoints(struct target *target)
1156 {
1157 struct breakpoint *breakpoint = target->breakpoints;
1158
1159 /* set any pending breakpoints */
1160 while (breakpoint)
1161 {
1162 if (breakpoint->set == 0)
1163 xscale_set_breakpoint(target, breakpoint);
1164 breakpoint = breakpoint->next;
1165 }
1166 }
1167
1168 static void xscale_free_trace_data(struct xscale_common *xscale)
1169 {
1170 struct xscale_trace_data *td = xscale->trace.data;
1171 while (td)
1172 {
1173 struct xscale_trace_data *next_td = td->next;
1174 if (td->entries)
1175 free(td->entries);
1176 free(td);
1177 td = next_td;
1178 }
1179 xscale->trace.data = NULL;
1180 }
1181
/*
 * Resume target execution (target API).
 *
 * current = 1: resume at the current PC, otherwise the PC is first set to
 * "address".  handle_breakpoints != 0: a breakpoint sitting on the resume PC
 * is transparently single-stepped over before resuming.  debug_execution
 * != 0: a debugger-internal run; the register cache is kept and the state
 * becomes TARGET_DEBUG_RUNNING instead of TARGET_RUNNING.
 *
 * Debug handler protocol used below: 0x62 cleans the trace buffer,
 * 0x31 resumes with trace enabled, 0x30 resumes without trace; the handler
 * then expects CPSR, r7..r0 and the PC, in that order.
 */
static int xscale_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->pc->value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		struct breakpoint *breakpoint;
		breakpoint = breakpoint_find(target,
				buf_get_u32(armv4_5->pc->value, 0, 32));
		if (breakpoint != NULL)
		{
			uint32_t next_pc;
			enum trace_mode saved_trace_mode;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			}

			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			retval = xscale_restore_banked(target);
			if (retval != ERROR_OK)
				return retval;

			/* send resume request */
			xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));

			/* handler expects registers in descending order: r7 first, r0 last */
			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target,
				buf_get_u32(armv4_5->pc->value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->pc->value, 0, 32));

			/* disable trace data collection in xscale_debug_entry() */
			saved_trace_mode = xscale->trace.mode;
			xscale->trace.mode = XSCALE_TRACE_DISABLED;

			/* wait for and process debug entry */
			xscale_debug_entry(target);

			/* re-enable trace buffer, if enabled previously */
			xscale->trace.mode = saved_trace_mode;

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			/* reinstall the breakpoint we stepped over */
			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	retval = xscale_restore_banked(target);
	if (retval != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
	{
		if (xscale->trace.mode == XSCALE_TRACE_FILL)
		{
			/* If trace enabled in fill mode and starting collection of new set
			 * of buffers, initialize buffer counter and free previous buffers */
			if (xscale->trace.fill_counter == 0)
			{
				xscale->trace.fill_counter = xscale->trace.buffer_fill;
				xscale_free_trace_data(xscale);
			}
		}
		else	/* wrap mode; free previous buffer */
			xscale_free_trace_data(xscale);

		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->pc->value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		register_cache_invalidate(armv4_5->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
1360
/*
 * Core of single-stepping: simulate the current instruction to find the
 * next PC, arm IBCR0 to break there, resume the target via the debug
 * handler (same CPSR/r7..r0/PC protocol as xscale_resume), then wait for
 * debug re-entry and disarm the step breakpoint again.
 *
 * Note: "current", "address" and "handle_breakpoints" are unused here;
 * they are handled by the xscale_step() wrapper before it calls us.
 */
static int xscale_step_inner(struct target *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_banked(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
		if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
			return retval;

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* handler expects registers in descending order: r7 first, r0 last */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1445
1446 static int xscale_step(struct target *target, int current,
1447 uint32_t address, int handle_breakpoints)
1448 {
1449 struct arm *armv4_5 = target_to_arm(target);
1450 struct breakpoint *breakpoint = NULL;
1451
1452 uint32_t current_pc;
1453 int retval;
1454
1455 if (target->state != TARGET_HALTED)
1456 {
1457 LOG_WARNING("target not halted");
1458 return ERROR_TARGET_NOT_HALTED;
1459 }
1460
1461 /* current = 1: continue on current pc, otherwise continue at <address> */
1462 if (!current)
1463 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1464
1465 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1466
1467 /* if we're at the reset vector, we have to simulate the step */
1468 if (current_pc == 0x0)
1469 {
1470 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1471 return retval;
1472 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1473 LOG_DEBUG("current pc %" PRIx32, current_pc);
1474
1475 target->debug_reason = DBG_REASON_SINGLESTEP;
1476 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1477
1478 return ERROR_OK;
1479 }
1480
1481 /* the front-end may request us not to handle breakpoints */
1482 if (handle_breakpoints)
1483 breakpoint = breakpoint_find(target,
1484 buf_get_u32(armv4_5->pc->value, 0, 32));
1485 if (breakpoint != NULL) {
1486 retval = xscale_unset_breakpoint(target, breakpoint);
1487 if (retval != ERROR_OK)
1488 return retval;
1489 }
1490
1491 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1492 if (retval != ERROR_OK)
1493 return retval;
1494
1495 if (breakpoint)
1496 {
1497 xscale_set_breakpoint(target, breakpoint);
1498 }
1499
1500 LOG_DEBUG("target stepped");
1501
1502 return ERROR_OK;
1503
1504 }
1505
/*
 * Assert reset (target API).  Selects the DCSR JTAG instruction with an
 * R-T-I end state (avoiding TAP-reset side effects), programs "Hold Reset",
 * Halt mode and "Trap Reset" into the DCSR, parks the TAP in BYPASS
 * (PXA27x workaround), and only then asserts SRST.  Honors reset_halt by
 * requesting a halt while reset is asserted.
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1547
/*
 * Deassert reset (target API).  Resets the bookkeeping for the hardware
 * break/watchpoint comparators, discards stale trace data and the register
 * cache, releases SRST, then reloads the debug handler into the
 * mini-icache together with the low/high exception vectors, and finally
 * clears "Hold Reset" so the core starts running (and should immediately
 * enter the debug handler).
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* both IBCR and both DBR comparators are free again after reset */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale_free_trace_data(xscale);

	register_cache_invalidate(xscale->armv4_5_common.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		/* NOTE(review): the "- 1" presumably drops a trailing byte of the
		 * embedded handler blob -- confirm against how xscale_debug_handler
		 * is generated. */
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			/* mini-icache lines are loaded 32 bytes (8 words) at a time */
			uint32_t cache_line[8];
			unsigned i;

			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad the rest of the line with 0xe1a08008 ("mov r8, r8", an ARM NOP) */
			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1684
/* Core-register read hook for the ARM register framework -- stub.
 * NOTE(review): returns ERROR_OK despite doing nothing, so callers see
 * stale cached values rather than an error; confirm that is intended.
 */
static int xscale_read_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1692
/* Core-register write hook for the ARM register framework -- stub.
 * NOTE(review): returns ERROR_OK despite doing nothing, so write requests
 * are silently dropped; confirm that is intended.
 */
static int xscale_write_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode, uint32_t value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1700
1701 static int xscale_full_context(struct target *target)
1702 {
1703 struct arm *armv4_5 = target_to_arm(target);
1704
1705 uint32_t *buffer;
1706
1707 int i, j;
1708
1709 LOG_DEBUG("-");
1710
1711 if (target->state != TARGET_HALTED)
1712 {
1713 LOG_WARNING("target not halted");
1714 return ERROR_TARGET_NOT_HALTED;
1715 }
1716
1717 buffer = malloc(4 * 8);
1718
1719 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1720 * we can't enter User mode on an XScale (unpredictable),
1721 * but User shares registers with SYS
1722 */
1723 for (i = 1; i < 7; i++)
1724 {
1725 enum arm_mode mode = armv4_5_number_to_mode(i);
1726 bool valid = true;
1727 struct reg *r;
1728
1729 if (mode == ARM_MODE_USR)
1730 continue;
1731
1732 /* check if there are invalid registers in the current mode
1733 */
1734 for (j = 0; valid && j <= 16; j++)
1735 {
1736 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1737 mode, j).valid)
1738 valid = false;
1739 }
1740 if (valid)
1741 continue;
1742
1743 /* request banked registers */
1744 xscale_send_u32(target, 0x0);
1745
1746 /* send CPSR for desired bank mode */
1747 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1748
1749 /* get banked registers: r8 to r14; and SPSR
1750 * except in USR/SYS mode
1751 */
1752 if (mode != ARM_MODE_SYS) {
1753 /* SPSR */
1754 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1755 mode, 16);
1756
1757 xscale_receive(target, buffer, 8);
1758
1759 buf_set_u32(r->value, 0, 32, buffer[7]);
1760 r->dirty = false;
1761 r->valid = true;
1762 } else {
1763 xscale_receive(target, buffer, 7);
1764 }
1765
1766 /* move data from buffer to register cache */
1767 for (j = 8; j <= 14; j++)
1768 {
1769 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1770 mode, j);
1771
1772 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1773 r->dirty = false;
1774 r->valid = true;
1775 }
1776 }
1777
1778 free(buffer);
1779
1780 return ERROR_OK;
1781 }
1782
/*
 * Flush dirty banked registers back to the target before resuming.
 * For every non-USR mode that has a dirty r8..r14 (or SPSR, outside SYS),
 * debug handler command 0x1 ("send banked registers") is issued, followed
 * by the CPSR value selecting that mode (with IRQ/FIQ masked), then
 * r8..r14 and, if applicable, the SPSR.
 */
static int xscale_restore_banked(struct target *target)
{
	struct arm *armv4_5 = target_to_arm(target);

	int i, j;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written. Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++)
	{
		enum arm_mode mode = armv4_5_number_to_mode(i);
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j).dirty)
				goto dirty;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16).dirty)
				goto dirty;
		}

		/* there's nothing to flush for this mode */
		continue;

dirty:
		/* command 0x1: "send banked registers" */
		xscale_send_u32(target, 0x1);

		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}

		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}
	}

	return ERROR_OK;
}
1855
1856 static int xscale_read_memory(struct target *target, uint32_t address,
1857 uint32_t size, uint32_t count, uint8_t *buffer)
1858 {
1859 struct xscale_common *xscale = target_to_xscale(target);
1860 uint32_t *buf32;
1861 uint32_t i;
1862 int retval;
1863
1864 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1865
1866 if (target->state != TARGET_HALTED)
1867 {
1868 LOG_WARNING("target not halted");
1869 return ERROR_TARGET_NOT_HALTED;
1870 }
1871
1872 /* sanitize arguments */
1873 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1874 return ERROR_INVALID_ARGUMENTS;
1875
1876 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1877 return ERROR_TARGET_UNALIGNED_ACCESS;
1878
1879 /* send memory read request (command 0x1n, n: access size) */
1880 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1881 return retval;
1882
1883 /* send base address for read request */
1884 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1885 return retval;
1886
1887 /* send number of requested data words */
1888 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1889 return retval;
1890
1891 /* receive data from target (count times 32-bit words in host endianness) */
1892 buf32 = malloc(4 * count);
1893 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1894 return retval;
1895
1896 /* extract data from host-endian buffer into byte stream */
1897 for (i = 0; i < count; i++)
1898 {
1899 switch (size)
1900 {
1901 case 4:
1902 target_buffer_set_u32(target, buffer, buf32[i]);
1903 buffer += 4;
1904 break;
1905 case 2:
1906 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1907 buffer += 2;
1908 break;
1909 case 1:
1910 *buffer++ = buf32[i] & 0xff;
1911 break;
1912 default:
1913 LOG_ERROR("invalid read size");
1914 return ERROR_INVALID_ARGUMENTS;
1915 }
1916 }
1917
1918 free(buf32);
1919
1920 /* examine DCSR, to see if Sticky Abort (SA) got set */
1921 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1922 return retval;
1923 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1924 {
1925 /* clear SA bit */
1926 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1927 return retval;
1928
1929 return ERROR_TARGET_DATA_ABORT;
1930 }
1931
1932 return ERROR_OK;
1933 }
1934
1935 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1936 uint32_t size, uint32_t count, uint8_t *buffer)
1937 {
1938 struct xscale_common *xscale = target_to_xscale(target);
1939
1940 /* with MMU inactive, there are only physical addresses */
1941 if (!xscale->armv4_5_mmu.mmu_enabled)
1942 return xscale_read_memory(target, address, size, count, buffer);
1943
1944 /** \todo: provide a non-stub implementation of this routine. */
1945 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1946 target_name(target), __func__);
1947 return ERROR_FAIL;
1948 }
1949
1950 static int xscale_write_memory(struct target *target, uint32_t address,
1951 uint32_t size, uint32_t count, const uint8_t *buffer)
1952 {
1953 struct xscale_common *xscale = target_to_xscale(target);
1954 int retval;
1955
1956 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1957
1958 if (target->state != TARGET_HALTED)
1959 {
1960 LOG_WARNING("target not halted");
1961 return ERROR_TARGET_NOT_HALTED;
1962 }
1963
1964 /* sanitize arguments */
1965 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1966 return ERROR_INVALID_ARGUMENTS;
1967
1968 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1969 return ERROR_TARGET_UNALIGNED_ACCESS;
1970
1971 /* send memory write request (command 0x2n, n: access size) */
1972 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1973 return retval;
1974
1975 /* send base address for read request */
1976 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1977 return retval;
1978
1979 /* send number of requested data words to be written*/
1980 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1981 return retval;
1982
1983 /* extract data from host-endian buffer into byte stream */
1984 #if 0
1985 for (i = 0; i < count; i++)
1986 {
1987 switch (size)
1988 {
1989 case 4:
1990 value = target_buffer_get_u32(target, buffer);
1991 xscale_send_u32(target, value);
1992 buffer += 4;
1993 break;
1994 case 2:
1995 value = target_buffer_get_u16(target, buffer);
1996 xscale_send_u32(target, value);
1997 buffer += 2;
1998 break;
1999 case 1:
2000 value = *buffer;
2001 xscale_send_u32(target, value);
2002 buffer += 1;
2003 break;
2004 default:
2005 LOG_ERROR("should never get here");
2006 exit(-1);
2007 }
2008 }
2009 #endif
2010 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
2011 return retval;
2012
2013 /* examine DCSR, to see if Sticky Abort (SA) got set */
2014 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
2015 return retval;
2016 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2017 {
2018 /* clear SA bit */
2019 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
2020 return retval;
2021
2022 LOG_ERROR("data abort writing memory");
2023 return ERROR_TARGET_DATA_ABORT;
2024 }
2025
2026 return ERROR_OK;
2027 }
2028
2029 static int xscale_write_phys_memory(struct target *target, uint32_t address,
2030 uint32_t size, uint32_t count, const uint8_t *buffer)
2031 {
2032 struct xscale_common *xscale = target_to_xscale(target);
2033
2034 /* with MMU inactive, there are only physical addresses */
2035 if (!xscale->armv4_5_mmu.mmu_enabled)
2036 return xscale_write_memory(target, address, size, count, buffer);
2037
2038 /** \todo: provide a non-stub implementation of this routine. */
2039 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
2040 target_name(target), __func__);
2041 return ERROR_FAIL;
2042 }
2043
/* Bulk-write entry point (target API): simply forwards to the normal
 * write routine using 32-bit (4-byte) accesses.
 */
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, const uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
2049
2050 static int xscale_get_ttb(struct target *target, uint32_t *result)
2051 {
2052 struct xscale_common *xscale = target_to_xscale(target);
2053 uint32_t ttb;
2054 int retval;
2055
2056 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2057 if (retval != ERROR_OK)
2058 return retval;
2059 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2060
2061 *result = ttb;
2062
2063 return ERROR_OK;
2064 }
2065
2066 static int xscale_disable_mmu_caches(struct target *target, int mmu,
2067 int d_u_cache, int i_cache)
2068 {
2069 struct xscale_common *xscale = target_to_xscale(target);
2070 uint32_t cp15_control;
2071 int retval;
2072
2073 /* read cp15 control register */
2074 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2075 if (retval !=ERROR_OK)
2076 return retval;
2077 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2078
2079 if (mmu)
2080 cp15_control &= ~0x1U;
2081
2082 if (d_u_cache)
2083 {
2084 /* clean DCache */
2085 retval = xscale_send_u32(target, 0x50);
2086 if (retval !=ERROR_OK)
2087 return retval;
2088 retval = xscale_send_u32(target, xscale->cache_clean_address);
2089 if (retval !=ERROR_OK)
2090 return retval;
2091
2092 /* invalidate DCache */
2093 retval = xscale_send_u32(target, 0x51);
2094 if (retval !=ERROR_OK)
2095 return retval;
2096
2097 cp15_control &= ~0x4U;
2098 }
2099
2100 if (i_cache)
2101 {
2102 /* invalidate ICache */
2103 retval = xscale_send_u32(target, 0x52);
2104 if (retval !=ERROR_OK)
2105 return retval;
2106 cp15_control &= ~0x1000U;
2107 }
2108
2109 /* write new cp15 control register */
2110 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2111 if (retval !=ERROR_OK)
2112 return retval;
2113
2114 /* execute cpwait to ensure outstanding operations complete */
2115 retval = xscale_send_u32(target, 0x53);
2116 return retval;
2117 }
2118
2119 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2120 int d_u_cache, int i_cache)
2121 {
2122 struct xscale_common *xscale = target_to_xscale(target);
2123 uint32_t cp15_control;
2124 int retval;
2125
2126 /* read cp15 control register */
2127 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2128 if (retval !=ERROR_OK)
2129 return retval;
2130 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2131
2132 if (mmu)
2133 cp15_control |= 0x1U;
2134
2135 if (d_u_cache)
2136 cp15_control |= 0x4U;
2137
2138 if (i_cache)
2139 cp15_control |= 0x1000U;
2140
2141 /* write new cp15 control register */
2142 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2143 if (retval !=ERROR_OK)
2144 return retval;
2145
2146 /* execute cpwait to ensure outstanding operations complete */
2147 retval = xscale_send_u32(target, 0x53);
2148 return retval;
2149 }
2150
/*
 * Install a breakpoint previously registered via xscale_add_breakpoint().
 *
 * BKPT_HARD: program one of the two instruction breakpoint registers
 * (IBCR0/IBCR1) with the address ORed with 1 (matching the enable-bit usage
 * in xscale_enable_single_step()); breakpoint->set records which comparator
 * was used (1 or 2).
 * BKPT_SOFT: save the original instruction and patch in the ARM or Thumb
 * BKPT opcode, then clean/invalidate the caches so the core fetches the
 * patched instruction.
 */
static int xscale_set_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set)
	{
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		uint32_t value = breakpoint->address | 1;
		if (!xscale->ibcr0_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			breakpoint->set = 1;	/* breakpoint set on first breakpoint register */
		}
		else if (!xscale->ibcr1_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			breakpoint->set = 2;	/* breakpoint set on second breakpoint register */
		}
		else
		{	/* bug: availability previously verified in xscale_add_breakpoint() */
			LOG_ERROR("BUG: no hardware comparator available");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
	}
	else if (breakpoint->type == BKPT_SOFT)
	{
		if (breakpoint->length == 4)
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
			/* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
			if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
			{
				return retval;
			}
		}
		else
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
			/* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
			if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
			{
				return retval;
			}
		}
		breakpoint->set = 1;

		/* NOTE(review): return values of these cache maintenance commands
		 * are ignored */
		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}
2228
2229 static int xscale_add_breakpoint(struct target *target,
2230 struct breakpoint *breakpoint)
2231 {
2232 struct xscale_common *xscale = target_to_xscale(target);
2233
2234 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2235 {
2236 LOG_ERROR("no breakpoint unit available for hardware breakpoint");
2237 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2238 }
2239
2240 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2241 {
2242 LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2243 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2244 }
2245
2246 if (breakpoint->type == BKPT_HARD)
2247 {
2248 xscale->ibcr_available--;
2249 }
2250
2251 return xscale_set_breakpoint(target, breakpoint);
2252 }
2253
2254 static int xscale_unset_breakpoint(struct target *target,
2255 struct breakpoint *breakpoint)
2256 {
2257 int retval;
2258 struct xscale_common *xscale = target_to_xscale(target);
2259
2260 if (target->state != TARGET_HALTED)
2261 {
2262 LOG_WARNING("target not halted");
2263 return ERROR_TARGET_NOT_HALTED;
2264 }
2265
2266 if (!breakpoint->set)
2267 {
2268 LOG_WARNING("breakpoint not set");
2269 return ERROR_OK;
2270 }
2271
2272 if (breakpoint->type == BKPT_HARD)
2273 {
2274 if (breakpoint->set == 1)
2275 {
2276 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2277 xscale->ibcr0_used = 0;
2278 }
2279 else if (breakpoint->set == 2)
2280 {
2281 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2282 xscale->ibcr1_used = 0;
2283 }
2284 breakpoint->set = 0;
2285 }
2286 else
2287 {
2288 /* restore original instruction (kept in target endianness) */
2289 if (breakpoint->length == 4)
2290 {
2291 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2292 {
2293 return retval;
2294 }
2295 }
2296 else
2297 {
2298 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2299 {
2300 return retval;
2301 }
2302 }
2303 breakpoint->set = 0;
2304
2305 xscale_send_u32(target, 0x50); /* clean dcache */
2306 xscale_send_u32(target, xscale->cache_clean_address);
2307 xscale_send_u32(target, 0x51); /* invalidate dcache */
2308 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2309 }
2310
2311 return ERROR_OK;
2312 }
2313
2314 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2315 {
2316 struct xscale_common *xscale = target_to_xscale(target);
2317
2318 if (target->state != TARGET_HALTED)
2319 {
2320 LOG_ERROR("target not halted");
2321 return ERROR_TARGET_NOT_HALTED;
2322 }
2323
2324 if (breakpoint->set)
2325 {
2326 xscale_unset_breakpoint(target, breakpoint);
2327 }
2328
2329 if (breakpoint->type == BKPT_HARD)
2330 xscale->ibcr_available++;
2331
2332 return ERROR_OK;
2333 }
2334
2335 static int xscale_set_watchpoint(struct target *target,
2336 struct watchpoint *watchpoint)
2337 {
2338 struct xscale_common *xscale = target_to_xscale(target);
2339 uint32_t enable = 0;
2340 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2341 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2342
2343 if (target->state != TARGET_HALTED)
2344 {
2345 LOG_ERROR("target not halted");
2346 return ERROR_TARGET_NOT_HALTED;
2347 }
2348
2349 switch (watchpoint->rw)
2350 {
2351 case WPT_READ:
2352 enable = 0x3;
2353 break;
2354 case WPT_ACCESS:
2355 enable = 0x2;
2356 break;
2357 case WPT_WRITE:
2358 enable = 0x1;
2359 break;
2360 default:
2361 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2362 }
2363
2364 /* For watchpoint across more than one word, both DBR registers must
2365 be enlisted, with the second used as a mask. */
2366 if (watchpoint->length > 4)
2367 {
2368 if (xscale->dbr0_used || xscale->dbr1_used)
2369 {
2370 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2371 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2372 }
2373
2374 /* Write mask value to DBR1, based on the length argument.
2375 * Address bits ignored by the comparator are those set in mask. */
2376 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2377 watchpoint->length - 1);
2378 xscale->dbr1_used = 1;
2379 enable |= 0x100; /* DBCON[M] */
2380 }
2381
2382 if (!xscale->dbr0_used)
2383 {
2384 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2385 dbcon_value |= enable;
2386 xscale_set_reg_u32(dbcon, dbcon_value);
2387 watchpoint->set = 1;
2388 xscale->dbr0_used = 1;
2389 }
2390 else if (!xscale->dbr1_used)
2391 {
2392 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2393 dbcon_value |= enable << 2;
2394 xscale_set_reg_u32(dbcon, dbcon_value);
2395 watchpoint->set = 2;
2396 xscale->dbr1_used = 1;
2397 }
2398 else
2399 {
2400 LOG_ERROR("BUG: no hardware comparator available");
2401 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2402 }
2403
2404 return ERROR_OK;
2405 }
2406
2407 static int xscale_add_watchpoint(struct target *target,
2408 struct watchpoint *watchpoint)
2409 {
2410 struct xscale_common *xscale = target_to_xscale(target);
2411
2412 if (xscale->dbr_available < 1)
2413 {
2414 LOG_ERROR("no more watchpoint registers available");
2415 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2416 }
2417
2418 if (watchpoint->value)
2419 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2420
2421 /* check that length is a power of two */
2422 for (uint32_t len = watchpoint->length; len != 1; len /= 2)
2423 {
2424 if (len % 2)
2425 {
2426 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2427 return ERROR_COMMAND_ARGUMENT_INVALID;
2428 }
2429 }
2430
2431 if (watchpoint->length == 4) /* single word watchpoint */
2432 {
2433 xscale->dbr_available--; /* one DBR reg used */
2434 return ERROR_OK;
2435 }
2436
2437 /* watchpoints across multiple words require both DBR registers */
2438 if (xscale->dbr_available < 2)
2439 {
2440 LOG_ERROR("insufficient watchpoint registers available");
2441 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2442 }
2443
2444 if (watchpoint->length > watchpoint->address)
2445 {
2446 LOG_ERROR("xscale does not support watchpoints with length "
2447 "greater than address");
2448 return ERROR_COMMAND_ARGUMENT_INVALID;
2449 }
2450
2451 xscale->dbr_available = 0;
2452 return ERROR_OK;
2453 }
2454
2455 static int xscale_unset_watchpoint(struct target *target,
2456 struct watchpoint *watchpoint)
2457 {
2458 struct xscale_common *xscale = target_to_xscale(target);
2459 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2460 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2461
2462 if (target->state != TARGET_HALTED)
2463 {
2464 LOG_WARNING("target not halted");
2465 return ERROR_TARGET_NOT_HALTED;
2466 }
2467
2468 if (!watchpoint->set)
2469 {
2470 LOG_WARNING("breakpoint not set");
2471 return ERROR_OK;
2472 }
2473
2474 if (watchpoint->set == 1)
2475 {
2476 if (watchpoint->length > 4)
2477 {
2478 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2479 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2480 }
2481 else
2482 dbcon_value &= ~0x3;
2483
2484 xscale_set_reg_u32(dbcon, dbcon_value);
2485 xscale->dbr0_used = 0;
2486 }
2487 else if (watchpoint->set == 2)
2488 {
2489 dbcon_value &= ~0xc;
2490 xscale_set_reg_u32(dbcon, dbcon_value);
2491 xscale->dbr1_used = 0;
2492 }
2493 watchpoint->set = 0;
2494
2495 return ERROR_OK;
2496 }
2497
2498 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2499 {
2500 struct xscale_common *xscale = target_to_xscale(target);
2501
2502 if (target->state != TARGET_HALTED)
2503 {
2504 LOG_ERROR("target not halted");
2505 return ERROR_TARGET_NOT_HALTED;
2506 }
2507
2508 if (watchpoint->set)
2509 {
2510 xscale_unset_watchpoint(target, watchpoint);
2511 }
2512
2513 if (watchpoint->length > 4)
2514 xscale->dbr_available++; /* both DBR regs now available */
2515
2516 xscale->dbr_available++;
2517
2518 return ERROR_OK;
2519 }
2520
2521 static int xscale_get_reg(struct reg *reg)
2522 {
2523 struct xscale_reg *arch_info = reg->arch_info;
2524 struct target *target = arch_info->target;
2525 struct xscale_common *xscale = target_to_xscale(target);
2526
2527 /* DCSR, TX and RX are accessible via JTAG */
2528 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2529 {
2530 return xscale_read_dcsr(arch_info->target);
2531 }
2532 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2533 {
2534 /* 1 = consume register content */
2535 return xscale_read_tx(arch_info->target, 1);
2536 }
2537 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2538 {
2539 /* can't read from RX register (host -> debug handler) */
2540 return ERROR_OK;
2541 }
2542 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2543 {
2544 /* can't (explicitly) read from TXRXCTRL register */
2545 return ERROR_OK;
2546 }
2547 else /* Other DBG registers have to be transfered by the debug handler */
2548 {
2549 /* send CP read request (command 0x40) */
2550 xscale_send_u32(target, 0x40);
2551
2552 /* send CP register number */
2553 xscale_send_u32(target, arch_info->dbg_handler_number);
2554
2555 /* read register value */
2556 xscale_read_tx(target, 1);
2557 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2558
2559 reg->dirty = 0;
2560 reg->valid = 1;
2561 }
2562
2563 return ERROR_OK;
2564 }
2565
2566 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2567 {
2568 struct xscale_reg *arch_info = reg->arch_info;
2569 struct target *target = arch_info->target;
2570 struct xscale_common *xscale = target_to_xscale(target);
2571 uint32_t value = buf_get_u32(buf, 0, 32);
2572
2573 /* DCSR, TX and RX are accessible via JTAG */
2574 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2575 {
2576 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2577 return xscale_write_dcsr(arch_info->target, -1, -1);
2578 }
2579 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2580 {
2581 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2582 return xscale_write_rx(arch_info->target);
2583 }
2584 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2585 {
2586 /* can't write to TX register (debug-handler -> host) */
2587 return ERROR_OK;
2588 }
2589 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2590 {
2591 /* can't (explicitly) write to TXRXCTRL register */
2592 return ERROR_OK;
2593 }
2594 else /* Other DBG registers have to be transfered by the debug handler */
2595 {
2596 /* send CP write request (command 0x41) */
2597 xscale_send_u32(target, 0x41);
2598
2599 /* send CP register number */
2600 xscale_send_u32(target, arch_info->dbg_handler_number);
2601
2602 /* send CP register value */
2603 xscale_send_u32(target, value);
2604 buf_set_u32(reg->value, 0, 32, value);
2605 }
2606
2607 return ERROR_OK;
2608 }
2609
2610 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2611 {
2612 struct xscale_common *xscale = target_to_xscale(target);
2613 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2614 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2615
2616 /* send CP write request (command 0x41) */
2617 xscale_send_u32(target, 0x41);
2618
2619 /* send CP register number */
2620 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2621
2622 /* send CP register value */
2623 xscale_send_u32(target, value);
2624 buf_set_u32(dcsr->value, 0, 32, value);
2625
2626 return ERROR_OK;
2627 }
2628
2629 static int xscale_read_trace(struct target *target)
2630 {
2631 struct xscale_common *xscale = target_to_xscale(target);
2632 struct arm *armv4_5 = &xscale->armv4_5_common;
2633 struct xscale_trace_data **trace_data_p;
2634
2635 /* 258 words from debug handler
2636 * 256 trace buffer entries
2637 * 2 checkpoint addresses
2638 */
2639 uint32_t trace_buffer[258];
2640 int is_address[256];
2641 int i, j;
2642 unsigned int num_checkpoints = 0;
2643
2644 if (target->state != TARGET_HALTED)
2645 {
2646 LOG_WARNING("target must be stopped to read trace data");
2647 return ERROR_TARGET_NOT_HALTED;
2648 }
2649
2650 /* send read trace buffer command (command 0x61) */
2651 xscale_send_u32(target, 0x61);
2652
2653 /* receive trace buffer content */
2654 xscale_receive(target, trace_buffer, 258);
2655
2656 /* parse buffer backwards to identify address entries */
2657 for (i = 255; i >= 0; i--)
2658 {
2659 /* also count number of checkpointed entries */
2660 if ((trace_buffer[i] & 0xe0) == 0xc0)
2661 num_checkpoints++;
2662
2663 is_address[i] = 0;
2664 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2665 ((trace_buffer[i] & 0xf0) == 0xd0))
2666 {
2667 if (i > 0)
2668 is_address[--i] = 1;
2669 if (i > 0)
2670 is_address[--i] = 1;
2671 if (i > 0)
2672 is_address[--i] = 1;
2673 if (i > 0)
2674 is_address[--i] = 1;
2675 }
2676 }
2677
2678
2679 /* search first non-zero entry that is not part of an address */
2680 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2681 ;
2682
2683 if (j == 256)
2684 {
2685 LOG_DEBUG("no trace data collected");
2686 return ERROR_XSCALE_NO_TRACE_DATA;
2687 }
2688
2689 /* account for possible partial address at buffer start (wrap mode only) */
2690 if (is_address[0])
2691 { /* first entry is address; complete set of 4? */
2692 i = 1;
2693 while (i < 4)
2694 if (!is_address[i++])
2695 break;
2696 if (i < 4)
2697 j += i; /* partial address; can't use it */
2698 }
2699
2700 /* if first valid entry is indirect branch, can't use that either (no address) */
2701 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2702 j++;
2703
2704 /* walk linked list to terminating entry */
2705 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2706 ;
2707
2708 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2709 (*trace_data_p)->next = NULL;
2710 (*trace_data_p)->chkpt0 = trace_buffer[256];
2711 (*trace_data_p)->chkpt1 = trace_buffer[257];
2712 (*trace_data_p)->last_instruction =
2713 buf_get_u32(armv4_5->pc->value, 0, 32);
2714 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2715 (*trace_data_p)->depth = 256 - j;
2716 (*trace_data_p)->num_checkpoints = num_checkpoints;
2717
2718 for (i = j; i < 256; i++)
2719 {
2720 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2721 if (is_address[i])
2722 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2723 else
2724 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2725 }
2726
2727 return ERROR_OK;
2728 }
2729
/**
 * Fetch and decode the instruction at @a pc from the loaded trace image.
 *
 * The trace buffer only records branches, so the executed instruction
 * stream is reconstructed by reading opcodes back out of the image the
 * user loaded (see xscale->trace.image).
 *
 * @param target      target being traced
 * @param pc          address of the instruction to decode
 * @param instruction filled in with the decoded instruction on success
 * @return ERROR_OK; ERROR_TRACE_IMAGE_UNAVAILABLE when no image is
 *         loaded; ERROR_TRACE_INSTRUCTION_UNAVAILABLE when pc is not
 *         covered by any image section or the read fails
 */
static int xscale_read_instruction(struct target *target, uint32_t pc,
		struct arm_instruction *instruction)
{
	struct xscale_common *const xscale = target_to_xscale(target);
	int i;
	int section = -1;
	size_t size_read;
	uint32_t opcode;
	int retval;

	if (!xscale->trace.image)
		return ERROR_TRACE_IMAGE_UNAVAILABLE;

	/* search for the section the current instruction belongs to */
	for (i = 0; i < xscale->trace.image->num_sections; i++)
	{
		if ((xscale->trace.image->sections[i].base_address <= pc) &&
			(xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > pc))
		{
			section = i;
			break;
		}
	}

	if (section == -1)
	{
		/* current instruction couldn't be found in the image */
		return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
	}

	if (xscale->trace.core_state == ARM_STATE_ARM)
	{
		/* ARM state: 32-bit opcode */
		uint8_t buf[4];
		if ((retval = image_read_section(xscale->trace.image, section,
			pc - xscale->trace.image->sections[section].base_address,
			4, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction");
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u32(target, buf);
		arm_evaluate_opcode(opcode, pc, instruction);
	}
	else if (xscale->trace.core_state == ARM_STATE_THUMB)
	{
		/* Thumb state: 16-bit opcode */
		uint8_t buf[2];
		if ((retval = image_read_section(xscale->trace.image, section,
			pc - xscale->trace.image->sections[section].base_address,
			2, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction");
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u16(target, buf);
		thumb_evaluate_opcode(opcode, pc, instruction);
	}
	else
	{
		/* only ARM and Thumb states are ever recorded for the trace */
		LOG_ERROR("BUG: unknown core state encountered");
		exit(-1);
	}

	return ERROR_OK;
}
2794
2795 /* Extract address encoded into trace data.
2796 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2797 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2798 int i, uint32_t *target)
2799 {
2800 /* if there are less than four entries prior to the indirect branch message
2801 * we can't extract the address */
2802 if (i < 4)
2803 *target = 0;
2804 else
2805 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2806 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2807 }
2808
2809 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2810 struct arm_instruction *instruction,
2811 struct command_context *cmd_ctx)
2812 {
2813 int retval = xscale_read_instruction(target, pc, instruction);
2814 if (retval == ERROR_OK)
2815 command_print(cmd_ctx, "%s", instruction->text);
2816 else
2817 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2818 }
2819
/**
 * Reconstruct and print the executed instruction stream from the trace
 * buffers collected in xscale->trace.data.
 *
 * The trace records branch messages (plus rollover messages every 16
 * sequential instructions); the instructions between branches are read
 * back from the image loaded with 'xscale trace_image'.  Display starts
 * once a concrete address is known (from an indirect-branch address in
 * the buffer or a checkpoint register), and ends at the pc saved when
 * the trace was collected.
 *
 * @param target  target whose trace is analyzed
 * @param cmd_ctx command context used for output
 * @return ERROR_OK (analysis problems are reported as warnings)
 */
static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct xscale_trace_data *trace_data = xscale->trace.data;
	int i, retval;
	uint32_t breakpoint_pc;
	struct arm_instruction instruction;
	uint32_t current_pc = 0; /* initialized when address determined */

	if (!xscale->trace.image)
		LOG_WARNING("No trace image loaded; use 'xscale trace_image'");

	/* loop for each trace buffer that was loaded from target */
	while (trace_data)
	{
		int chkpt = 0; /* incremented as checkpointed entries found */
		int j;

		/* FIXME: set this to correct mode when trace buffer is first enabled */
		xscale->trace.core_state = ARM_STATE_ARM;

		/* loop for each entry in this trace buffer */
		for (i = 0; i < trace_data->depth; i++)
		{
			int exception = 0;
			uint32_t chkpt_reg = 0x0;
			uint32_t branch_target = 0;
			int count;

			/* trace entry type is upper nybble of 'message byte' */
			int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;

			/* Target addresses of indirect branches are written into buffer
			 * before the message byte representing the branch. Skip past it */
			if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
				continue;

			switch (trace_msg_type)
			{
				case 0: /* Exceptions */
				case 1:
				case 2:
				case 3:
				case 4:
				case 5:
				case 6:
				case 7:
					/* exception vector number is bits [6:4] */
					exception = (trace_data->entries[i].data & 0x70) >> 4;

					/* FIXME: vector table may be at ffff0000 */
					branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
					break;

				case 8: /* Direct Branch */
					break;

				case 9: /* Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					break;

				case 13: /* Checkpointed Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
					else
						chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */

					chkpt++;
					break;

				case 12: /* Checkpointed Direct Branch */
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
					else
						chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */

					/* if no current_pc, checkpoint will be starting point */
					if (current_pc == 0)
						branch_target = chkpt_reg;

					chkpt++;
					break;

				case 15: /* Roll-over */
					break;

				default: /* Reserved */
					LOG_WARNING("trace is suspect: invalid trace message byte");
					continue;

			}

			/* If we don't have the current_pc yet, but we did get the branch target
			 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
			 * then we can start displaying instructions at the next iteration, with
			 * branch_target as the starting point.
			 */
			if (current_pc == 0)
			{
				current_pc = branch_target; /* remains 0 unless branch_target obtained */
				continue;
			}

			/* We have current_pc. Read and display the instructions from the image.
			 * First, display count instructions (lower nybble of message byte). */
			count = trace_data->entries[i].data & 0x0f;
			for (j = 0; j < count; j++)
			{
				xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			/* An additional instruction is implicitly added to count for
			 * rollover and some exceptions: undef, swi, prefetch abort. */
			if ((trace_msg_type == 15) || (exception > 0 && exception < 4))
			{
				xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			if (trace_msg_type == 15) /* rollover */
				continue;

			if (exception)
			{
				command_print(cmd_ctx, "--- exception %i ---", exception);
				continue;
			}

			/* not exception or rollover; next instruction is a branch and is
			 * not included in the count */
			xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);

			/* for direct branches, extract branch destination from instruction */
			if ((trace_msg_type == 8) || (trace_msg_type == 12))
			{
				retval = xscale_read_instruction(target, current_pc, &instruction);
				if (retval == ERROR_OK)
					current_pc = instruction.info.b_bl_bx_blx.target_address;
				else
					current_pc = 0; /* branch destination unknown */

				/* direct branch w/ checkpoint; can also get from checkpoint reg */
				if (trace_msg_type == 12)
				{
					if (current_pc == 0)
						current_pc = chkpt_reg;
					else if (current_pc != chkpt_reg) /* sanity check */
						LOG_WARNING("trace is suspect: checkpoint register "
							"inconsistent with adddress from image");
				}

				if (current_pc == 0)
					command_print(cmd_ctx, "address unknown");

				continue;
			}

			/* indirect branch; the branch destination was read from trace buffer */
			if ((trace_msg_type == 9) || (trace_msg_type == 13))
			{
				current_pc = branch_target;

				/* sanity check (checkpoint reg is redundant) */
				if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
					LOG_WARNING("trace is suspect: checkpoint register "
						"inconsistent with address from trace buffer");
			}

		} /* END: for (i = 0; i < trace_data->depth; i++) */

		breakpoint_pc = trace_data->last_instruction; /* used below */
		trace_data = trace_data->next;

	} /* END: while (trace_data) */

	/* Finally... display all instructions up to the value of the pc when the
	 * debug break occurred (saved when trace data was collected from target).
	 * This is necessary because the trace only records execution branches and 16
	 * consecutive instructions (rollovers), so last few typically missed.
	 */
	if (current_pc == 0)
		return ERROR_OK; /* current_pc was never found */

	/* how many instructions remaining? */
	int gap_count = (breakpoint_pc - current_pc) /
		(xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);

	/* should never be negative or over 16, but verify */
	if (gap_count < 0 || gap_count > 16)
	{
		LOG_WARNING("trace is suspect: excessive gap at end of trace");
		return ERROR_OK; /* bail; large number or negative value no good */
	}

	/* display remaining instructions */
	for (i = 0; i < gap_count; i++)
	{
		xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
		current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
	}

	return ERROR_OK;
}
3024
/* Register-cache accessors for the XScale debug registers; see
 * xscale_get_reg() and xscale_set_reg() above. */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
3029
3030 static void xscale_build_reg_cache(struct target *target)
3031 {
3032 struct xscale_common *xscale = target_to_xscale(target);
3033 struct arm *armv4_5 = &xscale->armv4_5_common;
3034 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
3035 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
3036 int i;
3037 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
3038
3039 (*cache_p) = arm_build_reg_cache(target, armv4_5);
3040
3041 (*cache_p)->next = malloc(sizeof(struct reg_cache));
3042 cache_p = &(*cache_p)->next;
3043
3044 /* fill in values for the xscale reg cache */
3045 (*cache_p)->name = "XScale registers";
3046 (*cache_p)->next = NULL;
3047 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
3048 (*cache_p)->num_regs = num_regs;
3049
3050 for (i = 0; i < num_regs; i++)
3051 {
3052 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
3053 (*cache_p)->reg_list[i].value = calloc(4, 1);
3054 (*cache_p)->reg_list[i].dirty = 0;
3055 (*cache_p)->reg_list[i].valid = 0;
3056 (*cache_p)->reg_list[i].size = 32;
3057 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
3058 (*cache_p)->reg_list[i].type = &xscale_reg_type;
3059 arch_info[i] = xscale_reg_arch_info[i];
3060 arch_info[i].target = target;
3061 }
3062
3063 xscale->reg_cache = (*cache_p);
3064 }
3065
/* target_type 'init_target' hook: build the register cache (ARM core
 * registers plus XScale debug registers) for this target. */
static int xscale_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
3072
/**
 * Initialize the XScale-specific target state.
 *
 * Fixes up the TAP IR length for known chip variants, selects the JTAG
 * instruction layout (PXA3xx shifts the opcodes), seeds the exception
 * vector stubs that branch into the debug handler, and resets all
 * breakpoint/watchpoint/trace bookkeeping before wiring up the generic
 * ARM and MMU callback tables.
 *
 * @param target  target being created
 * @param xscale  zero-initialized per-target state to fill in
 * @param tap     JTAG TAP for this target
 * @param variant optional chip-variant name used to validate/fix the
 *                TAP IR length; may be NULL
 * @return ERROR_OK (always)
 */
static int xscale_init_arch_info(struct target *target,
		struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specfic data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* a variant param ... */
	if (variant) {
		int ir_length = 0;

		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else if (strcmp(variant, "pxa3xx") == 0)
			ir_length = 11;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* PXA3xx shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* all other vectors branch to themselves until configured otherwise */
	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two IBCR instruction comparators and two DBR data comparators */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
			target_name(target));

	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	/* trace support starts out disabled with no collected data */
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale->trace.buffer_fill = 0;
	xscale->trace.fill_counter = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	arm_init_arch_info(target, armv4_5);

	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
3188
3189 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3190 {
3191 struct xscale_common *xscale;
3192
3193 if (sizeof xscale_debug_handler - 1 > 0x800) {
3194 LOG_ERROR("debug_handler.bin: larger than 2kb");
3195 return ERROR_FAIL;
3196 }
3197
3198 xscale = calloc(1, sizeof(*xscale));
3199 if (!xscale)
3200 return ERROR_FAIL;
3201
3202 return xscale_init_arch_info(target, xscale, target->tap,
3203 target->variant);
3204 }
3205
3206 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3207 {
3208 struct target *target = NULL;
3209 struct xscale_common *xscale;
3210 int retval;
3211 uint32_t handler_address;
3212
3213 if (CMD_ARGC < 2)
3214 {
3215 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3216 return ERROR_OK;
3217 }
3218
3219 if ((target = get_target(CMD_ARGV[0])) == NULL)
3220 {
3221 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3222 return ERROR_FAIL;
3223 }
3224
3225 xscale = target_to_xscale(target);
3226 retval = xscale_verify_pointer(CMD_CTX, xscale);
3227 if (retval != ERROR_OK)
3228 return retval;
3229
3230 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3231
3232 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3233 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3234 {
3235 xscale->handler_address = handler_address;
3236 }
3237 else
3238 {
3239 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3240 return ERROR_FAIL;
3241 }
3242
3243 return ERROR_OK;
3244 }
3245
3246 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3247 {
3248 struct target *target = NULL;
3249 struct xscale_common *xscale;
3250 int retval;
3251 uint32_t cache_clean_address;
3252
3253 if (CMD_ARGC < 2)
3254 {
3255 return ERROR_COMMAND_SYNTAX_ERROR;
3256 }
3257
3258 target = get_target(CMD_ARGV[0]);
3259 if (target == NULL)
3260 {
3261 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3262 return ERROR_FAIL;
3263 }
3264 xscale = target_to_xscale(target);
3265 retval = xscale_verify_pointer(CMD_CTX, xscale);
3266 if (retval != ERROR_OK)
3267 return retval;
3268
3269 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3270
3271 if (cache_clean_address & 0xffff)
3272 {
3273 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3274 }
3275 else
3276 {
3277 xscale->cache_clean_address = cache_clean_address;
3278 }
3279
3280 return ERROR_OK;
3281 }
3282
3283 COMMAND_HANDLER(xscale_handle_cache_info_command)
3284 {
3285 struct target *target = get_current_target(CMD_CTX);
3286 struct xscale_common *xscale = target_to_xscale(target);
3287 int retval;
3288
3289 retval = xscale_verify_pointer(CMD_CTX, xscale);
3290 if (retval != ERROR_OK)
3291 return retval;
3292
3293 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3294 }
3295
3296 static int xscale_virt2phys(struct target *target,
3297 uint32_t virtual, uint32_t *physical)
3298 {
3299 struct xscale_common *xscale = target_to_xscale(target);
3300 uint32_t cb;
3301
3302 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3303 LOG_ERROR(xscale_not);
3304 return ERROR_TARGET_INVALID;
3305 }
3306
3307 uint32_t ret;
3308 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3309 virtual, &cb, &ret);
3310 if (retval != ERROR_OK)
3311 return retval;
3312 *physical = ret;
3313 return ERROR_OK;
3314 }
3315
3316 static int xscale_mmu(struct target *target, int *enabled)
3317 {
3318 struct xscale_common *xscale = target_to_xscale(target);
3319
3320 if (target->state != TARGET_HALTED)
3321 {
3322 LOG_ERROR("Target not halted");
3323 return ERROR_TARGET_INVALID;
3324 }
3325 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3326 return ERROR_OK;
3327 }
3328
3329 COMMAND_HANDLER(xscale_handle_mmu_command)
3330 {
3331 struct target *target = get_current_target(CMD_CTX);
3332 struct xscale_common *xscale = target_to_xscale(target);
3333 int retval;
3334
3335 retval = xscale_verify_pointer(CMD_CTX, xscale);
3336 if (retval != ERROR_OK)
3337 return retval;
3338
3339 if (target->state != TARGET_HALTED)
3340 {
3341 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3342 return ERROR_OK;
3343 }
3344
3345 if (CMD_ARGC >= 1)
3346 {
3347 bool enable;
3348 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3349 if (enable)
3350 xscale_enable_mmu_caches(target, 1, 0, 0);
3351 else
3352 xscale_disable_mmu_caches(target, 1, 0, 0);
3353 xscale->armv4_5_mmu.mmu_enabled = enable;
3354 }
3355
3356 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3357
3358 return ERROR_OK;
3359 }
3360
3361 COMMAND_HANDLER(xscale_handle_idcache_command)
3362 {
3363 struct target *target = get_current_target(CMD_CTX);
3364 struct xscale_common *xscale = target_to_xscale(target);
3365
3366 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3367 if (retval != ERROR_OK)
3368 return retval;
3369
3370 if (target->state != TARGET_HALTED)
3371 {
3372 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3373 return ERROR_OK;
3374 }
3375
3376 bool icache = false;
3377 if (strcmp(CMD_NAME, "icache") == 0)
3378 icache = true;
3379 if (CMD_ARGC >= 1)
3380 {
3381 bool enable;
3382 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3383 if (icache) {
3384 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3385 if (enable)
3386 xscale_enable_mmu_caches(target, 0, 0, 1);
3387 else
3388 xscale_disable_mmu_caches(target, 0, 0, 1);
3389 } else {
3390 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3391 if (enable)
3392 xscale_enable_mmu_caches(target, 0, 1, 0);
3393 else
3394 xscale_disable_mmu_caches(target, 0, 1, 0);
3395 }
3396 }
3397
3398 bool enabled = icache ?
3399 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3400 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3401 const char *msg = enabled ? "enabled" : "disabled";
3402 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3403
3404 return ERROR_OK;
3405 }
3406
3407 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3408 {
3409 struct target *target = get_current_target(CMD_CTX);
3410 struct xscale_common *xscale = target_to_xscale(target);
3411 int retval;
3412
3413 retval = xscale_verify_pointer(CMD_CTX, xscale);
3414 if (retval != ERROR_OK)
3415 return retval;
3416
3417 if (CMD_ARGC < 1)
3418 {
3419 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3420 }
3421 else
3422 {
3423 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3424 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3425 xscale_write_dcsr(target, -1, -1);
3426 }
3427
3428 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3429
3430 return ERROR_OK;
3431 }
3432
3433
3434 COMMAND_HANDLER(xscale_handle_vector_table_command)
3435 {
3436 struct target *target = get_current_target(CMD_CTX);
3437 struct xscale_common *xscale = target_to_xscale(target);
3438 int err = 0;
3439 int retval;
3440
3441 retval = xscale_verify_pointer(CMD_CTX, xscale);
3442 if (retval != ERROR_OK)
3443 return retval;
3444
3445 if (CMD_ARGC == 0) /* print current settings */
3446 {
3447 int idx;
3448
3449 command_print(CMD_CTX, "active user-set static vectors:");
3450 for (idx = 1; idx < 8; idx++)
3451 if (xscale->static_low_vectors_set & (1 << idx))
3452 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3453 for (idx = 1; idx < 8; idx++)
3454 if (xscale->static_high_vectors_set & (1 << idx))
3455 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3456 return ERROR_OK;
3457 }
3458
3459 if (CMD_ARGC != 3)
3460 err = 1;
3461 else
3462 {
3463 int idx;
3464 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3465 uint32_t vec;
3466 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3467
3468 if (idx < 1 || idx >= 8)
3469 err = 1;
3470
3471 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3472 {
3473 xscale->static_low_vectors_set |= (1<<idx);
3474 xscale->static_low_vectors[idx] = vec;
3475 }
3476 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3477 {
3478 xscale->static_high_vectors_set |= (1<<idx);
3479 xscale->static_high_vectors[idx] = vec;
3480 }
3481 else
3482 err = 1;
3483 }
3484
3485 if (err)
3486 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3487
3488 return ERROR_OK;
3489 }
3490
3491
3492 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3493 {
3494 struct target *target = get_current_target(CMD_CTX);
3495 struct xscale_common *xscale = target_to_xscale(target);
3496 uint32_t dcsr_value;
3497 int retval;
3498
3499 retval = xscale_verify_pointer(CMD_CTX, xscale);
3500 if (retval != ERROR_OK)
3501 return retval;
3502
3503 if (target->state != TARGET_HALTED)
3504 {
3505 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3506 return ERROR_OK;
3507 }
3508
3509 if (CMD_ARGC >= 1)
3510 {
3511 if (strcmp("enable", CMD_ARGV[0]) == 0)
3512 xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
3513 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3514 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3515 else
3516 return ERROR_INVALID_ARGUMENTS;
3517 }
3518
3519 if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED)
3520 {
3521 if (strcmp("fill", CMD_ARGV[1]) == 0)
3522 {
3523 int buffcount = 1; /* default */
3524 if (CMD_ARGC >= 3)
3525 COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
3526 if (buffcount < 1) /* invalid */
3527 {
3528 command_print(CMD_CTX, "fill buffer count must be > 0");
3529 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3530 return ERROR_INVALID_ARGUMENTS;
3531 }
3532 xscale->trace.buffer_fill = buffcount;
3533 xscale->trace.mode = XSCALE_TRACE_FILL;
3534 }
3535 else if (strcmp("wrap", CMD_ARGV[1]) == 0)
3536 xscale->trace.mode = XSCALE_TRACE_WRAP;
3537 else
3538 {
3539 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3540 return ERROR_INVALID_ARGUMENTS;
3541 }
3542 }
3543
3544 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
3545 {
3546 char fill_string[12];
3547 sprintf(fill_string, "fill %" PRId32, xscale->trace.buffer_fill);
3548 command_print(CMD_CTX, "trace buffer enabled (%s)",
3549 (xscale->trace.mode == XSCALE_TRACE_FILL)
3550 ? fill_string : "wrap");
3551 }
3552 else
3553 command_print(CMD_CTX, "trace buffer disabled");
3554
3555 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3556 if (xscale->trace.mode == XSCALE_TRACE_FILL)
3557 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3558 else
3559 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3560
3561 return ERROR_OK;
3562 }
3563
3564 COMMAND_HANDLER(xscale_handle_trace_image_command)
3565 {
3566 struct target *target = get_current_target(CMD_CTX);
3567 struct xscale_common *xscale = target_to_xscale(target);
3568 int retval;
3569
3570 if (CMD_ARGC < 1)
3571 {
3572 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3573 return ERROR_OK;
3574 }
3575
3576 retval = xscale_verify_pointer(CMD_CTX, xscale);
3577 if (retval != ERROR_OK)
3578 return retval;
3579
3580 if (xscale->trace.image)
3581 {
3582 image_close(xscale->trace.image);
3583 free(xscale->trace.image);
3584 command_print(CMD_CTX, "previously loaded image found and closed");
3585 }
3586
3587 xscale->trace.image = malloc(sizeof(struct image));
3588 xscale->trace.image->base_address_set = 0;
3589 xscale->trace.image->start_address_set = 0;
3590
3591 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3592 if (CMD_ARGC >= 2)
3593 {
3594 xscale->trace.image->base_address_set = 1;
3595 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3596 }
3597 else
3598 {
3599 xscale->trace.image->base_address_set = 0;
3600 }
3601
3602 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3603 {
3604 free(xscale->trace.image);
3605 xscale->trace.image = NULL;
3606 return ERROR_OK;
3607 }
3608
3609 return ERROR_OK;
3610 }
3611
3612 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3613 {
3614 struct target *target = get_current_target(CMD_CTX);
3615 struct xscale_common *xscale = target_to_xscale(target);
3616 struct xscale_trace_data *trace_data;
3617 struct fileio file;
3618 int retval;
3619
3620 retval = xscale_verify_pointer(CMD_CTX, xscale);
3621 if (retval != ERROR_OK)
3622 return retval;
3623
3624 if (target->state != TARGET_HALTED)
3625 {
3626 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3627 return ERROR_OK;
3628 }
3629
3630 if (CMD_ARGC < 1)
3631 {
3632 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3633 return ERROR_OK;
3634 }
3635
3636 trace_data = xscale->trace.data;
3637
3638 if (!trace_data)
3639 {
3640 command_print(CMD_CTX, "no trace data collected");
3641 return ERROR_OK;
3642 }
3643
3644 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3645 {
3646 return ERROR_OK;
3647 }
3648
3649 while (trace_data)
3650 {
3651 int i;
3652
3653 fileio_write_u32(&file, trace_data->chkpt0);
3654 fileio_write_u32(&file, trace_data->chkpt1);
3655 fileio_write_u32(&file, trace_data->last_instruction);
3656 fileio_write_u32(&file, trace_data->depth);
3657
3658 for (i = 0; i < trace_data->depth; i++)
3659 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3660
3661 trace_data = trace_data->next;
3662 }
3663
3664 fileio_close(&file);
3665
3666 return ERROR_OK;
3667 }
3668
3669 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3670 {
3671 struct target *target = get_current_target(CMD_CTX);
3672 struct xscale_common *xscale = target_to_xscale(target);
3673 int retval;
3674
3675 retval = xscale_verify_pointer(CMD_CTX, xscale);
3676 if (retval != ERROR_OK)
3677 return retval;
3678
3679 xscale_analyze_trace(target, CMD_CTX);
3680
3681 return ERROR_OK;
3682 }
3683
3684 COMMAND_HANDLER(xscale_handle_cp15)
3685 {
3686 struct target *target = get_current_target(CMD_CTX);
3687 struct xscale_common *xscale = target_to_xscale(target);
3688 int retval;
3689
3690 retval = xscale_verify_pointer(CMD_CTX, xscale);
3691 if (retval != ERROR_OK)
3692 return retval;
3693
3694 if (target->state != TARGET_HALTED)
3695 {
3696 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3697 return ERROR_OK;
3698 }
3699 uint32_t reg_no = 0;
3700 struct reg *reg = NULL;
3701 if (CMD_ARGC > 0)
3702 {
3703 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3704 /*translate from xscale cp15 register no to openocd register*/
3705 switch (reg_no)
3706 {
3707 case 0:
3708 reg_no = XSCALE_MAINID;
3709 break;
3710 case 1:
3711 reg_no = XSCALE_CTRL;
3712 break;
3713 case 2:
3714 reg_no = XSCALE_TTB;
3715 break;
3716 case 3:
3717 reg_no = XSCALE_DAC;
3718 break;
3719 case 5:
3720 reg_no = XSCALE_FSR;
3721 break;
3722 case 6:
3723 reg_no = XSCALE_FAR;
3724 break;
3725 case 13:
3726 reg_no = XSCALE_PID;
3727 break;
3728 case 15:
3729 reg_no = XSCALE_CPACCESS;
3730 break;
3731 default:
3732 command_print(CMD_CTX, "invalid register number");
3733 return ERROR_INVALID_ARGUMENTS;
3734 }
3735 reg = &xscale->reg_cache->reg_list[reg_no];
3736
3737 }
3738 if (CMD_ARGC == 1)
3739 {
3740 uint32_t value;
3741
3742 /* read cp15 control register */
3743 xscale_get_reg(reg);
3744 value = buf_get_u32(reg->value, 0, 32);
3745 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3746 }
3747 else if (CMD_ARGC == 2)
3748 {
3749 uint32_t value;
3750 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3751
3752 /* send CP write request (command 0x41) */
3753 xscale_send_u32(target, 0x41);
3754
3755 /* send CP register number */
3756 xscale_send_u32(target, reg_no);
3757
3758 /* send CP register value */
3759 xscale_send_u32(target, value);
3760
3761 /* execute cpwait to ensure outstanding operations complete */
3762 xscale_send_u32(target, 0x53);
3763 }
3764 else
3765 {
3766 command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
3767 }
3768
3769 return ERROR_OK;
3770 }
3771
/* "xscale" subcommands usable only at EXEC time (after init). */
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	/* icache and dcache share one handler; it dispatches on .name */
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display 8-bit mask of vectors "
			"that should trigger debug entry",
		.usage = "[mask]",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
		.usage = "",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};
/* "xscale" subcommands usable in any mode (including config stage),
 * plus the chained EXEC-only commands above. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "target address",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		/* pull in the EXEC-only subcommands as well */
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: generic ARM commands plus the
 * "xscale" command group. */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3888
/* Target-type descriptor binding the XScale implementation into the
 * OpenOCD target framework. Generic ARM helpers are reused where the
 * core needs nothing special (GDB register list, checksum/blank-check,
 * algorithm execution). */
struct target_type xscale_target =
{
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	/* no soft reset support */
	.soft_reset_halt = NULL,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* virtual- and physical-address memory access variants */
	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* hardware breakpoints/watchpoints (2 of each available) */
	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method, you must first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page — this time it will allow linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)