88914b524c9e30870b03d1b7b82b3e4e006eba08
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "xscale.h"
31 #include "target_type.h"
32 #include "arm7_9_common.h"
33 #include "arm_simulator.h"
34 #include "arm_disassembler.h"
35 #include "time_support.h"
36 #include "image.h"
37
38
39 /*
40 * Important XScale documents available as of October 2009 include:
41 *
42 * Intel XScale® Core Developer’s Manual, January 2004
43 * Order Number: 273473-002
44 * This has a chapter detailing debug facilities, and punts some
45 * details to chip-specific microarchitecture documents.
46 *
47 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
48 * Document Number: 273539-005
49 * Less detailed than the developer's manual, but summarizes those
50 * missing details (for most XScales) and gives LOTS of notes about
51 * debugger/handler interaction issues. Presents a simpler reset
52 * and load-handler sequence than the arch doc. (Note, OpenOCD
53 * doesn't currently support "Hot-Debug" as defined there.)
54 *
55 * Chip-specific microarchitecture documents may also be useful.
56 */
57
58
59 /* forward declarations */
60 static int xscale_resume(struct target_s *, int current,
61 uint32_t address, int handle_breakpoints, int debug_execution);
62 static int xscale_debug_entry(target_t *);
63 static int xscale_restore_context(target_t *);
64 static int xscale_get_reg(reg_t *reg);
65 static int xscale_set_reg(reg_t *reg, uint8_t *buf);
66 static int xscale_set_breakpoint(struct target_s *, breakpoint_t *);
67 static int xscale_set_watchpoint(struct target_s *, watchpoint_t *);
68 static int xscale_unset_breakpoint(struct target_s *, breakpoint_t *);
69 static int xscale_read_trace(target_t *);
70
71
72 /* This XScale "debug handler" is loaded into the processor's
73 * mini-ICache, which is 2K of code writable only via JTAG.
74 *
75 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
76 * binary files cleanly. It's string oriented, and terminates them
77 * with a NUL character. Better would be to generate the constants
78 * and let other code decide names, scoping, and other housekeeping.
79 */
80 static /* unsigned const char xscale_debug_handler[] = ... */
81 #include "xscale_debug.h"
82
/* Names of the XScale-specific registers exposed through the register
 * cache.  Entry order must match xscale_reg_arch_info[] below and the
 * XSCALE_* index constants: cp15 system registers first, then the
 * debug unit registers, then the JTAG-only debug channel (TX/RX).
 */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID",	/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",		/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",		/* 20 */
	"XSCALE_TXRXCTRL",
};
108
/* Per-register backing info, parallel to xscale_reg_list[].  The first
 * field is the debug-handler register number used to read/write the
 * register through the target; -1 marks registers that are accessed
 * only over JTAG scan chains, with no handler involvement.
 */
static const xscale_reg_t xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
	{-1, NULL}, /* TX accessed via JTAG */
	{-1, NULL}, /* RX accessed via JTAG */
	{-1, NULL}, /* TXRXCTRL implicit access via JTAG */
};

/* register-type id allocated lazily for the cache; -1 = not yet registered */
static int xscale_reg_arch_type = -1;
136
137 /* convenience wrapper to access XScale specific registers */
138 static int xscale_set_reg_u32(reg_t *reg, uint32_t value)
139 {
140 uint8_t buf[4];
141
142 buf_set_u32(buf, 0, 32, value);
143
144 return xscale_set_reg(reg, buf);
145 }
146
147 static const char xscale_not[] = "target is not an XScale";
148
149 static int xscale_verify_pointer(struct command_context_s *cmd_ctx,
150 struct xscale_common_s *xscale)
151 {
152 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
153 command_print(cmd_ctx, xscale_not);
154 return ERROR_TARGET_INVALID;
155 }
156 return ERROR_OK;
157 }
158
159 static int xscale_jtag_set_instr(jtag_tap_t *tap, uint32_t new_instr)
160 {
161 if (tap == NULL)
162 return ERROR_FAIL;
163
164 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
165 {
166 scan_field_t field;
167 uint8_t scratch[4];
168
169 memset(&field, 0, sizeof field);
170 field.tap = tap;
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
174
175 jtag_add_ir_scan(1, &field, jtag_get_end_state());
176 }
177
178 return ERROR_OK;
179 }
180
/* Read the Debug Control and Status Register (DCSR) over the SELDCSR
 * JTAG chain into the register cache, then write the same value back.
 *
 * The chain carries three fields: 3 control bits (bit 1 = hold_rst,
 * bit 2 = external_debug_break), the 32-bit DCSR, and one trailing bit.
 * The first scan captures the DCSR; the second scan re-writes the value
 * just read so the control bits actually take effect on the target.
 *
 * Returns ERROR_OK, or a JTAG queue error.
 */
static int xscale_read_dcsr(target_t *target)
{
	struct xscale_common_s *xscale = target_to_xscale(target);
	int retval;
	scan_field_t fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_DRPAUSE);
	xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);

	/* control bits: hold reset and external debug break requests */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	/* capture the 32-bit DCSR directly into the register cache */
	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* verify the fixed bits of the control fields */
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* cache now holds the hardware value */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_set_end_state(TAP_IDLE);

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
246
247
248 static void xscale_getbuf(jtag_callback_data_t arg)
249 {
250 uint8_t *in = (uint8_t *)arg;
251 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
252 }
253
254 static int xscale_receive(target_t *target, uint32_t *buffer, int num_words)
255 {
256 if (num_words == 0)
257 return ERROR_INVALID_ARGUMENTS;
258
259 int retval = ERROR_OK;
260 tap_state_t path[3];
261 scan_field_t fields[3];
262 uint8_t *field0 = malloc(num_words * 1);
263 uint8_t field0_check_value = 0x2;
264 uint8_t field0_check_mask = 0x6;
265 uint32_t *field1 = malloc(num_words * 4);
266 uint8_t field2_check_value = 0x0;
267 uint8_t field2_check_mask = 0x1;
268 int words_done = 0;
269 int words_scheduled = 0;
270 int i;
271
272 path[0] = TAP_DRSELECT;
273 path[1] = TAP_DRCAPTURE;
274 path[2] = TAP_DRSHIFT;
275
276 memset(&fields, 0, sizeof fields);
277
278 fields[0].tap = target->tap;
279 fields[0].num_bits = 3;
280 fields[0].check_value = &field0_check_value;
281 fields[0].check_mask = &field0_check_mask;
282
283 fields[1].tap = target->tap;
284 fields[1].num_bits = 32;
285
286 fields[2].tap = target->tap;
287 fields[2].num_bits = 1;
288 fields[2].check_value = &field2_check_value;
289 fields[2].check_mask = &field2_check_mask;
290
291 jtag_set_end_state(TAP_IDLE);
292 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
293 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
294
295 /* repeat until all words have been collected */
296 int attempts = 0;
297 while (words_done < num_words)
298 {
299 /* schedule reads */
300 words_scheduled = 0;
301 for (i = words_done; i < num_words; i++)
302 {
303 fields[0].in_value = &field0[i];
304
305 jtag_add_pathmove(3, path);
306
307 fields[1].in_value = (uint8_t *)(field1 + i);
308
309 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
310
311 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
312
313 words_scheduled++;
314 }
315
316 if ((retval = jtag_execute_queue()) != ERROR_OK)
317 {
318 LOG_ERROR("JTAG error while receiving data from debug handler");
319 break;
320 }
321
322 /* examine results */
323 for (i = words_done; i < num_words; i++)
324 {
325 if (!(field0[0] & 1))
326 {
327 /* move backwards if necessary */
328 int j;
329 for (j = i; j < num_words - 1; j++)
330 {
331 field0[j] = field0[j + 1];
332 field1[j] = field1[j + 1];
333 }
334 words_scheduled--;
335 }
336 }
337 if (words_scheduled == 0)
338 {
339 if (attempts++==1000)
340 {
341 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
342 retval = ERROR_TARGET_TIMEOUT;
343 break;
344 }
345 }
346
347 words_done += words_scheduled;
348 }
349
350 for (i = 0; i < num_words; i++)
351 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
352
353 free(field1);
354
355 return retval;
356 }
357
/* Read the debug handler's TX register into the register cache.
 *
 * @param consume non-zero: go straight Capture-DR -> Shift-DR, which
 *	clears TX_READY so the handler can send the next word; zero:
 *	detour through Pause-DR so the data is observed without being
 *	consumed.
 *
 * Polls for up to ~1 second.  Returns ERROR_OK when a word was read,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when TX was empty (non-consuming
 * peek), or ERROR_TARGET_TIMEOUT on JTAG error or timeout.
 */
static int xscale_read_tx(target_t *target, int consume)
{
	struct xscale_common_s *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	scan_field_t fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);

	/* consuming path: straight into Shift-DR */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: detour via Exit1/Pause/Exit2 */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	/* 3 status bits; bit 0 is TX_READY */
	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	/* the 32-bit payload lands directly in the register cache */
	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* 1 second polling budget */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* done unless we are consuming and TX_READY is still clear */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* no data available: tell the caller TX was empty */
	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
455
/* Write the cached RX register value to the debug handler.
 *
 * Polls (up to ~1 second) until the handler has consumed any previous
 * word (bit 0 of the status field goes low), then performs one final
 * scan with the rx_valid bit set to hand over the new word.
 *
 * Returns ERROR_OK, a JTAG queue error, or ERROR_TARGET_TIMEOUT.
 */
static int xscale_write_rx(target_t *target)
{
	struct xscale_common_s *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	scan_field_t fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);

	memset(&fields, 0, sizeof fields);

	/* 3 status bits; bit 0 reads back as "RX still full" */
	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	/* payload comes from the cached RX register value */
	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	/* rx_valid bit, kept 0 while polling */
	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* 1 second polling budget */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
540
541 /* send count elements of size byte to the debug handler */
542 static int xscale_send(target_t *target, uint8_t *buffer, int count, int size)
543 {
544 uint32_t t[3];
545 int bits[3];
546 int retval;
547 int done_count = 0;
548
549 jtag_set_end_state(TAP_IDLE);
550
551 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
552
553 bits[0]=3;
554 t[0]=0;
555 bits[1]=32;
556 t[2]=1;
557 bits[2]=1;
558 int endianness = target->endianness;
559 while (done_count++ < count)
560 {
561 switch (size)
562 {
563 case 4:
564 if (endianness == TARGET_LITTLE_ENDIAN)
565 {
566 t[1]=le_to_h_u32(buffer);
567 } else
568 {
569 t[1]=be_to_h_u32(buffer);
570 }
571 break;
572 case 2:
573 if (endianness == TARGET_LITTLE_ENDIAN)
574 {
575 t[1]=le_to_h_u16(buffer);
576 } else
577 {
578 t[1]=be_to_h_u16(buffer);
579 }
580 break;
581 case 1:
582 t[1]=buffer[0];
583 break;
584 default:
585 LOG_ERROR("BUG: size neither 4, 2 nor 1");
586 exit(-1);
587 }
588 jtag_add_dr_out(target->tap,
589 3,
590 bits,
591 t,
592 jtag_set_end_state(TAP_IDLE));
593 buffer += size;
594 }
595
596 if ((retval = jtag_execute_queue()) != ERROR_OK)
597 {
598 LOG_ERROR("JTAG error while sending data to debug handler");
599 return retval;
600 }
601
602 return ERROR_OK;
603 }
604
605 static int xscale_send_u32(target_t *target, uint32_t value)
606 {
607 struct xscale_common_s *xscale = target_to_xscale(target);
608
609 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
610 return xscale_write_rx(target);
611 }
612
/* Write the cached DCSR value to the target over the SELDCSR chain.
 *
 * @param hold_rst     new hold-reset flag, or -1 to keep the current one
 * @param ext_dbg_brk  new external-debug-break flag, or -1 to keep it
 *
 * Returns ERROR_OK or a JTAG queue error.
 */
static int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common_s *xscale = target_to_xscale(target);
	int retval;
	scan_field_t fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	/* -1 means "leave unchanged" for both flags */
	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);

	/* control bits: bit 1 = hold_rst, bit 2 = external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	/* 32-bit DCSR payload comes from the register cache */
	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* verify fixed bits of the control fields */
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* target and cache now agree */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
671
/* Parity of a 32-bit word: returns 0 if the number of set bits is
 * even, 1 if odd.
 *
 * The word is XOR-folded down to its low 4 bits; 0x6996 then serves
 * as a 16-entry lookup table holding the parity of each nibble value
 * 0..15.  (Removed long-dead commented-out debug logging.)
 */
static unsigned int parity (unsigned int v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v &= 0xf;
	return (0x6996 >> v) & 1;
}
683
/* Load one cache line (8 instructions from @buffer) into the Mini
 * ICache at virtual address @va, via the LDIC JTAG instruction.
 *
 * The first scan sends the 6-bit command plus the 27-bit line address;
 * the two scan fields are then re-purposed to send each 32-bit word
 * followed by its 1-bit parity.  Executes the queue before returning.
 */
static int xscale_load_ic(target_t *target, uint32_t va, uint32_t buffer[8])
{
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	scan_field_t fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_LDIC);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* read the word back through memcpy to avoid aliasing issues,
		 * then compute its parity bit */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(2, fields, jtag_get_end_state());
	}

	return jtag_execute_queue();
}
738
739 static int xscale_invalidate_ic_line(target_t *target, uint32_t va)
740 {
741 uint8_t packet[4];
742 uint8_t cmd;
743 scan_field_t fields[2];
744
745 jtag_set_end_state(TAP_IDLE);
746 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
747
748 /* CMD for invalidate IC line b000, bits [6:4] b000 */
749 buf_set_u32(&cmd, 0, 6, 0x0);
750
751 /* virtual address of desired cache line */
752 buf_set_u32(packet, 0, 27, va >> 5);
753
754 memset(&fields, 0, sizeof fields);
755
756 fields[0].tap = target->tap;
757 fields[0].num_bits = 6;
758 fields[0].out_value = &cmd;
759
760 fields[1].tap = target->tap;
761 fields[1].num_bits = 27;
762 fields[1].out_value = packet;
763
764 jtag_add_dr_scan(2, fields, jtag_get_end_state());
765
766 return ERROR_OK;
767 }
768
769 static int xscale_update_vectors(target_t *target)
770 {
771 struct xscale_common_s *xscale = target_to_xscale(target);
772 int i;
773 int retval;
774
775 uint32_t low_reset_branch, high_reset_branch;
776
777 for (i = 1; i < 8; i++)
778 {
779 /* if there's a static vector specified for this exception, override */
780 if (xscale->static_high_vectors_set & (1 << i))
781 {
782 xscale->high_vectors[i] = xscale->static_high_vectors[i];
783 }
784 else
785 {
786 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
787 if (retval == ERROR_TARGET_TIMEOUT)
788 return retval;
789 if (retval != ERROR_OK)
790 {
791 /* Some of these reads will fail as part of normal execution */
792 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
793 }
794 }
795 }
796
797 for (i = 1; i < 8; i++)
798 {
799 if (xscale->static_low_vectors_set & (1 << i))
800 {
801 xscale->low_vectors[i] = xscale->static_low_vectors[i];
802 }
803 else
804 {
805 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
806 if (retval == ERROR_TARGET_TIMEOUT)
807 return retval;
808 if (retval != ERROR_OK)
809 {
810 /* Some of these reads will fail as part of normal execution */
811 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
812 }
813 }
814 }
815
816 /* calculate branches to debug handler */
817 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
818 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
819
820 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
821 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
822
823 /* invalidate and load exception vectors in mini i-cache */
824 xscale_invalidate_ic_line(target, 0x0);
825 xscale_invalidate_ic_line(target, 0xffff0000);
826
827 xscale_load_ic(target, 0x0, xscale->low_vectors);
828 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
829
830 return ERROR_OK;
831 }
832
833 static int xscale_arch_state(struct target_s *target)
834 {
835 struct xscale_common_s *xscale = target_to_xscale(target);
836 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
837
838 static const char *state[] =
839 {
840 "disabled", "enabled"
841 };
842
843 static const char *arch_dbg_reason[] =
844 {
845 "", "\n(processor reset)", "\n(trace buffer full)"
846 };
847
848 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
849 {
850 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
851 exit(-1);
852 }
853
854 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
855 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
856 "MMU: %s, D-Cache: %s, I-Cache: %s"
857 "%s",
858 armv4_5_state_strings[armv4_5->core_state],
859 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
860 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
861 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
862 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
863 state[xscale->armv4_5_mmu.mmu_enabled],
864 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
865 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
866 arch_dbg_reason[xscale->arch_debug_reason]);
867
868 return ERROR_OK;
869 }
870
/* Poll the target: a successful non-consuming TX read means the debug
 * handler is running, i.e. the core has entered debug state.
 *
 * Returns ERROR_OK or the error from xscale_read_tx()/debug entry.
 */
static int xscale_poll(target_t *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		/* peek at TX without consuming it; debug entry will read it */
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be perfomed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
910
/* Handle entry into debug state: fetch the register set the debug
 * handler pushed out over TX, decode the debug reason from DCSR, apply
 * the method-of-entry PC fixup, refresh MMU/cache status, and collect
 * trace data if tracing was active.
 *
 * Protocol (set by the debug handler): first 10 words are r0, pc,
 * r1-r7, cpsr; then r8-r14 plus spsr (8 words) for privileged modes,
 * or just r8-r14 (7 words) for USR/SYS.
 *
 * Returns ERROR_OK, a receive/DCSR error, or ERROR_TARGET_FAILURE on
 * an invalid cpsr mode.
 */
static int xscale_debug_entry(target_t *target)
{
	struct xscale_common_s *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
	armv4_5->core_cache->reg_list[15].dirty = 1;
	armv4_5->core_cache->reg_list[15].valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	/* word 9 is the cpsr */
	buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
	armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
	armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	armv4_5->core_mode = buffer[9] & 0x1f;
	if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);

	/* cpsr T bit selects Thumb vs ARM state */
	if (buffer[9] & 0x20)
		armv4_5->core_state = ARMV4_5_STATE_THUMB;
	else
		armv4_5->core_state = ARMV4_5_STATE_ARM;

	/* NOTE(review): redundant — the same check already returned
	 * ERROR_TARGET_FAILURE above, so this can never trigger */
	if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
		return ERROR_FAIL;

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
	{
		/* NOTE(review): xscale_receive() return value is ignored here
		 * and below — a failed transfer leaves stale cache contents */
		xscale_receive(target, buffer, 8);
		/* index 16 is the spsr slot for the current mode */
		buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to register cache */
	for (i = 8; i <= 14; i++)
	{
		buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
	}

	/* examine debug reason: method-of-entry field, DCSR bits [4:2] */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}
1090
1091 static int xscale_halt(target_t *target)
1092 {
1093 struct xscale_common_s *xscale = target_to_xscale(target);
1094
1095 LOG_DEBUG("target->state: %s",
1096 target_state_name(target));
1097
1098 if (target->state == TARGET_HALTED)
1099 {
1100 LOG_DEBUG("target was already halted");
1101 return ERROR_OK;
1102 }
1103 else if (target->state == TARGET_UNKNOWN)
1104 {
1105 /* this must not happen for a xscale target */
1106 LOG_ERROR("target was in unknown state when halt was requested");
1107 return ERROR_TARGET_INVALID;
1108 }
1109 else if (target->state == TARGET_RESET)
1110 {
1111 LOG_DEBUG("target->state == TARGET_RESET");
1112 }
1113 else
1114 {
1115 /* assert external dbg break */
1116 xscale->external_debug_break = 1;
1117 xscale_read_dcsr(target);
1118
1119 target->debug_reason = DBG_REASON_DBGRQ;
1120 }
1121
1122 return ERROR_OK;
1123 }
1124
1125 static int xscale_enable_single_step(struct target_s *target, uint32_t next_pc)
1126 {
1127 struct xscale_common_s *xscale = target_to_xscale(target);
1128 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1129 int retval;
1130
1131 if (xscale->ibcr0_used)
1132 {
1133 breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1134
1135 if (ibcr0_bp)
1136 {
1137 xscale_unset_breakpoint(target, ibcr0_bp);
1138 }
1139 else
1140 {
1141 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1142 exit(-1);
1143 }
1144 }
1145
1146 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1147 return retval;
1148
1149 return ERROR_OK;
1150 }
1151
1152 static int xscale_disable_single_step(struct target_s *target)
1153 {
1154 struct xscale_common_s *xscale = target_to_xscale(target);
1155 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1156 int retval;
1157
1158 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1159 return retval;
1160
1161 return ERROR_OK;
1162 }
1163
1164 static void xscale_enable_watchpoints(struct target_s *target)
1165 {
1166 watchpoint_t *watchpoint = target->watchpoints;
1167
1168 while (watchpoint)
1169 {
1170 if (watchpoint->set == 0)
1171 xscale_set_watchpoint(target, watchpoint);
1172 watchpoint = watchpoint->next;
1173 }
1174 }
1175
1176 static void xscale_enable_breakpoints(struct target_s *target)
1177 {
1178 breakpoint_t *breakpoint = target->breakpoints;
1179
1180 /* set any pending breakpoints */
1181 while (breakpoint)
1182 {
1183 if (breakpoint->set == 0)
1184 xscale_set_breakpoint(target, breakpoint);
1185 breakpoint = breakpoint->next;
1186 }
1187 }
1188
1189 static int xscale_resume(struct target_s *target, int current,
1190 uint32_t address, int handle_breakpoints, int debug_execution)
1191 {
1192 struct xscale_common_s *xscale = target_to_xscale(target);
1193 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
1194 breakpoint_t *breakpoint = target->breakpoints;
1195 uint32_t current_pc;
1196 int retval;
1197 int i;
1198
1199 LOG_DEBUG("-");
1200
1201 if (target->state != TARGET_HALTED)
1202 {
1203 LOG_WARNING("target not halted");
1204 return ERROR_TARGET_NOT_HALTED;
1205 }
1206
1207 if (!debug_execution)
1208 {
1209 target_free_all_working_areas(target);
1210 }
1211
1212 /* update vector tables */
1213 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1214 return retval;
1215
1216 /* current = 1: continue on current pc, otherwise continue at <address> */
1217 if (!current)
1218 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1219
1220 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1221
1222 /* if we're at the reset vector, we have to simulate the branch */
1223 if (current_pc == 0x0)
1224 {
1225 arm_simulate_step(target, NULL);
1226 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1227 }
1228
1229 /* the front-end may request us not to handle breakpoints */
1230 if (handle_breakpoints)
1231 {
1232 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1233 {
1234 uint32_t next_pc;
1235
1236 /* there's a breakpoint at the current PC, we have to step over it */
1237 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1238 xscale_unset_breakpoint(target, breakpoint);
1239
1240 /* calculate PC of next instruction */
1241 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1242 {
1243 uint32_t current_opcode;
1244 target_read_u32(target, current_pc, &current_opcode);
1245 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1246 }
1247
1248 LOG_DEBUG("enable single-step");
1249 xscale_enable_single_step(target, next_pc);
1250
1251 /* restore banked registers */
1252 xscale_restore_context(target);
1253
1254 /* send resume request (command 0x30 or 0x31)
1255 * clean the trace buffer if it is to be enabled (0x62) */
1256 if (xscale->trace.buffer_enabled)
1257 {
1258 xscale_send_u32(target, 0x62);
1259 xscale_send_u32(target, 0x31);
1260 }
1261 else
1262 xscale_send_u32(target, 0x30);
1263
1264 /* send CPSR */
1265 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1266 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1267
1268 for (i = 7; i >= 0; i--)
1269 {
1270 /* send register */
1271 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1272 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1273 }
1274
1275 /* send PC */
1276 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1277 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1278
1279 /* wait for and process debug entry */
1280 xscale_debug_entry(target);
1281
1282 LOG_DEBUG("disable single-step");
1283 xscale_disable_single_step(target);
1284
1285 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1286 xscale_set_breakpoint(target, breakpoint);
1287 }
1288 }
1289
1290 /* enable any pending breakpoints and watchpoints */
1291 xscale_enable_breakpoints(target);
1292 xscale_enable_watchpoints(target);
1293
1294 /* restore banked registers */
1295 xscale_restore_context(target);
1296
1297 /* send resume request (command 0x30 or 0x31)
1298 * clean the trace buffer if it is to be enabled (0x62) */
1299 if (xscale->trace.buffer_enabled)
1300 {
1301 xscale_send_u32(target, 0x62);
1302 xscale_send_u32(target, 0x31);
1303 }
1304 else
1305 xscale_send_u32(target, 0x30);
1306
1307 /* send CPSR */
1308 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1309 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1310
1311 for (i = 7; i >= 0; i--)
1312 {
1313 /* send register */
1314 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1315 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1316 }
1317
1318 /* send PC */
1319 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1320 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1321
1322 target->debug_reason = DBG_REASON_NOTHALTED;
1323
1324 if (!debug_execution)
1325 {
1326 /* registers are now invalid */
1327 armv4_5_invalidate_core_regs(target);
1328 target->state = TARGET_RUNNING;
1329 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1330 }
1331 else
1332 {
1333 target->state = TARGET_DEBUG_RUNNING;
1334 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1335 }
1336
1337 LOG_DEBUG("target resumed");
1338
1339 return ERROR_OK;
1340 }
1341
1342 static int xscale_step_inner(struct target_s *target, int current,
1343 uint32_t address, int handle_breakpoints)
1344 {
1345 struct xscale_common_s *xscale = target_to_xscale(target);
1346 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
1347 uint32_t next_pc;
1348 int retval;
1349 int i;
1350
1351 target->debug_reason = DBG_REASON_SINGLESTEP;
1352
1353 /* calculate PC of next instruction */
1354 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1355 {
1356 uint32_t current_opcode, current_pc;
1357 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1358
1359 target_read_u32(target, current_pc, &current_opcode);
1360 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1361 return retval;
1362 }
1363
1364 LOG_DEBUG("enable single-step");
1365 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1366 return retval;
1367
1368 /* restore banked registers */
1369 if ((retval = xscale_restore_context(target)) != ERROR_OK)
1370 return retval;
1371
1372 /* send resume request (command 0x30 or 0x31)
1373 * clean the trace buffer if it is to be enabled (0x62) */
1374 if (xscale->trace.buffer_enabled)
1375 {
1376 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1377 return retval;
1378 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1379 return retval;
1380 }
1381 else
1382 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1383 return retval;
1384
1385 /* send CPSR */
1386 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32))) != ERROR_OK)
1387 return retval;
1388 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1389
1390 for (i = 7; i >= 0; i--)
1391 {
1392 /* send register */
1393 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1394 return retval;
1395 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1396 }
1397
1398 /* send PC */
1399 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
1400 return retval;
1401 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1402
1403 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1404
1405 /* registers are now invalid */
1406 if ((retval = armv4_5_invalidate_core_regs(target)) != ERROR_OK)
1407 return retval;
1408
1409 /* wait for and process debug entry */
1410 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1411 return retval;
1412
1413 LOG_DEBUG("disable single-step");
1414 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1415 return retval;
1416
1417 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1418
1419 return ERROR_OK;
1420 }
1421
1422 static int xscale_step(struct target_s *target, int current,
1423 uint32_t address, int handle_breakpoints)
1424 {
1425 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1426 breakpoint_t *breakpoint = target->breakpoints;
1427
1428 uint32_t current_pc;
1429 int retval;
1430
1431 if (target->state != TARGET_HALTED)
1432 {
1433 LOG_WARNING("target not halted");
1434 return ERROR_TARGET_NOT_HALTED;
1435 }
1436
1437 /* current = 1: continue on current pc, otherwise continue at <address> */
1438 if (!current)
1439 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1440
1441 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1442
1443 /* if we're at the reset vector, we have to simulate the step */
1444 if (current_pc == 0x0)
1445 {
1446 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1447 return retval;
1448 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1449
1450 target->debug_reason = DBG_REASON_SINGLESTEP;
1451 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1452
1453 return ERROR_OK;
1454 }
1455
1456 /* the front-end may request us not to handle breakpoints */
1457 if (handle_breakpoints)
1458 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1459 {
1460 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1461 return retval;
1462 }
1463
1464 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1465
1466 if (breakpoint)
1467 {
1468 xscale_set_breakpoint(target, breakpoint);
1469 }
1470
1471 LOG_DEBUG("target stepped");
1472
1473 return ERROR_OK;
1474
1475 }
1476
/* Assert reset while keeping the debug unit configured so the core
 * traps into debug state ("Halt mode") when reset is later released.
 * Returns ERROR_OK, or the error from target_halt() when reset_halt
 * is requested.
 */
static int xscale_assert_reset(target_t *target)
{
	struct xscale_common_s *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		  target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG)
	 */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, 0x7f);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	/* optionally enter debug state straight out of reset */
	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1517
/* Finish a reset: release SRST, reconfigure the debug unit, and load
 * the debug handler (plus both vector tables) into the mini-icache so
 * the core enters the handler when it starts running.
 *
 * All hardware comparator bookkeeping is reset, since the comparator
 * registers do not survive the reset.
 */
static int xscale_deassert_reset(target_t *target)
{
	struct xscale_common_s *xscale = target_to_xscale(target);
	breakpoint_t *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* both instruction comparators are free again */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	/* both data comparators are free again */
	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	armv4_5_invalidate_core_regs(target);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			/* transfer at most one 32-byte cache line per iteration */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad a partial line with 0xe1a08008 (ARM "mov r8, r8") */
			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		/* the low and high vector tables are loaded separately */
		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1651
/* Individual core-register reads are not supported; the XScale debug
 * handler transfers whole register banks (see xscale_full_context()).
 * NOTE(review): returns ERROR_OK despite doing nothing, so callers get
 * no failure indication.
 */
static int xscale_read_core_reg(struct target_s *target, int num,
		enum armv4_5_mode mode)
{
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1658
/* Individual core-register writes are not supported; dirty registers
 * are flushed in banks by xscale_restore_context().
 * NOTE(review): returns ERROR_OK despite doing nothing, so callers get
 * no failure indication.
 */
static int xscale_write_core_reg(struct target_s *target, int num,
		enum armv4_5_mode mode, uint32_t value)
{
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1665
1666 static int xscale_full_context(target_t *target)
1667 {
1668 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1669
1670 uint32_t *buffer;
1671
1672 int i, j;
1673
1674 LOG_DEBUG("-");
1675
1676 if (target->state != TARGET_HALTED)
1677 {
1678 LOG_WARNING("target not halted");
1679 return ERROR_TARGET_NOT_HALTED;
1680 }
1681
1682 buffer = malloc(4 * 8);
1683
1684 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1685 * we can't enter User mode on an XScale (unpredictable),
1686 * but User shares registers with SYS
1687 */
1688 for (i = 1; i < 7; i++)
1689 {
1690 int valid = 1;
1691
1692 /* check if there are invalid registers in the current mode
1693 */
1694 for (j = 0; j <= 16; j++)
1695 {
1696 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1697 valid = 0;
1698 }
1699
1700 if (!valid)
1701 {
1702 uint32_t tmp_cpsr;
1703
1704 /* request banked registers */
1705 xscale_send_u32(target, 0x0);
1706
1707 tmp_cpsr = 0x0;
1708 tmp_cpsr |= armv4_5_number_to_mode(i);
1709 tmp_cpsr |= 0xc0; /* I/F bits */
1710
1711 /* send CPSR for desired mode */
1712 xscale_send_u32(target, tmp_cpsr);
1713
1714 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1715 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1716 {
1717 xscale_receive(target, buffer, 8);
1718 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1719 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1720 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1721 }
1722 else
1723 {
1724 xscale_receive(target, buffer, 7);
1725 }
1726
1727 /* move data from buffer to register cache */
1728 for (j = 8; j <= 14; j++)
1729 {
1730 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1731 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1732 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1733 }
1734 }
1735 }
1736
1737 free(buffer);
1738
1739 return ERROR_OK;
1740 }
1741
1742 static int xscale_restore_context(target_t *target)
1743 {
1744 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1745
1746 int i, j;
1747
1748 if (target->state != TARGET_HALTED)
1749 {
1750 LOG_WARNING("target not halted");
1751 return ERROR_TARGET_NOT_HALTED;
1752 }
1753
1754 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1755 * we can't enter User mode on an XScale (unpredictable),
1756 * but User shares registers with SYS
1757 */
1758 for (i = 1; i < 7; i++)
1759 {
1760 int dirty = 0;
1761
1762 /* check if there are invalid registers in the current mode
1763 */
1764 for (j = 8; j <= 14; j++)
1765 {
1766 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1767 dirty = 1;
1768 }
1769
1770 /* if not USR/SYS, check if the SPSR needs to be written */
1771 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1772 {
1773 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1774 dirty = 1;
1775 }
1776
1777 if (dirty)
1778 {
1779 uint32_t tmp_cpsr;
1780
1781 /* send banked registers */
1782 xscale_send_u32(target, 0x1);
1783
1784 tmp_cpsr = 0x0;
1785 tmp_cpsr |= armv4_5_number_to_mode(i);
1786 tmp_cpsr |= 0xc0; /* I/F bits */
1787
1788 /* send CPSR for desired mode */
1789 xscale_send_u32(target, tmp_cpsr);
1790
1791 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1792 for (j = 8; j <= 14; j++)
1793 {
1794 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, j).value, 0, 32));
1795 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1796 }
1797
1798 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1799 {
1800 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32));
1801 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1802 }
1803 }
1804 }
1805
1806 return ERROR_OK;
1807 }
1808
1809 static int xscale_read_memory(struct target_s *target, uint32_t address,
1810 uint32_t size, uint32_t count, uint8_t *buffer)
1811 {
1812 struct xscale_common_s *xscale = target_to_xscale(target);
1813 uint32_t *buf32;
1814 uint32_t i;
1815 int retval;
1816
1817 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1818
1819 if (target->state != TARGET_HALTED)
1820 {
1821 LOG_WARNING("target not halted");
1822 return ERROR_TARGET_NOT_HALTED;
1823 }
1824
1825 /* sanitize arguments */
1826 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1827 return ERROR_INVALID_ARGUMENTS;
1828
1829 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1830 return ERROR_TARGET_UNALIGNED_ACCESS;
1831
1832 /* send memory read request (command 0x1n, n: access size) */
1833 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1834 return retval;
1835
1836 /* send base address for read request */
1837 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1838 return retval;
1839
1840 /* send number of requested data words */
1841 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1842 return retval;
1843
1844 /* receive data from target (count times 32-bit words in host endianness) */
1845 buf32 = malloc(4 * count);
1846 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1847 return retval;
1848
1849 /* extract data from host-endian buffer into byte stream */
1850 for (i = 0; i < count; i++)
1851 {
1852 switch (size)
1853 {
1854 case 4:
1855 target_buffer_set_u32(target, buffer, buf32[i]);
1856 buffer += 4;
1857 break;
1858 case 2:
1859 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1860 buffer += 2;
1861 break;
1862 case 1:
1863 *buffer++ = buf32[i] & 0xff;
1864 break;
1865 default:
1866 LOG_ERROR("should never get here");
1867 exit(-1);
1868 }
1869 }
1870
1871 free(buf32);
1872
1873 /* examine DCSR, to see if Sticky Abort (SA) got set */
1874 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1875 return retval;
1876 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1877 {
1878 /* clear SA bit */
1879 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1880 return retval;
1881
1882 return ERROR_TARGET_DATA_ABORT;
1883 }
1884
1885 return ERROR_OK;
1886 }
1887
1888 static int xscale_write_memory(struct target_s *target, uint32_t address,
1889 uint32_t size, uint32_t count, uint8_t *buffer)
1890 {
1891 struct xscale_common_s *xscale = target_to_xscale(target);
1892 int retval;
1893
1894 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1895
1896 if (target->state != TARGET_HALTED)
1897 {
1898 LOG_WARNING("target not halted");
1899 return ERROR_TARGET_NOT_HALTED;
1900 }
1901
1902 /* sanitize arguments */
1903 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1904 return ERROR_INVALID_ARGUMENTS;
1905
1906 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1907 return ERROR_TARGET_UNALIGNED_ACCESS;
1908
1909 /* send memory write request (command 0x2n, n: access size) */
1910 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1911 return retval;
1912
1913 /* send base address for read request */
1914 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1915 return retval;
1916
1917 /* send number of requested data words to be written*/
1918 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1919 return retval;
1920
1921 /* extract data from host-endian buffer into byte stream */
1922 #if 0
1923 for (i = 0; i < count; i++)
1924 {
1925 switch (size)
1926 {
1927 case 4:
1928 value = target_buffer_get_u32(target, buffer);
1929 xscale_send_u32(target, value);
1930 buffer += 4;
1931 break;
1932 case 2:
1933 value = target_buffer_get_u16(target, buffer);
1934 xscale_send_u32(target, value);
1935 buffer += 2;
1936 break;
1937 case 1:
1938 value = *buffer;
1939 xscale_send_u32(target, value);
1940 buffer += 1;
1941 break;
1942 default:
1943 LOG_ERROR("should never get here");
1944 exit(-1);
1945 }
1946 }
1947 #endif
1948 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1949 return retval;
1950
1951 /* examine DCSR, to see if Sticky Abort (SA) got set */
1952 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1953 return retval;
1954 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1955 {
1956 /* clear SA bit */
1957 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1958 return retval;
1959
1960 return ERROR_TARGET_DATA_ABORT;
1961 }
1962
1963 return ERROR_OK;
1964 }
1965
/* Bulk download: XScale has no separate fast path here, so forward to
 * the word-wide xscale_write_memory() implementation. */
static int xscale_bulk_write_memory(target_t *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
1971
1972 static uint32_t xscale_get_ttb(target_t *target)
1973 {
1974 struct xscale_common_s *xscale = target_to_xscale(target);
1975 uint32_t ttb;
1976
1977 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1978 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1979
1980 return ttb;
1981 }
1982
/* Disable the MMU and/or caches via the cp15 control register.
 *
 * The DCache is cleaned (using the dedicated cache-clean work area) and
 * invalidated before being turned off; the ICache is invalidated before
 * being turned off.  The 0x5n values are debug-handler commands.
 */
static void xscale_disable_mmu_caches(target_t *target, int mmu,
		int d_u_cache, int i_cache)
{
	struct xscale_common_s *xscale = target_to_xscale(target);
	uint32_t cp15_control;

	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

	if (mmu)
		cp15_control &= ~0x1U; /* clear M bit (MMU enable) */

	if (d_u_cache)
	{
		/* clean DCache (command 0x50, followed by work area address) */
		xscale_send_u32(target, 0x50);
		xscale_send_u32(target, xscale->cache_clean_address);

		/* invalidate DCache (command 0x51) */
		xscale_send_u32(target, 0x51);

		cp15_control &= ~0x4U; /* clear C bit (data/unified cache enable) */
	}

	if (i_cache)
	{
		/* invalidate ICache (command 0x52) */
		xscale_send_u32(target, 0x52);
		cp15_control &= ~0x1000U; /* clear I bit (instruction cache enable) */
	}

	/* write new cp15 control register */
	xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);

	/* execute cpwait to ensure outstanding operations complete */
	xscale_send_u32(target, 0x53);
}
2021
2022 static void xscale_enable_mmu_caches(target_t *target, int mmu,
2023 int d_u_cache, int i_cache)
2024 {
2025 struct xscale_common_s *xscale = target_to_xscale(target);
2026 uint32_t cp15_control;
2027
2028 /* read cp15 control register */
2029 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2030 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2031
2032 if (mmu)
2033 cp15_control |= 0x1U;
2034
2035 if (d_u_cache)
2036 cp15_control |= 0x4U;
2037
2038 if (i_cache)
2039 cp15_control |= 0x1000U;
2040
2041 /* write new cp15 control register */
2042 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2043
2044 /* execute cpwait to ensure outstanding operations complete */
2045 xscale_send_u32(target, 0x53);
2046 }
2047
2048 static int xscale_set_breakpoint(struct target_s *target,
2049 breakpoint_t *breakpoint)
2050 {
2051 int retval;
2052 struct xscale_common_s *xscale = target_to_xscale(target);
2053
2054 if (target->state != TARGET_HALTED)
2055 {
2056 LOG_WARNING("target not halted");
2057 return ERROR_TARGET_NOT_HALTED;
2058 }
2059
2060 if (breakpoint->set)
2061 {
2062 LOG_WARNING("breakpoint already set");
2063 return ERROR_OK;
2064 }
2065
2066 if (breakpoint->type == BKPT_HARD)
2067 {
2068 uint32_t value = breakpoint->address | 1;
2069 if (!xscale->ibcr0_used)
2070 {
2071 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2072 xscale->ibcr0_used = 1;
2073 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2074 }
2075 else if (!xscale->ibcr1_used)
2076 {
2077 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2078 xscale->ibcr1_used = 1;
2079 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2080 }
2081 else
2082 {
2083 LOG_ERROR("BUG: no hardware comparator available");
2084 return ERROR_OK;
2085 }
2086 }
2087 else if (breakpoint->type == BKPT_SOFT)
2088 {
2089 if (breakpoint->length == 4)
2090 {
2091 /* keep the original instruction in target endianness */
2092 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2093 {
2094 return retval;
2095 }
2096 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2097 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2098 {
2099 return retval;
2100 }
2101 }
2102 else
2103 {
2104 /* keep the original instruction in target endianness */
2105 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2106 {
2107 return retval;
2108 }
2109 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2110 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2111 {
2112 return retval;
2113 }
2114 }
2115 breakpoint->set = 1;
2116 }
2117
2118 return ERROR_OK;
2119 }
2120
2121 static int xscale_add_breakpoint(struct target_s *target,
2122 breakpoint_t *breakpoint)
2123 {
2124 struct xscale_common_s *xscale = target_to_xscale(target);
2125
2126 if (target->state != TARGET_HALTED)
2127 {
2128 LOG_WARNING("target not halted");
2129 return ERROR_TARGET_NOT_HALTED;
2130 }
2131
2132 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2133 {
2134 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2135 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2136 }
2137
2138 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2139 {
2140 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2141 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2142 }
2143
2144 if (breakpoint->type == BKPT_HARD)
2145 {
2146 xscale->ibcr_available--;
2147 }
2148
2149 return ERROR_OK;
2150 }
2151
2152 static int xscale_unset_breakpoint(struct target_s *target,
2153 breakpoint_t *breakpoint)
2154 {
2155 int retval;
2156 struct xscale_common_s *xscale = target_to_xscale(target);
2157
2158 if (target->state != TARGET_HALTED)
2159 {
2160 LOG_WARNING("target not halted");
2161 return ERROR_TARGET_NOT_HALTED;
2162 }
2163
2164 if (!breakpoint->set)
2165 {
2166 LOG_WARNING("breakpoint not set");
2167 return ERROR_OK;
2168 }
2169
2170 if (breakpoint->type == BKPT_HARD)
2171 {
2172 if (breakpoint->set == 1)
2173 {
2174 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2175 xscale->ibcr0_used = 0;
2176 }
2177 else if (breakpoint->set == 2)
2178 {
2179 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2180 xscale->ibcr1_used = 0;
2181 }
2182 breakpoint->set = 0;
2183 }
2184 else
2185 {
2186 /* restore original instruction (kept in target endianness) */
2187 if (breakpoint->length == 4)
2188 {
2189 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2190 {
2191 return retval;
2192 }
2193 }
2194 else
2195 {
2196 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2197 {
2198 return retval;
2199 }
2200 }
2201 breakpoint->set = 0;
2202 }
2203
2204 return ERROR_OK;
2205 }
2206
2207 static int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2208 {
2209 struct xscale_common_s *xscale = target_to_xscale(target);
2210
2211 if (target->state != TARGET_HALTED)
2212 {
2213 LOG_WARNING("target not halted");
2214 return ERROR_TARGET_NOT_HALTED;
2215 }
2216
2217 if (breakpoint->set)
2218 {
2219 xscale_unset_breakpoint(target, breakpoint);
2220 }
2221
2222 if (breakpoint->type == BKPT_HARD)
2223 xscale->ibcr_available++;
2224
2225 return ERROR_OK;
2226 }
2227
2228 static int xscale_set_watchpoint(struct target_s *target,
2229 watchpoint_t *watchpoint)
2230 {
2231 struct xscale_common_s *xscale = target_to_xscale(target);
2232 uint8_t enable = 0;
2233 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2234 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2235
2236 if (target->state != TARGET_HALTED)
2237 {
2238 LOG_WARNING("target not halted");
2239 return ERROR_TARGET_NOT_HALTED;
2240 }
2241
2242 xscale_get_reg(dbcon);
2243
2244 switch (watchpoint->rw)
2245 {
2246 case WPT_READ:
2247 enable = 0x3;
2248 break;
2249 case WPT_ACCESS:
2250 enable = 0x2;
2251 break;
2252 case WPT_WRITE:
2253 enable = 0x1;
2254 break;
2255 default:
2256 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2257 }
2258
2259 if (!xscale->dbr0_used)
2260 {
2261 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2262 dbcon_value |= enable;
2263 xscale_set_reg_u32(dbcon, dbcon_value);
2264 watchpoint->set = 1;
2265 xscale->dbr0_used = 1;
2266 }
2267 else if (!xscale->dbr1_used)
2268 {
2269 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2270 dbcon_value |= enable << 2;
2271 xscale_set_reg_u32(dbcon, dbcon_value);
2272 watchpoint->set = 2;
2273 xscale->dbr1_used = 1;
2274 }
2275 else
2276 {
2277 LOG_ERROR("BUG: no hardware comparator available");
2278 return ERROR_OK;
2279 }
2280
2281 return ERROR_OK;
2282 }
2283
2284 static int xscale_add_watchpoint(struct target_s *target,
2285 watchpoint_t *watchpoint)
2286 {
2287 struct xscale_common_s *xscale = target_to_xscale(target);
2288
2289 if (target->state != TARGET_HALTED)
2290 {
2291 LOG_WARNING("target not halted");
2292 return ERROR_TARGET_NOT_HALTED;
2293 }
2294
2295 if (xscale->dbr_available < 1)
2296 {
2297 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2298 }
2299
2300 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2301 {
2302 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2303 }
2304
2305 xscale->dbr_available--;
2306
2307 return ERROR_OK;
2308 }
2309
2310 static int xscale_unset_watchpoint(struct target_s *target,
2311 watchpoint_t *watchpoint)
2312 {
2313 struct xscale_common_s *xscale = target_to_xscale(target);
2314 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2315 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2316
2317 if (target->state != TARGET_HALTED)
2318 {
2319 LOG_WARNING("target not halted");
2320 return ERROR_TARGET_NOT_HALTED;
2321 }
2322
2323 if (!watchpoint->set)
2324 {
2325 LOG_WARNING("breakpoint not set");
2326 return ERROR_OK;
2327 }
2328
2329 if (watchpoint->set == 1)
2330 {
2331 dbcon_value &= ~0x3;
2332 xscale_set_reg_u32(dbcon, dbcon_value);
2333 xscale->dbr0_used = 0;
2334 }
2335 else if (watchpoint->set == 2)
2336 {
2337 dbcon_value &= ~0xc;
2338 xscale_set_reg_u32(dbcon, dbcon_value);
2339 xscale->dbr1_used = 0;
2340 }
2341 watchpoint->set = 0;
2342
2343 return ERROR_OK;
2344 }
2345
2346 static int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2347 {
2348 struct xscale_common_s *xscale = target_to_xscale(target);
2349
2350 if (target->state != TARGET_HALTED)
2351 {
2352 LOG_WARNING("target not halted");
2353 return ERROR_TARGET_NOT_HALTED;
2354 }
2355
2356 if (watchpoint->set)
2357 {
2358 xscale_unset_watchpoint(target, watchpoint);
2359 }
2360
2361 xscale->dbr_available++;
2362
2363 return ERROR_OK;
2364 }
2365
/* reg_t read accessor for the XScale debug register cache.
 *
 * DCSR, TX and RX are accessible directly over JTAG; RX and TXRXCTRL are
 * write-only / implicit from the host's point of view, so reads of those
 * are no-ops.  Every other debug register is fetched through the resident
 * debug handler (CP read request 0x40 followed by the register number),
 * with the result arriving via the TX register.
 */
static int xscale_get_reg(reg_t *reg)
{
	xscale_reg_t *arch_info = reg->arch_info;
	target_t *target = arch_info->target;
	struct xscale_common_s *xscale = target_to_xscale(target);

	/* DCSR, TX and RX are accessible via JTAG */
	if (strcmp(reg->name, "XSCALE_DCSR") == 0)
	{
		return xscale_read_dcsr(arch_info->target);
	}
	else if (strcmp(reg->name, "XSCALE_TX") == 0)
	{
		/* 1 = consume register content */
		return xscale_read_tx(arch_info->target, 1);
	}
	else if (strcmp(reg->name, "XSCALE_RX") == 0)
	{
		/* can't read from RX register (host -> debug handler) */
		return ERROR_OK;
	}
	else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
	{
		/* can't (explicitly) read from TXRXCTRL register */
		return ERROR_OK;
	}
	else /* Other DBG registers have to be transfered by the debug handler */
	{
		/* send CP read request (command 0x40) */
		xscale_send_u32(target, 0x40);

		/* send CP register number */
		xscale_send_u32(target, arch_info->dbg_handler_number);

		/* read register value from TX and copy it into this reg's cache slot
		 * NOTE(review): the xscale_read_tx() return value is not checked here */
		xscale_read_tx(target, 1);
		buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);

		reg->dirty = 0;
		reg->valid = 1;
	}

	return ERROR_OK;
}
2410
/* reg_t write accessor for the XScale debug register cache.
 *
 * DCSR and RX are written directly over JTAG (the cache slot is updated
 * first, then flushed to the target); TX and TXRXCTRL cannot be written
 * from the host and are silently ignored.  All other debug registers are
 * written through the resident debug handler (CP write request 0x41,
 * register number, value) and mirrored into the cache.
 */
static int xscale_set_reg(reg_t *reg, uint8_t* buf)
{
	xscale_reg_t *arch_info = reg->arch_info;
	target_t *target = arch_info->target;
	struct xscale_common_s *xscale = target_to_xscale(target);
	uint32_t value = buf_get_u32(buf, 0, 32);

	/* DCSR, TX and RX are accessible via JTAG */
	if (strcmp(reg->name, "XSCALE_DCSR") == 0)
	{
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
		return xscale_write_dcsr(arch_info->target, -1, -1);
	}
	else if (strcmp(reg->name, "XSCALE_RX") == 0)
	{
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
		return xscale_write_rx(arch_info->target);
	}
	else if (strcmp(reg->name, "XSCALE_TX") == 0)
	{
		/* can't write to TX register (debug-handler -> host) */
		return ERROR_OK;
	}
	else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
	{
		/* can't (explicitly) write to TXRXCTRL register */
		return ERROR_OK;
	}
	else /* Other DBG registers have to be transfered by the debug handler */
	{
		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, arch_info->dbg_handler_number);

		/* send CP register value and mirror it in the cache */
		xscale_send_u32(target, value);
		buf_set_u32(reg->value, 0, 32, value);
	}

	return ERROR_OK;
}
2454
2455 static int xscale_write_dcsr_sw(target_t *target, uint32_t value)
2456 {
2457 struct xscale_common_s *xscale = target_to_xscale(target);
2458 reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2459 xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
2460
2461 /* send CP write request (command 0x41) */
2462 xscale_send_u32(target, 0x41);
2463
2464 /* send CP register number */
2465 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2466
2467 /* send CP register value */
2468 xscale_send_u32(target, value);
2469 buf_set_u32(dcsr->value, 0, 32, value);
2470
2471 return ERROR_OK;
2472 }
2473
2474 static int xscale_read_trace(target_t *target)
2475 {
2476 struct xscale_common_s *xscale = target_to_xscale(target);
2477 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2478 xscale_trace_data_t **trace_data_p;
2479
2480 /* 258 words from debug handler
2481 * 256 trace buffer entries
2482 * 2 checkpoint addresses
2483 */
2484 uint32_t trace_buffer[258];
2485 int is_address[256];
2486 int i, j;
2487
2488 if (target->state != TARGET_HALTED)
2489 {
2490 LOG_WARNING("target must be stopped to read trace data");
2491 return ERROR_TARGET_NOT_HALTED;
2492 }
2493
2494 /* send read trace buffer command (command 0x61) */
2495 xscale_send_u32(target, 0x61);
2496
2497 /* receive trace buffer content */
2498 xscale_receive(target, trace_buffer, 258);
2499
2500 /* parse buffer backwards to identify address entries */
2501 for (i = 255; i >= 0; i--)
2502 {
2503 is_address[i] = 0;
2504 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2505 ((trace_buffer[i] & 0xf0) == 0xd0))
2506 {
2507 if (i >= 3)
2508 is_address[--i] = 1;
2509 if (i >= 2)
2510 is_address[--i] = 1;
2511 if (i >= 1)
2512 is_address[--i] = 1;
2513 if (i >= 0)
2514 is_address[--i] = 1;
2515 }
2516 }
2517
2518
2519 /* search first non-zero entry */
2520 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2521 ;
2522
2523 if (j == 256)
2524 {
2525 LOG_DEBUG("no trace data collected");
2526 return ERROR_XSCALE_NO_TRACE_DATA;
2527 }
2528
2529 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2530 ;
2531
2532 *trace_data_p = malloc(sizeof(xscale_trace_data_t));
2533 (*trace_data_p)->next = NULL;
2534 (*trace_data_p)->chkpt0 = trace_buffer[256];
2535 (*trace_data_p)->chkpt1 = trace_buffer[257];
2536 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2537 (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
2538 (*trace_data_p)->depth = 256 - j;
2539
2540 for (i = j; i < 256; i++)
2541 {
2542 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2543 if (is_address[i])
2544 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2545 else
2546 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2547 }
2548
2549 return ERROR_OK;
2550 }
2551
/* Disassemble the instruction at xscale->trace.current_pc by reading its
 * opcode from the loaded trace image (ARM or Thumb decoding depending on
 * xscale->trace.core_state).
 *
 * Returns ERROR_TRACE_IMAGE_UNAVAILABLE when no image was loaded, and
 * ERROR_TRACE_INSTRUCTION_UNAVAILABLE when the PC falls outside every
 * image section or the section read fails.
 *
 * NOTE(review): the trailing else calls exit(-1) on an unexpected core
 * state, terminating the whole process; returning an error would be
 * kinder — confirm before changing, callers may rely on the invariant.
 */
static int xscale_read_instruction(target_t *target,
		arm_instruction_t *instruction)
{
	struct xscale_common_s *xscale = target_to_xscale(target);
	int i;
	int section = -1;
	uint32_t size_read;
	uint32_t opcode;
	int retval;

	if (!xscale->trace.image)
		return ERROR_TRACE_IMAGE_UNAVAILABLE;

	/* search for the section the current instruction belongs to */
	for (i = 0; i < xscale->trace.image->num_sections; i++)
	{
		if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
			(xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
		{
			section = i;
			break;
		}
	}

	if (section == -1)
	{
		/* current instruction couldn't be found in the image */
		return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
	}

	if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
	{
		/* 32-bit ARM opcode, read at the PC's offset within the section */
		uint8_t buf[4];
		if ((retval = image_read_section(xscale->trace.image, section,
			xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
			4, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction: %i", retval);
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u32(target, buf);
		arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
	}
	else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
	{
		/* 16-bit Thumb opcode */
		uint8_t buf[2];
		if ((retval = image_read_section(xscale->trace.image, section,
			xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
			2, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction: %i", retval);
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u16(target, buf);
		thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
	}
	else
	{
		LOG_ERROR("BUG: unknown core state encountered");
		exit(-1);
	}

	return ERROR_OK;
}
2616
2617 static int xscale_branch_address(xscale_trace_data_t *trace_data,
2618 int i, uint32_t *target)
2619 {
2620 /* if there are less than four entries prior to the indirect branch message
2621 * we can't extract the address */
2622 if (i < 4)
2623 {
2624 return -1;
2625 }
2626
2627 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2628 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2629
2630 return 0;
2631 }
2632
2633 static int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
2634 {
2635 struct xscale_common_s *xscale = target_to_xscale(target);
2636 int next_pc_ok = 0;
2637 uint32_t next_pc = 0x0;
2638 xscale_trace_data_t *trace_data = xscale->trace.data;
2639 int retval;
2640
2641 while (trace_data)
2642 {
2643 int i, chkpt;
2644 int rollover;
2645 int branch;
2646 int exception;
2647 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2648
2649 chkpt = 0;
2650 rollover = 0;
2651
2652 for (i = 0; i < trace_data->depth; i++)
2653 {
2654 next_pc_ok = 0;
2655 branch = 0;
2656 exception = 0;
2657
2658 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2659 continue;
2660
2661 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2662 {
2663 case 0: /* Exceptions */
2664 case 1:
2665 case 2:
2666 case 3:
2667 case 4:
2668 case 5:
2669 case 6:
2670 case 7:
2671 exception = (trace_data->entries[i].data & 0x70) >> 4;
2672 next_pc_ok = 1;
2673 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2674 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2675 break;
2676 case 8: /* Direct Branch */
2677 branch = 1;
2678 break;
2679 case 9: /* Indirect Branch */
2680 branch = 1;
2681 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2682 {
2683 next_pc_ok = 1;
2684 }
2685 break;
2686 case 13: /* Checkpointed Indirect Branch */
2687 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2688 {
2689 next_pc_ok = 1;
2690 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2691 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2692 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2693 }
2694 /* explicit fall-through */
2695 case 12: /* Checkpointed Direct Branch */
2696 branch = 1;
2697 if (chkpt == 0)
2698 {
2699 next_pc_ok = 1;
2700 next_pc = trace_data->chkpt0;
2701 chkpt++;
2702 }
2703 else if (chkpt == 1)
2704 {
2705 next_pc_ok = 1;
2706 next_pc = trace_data->chkpt0;
2707 chkpt++;
2708 }
2709 else
2710 {
2711 LOG_WARNING("more than two checkpointed branches encountered");
2712 }
2713 break;
2714 case 15: /* Roll-over */
2715 rollover++;
2716 continue;
2717 default: /* Reserved */
2718 command_print(cmd_ctx, "--- reserved trace message ---");
2719 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2720 return ERROR_OK;
2721 }
2722
2723 if (xscale->trace.pc_ok)
2724 {
2725 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2726 arm_instruction_t instruction;
2727
2728 if ((exception == 6) || (exception == 7))
2729 {
2730 /* IRQ or FIQ exception, no instruction executed */
2731 executed -= 1;
2732 }
2733
2734 while (executed-- >= 0)
2735 {
2736 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2737 {
2738 /* can't continue tracing with no image available */
2739 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2740 {
2741 return retval;
2742 }
2743 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2744 {
2745 /* TODO: handle incomplete images */
2746 }
2747 }
2748
2749 /* a precise abort on a load to the PC is included in the incremental
2750 * word count, other instructions causing data aborts are not included
2751 */
2752 if ((executed == 0) && (exception == 4)
2753 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2754 {
2755 if ((instruction.type == ARM_LDM)
2756 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2757 {
2758 executed--;
2759 }
2760 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2761 && (instruction.info.load_store.Rd != 15))
2762 {
2763 executed--;
2764 }
2765 }
2766
2767 /* only the last instruction executed
2768 * (the one that caused the control flow change)
2769 * could be a taken branch
2770 */
2771 if (((executed == -1) && (branch == 1)) &&
2772 (((instruction.type == ARM_B) ||
2773 (instruction.type == ARM_BL) ||
2774 (instruction.type == ARM_BLX)) &&
2775 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2776 {
2777 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2778 }
2779 else
2780 {
2781 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2782 }
2783 command_print(cmd_ctx, "%s", instruction.text);
2784 }
2785
2786 rollover = 0;
2787 }
2788
2789 if (next_pc_ok)
2790 {
2791 xscale->trace.current_pc = next_pc;
2792 xscale->trace.pc_ok = 1;
2793 }
2794 }
2795
2796 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2797 {
2798 arm_instruction_t instruction;
2799 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2800 {
2801 /* can't continue tracing with no image available */
2802 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2803 {
2804 return retval;
2805 }
2806 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2807 {
2808 /* TODO: handle incomplete images */
2809 }
2810 }
2811 command_print(cmd_ctx, "%s", instruction.text);
2812 }
2813
2814 trace_data = trace_data->next;
2815 }
2816
2817 return ERROR_OK;
2818 }
2819
/* Build this target's register caches: the common ARMv4/5 core register
 * cache first, then a second cache for the XScale debug registers whose
 * layout comes from the static xscale_reg_arch_info template (each entry
 * is copied and bound to this target).
 *
 * NOTE(review): the malloc()/calloc() results here are not checked; a
 * failed allocation would crash on the subsequent dereference.
 */
static void xscale_build_reg_cache(target_t *target)
{
	struct xscale_common_s *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
	xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
	int i;
	int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);

	(*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
	armv4_5->core_cache = (*cache_p);

	/* register a register arch-type for XScale dbg registers only once */
	if (xscale_reg_arch_type == -1)
		xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);

	/* chain the XScale debug register cache after the core cache */
	(*cache_p)->next = malloc(sizeof(reg_cache_t));
	cache_p = &(*cache_p)->next;

	/* fill in values for the xscale reg cache */
	(*cache_p)->name = "XScale registers";
	(*cache_p)->next = NULL;
	(*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
	(*cache_p)->num_regs = num_regs;

	for (i = 0; i < num_regs; i++)
	{
		(*cache_p)->reg_list[i].name = xscale_reg_list[i];
		(*cache_p)->reg_list[i].value = calloc(4, 1);	/* 32-bit, zeroed */
		(*cache_p)->reg_list[i].dirty = 0;
		(*cache_p)->reg_list[i].valid = 0;
		(*cache_p)->reg_list[i].size = 32;
		(*cache_p)->reg_list[i].bitfield_desc = NULL;
		(*cache_p)->reg_list[i].num_bitfields = 0;
		(*cache_p)->reg_list[i].arch_info = &arch_info[i];
		(*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
		/* copy the static template entry and bind it to this target */
		arch_info[i] = xscale_reg_arch_info[i];
		arch_info[i].target = target;
	}

	xscale->reg_cache = (*cache_p);
}
2862
/* target_type "init_target" hook: build the register caches for this
 * target; nothing else is required at this stage. */
static int xscale_init_target(struct command_context_s *cmd_ctx,
		struct target_s *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2869
/* One-time initialization of the per-target XScale state: tap/variant
 * reconciliation, default debug handler address and vectors, comparator
 * bookkeeping, trace state, and the ARMv4/5 core and MMU glue.
 *
 * Always returns ERROR_OK.
 */
static int xscale_init_arch_info(target_t *target,
		xscale_common_t *xscale, jtag_tap_t *tap, const char *variant)
{
	armv4_5_common_t *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specfic data (none so far) */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* variant info ... */
	if (variant) {
		int ir_length = 0;

		/* known variants fall into two IR-length families */
		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		/* the variant wins over a mismatched config-file IR length */
		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	/* reset vector branches into the debug handler */
	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* remaining vectors default to a branch-to-self (0xfffffe offset) */
	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction (IBCR) and two data (DBR) comparators, all free */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* soft breakpoint opcodes, ARM and Thumb encodings */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	xscale->trace.capture_status = TRACE_IDLE;
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.buffer_enabled = 0;
	xscale->trace.buffer_fill = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	armv4_5_init_arch_info(target, armv4_5);

	/* hook this target's memory access and cache control into the
	 * generic ARMv4/5 MMU layer */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
2974
2975 static int xscale_target_create(struct target_s *target, Jim_Interp *interp)
2976 {
2977 xscale_common_t *xscale;
2978
2979 if (sizeof xscale_debug_handler - 1 > 0x800) {
2980 LOG_ERROR("debug_handler.bin: larger than 2kb");
2981 return ERROR_FAIL;
2982 }
2983
2984 xscale = calloc(1, sizeof(*xscale));
2985 if (!xscale)
2986 return ERROR_FAIL;
2987
2988 return xscale_init_arch_info(target, xscale, target->tap,
2989 target->variant);
2990 }
2991
2992 COMMAND_HANDLER(xscale_handle_debug_handler_command)
2993 {
2994 target_t *target = NULL;
2995 xscale_common_t *xscale;
2996 int retval;
2997 uint32_t handler_address;
2998
2999 if (argc < 2)
3000 {
3001 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3002 return ERROR_OK;
3003 }
3004
3005 if ((target = get_target(args[0])) == NULL)
3006 {
3007 LOG_ERROR("target '%s' not defined", args[0]);
3008 return ERROR_FAIL;
3009 }
3010
3011 xscale = target_to_xscale(target);
3012 retval = xscale_verify_pointer(cmd_ctx, xscale);
3013 if (retval != ERROR_OK)
3014 return retval;
3015
3016 COMMAND_PARSE_NUMBER(u32, args[1], handler_address);
3017
3018 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3019 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3020 {
3021 xscale->handler_address = handler_address;
3022 }
3023 else
3024 {
3025 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3026 return ERROR_FAIL;
3027 }
3028
3029 return ERROR_OK;
3030 }
3031
3032 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3033 {
3034 target_t *target = NULL;
3035 xscale_common_t *xscale;
3036 int retval;
3037 uint32_t cache_clean_address;
3038
3039 if (argc < 2)
3040 {
3041 return ERROR_COMMAND_SYNTAX_ERROR;
3042 }
3043
3044 target = get_target(args[0]);
3045 if (target == NULL)
3046 {
3047 LOG_ERROR("target '%s' not defined", args[0]);
3048 return ERROR_FAIL;
3049 }
3050 xscale = target_to_xscale(target);
3051 retval = xscale_verify_pointer(cmd_ctx, xscale);
3052 if (retval != ERROR_OK)
3053 return retval;
3054
3055 COMMAND_PARSE_NUMBER(u32, args[1], cache_clean_address);
3056
3057 if (cache_clean_address & 0xffff)
3058 {
3059 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3060 }
3061 else
3062 {
3063 xscale->cache_clean_address = cache_clean_address;
3064 }
3065
3066 return ERROR_OK;
3067 }
3068
3069 COMMAND_HANDLER(xscale_handle_cache_info_command)
3070 {
3071 target_t *target = get_current_target(cmd_ctx);
3072 struct xscale_common_s *xscale = target_to_xscale(target);
3073 int retval;
3074
3075 retval = xscale_verify_pointer(cmd_ctx, xscale);
3076 if (retval != ERROR_OK)
3077 return retval;
3078
3079 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3080 }
3081
/* Translate a virtual address to a physical one by walking the target's
 * page tables through the generic ARMv4/5 MMU helper.
 *
 * NOTE(review): armv4_5_mmu_translate_va() apparently packs either the
 * physical address or an error code into the same uint32_t, with
 * type == -1 distinguishing the error case — confirm against that
 * helper's definition before relying on this.
 */
static int xscale_virt2phys(struct target_s *target,
		uint32_t virtual, uint32_t *physical)
{
	struct xscale_common_s *xscale = target_to_xscale(target);
	int type;
	uint32_t cb;
	int domain;
	uint32_t ap;

	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		LOG_ERROR(xscale_not);
		return ERROR_TARGET_INVALID;
	}

	uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
	if (type == -1)
	{
		/* translation failed; ret holds the error code */
		return ret;
	}
	*physical = ret;
	return ERROR_OK;
}
3104
3105 static int xscale_mmu(struct target_s *target, int *enabled)
3106 {
3107 struct xscale_common_s *xscale = target_to_xscale(target);
3108
3109 if (target->state != TARGET_HALTED)
3110 {
3111 LOG_ERROR("Target not halted");
3112 return ERROR_TARGET_INVALID;
3113 }
3114 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3115 return ERROR_OK;
3116 }
3117
3118 COMMAND_HANDLER(xscale_handle_mmu_command)
3119 {
3120 target_t *target = get_current_target(cmd_ctx);
3121 struct xscale_common_s *xscale = target_to_xscale(target);
3122 int retval;
3123
3124 retval = xscale_verify_pointer(cmd_ctx, xscale);
3125 if (retval != ERROR_OK)
3126 return retval;
3127
3128 if (target->state != TARGET_HALTED)
3129 {
3130 command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
3131 return ERROR_OK;
3132 }
3133
3134 if (argc >= 1)
3135 {
3136 if (strcmp("enable", args[0]) == 0)
3137 {
3138 xscale_enable_mmu_caches(target, 1, 0, 0);
3139 xscale->armv4_5_mmu.mmu_enabled = 1;
3140 }
3141 else if (strcmp("disable", args[0]) == 0)
3142 {
3143 xscale_disable_mmu_caches(target, 1, 0, 0);
3144 xscale->armv4_5_mmu.mmu_enabled = 0;
3145 }
3146 }
3147
3148 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3149
3150 return ERROR_OK;
3151 }
3152
/* Shared handler for the "xscale icache" and "xscale dcache" commands:
 * with an "enable"/"disable" argument, switch the corresponding cache
 * (target must be halted); afterwards always print the current state.
 * Which cache is affected is derived from the invoked command's name.
 */
COMMAND_HANDLER(xscale_handle_idcache_command)
{
	target_t *target = get_current_target(cmd_ctx);
	struct xscale_common_s *xscale = target_to_xscale(target);
	int icache = 0, dcache = 0;
	int retval;

	retval = xscale_verify_pointer(cmd_ctx, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	/* one handler serves both commands; dispatch on the command name */
	if (strcmp(CMD_NAME, "icache") == 0)
		icache = 1;
	else if (strcmp(CMD_NAME, "dcache") == 0)
		dcache = 1;

	if (argc >= 1)
	{
		if (strcmp("enable", args[0]) == 0)
		{
			xscale_enable_mmu_caches(target, 0, dcache, icache);

			if (icache)
				xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
			else if (dcache)
				xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
		}
		else if (strcmp("disable", args[0]) == 0)
		{
			xscale_disable_mmu_caches(target, 0, dcache, icache);

			if (icache)
				xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
			else if (dcache)
				xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
		}
	}

	if (icache)
		command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");

	if (dcache)
		command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");

	return ERROR_OK;
}
3205
3206 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3207 {
3208 target_t *target = get_current_target(cmd_ctx);
3209 struct xscale_common_s *xscale = target_to_xscale(target);
3210 int retval;
3211
3212 retval = xscale_verify_pointer(cmd_ctx, xscale);
3213 if (retval != ERROR_OK)
3214 return retval;
3215
3216 if (argc < 1)
3217 {
3218 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3219 }
3220 else
3221 {
3222 COMMAND_PARSE_NUMBER(u8, args[0], xscale->vector_catch);
3223 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3224 xscale_write_dcsr(target, -1, -1);
3225 }
3226
3227 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3228
3229 return ERROR_OK;
3230 }
3231
3232
3233 COMMAND_HANDLER(xscale_handle_vector_table_command)
3234 {
3235 target_t *target = get_current_target(cmd_ctx);
3236 struct xscale_common_s *xscale = target_to_xscale(target);
3237 int err = 0;
3238 int retval;
3239
3240 retval = xscale_verify_pointer(cmd_ctx, xscale);
3241 if (retval != ERROR_OK)
3242 return retval;
3243
3244 if (argc == 0) /* print current settings */
3245 {
3246 int idx;
3247
3248 command_print(cmd_ctx, "active user-set static vectors:");
3249 for (idx = 1; idx < 8; idx++)
3250 if (xscale->static_low_vectors_set & (1 << idx))
3251 command_print(cmd_ctx, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3252 for (idx = 1; idx < 8; idx++)
3253 if (xscale->static_high_vectors_set & (1 << idx))
3254 command_print(cmd_ctx, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3255 return ERROR_OK;
3256 }
3257
3258 if (argc != 3)
3259 err = 1;
3260 else
3261 {
3262 int idx;
3263 COMMAND_PARSE_NUMBER(int, args[1], idx);
3264 uint32_t vec;
3265 COMMAND_PARSE_NUMBER(u32, args[2], vec);
3266
3267 if (idx < 1 || idx >= 8)
3268 err = 1;
3269
3270 if (!err && strcmp(args[0], "low") == 0)
3271 {
3272 xscale->static_low_vectors_set |= (1<<idx);
3273 xscale->static_low_vectors[idx] = vec;
3274 }
3275 else if (!err && (strcmp(args[0], "high") == 0))
3276 {
3277 xscale->static_high_vectors_set |= (1<<idx);
3278 xscale->static_high_vectors[idx] = vec;
3279 }
3280 else
3281 err = 1;
3282 }
3283
3284 if (err)
3285 command_print(cmd_ctx, "usage: xscale vector_table <high|low> <index> <code>");
3286
3287 return ERROR_OK;
3288 }
3289
3290
/* "xscale trace_buffer <enable|disable> ['fill' [n]|'wrap']":
 * configure the on-chip trace buffer.  Enabling discards previously
 * collected trace data; the second argument selects fill-once vs.
 * wrap-around capture mode.  Requires a halted target. */
COMMAND_HANDLER(xscale_handle_trace_buffer_command)
{
	target_t *target = get_current_target(cmd_ctx);
	struct xscale_common_s *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	uint32_t dcsr_value;
	int retval;

	retval = xscale_verify_pointer(cmd_ctx, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
	{
		xscale_trace_data_t *td, *next_td;
		xscale->trace.buffer_enabled = 1;

		/* free old trace data */
		td = xscale->trace.data;
		while (td)
		{
			next_td = td->next;

			if (td->entries)
				free(td->entries);
			free(td);
			td = next_td;
		}
		xscale->trace.data = NULL;
	}
	else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
	{
		xscale->trace.buffer_enabled = 0;
	}

	/* buffer_fill > 0 means "capture this many buffer fills";
	 * buffer_fill == -1 means wrap-around (continuous) mode */
	if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
	{
		uint32_t fill = 1;
		if (argc >= 3)
			COMMAND_PARSE_NUMBER(u32, args[2], fill);
		xscale->trace.buffer_fill = fill;
	}
	else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
	{
		xscale->trace.buffer_fill = -1;
	}

	if (xscale->trace.buffer_enabled)
	{
		/* if we enable the trace buffer in fill-once
		 * mode we know the address of the first instruction */
		xscale->trace.pc_ok = 1;
		xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	}
	else
	{
		/* otherwise the address is unknown, and we have no known good PC */
		xscale->trace.pc_ok = 0;
	}

	command_print(cmd_ctx, "trace buffer %s (%s)",
		(xscale->trace.buffer_enabled) ? "enabled" : "disabled",
		(xscale->trace.buffer_fill > 0) ? "fill" : "wrap");

	/* update DCSR[1:0]: bit 1 set selects fill mode, clear selects wrap —
	 * NOTE(review): verify bit meanings against the XScale DCSR spec */
	dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
	if (xscale->trace.buffer_fill >= 0)
		xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
	else
		xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);

	return ERROR_OK;
}
3369
3370 COMMAND_HANDLER(xscale_handle_trace_image_command)
3371 {
3372 target_t *target = get_current_target(cmd_ctx);
3373 struct xscale_common_s *xscale = target_to_xscale(target);
3374 int retval;
3375
3376 if (argc < 1)
3377 {
3378 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3379 return ERROR_OK;
3380 }
3381
3382 retval = xscale_verify_pointer(cmd_ctx, xscale);
3383 if (retval != ERROR_OK)
3384 return retval;
3385
3386 if (xscale->trace.image)
3387 {
3388 image_close(xscale->trace.image);
3389 free(xscale->trace.image);
3390 command_print(cmd_ctx, "previously loaded image found and closed");
3391 }
3392
3393 xscale->trace.image = malloc(sizeof(image_t));
3394 xscale->trace.image->base_address_set = 0;
3395 xscale->trace.image->start_address_set = 0;
3396
3397 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3398 if (argc >= 2)
3399 {
3400 xscale->trace.image->base_address_set = 1;
3401 COMMAND_PARSE_NUMBER(int, args[1], xscale->trace.image->base_address);
3402 }
3403 else
3404 {
3405 xscale->trace.image->base_address_set = 0;
3406 }
3407
3408 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
3409 {
3410 free(xscale->trace.image);
3411 xscale->trace.image = NULL;
3412 return ERROR_OK;
3413 }
3414
3415 return ERROR_OK;
3416 }
3417
3418 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3419 {
3420 target_t *target = get_current_target(cmd_ctx);
3421 struct xscale_common_s *xscale = target_to_xscale(target);
3422 xscale_trace_data_t *trace_data;
3423 struct fileio file;
3424 int retval;
3425
3426 retval = xscale_verify_pointer(cmd_ctx, xscale);
3427 if (retval != ERROR_OK)
3428 return retval;
3429
3430 if (target->state != TARGET_HALTED)
3431 {
3432 command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
3433 return ERROR_OK;
3434 }
3435
3436 if (argc < 1)
3437 {
3438 command_print(cmd_ctx, "usage: xscale dump_trace <file>");
3439 return ERROR_OK;
3440 }
3441
3442 trace_data = xscale->trace.data;
3443
3444 if (!trace_data)
3445 {
3446 command_print(cmd_ctx, "no trace data collected");
3447 return ERROR_OK;
3448 }
3449
3450 if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3451 {
3452 return ERROR_OK;
3453 }
3454
3455 while (trace_data)
3456 {
3457 int i;
3458
3459 fileio_write_u32(&file, trace_data->chkpt0);
3460 fileio_write_u32(&file, trace_data->chkpt1);
3461 fileio_write_u32(&file, trace_data->last_instruction);
3462 fileio_write_u32(&file, trace_data->depth);
3463
3464 for (i = 0; i < trace_data->depth; i++)
3465 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3466
3467 trace_data = trace_data->next;
3468 }
3469
3470 fileio_close(&file);
3471
3472 return ERROR_OK;
3473 }
3474
3475 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3476 {
3477 target_t *target = get_current_target(cmd_ctx);
3478 struct xscale_common_s *xscale = target_to_xscale(target);
3479 int retval;
3480
3481 retval = xscale_verify_pointer(cmd_ctx, xscale);
3482 if (retval != ERROR_OK)
3483 return retval;
3484
3485 xscale_analyze_trace(target, cmd_ctx);
3486
3487 return ERROR_OK;
3488 }
3489
/* "xscale cp15 <register> [value]": read or write a CP15 coprocessor
 * register on a halted target.  The CP15 register number is mapped to
 * the corresponding entry in the OpenOCD register cache; reads refresh
 * the cache, writes are sent straight to the debug handler. */
COMMAND_HANDLER(xscale_handle_cp15)
{
	target_t *target = get_current_target(cmd_ctx);
	struct xscale_common_s *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(cmd_ctx, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}
	uint32_t reg_no = 0;
	reg_t *reg = NULL;
	if (argc > 0)
	{
		COMMAND_PARSE_NUMBER(u32, args[0], reg_no);
		/*translate from xscale cp15 register no to openocd register*/
		switch (reg_no)
		{
		case 0:
			reg_no = XSCALE_MAINID;
			break;
		case 1:
			reg_no = XSCALE_CTRL;
			break;
		case 2:
			reg_no = XSCALE_TTB;
			break;
		case 3:
			reg_no = XSCALE_DAC;
			break;
		case 5:
			reg_no = XSCALE_FSR;
			break;
		case 6:
			reg_no = XSCALE_FAR;
			break;
		case 13:
			reg_no = XSCALE_PID;
			break;
		case 15:
			reg_no = XSCALE_CPACCESS;
			break;
		default:
			/* CP15 registers not listed above are not accessible here */
			command_print(cmd_ctx, "invalid register number");
			return ERROR_INVALID_ARGUMENTS;
		}
		reg = &xscale->reg_cache->reg_list[reg_no];

	}
	if (argc == 1)
	{
		uint32_t value;

		/* read cp15 control register */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(cmd_ctx, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
	}
	else if (argc == 2)
	{
		/* NOTE(review): the write path does not refresh or invalidate the
		 * cached reg->value, so the cached copy may go stale — verify */
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, args[1], value);

		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);
	}
	else
	{
		command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
	}

	return ERROR_OK;
}
3577
3578 static int xscale_register_commands(struct command_context_s *cmd_ctx)
3579 {
3580 command_t *xscale_cmd;
3581
3582 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3583
3584 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3585 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3586
3587 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3588 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3589 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3590 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3591
3592 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
3593 register_command(cmd_ctx, xscale_cmd, "vector_table", xscale_handle_vector_table_command, COMMAND_EXEC, "<high|low> <index> <code> set static code for exception handler entry");
3594
3595 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable | disable> ['fill' [n]|'wrap']");
3596
3597 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3598 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3599 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3600 COMMAND_EXEC, "load image from <file> [base address]");
3601
3602 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3603
3604 armv4_5_register_commands(cmd_ctx);
3605
3606 return ERROR_OK;
3607 }
3608
/* Target vector for XScale cores: wires the generic target framework
 * to the XScale-specific implementations in this file.  Operations the
 * XScale port does not implement are left NULL. */
target_type_t xscale_target =
{
	.name = "xscale",

	/* state polling and pretty-printing */
	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	/* run control */
	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	/* reset handling; soft_reset_halt is not supported */
	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = NULL,

	/* GDB register access comes from the generic ARMv4/5 layer */
	.get_gdb_reg_list = armv4_5_get_gdb_reg_list,

	/* memory access; checksum/blank-check reuse the ARM7/9 algorithms */
	.read_memory = xscale_read_memory,
	.write_memory = xscale_write_memory,
	.bulk_write_memory = xscale_bulk_write_memory,
	.checksum_memory = arm7_9_checksum_memory,
	.blank_check_memory = arm7_9_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* breakpoints and watchpoints */
	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	/* lifecycle hooks */
	.register_commands = xscale_register_commands,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	/* MMU / address translation support */
	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)