simplify XScale debug handler installation
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "xscale.h"
31 #include "target_type.h"
32 #include "arm7_9_common.h"
33 #include "arm_simulator.h"
34 #include "arm_disassembler.h"
35 #include "time_support.h"
36 #include "image.h"
37
38
39 /*
40 * Important XScale documents available as of October 2009 include:
41 *
42 * Intel XScale® Core Developer’s Manual, January 2004
43 * Order Number: 273473-002
44 * This has a chapter detailing debug facilities, and punts some
 45 *   details to chip-specific microarchitecture documents.
46 *
47 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
48 * Document Number: 273539-005
49 * Less detailed than the developer's manual, but summarizes those
50 * missing details (for most XScales) and gives LOTS of notes about
51 * debugger/handler interaction issues. Presents a simpler reset
52 * and load-handler sequence than the arch doc. (Note, OpenOCD
53 * doesn't currently support "Hot-Debug" as defined there.)
54 *
55 * Chip-specific microarchitecture documents may also be useful.
56 */
57
58
59 /* forward declarations */
60 static int xscale_resume(struct target_s *, int current,
61 uint32_t address, int handle_breakpoints, int debug_execution);
62 static int xscale_debug_entry(target_t *);
63 static int xscale_restore_context(target_t *);
64 static int xscale_get_reg(reg_t *reg);
65 static int xscale_set_reg(reg_t *reg, uint8_t *buf);
66 static int xscale_set_breakpoint(struct target_s *, breakpoint_t *);
67 static int xscale_set_watchpoint(struct target_s *, watchpoint_t *);
68 static int xscale_unset_breakpoint(struct target_s *, breakpoint_t *);
69 static int xscale_read_trace(target_t *);
70
71
72 static char *const xscale_reg_list[] =
73 {
74 "XSCALE_MAINID", /* 0 */
75 "XSCALE_CACHETYPE",
76 "XSCALE_CTRL",
77 "XSCALE_AUXCTRL",
78 "XSCALE_TTB",
79 "XSCALE_DAC",
80 "XSCALE_FSR",
81 "XSCALE_FAR",
82 "XSCALE_PID",
83 "XSCALE_CPACCESS",
84 "XSCALE_IBCR0", /* 10 */
85 "XSCALE_IBCR1",
86 "XSCALE_DBR0",
87 "XSCALE_DBR1",
88 "XSCALE_DBCON",
89 "XSCALE_TBREG",
90 "XSCALE_CHKPT0",
91 "XSCALE_CHKPT1",
92 "XSCALE_DCSR",
93 "XSCALE_TX",
94 "XSCALE_RX", /* 20 */
95 "XSCALE_TXRXCTRL",
96 };
97
98 static const xscale_reg_t xscale_reg_arch_info[] =
99 {
100 {XSCALE_MAINID, NULL},
101 {XSCALE_CACHETYPE, NULL},
102 {XSCALE_CTRL, NULL},
103 {XSCALE_AUXCTRL, NULL},
104 {XSCALE_TTB, NULL},
105 {XSCALE_DAC, NULL},
106 {XSCALE_FSR, NULL},
107 {XSCALE_FAR, NULL},
108 {XSCALE_PID, NULL},
109 {XSCALE_CPACCESS, NULL},
110 {XSCALE_IBCR0, NULL},
111 {XSCALE_IBCR1, NULL},
112 {XSCALE_DBR0, NULL},
113 {XSCALE_DBR1, NULL},
114 {XSCALE_DBCON, NULL},
115 {XSCALE_TBREG, NULL},
116 {XSCALE_CHKPT0, NULL},
117 {XSCALE_CHKPT1, NULL},
118 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
119 {-1, NULL}, /* TX accessed via JTAG */
120 {-1, NULL}, /* RX accessed via JTAG */
121 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
122 };
123
124 static int xscale_reg_arch_type = -1;
125
126 /* convenience wrapper to access XScale specific registers */
127 static int xscale_set_reg_u32(reg_t *reg, uint32_t value)
128 {
129 uint8_t buf[4];
130
131 buf_set_u32(buf, 0, 32, value);
132
133 return xscale_set_reg(reg, buf);
134 }
135
136
137 static int xscale_get_arch_pointers(target_t *target,
138 armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
139 {
140 armv4_5_common_t *armv4_5 = target->arch_info;
141 xscale_common_t *xscale = armv4_5->arch_info;
142
143 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
144 {
145 LOG_ERROR("target isn't an XScale target");
146 return -1;
147 }
148
149 if (xscale->common_magic != XSCALE_COMMON_MAGIC)
150 {
151 LOG_ERROR("target isn't an XScale target");
152 return -1;
153 }
154
155 *armv4_5_p = armv4_5;
156 *xscale_p = xscale;
157
158 return ERROR_OK;
159 }
160
161 static int xscale_jtag_set_instr(jtag_tap_t *tap, uint32_t new_instr)
162 {
163 if (tap == NULL)
164 return ERROR_FAIL;
165
166 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
167 {
168 scan_field_t field;
169
170 field.tap = tap;
171 field.num_bits = tap->ir_length;
172 field.out_value = calloc(CEIL(field.num_bits, 8), 1);
173 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
174
175 uint8_t tmp[4];
176 field.in_value = tmp;
177
178 jtag_add_ir_scan(1, &field, jtag_get_end_state());
179
180 /* FIX!!!! isn't this check superfluous? verify_ircapture handles this? */
181 jtag_check_value_mask(&field, tap->expected, tap->expected_mask);
182
183 free(field.out_value);
184 }
185
186 return ERROR_OK;
187 }
188
189 static int xscale_read_dcsr(target_t *target)
190 {
191 armv4_5_common_t *armv4_5 = target->arch_info;
192 xscale_common_t *xscale = armv4_5->arch_info;
193
194 int retval;
195
196 scan_field_t fields[3];
197 uint8_t field0 = 0x0;
198 uint8_t field0_check_value = 0x2;
199 uint8_t field0_check_mask = 0x7;
200 uint8_t field2 = 0x0;
201 uint8_t field2_check_value = 0x0;
202 uint8_t field2_check_mask = 0x1;
203
204 jtag_set_end_state(TAP_DRPAUSE);
205 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
206
207 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
208 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
209
210 fields[0].tap = target->tap;
211 fields[0].num_bits = 3;
212 fields[0].out_value = &field0;
213 uint8_t tmp;
214 fields[0].in_value = &tmp;
215
216 fields[1].tap = target->tap;
217 fields[1].num_bits = 32;
218 fields[1].out_value = NULL;
219 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
220
221 fields[2].tap = target->tap;
222 fields[2].num_bits = 1;
223 fields[2].out_value = &field2;
224 uint8_t tmp2;
225 fields[2].in_value = &tmp2;
226
227 jtag_add_dr_scan(3, fields, jtag_get_end_state());
228
229 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
230 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
231
232 if ((retval = jtag_execute_queue()) != ERROR_OK)
233 {
234 LOG_ERROR("JTAG error while reading DCSR");
235 return retval;
236 }
237
238 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
239 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
240
241 /* write the register with the value we just read
 242 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
243 */
244 field0_check_mask = 0x1;
245 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
246 fields[1].in_value = NULL;
247
248 jtag_set_end_state(TAP_IDLE);
249
250 jtag_add_dr_scan(3, fields, jtag_get_end_state());
251
252 /* DANGER!!! this must be here. It will make sure that the arguments
 253 * to jtag_set_check_value() do not go out of scope! */
254 return jtag_execute_queue();
255 }
256
257
258 static void xscale_getbuf(jtag_callback_data_t arg)
259 {
260 uint8_t *in = (uint8_t *)arg;
261 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
262 }
263
264 static int xscale_receive(target_t *target, uint32_t *buffer, int num_words)
265 {
266 if (num_words == 0)
267 return ERROR_INVALID_ARGUMENTS;
268
269 int retval = ERROR_OK;
270 tap_state_t path[3];
271 scan_field_t fields[3];
272 uint8_t *field0 = malloc(num_words * 1);
273 uint8_t field0_check_value = 0x2;
274 uint8_t field0_check_mask = 0x6;
275 uint32_t *field1 = malloc(num_words * 4);
276 uint8_t field2_check_value = 0x0;
277 uint8_t field2_check_mask = 0x1;
278 int words_done = 0;
279 int words_scheduled = 0;
280
281 int i;
282
283 path[0] = TAP_DRSELECT;
284 path[1] = TAP_DRCAPTURE;
285 path[2] = TAP_DRSHIFT;
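 /* going straight from Capture-DR to Shift-DR consumes the TX word
 * (clears TX_READY) so the debug handler can supply the next one;
 * compare the no-consume path in xscale_read_tx()
 */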
286
287 fields[0].tap = target->tap;
288 fields[0].num_bits = 3;
289 fields[0].out_value = NULL;
290 fields[0].in_value = NULL;
291 fields[0].check_value = &field0_check_value;
292 fields[0].check_mask = &field0_check_mask;
293
294 fields[1].tap = target->tap;
295 fields[1].num_bits = 32;
296 fields[1].out_value = NULL;
297 fields[1].check_value = NULL;
298 fields[1].check_mask = NULL;
299
300 fields[2].tap = target->tap;
301 fields[2].num_bits = 1;
302 fields[2].out_value = NULL;
303 fields[2].in_value = NULL;
304 fields[2].check_value = &field2_check_value;
305 fields[2].check_mask = &field2_check_mask;
306
307 jtag_set_end_state(TAP_IDLE);
308 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
309 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
310
311 /* repeat until all words have been collected */
312 int attempts = 0;
313 while (words_done < num_words)
314 {
315 /* schedule reads */
316 words_scheduled = 0;
317 for (i = words_done; i < num_words; i++)
318 {
319 fields[0].in_value = &field0[i];
320
321 jtag_add_pathmove(3, path);
322
323 fields[1].in_value = (uint8_t *)(field1 + i);
324
325 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
326
327 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
328
329 words_scheduled++;
330 }
331
332 if ((retval = jtag_execute_queue()) != ERROR_OK)
333 {
334 LOG_ERROR("JTAG error while receiving data from debug handler");
335 break;
336 }
337
338 /* examine results */
339 for (i = words_done; i < num_words; i++)
340 {
 341 if (!(field0[i] & 1))
342 {
343 /* move backwards if necessary */
344 int j;
345 for (j = i; j < num_words - 1; j++)
346 {
347 field0[j] = field0[j + 1];
348 field1[j] = field1[j + 1];
349 }
350 words_scheduled--;
351 }
352 }
353 if (words_scheduled == 0)
354 {
 355 if (attempts++ == 1000)
356 {
357 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
358 retval = ERROR_TARGET_TIMEOUT;
359 break;
360 }
361 }
362
363 words_done += words_scheduled;
364 }
365
366 for (i = 0; i < num_words; i++)
367 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
368
 369 free(field1);
 free(field0);
370
371 return retval;
372 }
373
374 static int xscale_read_tx(target_t *target, int consume)
375 {
376 armv4_5_common_t *armv4_5 = target->arch_info;
377 xscale_common_t *xscale = armv4_5->arch_info;
378 tap_state_t path[3];
379 tap_state_t noconsume_path[6];
380
381 int retval;
382 struct timeval timeout, now;
383
384 scan_field_t fields[3];
385 uint8_t field0_in = 0x0;
386 uint8_t field0_check_value = 0x2;
387 uint8_t field0_check_mask = 0x6;
388 uint8_t field2_check_value = 0x0;
389 uint8_t field2_check_mask = 0x1;
390
391 jtag_set_end_state(TAP_IDLE);
392
393 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
394
395 path[0] = TAP_DRSELECT;
396 path[1] = TAP_DRCAPTURE;
397 path[2] = TAP_DRSHIFT;
398
399 noconsume_path[0] = TAP_DRSELECT;
400 noconsume_path[1] = TAP_DRCAPTURE;
401 noconsume_path[2] = TAP_DREXIT1;
402 noconsume_path[3] = TAP_DRPAUSE;
403 noconsume_path[4] = TAP_DREXIT2;
404 noconsume_path[5] = TAP_DRSHIFT;
405
406 fields[0].tap = target->tap;
407 fields[0].num_bits = 3;
408 fields[0].out_value = NULL;
409 fields[0].in_value = &field0_in;
410
411 fields[1].tap = target->tap;
412 fields[1].num_bits = 32;
413 fields[1].out_value = NULL;
414 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
415
416 fields[2].tap = target->tap;
417 fields[2].num_bits = 1;
418 fields[2].out_value = NULL;
419 uint8_t tmp;
420 fields[2].in_value = &tmp;
421
422 gettimeofday(&timeout, NULL);
423 timeval_add_time(&timeout, 1, 0);
424
425 for (;;)
426 {
427 /* if we want to consume the register content (i.e. clear TX_READY),
428 * we have to go straight from Capture-DR to Shift-DR
429 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
430 */
431 if (consume)
432 jtag_add_pathmove(3, path);
433 else
434 {
435 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
436 }
437
438 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
439
440 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
441 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
442
443 if ((retval = jtag_execute_queue()) != ERROR_OK)
444 {
445 LOG_ERROR("JTAG error while reading TX");
446 return ERROR_TARGET_TIMEOUT;
447 }
448
449 gettimeofday(&now, NULL);
450 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
451 {
452 LOG_ERROR("time out reading TX register");
453 return ERROR_TARGET_TIMEOUT;
454 }
455 if (!((!(field0_in & 1)) && consume))
456 {
457 goto done;
458 }
459 if (debug_level >= 3)
460 {
461 LOG_DEBUG("waiting 100ms");
462 alive_sleep(100); /* avoid flooding the logs */
463 } else
464 {
465 keep_alive();
466 }
467 }
468 done:
469
470 if (!(field0_in & 1))
471 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
472
473 return ERROR_OK;
474 }
475
476 static int xscale_write_rx(target_t *target)
477 {
478 armv4_5_common_t *armv4_5 = target->arch_info;
479 xscale_common_t *xscale = armv4_5->arch_info;
480
481 int retval;
482 struct timeval timeout, now;
483
484 scan_field_t fields[3];
485 uint8_t field0_out = 0x0;
486 uint8_t field0_in = 0x0;
487 uint8_t field0_check_value = 0x2;
488 uint8_t field0_check_mask = 0x6;
489 uint8_t field2 = 0x0;
490 uint8_t field2_check_value = 0x0;
491 uint8_t field2_check_mask = 0x1;
492
493 jtag_set_end_state(TAP_IDLE);
494
495 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
496
497 fields[0].tap = target->tap;
498 fields[0].num_bits = 3;
499 fields[0].out_value = &field0_out;
500 fields[0].in_value = &field0_in;
501
502 fields[1].tap = target->tap;
503 fields[1].num_bits = 32;
504 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
505 fields[1].in_value = NULL;
506
507 fields[2].tap = target->tap;
508 fields[2].num_bits = 1;
509 fields[2].out_value = &field2;
510 uint8_t tmp;
511 fields[2].in_value = &tmp;
512
513 gettimeofday(&timeout, NULL);
514 timeval_add_time(&timeout, 1, 0);
515
516 /* poll until rx_read is low */
517 LOG_DEBUG("polling RX");
518 for (;;)
519 {
520 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
521
522 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
523 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
524
525 if ((retval = jtag_execute_queue()) != ERROR_OK)
526 {
527 LOG_ERROR("JTAG error while writing RX");
528 return retval;
529 }
530
531 gettimeofday(&now, NULL);
532 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
533 {
534 LOG_ERROR("time out writing RX register");
535 return ERROR_TARGET_TIMEOUT;
536 }
537 if (!(field0_in & 1))
538 goto done;
539 if (debug_level >= 3)
540 {
541 LOG_DEBUG("waiting 100ms");
542 alive_sleep(100); /* avoid flooding the logs */
543 } else
544 {
545 keep_alive();
546 }
547 }
548 done:
549
550 /* set rx_valid */
551 field2 = 0x1;
552 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
553
554 if ((retval = jtag_execute_queue()) != ERROR_OK)
555 {
556 LOG_ERROR("JTAG error while writing RX");
557 return retval;
558 }
559
560 return ERROR_OK;
561 }
562
 563 /* send count elements of size bytes each to the debug handler */
564 static int xscale_send(target_t *target, uint8_t *buffer, int count, int size)
565 {
566 uint32_t t[3];
567 int bits[3];
568 int retval;
569 int done_count = 0;
570
571 jtag_set_end_state(TAP_IDLE);
572
573 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
574
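 /* each DBGRX scan is 3 + 32 + 1 bits: a flag field driven as zero, the
 * 32-bit data word, and a trailing bit held at 1 to mark the word as
 * valid RX data (the rx_valid bit, cf. xscale_write_rx())
 */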
575 bits[0]=3;
576 t[0]=0;
577 bits[1]=32;
578 t[2]=1;
579 bits[2]=1;
580 int endianness = target->endianness;
581 while (done_count++ < count)
582 {
583 switch (size)
584 {
585 case 4:
586 if (endianness == TARGET_LITTLE_ENDIAN)
587 {
588 t[1]=le_to_h_u32(buffer);
589 } else
590 {
591 t[1]=be_to_h_u32(buffer);
592 }
593 break;
594 case 2:
595 if (endianness == TARGET_LITTLE_ENDIAN)
596 {
597 t[1]=le_to_h_u16(buffer);
598 } else
599 {
600 t[1]=be_to_h_u16(buffer);
601 }
602 break;
603 case 1:
604 t[1]=buffer[0];
605 break;
606 default:
607 LOG_ERROR("BUG: size neither 4, 2 nor 1");
608 exit(-1);
609 }
610 jtag_add_dr_out(target->tap,
611 3,
612 bits,
613 t,
614 jtag_set_end_state(TAP_IDLE));
615 buffer += size;
616 }
617
618 if ((retval = jtag_execute_queue()) != ERROR_OK)
619 {
620 LOG_ERROR("JTAG error while sending data to debug handler");
621 return retval;
622 }
623
624 return ERROR_OK;
625 }
626
627 static int xscale_send_u32(target_t *target, uint32_t value)
628 {
629 armv4_5_common_t *armv4_5 = target->arch_info;
630 xscale_common_t *xscale = armv4_5->arch_info;
631
632 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
633 return xscale_write_rx(target);
634 }
635
636 static int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
637 {
638 armv4_5_common_t *armv4_5 = target->arch_info;
639 xscale_common_t *xscale = armv4_5->arch_info;
640
641 int retval;
642
643 scan_field_t fields[3];
644 uint8_t field0 = 0x0;
645 uint8_t field0_check_value = 0x2;
646 uint8_t field0_check_mask = 0x7;
647 uint8_t field2 = 0x0;
648 uint8_t field2_check_value = 0x0;
649 uint8_t field2_check_mask = 0x1;
650
651 if (hold_rst != -1)
652 xscale->hold_rst = hold_rst;
653
654 if (ext_dbg_brk != -1)
655 xscale->external_debug_break = ext_dbg_brk;
656
657 jtag_set_end_state(TAP_IDLE);
658 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
659
660 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
661 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
662
663 fields[0].tap = target->tap;
664 fields[0].num_bits = 3;
665 fields[0].out_value = &field0;
666 uint8_t tmp;
667 fields[0].in_value = &tmp;
668
669 fields[1].tap = target->tap;
670 fields[1].num_bits = 32;
671 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
672 fields[1].in_value = NULL;
673
674 fields[2].tap = target->tap;
675 fields[2].num_bits = 1;
676 fields[2].out_value = &field2;
677 uint8_t tmp2;
678 fields[2].in_value = &tmp2;
679
680 jtag_add_dr_scan(3, fields, jtag_get_end_state());
681
682 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
683 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
684
685 if ((retval = jtag_execute_queue()) != ERROR_OK)
686 {
687 LOG_ERROR("JTAG error while writing DCSR");
688 return retval;
689 }
690
691 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
692 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
693
694 return ERROR_OK;
695 }
696
 697 /* parity of the number of set bits: 0 if even, 1 if odd; for 32-bit words */
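 /* The word is XOR-folded down to a single nibble, and 0x6996 acts as a
 * 16-entry lookup table of nibble parities.  Worked example: 0xf0f0f0f1
 * (17 bits set) folds to 0x1, and bit 1 of 0x6996 is 1, i.e. odd parity.
 */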
698 static unsigned int parity (unsigned int v)
699 {
700 // unsigned int ov = v;
701 v ^= v >> 16;
702 v ^= v >> 8;
703 v ^= v >> 4;
704 v &= 0xf;
705 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
706 return (0x6996 >> v) & 1;
707 }
708
709 static int xscale_load_ic(target_t *target, uint32_t va, uint32_t buffer[8])
710 {
711 uint8_t packet[4];
712 uint8_t cmd;
713 int word;
714 scan_field_t fields[2];
715
716 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
717
718 /* LDIC into IR */
719 jtag_set_end_state(TAP_IDLE);
720 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
721
722 /* CMD is b011 to load a cacheline into the Mini ICache.
723 * Loading into the main ICache is deprecated, and unused.
724 * It's followed by three zero bits, and 27 address bits.
725 */
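 /* The LDIC data register is scanned as 33-bit packets: first the 6-bit
 * command field (b011 plus three zero bits) with VA[31:5] (27 bits),
 * then eight packets each carrying one 32-bit instruction followed by
 * its parity bit.
 */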
726 buf_set_u32(&cmd, 0, 6, 0x3);
727
728 /* virtual address of desired cache line */
729 buf_set_u32(packet, 0, 27, va >> 5);
730
731 fields[0].tap = target->tap;
732 fields[0].num_bits = 6;
733 fields[0].out_value = &cmd;
734 fields[0].in_value = NULL;
735
736 fields[1].tap = target->tap;
737 fields[1].num_bits = 27;
738 fields[1].out_value = packet;
739 fields[1].in_value = NULL;
740
741 jtag_add_dr_scan(2, fields, jtag_get_end_state());
742
743 /* rest of packet is a cacheline: 8 instructions, with parity */
744 fields[0].num_bits = 32;
745 fields[0].out_value = packet;
746
747 fields[1].num_bits = 1;
748 fields[1].out_value = &cmd;
749
750 for (word = 0; word < 8; word++)
751 {
752 buf_set_u32(packet, 0, 32, buffer[word]);
753
754 uint32_t value;
755 memcpy(&value, packet, sizeof(uint32_t));
756 cmd = parity(value);
757
758 jtag_add_dr_scan(2, fields, jtag_get_end_state());
759 }
760
761 return jtag_execute_queue();
762 }
763
764 static int xscale_invalidate_ic_line(target_t *target, uint32_t va)
765 {
766 uint8_t packet[4];
767 uint8_t cmd;
768 scan_field_t fields[2];
769
770 jtag_set_end_state(TAP_IDLE);
771 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
772
773 /* CMD for invalidate IC line b000, bits [6:4] b000 */
774 buf_set_u32(&cmd, 0, 6, 0x0);
775
776 /* virtual address of desired cache line */
777 buf_set_u32(packet, 0, 27, va >> 5);
778
779 fields[0].tap = target->tap;
780 fields[0].num_bits = 6;
781 fields[0].out_value = &cmd;
782 fields[0].in_value = NULL;
783
784 fields[1].tap = target->tap;
785 fields[1].num_bits = 27;
786 fields[1].out_value = packet;
787 fields[1].in_value = NULL;
788
789 jtag_add_dr_scan(2, fields, jtag_get_end_state());
790
791 return ERROR_OK;
792 }
793
794 static int xscale_update_vectors(target_t *target)
795 {
796 armv4_5_common_t *armv4_5 = target->arch_info;
797 xscale_common_t *xscale = armv4_5->arch_info;
798 int i;
799 int retval;
800
801 uint32_t low_reset_branch, high_reset_branch;
802
803 for (i = 1; i < 8; i++)
804 {
805 /* if there's a static vector specified for this exception, override */
806 if (xscale->static_high_vectors_set & (1 << i))
807 {
808 xscale->high_vectors[i] = xscale->static_high_vectors[i];
809 }
810 else
811 {
812 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
813 if (retval == ERROR_TARGET_TIMEOUT)
814 return retval;
815 if (retval != ERROR_OK)
816 {
817 /* Some of these reads will fail as part of normal execution */
818 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
819 }
820 }
821 }
822
823 for (i = 1; i < 8; i++)
824 {
825 if (xscale->static_low_vectors_set & (1 << i))
826 {
827 xscale->low_vectors[i] = xscale->static_low_vectors[i];
828 }
829 else
830 {
831 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
832 if (retval == ERROR_TARGET_TIMEOUT)
833 return retval;
834 if (retval != ERROR_OK)
835 {
836 /* Some of these reads will fail as part of normal execution */
837 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
838 }
839 }
840 }
841
842 /* calculate branches to debug handler */
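 /* the reset vectors get an ARM B instruction whose target is
 * handler_address + 0x20 (presumably the handler's reset entry); the
 * encoded offset is relative to the vector address plus the usual
 * 8-byte PC-ahead pipeline offset, and is expressed in words
 */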
843 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
844 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
845
846 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
847 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
848
849 /* invalidate and load exception vectors in mini i-cache */
850 xscale_invalidate_ic_line(target, 0x0);
851 xscale_invalidate_ic_line(target, 0xffff0000);
852
853 xscale_load_ic(target, 0x0, xscale->low_vectors);
854 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
855
856 return ERROR_OK;
857 }
858
859 static int xscale_arch_state(struct target_s *target)
860 {
861 armv4_5_common_t *armv4_5 = target->arch_info;
862 xscale_common_t *xscale = armv4_5->arch_info;
863
864 static const char *state[] =
865 {
866 "disabled", "enabled"
867 };
868
869 static const char *arch_dbg_reason[] =
870 {
871 "", "\n(processor reset)", "\n(trace buffer full)"
872 };
873
874 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
875 {
876 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
877 exit(-1);
878 }
879
880 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
881 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
882 "MMU: %s, D-Cache: %s, I-Cache: %s"
883 "%s",
884 armv4_5_state_strings[armv4_5->core_state],
885 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
886 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
887 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
888 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
889 state[xscale->armv4_5_mmu.mmu_enabled],
890 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
891 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
892 arch_dbg_reason[xscale->arch_debug_reason]);
893
894 return ERROR_OK;
895 }
896
897 static int xscale_poll(target_t *target)
898 {
899 int retval = ERROR_OK;
900 armv4_5_common_t *armv4_5 = target->arch_info;
901 xscale_common_t *xscale = armv4_5->arch_info;
902
903 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
904 {
905 enum target_state previous_state = target->state;
906 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
907 {
908
909 /* there's data to read from the tx register, we entered debug state */
910 xscale->handler_running = 1;
911
912 target->state = TARGET_HALTED;
913
914 /* process debug entry, fetching current mode regs */
915 retval = xscale_debug_entry(target);
916 }
917 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
918 {
919 LOG_USER("error while polling TX register, reset CPU");
 920 /* here we "lie" so GDB won't get stuck and a reset can be performed */
921 target->state = TARGET_HALTED;
922 }
923
924 /* debug_entry could have overwritten target state (i.e. immediate resume)
925 * don't signal event handlers in that case
926 */
927 if (target->state != TARGET_HALTED)
928 return ERROR_OK;
929
930 /* if target was running, signal that we halted
931 * otherwise we reentered from debug execution */
932 if (previous_state == TARGET_RUNNING)
933 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
934 else
935 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
936 }
937
938 return retval;
939 }
940
941 static int xscale_debug_entry(target_t *target)
942 {
943 armv4_5_common_t *armv4_5 = target->arch_info;
944 xscale_common_t *xscale = armv4_5->arch_info;
945 uint32_t pc;
946 uint32_t buffer[10];
947 int i;
948 int retval;
949
950 uint32_t moe;
951
952 /* clear external dbg break (will be written on next DCSR read) */
953 xscale->external_debug_break = 0;
954 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
955 return retval;
956
957 /* get r0, pc, r1 to r7 and cpsr */
958 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
959 return retval;
960
961 /* move r0 from buffer to register cache */
962 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
963 armv4_5->core_cache->reg_list[0].dirty = 1;
964 armv4_5->core_cache->reg_list[0].valid = 1;
965 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
966
967 /* move pc from buffer to register cache */
968 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
969 armv4_5->core_cache->reg_list[15].dirty = 1;
970 armv4_5->core_cache->reg_list[15].valid = 1;
971 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
972
973 /* move data from buffer to register cache */
974 for (i = 1; i <= 7; i++)
975 {
976 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
977 armv4_5->core_cache->reg_list[i].dirty = 1;
978 armv4_5->core_cache->reg_list[i].valid = 1;
979 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
980 }
981
982 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
983 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
984 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
985 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
986
987 armv4_5->core_mode = buffer[9] & 0x1f;
988 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
989 {
990 target->state = TARGET_UNKNOWN;
991 LOG_ERROR("cpsr contains invalid mode value - communication failure");
992 return ERROR_TARGET_FAILURE;
993 }
994 LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
995
996 if (buffer[9] & 0x20)
997 armv4_5->core_state = ARMV4_5_STATE_THUMB;
998 else
999 armv4_5->core_state = ARMV4_5_STATE_ARM;
1000
1001
1002 if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
1003 return ERROR_FAIL;
1004
1005 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1006 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
1007 {
1008 xscale_receive(target, buffer, 8);
1009 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1010 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
1011 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
1012 }
1013 else
1014 {
1015 /* r8 to r14, but no spsr */
1016 xscale_receive(target, buffer, 7);
1017 }
1018
1019 /* move data from buffer to register cache */
1020 for (i = 8; i <= 14; i++)
1021 {
1022 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
1023 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
1024 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
1025 }
1026
1027 /* examine debug reason */
1028 xscale_read_dcsr(target);
1029 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1030
1031 /* stored PC (for calculating fixup) */
1032 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1033
1034 switch (moe)
1035 {
1036 case 0x0: /* Processor reset */
1037 target->debug_reason = DBG_REASON_DBGRQ;
1038 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1039 pc -= 4;
1040 break;
1041 case 0x1: /* Instruction breakpoint hit */
1042 target->debug_reason = DBG_REASON_BREAKPOINT;
1043 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1044 pc -= 4;
1045 break;
1046 case 0x2: /* Data breakpoint hit */
1047 target->debug_reason = DBG_REASON_WATCHPOINT;
1048 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1049 pc -= 4;
1050 break;
1051 case 0x3: /* BKPT instruction executed */
1052 target->debug_reason = DBG_REASON_BREAKPOINT;
1053 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1054 pc -= 4;
1055 break;
1056 case 0x4: /* Ext. debug event */
1057 target->debug_reason = DBG_REASON_DBGRQ;
1058 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1059 pc -= 4;
1060 break;
 1061 case 0x5: /* Vector trap occurred */
1062 target->debug_reason = DBG_REASON_BREAKPOINT;
1063 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1064 pc -= 4;
1065 break;
1066 case 0x6: /* Trace buffer full break */
1067 target->debug_reason = DBG_REASON_DBGRQ;
1068 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1069 pc -= 4;
1070 break;
1071 case 0x7: /* Reserved (may flag Hot-Debug support) */
1072 default:
1073 LOG_ERROR("Method of Entry is 'Reserved'");
1074 exit(-1);
1075 break;
1076 }
1077
1078 /* apply PC fixup */
1079 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1080
1081 /* on the first debug entry, identify cache type */
1082 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1083 {
1084 uint32_t cache_type_reg;
1085
1086 /* read cp15 cache type register */
1087 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1088 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1089
1090 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1091 }
1092
1093 /* examine MMU and Cache settings */
1094 /* read cp15 control register */
1095 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1096 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1097 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1098 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1099 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1100
 1101 /* if tracing is enabled, read the collected trace data */
1102 if (xscale->trace.buffer_enabled)
1103 {
1104 xscale_read_trace(target);
1105 xscale->trace.buffer_fill--;
1106
1107 /* resume if we're still collecting trace data */
1108 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1109 && (xscale->trace.buffer_fill > 0))
1110 {
1111 xscale_resume(target, 1, 0x0, 1, 0);
1112 }
1113 else
1114 {
1115 xscale->trace.buffer_enabled = 0;
1116 }
1117 }
1118
1119 return ERROR_OK;
1120 }
1121
1122 static int xscale_halt(target_t *target)
1123 {
1124 armv4_5_common_t *armv4_5 = target->arch_info;
1125 xscale_common_t *xscale = armv4_5->arch_info;
1126
1127 LOG_DEBUG("target->state: %s",
1128 target_state_name(target));
1129
1130 if (target->state == TARGET_HALTED)
1131 {
1132 LOG_DEBUG("target was already halted");
1133 return ERROR_OK;
1134 }
1135 else if (target->state == TARGET_UNKNOWN)
1136 {
 1137 /* this must not happen for an XScale target */
1138 LOG_ERROR("target was in unknown state when halt was requested");
1139 return ERROR_TARGET_INVALID;
1140 }
1141 else if (target->state == TARGET_RESET)
1142 {
1143 LOG_DEBUG("target->state == TARGET_RESET");
1144 }
1145 else
1146 {
1147 /* assert external dbg break */
1148 xscale->external_debug_break = 1;
1149 xscale_read_dcsr(target);
1150
1151 target->debug_reason = DBG_REASON_DBGRQ;
1152 }
1153
1154 return ERROR_OK;
1155 }
1156
1157 static int xscale_enable_single_step(struct target_s *target, uint32_t next_pc)
1158 {
1159 armv4_5_common_t *armv4_5 = target->arch_info;
1160 xscale_common_t *xscale= armv4_5->arch_info;
1161 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1162 int retval;
1163
1164 if (xscale->ibcr0_used)
1165 {
1166 breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1167
1168 if (ibcr0_bp)
1169 {
1170 xscale_unset_breakpoint(target, ibcr0_bp);
1171 }
1172 else
1173 {
1174 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1175 exit(-1);
1176 }
1177 }
1178
1179 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1180 return retval;
1181
1182 return ERROR_OK;
1183 }
1184
1185 static int xscale_disable_single_step(struct target_s *target)
1186 {
1187 armv4_5_common_t *armv4_5 = target->arch_info;
1188 xscale_common_t *xscale= armv4_5->arch_info;
1189 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1190 int retval;
1191
1192 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1193 return retval;
1194
1195 return ERROR_OK;
1196 }
1197
1198 static void xscale_enable_watchpoints(struct target_s *target)
1199 {
1200 watchpoint_t *watchpoint = target->watchpoints;
1201
1202 while (watchpoint)
1203 {
1204 if (watchpoint->set == 0)
1205 xscale_set_watchpoint(target, watchpoint);
1206 watchpoint = watchpoint->next;
1207 }
1208 }
1209
1210 static void xscale_enable_breakpoints(struct target_s *target)
1211 {
1212 breakpoint_t *breakpoint = target->breakpoints;
1213
1214 /* set any pending breakpoints */
1215 while (breakpoint)
1216 {
1217 if (breakpoint->set == 0)
1218 xscale_set_breakpoint(target, breakpoint);
1219 breakpoint = breakpoint->next;
1220 }
1221 }
1222
1223 static int xscale_resume(struct target_s *target, int current,
1224 uint32_t address, int handle_breakpoints, int debug_execution)
1225 {
1226 armv4_5_common_t *armv4_5 = target->arch_info;
1227 xscale_common_t *xscale= armv4_5->arch_info;
1228 breakpoint_t *breakpoint = target->breakpoints;
1229
1230 uint32_t current_pc;
1231
1232 int retval;
1233 int i;
1234
1235 LOG_DEBUG("-");
1236
1237 if (target->state != TARGET_HALTED)
1238 {
1239 LOG_WARNING("target not halted");
1240 return ERROR_TARGET_NOT_HALTED;
1241 }
1242
1243 if (!debug_execution)
1244 {
1245 target_free_all_working_areas(target);
1246 }
1247
1248 /* update vector tables */
1249 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1250 return retval;
1251
1252 /* current = 1: continue on current pc, otherwise continue at <address> */
1253 if (!current)
1254 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1255
1256 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1257
1258 /* if we're at the reset vector, we have to simulate the branch */
1259 if (current_pc == 0x0)
1260 {
1261 arm_simulate_step(target, NULL);
1262 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1263 }
1264
1265 /* the front-end may request us not to handle breakpoints */
1266 if (handle_breakpoints)
1267 {
1268 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1269 {
1270 uint32_t next_pc;
1271
1272 /* there's a breakpoint at the current PC, we have to step over it */
1273 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1274 xscale_unset_breakpoint(target, breakpoint);
1275
1276 /* calculate PC of next instruction */
1277 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1278 {
1279 uint32_t current_opcode;
1280 target_read_u32(target, current_pc, &current_opcode);
1281 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1282 }
1283
1284 LOG_DEBUG("enable single-step");
1285 xscale_enable_single_step(target, next_pc);
1286
1287 /* restore banked registers */
1288 xscale_restore_context(target);
1289
1290 /* send resume request (command 0x30 or 0x31)
1291 * clean the trace buffer if it is to be enabled (0x62) */
1292 if (xscale->trace.buffer_enabled)
1293 {
1294 xscale_send_u32(target, 0x62);
1295 xscale_send_u32(target, 0x31);
1296 }
1297 else
1298 xscale_send_u32(target, 0x30);
1299
1300 /* send CPSR */
1301 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1302 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1303
1304 for (i = 7; i >= 0; i--)
1305 {
1306 /* send register */
1307 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1308 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1309 }
1310
1311 /* send PC */
1312 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1313 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1314
1315 /* wait for and process debug entry */
1316 xscale_debug_entry(target);
1317
1318 LOG_DEBUG("disable single-step");
1319 xscale_disable_single_step(target);
1320
1321 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1322 xscale_set_breakpoint(target, breakpoint);
1323 }
1324 }
1325
1326 /* enable any pending breakpoints and watchpoints */
1327 xscale_enable_breakpoints(target);
1328 xscale_enable_watchpoints(target);
1329
1330 /* restore banked registers */
1331 xscale_restore_context(target);
1332
1333 /* send resume request (command 0x30 or 0x31)
1334 * clean the trace buffer if it is to be enabled (0x62) */
1335 if (xscale->trace.buffer_enabled)
1336 {
1337 xscale_send_u32(target, 0x62);
1338 xscale_send_u32(target, 0x31);
1339 }
1340 else
1341 xscale_send_u32(target, 0x30);
1342
1343 /* send CPSR */
1344 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1345 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1346
1347 for (i = 7; i >= 0; i--)
1348 {
1349 /* send register */
1350 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1351 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1352 }
1353
1354 /* send PC */
1355 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1356 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1357
1358 target->debug_reason = DBG_REASON_NOTHALTED;
1359
1360 if (!debug_execution)
1361 {
1362 /* registers are now invalid */
1363 armv4_5_invalidate_core_regs(target);
1364 target->state = TARGET_RUNNING;
1365 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1366 }
1367 else
1368 {
1369 target->state = TARGET_DEBUG_RUNNING;
1370 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1371 }
1372
1373 LOG_DEBUG("target resumed");
1374
1375 xscale->handler_running = 1;
1376
1377 return ERROR_OK;
1378 }
1379
1380 static int xscale_step_inner(struct target_s *target, int current,
1381 uint32_t address, int handle_breakpoints)
1382 {
1383 armv4_5_common_t *armv4_5 = target->arch_info;
1384 xscale_common_t *xscale = armv4_5->arch_info;
1385
1386 uint32_t next_pc;
1387 int retval;
1388 int i;
1389
1390 target->debug_reason = DBG_REASON_SINGLESTEP;
1391
1392 /* calculate PC of next instruction */
1393 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1394 {
1395 uint32_t current_opcode, current_pc;
1396 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1397
1398 target_read_u32(target, current_pc, &current_opcode);
1399 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1400 return retval;
1401 }
1402
1403 LOG_DEBUG("enable single-step");
1404 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1405 return retval;
1406
1407 /* restore banked registers */
1408 if ((retval = xscale_restore_context(target)) != ERROR_OK)
1409 return retval;
1410
1411 /* send resume request (command 0x30 or 0x31)
1412 * clean the trace buffer if it is to be enabled (0x62) */
1413 if (xscale->trace.buffer_enabled)
1414 {
1415 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1416 return retval;
1417 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1418 return retval;
1419 }
1420 else
1421 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1422 return retval;
1423
1424 /* send CPSR */
1425 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32))) != ERROR_OK)
1426 return retval;
1427 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1428
1429 for (i = 7; i >= 0; i--)
1430 {
1431 /* send register */
1432 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1433 return retval;
1434 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1435 }
1436
1437 /* send PC */
1438 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
1439 return retval;
1440 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1441
1442 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1443
1444 /* registers are now invalid */
1445 if ((retval = armv4_5_invalidate_core_regs(target)) != ERROR_OK)
1446 return retval;
1447
1448 /* wait for and process debug entry */
1449 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1450 return retval;
1451
1452 LOG_DEBUG("disable single-step");
1453 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1454 return retval;
1455
1456 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1457
1458 return ERROR_OK;
1459 }
1460
1461 static int xscale_step(struct target_s *target, int current,
1462 uint32_t address, int handle_breakpoints)
1463 {
1464 armv4_5_common_t *armv4_5 = target->arch_info;
1465 breakpoint_t *breakpoint = target->breakpoints;
1466
1467 uint32_t current_pc;
1468 int retval;
1469
1470 if (target->state != TARGET_HALTED)
1471 {
1472 LOG_WARNING("target not halted");
1473 return ERROR_TARGET_NOT_HALTED;
1474 }
1475
1476 /* current = 1: continue on current pc, otherwise continue at <address> */
1477 if (!current)
1478 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1479
1480 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1481
1482 /* if we're at the reset vector, we have to simulate the step */
1483 if (current_pc == 0x0)
1484 {
1485 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1486 return retval;
1487 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1488
1489 target->debug_reason = DBG_REASON_SINGLESTEP;
1490 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1491
1492 return ERROR_OK;
1493 }
1494
1495 /* the front-end may request us not to handle breakpoints */
1496 if (handle_breakpoints)
1497 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1498 {
1499 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1500 return retval;
1501 }
1502
1503 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1504
1505 if (breakpoint)
1506 {
1507 xscale_set_breakpoint(target, breakpoint);
1508 }
1509
1510 LOG_DEBUG("target stepped");
1511
1512 return ERROR_OK;
1513
1514 }
1515
1516 static int xscale_assert_reset(target_t *target)
1517 {
1518 armv4_5_common_t *armv4_5 = target->arch_info;
1519 xscale_common_t *xscale = armv4_5->arch_info;
1520
1521 LOG_DEBUG("target->state: %s",
1522 target_state_name(target));
1523
 1524 /* select DCSR instruction (set end state to R-T-I to ensure we don't
 1525 * end up in T-L-R, which would reset JTAG)
1526 */
1527 jtag_set_end_state(TAP_IDLE);
1528 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
1529
1530 /* set Hold reset, Halt mode and Trap Reset */
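 /* in the DCSR, bit 30 selects halt mode and bit 16 traps the reset
 * exception (per the XScale DCSR layout); the hold-reset flag itself is
 * carried in the SELDCSR scan, via xscale_write_dcsr(target, 1, 0) below
 */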
1531 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1532 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1533 xscale_write_dcsr(target, 1, 0);
1534
1535 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1536 xscale_jtag_set_instr(target->tap, 0x7f);
1537 jtag_execute_queue();
1538
1539 /* assert reset */
1540 jtag_add_reset(0, 1);
1541
1542 /* sleep 1ms, to be sure we fulfill any requirements */
1543 jtag_add_sleep(1000);
1544 jtag_execute_queue();
1545
1546 target->state = TARGET_RESET;
1547
1548 if (target->reset_halt)
1549 {
1550 int retval;
1551 if ((retval = target_halt(target)) != ERROR_OK)
1552 return retval;
1553 }
1554
1555 return ERROR_OK;
1556 }
1557
1558 static int xscale_deassert_reset(target_t *target)
1559 {
1560 armv4_5_common_t *armv4_5 = target->arch_info;
1561 xscale_common_t *xscale = armv4_5->arch_info;
1562 breakpoint_t *breakpoint = target->breakpoints;
1563
1564 LOG_DEBUG("-");
1565
1566 xscale->ibcr_available = 2;
1567 xscale->ibcr0_used = 0;
1568 xscale->ibcr1_used = 0;
1569
1570 xscale->dbr_available = 2;
1571 xscale->dbr0_used = 0;
1572 xscale->dbr1_used = 0;
1573
1574 /* mark all hardware breakpoints as unset */
1575 while (breakpoint)
1576 {
1577 if (breakpoint->type == BKPT_HARD)
1578 {
1579 breakpoint->set = 0;
1580 }
1581 breakpoint = breakpoint->next;
1582 }
1583
1584 if (!xscale->handler_installed)
1585 {
1586 uint32_t address;
1587 unsigned buf_cnt;
1588 const uint8_t *buffer = xscale_debug_handler;
1589 int retval;
1590
1591 /* release SRST */
1592 jtag_add_reset(0, 0);
1593
1594 /* wait 300ms; 150 and 100ms were not enough */
1595 jtag_add_sleep(300*1000);
1596
1597 jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
1598 jtag_execute_queue();
1599
1600 /* set Hold reset, Halt mode and Trap Reset */
1601 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1602 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1603 xscale_write_dcsr(target, 1, 0);
1604
1605 /* Load the debug handler into the mini-icache. Since
1606 * it's using halt mode (not monitor mode), it runs in
1607 * "Special Debug State" for access to registers, memory,
1608 * coprocessors, trace data, etc.
1609 *
1610 * REVISIT: *assumes* we've had a SRST+TRST reset so the
1611 * mini-icache contents have been invalidated. Safest to
1612 * force that, so writing new contents is reliable...
1613 */
1614 address = xscale->handler_address;
1615 for (unsigned binary_size = xscale_debug_handler_size;
1616 binary_size > 0;
1617 binary_size -= buf_cnt, buffer += buf_cnt)
1618 {
1619 uint32_t cache_line[8];
1620 unsigned i;
1621
1622 buf_cnt = binary_size;
1623 if (buf_cnt > 32)
1624 buf_cnt = 32;
1625
1626 for (i = 0; i < buf_cnt; i += 4)
1627 {
1628 /* convert LE buffer to host-endian uint32_t */
1629 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1630 }
1631
1632 for (; i < 32; i += 4)
1633 {
1634 cache_line[i / 4] = 0xe1a08008;
1635 }
1636
1637 /* only load addresses other than the reset vectors */
1638 if ((address % 0x400) != 0x0)
1639 {
1640 retval = xscale_load_ic(target, address,
1641 cache_line);
1642 if (retval != ERROR_OK)
1643 return retval;
1644 }
1645
1646 address += buf_cnt;
 1647 }
1648
1649 retval = xscale_load_ic(target, 0x0,
1650 xscale->low_vectors);
1651 if (retval != ERROR_OK)
1652 return retval;
1653 retval = xscale_load_ic(target, 0xffff0000,
1654 xscale->high_vectors);
1655 if (retval != ERROR_OK)
1656 return retval;
1657
1658 jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
1659
1660 jtag_add_sleep(100000);
1661
1662 /* set Hold reset, Halt mode and Trap Reset */
1663 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1664 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1665 xscale_write_dcsr(target, 1, 0);
1666
1667 /* clear Hold reset to let the target run (should enter debug handler) */
1668 xscale_write_dcsr(target, 0, 1);
1669 target->state = TARGET_RUNNING;
1670
1671 if (!target->reset_halt)
1672 {
1673 jtag_add_sleep(10000);
1674
1675 /* we should have entered debug now */
1676 xscale_debug_entry(target);
1677 target->state = TARGET_HALTED;
1678
1679 /* resume the target */
1680 xscale_resume(target, 1, 0x0, 1, 0);
1681 }
1682 }
1683 else
1684 {
1685 jtag_add_reset(0, 0);
1686 }
1687
1688 return ERROR_OK;
1689 }
1690
1691 static int xscale_read_core_reg(struct target_s *target, int num,
1692 enum armv4_5_mode mode)
1693 {
1694 LOG_ERROR("not implemented");
1695 return ERROR_OK;
1696 }
1697
1698 static int xscale_write_core_reg(struct target_s *target, int num,
1699 enum armv4_5_mode mode, uint32_t value)
1700 {
1701 LOG_ERROR("not implemented");
1702 return ERROR_OK;
1703 }
1704
1705 static int xscale_full_context(target_t *target)
1706 {
1707 armv4_5_common_t *armv4_5 = target->arch_info;
1708
1709 uint32_t *buffer;
1710
1711 int i, j;
1712
1713 LOG_DEBUG("-");
1714
1715 if (target->state != TARGET_HALTED)
1716 {
1717 LOG_WARNING("target not halted");
1718 return ERROR_TARGET_NOT_HALTED;
1719 }
1720
1721 buffer = malloc(4 * 8);
1722
1723 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1724 * we can't enter User mode on an XScale (unpredictable),
1725 * but User shares registers with SYS
1726 */
1727 for (i = 1; i < 7; i++)
1728 {
1729 int valid = 1;
1730
1731 /* check if there are invalid registers in the current mode
1732 */
1733 for (j = 0; j <= 16; j++)
1734 {
1735 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1736 valid = 0;
1737 }
1738
1739 if (!valid)
1740 {
1741 uint32_t tmp_cpsr;
1742
1743 /* request banked registers */
1744 xscale_send_u32(target, 0x0);
1745
1746 tmp_cpsr = 0x0;
1747 tmp_cpsr |= armv4_5_number_to_mode(i);
1748 tmp_cpsr |= 0xc0; /* I/F bits */
1749
1750 /* send CPSR for desired mode */
1751 xscale_send_u32(target, tmp_cpsr);
1752
1753 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1754 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1755 {
1756 xscale_receive(target, buffer, 8);
 1757 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1758 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1759 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1760 }
1761 else
1762 {
1763 xscale_receive(target, buffer, 7);
1764 }
1765
1766 /* move data from buffer to register cache */
1767 for (j = 8; j <= 14; j++)
1768 {
1769 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1770 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1771 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1772 }
1773 }
1774 }
1775
1776 free(buffer);
1777
1778 return ERROR_OK;
1779 }
1780
1781 static int xscale_restore_context(target_t *target)
1782 {
1783 armv4_5_common_t *armv4_5 = target->arch_info;
1784
1785 int i, j;
1786
1787 if (target->state != TARGET_HALTED)
1788 {
1789 LOG_WARNING("target not halted");
1790 return ERROR_TARGET_NOT_HALTED;
1791 }
1792
1793 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1794 * we can't enter User mode on an XScale (unpredictable),
1795 * but User shares registers with SYS
1796 */
1797 for (i = 1; i < 7; i++)
1798 {
1799 int dirty = 0;
1800
 1801 /* check if there are dirty registers in the current mode
 1802 */
1803 for (j = 8; j <= 14; j++)
1804 {
1805 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1806 dirty = 1;
1807 }
1808
1809 /* if not USR/SYS, check if the SPSR needs to be written */
1810 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1811 {
1812 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1813 dirty = 1;
1814 }
1815
1816 if (dirty)
1817 {
1818 uint32_t tmp_cpsr;
1819
1820 /* send banked registers */
1821 xscale_send_u32(target, 0x1);
1822
1823 tmp_cpsr = 0x0;
1824 tmp_cpsr |= armv4_5_number_to_mode(i);
1825 tmp_cpsr |= 0xc0; /* I/F bits */
1826
1827 /* send CPSR for desired mode */
1828 xscale_send_u32(target, tmp_cpsr);
1829
1830 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1831 for (j = 8; j <= 14; j++)
1832 {
 1833 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1834 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1835 }
1836
1837 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1838 {
 1839 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1840 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1841 }
1842 }
1843 }
1844
1845 return ERROR_OK;
1846 }
1847
1848 static int xscale_read_memory(struct target_s *target, uint32_t address,
1849 uint32_t size, uint32_t count, uint8_t *buffer)
1850 {
1851 armv4_5_common_t *armv4_5 = target->arch_info;
1852 xscale_common_t *xscale = armv4_5->arch_info;
1853 uint32_t *buf32;
1854 uint32_t i;
1855 int retval;
1856
1857 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1858
1859 if (target->state != TARGET_HALTED)
1860 {
1861 LOG_WARNING("target not halted");
1862 return ERROR_TARGET_NOT_HALTED;
1863 }
1864
1865 /* sanitize arguments */
1866 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1867 return ERROR_INVALID_ARGUMENTS;
1868
1869 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1870 return ERROR_TARGET_UNALIGNED_ACCESS;
1871
1872 /* send memory read request (command 0x1n, n: access size) */
1873 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1874 return retval;
1875
1876 /* send base address for read request */
1877 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1878 return retval;
1879
1880 /* send number of requested data words */
1881 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1882 return retval;
1883
 1884 /* receive data from target (count 32-bit words in host endianness) */
1885 buf32 = malloc(4 * count);
1886 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1887 { free(buf32); return retval; }
1888
1889 /* extract data from host-endian buffer into byte stream */
1890 for (i = 0; i < count; i++)
1891 {
1892 switch (size)
1893 {
1894 case 4:
1895 target_buffer_set_u32(target, buffer, buf32[i]);
1896 buffer += 4;
1897 break;
1898 case 2:
1899 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1900 buffer += 2;
1901 break;
1902 case 1:
1903 *buffer++ = buf32[i] & 0xff;
1904 break;
1905 default:
1906 LOG_ERROR("should never get here");
1907 exit(-1);
1908 }
1909 }
1910
1911 free(buf32);
1912
1913 /* examine DCSR, to see if Sticky Abort (SA) got set */
1914 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1915 return retval;
1916 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1917 {
1918 /* clear SA bit */
1919 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1920 return retval;
1921
1922 return ERROR_TARGET_DATA_ABORT;
1923 }
1924
1925 return ERROR_OK;
1926 }
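/* Illustrative command stream for the read path above (addresses and sizes
 * are example values): reading eight halfwords at 0x1000 sends 0x12
 * (0x10 | size 2), then 0x1000, then 8, and receives eight 32-bit words
 * back, of which only the low 16 bits of each are meaningful.  A data abort
 * taken by the debug handler only becomes visible afterwards, through the
 * DCSR Sticky Abort check performed above.
 */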
1927
1928 static int xscale_write_memory(struct target_s *target, uint32_t address,
1929 uint32_t size, uint32_t count, uint8_t *buffer)
1930 {
1931 armv4_5_common_t *armv4_5 = target->arch_info;
1932 xscale_common_t *xscale = armv4_5->arch_info;
1933 int retval;
1934
1935 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1936
1937 if (target->state != TARGET_HALTED)
1938 {
1939 LOG_WARNING("target not halted");
1940 return ERROR_TARGET_NOT_HALTED;
1941 }
1942
1943 /* sanitize arguments */
1944 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1945 return ERROR_INVALID_ARGUMENTS;
1946
1947 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1948 return ERROR_TARGET_UNALIGNED_ACCESS;
1949
1950 /* send memory write request (command 0x2n, n: access size) */
1951 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1952 return retval;
1953
1954 /* send base address for write request */
1955 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1956 return retval;
1957
1958 /* send number of data words to be written */
1959 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1960 return retval;
1961
1962 /* unused word-by-word send path, superseded by the bulk xscale_send() below */
1963 #if 0
1964 for (i = 0; i < count; i++)
1965 {
1966 switch (size)
1967 {
1968 case 4:
1969 value = target_buffer_get_u32(target, buffer);
1970 xscale_send_u32(target, value);
1971 buffer += 4;
1972 break;
1973 case 2:
1974 value = target_buffer_get_u16(target, buffer);
1975 xscale_send_u32(target, value);
1976 buffer += 2;
1977 break;
1978 case 1:
1979 value = *buffer;
1980 xscale_send_u32(target, value);
1981 buffer += 1;
1982 break;
1983 default:
1984 LOG_ERROR("should never get here");
1985 exit(-1);
1986 }
1987 }
1988 #endif
1989 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1990 return retval;
1991
1992 /* examine DCSR, to see if Sticky Abort (SA) got set */
1993 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1994 return retval;
1995 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1996 {
1997 /* clear SA bit */
1998 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1999 return retval;
2000
2001 return ERROR_TARGET_DATA_ABORT;
2002 }
2003
2004 return ERROR_OK;
2005 }
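/* The write path mirrors the read path; an illustrative stream (example
 * values): writing four words at 0x2000 sends 0x24 (0x20 | size 4), then
 * 0x2000, then 4, and finally the 16 payload bytes via xscale_send().  As
 * with reads, a fault surfaces only through the Sticky Abort bit checked
 * above.
 */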
2006
2007 static int xscale_bulk_write_memory(target_t *target, uint32_t address,
2008 uint32_t count, uint8_t *buffer)
2009 {
2010 return xscale_write_memory(target, address, 4, count, buffer);
2011 }
2012
2013 static uint32_t xscale_get_ttb(target_t *target)
2014 {
2015 armv4_5_common_t *armv4_5 = target->arch_info;
2016 xscale_common_t *xscale = armv4_5->arch_info;
2017 uint32_t ttb;
2018
2019 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2020 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2021
2022 return ttb;
2023 }
2024
2025 static void xscale_disable_mmu_caches(target_t *target, int mmu,
2026 int d_u_cache, int i_cache)
2027 {
2028 armv4_5_common_t *armv4_5 = target->arch_info;
2029 xscale_common_t *xscale = armv4_5->arch_info;
2030 uint32_t cp15_control;
2031
2032 /* read cp15 control register */
2033 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2034 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2035
2036 if (mmu)
2037 cp15_control &= ~0x1U;
2038
2039 if (d_u_cache)
2040 {
2041 /* clean DCache */
2042 xscale_send_u32(target, 0x50);
2043 xscale_send_u32(target, xscale->cache_clean_address);
2044
2045 /* invalidate DCache */
2046 xscale_send_u32(target, 0x51);
2047
2048 cp15_control &= ~0x4U;
2049 }
2050
2051 if (i_cache)
2052 {
2053 /* invalidate ICache */
2054 xscale_send_u32(target, 0x52);
2055 cp15_control &= ~0x1000U;
2056 }
2057
2058 /* write new cp15 control register */
2059 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2060
2061 /* execute cpwait to ensure outstanding operations complete */
2062 xscale_send_u32(target, 0x53);
2063 }
2064
2065 static void xscale_enable_mmu_caches(target_t *target, int mmu,
2066 int d_u_cache, int i_cache)
2067 {
2068 armv4_5_common_t *armv4_5 = target->arch_info;
2069 xscale_common_t *xscale = armv4_5->arch_info;
2070 uint32_t cp15_control;
2071
2072 /* read cp15 control register */
2073 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2074 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2075
2076 if (mmu)
2077 cp15_control |= 0x1U;
2078
2079 if (d_u_cache)
2080 cp15_control |= 0x4U;
2081
2082 if (i_cache)
2083 cp15_control |= 0x1000U;
2084
2085 /* write new cp15 control register */
2086 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2087
2088 /* execute cpwait to ensure outstanding operations complete */
2089 xscale_send_u32(target, 0x53);
2090 }
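/* The CP15 control bits toggled by the two helpers above are the standard
 * ARM ones: bit 0 (M) enables the MMU, bit 2 (C) the data/unified cache and
 * bit 12 (I) the instruction cache.  As a worked example (starting value
 * assumed), enabling all three on a control word of 0x00000078 yields
 * 0x00000078 | 0x1 | 0x4 | 0x1000 = 0x0000107d.
 */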
2091
2092 static int xscale_set_breakpoint(struct target_s *target,
2093 breakpoint_t *breakpoint)
2094 {
2095 int retval;
2096 armv4_5_common_t *armv4_5 = target->arch_info;
2097 xscale_common_t *xscale = armv4_5->arch_info;
2098
2099 if (target->state != TARGET_HALTED)
2100 {
2101 LOG_WARNING("target not halted");
2102 return ERROR_TARGET_NOT_HALTED;
2103 }
2104
2105 if (breakpoint->set)
2106 {
2107 LOG_WARNING("breakpoint already set");
2108 return ERROR_OK;
2109 }
2110
2111 if (breakpoint->type == BKPT_HARD)
2112 {
2113 uint32_t value = breakpoint->address | 1;
2114 if (!xscale->ibcr0_used)
2115 {
2116 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2117 xscale->ibcr0_used = 1;
2118 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2119 }
2120 else if (!xscale->ibcr1_used)
2121 {
2122 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2123 xscale->ibcr1_used = 1;
2124 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2125 }
2126 else
2127 {
2128 LOG_ERROR("BUG: no hardware comparator available");
2129 return ERROR_OK;
2130 }
2131 }
2132 else if (breakpoint->type == BKPT_SOFT)
2133 {
2134 if (breakpoint->length == 4)
2135 {
2136 /* keep the original instruction in target endianness */
2137 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2138 {
2139 return retval;
2140 }
2141 /* replace it with the ARM breakpoint instruction (xscale->arm_bkpt is host endian) */
2142 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2143 {
2144 return retval;
2145 }
2146 }
2147 else
2148 {
2149 /* keep the original instruction in target endianness */
2150 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2151 {
2152 return retval;
2153 }
2154 /* replace it with the Thumb breakpoint instruction (xscale->thumb_bkpt is host endian) */
2155 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2156 {
2157 return retval;
2158 }
2159 }
2160 breakpoint->set = 1;
2161 }
2162
2163 return ERROR_OK;
2164 }
2165
2166 static int xscale_add_breakpoint(struct target_s *target,
2167 breakpoint_t *breakpoint)
2168 {
2169 armv4_5_common_t *armv4_5 = target->arch_info;
2170 xscale_common_t *xscale = armv4_5->arch_info;
2171
2172 if (target->state != TARGET_HALTED)
2173 {
2174 LOG_WARNING("target not halted");
2175 return ERROR_TARGET_NOT_HALTED;
2176 }
2177
2178 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2179 {
2180 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2181 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2182 }
2183
2184 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2185 {
2186 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2187 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2188 }
2189
2190 if (breakpoint->type == BKPT_HARD)
2191 {
2192 xscale->ibcr_available--;
2193 }
2194
2195 return ERROR_OK;
2196 }
2197
2198 static int xscale_unset_breakpoint(struct target_s *target,
2199 breakpoint_t *breakpoint)
2200 {
2201 int retval;
2202 armv4_5_common_t *armv4_5 = target->arch_info;
2203 xscale_common_t *xscale = armv4_5->arch_info;
2204
2205 if (target->state != TARGET_HALTED)
2206 {
2207 LOG_WARNING("target not halted");
2208 return ERROR_TARGET_NOT_HALTED;
2209 }
2210
2211 if (!breakpoint->set)
2212 {
2213 LOG_WARNING("breakpoint not set");
2214 return ERROR_OK;
2215 }
2216
2217 if (breakpoint->type == BKPT_HARD)
2218 {
2219 if (breakpoint->set == 1)
2220 {
2221 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2222 xscale->ibcr0_used = 0;
2223 }
2224 else if (breakpoint->set == 2)
2225 {
2226 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2227 xscale->ibcr1_used = 0;
2228 }
2229 breakpoint->set = 0;
2230 }
2231 else
2232 {
2233 /* restore original instruction (kept in target endianness) */
2234 if (breakpoint->length == 4)
2235 {
2236 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2237 {
2238 return retval;
2239 }
2240 }
2241 else
2242 {
2243 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2244 {
2245 return retval;
2246 }
2247 }
2248 breakpoint->set = 0;
2249 }
2250
2251 return ERROR_OK;
2252 }
2253
2254 static int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2255 {
2256 armv4_5_common_t *armv4_5 = target->arch_info;
2257 xscale_common_t *xscale = armv4_5->arch_info;
2258
2259 if (target->state != TARGET_HALTED)
2260 {
2261 LOG_WARNING("target not halted");
2262 return ERROR_TARGET_NOT_HALTED;
2263 }
2264
2265 if (breakpoint->set)
2266 {
2267 xscale_unset_breakpoint(target, breakpoint);
2268 }
2269
2270 if (breakpoint->type == BKPT_HARD)
2271 xscale->ibcr_available++;
2272
2273 return ERROR_OK;
2274 }
2275
2276 static int xscale_set_watchpoint(struct target_s *target,
2277 watchpoint_t *watchpoint)
2278 {
2279 armv4_5_common_t *armv4_5 = target->arch_info;
2280 xscale_common_t *xscale = armv4_5->arch_info;
2281 uint8_t enable = 0;
2282 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2283 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2284
2285 if (target->state != TARGET_HALTED)
2286 {
2287 LOG_WARNING("target not halted");
2288 return ERROR_TARGET_NOT_HALTED;
2289 }
2290
2291 xscale_get_reg(dbcon);
2292
2293 switch (watchpoint->rw)
2294 {
2295 case WPT_READ:
2296 enable = 0x3;
2297 break;
2298 case WPT_ACCESS:
2299 enable = 0x2;
2300 break;
2301 case WPT_WRITE:
2302 enable = 0x1;
2303 break;
2304 default:
2305 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2306 }
2307
2308 if (!xscale->dbr0_used)
2309 {
2310 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2311 dbcon_value |= enable;
2312 xscale_set_reg_u32(dbcon, dbcon_value);
2313 watchpoint->set = 1;
2314 xscale->dbr0_used = 1;
2315 }
2316 else if (!xscale->dbr1_used)
2317 {
2318 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2319 dbcon_value |= enable << 2;
2320 xscale_set_reg_u32(dbcon, dbcon_value);
2321 watchpoint->set = 2;
2322 xscale->dbr1_used = 1;
2323 }
2324 else
2325 {
2326 LOG_ERROR("BUG: no hardware comparator available");
2327 return ERROR_OK;
2328 }
2329
2330 return ERROR_OK;
2331 }
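/* DBCON encoding as used above, shown with example values: bits [1:0] arm
 * DBR0 and bits [3:2] arm DBR1 (0x1 write, 0x2 any access, 0x3 read,
 * matching the switch statement).  A write watchpoint on DBR0 plus an
 * access watchpoint on DBR1 would therefore leave
 * DBCON = (0x2 << 2) | 0x1 = 0x9.
 */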
2332
2333 static int xscale_add_watchpoint(struct target_s *target,
2334 watchpoint_t *watchpoint)
2335 {
2336 armv4_5_common_t *armv4_5 = target->arch_info;
2337 xscale_common_t *xscale = armv4_5->arch_info;
2338
2339 if (target->state != TARGET_HALTED)
2340 {
2341 LOG_WARNING("target not halted");
2342 return ERROR_TARGET_NOT_HALTED;
2343 }
2344
2345 if (xscale->dbr_available < 1)
2346 {
2347 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2348 }
2349
2350 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2351 {
2352 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2353 }
2354
2355 xscale->dbr_available--;
2356
2357 return ERROR_OK;
2358 }
2359
2360 static int xscale_unset_watchpoint(struct target_s *target,
2361 watchpoint_t *watchpoint)
2362 {
2363 armv4_5_common_t *armv4_5 = target->arch_info;
2364 xscale_common_t *xscale = armv4_5->arch_info;
2365 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2366 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2367
2368 if (target->state != TARGET_HALTED)
2369 {
2370 LOG_WARNING("target not halted");
2371 return ERROR_TARGET_NOT_HALTED;
2372 }
2373
2374 if (!watchpoint->set)
2375 {
2376 LOG_WARNING("breakpoint not set");
2377 return ERROR_OK;
2378 }
2379
2380 if (watchpoint->set == 1)
2381 {
2382 dbcon_value &= ~0x3;
2383 xscale_set_reg_u32(dbcon, dbcon_value);
2384 xscale->dbr0_used = 0;
2385 }
2386 else if (watchpoint->set == 2)
2387 {
2388 dbcon_value &= ~0xc;
2389 xscale_set_reg_u32(dbcon, dbcon_value);
2390 xscale->dbr1_used = 0;
2391 }
2392 watchpoint->set = 0;
2393
2394 return ERROR_OK;
2395 }
2396
2397 static int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2398 {
2399 armv4_5_common_t *armv4_5 = target->arch_info;
2400 xscale_common_t *xscale = armv4_5->arch_info;
2401
2402 if (target->state != TARGET_HALTED)
2403 {
2404 LOG_WARNING("target not halted");
2405 return ERROR_TARGET_NOT_HALTED;
2406 }
2407
2408 if (watchpoint->set)
2409 {
2410 xscale_unset_watchpoint(target, watchpoint);
2411 }
2412
2413 xscale->dbr_available++;
2414
2415 return ERROR_OK;
2416 }
2417
2418 static int xscale_get_reg(reg_t *reg)
2419 {
2420 xscale_reg_t *arch_info = reg->arch_info;
2421 target_t *target = arch_info->target;
2422 armv4_5_common_t *armv4_5 = target->arch_info;
2423 xscale_common_t *xscale = armv4_5->arch_info;
2424
2425 /* DCSR, TX and RX are accessible via JTAG */
2426 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2427 {
2428 return xscale_read_dcsr(arch_info->target);
2429 }
2430 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2431 {
2432 /* 1 = consume register content */
2433 return xscale_read_tx(arch_info->target, 1);
2434 }
2435 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2436 {
2437 /* can't read from RX register (host -> debug handler) */
2438 return ERROR_OK;
2439 }
2440 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2441 {
2442 /* can't (explicitly) read from TXRXCTRL register */
2443 return ERROR_OK;
2444 }
2445 else /* Other DBG registers have to be transferred by the debug handler */
2446 {
2447 /* send CP read request (command 0x40) */
2448 xscale_send_u32(target, 0x40);
2449
2450 /* send CP register number */
2451 xscale_send_u32(target, arch_info->dbg_handler_number);
2452
2453 /* read register value */
2454 xscale_read_tx(target, 1);
2455 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2456
2457 reg->dirty = 0;
2458 reg->valid = 1;
2459 }
2460
2461 return ERROR_OK;
2462 }
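/* Minimal sketch of the handler-mediated read path above (the register is
 * only an example): fetching XSCALE_DBCON sends 0x40 followed by that
 * register's dbg_handler_number; the handler then places the value in TX,
 * where xscale_read_tx() collects it into the register cache.
 */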
2463
2464 static int xscale_set_reg(reg_t *reg, uint8_t* buf)
2465 {
2466 xscale_reg_t *arch_info = reg->arch_info;
2467 target_t *target = arch_info->target;
2468 armv4_5_common_t *armv4_5 = target->arch_info;
2469 xscale_common_t *xscale = armv4_5->arch_info;
2470 uint32_t value = buf_get_u32(buf, 0, 32);
2471
2472 /* DCSR, TX and RX are accessible via JTAG */
2473 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2474 {
2475 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2476 return xscale_write_dcsr(arch_info->target, -1, -1);
2477 }
2478 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2479 {
2480 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2481 return xscale_write_rx(arch_info->target);
2482 }
2483 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2484 {
2485 /* can't write to TX register (debug-handler -> host) */
2486 return ERROR_OK;
2487 }
2488 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2489 {
2490 /* can't (explicitly) write to TXRXCTRL register */
2491 return ERROR_OK;
2492 }
2493 else /* Other DBG registers have to be transferred by the debug handler */
2494 {
2495 /* send CP write request (command 0x41) */
2496 xscale_send_u32(target, 0x41);
2497
2498 /* send CP register number */
2499 xscale_send_u32(target, arch_info->dbg_handler_number);
2500
2501 /* send CP register value */
2502 xscale_send_u32(target, value);
2503 buf_set_u32(reg->value, 0, 32, value);
2504 }
2505
2506 return ERROR_OK;
2507 }
2508
2509 static int xscale_write_dcsr_sw(target_t *target, uint32_t value)
2510 {
2511 /* get pointers to arch-specific information */
2512 armv4_5_common_t *armv4_5 = target->arch_info;
2513 xscale_common_t *xscale = armv4_5->arch_info;
2514 reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2515 xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
2516
2517 /* send CP write request (command 0x41) */
2518 xscale_send_u32(target, 0x41);
2519
2520 /* send CP register number */
2521 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2522
2523 /* send CP register value */
2524 xscale_send_u32(target, value);
2525 buf_set_u32(dcsr->value, 0, 32, value);
2526
2527 return ERROR_OK;
2528 }
2529
2530 static int xscale_read_trace(target_t *target)
2531 {
2532 /* get pointers to arch-specific information */
2533 armv4_5_common_t *armv4_5 = target->arch_info;
2534 xscale_common_t *xscale = armv4_5->arch_info;
2535 xscale_trace_data_t **trace_data_p;
2536
2537 /* 258 words from debug handler
2538 * 256 trace buffer entries
2539 * 2 checkpoint addresses
2540 */
2541 uint32_t trace_buffer[258];
2542 int is_address[256];
2543 int i, j;
2544
2545 if (target->state != TARGET_HALTED)
2546 {
2547 LOG_WARNING("target must be stopped to read trace data");
2548 return ERROR_TARGET_NOT_HALTED;
2549 }
2550
2551 /* send read trace buffer command (command 0x61) */
2552 xscale_send_u32(target, 0x61);
2553
2554 /* receive trace buffer content */
2555 xscale_receive(target, trace_buffer, 258);
2556
2557 /* parse buffer backwards to identify address entries */
2558 for (i = 255; i >= 0; i--)
2559 {
2560 is_address[i] = 0;
2561 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2562 ((trace_buffer[i] & 0xf0) == 0xd0))
2563 {
2564 if (i >= 1)
2565 is_address[--i] = 1;
2566 if (i >= 1)
2567 is_address[--i] = 1;
2568 if (i >= 1)
2569 is_address[--i] = 1;
2570 if (i >= 1)
2571 is_address[--i] = 1;
2572 }
2573 }
2574
2575
2576 /* search first non-zero entry */
2577 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2578 ;
2579
2580 if (j == 256)
2581 {
2582 LOG_DEBUG("no trace data collected");
2583 return ERROR_XSCALE_NO_TRACE_DATA;
2584 }
2585
2586 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2587 ;
2588
2589 *trace_data_p = malloc(sizeof(xscale_trace_data_t));
2590 (*trace_data_p)->next = NULL;
2591 (*trace_data_p)->chkpt0 = trace_buffer[256];
2592 (*trace_data_p)->chkpt1 = trace_buffer[257];
2593 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2594 (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
2595 (*trace_data_p)->depth = 256 - j;
2596
2597 for (i = j; i < 256; i++)
2598 {
2599 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2600 if (is_address[i])
2601 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2602 else
2603 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2604 }
2605
2606 return ERROR_OK;
2607 }
2608
2609 static int xscale_read_instruction(target_t *target,
2610 arm_instruction_t *instruction)
2611 {
2612 /* get pointers to arch-specific information */
2613 armv4_5_common_t *armv4_5 = target->arch_info;
2614 xscale_common_t *xscale = armv4_5->arch_info;
2615 int i;
2616 int section = -1;
2617 uint32_t size_read;
2618 uint32_t opcode;
2619 int retval;
2620
2621 if (!xscale->trace.image)
2622 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2623
2624 /* search for the section the current instruction belongs to */
2625 for (i = 0; i < xscale->trace.image->num_sections; i++)
2626 {
2627 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2628 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2629 {
2630 section = i;
2631 break;
2632 }
2633 }
2634
2635 if (section == -1)
2636 {
2637 /* current instruction couldn't be found in the image */
2638 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2639 }
2640
2641 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2642 {
2643 uint8_t buf[4];
2644 if ((retval = image_read_section(xscale->trace.image, section,
2645 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2646 4, buf, &size_read)) != ERROR_OK)
2647 {
2648 LOG_ERROR("error while reading instruction: %i", retval);
2649 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2650 }
2651 opcode = target_buffer_get_u32(target, buf);
2652 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2653 }
2654 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2655 {
2656 uint8_t buf[2];
2657 if ((retval = image_read_section(xscale->trace.image, section,
2658 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2659 2, buf, &size_read)) != ERROR_OK)
2660 {
2661 LOG_ERROR("error while reading instruction: %i", retval);
2662 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2663 }
2664 opcode = target_buffer_get_u16(target, buf);
2665 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2666 }
2667 else
2668 {
2669 LOG_ERROR("BUG: unknown core state encountered");
2670 exit(-1);
2671 }
2672
2673 return ERROR_OK;
2674 }
2675
2676 static int xscale_branch_address(xscale_trace_data_t *trace_data,
2677 int i, uint32_t *target)
2678 {
2679 /* if there are less than four entries prior to the indirect branch message
2680 * we can't extract the address */
2681 if (i < 4)
2682 {
2683 return -1;
2684 }
2685
2686 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2687 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2688
2689 return 0;
2690 }
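/* Worked example for the address reconstruction above (entry values are
 * made up): if entries[i-4]..entries[i-1] hold 0xa0, 0x00, 0x12 and 0x34,
 * the reconstructed branch target is
 * 0x34 | (0x12 << 8) | (0x00 << 16) | (0xa0 << 24) = 0xa0001234.
 */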
2691
2692 static int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
2693 {
2694 /* get pointers to arch-specific information */
2695 armv4_5_common_t *armv4_5 = target->arch_info;
2696 xscale_common_t *xscale = armv4_5->arch_info;
2697 int next_pc_ok = 0;
2698 uint32_t next_pc = 0x0;
2699 xscale_trace_data_t *trace_data = xscale->trace.data;
2700 int retval;
2701
2702 while (trace_data)
2703 {
2704 int i, chkpt;
2705 int rollover;
2706 int branch;
2707 int exception;
2708 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2709
2710 chkpt = 0;
2711 rollover = 0;
2712
2713 for (i = 0; i < trace_data->depth; i++)
2714 {
2715 next_pc_ok = 0;
2716 branch = 0;
2717 exception = 0;
2718
2719 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2720 continue;
2721
2722 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2723 {
2724 case 0: /* Exceptions */
2725 case 1:
2726 case 2:
2727 case 3:
2728 case 4:
2729 case 5:
2730 case 6:
2731 case 7:
2732 exception = (trace_data->entries[i].data & 0x70) >> 4;
2733 next_pc_ok = 1;
2734 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2735 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2736 break;
2737 case 8: /* Direct Branch */
2738 branch = 1;
2739 break;
2740 case 9: /* Indirect Branch */
2741 branch = 1;
2742 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2743 {
2744 next_pc_ok = 1;
2745 }
2746 break;
2747 case 13: /* Checkpointed Indirect Branch */
2748 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2749 {
2750 next_pc_ok = 1;
2751 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2752 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2753 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2754 }
2755 /* explicit fall-through */
2756 case 12: /* Checkpointed Direct Branch */
2757 branch = 1;
2758 if (chkpt == 0)
2759 {
2760 next_pc_ok = 1;
2761 next_pc = trace_data->chkpt0;
2762 chkpt++;
2763 }
2764 else if (chkpt == 1)
2765 {
2766 next_pc_ok = 1;
2767 next_pc = trace_data->chkpt1;
2768 chkpt++;
2769 }
2770 else
2771 {
2772 LOG_WARNING("more than two checkpointed branches encountered");
2773 }
2774 break;
2775 case 15: /* Roll-over */
2776 rollover++;
2777 continue;
2778 default: /* Reserved */
2779 command_print(cmd_ctx, "--- reserved trace message ---");
2780 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2781 return ERROR_OK;
2782 }
2783
2784 if (xscale->trace.pc_ok)
2785 {
2786 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2787 arm_instruction_t instruction;
2788
2789 if ((exception == 6) || (exception == 7))
2790 {
2791 /* IRQ or FIQ exception, no instruction executed */
2792 executed -= 1;
2793 }
2794
2795 while (executed-- >= 0)
2796 {
2797 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2798 {
2799 /* can't continue tracing with no image available */
2800 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2801 {
2802 return retval;
2803 }
2804 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2805 {
2806 /* TODO: handle incomplete images */
2807 }
2808 }
2809
2810 /* a precise abort on a load to the PC is included in the incremental
2811 * word count, other instructions causing data aborts are not included
2812 */
2813 if ((executed == 0) && (exception == 4)
2814 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2815 {
2816 if ((instruction.type == ARM_LDM)
2817 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2818 {
2819 executed--;
2820 }
2821 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2822 && (instruction.info.load_store.Rd != 15))
2823 {
2824 executed--;
2825 }
2826 }
2827
2828 /* only the last instruction executed
2829 * (the one that caused the control flow change)
2830 * could be a taken branch
2831 */
2832 if (((executed == -1) && (branch == 1)) &&
2833 (((instruction.type == ARM_B) ||
2834 (instruction.type == ARM_BL) ||
2835 (instruction.type == ARM_BLX)) &&
2836 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2837 {
2838 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2839 }
2840 else
2841 {
2842 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2843 }
2844 command_print(cmd_ctx, "%s", instruction.text);
2845 }
2846
2847 rollover = 0;
2848 }
2849
2850 if (next_pc_ok)
2851 {
2852 xscale->trace.current_pc = next_pc;
2853 xscale->trace.pc_ok = 1;
2854 }
2855 }
2856
2857 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2858 {
2859 arm_instruction_t instruction;
2860 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2861 {
2862 /* can't continue tracing with no image available */
2863 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2864 {
2865 return retval;
2866 }
2867 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2868 {
2869 /* TODO: handle incomplete images */
2870 }
2871 }
2872 command_print(cmd_ctx, "%s", instruction.text);
2873 }
2874
2875 trace_data = trace_data->next;
2876 }
2877
2878 return ERROR_OK;
2879 }
2880
2881 static void xscale_build_reg_cache(target_t *target)
2882 {
2883 /* get pointers to arch-specific information */
2884 armv4_5_common_t *armv4_5 = target->arch_info;
2885 xscale_common_t *xscale = armv4_5->arch_info;
2886
2887 reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
2888 xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
2889 int i;
2890 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
2891
2892 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2893 armv4_5->core_cache = (*cache_p);
2894
2895 /* register a register arch-type for XScale dbg registers only once */
2896 if (xscale_reg_arch_type == -1)
2897 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
2898
2899 (*cache_p)->next = malloc(sizeof(reg_cache_t));
2900 cache_p = &(*cache_p)->next;
2901
2902 /* fill in values for the xscale reg cache */
2903 (*cache_p)->name = "XScale registers";
2904 (*cache_p)->next = NULL;
2905 (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
2906 (*cache_p)->num_regs = num_regs;
2907
2908 for (i = 0; i < num_regs; i++)
2909 {
2910 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2911 (*cache_p)->reg_list[i].value = calloc(4, 1);
2912 (*cache_p)->reg_list[i].dirty = 0;
2913 (*cache_p)->reg_list[i].valid = 0;
2914 (*cache_p)->reg_list[i].size = 32;
2915 (*cache_p)->reg_list[i].bitfield_desc = NULL;
2916 (*cache_p)->reg_list[i].num_bitfields = 0;
2917 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2918 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
2919 arch_info[i] = xscale_reg_arch_info[i];
2920 arch_info[i].target = target;
2921 }
2922
2923 xscale->reg_cache = (*cache_p);
2924 }
2925
2926 static int xscale_init_target(struct command_context_s *cmd_ctx,
2927 struct target_s *target)
2928 {
2929 xscale_build_reg_cache(target);
2930 return ERROR_OK;
2931 }
2932
2933 static int xscale_quit(void)
2934 {
2935 jtag_add_runtest(100, TAP_RESET);
2936 return ERROR_OK;
2937 }
2938
2939 static int xscale_init_arch_info(target_t *target,
2940 xscale_common_t *xscale, jtag_tap_t *tap, const char *variant)
2941 {
2942 armv4_5_common_t *armv4_5;
2943 uint32_t high_reset_branch, low_reset_branch;
2944 int i;
2945
2946 armv4_5 = &xscale->armv4_5_common;
2947
2948 /* store architecture specific data (none so far) */
2949 xscale->arch_info = NULL;
2950 xscale->common_magic = XSCALE_COMMON_MAGIC;
2951
2952 /* we don't really *need* variant info ... */
2953 if (variant) {
2954 int ir_length = 0;
2955
2956 if (strcmp(variant, "pxa250") == 0
2957 || strcmp(variant, "pxa255") == 0
2958 || strcmp(variant, "pxa26x") == 0)
2959 ir_length = 5;
2960 else if (strcmp(variant, "pxa27x") == 0
2961 || strcmp(variant, "ixp42x") == 0
2962 || strcmp(variant, "ixp45x") == 0
2963 || strcmp(variant, "ixp46x") == 0)
2964 ir_length = 7;
2965 else
2966 LOG_WARNING("%s: unrecognized variant %s",
2967 tap->dotted_name, variant);
2968
2969 if (ir_length && ir_length != tap->ir_length) {
2970 LOG_WARNING("%s: IR length for %s is %d; fixing",
2971 tap->dotted_name, variant, ir_length);
2972 tap->ir_length = ir_length;
2973 }
2974 }
2975
2976 /* the debug handler isn't installed (and thus not running) at this time */
2977 xscale->handler_installed = 0;
2978 xscale->handler_running = 0;
2979 xscale->handler_address = 0xfe000800;
2980
2981 /* clear the vectors we keep locally for reference */
2982 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2983 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2984
2985 /* no user-specified vectors have been configured yet */
2986 xscale->static_low_vectors_set = 0x0;
2987 xscale->static_high_vectors_set = 0x0;
2988
2989 /* calculate branches to debug handler */
2990 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2991 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2992
2993 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2994 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
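/* Worked example with the default handler address 0xfe000800:
 * low_reset_branch = (0xfe000800 + 0x20 - 0x0 - 0x8) >> 2 = 0x3f800206,
 * masked to the 24-bit offset 0x800206.  Sign-extended, shifted left by two
 * and adjusted for the +8 pipeline offset, the resulting B at vector 0x0
 * branches to 0xfe000820, the reset entry of the debug handler.
 */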
2995
2996 for (i = 1; i <= 7; i++)
2997 {
2998 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2999 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3000 }
3001
3002 /* 64kB aligned region used for DCache cleaning */
3003 xscale->cache_clean_address = 0xfffe0000;
3004
3005 xscale->hold_rst = 0;
3006 xscale->external_debug_break = 0;
3007
3008 xscale->ibcr_available = 2;
3009 xscale->ibcr0_used = 0;
3010 xscale->ibcr1_used = 0;
3011
3012 xscale->dbr_available = 2;
3013 xscale->dbr0_used = 0;
3014 xscale->dbr1_used = 0;
3015
3016 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3017 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3018
3019 xscale->vector_catch = 0x1;
3020
3021 xscale->trace.capture_status = TRACE_IDLE;
3022 xscale->trace.data = NULL;
3023 xscale->trace.image = NULL;
3024 xscale->trace.buffer_enabled = 0;
3025 xscale->trace.buffer_fill = 0;
3026
3027 /* prepare ARMv4/5 specific information */
3028 armv4_5->arch_info = xscale;
3029 armv4_5->read_core_reg = xscale_read_core_reg;
3030 armv4_5->write_core_reg = xscale_write_core_reg;
3031 armv4_5->full_context = xscale_full_context;
3032
3033 armv4_5_init_arch_info(target, armv4_5);
3034
3035 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3036 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3037 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3038 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3039 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3040 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3041 xscale->armv4_5_mmu.has_tiny_pages = 1;
3042 xscale->armv4_5_mmu.mmu_enabled = 0;
3043
3044 return ERROR_OK;
3045 }
3046
3047 static int xscale_target_create(struct target_s *target, Jim_Interp *interp)
3048 {
3049 xscale_common_t *xscale;
3050
3051 if (xscale_debug_handler_size > 0x800) {
3052 LOG_ERROR("debug_handler.bin: larger than 2kb");
3053 return ERROR_FAIL;
3054 }
3055
3056 xscale = calloc(1, sizeof(*xscale));
3057 if (!xscale)
3058 return ERROR_FAIL;
3059
3060 return xscale_init_arch_info(target, xscale, target->tap,
3061 target->variant);
3062 }
3063
3064 static int
3065 xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx,
3066 char *cmd, char **args, int argc)
3067 {
3068 target_t *target = NULL;
3069 armv4_5_common_t *armv4_5;
3070 xscale_common_t *xscale;
3071
3072 uint32_t handler_address;
3073
3074 if (argc < 2)
3075 {
3076 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3077 return ERROR_OK;
3078 }
3079
3080 if ((target = get_target(args[0])) == NULL)
3081 {
3082 LOG_ERROR("target '%s' not defined", args[0]);
3083 return ERROR_FAIL;
3084 }
3085
3086 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3087 {
3088 return ERROR_FAIL;
3089 }
3090
3091 handler_address = strtoul(args[1], NULL, 0);
3092
3093 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3094 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3095 {
3096 xscale->handler_address = handler_address;
3097 }
3098 else
3099 {
3100 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3101 return ERROR_FAIL;
3102 }
3103
3104 return ERROR_OK;
3105 }
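/* Example invocation from a target configuration script (the target name
 * is a placeholder, the address is the built-in default):
 *
 *	xscale debug_handler xscale0.cpu 0xfe000800
 *
 * The address must fall inside one of the two ranges checked above so that
 * the 2kB handler fits below the respective upper bound.
 */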
3106
3107 static int
3108 xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx,
3109 char *cmd, char **args, int argc)
3110 {
3111 target_t *target = NULL;
3112 armv4_5_common_t *armv4_5;
3113 xscale_common_t *xscale;
3114
3115 uint32_t cache_clean_address;
3116
3117 if (argc < 2)
3118 {
3119 return ERROR_COMMAND_SYNTAX_ERROR;
3120 }
3121
3122 target = get_target(args[0]);
3123 if (target == NULL)
3124 {
3125 LOG_ERROR("target '%s' not defined", args[0]);
3126 return ERROR_FAIL;
3127 }
3128
3129 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3130 {
3131 return ERROR_FAIL;
3132 }
3133
3134 cache_clean_address = strtoul(args[1], NULL, 0);
3135
3136 if (cache_clean_address & 0xffff)
3137 {
3138 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3139 }
3140 else
3141 {
3142 xscale->cache_clean_address = cache_clean_address;
3143 }
3144
3145 return ERROR_OK;
3146 }
3147
3148 static int
3149 xscale_handle_cache_info_command(struct command_context_s *cmd_ctx,
3150 char *cmd, char **args, int argc)
3151 {
3152 target_t *target = get_current_target(cmd_ctx);
3153 armv4_5_common_t *armv4_5;
3154 xscale_common_t *xscale;
3155
3156 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3157 {
3158 return ERROR_OK;
3159 }
3160
3161 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3162 }
3163
3164 static int xscale_virt2phys(struct target_s *target,
3165 uint32_t virtual, uint32_t *physical)
3166 {
3167 armv4_5_common_t *armv4_5;
3168 xscale_common_t *xscale;
3169 int retval;
3170 int type;
3171 uint32_t cb;
3172 int domain;
3173 uint32_t ap;
3174
3175 if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
3176 {
3177 return retval;
3178 }
3179 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3180 if (type == -1)
3181 {
3182 return ret;
3183 }
3184 *physical = ret;
3185 return ERROR_OK;
3186 }
3187
3188 static int xscale_mmu(struct target_s *target, int *enabled)
3189 {
3190 armv4_5_common_t *armv4_5 = target->arch_info;
3191 xscale_common_t *xscale = armv4_5->arch_info;
3192
3193 if (target->state != TARGET_HALTED)
3194 {
3195 LOG_ERROR("Target not halted");
3196 return ERROR_TARGET_INVALID;
3197 }
3198 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3199 return ERROR_OK;
3200 }
3201
3202 static int xscale_handle_mmu_command(command_context_t *cmd_ctx,
3203 char *cmd, char **args, int argc)
3204 {
3205 target_t *target = get_current_target(cmd_ctx);
3206 armv4_5_common_t *armv4_5;
3207 xscale_common_t *xscale;
3208
3209 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3210 {
3211 return ERROR_OK;
3212 }
3213
3214 if (target->state != TARGET_HALTED)
3215 {
3216 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3217 return ERROR_OK;
3218 }
3219
3220 if (argc >= 1)
3221 {
3222 if (strcmp("enable", args[0]) == 0)
3223 {
3224 xscale_enable_mmu_caches(target, 1, 0, 0);
3225 xscale->armv4_5_mmu.mmu_enabled = 1;
3226 }
3227 else if (strcmp("disable", args[0]) == 0)
3228 {
3229 xscale_disable_mmu_caches(target, 1, 0, 0);
3230 xscale->armv4_5_mmu.mmu_enabled = 0;
3231 }
3232 }
3233
3234 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3235
3236 return ERROR_OK;
3237 }
3238
3239 static int xscale_handle_idcache_command(command_context_t *cmd_ctx,
3240 char *cmd, char **args, int argc)
3241 {
3242 target_t *target = get_current_target(cmd_ctx);
3243 armv4_5_common_t *armv4_5;
3244 xscale_common_t *xscale;
3245 int icache = 0, dcache = 0;
3246
3247 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3248 {
3249 return ERROR_OK;
3250 }
3251
3252 if (target->state != TARGET_HALTED)
3253 {
3254 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3255 return ERROR_OK;
3256 }
3257
3258 if (strcmp(cmd, "icache") == 0)
3259 icache = 1;
3260 else if (strcmp(cmd, "dcache") == 0)
3261 dcache = 1;
3262
3263 if (argc >= 1)
3264 {
3265 if (strcmp("enable", args[0]) == 0)
3266 {
3267 xscale_enable_mmu_caches(target, 0, dcache, icache);
3268
3269 if (icache)
3270 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3271 else if (dcache)
3272 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3273 }
3274 else if (strcmp("disable", args[0]) == 0)
3275 {
3276 xscale_disable_mmu_caches(target, 0, dcache, icache);
3277
3278 if (icache)
3279 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3280 else if (dcache)
3281 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3282 }
3283 }
3284
3285 if (icache)
3286 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3287
3288 if (dcache)
3289 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3290
3291 return ERROR_OK;
3292 }
3293
3294 static int xscale_handle_vector_catch_command(command_context_t *cmd_ctx,
3295 char *cmd, char **args, int argc)
3296 {
3297 target_t *target = get_current_target(cmd_ctx);
3298 armv4_5_common_t *armv4_5;
3299 xscale_common_t *xscale;
3300
3301 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3302 {
3303 return ERROR_OK;
3304 }
3305
3306 if (argc < 1)
3307 {
3308 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3309 }
3310 else
3311 {
3312 xscale->vector_catch = strtoul(args[0], NULL, 0);
3313 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3314 xscale_write_dcsr(target, -1, -1);
3315 }
3316
3317 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3318
3319 return ERROR_OK;
3320 }
3321
3322
3323 static int xscale_handle_vector_table_command(command_context_t *cmd_ctx,
3324 char *cmd, char **args, int argc)
3325 {
3326 target_t *target = get_current_target(cmd_ctx);
3327 armv4_5_common_t *armv4_5;
3328 xscale_common_t *xscale;
3329 int err = 0;
3330
3331 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3332 {
3333 return ERROR_OK;
3334 }
3335
3336 if (argc == 0) /* print current settings */
3337 {
3338 int idx;
3339
3340 command_print(cmd_ctx, "active user-set static vectors:");
3341 for (idx = 1; idx < 8; idx++)
3342 if (xscale->static_low_vectors_set & (1 << idx))
3343 command_print(cmd_ctx, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3344 for (idx = 1; idx < 8; idx++)
3345 if (xscale->static_high_vectors_set & (1 << idx))
3346 command_print(cmd_ctx, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3347 return ERROR_OK;
3348 }
3349
3350 if (argc != 3)
3351 err = 1;
3352 else
3353 {
3354 int idx;
3355 uint32_t vec;
3356 idx = strtoul(args[1], NULL, 0);
3357 vec = strtoul(args[2], NULL, 0);
3358
3359 if (idx < 1 || idx >= 8)
3360 err = 1;
3361
3362 if (!err && strcmp(args[0], "low") == 0)
3363 {
3364 xscale->static_low_vectors_set |= (1<<idx);
3365 xscale->static_low_vectors[idx] = vec;
3366 }
3367 else if (!err && (strcmp(args[0], "high") == 0))
3368 {
3369 xscale->static_high_vectors_set |= (1<<idx);
3370 xscale->static_high_vectors[idx] = vec;
3371 }
3372 else
3373 err = 1;
3374 }
3375
3376 if (err)
3377 command_print(cmd_ctx, "usage: xscale vector_table <high|low> <index> <code>");
3378
3379 return ERROR_OK;
3380 }
3381
3382
3383 static int
3384 xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx,
3385 char *cmd, char **args, int argc)
3386 {
3387 target_t *target = get_current_target(cmd_ctx);
3388 armv4_5_common_t *armv4_5;
3389 xscale_common_t *xscale;
3390 uint32_t dcsr_value;
3391
3392 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3393 {
3394 return ERROR_OK;
3395 }
3396
3397 if (target->state != TARGET_HALTED)
3398 {
3399 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3400 return ERROR_OK;
3401 }
3402
3403 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3404 {
3405 xscale_trace_data_t *td, *next_td;
3406 xscale->trace.buffer_enabled = 1;
3407
3408 /* free old trace data */
3409 td = xscale->trace.data;
3410 while (td)
3411 {
3412 next_td = td->next;
3413
3414 if (td->entries)
3415 free(td->entries);
3416 free(td);
3417 td = next_td;
3418 }
3419 xscale->trace.data = NULL;
3420 }
3421 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3422 {
3423 xscale->trace.buffer_enabled = 0;
3424 }
3425
3426 if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
3427 {
3428 if (argc >= 3)
3429 xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
3430 else
3431 xscale->trace.buffer_fill = 1;
3432 }
3433 else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
3434 {
3435 xscale->trace.buffer_fill = -1;
3436 }
3437
3438 if (xscale->trace.buffer_enabled)
3439 {
3440 /* when enabling the trace buffer, the current PC is the
3441 * address of the first instruction that will be traced */
3442 xscale->trace.pc_ok = 1;
3443 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3444 }
3445 else
3446 {
3447 /* otherwise the address is unknown, and we have no known good PC */
3448 xscale->trace.pc_ok = 0;
3449 }
3450
3451 command_print(cmd_ctx, "trace buffer %s (%s)",
3452 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3453 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3454
3455 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3456 if (xscale->trace.buffer_fill >= 0)
3457 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3458 else
3459 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3460
3461 return ERROR_OK;
3462 }
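/* Example usage of the command above (illustrative only):
 *
 *	xscale trace_buffer enable fill 1
 *	xscale trace_buffer enable wrap
 *	xscale trace_buffer disable
 *
 * "fill" requests that tracing stop after the given number of buffer fills
 * (default 1, reflected in the DCSR write above), while "wrap" lets the
 * 256-entry buffer overwrite its oldest entries until the target halts.
 */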
3463
3464 static int
3465 xscale_handle_trace_image_command(struct command_context_s *cmd_ctx,
3466 char *cmd, char **args, int argc)
3467 {
3468 target_t *target;
3469 armv4_5_common_t *armv4_5;
3470 xscale_common_t *xscale;
3471
3472 if (argc < 1)
3473 {
3474 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3475 return ERROR_OK;
3476 }
3477
3478 target = get_current_target(cmd_ctx);
3479
3480 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3481 {
3482 return ERROR_OK;
3483 }
3484
3485 if (xscale->trace.image)
3486 {
3487 image_close(xscale->trace.image);
3488 free(xscale->trace.image);
3489 command_print(cmd_ctx, "previously loaded image found and closed");
3490 }
3491
3492 xscale->trace.image = malloc(sizeof(image_t));
3493 xscale->trace.image->base_address_set = 0;
3494 xscale->trace.image->start_address_set = 0;
3495
3496 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3497 if (argc >= 2)
3498 {
3499 xscale->trace.image->base_address_set = 1;
3500 xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
3501 }
3502 else
3503 {
3504 xscale->trace.image->base_address_set = 0;
3505 }
3506
3507 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
3508 {
3509 free(xscale->trace.image);
3510 xscale->trace.image = NULL;
3511 return ERROR_OK;
3512 }
3513
3514 return ERROR_OK;
3515 }
3516
3517 static int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx,
3518 char *cmd, char **args, int argc)
3519 {
3520 target_t *target = get_current_target(cmd_ctx);
3521 armv4_5_common_t *armv4_5;
3522 xscale_common_t *xscale;
3523 xscale_trace_data_t *trace_data;
3524 fileio_t file;
3525
3526 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3527 {
3528 return ERROR_OK;
3529 }
3530
3531 if (target->state != TARGET_HALTED)
3532 {
3533 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3534 return ERROR_OK;
3535 }
3536
3537 if (argc < 1)
3538 {
3539 command_print(cmd_ctx, "usage: xscale dump_trace <file>");
3540 return ERROR_OK;
3541 }
3542
3543 trace_data = xscale->trace.data;
3544
3545 if (!trace_data)
3546 {
3547 command_print(cmd_ctx, "no trace data collected");
3548 return ERROR_OK;
3549 }
3550
3551 if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3552 {
3553 return ERROR_OK;
3554 }
3555
3556 while (trace_data)
3557 {
3558 int i;
3559
3560 fileio_write_u32(&file, trace_data->chkpt0);
3561 fileio_write_u32(&file, trace_data->chkpt1);
3562 fileio_write_u32(&file, trace_data->last_instruction);
3563 fileio_write_u32(&file, trace_data->depth);
3564
3565 for (i = 0; i < trace_data->depth; i++)
3566 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3567
3568 trace_data = trace_data->next;
3569 }
3570
3571 fileio_close(&file);
3572
3573 return ERROR_OK;
3574 }
3575
3576 static int
3577 xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx,
3578 char *cmd, char **args, int argc)
3579 {
3580 target_t *target = get_current_target(cmd_ctx);
3581 armv4_5_common_t *armv4_5;
3582 xscale_common_t *xscale;
3583
3584 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3585 {
3586 return ERROR_OK;
3587 }
3588
3589 xscale_analyze_trace(target, cmd_ctx);
3590
3591 return ERROR_OK;
3592 }
3593
3594 static int xscale_handle_cp15(command_context_t *cmd_ctx,
3595 char *cmd, char **args, int argc)
3596 {
3597 target_t *target = get_current_target(cmd_ctx);
3598 armv4_5_common_t *armv4_5;
3599 xscale_common_t *xscale;
3600
3601 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3602 {
3603 return ERROR_OK;
3604 }
3605
3606 if (target->state != TARGET_HALTED)
3607 {
3608 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3609 return ERROR_OK;
3610 }
3611 uint32_t reg_no = 0;
3612 reg_t *reg = NULL;
3613 if (argc > 0)
3614 {
3615 reg_no = strtoul(args[0], NULL, 0);
3616 /*translate from xscale cp15 register no to openocd register*/
3617 switch (reg_no)
3618 {
3619 case 0:
3620 reg_no = XSCALE_MAINID;
3621 break;
3622 case 1:
3623 reg_no = XSCALE_CTRL;
3624 break;
3625 case 2:
3626 reg_no = XSCALE_TTB;
3627 break;
3628 case 3:
3629 reg_no = XSCALE_DAC;
3630 break;
3631 case 5:
3632 reg_no = XSCALE_FSR;
3633 break;
3634 case 6:
3635 reg_no = XSCALE_FAR;
3636 break;
3637 case 13:
3638 reg_no = XSCALE_PID;
3639 break;
3640 case 15:
3641 reg_no = XSCALE_CPACCESS;
3642 break;
3643 default:
3644 command_print(cmd_ctx, "invalid register number");
3645 return ERROR_INVALID_ARGUMENTS;
3646 }
3647 reg = &xscale->reg_cache->reg_list[reg_no];
3648
3649 }
3650 if (argc == 1)
3651 {
3652 uint32_t value;
3653
3654 /* read cp15 control register */
3655 xscale_get_reg(reg);
3656 value = buf_get_u32(reg->value, 0, 32);
3657 command_print(cmd_ctx, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3658 }
3659 else if (argc == 2)
3660 {
3661
3662 uint32_t value = strtoul(args[1], NULL, 0);
3663
3664 /* send CP write request (command 0x41) */
3665 xscale_send_u32(target, 0x41);
3666
3667 /* send CP register number */
3668 xscale_send_u32(target, reg_no);
3669
3670 /* send CP register value */
3671 xscale_send_u32(target, value);
3672
3673 /* execute cpwait to ensure outstanding operations complete */
3674 xscale_send_u32(target, 0x53);
3675 }
3676 else
3677 {
3678 command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
3679 }
3680
3681 return ERROR_OK;
3682 }
3683
3684 static int xscale_register_commands(struct command_context_s *cmd_ctx)
3685 {
3686 command_t *xscale_cmd;
3687
3688 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3689
3690 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3691 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3692
3693 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3694 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3695 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3696 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3697
3698 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
3699 register_command(cmd_ctx, xscale_cmd, "vector_table", xscale_handle_vector_table_command, COMMAND_EXEC, "<high|low> <index> <code> set static code for exception handler entry");
3700
3701 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable | disable> ['fill' [n]|'wrap']");
3702
3703 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3704 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3705 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3706 COMMAND_EXEC, "load image from <file> [base address]");
3707
3708 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3709
3710 armv4_5_register_commands(cmd_ctx);
3711
3712 return ERROR_OK;
3713 }
3714
3715 target_type_t xscale_target =
3716 {
3717 .name = "xscale",
3718
3719 .poll = xscale_poll,
3720 .arch_state = xscale_arch_state,
3721
3722 .target_request_data = NULL,
3723
3724 .halt = xscale_halt,
3725 .resume = xscale_resume,
3726 .step = xscale_step,
3727
3728 .assert_reset = xscale_assert_reset,
3729 .deassert_reset = xscale_deassert_reset,
3730 .soft_reset_halt = NULL,
3731
3732 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
3733
3734 .read_memory = xscale_read_memory,
3735 .write_memory = xscale_write_memory,
3736 .bulk_write_memory = xscale_bulk_write_memory,
3737 .checksum_memory = arm7_9_checksum_memory,
3738 .blank_check_memory = arm7_9_blank_check_memory,
3739
3740 .run_algorithm = armv4_5_run_algorithm,
3741
3742 .add_breakpoint = xscale_add_breakpoint,
3743 .remove_breakpoint = xscale_remove_breakpoint,
3744 .add_watchpoint = xscale_add_watchpoint,
3745 .remove_watchpoint = xscale_remove_watchpoint,
3746
3747 .register_commands = xscale_register_commands,
3748 .target_create = xscale_target_create,
3749 .init_target = xscale_init_target,
3750 .quit = xscale_quit,
3751
3752 .virt2phys = xscale_virt2phys,
3753 .mmu = xscale_mmu
3754 };
