1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * This program is free software; you can redistribute it and/or modify *
9 * it under the terms of the GNU General Public License as published by *
10 * the Free Software Foundation; either version 2 of the License, or *
11 * (at your option) any later version. *
12 * *
13 * This program is distributed in the hope that it will be useful, *
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
16 * GNU General Public License for more details. *
17 * *
18 * You should have received a copy of the GNU General Public License *
19 * along with this program; if not, write to the *
20 * Free Software Foundation, Inc., *
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
22 ***************************************************************************/
23 #ifdef HAVE_CONFIG_H
24 #include "config.h"
25 #endif
26
27 #include "replacements.h"
28
29 #include "xscale.h"
30
31 #include "arm7_9_common.h"
32 #include "register.h"
33 #include "target.h"
34 #include "armv4_5.h"
35 #include "arm_simulator.h"
36 #include "arm_disassembler.h"
37 #include "log.h"
38 #include "jtag.h"
39 #include "binarybuffer.h"
40 #include "time_support.h"
41 #include "breakpoints.h"
42 #include "fileio.h"
43
44 #include <stdlib.h>
45 #include <string.h>
46
47 #include <sys/types.h>
48 #include <unistd.h>
49 #include <errno.h>
50
51
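/* Debugging an XScale core works through a small debug handler that is
 * downloaded into the mini instruction cache (xscale_deassert_reset /
 * xscale_load_ic). The host exchanges data with that handler through the
 * DBGTX and DBGRX JTAG data registers (xscale_read_tx / xscale_write_rx),
 * while the DCSR JTAG register controls hold-reset and external debug break.
 * The exception vectors at 0x0 and 0xffff0000 are rewritten so that resets
 * and debug events branch into the handler (xscale_update_vectors). */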
52 /* cli handling */
53 int xscale_register_commands(struct command_context_s *cmd_ctx);
54
55 /* forward declarations */
56 int xscale_target_create(struct target_s *target, Jim_Interp *interp);
57 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target);
58 int xscale_quit(void);
59
60 int xscale_arch_state(struct target_s *target);
61 int xscale_poll(target_t *target);
62 int xscale_halt(target_t *target);
63 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution);
64 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints);
65 int xscale_debug_entry(target_t *target);
66 int xscale_restore_context(target_t *target);
67
68 int xscale_assert_reset(target_t *target);
69 int xscale_deassert_reset(target_t *target);
70 int xscale_soft_reset_halt(struct target_s *target);
71
72 int xscale_set_reg_u32(reg_t *reg, u32 value);
73
74 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode);
75 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value);
76
77 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
78 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
79 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer);
80
81 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
82 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
83 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
84 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
85 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
86 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
87 void xscale_enable_watchpoints(struct target_s *target);
88 void xscale_enable_breakpoints(struct target_s *target);
89 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical);
90 static int xscale_mmu(struct target_s *target, int *enabled);
91
92 int xscale_read_trace(target_t *target);
93
94 target_type_t xscale_target =
95 {
96 .name = "xscale",
97
98 .poll = xscale_poll,
99 .arch_state = xscale_arch_state,
100
101 .target_request_data = NULL,
102
103 .halt = xscale_halt,
104 .resume = xscale_resume,
105 .step = xscale_step,
106
107 .assert_reset = xscale_assert_reset,
108 .deassert_reset = xscale_deassert_reset,
109 .soft_reset_halt = xscale_soft_reset_halt,
110
111 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
112
113 .read_memory = xscale_read_memory,
114 .write_memory = xscale_write_memory,
115 .bulk_write_memory = xscale_bulk_write_memory,
116 .checksum_memory = arm7_9_checksum_memory,
117 .blank_check_memory = arm7_9_blank_check_memory,
118
119 .run_algorithm = armv4_5_run_algorithm,
120
121 .add_breakpoint = xscale_add_breakpoint,
122 .remove_breakpoint = xscale_remove_breakpoint,
123 .add_watchpoint = xscale_add_watchpoint,
124 .remove_watchpoint = xscale_remove_watchpoint,
125
126 .register_commands = xscale_register_commands,
127 .target_create = xscale_target_create,
128 .init_target = xscale_init_target,
129 .quit = xscale_quit,
130
131 .virt2phys = xscale_virt2phys,
132 .mmu = xscale_mmu
133 };
134
135 char* xscale_reg_list[] =
136 {
137 "XSCALE_MAINID", /* 0 */
138 "XSCALE_CACHETYPE",
139 "XSCALE_CTRL",
140 "XSCALE_AUXCTRL",
141 "XSCALE_TTB",
142 "XSCALE_DAC",
143 "XSCALE_FSR",
144 "XSCALE_FAR",
145 "XSCALE_PID",
146 "XSCALE_CPACCESS",
147 "XSCALE_IBCR0", /* 10 */
148 "XSCALE_IBCR1",
149 "XSCALE_DBR0",
150 "XSCALE_DBR1",
151 "XSCALE_DBCON",
152 "XSCALE_TBREG",
153 "XSCALE_CHKPT0",
154 "XSCALE_CHKPT1",
155 "XSCALE_DCSR",
156 "XSCALE_TX",
157 "XSCALE_RX", /* 20 */
158 "XSCALE_TXRXCTRL",
159 };
160
161 xscale_reg_t xscale_reg_arch_info[] =
162 {
163 {XSCALE_MAINID, NULL},
164 {XSCALE_CACHETYPE, NULL},
165 {XSCALE_CTRL, NULL},
166 {XSCALE_AUXCTRL, NULL},
167 {XSCALE_TTB, NULL},
168 {XSCALE_DAC, NULL},
169 {XSCALE_FSR, NULL},
170 {XSCALE_FAR, NULL},
171 {XSCALE_PID, NULL},
172 {XSCALE_CPACCESS, NULL},
173 {XSCALE_IBCR0, NULL},
174 {XSCALE_IBCR1, NULL},
175 {XSCALE_DBR0, NULL},
176 {XSCALE_DBR1, NULL},
177 {XSCALE_DBCON, NULL},
178 {XSCALE_TBREG, NULL},
179 {XSCALE_CHKPT0, NULL},
180 {XSCALE_CHKPT1, NULL},
181 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
182 {-1, NULL}, /* TX accessed via JTAG */
183 {-1, NULL}, /* RX accessed via JTAG */
184 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
185 };
186
187 int xscale_reg_arch_type = -1;
188
189 int xscale_get_reg(reg_t *reg);
190 int xscale_set_reg(reg_t *reg, u8 *buf);
191
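/* resolve the two-level arch_info chain (target -> armv4_5 -> xscale) and
 * sanity-check both common_magic values before handing the pointers back */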
192 int xscale_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
193 {
194 armv4_5_common_t *armv4_5 = target->arch_info;
195 xscale_common_t *xscale = armv4_5->arch_info;
196
197 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
198 {
199 LOG_ERROR("target isn't an XScale target");
200 return -1;
201 }
202
203 if (xscale->common_magic != XSCALE_COMMON_MAGIC)
204 {
205 LOG_ERROR("target isn't an XScale target");
206 return -1;
207 }
208
209 *armv4_5_p = armv4_5;
210 *xscale_p = xscale;
211
212 return ERROR_OK;
213 }
214
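/* queue an IR scan selecting new_instr, but only if the TAP isn't already
 * holding that instruction; the captured IR bits are checked against the
 * TAP's expected value via jtag_set_check_value() */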
215 int xscale_jtag_set_instr(jtag_tap_t *tap, u32 new_instr)
216 {
217 if (tap==NULL)
218 return ERROR_FAIL;
219
220 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
221 {
222 scan_field_t field;
223
224 field.tap = tap;
225 field.num_bits = tap->ir_length;
226 field.out_value = calloc(CEIL(field.num_bits, 8), 1);
227 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
228 field.out_mask = NULL;
229 field.in_value = NULL;
230 jtag_set_check_value(&field, tap->expected, tap->expected_mask, NULL);
231
232 jtag_add_ir_scan(1, &field, TAP_INVALID);
233
234 free(field.out_value);
235 }
236
237 return ERROR_OK;
238 }
239
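/* DCSR scans use three fields: a 3-bit control word carrying hold_rst and
 * external_debug_break, the 32-bit DCSR itself, and a trailing single bit.
 * The register is read in a first scan and then written back unchanged in a
 * second scan so the control bits take effect. */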
240 int xscale_read_dcsr(target_t *target)
241 {
242 armv4_5_common_t *armv4_5 = target->arch_info;
243 xscale_common_t *xscale = armv4_5->arch_info;
244
245 int retval;
246
247 scan_field_t fields[3];
248 u8 field0 = 0x0;
249 u8 field0_check_value = 0x2;
250 u8 field0_check_mask = 0x7;
251 u8 field2 = 0x0;
252 u8 field2_check_value = 0x0;
253 u8 field2_check_mask = 0x1;
254
255 jtag_add_end_state(TAP_DRPAUSE);
256 xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dcsr);
257
258 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
259 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
260
261 fields[0].tap = xscale->jtag_info.tap;
262 fields[0].num_bits = 3;
263 fields[0].out_value = &field0;
264 fields[0].out_mask = NULL;
265 fields[0].in_value = NULL;
266 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
267
268 fields[1].tap = xscale->jtag_info.tap;
269 fields[1].num_bits = 32;
270 fields[1].out_value = NULL;
271 fields[1].out_mask = NULL;
272 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
273 fields[1].in_handler = NULL;
274 fields[1].in_handler_priv = NULL;
275 fields[1].in_check_value = NULL;
276 fields[1].in_check_mask = NULL;
277
278 fields[2].tap = xscale->jtag_info.tap;
279 fields[2].num_bits = 1;
280 fields[2].out_value = &field2;
281 fields[2].out_mask = NULL;
282 fields[2].in_value = NULL;
283 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
284
285 jtag_add_dr_scan(3, fields, TAP_INVALID);
286
287 if ((retval = jtag_execute_queue()) != ERROR_OK)
288 {
289 LOG_ERROR("JTAG error while reading DCSR");
290 return retval;
291 }
292
293 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
294 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
295
296 /* write the register with the value we just read
297 * on this second pass, only the first bit of field0 is guaranteed to be 0
298 */
299 field0_check_mask = 0x1;
300 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
301 fields[1].in_value = NULL;
302
303 jtag_add_end_state(TAP_IDLE);
304
305 jtag_add_dr_scan(3, fields, TAP_INVALID);
306
307 /* DANGER!!! this must be here. It will make sure that the arguments
308 * to jtag_set_check_value() do not go out of scope! */
309 return jtag_execute_queue();
310 }
311
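/* read num_words 32-bit words from the debug handler via DBGTX; bit 0 of the
 * 3-bit status field flags whether the scanned word was valid (TX full), and
 * invalid words are re-read until everything has arrived or 1000 empty
 * rounds have passed */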
312 int xscale_receive(target_t *target, u32 *buffer, int num_words)
313 {
314 if (num_words==0)
315 return ERROR_INVALID_ARGUMENTS;
316
317 int retval=ERROR_OK;
318 armv4_5_common_t *armv4_5 = target->arch_info;
319 xscale_common_t *xscale = armv4_5->arch_info;
320
321 tap_state_t path[3];
322 scan_field_t fields[3];
323
324 u8 *field0 = malloc(num_words * 1);
325 u8 field0_check_value = 0x2;
326 u8 field0_check_mask = 0x6;
327 u32 *field1 = malloc(num_words * 4);
328 u8 field2_check_value = 0x0;
329 u8 field2_check_mask = 0x1;
330 int words_done = 0;
331 int words_scheduled = 0;
332
333 int i;
334
335 path[0] = TAP_DRSELECT;
336 path[1] = TAP_DRCAPTURE;
337 path[2] = TAP_DRSHIFT;
338
339 fields[0].tap = xscale->jtag_info.tap;
340 fields[0].num_bits = 3;
341 fields[0].out_value = NULL;
342 fields[0].out_mask = NULL;
343 fields[0].in_value = NULL;
344 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
345
346 fields[1].tap = xscale->jtag_info.tap;
347 fields[1].num_bits = 32;
348 fields[1].out_value = NULL;
349 fields[1].out_mask = NULL;
350 fields[1].in_value = NULL;
351 fields[1].in_handler = NULL;
352 fields[1].in_handler_priv = NULL;
353 fields[1].in_check_value = NULL;
354 fields[1].in_check_mask = NULL;
355
356 fields[2].tap = xscale->jtag_info.tap;
357 fields[2].num_bits = 1;
358 fields[2].out_value = NULL;
359 fields[2].out_mask = NULL;
360 fields[2].in_value = NULL;
361 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
362
363 jtag_add_end_state(TAP_IDLE);
364 xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgtx);
365 jtag_add_runtest(1, TAP_INVALID); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
366
367 /* repeat until all words have been collected */
368 int attempts=0;
369 while (words_done < num_words)
370 {
371 /* schedule reads */
372 words_scheduled = 0;
373 for (i = words_done; i < num_words; i++)
374 {
375 fields[0].in_value = &field0[i];
376 fields[1].in_handler = buf_to_u32_handler;
377 fields[1].in_handler_priv = (u8*)&field1[i];
378
379 jtag_add_pathmove(3, path);
380 jtag_add_dr_scan(3, fields, TAP_IDLE);
381 words_scheduled++;
382 }
383
384 if ((retval = jtag_execute_queue()) != ERROR_OK)
385 {
386 LOG_ERROR("JTAG error while receiving data from debug handler");
387 break;
388 }
389
390 /* examine results */
391 for (i = words_done; i < num_words; i++)
392 {
393 if (!(field0[i] & 1))
394 {
395 /* move backwards if necessary */
396 int j;
397 for (j = i; j < num_words - 1; j++)
398 {
399 field0[j] = field0[j+1];
400 field1[j] = field1[j+1];
401 }
402 words_scheduled--;
403 }
404 }
405 if (words_scheduled==0)
406 {
407 if (attempts++==1000)
408 {
409 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
410 retval=ERROR_TARGET_TIMEOUT;
411 break;
412 }
413 }
414
415 words_done += words_scheduled;
416 }
417
418 for (i = 0; i < num_words; i++)
419 *(buffer++) = buf_get_u32((u8*)&field1[i], 0, 32);
420
421 free(field1);
free(field0); /* field0 was also heap-allocated above */
422
423 return retval;
424 }
425
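/* read the TX register once; with 'consume' set the scan goes straight from
 * Capture-DR to Shift-DR, which clears TX_READY, otherwise a detour through
 * Pause-DR leaves the handler's word in place. Polls TX_READY for up to one
 * second. */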
426 int xscale_read_tx(target_t *target, int consume)
427 {
428 armv4_5_common_t *armv4_5 = target->arch_info;
429 xscale_common_t *xscale = armv4_5->arch_info;
430 tap_state_t path[3];
431 tap_state_t noconsume_path[6];
432
433 int retval;
434 struct timeval timeout, now;
435
436 scan_field_t fields[3];
437 u8 field0_in = 0x0;
438 u8 field0_check_value = 0x2;
439 u8 field0_check_mask = 0x6;
440 u8 field2_check_value = 0x0;
441 u8 field2_check_mask = 0x1;
442
443 jtag_add_end_state(TAP_IDLE);
444
445 xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgtx);
446
447 path[0] = TAP_DRSELECT;
448 path[1] = TAP_DRCAPTURE;
449 path[2] = TAP_DRSHIFT;
450
451 noconsume_path[0] = TAP_DRSELECT;
452 noconsume_path[1] = TAP_DRCAPTURE;
453 noconsume_path[2] = TAP_DREXIT1;
454 noconsume_path[3] = TAP_DRPAUSE;
455 noconsume_path[4] = TAP_DREXIT2;
456 noconsume_path[5] = TAP_DRSHIFT;
457
458 fields[0].tap = xscale->jtag_info.tap;
459 fields[0].num_bits = 3;
460 fields[0].out_value = NULL;
461 fields[0].out_mask = NULL;
462 fields[0].in_value = &field0_in;
463 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
464
465 fields[1].tap = xscale->jtag_info.tap;
466 fields[1].num_bits = 32;
467 fields[1].out_value = NULL;
468 fields[1].out_mask = NULL;
469 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
470 fields[1].in_handler = NULL;
471 fields[1].in_handler_priv = NULL;
472 fields[1].in_check_value = NULL;
473 fields[1].in_check_mask = NULL;
474
475 fields[2].tap = xscale->jtag_info.tap;
476 fields[2].num_bits = 1;
477 fields[2].out_value = NULL;
478 fields[2].out_mask = NULL;
479 fields[2].in_value = NULL;
480 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
481
482 gettimeofday(&timeout, NULL);
483 timeval_add_time(&timeout, 1, 0);
484
485 for (;;)
486 {
487 /* if we want to consume the register content (i.e. clear TX_READY),
488 * we have to go straight from Capture-DR to Shift-DR
489 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
490 */
491 if (consume)
492 jtag_add_pathmove(3, path);
493 else
494 {
495 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
496 }
497
498 jtag_add_dr_scan(3, fields, TAP_IDLE);
499
500 if ((retval = jtag_execute_queue()) != ERROR_OK)
501 {
502 LOG_ERROR("JTAG error while reading TX");
503 return ERROR_TARGET_TIMEOUT;
504 }
505
506 gettimeofday(&now, NULL);
507 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
508 {
509 LOG_ERROR("time out reading TX register");
510 return ERROR_TARGET_TIMEOUT;
511 }
512 if (!((!(field0_in & 1)) && consume))
513 {
514 goto done;
515 }
516 if (debug_level>=3)
517 {
518 LOG_DEBUG("waiting 100ms");
519 alive_sleep(100); /* avoid flooding the logs */
520 } else
521 {
522 keep_alive();
523 }
524 }
525 done:
526
527 if (!(field0_in & 1))
528 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
529
530 return ERROR_OK;
531 }
532
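/* write the cached RX register value to the debug handler via DBGRX: scan
 * repeatedly (with rx_valid clear) until the handshake bit reads low, i.e.
 * the handler has consumed the previous word, then repeat the scan once more
 * with rx_valid set to hand over the new word */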
533 int xscale_write_rx(target_t *target)
534 {
535 armv4_5_common_t *armv4_5 = target->arch_info;
536 xscale_common_t *xscale = armv4_5->arch_info;
537
538 int retval;
539 struct timeval timeout, now;
540
541 scan_field_t fields[3];
542 u8 field0_out = 0x0;
543 u8 field0_in = 0x0;
544 u8 field0_check_value = 0x2;
545 u8 field0_check_mask = 0x6;
546 u8 field2 = 0x0;
547 u8 field2_check_value = 0x0;
548 u8 field2_check_mask = 0x1;
549
550 jtag_add_end_state(TAP_IDLE);
551
552 xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgrx);
553
554 fields[0].tap = xscale->jtag_info.tap;
555 fields[0].num_bits = 3;
556 fields[0].out_value = &field0_out;
557 fields[0].out_mask = NULL;
558 fields[0].in_value = &field0_in;
559 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
560
561 fields[1].tap = xscale->jtag_info.tap;
562 fields[1].num_bits = 32;
563 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
564 fields[1].out_mask = NULL;
565 fields[1].in_value = NULL;
566 fields[1].in_handler = NULL;
567 fields[1].in_handler_priv = NULL;
568 fields[1].in_check_value = NULL;
569 fields[1].in_check_mask = NULL;
570
571 fields[2].tap = xscale->jtag_info.tap;
572 fields[2].num_bits = 1;
573 fields[2].out_value = &field2;
574 fields[2].out_mask = NULL;
575 fields[2].in_value = NULL;
576 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
577
578 gettimeofday(&timeout, NULL);
579 timeval_add_time(&timeout, 1, 0);
580
581 /* poll until rx_read is low */
582 LOG_DEBUG("polling RX");
583 for (;;)
584 {
585 jtag_add_dr_scan(3, fields, TAP_IDLE);
586
587 if ((retval = jtag_execute_queue()) != ERROR_OK)
588 {
589 LOG_ERROR("JTAG error while writing RX");
590 return retval;
591 }
592
593 gettimeofday(&now, NULL);
594 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
595 {
596 LOG_ERROR("time out writing RX register");
597 return ERROR_TARGET_TIMEOUT;
598 }
599 if (!(field0_in & 1))
600 goto done;
601 if (debug_level>=3)
602 {
603 LOG_DEBUG("waiting 100ms");
604 alive_sleep(100); /* avoid flooding the logs */
605 } else
606 {
607 keep_alive();
608 }
609 }
610 done:
611
612 /* set rx_valid */
613 field2 = 0x1;
614 jtag_add_dr_scan(3, fields, TAP_IDLE);
615
616 if ((retval = jtag_execute_queue()) != ERROR_OK)
617 {
618 LOG_ERROR("JTAG error while writing RX");
619 return retval;
620 }
621
622 return ERROR_OK;
623 }
624
625 /* send count elements of size byte to the debug handler */
626 int xscale_send(target_t *target, u8 *buffer, int count, int size)
627 {
628 armv4_5_common_t *armv4_5 = target->arch_info;
629 xscale_common_t *xscale = armv4_5->arch_info;
630 u32 t[3];
631 int bits[3];
632
633 int retval;
634
635 int done_count = 0;
636
637 jtag_add_end_state(TAP_IDLE);
638
639 xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgrx);
640
641 bits[0]=3;
642 t[0]=0;
643 bits[1]=32;
644 t[2]=1;
645 bits[2]=1;
646 int endianness = target->endianness;
647 while (done_count++ < count)
648 {
649 switch (size)
650 {
651 case 4:
652 if (endianness == TARGET_LITTLE_ENDIAN)
653 {
654 t[1]=le_to_h_u32(buffer);
655 } else
656 {
657 t[1]=be_to_h_u32(buffer);
658 }
659 break;
660 case 2:
661 if (endianness == TARGET_LITTLE_ENDIAN)
662 {
663 t[1]=le_to_h_u16(buffer);
664 } else
665 {
666 t[1]=be_to_h_u16(buffer);
667 }
668 break;
669 case 1:
670 t[1]=buffer[0];
671 break;
672 default:
673 LOG_ERROR("BUG: size neither 4, 2 nor 1");
674 exit(-1);
675 }
676 jtag_add_dr_out(xscale->jtag_info.tap,
677 3,
678 bits,
679 t,
680 TAP_IDLE);
681 buffer += size;
682 }
683
684 if ((retval = jtag_execute_queue()) != ERROR_OK)
685 {
686 LOG_ERROR("JTAG error while sending data to debug handler");
687 return retval;
688 }
689
690 return ERROR_OK;
691 }
692
693 int xscale_send_u32(target_t *target, u32 value)
694 {
695 armv4_5_common_t *armv4_5 = target->arch_info;
696 xscale_common_t *xscale = armv4_5->arch_info;
697
698 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
699 return xscale_write_rx(target);
700 }
701
702 int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
703 {
704 armv4_5_common_t *armv4_5 = target->arch_info;
705 xscale_common_t *xscale = armv4_5->arch_info;
706
707 int retval;
708
709 scan_field_t fields[3];
710 u8 field0 = 0x0;
711 u8 field0_check_value = 0x2;
712 u8 field0_check_mask = 0x7;
713 u8 field2 = 0x0;
714 u8 field2_check_value = 0x0;
715 u8 field2_check_mask = 0x1;
716
717 if (hold_rst != -1)
718 xscale->hold_rst = hold_rst;
719
720 if (ext_dbg_brk != -1)
721 xscale->external_debug_break = ext_dbg_brk;
722
723 jtag_add_end_state(TAP_IDLE);
724 xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dcsr);
725
726 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
727 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
728
729 fields[0].tap = xscale->jtag_info.tap;
730 fields[0].num_bits = 3;
731 fields[0].out_value = &field0;
732 fields[0].out_mask = NULL;
733 fields[0].in_value = NULL;
734 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
735
736 fields[1].tap = xscale->jtag_info.tap;
737 fields[1].num_bits = 32;
738 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
739 fields[1].out_mask = NULL;
740 fields[1].in_value = NULL;
741 fields[1].in_handler = NULL;
742 fields[1].in_handler_priv = NULL;
743 fields[1].in_check_value = NULL;
744 fields[1].in_check_mask = NULL;
745
746 fields[2].tap = xscale->jtag_info.tap;
747 fields[2].num_bits = 1;
748 fields[2].out_value = &field2;
749 fields[2].out_mask = NULL;
750 fields[2].in_value = NULL;
751 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
752
753 jtag_add_dr_scan(3, fields, TAP_INVALID);
754
755 if ((retval = jtag_execute_queue()) != ERROR_OK)
756 {
757 LOG_ERROR("JTAG error while writing DCSR");
758 return retval;
759 }
760
761 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
762 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
763
764 return ERROR_OK;
765 }
766
767 /* parity of a 32-bit word: returns 0 if the number of set bits is even, 1 if odd */
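/* e.g. parity(0x3) == 0 (two bits set), parity(0x7) == 1 (three bits set) */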
768 unsigned int parity (unsigned int v)
769 {
770 unsigned int ov = v;
771 v ^= v >> 16;
772 v ^= v >> 8;
773 v ^= v >> 4;
774 v &= 0xf;
775 LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
776 return (0x6996 >> v) & 1;
777 }
778
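/* load one 8-word line into the (mini) instruction cache via the LDIC JTAG
 * function: a 6-bit command (b011 = mini IC, b010 = main IC) plus the 27-bit
 * line address (va >> 5) is scanned first, followed by eight scans of a
 * 32-bit data word and its parity bit */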
779 int xscale_load_ic(target_t *target, int mini, u32 va, u32 buffer[8])
780 {
781 armv4_5_common_t *armv4_5 = target->arch_info;
782 xscale_common_t *xscale = armv4_5->arch_info;
783 u8 packet[4];
784 u8 cmd;
785 int word;
786
787 scan_field_t fields[2];
788
789 LOG_DEBUG("loading miniIC at 0x%8.8x", va);
790
791 jtag_add_end_state(TAP_IDLE);
792 xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.ldic); /* LDIC */
793
794 /* CMD is b010 for Main IC and b011 for Mini IC */
795 if (mini)
796 buf_set_u32(&cmd, 0, 3, 0x3);
797 else
798 buf_set_u32(&cmd, 0, 3, 0x2);
799
800 buf_set_u32(&cmd, 3, 3, 0x0);
801
802 /* virtual address of desired cache line */
803 buf_set_u32(packet, 0, 27, va >> 5);
804
805 fields[0].tap = xscale->jtag_info.tap;
806 fields[0].num_bits = 6;
807 fields[0].out_value = &cmd;
808 fields[0].out_mask = NULL;
809 fields[0].in_value = NULL;
810 fields[0].in_check_value = NULL;
811 fields[0].in_check_mask = NULL;
812 fields[0].in_handler = NULL;
813 fields[0].in_handler_priv = NULL;
814
815 fields[1].tap = xscale->jtag_info.tap;
816 fields[1].num_bits = 27;
817 fields[1].out_value = packet;
818 fields[1].out_mask = NULL;
819 fields[1].in_value = NULL;
820 fields[1].in_check_value = NULL;
821 fields[1].in_check_mask = NULL;
822 fields[1].in_handler = NULL;
823 fields[1].in_handler_priv = NULL;
824
825 jtag_add_dr_scan(2, fields, TAP_INVALID);
826
827 fields[0].num_bits = 32;
828 fields[0].out_value = packet;
829
830 fields[1].num_bits = 1;
831 fields[1].out_value = &cmd;
832
833 for (word = 0; word < 8; word++)
834 {
835 buf_set_u32(packet, 0, 32, buffer[word]);
836 cmd = parity(*((u32*)packet));
837 jtag_add_dr_scan(2, fields, TAP_INVALID);
838 }
839
840 jtag_execute_queue();
841
842 return ERROR_OK;
843 }
844
845 int xscale_invalidate_ic_line(target_t *target, u32 va)
846 {
847 armv4_5_common_t *armv4_5 = target->arch_info;
848 xscale_common_t *xscale = armv4_5->arch_info;
849 u8 packet[4];
850 u8 cmd;
851
852 scan_field_t fields[2];
853
854 jtag_add_end_state(TAP_IDLE);
855 xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.ldic); /* LDIC */
856
857 /* CMD for invalidate IC line b000, bits [6:4] b000 */
858 buf_set_u32(&cmd, 0, 6, 0x0);
859
860 /* virtual address of desired cache line */
861 buf_set_u32(packet, 0, 27, va >> 5);
862
863 fields[0].tap = xscale->jtag_info.tap;
864 fields[0].num_bits = 6;
865 fields[0].out_value = &cmd;
866 fields[0].out_mask = NULL;
867 fields[0].in_value = NULL;
868 fields[0].in_check_value = NULL;
869 fields[0].in_check_mask = NULL;
870 fields[0].in_handler = NULL;
871 fields[0].in_handler_priv = NULL;
872
873 fields[1].tap = xscale->jtag_info.tap;
874 fields[1].num_bits = 27;
875 fields[1].out_value = packet;
876 fields[1].out_mask = NULL;
877 fields[1].in_value = NULL;
878 fields[1].in_check_value = NULL;
879 fields[1].in_check_mask = NULL;
880 fields[1].in_handler = NULL;
881 fields[1].in_handler_priv = NULL;
882
883 jtag_add_dr_scan(2, fields, TAP_INVALID);
884
885 return ERROR_OK;
886 }
887
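/* rebuild the low (0x0) and high (0xffff0000) exception vector tables in the
 * mini i-cache: vectors 1-7 come from user-supplied static vectors or are
 * read back from target memory, while the two reset vectors are replaced by
 * branches to the debug handler entry at handler_address + 0x20 */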
888 int xscale_update_vectors(target_t *target)
889 {
890 armv4_5_common_t *armv4_5 = target->arch_info;
891 xscale_common_t *xscale = armv4_5->arch_info;
892 int i;
893 int retval;
894
895 u32 low_reset_branch, high_reset_branch;
896
897 for (i = 1; i < 8; i++)
898 {
899 /* if there's a static vector specified for this exception, override */
900 if (xscale->static_high_vectors_set & (1 << i))
901 {
902 xscale->high_vectors[i] = xscale->static_high_vectors[i];
903 }
904 else
905 {
906 retval=target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
907 if (retval == ERROR_TARGET_TIMEOUT)
908 return retval;
909 if (retval!=ERROR_OK)
910 {
911 /* Some of these reads will fail as part of normal execution */
912 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
913 }
914 }
915 }
916
917 for (i = 1; i < 8; i++)
918 {
919 if (xscale->static_low_vectors_set & (1 << i))
920 {
921 xscale->low_vectors[i] = xscale->static_low_vectors[i];
922 }
923 else
924 {
925 retval=target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
926 if (retval == ERROR_TARGET_TIMEOUT)
927 return retval;
928 if (retval!=ERROR_OK)
929 {
930 /* Some of these reads will fail as part of normal execution */
931 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
932 }
933 }
934 }
935
936 /* calculate branches to debug handler */
937 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
938 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
939
940 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
941 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
942
943 /* invalidate and load exception vectors in mini i-cache */
944 xscale_invalidate_ic_line(target, 0x0);
945 xscale_invalidate_ic_line(target, 0xffff0000);
946
947 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
948 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
949
950 return ERROR_OK;
951 }
952
953 int xscale_arch_state(struct target_s *target)
954 {
955 armv4_5_common_t *armv4_5 = target->arch_info;
956 xscale_common_t *xscale = armv4_5->arch_info;
957
958 char *state[] =
959 {
960 "disabled", "enabled"
961 };
962
963 char *arch_dbg_reason[] =
964 {
965 "", "\n(processor reset)", "\n(trace buffer full)"
966 };
967
968 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
969 {
970 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
971 exit(-1);
972 }
973
974 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
975 "cpsr: 0x%8.8x pc: 0x%8.8x\n"
976 "MMU: %s, D-Cache: %s, I-Cache: %s"
977 "%s",
978 armv4_5_state_strings[armv4_5->core_state],
979 Jim_Nvp_value2name_simple( nvp_target_debug_reason, target->debug_reason )->name ,
980 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
981 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
982 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
983 state[xscale->armv4_5_mmu.mmu_enabled],
984 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
985 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
986 arch_dbg_reason[xscale->arch_debug_reason]);
987
988 return ERROR_OK;
989 }
990
991 int xscale_poll(target_t *target)
992 {
993 int retval=ERROR_OK;
994 armv4_5_common_t *armv4_5 = target->arch_info;
995 xscale_common_t *xscale = armv4_5->arch_info;
996
997 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
998 {
999 enum target_state previous_state = target->state;
1000 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
1001 {
1002
1003 /* there's data to read from the tx register, we entered debug state */
1004 xscale->handler_running = 1;
1005
1006 target->state = TARGET_HALTED;
1007
1008 /* process debug entry, fetching current mode regs */
1009 retval = xscale_debug_entry(target);
1010 }
1011 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1012 {
1013 LOG_USER("error while polling TX register, reset CPU");
1014 /* here we "lie" so GDB won't get stuck and a reset can be performed */
1015 target->state = TARGET_HALTED;
1016 }
1017
1018 /* debug_entry could have overwritten target state (i.e. immediate resume)
1019 * don't signal event handlers in that case
1020 */
1021 if (target->state != TARGET_HALTED)
1022 return ERROR_OK;
1023
1024 /* if target was running, signal that we halted
1025 * otherwise we reentered from debug execution */
1026 if (previous_state == TARGET_RUNNING)
1027 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1028 else
1029 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
1030 }
1031
1032 return retval;
1033 }
1034
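/* collect the state the debug handler sends on debug entry: r0, pc, r1-r7
 * and the cpsr, then the banked r8-r14 (plus the spsr outside USR/SYS).
 * The method-of-entry bits in the DCSR give the debug reason, and the
 * reported PC is fixed up (pc -= 4) before being written back to the cache. */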
1035 int xscale_debug_entry(target_t *target)
1036 {
1037 armv4_5_common_t *armv4_5 = target->arch_info;
1038 xscale_common_t *xscale = armv4_5->arch_info;
1039 u32 pc;
1040 u32 buffer[10];
1041 int i;
1042 int retval;
1043
1044 u32 moe;
1045
1046 /* clear external dbg break (will be written on next DCSR read) */
1047 xscale->external_debug_break = 0;
1048 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1049 return retval;
1050
1051 /* get r0, pc, r1 to r7 and cpsr */
1052 if ((retval=xscale_receive(target, buffer, 10))!=ERROR_OK)
1053 return retval;
1054
1055 /* move r0 from buffer to register cache */
1056 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
1057 armv4_5->core_cache->reg_list[0].dirty = 1;
1058 armv4_5->core_cache->reg_list[0].valid = 1;
1059 LOG_DEBUG("r0: 0x%8.8x", buffer[0]);
1060
1061 /* move pc from buffer to register cache */
1062 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
1063 armv4_5->core_cache->reg_list[15].dirty = 1;
1064 armv4_5->core_cache->reg_list[15].valid = 1;
1065 LOG_DEBUG("pc: 0x%8.8x", buffer[1]);
1066
1067 /* move data from buffer to register cache */
1068 for (i = 1; i <= 7; i++)
1069 {
1070 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
1071 armv4_5->core_cache->reg_list[i].dirty = 1;
1072 armv4_5->core_cache->reg_list[i].valid = 1;
1073 LOG_DEBUG("r%i: 0x%8.8x", i, buffer[i + 1]);
1074 }
1075
1076 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
1077 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
1078 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
1079 LOG_DEBUG("cpsr: 0x%8.8x", buffer[9]);
1080
1081 armv4_5->core_mode = buffer[9] & 0x1f;
1082 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
1083 {
1084 target->state = TARGET_UNKNOWN;
1085 LOG_ERROR("cpsr contains invalid mode value - communication failure");
1086 return ERROR_TARGET_FAILURE;
1087 }
1088 LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
1089
1090 if (buffer[9] & 0x20)
1091 armv4_5->core_state = ARMV4_5_STATE_THUMB;
1092 else
1093 armv4_5->core_state = ARMV4_5_STATE_ARM;
1094
1095
1096 if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
1097 return ERROR_FAIL;
1098
1099 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1100 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
1101 {
1102 xscale_receive(target, buffer, 8);
1103 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1104 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
1105 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
1106 }
1107 else
1108 {
1109 /* r8 to r14, but no spsr */
1110 xscale_receive(target, buffer, 7);
1111 }
1112
1113 /* move data from buffer to register cache */
1114 for (i = 8; i <= 14; i++)
1115 {
1116 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
1117 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
1118 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
1119 }
1120
1121 /* examine debug reason */
1122 xscale_read_dcsr(target);
1123 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1124
1125 /* stored PC (for calculating fixup) */
1126 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1127
1128 switch (moe)
1129 {
1130 case 0x0: /* Processor reset */
1131 target->debug_reason = DBG_REASON_DBGRQ;
1132 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1133 pc -= 4;
1134 break;
1135 case 0x1: /* Instruction breakpoint hit */
1136 target->debug_reason = DBG_REASON_BREAKPOINT;
1137 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1138 pc -= 4;
1139 break;
1140 case 0x2: /* Data breakpoint hit */
1141 target->debug_reason = DBG_REASON_WATCHPOINT;
1142 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1143 pc -= 4;
1144 break;
1145 case 0x3: /* BKPT instruction executed */
1146 target->debug_reason = DBG_REASON_BREAKPOINT;
1147 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1148 pc -= 4;
1149 break;
1150 case 0x4: /* Ext. debug event */
1151 target->debug_reason = DBG_REASON_DBGRQ;
1152 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1153 pc -= 4;
1154 break;
1155 case 0x5: /* Vector trap occurred */
1156 target->debug_reason = DBG_REASON_BREAKPOINT;
1157 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1158 pc -= 4;
1159 break;
1160 case 0x6: /* Trace buffer full break */
1161 target->debug_reason = DBG_REASON_DBGRQ;
1162 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1163 pc -= 4;
1164 break;
1165 case 0x7: /* Reserved */
1166 default:
1167 LOG_ERROR("Method of Entry is 'Reserved'");
1168 exit(-1);
1169 break;
1170 }
1171
1172 /* apply PC fixup */
1173 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1174
1175 /* on the first debug entry, identify cache type */
1176 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1177 {
1178 u32 cache_type_reg;
1179
1180 /* read cp15 cache type register */
1181 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1182 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1183
1184 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1185 }
1186
1187 /* examine MMU and Cache settings */
1188 /* read cp15 control register */
1189 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1190 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1191 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1192 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1193 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1194
1195 /* tracing enabled, read collected trace data */
1196 if (xscale->trace.buffer_enabled)
1197 {
1198 xscale_read_trace(target);
1199 xscale->trace.buffer_fill--;
1200
1201 /* resume if we're still collecting trace data */
1202 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1203 && (xscale->trace.buffer_fill > 0))
1204 {
1205 xscale_resume(target, 1, 0x0, 1, 0);
1206 }
1207 else
1208 {
1209 xscale->trace.buffer_enabled = 0;
1210 }
1211 }
1212
1213 return ERROR_OK;
1214 }
1215
1216 int xscale_halt(target_t *target)
1217 {
1218 armv4_5_common_t *armv4_5 = target->arch_info;
1219 xscale_common_t *xscale = armv4_5->arch_info;
1220
1221 LOG_DEBUG("target->state: %s",
1222 Jim_Nvp_value2name_simple( nvp_target_state, target->state )->name);
1223
1224 if (target->state == TARGET_HALTED)
1225 {
1226 LOG_DEBUG("target was already halted");
1227 return ERROR_OK;
1228 }
1229 else if (target->state == TARGET_UNKNOWN)
1230 {
1231 /* this must not happen for an XScale target */
1232 LOG_ERROR("target was in unknown state when halt was requested");
1233 return ERROR_TARGET_INVALID;
1234 }
1235 else if (target->state == TARGET_RESET)
1236 {
1237 LOG_DEBUG("target->state == TARGET_RESET");
1238 }
1239 else
1240 {
1241 /* assert external dbg break */
1242 xscale->external_debug_break = 1;
1243 xscale_read_dcsr(target);
1244
1245 target->debug_reason = DBG_REASON_DBGRQ;
1246 }
1247
1248 return ERROR_OK;
1249 }
1250
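/* single-stepping is emulated by programming IBCR0 as a hardware breakpoint
 * on the predicted next PC; a breakpoint that already occupies IBCR0 is
 * temporarily removed first */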
1251 int xscale_enable_single_step(struct target_s *target, u32 next_pc)
1252 {
1253 armv4_5_common_t *armv4_5 = target->arch_info;
1254 xscale_common_t *xscale= armv4_5->arch_info;
1255 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1256 int retval;
1257
1258 if (xscale->ibcr0_used)
1259 {
1260 breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1261
1262 if (ibcr0_bp)
1263 {
1264 xscale_unset_breakpoint(target, ibcr0_bp);
1265 }
1266 else
1267 {
1268 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1269 exit(-1);
1270 }
1271 }
1272
1273 if ((retval=xscale_set_reg_u32(ibcr0, next_pc | 0x1))!=ERROR_OK)
1274 return retval;
1275
1276 return ERROR_OK;
1277 }
1278
1279 int xscale_disable_single_step(struct target_s *target)
1280 {
1281 armv4_5_common_t *armv4_5 = target->arch_info;
1282 xscale_common_t *xscale= armv4_5->arch_info;
1283 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1284 int retval;
1285
1286 if ((retval=xscale_set_reg_u32(ibcr0, 0x0))!=ERROR_OK)
1287 return retval;
1288
1289 return ERROR_OK;
1290 }
1291
1292 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
1293 {
1294 armv4_5_common_t *armv4_5 = target->arch_info;
1295 xscale_common_t *xscale= armv4_5->arch_info;
1296 breakpoint_t *breakpoint = target->breakpoints;
1297
1298 u32 current_pc;
1299
1300 int retval;
1301 int i;
1302
1303 LOG_DEBUG("-");
1304
1305 if (target->state != TARGET_HALTED)
1306 {
1307 LOG_WARNING("target not halted");
1308 return ERROR_TARGET_NOT_HALTED;
1309 }
1310
1311 if (!debug_execution)
1312 {
1313 target_free_all_working_areas(target);
1314 }
1315
1316 /* update vector tables */
1317 if ((retval=xscale_update_vectors(target))!=ERROR_OK)
1318 return retval;
1319
1320 /* current = 1: continue on current pc, otherwise continue at <address> */
1321 if (!current)
1322 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1323
1324 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1325
1326 /* if we're at the reset vector, we have to simulate the branch */
1327 if (current_pc == 0x0)
1328 {
1329 arm_simulate_step(target, NULL);
1330 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1331 }
1332
1333 /* the front-end may request us not to handle breakpoints */
1334 if (handle_breakpoints)
1335 {
1336 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1337 {
1338 u32 next_pc;
1339
1340 /* there's a breakpoint at the current PC, we have to step over it */
1341 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1342 xscale_unset_breakpoint(target, breakpoint);
1343
1344 /* calculate PC of next instruction */
1345 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1346 {
1347 u32 current_opcode;
1348 target_read_u32(target, current_pc, &current_opcode);
1349 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1350 }
1351
1352 LOG_DEBUG("enable single-step");
1353 xscale_enable_single_step(target, next_pc);
1354
1355 /* restore banked registers */
1356 xscale_restore_context(target);
1357
1358 /* send resume request (command 0x30 or 0x31)
1359 * clean the trace buffer if it is to be enabled (0x62) */
1360 if (xscale->trace.buffer_enabled)
1361 {
1362 xscale_send_u32(target, 0x62);
1363 xscale_send_u32(target, 0x31);
1364 }
1365 else
1366 xscale_send_u32(target, 0x30);
1367
1368 /* send CPSR */
1369 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1370 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1371
1372 for (i = 7; i >= 0; i--)
1373 {
1374 /* send register */
1375 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1376 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1377 }
1378
1379 /* send PC */
1380 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1381 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1382
1383 /* wait for and process debug entry */
1384 xscale_debug_entry(target);
1385
1386 LOG_DEBUG("disable single-step");
1387 xscale_disable_single_step(target);
1388
1389 LOG_DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
1390 xscale_set_breakpoint(target, breakpoint);
1391 }
1392 }
1393
1394 /* enable any pending breakpoints and watchpoints */
1395 xscale_enable_breakpoints(target);
1396 xscale_enable_watchpoints(target);
1397
1398 /* restore banked registers */
1399 xscale_restore_context(target);
1400
1401 /* send resume request (command 0x30 or 0x31)
1402 * clean the trace buffer if it is to be enabled (0x62) */
1403 if (xscale->trace.buffer_enabled)
1404 {
1405 xscale_send_u32(target, 0x62);
1406 xscale_send_u32(target, 0x31);
1407 }
1408 else
1409 xscale_send_u32(target, 0x30);
1410
1411 /* send CPSR */
1412 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1413 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1414
1415 for (i = 7; i >= 0; i--)
1416 {
1417 /* send register */
1418 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1419 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1420 }
1421
1422 /* send PC */
1423 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1424 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1425
1426 target->debug_reason = DBG_REASON_NOTHALTED;
1427
1428 if (!debug_execution)
1429 {
1430 /* registers are now invalid */
1431 armv4_5_invalidate_core_regs(target);
1432 target->state = TARGET_RUNNING;
1433 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1434 }
1435 else
1436 {
1437 target->state = TARGET_DEBUG_RUNNING;
1438 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1439 }
1440
1441 LOG_DEBUG("target resumed");
1442
1443 xscale->handler_running = 1;
1444
1445 return ERROR_OK;
1446 }
1447
1448 static int xscale_step_inner(struct target_s *target, int current, u32 address, int handle_breakpoints)
1449 {
1450 armv4_5_common_t *armv4_5 = target->arch_info;
1451 xscale_common_t *xscale = armv4_5->arch_info;
1452
1453 u32 next_pc;
1454 int retval;
1455 int i;
1456
1457 target->debug_reason = DBG_REASON_SINGLESTEP;
1458
1459 /* calculate PC of next instruction */
1460 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1461 {
1462 u32 current_opcode, current_pc;
1463 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1464
1465 target_read_u32(target, current_pc, &current_opcode);
1466 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1467 return retval;
1468 }
1469
1470 LOG_DEBUG("enable single-step");
1471 if ((retval=xscale_enable_single_step(target, next_pc))!=ERROR_OK)
1472 return retval;
1473
1474 /* restore banked registers */
1475 if ((retval=xscale_restore_context(target))!=ERROR_OK)
1476 return retval;
1477
1478 /* send resume request (command 0x30 or 0x31)
1479 * clean the trace buffer if it is to be enabled (0x62) */
1480 if (xscale->trace.buffer_enabled)
1481 {
1482 if ((retval=xscale_send_u32(target, 0x62))!=ERROR_OK)
1483 return retval;
1484 if ((retval=xscale_send_u32(target, 0x31))!=ERROR_OK)
1485 return retval;
1486 }
1487 else
1488 if ((retval=xscale_send_u32(target, 0x30))!=ERROR_OK)
1489 return retval;
1490
1491 /* send CPSR */
1492 if ((retval=xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32)))!=ERROR_OK)
1493 return retval;
1494 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1495
1496 for (i = 7; i >= 0; i--)
1497 {
1498 /* send register */
1499 if ((retval=xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32)))!=ERROR_OK)
1500 return retval;
1501 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1502 }
1503
1504 /* send PC */
1505 if ((retval=xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32)))!=ERROR_OK)
1506 return retval;
1507 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1508
1509 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1510
1511 /* registers are now invalid */
1512 if ((retval=armv4_5_invalidate_core_regs(target))!=ERROR_OK)
1513 return retval;
1514
1515 /* wait for and process debug entry */
1516 if ((retval=xscale_debug_entry(target))!=ERROR_OK)
1517 return retval;
1518
1519 LOG_DEBUG("disable single-step");
1520 if ((retval=xscale_disable_single_step(target))!=ERROR_OK)
1521 return retval;
1522
1523 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1524
1525 return ERROR_OK;
1526 }
1527
1528 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
1529 {
1530 armv4_5_common_t *armv4_5 = target->arch_info;
1531 breakpoint_t *breakpoint = target->breakpoints;
1532
1533 u32 current_pc;
1534 int retval;
1535
1536 if (target->state != TARGET_HALTED)
1537 {
1538 LOG_WARNING("target not halted");
1539 return ERROR_TARGET_NOT_HALTED;
1540 }
1541
1542 /* current = 1: continue on current pc, otherwise continue at <address> */
1543 if (!current)
1544 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1545
1546 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1547
1548 /* if we're at the reset vector, we have to simulate the step */
1549 if (current_pc == 0x0)
1550 {
1551 if ((retval=arm_simulate_step(target, NULL))!=ERROR_OK)
1552 return retval;
1553 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1554
1555 target->debug_reason = DBG_REASON_SINGLESTEP;
1556 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1557
1558 return ERROR_OK;
1559 }
1560
1561 /* the front-end may request us not to handle breakpoints */
1562 if (handle_breakpoints)
1563 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1564 {
1565 if ((retval=xscale_unset_breakpoint(target, breakpoint))!=ERROR_OK)
1566 return retval;
1567 }
1568
1569 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1570
1571 if (breakpoint)
1572 {
1573 xscale_set_breakpoint(target, breakpoint);
1574 }
1575
1576 LOG_DEBUG("target stepped");
1577
1578 return ERROR_OK;
1579
1580 }
1581
1582 int xscale_assert_reset(target_t *target)
1583 {
1584 armv4_5_common_t *armv4_5 = target->arch_info;
1585 xscale_common_t *xscale = armv4_5->arch_info;
1586
1587 LOG_DEBUG("target->state: %s",
1588 Jim_Nvp_value2name_simple( nvp_target_state, target->state )->name);
1589
1590 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1591 * end up in T-L-R, which would reset JTAG)
1592 */
1593 jtag_add_end_state(TAP_IDLE);
1594 xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dcsr);
1595
1596 /* set Hold reset, Halt mode and Trap Reset */
1597 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1598 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1599 xscale_write_dcsr(target, 1, 0);
1600
1601 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1602 xscale_jtag_set_instr(xscale->jtag_info.tap, 0x7f);
1603 jtag_execute_queue();
1604
1605 /* assert reset */
1606 jtag_add_reset(0, 1);
1607
1608 /* sleep 1ms, to be sure we fulfill any requirements */
1609 jtag_add_sleep(1000);
1610 jtag_execute_queue();
1611
1612 target->state = TARGET_RESET;
1613
1614 if (target->reset_halt)
1615 {
1616 int retval;
1617 if ((retval = target_halt(target))!=ERROR_OK)
1618 return retval;
1619 }
1620
1621 return ERROR_OK;
1622 }
1623
1624 int xscale_deassert_reset(target_t *target)
1625 {
1626 armv4_5_common_t *armv4_5 = target->arch_info;
1627 xscale_common_t *xscale = armv4_5->arch_info;
1628
1629 fileio_t debug_handler;
1630 u32 address;
1631 u32 binary_size;
1632
1633 u32 buf_cnt;
1634 int i;
1635 int retval;
1636
1637 breakpoint_t *breakpoint = target->breakpoints;
1638
1639 LOG_DEBUG("-");
1640
1641 xscale->ibcr_available = 2;
1642 xscale->ibcr0_used = 0;
1643 xscale->ibcr1_used = 0;
1644
1645 xscale->dbr_available = 2;
1646 xscale->dbr0_used = 0;
1647 xscale->dbr1_used = 0;
1648
1649 /* mark all hardware breakpoints as unset */
1650 while (breakpoint)
1651 {
1652 if (breakpoint->type == BKPT_HARD)
1653 {
1654 breakpoint->set = 0;
1655 }
1656 breakpoint = breakpoint->next;
1657 }
1658
1659 if (!xscale->handler_installed)
1660 {
1661 /* release SRST */
1662 jtag_add_reset(0, 0);
1663
1664 /* wait 300ms; 150 and 100ms were not enough */
1665 jtag_add_sleep(300*1000);
1666
1667 jtag_add_runtest(2030, TAP_IDLE);
1668 jtag_execute_queue();
1669
1670 /* set Hold reset, Halt mode and Trap Reset */
1671 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1672 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1673 xscale_write_dcsr(target, 1, 0);
1674
1675 /* Load debug handler */
1676 if (fileio_open(&debug_handler, "xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
1677 {
1678 return ERROR_OK;
1679 }
1680
1681 if ((binary_size = debug_handler.size) % 4)
1682 {
1683 LOG_ERROR("debug_handler.bin: size not a multiple of 4");
1684 exit(-1);
1685 }
1686
1687 if (binary_size > 0x800)
1688 {
1689 LOG_ERROR("debug_handler.bin: larger than 2kb");
1690 exit(-1);
1691 }
1692
1693 binary_size = CEIL(binary_size, 32) * 32;
1694
1695 address = xscale->handler_address;
1696 while (binary_size > 0)
1697 {
1698 u32 cache_line[8];
1699 u8 buffer[32];
1700
1701 if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
1702 {
1703 LOG_ERROR("reading debug handler failed");
fileio_close(&debug_handler);
return retval;
1704 }
1705
1706 for (i = 0; i < buf_cnt; i += 4)
1707 {
1708 /* convert LE buffer to host-endian u32 */
1709 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1710 }
1711
1712 for (; buf_cnt < 32; buf_cnt += 4)
1713 {
1714 cache_line[buf_cnt / 4] = 0xe1a08008;
1715 }
1716
1717 /* only load addresses other than the reset vectors */
1718 if ((address % 0x400) != 0x0)
1719 {
1720 xscale_load_ic(target, 1, address, cache_line);
1721 }
1722
1723 address += buf_cnt;
1724 binary_size -= buf_cnt;
1725 }
1726
1727 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
1728 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
1729
1730 jtag_add_runtest(30, TAP_IDLE);
1731
1732 jtag_add_sleep(100000);
1733
1734 /* set Hold reset, Halt mode and Trap Reset */
1735 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1736 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1737 xscale_write_dcsr(target, 1, 0);
1738
1739 /* clear Hold reset to let the target run (should enter debug handler) */
1740 xscale_write_dcsr(target, 0, 1);
1741 target->state = TARGET_RUNNING;
1742
1743 if (!target->reset_halt)
1744 {
1745 jtag_add_sleep(10000);
1746
1747 /* we should have entered debug now */
1748 xscale_debug_entry(target);
1749 target->state = TARGET_HALTED;
1750
1751 /* resume the target */
1752 xscale_resume(target, 1, 0x0, 1, 0);
1753 }
1754
1755 fileio_close(&debug_handler);
1756 }
1757 else
1758 {
1759 jtag_add_reset(0, 0);
1760 }
1761
1762 return ERROR_OK;
1763 }
1764
1765 int xscale_soft_reset_halt(struct target_s *target)
1766 {
1767 return ERROR_OK;
1768 }
1769
1770 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
1771 {
1772 return ERROR_OK;
1773 }
1774
1775 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
1776 {
1777
1778 return ERROR_OK;
1779 }
1780
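/* fetch the banked registers of every privileged mode whose cache entries
 * are invalid: the debug handler is sent command 0x0 plus a CPSR selecting
 * the mode, and answers with r8-r14 (and the spsr outside USR/SYS) */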
1781 int xscale_full_context(target_t *target)
1782 {
1783 armv4_5_common_t *armv4_5 = target->arch_info;
1784
1785 u32 *buffer;
1786
1787 int i, j;
1788
1789 LOG_DEBUG("-");
1790
1791 if (target->state != TARGET_HALTED)
1792 {
1793 LOG_WARNING("target not halted");
1794 return ERROR_TARGET_NOT_HALTED;
1795 }
1796
1797 buffer = malloc(4 * 8);
1798
1799 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1800 * we can't enter User mode on an XScale (unpredictable),
1801 * but User shares registers with SYS
1802 */
1803 for(i = 1; i < 7; i++)
1804 {
1805 int valid = 1;
1806
1807 /* check if there are invalid registers in the current mode
1808 */
1809 for (j = 0; j <= 16; j++)
1810 {
1811 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1812 valid = 0;
1813 }
1814
1815 if (!valid)
1816 {
1817 u32 tmp_cpsr;
1818
1819 /* request banked registers */
1820 xscale_send_u32(target, 0x0);
1821
1822 tmp_cpsr = 0x0;
1823 tmp_cpsr |= armv4_5_number_to_mode(i);
1824 tmp_cpsr |= 0xc0; /* I/F bits */
1825
1826 /* send CPSR for desired mode */
1827 xscale_send_u32(target, tmp_cpsr);
1828
1829 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1830 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1831 {
1832 xscale_receive(target, buffer, 8);
1833 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1834 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1835 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1836 }
1837 else
1838 {
1839 xscale_receive(target, buffer, 7);
1840 }
1841
1842 /* move data from buffer to register cache */
1843 for (j = 8; j <= 14; j++)
1844 {
1845 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1846 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1847 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1848 }
1849 }
1850 }
1851
1852 free(buffer);
1853
1854 return ERROR_OK;
1855 }
1856
1857 int xscale_restore_context(target_t *target)
1858 {
1859 armv4_5_common_t *armv4_5 = target->arch_info;
1860
1861 int i, j;
1862
1863 LOG_DEBUG("-");
1864
1865 if (target->state != TARGET_HALTED)
1866 {
1867 LOG_WARNING("target not halted");
1868 return ERROR_TARGET_NOT_HALTED;
1869 }
1870
1871 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1872 * we can't enter User mode on an XScale (unpredictable),
1873 * but User shares registers with SYS
1874 */
1875 for(i = 1; i < 7; i++)
1876 {
1877 int dirty = 0;
1878
1879 /* check if there are dirty registers in this mode
1880 */
1881 for (j = 8; j <= 14; j++)
1882 {
1883 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1884 dirty = 1;
1885 }
1886
1887 /* if not USR/SYS, check if the SPSR needs to be written */
1888 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1889 {
1890 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1891 dirty = 1;
1892 }
1893
1894 if (dirty)
1895 {
1896 u32 tmp_cpsr;
1897
1898 /* send banked registers */
1899 xscale_send_u32(target, 0x1);
1900
1901 tmp_cpsr = 0x0;
1902 tmp_cpsr |= armv4_5_number_to_mode(i);
1903 tmp_cpsr |= 0xc0; /* I/F bits */
1904
1905 /* send CPSR for desired mode */
1906 xscale_send_u32(target, tmp_cpsr);
1907
1908 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1909 for (j = 8; j <= 14; j++)
1910 {
1911 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1912 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1913 }
1914
1915 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1916 {
1917 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1918 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1919 }
1920 }
1921 }
1922
1923 return ERROR_OK;
1924 }
1925
1926 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1927 {
1928 armv4_5_common_t *armv4_5 = target->arch_info;
1929 xscale_common_t *xscale = armv4_5->arch_info;
1930 u32 *buf32;
1931 int i;
1932 int retval;
1933
1934 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1935
1936 if (target->state != TARGET_HALTED)
1937 {
1938 LOG_WARNING("target not halted");
1939 return ERROR_TARGET_NOT_HALTED;
1940 }
1941
1942 /* sanitize arguments */
1943 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1944 return ERROR_INVALID_ARGUMENTS;
1945
1946 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1947 return ERROR_TARGET_UNALIGNED_ACCESS;
1948
1949 /* send memory read request (command 0x1n, n: access size) */
1950 if ((retval=xscale_send_u32(target, 0x10 | size))!=ERROR_OK)
1951 return retval;
1952
1953 /* send base address for read request */
1954 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
1955 return retval;
1956
1957 /* send number of requested data words */
1958 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
1959 return retval;
1960
1961 /* receive data from target (count times 32-bit words in host endianness) */
1962 buf32 = malloc(4 * count);
1963 if ((retval=xscale_receive(target, buf32, count))!=ERROR_OK)
1964 {
free(buf32); /* don't leak the receive buffer on the error path */
return retval;
}
1965
1966 /* extract data from host-endian buffer into byte stream */
1967 for (i = 0; i < count; i++)
1968 {
1969 switch (size)
1970 {
1971 case 4:
1972 target_buffer_set_u32(target, buffer, buf32[i]);
1973 buffer += 4;
1974 break;
1975 case 2:
1976 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1977 buffer += 2;
1978 break;
1979 case 1:
1980 *buffer++ = buf32[i] & 0xff;
1981 break;
1982 default:
1983 LOG_ERROR("should never get here");
1984 exit(-1);
1985 }
1986 }
1987
1988 free(buf32);
1989
1990 /* examine DCSR, to see if Sticky Abort (SA) got set */
1991 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1992 return retval;
1993 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1994 {
1995 /* clear SA bit */
1996 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
1997 return retval;
1998
1999 return ERROR_TARGET_DATA_ABORT;
2000 }
2001
2002 return ERROR_OK;
2003 }
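
/* Usage sketch (illustrative only, disabled with #if 0): reading a single
 * 32-bit word through the debug handler memory protocol implemented above.
 * The helper name is hypothetical; real callers normally go through the
 * generic target read wrappers.
 */
#if 0
static int xscale_example_read_u32(target_t *target, u32 address, u32 *value)
{
	u8 buf[4];
	int retval;

	if ((retval = xscale_read_memory(target, address, 4, 1, buf)) != ERROR_OK)
		return retval;

	/* xscale_read_memory fills the buffer in target endianness */
	*value = target_buffer_get_u32(target, buf);

	return ERROR_OK;
}
#endif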
2004
2005 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
2006 {
2007 armv4_5_common_t *armv4_5 = target->arch_info;
2008 xscale_common_t *xscale = armv4_5->arch_info;
2009 int retval;
2010
2011 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
2012
2013 if (target->state != TARGET_HALTED)
2014 {
2015 LOG_WARNING("target not halted");
2016 return ERROR_TARGET_NOT_HALTED;
2017 }
2018
2019 /* sanitize arguments */
2020 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
2021 return ERROR_INVALID_ARGUMENTS;
2022
2023 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2024 return ERROR_TARGET_UNALIGNED_ACCESS;
2025
2026 /* send memory write request (command 0x2n, n: access size) */
2027 if ((retval=xscale_send_u32(target, 0x20 | size))!=ERROR_OK)
2028 return retval;
2029
2030 /* send base address for write request */
2031 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
2032 return retval;
2033
2034 /* send number of data words to be written */
2035 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
2036 return retval;
2037
2038 /* send the payload; the word-by-word variant below is disabled (#if 0) and kept for reference */
2039 #if 0
2040 for (i = 0; i < count; i++)
2041 {
2042 switch (size)
2043 {
2044 case 4:
2045 value = target_buffer_get_u32(target, buffer);
2046 xscale_send_u32(target, value);
2047 buffer += 4;
2048 break;
2049 case 2:
2050 value = target_buffer_get_u16(target, buffer);
2051 xscale_send_u32(target, value);
2052 buffer += 2;
2053 break;
2054 case 1:
2055 value = *buffer;
2056 xscale_send_u32(target, value);
2057 buffer += 1;
2058 break;
2059 default:
2060 LOG_ERROR("should never get here");
2061 exit(-1);
2062 }
2063 }
2064 #endif
2065 if ((retval=xscale_send(target, buffer, count, size))!=ERROR_OK)
2066 return retval;
2067
2068 /* examine DCSR, to see if Sticky Abort (SA) got set */
2069 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
2070 return retval;
2071 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2072 {
2073 /* clear SA bit */
2074 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
2075 return retval;
2076
2077 return ERROR_TARGET_DATA_ABORT;
2078 }
2079
2080 return ERROR_OK;
2081 }
2082
2083 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
2084 {
2085 return xscale_write_memory(target, address, 4, count, buffer);
2086 }
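
/* Usage sketch (illustrative only, disabled with #if 0): writing a single
 * 32-bit word through the debug handler memory protocol; the byte buffer is
 * prepared in target endianness before being handed to xscale_write_memory().
 * The helper name is hypothetical.
 */
#if 0
static int xscale_example_write_u32(target_t *target, u32 address, u32 value)
{
	u8 buf[4];

	/* convert the host-endian value into a target-endian byte stream */
	target_buffer_set_u32(target, buf, value);

	return xscale_write_memory(target, address, 4, 1, buf);
}
#endif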
2087
2088 u32 xscale_get_ttb(target_t *target)
2089 {
2090 armv4_5_common_t *armv4_5 = target->arch_info;
2091 xscale_common_t *xscale = armv4_5->arch_info;
2092 u32 ttb;
2093
2094 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2095 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2096
2097 return ttb;
2098 }
2099
2100 void xscale_disable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2101 {
2102 armv4_5_common_t *armv4_5 = target->arch_info;
2103 xscale_common_t *xscale = armv4_5->arch_info;
2104 u32 cp15_control;
2105
2106 /* read cp15 control register */
2107 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2108 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2109
2110 if (mmu)
2111 cp15_control &= ~0x1U;
2112
2113 if (d_u_cache)
2114 {
2115 /* clean DCache */
2116 xscale_send_u32(target, 0x50);
2117 xscale_send_u32(target, xscale->cache_clean_address);
2118
2119 /* invalidate DCache */
2120 xscale_send_u32(target, 0x51);
2121
2122 cp15_control &= ~0x4U;
2123 }
2124
2125 if (i_cache)
2126 {
2127 /* invalidate ICache */
2128 xscale_send_u32(target, 0x52);
2129 cp15_control &= ~0x1000U;
2130 }
2131
2132 /* write new cp15 control register */
2133 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2134
2135 /* execute cpwait to ensure outstanding operations complete */
2136 xscale_send_u32(target, 0x53);
2137 }
2138
2139 void xscale_enable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2140 {
2141 armv4_5_common_t *armv4_5 = target->arch_info;
2142 xscale_common_t *xscale = armv4_5->arch_info;
2143 u32 cp15_control;
2144
2145 /* read cp15 control register */
2146 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2147 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2148
2149 if (mmu)
2150 cp15_control |= 0x1U;
2151
2152 if (d_u_cache)
2153 cp15_control |= 0x4U;
2154
2155 if (i_cache)
2156 cp15_control |= 0x1000U;
2157
2158 /* write new cp15 control register */
2159 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2160
2161 /* execute cpwait to ensure outstanding operations complete */
2162 xscale_send_u32(target, 0x53);
2163 }
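
/* For reference (assumed from the ARMv5 CP15 control register layout, which
 * matches the masks used in the two functions above): bit 0 enables the MMU,
 * bit 2 the data/unified cache and bit 12 the instruction cache. A sketch of
 * named constants, kept out of the build:
 */
#if 0
#define XSCALE_CP15_CTRL_MMU_ENABLE	0x00000001	/* M bit */
#define XSCALE_CP15_CTRL_DCACHE_ENABLE	0x00000004	/* C bit */
#define XSCALE_CP15_CTRL_ICACHE_ENABLE	0x00001000	/* I bit */
#endif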
2164
2165 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2166 {
2167 int retval;
2168 armv4_5_common_t *armv4_5 = target->arch_info;
2169 xscale_common_t *xscale = armv4_5->arch_info;
2170
2171 if (target->state != TARGET_HALTED)
2172 {
2173 LOG_WARNING("target not halted");
2174 return ERROR_TARGET_NOT_HALTED;
2175 }
2176
2177 if (breakpoint->set)
2178 {
2179 LOG_WARNING("breakpoint already set");
2180 return ERROR_OK;
2181 }
2182
2183 if (breakpoint->type == BKPT_HARD)
2184 {
2185 u32 value = breakpoint->address | 1;
2186 if (!xscale->ibcr0_used)
2187 {
2188 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2189 xscale->ibcr0_used = 1;
2190 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2191 }
2192 else if (!xscale->ibcr1_used)
2193 {
2194 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2195 xscale->ibcr1_used = 1;
2196 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2197 }
2198 else
2199 {
2200 LOG_ERROR("BUG: no hardware comparator available");
2201 return ERROR_OK;
2202 }
2203 }
2204 else if (breakpoint->type == BKPT_SOFT)
2205 {
2206 if (breakpoint->length == 4)
2207 {
2208 /* keep the original instruction in target endianness */
2209 if((retval = target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2210 {
2211 return retval;
2212 }
2213 /* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
2214 if((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2215 {
2216 return retval;
2217 }
2218 }
2219 else
2220 {
2221 /* keep the original instruction in target endianness */
2222 if((retval = target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2223 {
2224 return retval;
2225 }
2226 /* write the bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
2227 if((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2228 {
2229 return retval;
2230 }
2231 }
2232 breakpoint->set = 1;
2233 }
2234
2235 return ERROR_OK;
2236 }
2237
2238 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2239 {
2240 armv4_5_common_t *armv4_5 = target->arch_info;
2241 xscale_common_t *xscale = armv4_5->arch_info;
2242
2243 if (target->state != TARGET_HALTED)
2244 {
2245 LOG_WARNING("target not halted");
2246 return ERROR_TARGET_NOT_HALTED;
2247 }
2248
2249 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2250 {
2251 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2252 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2253 }
2254
2255 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2256 {
2257 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes in length are supported");
2258 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2259 }
2260
2261 if (breakpoint->type == BKPT_HARD)
2262 {
2263 xscale->ibcr_available--;
2264 }
2265
2266 return ERROR_OK;
2267 }
2268
2269 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2270 {
2271 int retval;
2272 armv4_5_common_t *armv4_5 = target->arch_info;
2273 xscale_common_t *xscale = armv4_5->arch_info;
2274
2275 if (target->state != TARGET_HALTED)
2276 {
2277 LOG_WARNING("target not halted");
2278 return ERROR_TARGET_NOT_HALTED;
2279 }
2280
2281 if (!breakpoint->set)
2282 {
2283 LOG_WARNING("breakpoint not set");
2284 return ERROR_OK;
2285 }
2286
2287 if (breakpoint->type == BKPT_HARD)
2288 {
2289 if (breakpoint->set == 1)
2290 {
2291 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2292 xscale->ibcr0_used = 0;
2293 }
2294 else if (breakpoint->set == 2)
2295 {
2296 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2297 xscale->ibcr1_used = 0;
2298 }
2299 breakpoint->set = 0;
2300 }
2301 else
2302 {
2303 /* restore original instruction (kept in target endianness) */
2304 if (breakpoint->length == 4)
2305 {
2306 if((retval = target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2307 {
2308 return retval;
2309 }
2310 }
2311 else
2312 {
2313 if((retval = target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2314 {
2315 return retval;
2316 }
2317 }
2318 breakpoint->set = 0;
2319 }
2320
2321 return ERROR_OK;
2322 }
2323
2324 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2325 {
2326 armv4_5_common_t *armv4_5 = target->arch_info;
2327 xscale_common_t *xscale = armv4_5->arch_info;
2328
2329 if (target->state != TARGET_HALTED)
2330 {
2331 LOG_WARNING("target not halted");
2332 return ERROR_TARGET_NOT_HALTED;
2333 }
2334
2335 if (breakpoint->set)
2336 {
2337 xscale_unset_breakpoint(target, breakpoint);
2338 }
2339
2340 if (breakpoint->type == BKPT_HARD)
2341 xscale->ibcr_available++;
2342
2343 return ERROR_OK;
2344 }
2345
2346 int xscale_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2347 {
2348 armv4_5_common_t *armv4_5 = target->arch_info;
2349 xscale_common_t *xscale = armv4_5->arch_info;
2350 u8 enable=0;
2351 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2352 u32 dbcon_value;
2353
2354 if (target->state != TARGET_HALTED)
2355 {
2356 LOG_WARNING("target not halted");
2357 return ERROR_TARGET_NOT_HALTED;
2358 }
2359
2360 xscale_get_reg(dbcon);
dbcon_value = buf_get_u32(dbcon->value, 0, 32); /* use the value just fetched from the target */
2361
2362 switch (watchpoint->rw)
2363 {
2364 case WPT_READ:
2365 enable = 0x3;
2366 break;
2367 case WPT_ACCESS:
2368 enable = 0x2;
2369 break;
2370 case WPT_WRITE:
2371 enable = 0x1;
2372 break;
2373 default:
2374 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2375 }
2376
2377 if (!xscale->dbr0_used)
2378 {
2379 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2380 dbcon_value |= enable;
2381 xscale_set_reg_u32(dbcon, dbcon_value);
2382 watchpoint->set = 1;
2383 xscale->dbr0_used = 1;
2384 }
2385 else if (!xscale->dbr1_used)
2386 {
2387 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2388 dbcon_value |= enable << 2;
2389 xscale_set_reg_u32(dbcon, dbcon_value);
2390 watchpoint->set = 2;
2391 xscale->dbr1_used = 1;
2392 }
2393 else
2394 {
2395 LOG_ERROR("BUG: no hardware comparator available");
2396 return ERROR_OK;
2397 }
2398
2399 return ERROR_OK;
2400 }
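
/* For reference (derived from the mapping used in xscale_set_watchpoint()
 * above; the field layout follows the XScale DBCON register, two enable bits
 * per data breakpoint register, with DBR1's field shifted left by two):
 */
#if 0
#define XSCALE_DBCON_E_DISABLED		0x0
#define XSCALE_DBCON_E_STORE_ONLY	0x1	/* WPT_WRITE */
#define XSCALE_DBCON_E_ANY_ACCESS	0x2	/* WPT_ACCESS */
#define XSCALE_DBCON_E_LOAD_ONLY	0x3	/* WPT_READ */
#endif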
2401
2402 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2403 {
2404 armv4_5_common_t *armv4_5 = target->arch_info;
2405 xscale_common_t *xscale = armv4_5->arch_info;
2406
2407 if (target->state != TARGET_HALTED)
2408 {
2409 LOG_WARNING("target not halted");
2410 return ERROR_TARGET_NOT_HALTED;
2411 }
2412
2413 if (xscale->dbr_available < 1)
2414 {
2415 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2416 }
2417
2418 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2419 {
2420 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2421 }
2422
2423 xscale->dbr_available--;
2424
2425 return ERROR_OK;
2426 }
2427
2428 int xscale_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2429 {
2430 armv4_5_common_t *armv4_5 = target->arch_info;
2431 xscale_common_t *xscale = armv4_5->arch_info;
2432 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2433 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2434
2435 if (target->state != TARGET_HALTED)
2436 {
2437 LOG_WARNING("target not halted");
2438 return ERROR_TARGET_NOT_HALTED;
2439 }
2440
2441 if (!watchpoint->set)
2442 {
2443 LOG_WARNING("watchpoint not set");
2444 return ERROR_OK;
2445 }
2446
2447 if (watchpoint->set == 1)
2448 {
2449 dbcon_value &= ~0x3;
2450 xscale_set_reg_u32(dbcon, dbcon_value);
2451 xscale->dbr0_used = 0;
2452 }
2453 else if (watchpoint->set == 2)
2454 {
2455 dbcon_value &= ~0xc;
2456 xscale_set_reg_u32(dbcon, dbcon_value);
2457 xscale->dbr1_used = 0;
2458 }
2459 watchpoint->set = 0;
2460
2461 return ERROR_OK;
2462 }
2463
2464 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2465 {
2466 armv4_5_common_t *armv4_5 = target->arch_info;
2467 xscale_common_t *xscale = armv4_5->arch_info;
2468
2469 if (target->state != TARGET_HALTED)
2470 {
2471 LOG_WARNING("target not halted");
2472 return ERROR_TARGET_NOT_HALTED;
2473 }
2474
2475 if (watchpoint->set)
2476 {
2477 xscale_unset_watchpoint(target, watchpoint);
2478 }
2479
2480 xscale->dbr_available++;
2481
2482 return ERROR_OK;
2483 }
2484
2485 void xscale_enable_watchpoints(struct target_s *target)
2486 {
2487 watchpoint_t *watchpoint = target->watchpoints;
2488
2489 while (watchpoint)
2490 {
2491 if (watchpoint->set == 0)
2492 xscale_set_watchpoint(target, watchpoint);
2493 watchpoint = watchpoint->next;
2494 }
2495 }
2496
2497 void xscale_enable_breakpoints(struct target_s *target)
2498 {
2499 breakpoint_t *breakpoint = target->breakpoints;
2500
2501 /* set any pending breakpoints */
2502 while (breakpoint)
2503 {
2504 if (breakpoint->set == 0)
2505 xscale_set_breakpoint(target, breakpoint);
2506 breakpoint = breakpoint->next;
2507 }
2508 }
2509
2510 int xscale_get_reg(reg_t *reg)
2511 {
2512 xscale_reg_t *arch_info = reg->arch_info;
2513 target_t *target = arch_info->target;
2514 armv4_5_common_t *armv4_5 = target->arch_info;
2515 xscale_common_t *xscale = armv4_5->arch_info;
2516
2517 /* DCSR, TX and RX are accessible via JTAG */
2518 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2519 {
2520 return xscale_read_dcsr(arch_info->target);
2521 }
2522 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2523 {
2524 /* 1 = consume register content */
2525 return xscale_read_tx(arch_info->target, 1);
2526 }
2527 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2528 {
2529 /* can't read from RX register (host -> debug handler) */
2530 return ERROR_OK;
2531 }
2532 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2533 {
2534 /* can't (explicitly) read from TXRXCTRL register */
2535 return ERROR_OK;
2536 }
2537 else /* Other DBG registers have to be transferred by the debug handler */
2538 {
2539 /* send CP read request (command 0x40) */
2540 xscale_send_u32(target, 0x40);
2541
2542 /* send CP register number */
2543 xscale_send_u32(target, arch_info->dbg_handler_number);
2544
2545 /* read register value */
2546 xscale_read_tx(target, 1);
2547 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2548
2549 reg->dirty = 0;
2550 reg->valid = 1;
2551 }
2552
2553 return ERROR_OK;
2554 }
2555
2556 int xscale_set_reg(reg_t *reg, u8* buf)
2557 {
2558 xscale_reg_t *arch_info = reg->arch_info;
2559 target_t *target = arch_info->target;
2560 armv4_5_common_t *armv4_5 = target->arch_info;
2561 xscale_common_t *xscale = armv4_5->arch_info;
2562 u32 value = buf_get_u32(buf, 0, 32);
2563
2564 /* DCSR, TX and RX are accessible via JTAG */
2565 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2566 {
2567 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2568 return xscale_write_dcsr(arch_info->target, -1, -1);
2569 }
2570 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2571 {
2572 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2573 return xscale_write_rx(arch_info->target);
2574 }
2575 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2576 {
2577 /* can't write to TX register (debug-handler -> host) */
2578 return ERROR_OK;
2579 }
2580 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2581 {
2582 /* can't (explicitly) write to TXRXCTRL register */
2583 return ERROR_OK;
2584 }
2585 else /* Other DBG registers have to be transferred by the debug handler */
2586 {
2587 /* send CP write request (command 0x41) */
2588 xscale_send_u32(target, 0x41);
2589
2590 /* send CP register number */
2591 xscale_send_u32(target, arch_info->dbg_handler_number);
2592
2593 /* send CP register value */
2594 xscale_send_u32(target, value);
2595 buf_set_u32(reg->value, 0, 32, value);
2596 }
2597
2598 return ERROR_OK;
2599 }
2600
2601 /* convenience wrapper to access XScale specific registers */
2602 int xscale_set_reg_u32(reg_t *reg, u32 value)
2603 {
2604 u8 buf[4];
2605
2606 buf_set_u32(buf, 0, 32, value);
2607
2608 return xscale_set_reg(reg, buf);
2609 }
2610
2611 int xscale_write_dcsr_sw(target_t *target, u32 value)
2612 {
2613 /* get pointers to arch-specific information */
2614 armv4_5_common_t *armv4_5 = target->arch_info;
2615 xscale_common_t *xscale = armv4_5->arch_info;
2616 reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2617 xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
2618
2619 /* send CP write request (command 0x41) */
2620 xscale_send_u32(target, 0x41);
2621
2622 /* send CP register number */
2623 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2624
2625 /* send CP register value */
2626 xscale_send_u32(target, value);
2627 buf_set_u32(dcsr->value, 0, 32, value);
2628
2629 return ERROR_OK;
2630 }
2631
2632 int xscale_read_trace(target_t *target)
2633 {
2634 /* get pointers to arch-specific information */
2635 armv4_5_common_t *armv4_5 = target->arch_info;
2636 xscale_common_t *xscale = armv4_5->arch_info;
2637 xscale_trace_data_t **trace_data_p;
2638
2639 /* 258 words from debug handler
2640 * 256 trace buffer entries
2641 * 2 checkpoint addresses
2642 */
2643 u32 trace_buffer[258];
2644 int is_address[256];
2645 int i, j;
2646
2647 if (target->state != TARGET_HALTED)
2648 {
2649 LOG_WARNING("target must be stopped to read trace data");
2650 return ERROR_TARGET_NOT_HALTED;
2651 }
2652
2653 /* send read trace buffer command (command 0x61) */
2654 xscale_send_u32(target, 0x61);
2655
2656 /* receive trace buffer content */
2657 xscale_receive(target, trace_buffer, 258);
2658
2659 /* parse buffer backwards to identify address entries */
2660 for (i = 255; i >= 0; i--)
2661 {
2662 is_address[i] = 0;
2663 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2664 ((trace_buffer[i] & 0xf0) == 0xd0))
2665 {
2666 if (i >= 3)
2667 is_address[--i] = 1;
2668 if (i >= 2)
2669 is_address[--i] = 1;
2670 if (i >= 1)
2671 is_address[--i] = 1;
2672 if (i >= 0)
2673 is_address[--i] = 1;
2674 }
2675 }
2676
2677
2678 /* search first non-zero entry */
2679 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2680 ;
2681
2682 if (j == 256)
2683 {
2684 LOG_DEBUG("no trace data collected");
2685 return ERROR_XSCALE_NO_TRACE_DATA;
2686 }
2687
2688 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2689 ;
2690
2691 *trace_data_p = malloc(sizeof(xscale_trace_data_t));
2692 (*trace_data_p)->next = NULL;
2693 (*trace_data_p)->chkpt0 = trace_buffer[256];
2694 (*trace_data_p)->chkpt1 = trace_buffer[257];
2695 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2696 (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
2697 (*trace_data_p)->depth = 256 - j;
2698
2699 for (i = j; i < 256; i++)
2700 {
2701 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2702 if (is_address[i])
2703 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2704 else
2705 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2706 }
2707
2708 return ERROR_OK;
2709 }
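
/* Note on the buffer layout handled above (as inferred from this parser and
 * from xscale_analyze_trace() below): each trace entry is one byte whose upper
 * nibble encodes the message type (0x0-0x7 exception, 0x8 direct branch,
 * 0x9 indirect branch, 0xc/0xd checkpointed branches, 0xf roll-over) and whose
 * lower nibble counts instructions executed since the previous message; an
 * indirect branch message is preceded in the buffer by the four bytes of its
 * target address.
 */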
2710
2711 int xscale_read_instruction(target_t *target, arm_instruction_t *instruction)
2712 {
2713 /* get pointers to arch-specific information */
2714 armv4_5_common_t *armv4_5 = target->arch_info;
2715 xscale_common_t *xscale = armv4_5->arch_info;
2716 int i;
2717 int section = -1;
2718 u32 size_read;
2719 u32 opcode;
2720 int retval;
2721
2722 if (!xscale->trace.image)
2723 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2724
2725 /* search for the section the current instruction belongs to */
2726 for (i = 0; i < xscale->trace.image->num_sections; i++)
2727 {
2728 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2729 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2730 {
2731 section = i;
2732 break;
2733 }
2734 }
2735
2736 if (section == -1)
2737 {
2738 /* current instruction couldn't be found in the image */
2739 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2740 }
2741
2742 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2743 {
2744 u8 buf[4];
2745 if ((retval = image_read_section(xscale->trace.image, section,
2746 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2747 4, buf, &size_read)) != ERROR_OK)
2748 {
2749 LOG_ERROR("error while reading instruction: %i", retval);
2750 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2751 }
2752 opcode = target_buffer_get_u32(target, buf);
2753 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2754 }
2755 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2756 {
2757 u8 buf[2];
2758 if ((retval = image_read_section(xscale->trace.image, section,
2759 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2760 2, buf, &size_read)) != ERROR_OK)
2761 {
2762 LOG_ERROR("error while reading instruction: %i", retval);
2763 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2764 }
2765 opcode = target_buffer_get_u16(target, buf);
2766 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2767 }
2768 else
2769 {
2770 LOG_ERROR("BUG: unknown core state encountered");
2771 exit(-1);
2772 }
2773
2774 return ERROR_OK;
2775 }
2776
2777 int xscale_branch_address(xscale_trace_data_t *trace_data, int i, u32 *target)
2778 {
2779 /* if there are fewer than four entries prior to the indirect branch message
2780 * we can't extract the address */
2781 if (i < 4)
2782 {
2783 return -1;
2784 }
2785
2786 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2787 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2788
2789 return 0;
2790 }
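
/* Worked example for the reconstruction above: if the four entries preceding
 * the indirect branch message hold 0xa1, 0xb2, 0xc3, 0xd4 (entries i-4 to
 * i-1), the branch target is 0xd4 | 0xc3 << 8 | 0xb2 << 16 | 0xa1 << 24,
 * i.e. 0xa1b2c3d4 -- the entry closest to the branch message supplies the
 * least significant byte.
 */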
2791
2792 int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
2793 {
2794 /* get pointers to arch-specific information */
2795 armv4_5_common_t *armv4_5 = target->arch_info;
2796 xscale_common_t *xscale = armv4_5->arch_info;
2797 int next_pc_ok = 0;
2798 u32 next_pc = 0x0;
2799 xscale_trace_data_t *trace_data = xscale->trace.data;
2800 int retval;
2801
2802 while (trace_data)
2803 {
2804 int i, chkpt;
2805 int rollover;
2806 int branch;
2807 int exception;
2808 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2809
2810 chkpt = 0;
2811 rollover = 0;
2812
2813 for (i = 0; i < trace_data->depth; i++)
2814 {
2815 next_pc_ok = 0;
2816 branch = 0;
2817 exception = 0;
2818
2819 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2820 continue;
2821
2822 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2823 {
2824 case 0: /* Exceptions */
2825 case 1:
2826 case 2:
2827 case 3:
2828 case 4:
2829 case 5:
2830 case 6:
2831 case 7:
2832 exception = (trace_data->entries[i].data & 0x70) >> 4;
2833 next_pc_ok = 1;
2834 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2835 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2836 break;
2837 case 8: /* Direct Branch */
2838 branch = 1;
2839 break;
2840 case 9: /* Indirect Branch */
2841 branch = 1;
2842 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2843 {
2844 next_pc_ok = 1;
2845 }
2846 break;
2847 case 13: /* Checkpointed Indirect Branch */
2848 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2849 {
2850 next_pc_ok = 1;
2851 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2852 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2853 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2854 }
2855 /* explicit fall-through */
2856 case 12: /* Checkpointed Direct Branch */
2857 branch = 1;
2858 if (chkpt == 0)
2859 {
2860 next_pc_ok = 1;
2861 next_pc = trace_data->chkpt0;
2862 chkpt++;
2863 }
2864 else if (chkpt == 1)
2865 {
2866 next_pc_ok = 1;
2867 next_pc = trace_data->chkpt1;
2868 chkpt++;
2869 }
2870 else
2871 {
2872 LOG_WARNING("more than two checkpointed branches encountered");
2873 }
2874 break;
2875 case 15: /* Roll-over */
2876 rollover++;
2877 continue;
2878 default: /* Reserved */
2879 command_print(cmd_ctx, "--- reserved trace message ---");
2880 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2881 return ERROR_OK;
2882 }
2883
2884 if (xscale->trace.pc_ok)
2885 {
2886 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2887 arm_instruction_t instruction;
2888
2889 if ((exception == 6) || (exception == 7))
2890 {
2891 /* IRQ or FIQ exception, no instruction executed */
2892 executed -= 1;
2893 }
2894
2895 while (executed-- >= 0)
2896 {
2897 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2898 {
2899 /* can't continue tracing with no image available */
2900 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2901 {
2902 return retval;
2903 }
2904 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2905 {
2906 /* TODO: handle incomplete images */
2907 }
2908 }
2909
2910 /* a precise abort on a load to the PC is included in the incremental
2911 * word count, other instructions causing data aborts are not included
2912 */
2913 if ((executed == 0) && (exception == 4)
2914 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2915 {
2916 if ((instruction.type == ARM_LDM)
2917 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2918 {
2919 executed--;
2920 }
2921 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2922 && (instruction.info.load_store.Rd != 15))
2923 {
2924 executed--;
2925 }
2926 }
2927
2928 /* only the last instruction executed
2929 * (the one that caused the control flow change)
2930 * could be a taken branch
2931 */
2932 if (((executed == -1) && (branch == 1)) &&
2933 (((instruction.type == ARM_B) ||
2934 (instruction.type == ARM_BL) ||
2935 (instruction.type == ARM_BLX)) &&
2936 (instruction.info.b_bl_bx_blx.target_address != -1)))
2937 {
2938 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2939 }
2940 else
2941 {
2942 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2943 }
2944 command_print(cmd_ctx, "%s", instruction.text);
2945 }
2946
2947 rollover = 0;
2948 }
2949
2950 if (next_pc_ok)
2951 {
2952 xscale->trace.current_pc = next_pc;
2953 xscale->trace.pc_ok = 1;
2954 }
2955 }
2956
2957 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2958 {
2959 arm_instruction_t instruction;
2960 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2961 {
2962 /* can't continue tracing with no image available */
2963 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2964 {
2965 return retval;
2966 }
2967 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2968 {
2969 /* TODO: handle incomplete images */
2970 }
2971 }
2972 command_print(cmd_ctx, "%s", instruction.text);
2973 }
2974
2975 trace_data = trace_data->next;
2976 }
2977
2978 return ERROR_OK;
2979 }
2980
2981 void xscale_build_reg_cache(target_t *target)
2982 {
2983 /* get pointers to arch-specific information */
2984 armv4_5_common_t *armv4_5 = target->arch_info;
2985 xscale_common_t *xscale = armv4_5->arch_info;
2986
2987 reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
2988 xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
2989 int i;
2990 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
2991
2992 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2993 armv4_5->core_cache = (*cache_p);
2994
2995 /* register a register arch-type for XScale dbg registers only once */
2996 if (xscale_reg_arch_type == -1)
2997 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
2998
2999 (*cache_p)->next = malloc(sizeof(reg_cache_t));
3000 cache_p = &(*cache_p)->next;
3001
3002 /* fill in values for the xscale reg cache */
3003 (*cache_p)->name = "XScale registers";
3004 (*cache_p)->next = NULL;
3005 (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
3006 (*cache_p)->num_regs = num_regs;
3007
3008 for (i = 0; i < num_regs; i++)
3009 {
3010 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
3011 (*cache_p)->reg_list[i].value = calloc(4, 1);
3012 (*cache_p)->reg_list[i].dirty = 0;
3013 (*cache_p)->reg_list[i].valid = 0;
3014 (*cache_p)->reg_list[i].size = 32;
3015 (*cache_p)->reg_list[i].bitfield_desc = NULL;
3016 (*cache_p)->reg_list[i].num_bitfields = 0;
3017 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
3018 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
3019 arch_info[i] = xscale_reg_arch_info[i];
3020 arch_info[i].target = target;
3021 }
3022
3023 xscale->reg_cache = (*cache_p);
3024 }
3025
3026 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target)
3027 {
3028 return ERROR_OK;
3029 }
3030
3031 int xscale_quit(void)
3032 {
3033 return ERROR_OK;
3034 }
3035
3036 int xscale_init_arch_info(target_t *target, xscale_common_t *xscale, jtag_tap_t *tap, const char *variant)
3037 {
3038 armv4_5_common_t *armv4_5;
3039 u32 high_reset_branch, low_reset_branch;
3040 int i;
3041
3042 armv4_5 = &xscale->armv4_5_common;
3043
3044 /* store architecture specific data (none so far) */
3045 xscale->arch_info = NULL;
3046 xscale->common_magic = XSCALE_COMMON_MAGIC;
3047
3048 /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
3049 xscale->variant = strdup(variant);
3050
3051 /* prepare JTAG information for the new target */
3052 xscale->jtag_info.tap = tap;
3053
3054 xscale->jtag_info.dbgrx = 0x02;
3055 xscale->jtag_info.dbgtx = 0x10;
3056 xscale->jtag_info.dcsr = 0x09;
3057 xscale->jtag_info.ldic = 0x07;
3058
3059 if ((strcmp(xscale->variant, "pxa250") == 0) ||
3060 (strcmp(xscale->variant, "pxa255") == 0) ||
3061 (strcmp(xscale->variant, "pxa26x") == 0))
3062 {
3063 xscale->jtag_info.ir_length = 5;
3064 }
3065 else if ((strcmp(xscale->variant, "pxa27x") == 0) ||
3066 (strcmp(xscale->variant, "ixp42x") == 0) ||
3067 (strcmp(xscale->variant, "ixp45x") == 0) ||
3068 (strcmp(xscale->variant, "ixp46x") == 0))
3069 {
3070 xscale->jtag_info.ir_length = 7;
3071 }
3072
3073 /* the debug handler isn't installed (and thus not running) at this time */
3074 xscale->handler_installed = 0;
3075 xscale->handler_running = 0;
3076 xscale->handler_address = 0xfe000800;
3077
3078 /* clear the vectors we keep locally for reference */
3079 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3080 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3081
3082 /* no user-specified vectors have been configured yet */
3083 xscale->static_low_vectors_set = 0x0;
3084 xscale->static_high_vectors_set = 0x0;
3085
3086 /* calculate branches to debug handler */
3087 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3088 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
3089
3090 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3091 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3092
3093 for (i = 1; i <= 7; i++)
3094 {
3095 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3096 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3097 }
3098
3099 /* 64kB aligned region used for DCache cleaning */
3100 xscale->cache_clean_address = 0xfffe0000;
3101
3102 xscale->hold_rst = 0;
3103 xscale->external_debug_break = 0;
3104
3105 xscale->ibcr_available = 2;
3106 xscale->ibcr0_used = 0;
3107 xscale->ibcr1_used = 0;
3108
3109 xscale->dbr_available = 2;
3110 xscale->dbr0_used = 0;
3111 xscale->dbr1_used = 0;
3112
3113 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3114 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3115
3116 xscale->vector_catch = 0x1;
3117
3118 xscale->trace.capture_status = TRACE_IDLE;
3119 xscale->trace.data = NULL;
3120 xscale->trace.image = NULL;
3121 xscale->trace.buffer_enabled = 0;
3122 xscale->trace.buffer_fill = 0;
3123
3124 /* prepare ARMv4/5 specific information */
3125 armv4_5->arch_info = xscale;
3126 armv4_5->read_core_reg = xscale_read_core_reg;
3127 armv4_5->write_core_reg = xscale_write_core_reg;
3128 armv4_5->full_context = xscale_full_context;
3129
3130 armv4_5_init_arch_info(target, armv4_5);
3131
3132 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3133 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3134 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3135 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3136 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3137 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3138 xscale->armv4_5_mmu.has_tiny_pages = 1;
3139 xscale->armv4_5_mmu.mmu_enabled = 0;
3140
3141 return ERROR_OK;
3142 }
3143
3144 /* target xscale <endianness> <startup_mode> <chain_pos> <variant> */
3145 int xscale_target_create(struct target_s *target, Jim_Interp *interp)
3146 {
3147 xscale_common_t *xscale = calloc(1,sizeof(xscale_common_t));
3148
3149 xscale_init_arch_info(target, xscale, target->tap, target->variant);
3150 xscale_build_reg_cache(target);
3151
3152 return ERROR_OK;
3153 }
3154
3155 int xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3156 {
3157 target_t *target = NULL;
3158 armv4_5_common_t *armv4_5;
3159 xscale_common_t *xscale;
3160
3161 u32 handler_address;
3162
3163 if (argc < 2)
3164 {
3165 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3166 return ERROR_OK;
3167 }
3168
3169 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3170 {
3171 LOG_ERROR("no target '%s' configured", args[0]);
3172 return ERROR_FAIL;
3173 }
3174
3175 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3176 {
3177 return ERROR_FAIL;
3178 }
3179
3180 handler_address = strtoul(args[1], NULL, 0);
3181
3182 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3183 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3184 {
3185 xscale->handler_address = handler_address;
3186 }
3187 else
3188 {
3189 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3190 return ERROR_FAIL;
3191 }
3192
3193 return ERROR_OK;
3194 }
3195
3196 int xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3197 {
3198 target_t *target = NULL;
3199 armv4_5_common_t *armv4_5;
3200 xscale_common_t *xscale;
3201
3202 u32 cache_clean_address;
3203
3204 if (argc < 2)
3205 {
3206 return ERROR_COMMAND_SYNTAX_ERROR;
3207 }
3208
3209 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3210 {
3211 LOG_ERROR("no target '%s' configured", args[0]);
3212 return ERROR_FAIL;
3213 }
3214
3215 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3216 {
3217 return ERROR_FAIL;
3218 }
3219
3220 cache_clean_address = strtoul(args[1], NULL, 0);
3221
3222 if (cache_clean_address & 0xffff)
3223 {
3224 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3225 }
3226 else
3227 {
3228 xscale->cache_clean_address = cache_clean_address;
3229 }
3230
3231 return ERROR_OK;
3232 }
3233
3234 int xscale_handle_cache_info_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3235 {
3236 target_t *target = get_current_target(cmd_ctx);
3237 armv4_5_common_t *armv4_5;
3238 xscale_common_t *xscale;
3239
3240 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3241 {
3242 return ERROR_OK;
3243 }
3244
3245 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3246 }
3247
3248 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical)
3249 {
3250 armv4_5_common_t *armv4_5;
3251 xscale_common_t *xscale;
3252 int retval;
3253 int type;
3254 u32 cb;
3255 int domain;
3256 u32 ap;
3257
3258 if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
3259 {
3260 return retval;
3261 }
3262 u32 ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3263 if (type == -1)
3264 {
3265 return ret;
3266 }
3267 *physical = ret;
3268 return ERROR_OK;
3269 }
3270
3271 static int xscale_mmu(struct target_s *target, int *enabled)
3272 {
3273 armv4_5_common_t *armv4_5 = target->arch_info;
3274 xscale_common_t *xscale = armv4_5->arch_info;
3275
3276 if (target->state != TARGET_HALTED)
3277 {
3278 LOG_ERROR("Target not halted");
3279 return ERROR_TARGET_INVALID;
3280 }
3281 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3282 return ERROR_OK;
3283 }
3284
3285 int xscale_handle_mmu_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3286 {
3287 target_t *target = get_current_target(cmd_ctx);
3288 armv4_5_common_t *armv4_5;
3289 xscale_common_t *xscale;
3290
3291 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3292 {
3293 return ERROR_OK;
3294 }
3295
3296 if (target->state != TARGET_HALTED)
3297 {
3298 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3299 return ERROR_OK;
3300 }
3301
3302 if (argc >= 1)
3303 {
3304 if (strcmp("enable", args[0]) == 0)
3305 {
3306 xscale_enable_mmu_caches(target, 1, 0, 0);
3307 xscale->armv4_5_mmu.mmu_enabled = 1;
3308 }
3309 else if (strcmp("disable", args[0]) == 0)
3310 {
3311 xscale_disable_mmu_caches(target, 1, 0, 0);
3312 xscale->armv4_5_mmu.mmu_enabled = 0;
3313 }
3314 }
3315
3316 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3317
3318 return ERROR_OK;
3319 }
3320
3321 int xscale_handle_idcache_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3322 {
3323 target_t *target = get_current_target(cmd_ctx);
3324 armv4_5_common_t *armv4_5;
3325 xscale_common_t *xscale;
3326 int icache = 0, dcache = 0;
3327
3328 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3329 {
3330 return ERROR_OK;
3331 }
3332
3333 if (target->state != TARGET_HALTED)
3334 {
3335 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3336 return ERROR_OK;
3337 }
3338
3339 if (strcmp(cmd, "icache") == 0)
3340 icache = 1;
3341 else if (strcmp(cmd, "dcache") == 0)
3342 dcache = 1;
3343
3344 if (argc >= 1)
3345 {
3346 if (strcmp("enable", args[0]) == 0)
3347 {
3348 xscale_enable_mmu_caches(target, 0, dcache, icache);
3349
3350 if (icache)
3351 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3352 else if (dcache)
3353 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3354 }
3355 else if (strcmp("disable", args[0]) == 0)
3356 {
3357 xscale_disable_mmu_caches(target, 0, dcache, icache);
3358
3359 if (icache)
3360 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3361 else if (dcache)
3362 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3363 }
3364 }
3365
3366 if (icache)
3367 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3368
3369 if (dcache)
3370 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3371
3372 return ERROR_OK;
3373 }
3374
3375 int xscale_handle_vector_catch_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3376 {
3377 target_t *target = get_current_target(cmd_ctx);
3378 armv4_5_common_t *armv4_5;
3379 xscale_common_t *xscale;
3380
3381 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3382 {
3383 return ERROR_OK;
3384 }
3385
3386 if (argc < 1)
3387 {
3388 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3389 }
3390 else
3391 {
3392 xscale->vector_catch = strtoul(args[0], NULL, 0);
3393 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3394 xscale_write_dcsr(target, -1, -1);
3395 }
3396
3397 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3398
3399 return ERROR_OK;
3400 }
3401
3402
3403 int xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3404 {
3405 target_t *target = get_current_target(cmd_ctx);
3406 armv4_5_common_t *armv4_5;
3407 xscale_common_t *xscale;
3408 u32 dcsr_value;
3409
3410 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3411 {
3412 return ERROR_OK;
3413 }
3414
3415 if (target->state != TARGET_HALTED)
3416 {
3417 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3418 return ERROR_OK;
3419 }
3420
3421 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3422 {
3423 xscale_trace_data_t *td, *next_td;
3424 xscale->trace.buffer_enabled = 1;
3425
3426 /* free old trace data */
3427 td = xscale->trace.data;
3428 while (td)
3429 {
3430 next_td = td->next;
3431
3432 if (td->entries)
3433 free(td->entries);
3434 free(td);
3435 td = next_td;
3436 }
3437 xscale->trace.data = NULL;
3438 }
3439 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3440 {
3441 xscale->trace.buffer_enabled = 0;
3442 }
3443
3444 if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
3445 {
3446 if (argc >= 3)
3447 xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
3448 else
3449 xscale->trace.buffer_fill = 1;
3450 }
3451 else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
3452 {
3453 xscale->trace.buffer_fill = -1;
3454 }
3455
3456 if (xscale->trace.buffer_enabled)
3457 {
3458 /* if we enable the trace buffer while the target is halted we know
3459 * the address of the first instruction */
3460 xscale->trace.pc_ok = 1;
3461 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3462 }
3463 else
3464 {
3465 /* otherwise the address is unknown, and we have no known good PC */
3466 xscale->trace.pc_ok = 0;
3467 }
3468
3469 command_print(cmd_ctx, "trace buffer %s (%s)",
3470 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3471 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3472
3473 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3474 if (xscale->trace.buffer_fill >= 0)
3475 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3476 else
3477 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3478
3479 return ERROR_OK;
3480 }
3481
3482 int xscale_handle_trace_image_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3483 {
3484 target_t *target;
3485 armv4_5_common_t *armv4_5;
3486 xscale_common_t *xscale;
3487
3488 if (argc < 1)
3489 {
3490 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3491 return ERROR_OK;
3492 }
3493
3494 target = get_current_target(cmd_ctx);
3495
3496 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3497 {
3498 return ERROR_OK;
3499 }
3500
3501 if (xscale->trace.image)
3502 {
3503 image_close(xscale->trace.image);
3504 free(xscale->trace.image);
3505 command_print(cmd_ctx, "previously loaded image found and closed");
3506 }
3507
3508 xscale->trace.image = malloc(sizeof(image_t));
3509 xscale->trace.image->base_address_set = 0;
3510 xscale->trace.image->start_address_set = 0;
3511
3512 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3513 if (argc >= 2)
3514 {
3515 xscale->trace.image->base_address_set = 1;
3516 xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
3517 }
3518 else
3519 {
3520 xscale->trace.image->base_address_set = 0;
3521 }
3522
3523 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
3524 {
3525 free(xscale->trace.image);
3526 xscale->trace.image = NULL;
3527 return ERROR_OK;
3528 }
3529
3530 return ERROR_OK;
3531 }
3532
3533 int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3534 {
3535 target_t *target = get_current_target(cmd_ctx);
3536 armv4_5_common_t *armv4_5;
3537 xscale_common_t *xscale;
3538 xscale_trace_data_t *trace_data;
3539 fileio_t file;
3540
3541 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3542 {
3543 return ERROR_OK;
3544 }
3545
3546 if (target->state != TARGET_HALTED)
3547 {
3548 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3549 return ERROR_OK;
3550 }
3551
3552 if (argc < 1)
3553 {
3554 command_print(cmd_ctx, "usage: xscale dump_trace <file>");
3555 return ERROR_OK;
3556 }
3557
3558 trace_data = xscale->trace.data;
3559
3560 if (!trace_data)