openocd.git: src/target/xscale.c (commit: "Duane Ellis: fix warnings")
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * This program is free software; you can redistribute it and/or modify *
9 * it under the terms of the GNU General Public License as published by *
10 * the Free Software Foundation; either version 2 of the License, or *
11 * (at your option) any later version. *
12 * *
13 * This program is distributed in the hope that it will be useful, *
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
16 * GNU General Public License for more details. *
17 * *
18 * You should have received a copy of the GNU General Public License *
19 * along with this program; if not, write to the *
20 * Free Software Foundation, Inc., *
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
22 ***************************************************************************/
23 #ifdef HAVE_CONFIG_H
24 #include "config.h"
25 #endif
26
27 #include "replacements.h"
28
29 #include "xscale.h"
30
31 #include "arm7_9_common.h"
32 #include "register.h"
33 #include "target.h"
34 #include "armv4_5.h"
35 #include "arm_simulator.h"
36 #include "arm_disassembler.h"
37 #include "log.h"
38 #include "jtag.h"
39 #include "binarybuffer.h"
40 #include "time_support.h"
41 #include "breakpoints.h"
42 #include "fileio.h"
43
44 #include <stdlib.h>
45 #include <string.h>
46
47 #include <sys/types.h>
48 #include <unistd.h>
49 #include <errno.h>
50
51
52 /* cli handling */
53 int xscale_register_commands(struct command_context_s *cmd_ctx);
54
55 /* forward declarations */
56 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target);
57 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target);
58 int xscale_quit(void);
59
60 int xscale_arch_state(struct target_s *target);
61 int xscale_poll(target_t *target);
62 int xscale_halt(target_t *target);
63 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution);
64 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints);
65 int xscale_debug_entry(target_t *target);
66 int xscale_restore_context(target_t *target);
67
68 int xscale_assert_reset(target_t *target);
69 int xscale_deassert_reset(target_t *target);
70 int xscale_soft_reset_halt(struct target_s *target);
71
72 int xscale_set_reg_u32(reg_t *reg, u32 value);
73
74 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode);
75 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value);
76
77 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
78 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
79 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer);
80
81 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
82 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
83 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
84 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
85 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
86 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
87 void xscale_enable_watchpoints(struct target_s *target);
88 void xscale_enable_breakpoints(struct target_s *target);
89 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical);
90 static int xscale_mmu(struct target_s *target, int *enabled);
91
92 int xscale_read_trace(target_t *target);
93
94 target_type_t xscale_target =
95 {
96 .name = "xscale",
97
98 .poll = xscale_poll,
99 .arch_state = xscale_arch_state,
100
101 .target_request_data = NULL,
102
103 .halt = xscale_halt,
104 .resume = xscale_resume,
105 .step = xscale_step,
106
107 .assert_reset = xscale_assert_reset,
108 .deassert_reset = xscale_deassert_reset,
109 .soft_reset_halt = xscale_soft_reset_halt,
110
111 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
112
113 .read_memory = xscale_read_memory,
114 .write_memory = xscale_write_memory,
115 .bulk_write_memory = xscale_bulk_write_memory,
116 .checksum_memory = arm7_9_checksum_memory,
117 .blank_check_memory = arm7_9_blank_check_memory,
118
119 .run_algorithm = armv4_5_run_algorithm,
120
121 .add_breakpoint = xscale_add_breakpoint,
122 .remove_breakpoint = xscale_remove_breakpoint,
123 .add_watchpoint = xscale_add_watchpoint,
124 .remove_watchpoint = xscale_remove_watchpoint,
125
126 .register_commands = xscale_register_commands,
127 .target_command = xscale_target_command,
128 .init_target = xscale_init_target,
129 .quit = xscale_quit,
130
131 .virt2phys = xscale_virt2phys,
132 .mmu = xscale_mmu
133 };
134
135 char* xscale_reg_list[] =
136 {
137 "XSCALE_MAINID", /* 0 */
138 "XSCALE_CACHETYPE",
139 "XSCALE_CTRL",
140 "XSCALE_AUXCTRL",
141 "XSCALE_TTB",
142 "XSCALE_DAC",
143 "XSCALE_FSR",
144 "XSCALE_FAR",
145 "XSCALE_PID",
146 "XSCALE_CPACCESS",
147 "XSCALE_IBCR0", /* 10 */
148 "XSCALE_IBCR1",
149 "XSCALE_DBR0",
150 "XSCALE_DBR1",
151 "XSCALE_DBCON",
152 "XSCALE_TBREG",
153 "XSCALE_CHKPT0",
154 "XSCALE_CHKPT1",
155 "XSCALE_DCSR",
156 "XSCALE_TX",
157 "XSCALE_RX", /* 20 */
158 "XSCALE_TXRXCTRL",
159 };
160
161 xscale_reg_t xscale_reg_arch_info[] =
162 {
163 {XSCALE_MAINID, NULL},
164 {XSCALE_CACHETYPE, NULL},
165 {XSCALE_CTRL, NULL},
166 {XSCALE_AUXCTRL, NULL},
167 {XSCALE_TTB, NULL},
168 {XSCALE_DAC, NULL},
169 {XSCALE_FSR, NULL},
170 {XSCALE_FAR, NULL},
171 {XSCALE_PID, NULL},
172 {XSCALE_CPACCESS, NULL},
173 {XSCALE_IBCR0, NULL},
174 {XSCALE_IBCR1, NULL},
175 {XSCALE_DBR0, NULL},
176 {XSCALE_DBR1, NULL},
177 {XSCALE_DBCON, NULL},
178 {XSCALE_TBREG, NULL},
179 {XSCALE_CHKPT0, NULL},
180 {XSCALE_CHKPT1, NULL},
181 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
182 {-1, NULL}, /* TX accessed via JTAG */
183 {-1, NULL}, /* RX accessed via JTAG */
184 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
185 };
186
187 int xscale_reg_arch_type = -1;
188
189 int xscale_get_reg(reg_t *reg);
190 int xscale_set_reg(reg_t *reg, u8 *buf);
191
192 int xscale_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
193 {
194 armv4_5_common_t *armv4_5 = target->arch_info;
195 xscale_common_t *xscale = armv4_5->arch_info;
196
197 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
198 {
199 LOG_ERROR("target isn't an XScale target");
200 return -1;
201 }
202
203 if (xscale->common_magic != XSCALE_COMMON_MAGIC)
204 {
205 LOG_ERROR("target isn't an XScale target");
206 return -1;
207 }
208
209 *armv4_5_p = armv4_5;
210 *xscale_p = xscale;
211
212 return ERROR_OK;
213 }
214
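/* Set the XScale TAP instruction register to new_instr. The IR scan is
 * skipped when the requested instruction is already loaded, so repeated
 * calls are cheap. */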
215 int xscale_jtag_set_instr(int chain_pos, u32 new_instr)
216 {
217 jtag_device_t *device = jtag_get_device(chain_pos);
218
219 if (buf_get_u32(device->cur_instr, 0, device->ir_length) != new_instr)
220 {
221 scan_field_t field;
222
223 field.device = chain_pos;
224 field.num_bits = device->ir_length;
225 field.out_value = calloc(CEIL(field.num_bits, 8), 1);
226 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
227 field.out_mask = NULL;
228 field.in_value = NULL;
229 jtag_set_check_value(&field, device->expected, device->expected_mask, NULL);
230
231 jtag_add_ir_scan(1, &field, -1);
232
233 free(field.out_value);
234 }
235
236 return ERROR_OK;
237 }
238
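/* Read the DCSR through the DCSR JTAG instruction. The DR scan is 36 bits
 * wide: a 3-bit control field carrying hold_rst and external_debug_break,
 * the 32-bit DCSR value, and one trailing bit. A second scan then writes
 * the just-read value back (see the note below). */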
239 int xscale_read_dcsr(target_t *target)
240 {
241 armv4_5_common_t *armv4_5 = target->arch_info;
242 xscale_common_t *xscale = armv4_5->arch_info;
243
244 int retval;
245
246 scan_field_t fields[3];
247 u8 field0 = 0x0;
248 u8 field0_check_value = 0x2;
249 u8 field0_check_mask = 0x7;
250 u8 field2 = 0x0;
251 u8 field2_check_value = 0x0;
252 u8 field2_check_mask = 0x1;
253
254 jtag_add_end_state(TAP_PD);
255 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
256
257 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
258 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
259
260 fields[0].device = xscale->jtag_info.chain_pos;
261 fields[0].num_bits = 3;
262 fields[0].out_value = &field0;
263 fields[0].out_mask = NULL;
264 fields[0].in_value = NULL;
265 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
266
267 fields[1].device = xscale->jtag_info.chain_pos;
268 fields[1].num_bits = 32;
269 fields[1].out_value = NULL;
270 fields[1].out_mask = NULL;
271 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
272 fields[1].in_handler = NULL;
273 fields[1].in_handler_priv = NULL;
274 fields[1].in_check_value = NULL;
275 fields[1].in_check_mask = NULL;
276
277 fields[2].device = xscale->jtag_info.chain_pos;
278 fields[2].num_bits = 1;
279 fields[2].out_value = &field2;
280 fields[2].out_mask = NULL;
281 fields[2].in_value = NULL;
282 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
283
284 jtag_add_dr_scan(3, fields, -1);
285
286 if ((retval = jtag_execute_queue()) != ERROR_OK)
287 {
288 LOG_ERROR("JTAG error while reading DCSR");
289 return retval;
290 }
291
292 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
293 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
294
295 /* write the register with the value we just read
296 * on this second pass, only the first bit of field0 is guaranteed to be 0
297 */
298 field0_check_mask = 0x1;
299 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
300 fields[1].in_value = NULL;
301
302 jtag_add_end_state(TAP_RTI);
303
304 jtag_add_dr_scan(3, fields, -1);
305
306 /* DANGER!!! this must be here. It will make sure that the arguments
307 * to jtag_set_check_value() do not go out of scope! */
308 return jtag_execute_queue();
309 }
310
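/* Collect num_words 32-bit words sent by the debug handler via DBGTX.
 * Each DR scan returns a ready flag in bit 0 of the 3-bit control field;
 * words scanned while that flag is clear are dropped and re-read until
 * everything has arrived, giving up after 1000 empty rounds. */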
311 int xscale_receive(target_t *target, u32 *buffer, int num_words)
312 {
313 if (num_words==0)
314 return ERROR_INVALID_ARGUMENTS;
315
316 int retval=ERROR_OK;
317 armv4_5_common_t *armv4_5 = target->arch_info;
318 xscale_common_t *xscale = armv4_5->arch_info;
319
320 enum tap_state path[3];
321 scan_field_t fields[3];
322
323 u8 *field0 = malloc(num_words * 1);
324 u8 field0_check_value = 0x2;
325 u8 field0_check_mask = 0x6;
326 u32 *field1 = malloc(num_words * 4);
327 u8 field2_check_value = 0x0;
328 u8 field2_check_mask = 0x1;
329 int words_done = 0;
330 int words_scheduled = 0;
331
332 int i;
333
334 path[0] = TAP_SDS;
335 path[1] = TAP_CD;
336 path[2] = TAP_SD;
337
338 fields[0].device = xscale->jtag_info.chain_pos;
339 fields[0].num_bits = 3;
340 fields[0].out_value = NULL;
341 fields[0].out_mask = NULL;
342 fields[0].in_value = NULL;
343 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
344
345 fields[1].device = xscale->jtag_info.chain_pos;
346 fields[1].num_bits = 32;
347 fields[1].out_value = NULL;
348 fields[1].out_mask = NULL;
349 fields[1].in_value = NULL;
350 fields[1].in_handler = NULL;
351 fields[1].in_handler_priv = NULL;
352 fields[1].in_check_value = NULL;
353 fields[1].in_check_mask = NULL;
354
355
356
357 fields[2].device = xscale->jtag_info.chain_pos;
358 fields[2].num_bits = 1;
359 fields[2].out_value = NULL;
360 fields[2].out_mask = NULL;
361 fields[2].in_value = NULL;
362 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
363
364 jtag_add_end_state(TAP_RTI);
365 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
366 jtag_add_runtest(1, -1); /* ensures that we're in the TAP_RTI state as the above could be a no-op */
367
368 /* repeat until all words have been collected */
369 int attempts=0;
370 while (words_done < num_words)
371 {
372 /* schedule reads */
373 words_scheduled = 0;
374 for (i = words_done; i < num_words; i++)
375 {
376 fields[0].in_value = &field0[i];
377 fields[1].in_handler = buf_to_u32_handler;
378 fields[1].in_handler_priv = (u8*)&field1[i];
379
380 jtag_add_pathmove(3, path);
381 jtag_add_dr_scan(3, fields, TAP_RTI);
382 words_scheduled++;
383 }
384
385 if ((retval = jtag_execute_queue()) != ERROR_OK)
386 {
387 LOG_ERROR("JTAG error while receiving data from debug handler");
388 break;
389 }
390
391 /* examine results */
392 for (i = words_done; i < num_words; i++)
393 {
394 if (!(field0[i] & 1)) /* check this word's ready bit, not the first word's */
395 {
396 /* move backwards if necessary */
397 int j;
398 for (j = i; j < num_words - 1; j++)
399 {
400 field0[j] = field0[j+1];
401 field1[j] = field1[j+1];
402 }
403 words_scheduled--;
404 }
405 }
406 if (words_scheduled==0)
407 {
408 if (attempts++==1000)
409 {
410 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
411 retval=ERROR_TARGET_TIMEOUT;
412 break;
413 }
414 }
415
416 words_done += words_scheduled;
417 }
418
419 for (i = 0; i < num_words; i++)
420 *(buffer++) = buf_get_u32((u8*)&field1[i], 0, 32);
421
422 free(field1);
free(field0); /* field0 is heap-allocated above as well; release it to avoid a leak */
423
424 return retval;
425 }
426
427 int xscale_read_tx(target_t *target, int consume)
428 {
429 armv4_5_common_t *armv4_5 = target->arch_info;
430 xscale_common_t *xscale = armv4_5->arch_info;
431 enum tap_state path[3];
432 enum tap_state noconsume_path[6];
433
434 int retval;
435 struct timeval timeout, now;
436
437 scan_field_t fields[3];
438 u8 field0_in = 0x0;
439 u8 field0_check_value = 0x2;
440 u8 field0_check_mask = 0x6;
441 u8 field2_check_value = 0x0;
442 u8 field2_check_mask = 0x1;
443
444 jtag_add_end_state(TAP_RTI);
445
446 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
447
448 path[0] = TAP_SDS;
449 path[1] = TAP_CD;
450 path[2] = TAP_SD;
451
452 noconsume_path[0] = TAP_SDS;
453 noconsume_path[1] = TAP_CD;
454 noconsume_path[2] = TAP_E1D;
455 noconsume_path[3] = TAP_PD;
456 noconsume_path[4] = TAP_E2D;
457 noconsume_path[5] = TAP_SD;
458
459 fields[0].device = xscale->jtag_info.chain_pos;
460 fields[0].num_bits = 3;
461 fields[0].out_value = NULL;
462 fields[0].out_mask = NULL;
463 fields[0].in_value = &field0_in;
464 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
465
466 fields[1].device = xscale->jtag_info.chain_pos;
467 fields[1].num_bits = 32;
468 fields[1].out_value = NULL;
469 fields[1].out_mask = NULL;
470 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
471 fields[1].in_handler = NULL;
472 fields[1].in_handler_priv = NULL;
473 fields[1].in_check_value = NULL;
474 fields[1].in_check_mask = NULL;
475
476
477
478 fields[2].device = xscale->jtag_info.chain_pos;
479 fields[2].num_bits = 1;
480 fields[2].out_value = NULL;
481 fields[2].out_mask = NULL;
482 fields[2].in_value = NULL;
483 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
484
485 gettimeofday(&timeout, NULL);
486 timeval_add_time(&timeout, 1, 0);
487
488 for (;;)
489 {
490 int i;
491 for (i=0; i<100; i++)
492 {
493 /* if we want to consume the register content (i.e. clear TX_READY),
494 * we have to go straight from Capture-DR to Shift-DR
495 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
496 */
497 if (consume)
498 jtag_add_pathmove(3, path);
499 else
500 {
501 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
502 }
503
504 jtag_add_dr_scan(3, fields, TAP_RTI);
505
506 if ((retval = jtag_execute_queue()) != ERROR_OK)
507 {
508 LOG_ERROR("JTAG error while reading TX");
509 return ERROR_TARGET_TIMEOUT;
510 }
511
512 gettimeofday(&now, NULL);
513 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
514 {
515 LOG_ERROR("time out reading TX register");
516 return ERROR_TARGET_TIMEOUT;
517 }
518 if (!((!(field0_in & 1)) && consume))
519 {
520 goto done;
521 }
522 }
523 LOG_DEBUG("waiting 10ms");
524 usleep(10*1000); /* avoid flooding the logs */
525 }
526 done:
527
528 if (!(field0_in & 1))
529 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
530
531 return ERROR_OK;
532 }
533
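/* Write one word to the debug handler's RX register: poll until the
 * previous word has been consumed (rx_read, bit 0 of the returned control
 * field, goes low), then scan the value in once more with rx_valid set. */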
534 int xscale_write_rx(target_t *target)
535 {
536 armv4_5_common_t *armv4_5 = target->arch_info;
537 xscale_common_t *xscale = armv4_5->arch_info;
538
539 int retval;
540 struct timeval timeout, now;
541
542 scan_field_t fields[3];
543 u8 field0_out = 0x0;
544 u8 field0_in = 0x0;
545 u8 field0_check_value = 0x2;
546 u8 field0_check_mask = 0x6;
547 u8 field2 = 0x0;
548 u8 field2_check_value = 0x0;
549 u8 field2_check_mask = 0x1;
550
551 jtag_add_end_state(TAP_RTI);
552
553 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
554
555 fields[0].device = xscale->jtag_info.chain_pos;
556 fields[0].num_bits = 3;
557 fields[0].out_value = &field0_out;
558 fields[0].out_mask = NULL;
559 fields[0].in_value = &field0_in;
560 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
561
562 fields[1].device = xscale->jtag_info.chain_pos;
563 fields[1].num_bits = 32;
564 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
565 fields[1].out_mask = NULL;
566 fields[1].in_value = NULL;
567 fields[1].in_handler = NULL;
568 fields[1].in_handler_priv = NULL;
569 fields[1].in_check_value = NULL;
570 fields[1].in_check_mask = NULL;
571
572
573
574 fields[2].device = xscale->jtag_info.chain_pos;
575 fields[2].num_bits = 1;
576 fields[2].out_value = &field2;
577 fields[2].out_mask = NULL;
578 fields[2].in_value = NULL;
579 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
580
581 gettimeofday(&timeout, NULL);
582 timeval_add_time(&timeout, 1, 0);
583
584 /* poll until rx_read is low */
585 LOG_DEBUG("polling RX");
586 for (;;)
587 {
588 int i;
589 for (i=0; i<10; i++)
590 {
591 jtag_add_dr_scan(3, fields, TAP_RTI);
592
593 if ((retval = jtag_execute_queue()) != ERROR_OK)
594 {
595 LOG_ERROR("JTAG error while writing RX");
596 return retval;
597 }
598
599 gettimeofday(&now, NULL);
600 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
601 {
602 LOG_ERROR("time out writing RX register");
603 return ERROR_TARGET_TIMEOUT;
604 }
605 if (!(field0_in & 1))
606 goto done;
607 }
608 LOG_DEBUG("waiting 10ms");
609 usleep(10*1000); /* wait 10ms to avoid flooding the logs */
610 }
611 done:
612
613 /* set rx_valid */
614 field2 = 0x1;
615 jtag_add_dr_scan(3, fields, TAP_RTI);
616
617 if ((retval = jtag_execute_queue()) != ERROR_OK)
618 {
619 LOG_ERROR("JTAG error while writing RX");
620 return retval;
621 }
622
623 return ERROR_OK;
624 }
625
626 /* send count elements of size bytes each to the debug handler */
627 int xscale_send(target_t *target, u8 *buffer, int count, int size)
628 {
629 armv4_5_common_t *armv4_5 = target->arch_info;
630 xscale_common_t *xscale = armv4_5->arch_info;
631 u32 t[3];
632 int bits[3];
633
634 int retval;
635
636 int done_count = 0;
637
638 jtag_add_end_state(TAP_RTI);
639
640 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
641
642 bits[0]=3;
643 t[0]=0;
644 bits[1]=32;
645 t[2]=1;
646 bits[2]=1;
647 int endianness = target->endianness;
648 while (done_count++ < count)
649 {
650 switch (size)
651 {
652 case 4:
653 if (endianness == TARGET_LITTLE_ENDIAN)
654 {
655 t[1]=le_to_h_u32(buffer);
656 } else
657 {
658 t[1]=be_to_h_u32(buffer);
659 }
660 break;
661 case 2:
662 if (endianness == TARGET_LITTLE_ENDIAN)
663 {
664 t[1]=le_to_h_u16(buffer);
665 } else
666 {
667 t[1]=be_to_h_u16(buffer);
668 }
669 break;
670 case 1:
671 t[1]=buffer[0];
672 break;
673 default:
674 LOG_ERROR("BUG: size neither 4, 2 nor 1");
675 exit(-1);
676 }
677 jtag_add_dr_out(xscale->jtag_info.chain_pos,
678 3,
679 bits,
680 t,
681 TAP_RTI);
682 buffer += size;
683 }
684
685 if ((retval = jtag_execute_queue()) != ERROR_OK)
686 {
687 LOG_ERROR("JTAG error while sending data to debug handler");
688 return retval;
689 }
690
691 return ERROR_OK;
692 }
693
694 int xscale_send_u32(target_t *target, u32 value)
695 {
696 armv4_5_common_t *armv4_5 = target->arch_info;
697 xscale_common_t *xscale = armv4_5->arch_info;
698
699 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
700 return xscale_write_rx(target);
701 }
702
703 int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
704 {
705 armv4_5_common_t *armv4_5 = target->arch_info;
706 xscale_common_t *xscale = armv4_5->arch_info;
707
708 int retval;
709
710 scan_field_t fields[3];
711 u8 field0 = 0x0;
712 u8 field0_check_value = 0x2;
713 u8 field0_check_mask = 0x7;
714 u8 field2 = 0x0;
715 u8 field2_check_value = 0x0;
716 u8 field2_check_mask = 0x1;
717
718 if (hold_rst != -1)
719 xscale->hold_rst = hold_rst;
720
721 if (ext_dbg_brk != -1)
722 xscale->external_debug_break = ext_dbg_brk;
723
724 jtag_add_end_state(TAP_RTI);
725 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
726
727 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
728 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
729
730 fields[0].device = xscale->jtag_info.chain_pos;
731 fields[0].num_bits = 3;
732 fields[0].out_value = &field0;
733 fields[0].out_mask = NULL;
734 fields[0].in_value = NULL;
735 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
736
737 fields[1].device = xscale->jtag_info.chain_pos;
738 fields[1].num_bits = 32;
739 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
740 fields[1].out_mask = NULL;
741 fields[1].in_value = NULL;
742 fields[1].in_handler = NULL;
743 fields[1].in_handler_priv = NULL;
744 fields[1].in_check_value = NULL;
745 fields[1].in_check_mask = NULL;
746
747
748
749 fields[2].device = xscale->jtag_info.chain_pos;
750 fields[2].num_bits = 1;
751 fields[2].out_value = &field2;
752 fields[2].out_mask = NULL;
753 fields[2].in_value = NULL;
754 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
755
756 jtag_add_dr_scan(3, fields, -1);
757
758 if ((retval = jtag_execute_queue()) != ERROR_OK)
759 {
760 LOG_ERROR("JTAG error while writing DCSR");
761 return retval;
762 }
763
764 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
765 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
766
767 return ERROR_OK;
768 }
769
770 /* parity of a 32-bit word: returns 0 if the number of set bits is even, 1 if odd */
771 unsigned int parity (unsigned int v)
772 {
773 unsigned int ov = v;
774 v ^= v >> 16;
775 v ^= v >> 8;
776 v ^= v >> 4;
777 v &= 0xf;
778 LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
779 return (0x6996 >> v) & 1;
780 }
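/* Worked example (added for illustration): parity(0xA): folding leaves
 * v = 0xA, and (0x6996 >> 0xA) & 1 == 0, i.e. even parity (0xA has two
 * set bits). 0x6996 acts as a 16-entry lookup table: bit n holds the
 * parity of the 4-bit value n. */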
781
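/* Load one 8-word line into the (mini) instruction cache through the LDIC
 * JTAG function: the first DR scan carries a 6-bit command and the 27-bit
 * virtual address of the cache line; eight further scans each carry one
 * 32-bit data word plus its parity bit. */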
782 int xscale_load_ic(target_t *target, int mini, u32 va, u32 buffer[8])
783 {
784 armv4_5_common_t *armv4_5 = target->arch_info;
785 xscale_common_t *xscale = armv4_5->arch_info;
786 u8 packet[4];
787 u8 cmd;
788 int word;
789
790 scan_field_t fields[2];
791
792 LOG_DEBUG("loading miniIC at 0x%8.8x", va);
793
794 jtag_add_end_state(TAP_RTI);
795 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
796
797 /* CMD is b010 for Main IC and b011 for Mini IC */
798 if (mini)
799 buf_set_u32(&cmd, 0, 3, 0x3);
800 else
801 buf_set_u32(&cmd, 0, 3, 0x2);
802
803 buf_set_u32(&cmd, 3, 3, 0x0);
804
805 /* virtual address of desired cache line */
806 buf_set_u32(packet, 0, 27, va >> 5);
807
808 fields[0].device = xscale->jtag_info.chain_pos;
809 fields[0].num_bits = 6;
810 fields[0].out_value = &cmd;
811 fields[0].out_mask = NULL;
812 fields[0].in_value = NULL;
813 fields[0].in_check_value = NULL;
814 fields[0].in_check_mask = NULL;
815 fields[0].in_handler = NULL;
816 fields[0].in_handler_priv = NULL;
817
818 fields[1].device = xscale->jtag_info.chain_pos;
819 fields[1].num_bits = 27;
820 fields[1].out_value = packet;
821 fields[1].out_mask = NULL;
822 fields[1].in_value = NULL;
823 fields[1].in_check_value = NULL;
824 fields[1].in_check_mask = NULL;
825 fields[1].in_handler = NULL;
826 fields[1].in_handler_priv = NULL;
827
828 jtag_add_dr_scan(2, fields, -1);
829
830 fields[0].num_bits = 32;
831 fields[0].out_value = packet;
832
833 fields[1].num_bits = 1;
834 fields[1].out_value = &cmd;
835
836 for (word = 0; word < 8; word++)
837 {
838 buf_set_u32(packet, 0, 32, buffer[word]);
839 cmd = parity(*((u32*)packet));
840 jtag_add_dr_scan(2, fields, -1);
841 }
842
843 jtag_execute_queue();
844
845 return ERROR_OK;
846 }
847
848 int xscale_invalidate_ic_line(target_t *target, u32 va)
849 {
850 armv4_5_common_t *armv4_5 = target->arch_info;
851 xscale_common_t *xscale = armv4_5->arch_info;
852 u8 packet[4];
853 u8 cmd;
854
855 scan_field_t fields[2];
856
857 jtag_add_end_state(TAP_RTI);
858 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
859
860 /* CMD for invalidate IC line b000, bits [6:4] b000 */
861 buf_set_u32(&cmd, 0, 6, 0x0);
862
863 /* virtual address of desired cache line */
864 buf_set_u32(packet, 0, 27, va >> 5);
865
866 fields[0].device = xscale->jtag_info.chain_pos;
867 fields[0].num_bits = 6;
868 fields[0].out_value = &cmd;
869 fields[0].out_mask = NULL;
870 fields[0].in_value = NULL;
871 fields[0].in_check_value = NULL;
872 fields[0].in_check_mask = NULL;
873 fields[0].in_handler = NULL;
874 fields[0].in_handler_priv = NULL;
875
876 fields[1].device = xscale->jtag_info.chain_pos;
877 fields[1].num_bits = 27;
878 fields[1].out_value = packet;
879 fields[1].out_mask = NULL;
880 fields[1].in_value = NULL;
881 fields[1].in_check_value = NULL;
882 fields[1].in_check_mask = NULL;
883 fields[1].in_handler = NULL;
884 fields[1].in_handler_priv = NULL;
885
886 jtag_add_dr_scan(2, fields, -1);
887
888 return ERROR_OK;
889 }
890
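/* Refresh the cached low (0x0) and high (0xffff0000) exception vectors,
 * applying any static overrides configured by the user, patch both reset
 * vectors with a branch to the debug handler, and load the two vector
 * lines into the mini instruction cache. */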
891 int xscale_update_vectors(target_t *target)
892 {
893 armv4_5_common_t *armv4_5 = target->arch_info;
894 xscale_common_t *xscale = armv4_5->arch_info;
895 int i;
896 int retval;
897
898 u32 low_reset_branch, high_reset_branch;
899
900 for (i = 1; i < 8; i++)
901 {
902 /* if there's a static vector specified for this exception, override */
903 if (xscale->static_high_vectors_set & (1 << i))
904 {
905 xscale->high_vectors[i] = xscale->static_high_vectors[i];
906 }
907 else
908 {
909 retval=target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
910 if (retval == ERROR_TARGET_TIMEOUT)
911 return retval;
912 if (retval!=ERROR_OK)
913 {
914 /* Some of these reads will fail as part of normal execution */
915 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
916 }
917 }
918 }
919
920 for (i = 1; i < 8; i++)
921 {
922 if (xscale->static_low_vectors_set & (1 << i))
923 {
924 xscale->low_vectors[i] = xscale->static_low_vectors[i];
925 }
926 else
927 {
928 retval=target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
929 if (retval == ERROR_TARGET_TIMEOUT)
930 return retval;
931 if (retval!=ERROR_OK)
932 {
933 /* Some of these reads will fail as part of normal execution */
934 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
935 }
936 }
937 }
938
939 /* calculate branches to debug handler */
940 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
941 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
942
943 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
944 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
945
946 /* invalidate and load exception vectors in mini i-cache */
947 xscale_invalidate_ic_line(target, 0x0);
948 xscale_invalidate_ic_line(target, 0xffff0000);
949
950 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
951 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
952
953 return ERROR_OK;
954 }
955
956 int xscale_arch_state(struct target_s *target)
957 {
958 armv4_5_common_t *armv4_5 = target->arch_info;
959 xscale_common_t *xscale = armv4_5->arch_info;
960
961 char *state[] =
962 {
963 "disabled", "enabled"
964 };
965
966 char *arch_dbg_reason[] =
967 {
968 "", "\n(processor reset)", "\n(trace buffer full)"
969 };
970
971 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
972 {
973 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
974 exit(-1);
975 }
976
977 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
978 "cpsr: 0x%8.8x pc: 0x%8.8x\n"
979 "MMU: %s, D-Cache: %s, I-Cache: %s"
980 "%s",
981 armv4_5_state_strings[armv4_5->core_state],
982 target_debug_reason_strings[target->debug_reason],
983 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
984 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
985 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
986 state[xscale->armv4_5_mmu.mmu_enabled],
987 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
988 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
989 arch_dbg_reason[xscale->arch_debug_reason]);
990
991 return ERROR_OK;
992 }
993
994 int xscale_poll(target_t *target)
995 {
996 int retval=ERROR_OK;
997 armv4_5_common_t *armv4_5 = target->arch_info;
998 xscale_common_t *xscale = armv4_5->arch_info;
999
1000 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
1001 {
1002 enum target_state previous_state = target->state;
1003 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
1004 {
1005
1006 /* there's data to read from the TX register; we entered debug state */
1007 xscale->handler_running = 1;
1008
1009 target->state = TARGET_HALTED;
1010
1011 /* process debug entry, fetching current mode regs */
1012 retval = xscale_debug_entry(target);
1013 }
1014 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1015 {
1016 LOG_USER("error while polling TX register, reset CPU");
1017 /* here we "lie" so GDB won't get stuck and a reset can be performed */
1018 target->state = TARGET_HALTED;
1019 }
1020
1021 /* debug_entry could have overwritten target state (i.e. immediate resume)
1022 * don't signal event handlers in that case
1023 */
1024 if (target->state != TARGET_HALTED)
1025 return ERROR_OK;
1026
1027 /* if target was running, signal that we halted
1028 * otherwise we reentered from debug execution */
1029 if (previous_state == TARGET_RUNNING)
1030 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1031 else
1032 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
1033 }
1034
1035 return retval;
1036 }
1037
1038 int xscale_debug_entry(target_t *target)
1039 {
1040 armv4_5_common_t *armv4_5 = target->arch_info;
1041 xscale_common_t *xscale = armv4_5->arch_info;
1042 u32 pc;
1043 u32 buffer[10];
1044 int i;
1045 int retval;
1046
1047 u32 moe;
1048
1049 /* clear external dbg break (will be written on next DCSR read) */
1050 xscale->external_debug_break = 0;
1051 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1052 return retval;
1053
1054 /* get r0, pc, r1 to r7 and cpsr */
1055 if ((retval=xscale_receive(target, buffer, 10))!=ERROR_OK)
1056 return retval;
1057
1058 /* move r0 from buffer to register cache */
1059 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
1060 armv4_5->core_cache->reg_list[15].dirty = 1;
1061 armv4_5->core_cache->reg_list[15].valid = 1;
1062 LOG_DEBUG("r0: 0x%8.8x", buffer[0]);
1063
1064 /* move pc from buffer to register cache */
1065 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
1066 armv4_5->core_cache->reg_list[15].dirty = 1;
1067 armv4_5->core_cache->reg_list[15].valid = 1;
1068 LOG_DEBUG("pc: 0x%8.8x", buffer[1]);
1069
1070 /* move data from buffer to register cache */
1071 for (i = 1; i <= 7; i++)
1072 {
1073 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
1074 armv4_5->core_cache->reg_list[i].dirty = 1;
1075 armv4_5->core_cache->reg_list[i].valid = 1;
1076 LOG_DEBUG("r%i: 0x%8.8x", i, buffer[i + 1]);
1077 }
1078
1079 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
1080 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
1081 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
1082 LOG_DEBUG("cpsr: 0x%8.8x", buffer[9]);
1083
1084 armv4_5->core_mode = buffer[9] & 0x1f;
1085 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
1086 {
1087 target->state = TARGET_UNKNOWN;
1088 LOG_ERROR("cpsr contains invalid mode value - communication failure");
1089 return ERROR_TARGET_FAILURE;
1090 }
1091 LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
1092
1093 if (buffer[9] & 0x20)
1094 armv4_5->core_state = ARMV4_5_STATE_THUMB;
1095 else
1096 armv4_5->core_state = ARMV4_5_STATE_ARM;
1097
1098
1099 if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
1100 return ERROR_FAIL;
1101
1102 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1103 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
1104 {
1105 xscale_receive(target, buffer, 8);
1106 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1107 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
1108 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
1109 }
1110 else
1111 {
1112 /* r8 to r14, but no spsr */
1113 xscale_receive(target, buffer, 7);
1114 }
1115
1116 /* move data from buffer to register cache */
1117 for (i = 8; i <= 14; i++)
1118 {
1119 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
1120 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
1121 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
1122 }
1123
1124 /* examine debug reason */
1125 xscale_read_dcsr(target);
1126 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1127
1128 /* stored PC (for calculating fixup) */
1129 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1130
1131 switch (moe)
1132 {
1133 case 0x0: /* Processor reset */
1134 target->debug_reason = DBG_REASON_DBGRQ;
1135 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1136 pc -= 4;
1137 break;
1138 case 0x1: /* Instruction breakpoint hit */
1139 target->debug_reason = DBG_REASON_BREAKPOINT;
1140 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1141 pc -= 4;
1142 break;
1143 case 0x2: /* Data breakpoint hit */
1144 target->debug_reason = DBG_REASON_WATCHPOINT;
1145 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1146 pc -= 4;
1147 break;
1148 case 0x3: /* BKPT instruction executed */
1149 target->debug_reason = DBG_REASON_BREAKPOINT;
1150 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1151 pc -= 4;
1152 break;
1153 case 0x4: /* Ext. debug event */
1154 target->debug_reason = DBG_REASON_DBGRQ;
1155 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1156 pc -= 4;
1157 break;
1158 case 0x5: /* Vector trap occurred */
1159 target->debug_reason = DBG_REASON_BREAKPOINT;
1160 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1161 pc -= 4;
1162 break;
1163 case 0x6: /* Trace buffer full break */
1164 target->debug_reason = DBG_REASON_DBGRQ;
1165 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1166 pc -= 4;
1167 break;
1168 case 0x7: /* Reserved */
1169 default:
1170 LOG_ERROR("Method of Entry is 'Reserved'");
1171 exit(-1);
1172 break;
1173 }
1174
1175 /* apply PC fixup */
1176 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1177
1178 /* on the first debug entry, identify cache type */
1179 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1180 {
1181 u32 cache_type_reg;
1182
1183 /* read cp15 cache type register */
1184 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1185 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1186
1187 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1188 }
1189
1190 /* examine MMU and Cache settings */
1191 /* read cp15 control register */
1192 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1193 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1194 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1195 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1196 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1197
1198 /* tracing enabled, read collected trace data */
1199 if (xscale->trace.buffer_enabled)
1200 {
1201 xscale_read_trace(target);
1202 xscale->trace.buffer_fill--;
1203
1204 /* resume if we're still collecting trace data */
1205 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1206 && (xscale->trace.buffer_fill > 0))
1207 {
1208 xscale_resume(target, 1, 0x0, 1, 0);
1209 }
1210 else
1211 {
1212 xscale->trace.buffer_enabled = 0;
1213 }
1214 }
1215
1216 return ERROR_OK;
1217 }
1218
1219 int xscale_halt(target_t *target)
1220 {
1221 armv4_5_common_t *armv4_5 = target->arch_info;
1222 xscale_common_t *xscale = armv4_5->arch_info;
1223
1224 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1225
1226 if (target->state == TARGET_HALTED)
1227 {
1228 LOG_DEBUG("target was already halted");
1229 return ERROR_OK;
1230 }
1231 else if (target->state == TARGET_UNKNOWN)
1232 {
1233 /* this must not happen for an XScale target */
1234 LOG_ERROR("target was in unknown state when halt was requested");
1235 return ERROR_TARGET_INVALID;
1236 }
1237 else if (target->state == TARGET_RESET)
1238 {
1239 LOG_DEBUG("target->state == TARGET_RESET");
1240 }
1241 else
1242 {
1243 /* assert external dbg break */
1244 xscale->external_debug_break = 1;
1245 xscale_read_dcsr(target);
1246
1247 target->debug_reason = DBG_REASON_DBGRQ;
1248 }
1249
1250 return ERROR_OK;
1251 }
1252
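/* Single-stepping is emulated here by programming instruction breakpoint
 * register IBCR0 with the address of the next instruction; the XScale
 * debug unit itself has no dedicated single-step mode (inference from the
 * way IBCR0 is used in this file). */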
1253 int xscale_enable_single_step(struct target_s *target, u32 next_pc)
1254 {
1255 armv4_5_common_t *armv4_5 = target->arch_info;
1256 xscale_common_t *xscale= armv4_5->arch_info;
1257 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1258
1259 if (xscale->ibcr0_used)
1260 {
1261 breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1262
1263 if (ibcr0_bp)
1264 {
1265 xscale_unset_breakpoint(target, ibcr0_bp);
1266 }
1267 else
1268 {
1269 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1270 exit(-1);
1271 }
1272 }
1273
1274 xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1275
1276 return ERROR_OK;
1277 }
1278
1279 int xscale_disable_single_step(struct target_s *target)
1280 {
1281 armv4_5_common_t *armv4_5 = target->arch_info;
1282 xscale_common_t *xscale= armv4_5->arch_info;
1283 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1284
1285 xscale_set_reg_u32(ibcr0, 0x0);
1286
1287 return ERROR_OK;
1288 }
1289
1290 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
1291 {
1292 armv4_5_common_t *armv4_5 = target->arch_info;
1293 xscale_common_t *xscale= armv4_5->arch_info;
1294 breakpoint_t *breakpoint = target->breakpoints;
1295
1296 u32 current_pc;
1297
1298 int retval;
1299 int i;
1300
1301 LOG_DEBUG("-");
1302
1303 if (target->state != TARGET_HALTED)
1304 {
1305 LOG_WARNING("target not halted");
1306 return ERROR_TARGET_NOT_HALTED;
1307 }
1308
1309 if (!debug_execution)
1310 {
1311 target_free_all_working_areas(target);
1312 }
1313
1314 /* update vector tables */
1315 if ((retval=xscale_update_vectors(target))!=ERROR_OK)
1316 return retval;
1317
1318 /* current = 1: continue on current pc, otherwise continue at <address> */
1319 if (!current)
1320 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1321
1322 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1323
1324 /* if we're at the reset vector, we have to simulate the branch */
1325 if (current_pc == 0x0)
1326 {
1327 arm_simulate_step(target, NULL);
1328 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1329 }
1330
1331 /* the front-end may request us not to handle breakpoints */
1332 if (handle_breakpoints)
1333 {
1334 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1335 {
1336 u32 next_pc;
1337
1338 /* there's a breakpoint at the current PC, we have to step over it */
1339 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1340 xscale_unset_breakpoint(target, breakpoint);
1341
1342 /* calculate PC of next instruction */
1343 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1344 {
1345 u32 current_opcode;
1346 target_read_u32(target, current_pc, &current_opcode);
1347 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1348 }
1349
1350 LOG_DEBUG("enable single-step");
1351 xscale_enable_single_step(target, next_pc);
1352
1353 /* restore banked registers */
1354 xscale_restore_context(target);
1355
1356 /* send resume request (command 0x30 or 0x31)
1357 * clean the trace buffer if it is to be enabled (0x62) */
1358 if (xscale->trace.buffer_enabled)
1359 {
1360 xscale_send_u32(target, 0x62);
1361 xscale_send_u32(target, 0x31);
1362 }
1363 else
1364 xscale_send_u32(target, 0x30);
1365
1366 /* send CPSR */
1367 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1368 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1369
1370 for (i = 7; i >= 0; i--)
1371 {
1372 /* send register */
1373 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1374 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1375 }
1376
1377 /* send PC */
1378 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1379 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1380
1381 /* wait for and process debug entry */
1382 xscale_debug_entry(target);
1383
1384 LOG_DEBUG("disable single-step");
1385 xscale_disable_single_step(target);
1386
1387 LOG_DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
1388 xscale_set_breakpoint(target, breakpoint);
1389 }
1390 }
1391
1392 /* enable any pending breakpoints and watchpoints */
1393 xscale_enable_breakpoints(target);
1394 xscale_enable_watchpoints(target);
1395
1396 /* restore banked registers */
1397 xscale_restore_context(target);
1398
1399 /* send resume request (command 0x30 or 0x31)
1400 * clean the trace buffer if it is to be enabled (0x62) */
1401 if (xscale->trace.buffer_enabled)
1402 {
1403 xscale_send_u32(target, 0x62);
1404 xscale_send_u32(target, 0x31);
1405 }
1406 else
1407 xscale_send_u32(target, 0x30);
1408
1409 /* send CPSR */
1410 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1411 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1412
1413 for (i = 7; i >= 0; i--)
1414 {
1415 /* send register */
1416 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1417 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1418 }
1419
1420 /* send PC */
1421 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1422 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1423
1424 target->debug_reason = DBG_REASON_NOTHALTED;
1425
1426 if (!debug_execution)
1427 {
1428 /* registers are now invalid */
1429 armv4_5_invalidate_core_regs(target);
1430 target->state = TARGET_RUNNING;
1431 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1432 }
1433 else
1434 {
1435 target->state = TARGET_DEBUG_RUNNING;
1436 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1437 }
1438
1439 LOG_DEBUG("target resumed");
1440
1441 xscale->handler_running = 1;
1442
1443 return ERROR_OK;
1444 }
1445
1446 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
1447 {
1448 armv4_5_common_t *armv4_5 = target->arch_info;
1449 xscale_common_t *xscale = armv4_5->arch_info;
1450 breakpoint_t *breakpoint = target->breakpoints;
1451
1452 u32 current_pc, next_pc;
1453 int i;
1454 int retval;
1455
1456 if (target->state != TARGET_HALTED)
1457 {
1458 LOG_WARNING("target not halted");
1459 return ERROR_TARGET_NOT_HALTED;
1460 }
1461
1462 /* current = 1: continue on current pc, otherwise continue at <address> */
1463 if (!current)
1464 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1465
1466 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1467
1468 /* if we're at the reset vector, we have to simulate the step */
1469 if (current_pc == 0x0)
1470 {
1471 arm_simulate_step(target, NULL);
1472 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1473
1474 target->debug_reason = DBG_REASON_SINGLESTEP;
1475 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1476
1477 return ERROR_OK;
1478 }
1479
1480 /* the front-end may request us not to handle breakpoints */
1481 if (handle_breakpoints)
1482 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1483 {
1484 xscale_unset_breakpoint(target, breakpoint);
1485 }
1486
1487 target->debug_reason = DBG_REASON_SINGLESTEP;
1488
1489 /* calculate PC of next instruction */
1490 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1491 {
1492 u32 current_opcode;
1493 target_read_u32(target, current_pc, &current_opcode);
1494 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1495 }
1496
1497 LOG_DEBUG("enable single-step");
1498 xscale_enable_single_step(target, next_pc);
1499
1500 /* restore banked registers */
1501 xscale_restore_context(target);
1502
1503 /* send resume request (command 0x30 or 0x31)
1504 * clean the trace buffer if it is to be enabled (0x62) */
1505 if (xscale->trace.buffer_enabled)
1506 {
1507 xscale_send_u32(target, 0x62);
1508 xscale_send_u32(target, 0x31);
1509 }
1510 else
1511 xscale_send_u32(target, 0x30);
1512
1513 /* send CPSR */
1514 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1515 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1516
1517 for (i = 7; i >= 0; i--)
1518 {
1519 /* send register */
1520 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1521 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1522 }
1523
1524 /* send PC */
1525 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1526 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1527
1528 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1529
1530 /* registers are now invalid */
1531 armv4_5_invalidate_core_regs(target);
1532
1533 /* wait for and process debug entry */
1534 xscale_debug_entry(target);
1535
1536 LOG_DEBUG("disable single-step");
1537 xscale_disable_single_step(target);
1538
1539 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1540
1541 if (breakpoint)
1542 {
1543 xscale_set_breakpoint(target, breakpoint);
1544 }
1545
1546 LOG_DEBUG("target stepped");
1547
1548 return ERROR_OK;
1549
1550 }
1551
1552 int xscale_assert_reset(target_t *target)
1553 {
1554 armv4_5_common_t *armv4_5 = target->arch_info;
1555 xscale_common_t *xscale = armv4_5->arch_info;
1556
1557 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1558
1559 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1560 * end up in T-L-R, which would reset JTAG)
1561 */
1562 jtag_add_end_state(TAP_RTI);
1563 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
1564
1565 /* set Hold reset, Halt mode and Trap Reset */
1566 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1567 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1568 xscale_write_dcsr(target, 1, 0);
1569
1570 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1571 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, 0x7f);
1572 jtag_execute_queue();
1573
1574 /* assert reset */
1575 jtag_add_reset(0, 1);
1576
1577 /* sleep 1ms, to be sure we fulfill any requirements */
1578 jtag_add_sleep(1000);
1579 jtag_execute_queue();
1580
1581 target->state = TARGET_RESET;
1582
1583 if (target->reset_halt)
1584 {
1585 int retval;
1586 if ((retval = target_halt(target))!=ERROR_OK)
1587 return retval;
1588 }
1589
1590 return ERROR_OK;
1591 }
1592
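/* If no debug handler has been installed yet, deasserting reset downloads
 * the handler binary (xscale/debug_handler.bin) into the mini instruction
 * cache one 32-byte line at a time before the core is released from reset;
 * otherwise only SRST is released. */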
1593 int xscale_deassert_reset(target_t *target)
1594 {
1595 armv4_5_common_t *armv4_5 = target->arch_info;
1596 xscale_common_t *xscale = armv4_5->arch_info;
1597
1598 fileio_t debug_handler;
1599 u32 address;
1600 u32 binary_size;
1601
1602 u32 buf_cnt;
1603 int i;
1604 int retval;
1605
1606 breakpoint_t *breakpoint = target->breakpoints;
1607
1608 LOG_DEBUG("-");
1609
1610 xscale->ibcr_available = 2;
1611 xscale->ibcr0_used = 0;
1612 xscale->ibcr1_used = 0;
1613
1614 xscale->dbr_available = 2;
1615 xscale->dbr0_used = 0;
1616 xscale->dbr1_used = 0;
1617
1618 /* mark all hardware breakpoints as unset */
1619 while (breakpoint)
1620 {
1621 if (breakpoint->type == BKPT_HARD)
1622 {
1623 breakpoint->set = 0;
1624 }
1625 breakpoint = breakpoint->next;
1626 }
1627
1628 if (!xscale->handler_installed)
1629 {
1630 /* release SRST */
1631 jtag_add_reset(0, 0);
1632
1633 /* wait 300ms; 150 and 100ms were not enough */
1634 jtag_add_sleep(300*1000);
1635
1636 jtag_add_runtest(2030, TAP_RTI);
1637 jtag_execute_queue();
1638
1639 /* set Hold reset, Halt mode and Trap Reset */
1640 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1641 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1642 xscale_write_dcsr(target, 1, 0);
1643
1644 /* Load debug handler */
1645 if (fileio_open(&debug_handler, "xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
1646 {
1647 return ERROR_OK;
1648 }
1649
1650 if ((binary_size = debug_handler.size) % 4)
1651 {
1652 LOG_ERROR("debug_handler.bin: size not a multiple of 4");
1653 exit(-1);
1654 }
1655
1656 if (binary_size > 0x800)
1657 {
1658 LOG_ERROR("debug_handler.bin: larger than 2kb");
1659 exit(-1);
1660 }
1661
1662 binary_size = CEIL(binary_size, 32) * 32;
1663
1664 address = xscale->handler_address;
1665 while (binary_size > 0)
1666 {
1667 u32 cache_line[8];
1668 u8 buffer[32];
1669
1670 if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
1671 {
1672
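/* a read failure is silently ignored here; the padding loop
 * below fills the remainder of the cache line with "mov r8, r8" */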
1673 }
1674
1675 for (i = 0; i < buf_cnt; i += 4)
1676 {
1677 /* convert LE buffer to host-endian u32 */
1678 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1679 }
1680
1681 for (; buf_cnt < 32; buf_cnt += 4)
1682 {
1683 cache_line[buf_cnt / 4] = 0xe1a08008;
1684 }
1685
1686 /* only load addresses other than the reset vectors */
1687 if ((address % 0x400) != 0x0)
1688 {
1689 xscale_load_ic(target, 1, address, cache_line);
1690 }
1691
1692 address += buf_cnt;
1693 binary_size -= buf_cnt;
1694 };
1695
1696 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
1697 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
1698
1699 jtag_add_runtest(30, TAP_RTI);
1700
1701 jtag_add_sleep(100000);
1702
1703 /* set Hold reset, Halt mode and Trap Reset */
1704 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1705 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1706 xscale_write_dcsr(target, 1, 0);
1707
1708 /* clear Hold reset to let the target run (should enter debug handler) */
1709 xscale_write_dcsr(target, 0, 1);
1710 target->state = TARGET_RUNNING;
1711
1712 if (!target->reset_halt)
1713 {
1714 jtag_add_sleep(10000);
1715
1716 /* we should have entered debug now */
1717 xscale_debug_entry(target);
1718 target->state = TARGET_HALTED;
1719
1720 /* resume the target */
1721 xscale_resume(target, 1, 0x0, 1, 0);
1722 }
1723
1724 fileio_close(&debug_handler);
1725 }
1726 else
1727 {
1728 jtag_add_reset(0, 0);
1729 }
1730
1731
1732 return ERROR_OK;
1733 }
1734
1735 int xscale_soft_reset_halt(struct target_s *target)
1736 {
1737
1738 return ERROR_OK;
1739 }
1740
1741 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
1742 {
1743
1744 return ERROR_OK;
1745 }
1746
1747 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
1748 {
1749
1750 return ERROR_OK;
1751 }
1752
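/* Fetch the banked registers (r8-r14, plus the SPSR outside USR/SYS) for
 * every mode whose cached copies are not all valid, by requesting each
 * mode in turn from the debug handler. */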
1753 int xscale_full_context(target_t *target)
1754 {
1755 armv4_5_common_t *armv4_5 = target->arch_info;
1756
1757 u32 *buffer;
1758
1759 int i, j;
1760
1761 LOG_DEBUG("-");
1762
1763 if (target->state != TARGET_HALTED)
1764 {
1765 LOG_WARNING("target not halted");
1766 return ERROR_TARGET_NOT_HALTED;
1767 }
1768
1769 buffer = malloc(4 * 8);
1770
1771 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1772 * we can't enter User mode on an XScale (unpredictable),
1773 * but User shares registers with SYS
1774 */
1775 for(i = 1; i < 7; i++)
1776 {
1777 int valid = 1;
1778
1779 /* check if there are invalid registers in the current mode
1780 */
1781 for (j = 0; j <= 16; j++)
1782 {
1783 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1784 valid = 0;
1785 }
1786
1787 if (!valid)
1788 {
1789 u32 tmp_cpsr;
1790
1791 /* request banked registers */
1792 xscale_send_u32(target, 0x0);
1793
1794 tmp_cpsr = 0x0;
1795 tmp_cpsr |= armv4_5_number_to_mode(i);
1796 tmp_cpsr |= 0xc0; /* I/F bits */
1797
1798 /* send CPSR for desired mode */
1799 xscale_send_u32(target, tmp_cpsr);
1800
1801 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1802 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1803 {
1804 xscale_receive(target, buffer, 8);
1805 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1806 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1807 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1808 }
1809 else
1810 {
1811 xscale_receive(target, buffer, 7);
1812 }
1813
1814 /* move data from buffer to register cache */
1815 for (j = 8; j <= 14; j++)
1816 {
1817 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1818 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1819 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1820 }
1821 }
1822 }
1823
1824 free(buffer);
1825
1826 return ERROR_OK;
1827 }
1828
1829 int xscale_restore_context(target_t *target)
1830 {
1831 armv4_5_common_t *armv4_5 = target->arch_info;
1832
1833 int i, j;
1834
1835 LOG_DEBUG("-");
1836
1837 if (target->state != TARGET_HALTED)
1838 {
1839 LOG_WARNING("target not halted");
1840 return ERROR_TARGET_NOT_HALTED;
1841 }
1842
1843 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1844 * we can't enter User mode on an XScale (unpredictable),
1845 * but User shares registers with SYS
1846 */
1847 for(i = 1; i < 7; i++)
1848 {
1849 int dirty = 0;
1850
1851 /* check if there are dirty registers in the current mode
1852 */
1853 for (j = 8; j <= 14; j++)
1854 {
1855 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1856 dirty = 1;
1857 }
1858
1859 /* if not USR/SYS, check if the SPSR needs to be written */
1860 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1861 {
1862 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1863 dirty = 1;
1864 }
1865
1866 if (dirty)
1867 {
1868 u32 tmp_cpsr;
1869
1870 /* send banked registers */
1871 xscale_send_u32(target, 0x1);
1872
1873 tmp_cpsr = 0x0;
1874 tmp_cpsr |= armv4_5_number_to_mode(i);
1875 tmp_cpsr |= 0xc0; /* I/F bits */
1876
1877 /* send CPSR for desired mode */
1878 xscale_send_u32(target, tmp_cpsr);
1879
1880 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1881 for (j = 8; j <= 14; j++)
1882 {
1883 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1884 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1885 }
1886
1887 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1888 {
1889 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1890 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1891 }
1892 }
1893 }
1894
1895 return ERROR_OK;
1896 }
1897
1898 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1899 {
1900 armv4_5_common_t *armv4_5 = target->arch_info;
1901 xscale_common_t *xscale = armv4_5->arch_info;
1902 u32 *buf32;
1903 int i;
1904 int retval;
1905
1906 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1907
1908 if (target->state != TARGET_HALTED)
1909 {
1910 LOG_WARNING("target not halted");
1911 return ERROR_TARGET_NOT_HALTED;
1912 }
1913
1914 /* sanitize arguments */
1915 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1916 return ERROR_INVALID_ARGUMENTS;
1917
1918 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1919 return ERROR_TARGET_UNALIGNED_ACCESS;
1920
1921 /* send memory read request (command 0x1n, n: access size) */
1922 if ((retval=xscale_send_u32(target, 0x10 | size))!=ERROR_OK)
1923 return retval;
1924
1925 /* send base address for read request */
1926 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
1927 return retval;
1928
1929 /* send number of requested data words */
1930 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
1931 return retval;
1932
1933 /* receive data from target (count times 32-bit words in host endianness) */
1934 buf32 = malloc(4 * count);
1935 if ((retval=xscale_receive(target, buf32, count))!=ERROR_OK)
1936 { free(buf32); return retval; /* don't leak buf32 on a receive error */ }
1937
1938 /* extract data from host-endian buffer into byte stream */
1939 for (i = 0; i < count; i++)
1940 {
1941 switch (size)
1942 {
1943 case 4:
1944 target_buffer_set_u32(target, buffer, buf32[i]);
1945 buffer += 4;
1946 break;
1947 case 2:
1948 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1949 buffer += 2;
1950 break;
1951 case 1:
1952 *buffer++ = buf32[i] & 0xff;
1953 break;
1954 default:
1955 LOG_ERROR("should never get here");
1956 exit(-1);
1957 }
1958 }
1959
1960 free(buf32);
1961
1962 /* examine DCSR, to see if Sticky Abort (SA) got set */
1963 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1964 return retval;
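/* the check below treats DCSR bit 5 as the sticky abort (SA) flag: it is set when a memory access issued by the debug handler aborted */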
1965 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1966 {
1967 /* clear SA bit */
1968 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
1969 return retval;
1970
1971 return ERROR_TARGET_DATA_ABORT;
1972 }
1973
1974 return ERROR_OK;
1975 }
1976
1977 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1978 {
1979 armv4_5_common_t *armv4_5 = target->arch_info;
1980 xscale_common_t *xscale = armv4_5->arch_info;
1981 int retval;
1982
1983 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1984
1985 if (target->state != TARGET_HALTED)
1986 {
1987 LOG_WARNING("target not halted");
1988 return ERROR_TARGET_NOT_HALTED;
1989 }
1990
1991 /* sanitize arguments */
1992 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1993 return ERROR_INVALID_ARGUMENTS;
1994
1995 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1996 return ERROR_TARGET_UNALIGNED_ACCESS;
1997
1998 /* send memory write request (command 0x2n, n: access size) */
1999 if ((retval=xscale_send_u32(target, 0x20 | size))!=ERROR_OK)
2000 return retval;
2001
2002 /* send base address for write request */
2003 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
2004 return retval;
2005
2006 /* send number of requested data words to be written */
2007 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
2008 return retval;
2009
2010 /* send data from the byte stream (target endianness) to the target */
2011 #if 0
2012 for (i = 0; i < count; i++)
2013 {
2014 switch (size)
2015 {
2016 case 4:
2017 value = target_buffer_get_u32(target, buffer);
2018 xscale_send_u32(target, value);
2019 buffer += 4;
2020 break;
2021 case 2:
2022 value = target_buffer_get_u16(target, buffer);
2023 xscale_send_u32(target, value);
2024 buffer += 2;
2025 break;
2026 case 1:
2027 value = *buffer;
2028 xscale_send_u32(target, value);
2029 buffer += 1;
2030 break;
2031 default:
2032 LOG_ERROR("should never get here");
2033 exit(-1);
2034 }
2035 }
2036 #endif
2037 if ((retval=xscale_send(target, buffer, count, size))!=ERROR_OK)
2038 return retval;
2039
2040 /* examine DCSR, to see if Sticky Abort (SA) got set */
2041 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
2042 return retval;
2043 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2044 {
2045 /* clear SA bit */
2046 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
2047 return retval;
2048
2049 return ERROR_TARGET_DATA_ABORT;
2050 }
2051
2052 return ERROR_OK;
2053 }
2054
2055 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
2056 {
2057 return xscale_write_memory(target, address, 4, count, buffer);
2058 }
2059
2060 u32 xscale_get_ttb(target_t *target)
2061 {
2062 armv4_5_common_t *armv4_5 = target->arch_info;
2063 xscale_common_t *xscale = armv4_5->arch_info;
2064 u32 ttb;
2065
2066 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2067 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2068
2069 return ttb;
2070 }
2071
2072 void xscale_disable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2073 {
2074 armv4_5_common_t *armv4_5 = target->arch_info;
2075 xscale_common_t *xscale = armv4_5->arch_info;
2076 u32 cp15_control;
2077
2078 /* read cp15 control register */
2079 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2080 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2081
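/* CP15 control register layout assumed here (standard ARMv5/XScale): bit 0 = MMU enable, bit 2 = data/unified cache enable, bit 12 = instruction cache enable */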
2082 if (mmu)
2083 cp15_control &= ~0x1U;
2084
2085 if (d_u_cache)
2086 {
2087 /* clean DCache */
2088 xscale_send_u32(target, 0x50);
2089 xscale_send_u32(target, xscale->cache_clean_address);
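/* command 0x50 takes the cache clean address as its argument; the handler is expected to clean the DCache by allocating lines from this otherwise unused 64kB region (the usual XScale technique) */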
2090
2091 /* invalidate DCache */
2092 xscale_send_u32(target, 0x51);
2093
2094 cp15_control &= ~0x4U;
2095 }
2096
2097 if (i_cache)
2098 {
2099 /* invalidate ICache */
2100 xscale_send_u32(target, 0x52);
2101 cp15_control &= ~0x1000U;
2102 }
2103
2104 /* write new cp15 control register */
2105 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2106
2107 /* execute cpwait to ensure outstanding operations complete */
2108 xscale_send_u32(target, 0x53);
2109 }
2110
2111 void xscale_enable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2112 {
2113 armv4_5_common_t *armv4_5 = target->arch_info;
2114 xscale_common_t *xscale = armv4_5->arch_info;
2115 u32 cp15_control;
2116
2117 /* read cp15 control register */
2118 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2119 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2120
2121 if (mmu)
2122 cp15_control |= 0x1U;
2123
2124 if (d_u_cache)
2125 cp15_control |= 0x4U;
2126
2127 if (i_cache)
2128 cp15_control |= 0x1000U;
2129
2130 /* write new cp15 control register */
2131 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2132
2133 /* execute cpwait to ensure outstanding operations complete */
2134 xscale_send_u32(target, 0x53);
2135 }
2136
2137 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2138 {
2139 armv4_5_common_t *armv4_5 = target->arch_info;
2140 xscale_common_t *xscale = armv4_5->arch_info;
2141
2142 if (target->state != TARGET_HALTED)
2143 {
2144 LOG_WARNING("target not halted");
2145 return ERROR_TARGET_NOT_HALTED;
2146 }
2147
2148 if (xscale->force_hw_bkpts)
2149 breakpoint->type = BKPT_HARD;
2150
2151 if (breakpoint->set)
2152 {
2153 LOG_WARNING("breakpoint already set");
2154 return ERROR_OK;
2155 }
2156
2157 if (breakpoint->type == BKPT_HARD)
2158 {
2159 u32 value = breakpoint->address | 1;
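/* IBCRx holds the breakpoint address in bits [31:1]; bit 0 enables the comparator */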
2160 if (!xscale->ibcr0_used)
2161 {
2162 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2163 xscale->ibcr0_used = 1;
2164 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2165 }
2166 else if (!xscale->ibcr1_used)
2167 {
2168 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2169 xscale->ibcr1_used = 1;
2170 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2171 }
2172 else
2173 {
2174 LOG_ERROR("BUG: no hardware comparator available");
2175 return ERROR_OK;
2176 }
2177 }
2178 else if (breakpoint->type == BKPT_SOFT)
2179 {
2180 if (breakpoint->length == 4)
2181 {
2182 /* keep the original instruction in target endianness */
2183 target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2184 /* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
2185 target_write_u32(target, breakpoint->address, xscale->arm_bkpt);
2186 }
2187 else
2188 {
2189 /* keep the original instruction in target endianness */
2190 target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2191 /* write the bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
2192 target_write_u16(target, breakpoint->address, xscale->thumb_bkpt);
2193 }
2194 breakpoint->set = 1;
2195 }
2196
2197 return ERROR_OK;
2198
2199 }
2200
2201 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2202 {
2203 armv4_5_common_t *armv4_5 = target->arch_info;
2204 xscale_common_t *xscale = armv4_5->arch_info;
2205
2206 if (target->state != TARGET_HALTED)
2207 {
2208 LOG_WARNING("target not halted");
2209 return ERROR_TARGET_NOT_HALTED;
2210 }
2211
2212 if (xscale->force_hw_bkpts)
2213 {
2214 LOG_DEBUG("forcing use of hardware breakpoint at address 0x%8.8x", breakpoint->address);
2215 breakpoint->type = BKPT_HARD;
2216 }
2217
2218 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2219 {
2220 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2221 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2222 }
2223 else if (breakpoint->type == BKPT_HARD)
2224 {
2225 xscale->ibcr_available--;
2226 }
2227
2228 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2229 {
2230 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2231 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2232 }
2233
2234 return ERROR_OK;
2235 }
2236
2237 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2238 {
2239 armv4_5_common_t *armv4_5 = target->arch_info;
2240 xscale_common_t *xscale = armv4_5->arch_info;
2241
2242 if (target->state != TARGET_HALTED)
2243 {
2244 LOG_WARNING("target not halted");
2245 return ERROR_TARGET_NOT_HALTED;
2246 }
2247
2248 if (!breakpoint->set)
2249 {
2250 LOG_WARNING("breakpoint not set");
2251 return ERROR_OK;
2252 }
2253
2254 if (breakpoint->type == BKPT_HARD)
2255 {
2256 if (breakpoint->set == 1)
2257 {
2258 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2259 xscale->ibcr0_used = 0;
2260 }
2261 else if (breakpoint->set == 2)
2262 {
2263 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2264 xscale->ibcr1_used = 0;
2265 }
2266 breakpoint->set = 0;
2267 }
2268 else
2269 {
2270 /* restore original instruction (kept in target endianness) */
2271 if (breakpoint->length == 4)
2272 {
2273 target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2274 }
2275 else
2276 {
2277 target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2278 }
2279 breakpoint->set = 0;
2280 }
2281
2282 return ERROR_OK;
2283 }
2284
2285 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2286 {
2287 armv4_5_common_t *armv4_5 = target->arch_info;
2288 xscale_common_t *xscale = armv4_5->arch_info;
2289
2290 if (target->state != TARGET_HALTED)
2291 {
2292 LOG_WARNING("target not halted");
2293 return ERROR_TARGET_NOT_HALTED;
2294 }
2295
2296 if (breakpoint->set)
2297 {
2298 xscale_unset_breakpoint(target, breakpoint);
2299 }
2300
2301 if (breakpoint->type == BKPT_HARD)
2302 xscale->ibcr_available++;
2303
2304 return ERROR_OK;
2305 }
2306
2307 int xscale_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2308 {
2309 armv4_5_common_t *armv4_5 = target->arch_info;
2310 xscale_common_t *xscale = armv4_5->arch_info;
2311 u8 enable=0;
2312 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2313 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2314
2315 if (target->state != TARGET_HALTED)
2316 {
2317 LOG_WARNING("target not halted");
2318 return ERROR_TARGET_NOT_HALTED;
2319 }
2320
2321 xscale_get_reg(dbcon);
2322
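/* DBCON enables each data breakpoint with a two-bit field (0b01 = store only, 0b10 = any access, 0b11 = load only); DBR0 uses bits [1:0], DBR1 uses bits [3:2] */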
2323 switch (watchpoint->rw)
2324 {
2325 case WPT_READ:
2326 enable = 0x3;
2327 break;
2328 case WPT_ACCESS:
2329 enable = 0x2;
2330 break;
2331 case WPT_WRITE:
2332 enable = 0x1;
2333 break;
2334 default:
2335 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2336 }
2337
2338 if (!xscale->dbr0_used)
2339 {
2340 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2341 dbcon_value |= enable;
2342 xscale_set_reg_u32(dbcon, dbcon_value);
2343 watchpoint->set = 1;
2344 xscale->dbr0_used = 1;
2345 }
2346 else if (!xscale->dbr1_used)
2347 {
2348 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2349 dbcon_value |= enable << 2;
2350 xscale_set_reg_u32(dbcon, dbcon_value);
2351 watchpoint->set = 2;
2352 xscale->dbr1_used = 1;
2353 }
2354 else
2355 {
2356 LOG_ERROR("BUG: no hardware comparator available");
2357 return ERROR_OK;
2358 }
2359
2360 return ERROR_OK;
2361 }
2362
2363 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2364 {
2365 armv4_5_common_t *armv4_5 = target->arch_info;
2366 xscale_common_t *xscale = armv4_5->arch_info;
2367
2368 if (target->state != TARGET_HALTED)
2369 {
2370 LOG_WARNING("target not halted");
2371 return ERROR_TARGET_NOT_HALTED;
2372 }
2373
2374 if (xscale->dbr_available < 1)
2375 {
2376 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2377 }
2378
2379 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2380 {
2381 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2382 }
2383
2384 xscale->dbr_available--;
2385
2386 return ERROR_OK;
2387 }
2388
2389 int xscale_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2390 {
2391 armv4_5_common_t *armv4_5 = target->arch_info;
2392 xscale_common_t *xscale = armv4_5->arch_info;
2393 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2394 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2395
2396 if (target->state != TARGET_HALTED)
2397 {
2398 LOG_WARNING("target not halted");
2399 return ERROR_TARGET_NOT_HALTED;
2400 }
2401
2402 if (!watchpoint->set)
2403 {
2404 LOG_WARNING("breakpoint not set");
2405 return ERROR_OK;
2406 }
2407
2408 if (watchpoint->set == 1)
2409 {
2410 dbcon_value &= ~0x3;
2411 xscale_set_reg_u32(dbcon, dbcon_value);
2412 xscale->dbr0_used = 0;
2413 }
2414 else if (watchpoint->set == 2)
2415 {
2416 dbcon_value &= ~0xc;
2417 xscale_set_reg_u32(dbcon, dbcon_value);
2418 xscale->dbr1_used = 0;
2419 }
2420 watchpoint->set = 0;
2421
2422 return ERROR_OK;
2423 }
2424
2425 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2426 {
2427 armv4_5_common_t *armv4_5 = target->arch_info;
2428 xscale_common_t *xscale = armv4_5->arch_info;
2429
2430 if (target->state != TARGET_HALTED)
2431 {
2432 LOG_WARNING("target not halted");
2433 return ERROR_TARGET_NOT_HALTED;
2434 }
2435
2436 if (watchpoint->set)
2437 {
2438 xscale_unset_watchpoint(target, watchpoint);
2439 }
2440
2441 xscale->dbr_available++;
2442
2443 return ERROR_OK;
2444 }
2445
2446 void xscale_enable_watchpoints(struct target_s *target)
2447 {
2448 watchpoint_t *watchpoint = target->watchpoints;
2449
2450 while (watchpoint)
2451 {
2452 if (watchpoint->set == 0)
2453 xscale_set_watchpoint(target, watchpoint);
2454 watchpoint = watchpoint->next;
2455 }
2456 }
2457
2458 void xscale_enable_breakpoints(struct target_s *target)
2459 {
2460 breakpoint_t *breakpoint = target->breakpoints;
2461
2462 /* set any pending breakpoints */
2463 while (breakpoint)
2464 {
2465 if (breakpoint->set == 0)
2466 xscale_set_breakpoint(target, breakpoint);
2467 breakpoint = breakpoint->next;
2468 }
2469 }
2470
2471 int xscale_get_reg(reg_t *reg)
2472 {
2473 xscale_reg_t *arch_info = reg->arch_info;
2474 target_t *target = arch_info->target;
2475 armv4_5_common_t *armv4_5 = target->arch_info;
2476 xscale_common_t *xscale = armv4_5->arch_info;
2477
2478 /* DCSR, TX and RX are accessible via JTAG */
2479 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2480 {
2481 return xscale_read_dcsr(arch_info->target);
2482 }
2483 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2484 {
2485 /* 1 = consume register content */
2486 return xscale_read_tx(arch_info->target, 1);
2487 }
2488 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2489 {
2490 /* can't read from RX register (host -> debug handler) */
2491 return ERROR_OK;
2492 }
2493 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2494 {
2495 /* can't (explicitly) read from TXRXCTRL register */
2496 return ERROR_OK;
2497 }
2498 else /* Other DBG registers have to be transferred by the debug handler */
2499 {
2500 /* send CP read request (command 0x40) */
2501 xscale_send_u32(target, 0x40);
2502
2503 /* send CP register number */
2504 xscale_send_u32(target, arch_info->dbg_handler_number);
2505
2506 /* read register value */
2507 xscale_read_tx(target, 1);
2508 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2509
2510 reg->dirty = 0;
2511 reg->valid = 1;
2512 }
2513
2514 return ERROR_OK;
2515 }
2516
2517 int xscale_set_reg(reg_t *reg, u8* buf)
2518 {
2519 xscale_reg_t *arch_info = reg->arch_info;
2520 target_t *target = arch_info->target;
2521 armv4_5_common_t *armv4_5 = target->arch_info;
2522 xscale_common_t *xscale = armv4_5->arch_info;
2523 u32 value = buf_get_u32(buf, 0, 32);
2524
2525 /* DCSR, TX and RX are accessible via JTAG */
2526 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2527 {
2528 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2529 return xscale_write_dcsr(arch_info->target, -1, -1);
2530 }
2531 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2532 {
2533 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2534 return xscale_write_rx(arch_info->target);
2535 }
2536 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2537 {
2538 /* can't write to TX register (debug-handler -> host) */
2539 return ERROR_OK;
2540 }
2541 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2542 {
2543 /* can't (explicitly) write to TXRXCTRL register */
2544 return ERROR_OK;
2545 }
2546 else /* Other DBG registers have to be transferred by the debug handler */
2547 {
2548 /* send CP write request (command 0x41) */
2549 xscale_send_u32(target, 0x41);
2550
2551 /* send CP register number */
2552 xscale_send_u32(target, arch_info->dbg_handler_number);
2553
2554 /* send CP register value */
2555 xscale_send_u32(target, value);
2556 buf_set_u32(reg->value, 0, 32, value);
2557 }
2558
2559 return ERROR_OK;
2560 }
2561
2562 /* convenience wrapper to access XScale specific registers */
2563 int xscale_set_reg_u32(reg_t *reg, u32 value)
2564 {
2565 u8 buf[4];
2566
2567 buf_set_u32(buf, 0, 32, value);
2568
2569 return xscale_set_reg(reg, buf);
2570 }
2571
2572 int xscale_write_dcsr_sw(target_t *target, u32 value)
2573 {
2574 /* get pointers to arch-specific information */
2575 armv4_5_common_t *armv4_5 = target->arch_info;
2576 xscale_common_t *xscale = armv4_5->arch_info;
2577 reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2578 xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
2579
2580 /* send CP write request (command 0x41) */
2581 xscale_send_u32(target, 0x41);
2582
2583 /* send CP register number */
2584 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2585
2586 /* send CP register value */
2587 xscale_send_u32(target, value);
2588 buf_set_u32(dcsr->value, 0, 32, value);
2589
2590 return ERROR_OK;
2591 }
2592
2593 int xscale_read_trace(target_t *target)
2594 {
2595 /* get pointers to arch-specific information */
2596 armv4_5_common_t *armv4_5 = target->arch_info;
2597 xscale_common_t *xscale = armv4_5->arch_info;
2598 xscale_trace_data_t **trace_data_p;
2599
2600 /* 258 words from debug handler
2601 * 256 trace buffer entries
2602 * 2 checkpoint addresses
2603 */
2604 u32 trace_buffer[258];
2605 int is_address[256];
2606 int i, j;
2607
2608 if (target->state != TARGET_HALTED)
2609 {
2610 LOG_WARNING("target must be stopped to read trace data");
2611 return ERROR_TARGET_NOT_HALTED;
2612 }
2613
2614 /* send read trace buffer command (command 0x61) */
2615 xscale_send_u32(target, 0x61);
2616
2617 /* receive trace buffer content */
2618 xscale_receive(target, trace_buffer, 258);
2619
2620 /* parse buffer backwards to identify address entries */
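/* an indirect branch message (0x9n) or checkpointed indirect branch message (0xDn) is preceded in the buffer by four entries holding the branch target address, least significant byte first */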
2621 for (i = 255; i >= 0; i--)
2622 {
2623 is_address[i] = 0;
2624 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2625 ((trace_buffer[i] & 0xf0) == 0xd0))
2626 {
2627 if (i >= 3)
2628 is_address[--i] = 1;
2629 if (i >= 2)
2630 is_address[--i] = 1;
2631 if (i >= 1)
2632 is_address[--i] = 1;
2633 if (i >= 0)
2634 is_address[--i] = 1;
2635 }
2636 }
2637
2638
2639 /* search first non-zero entry */
2640 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2641 ;
2642
2643 if (j == 256)
2644 {
2645 LOG_DEBUG("no trace data collected");
2646 return ERROR_XSCALE_NO_TRACE_DATA;
2647 }
2648
2649 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2650 ;
2651
2652 *trace_data_p = malloc(sizeof(xscale_trace_data_t));
2653 (*trace_data_p)->next = NULL;
2654 (*trace_data_p)->chkpt0 = trace_buffer[256];
2655 (*trace_data_p)->chkpt1 = trace_buffer[257];
2656 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2657 (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
2658 (*trace_data_p)->depth = 256 - j;
2659
2660 for (i = j; i < 256; i++)
2661 {
2662 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2663 if (is_address[i])
2664 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2665 else
2666 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2667 }
2668
2669 return ERROR_OK;
2670 }
2671
2672 int xscale_read_instruction(target_t *target, arm_instruction_t *instruction)
2673 {
2674 /* get pointers to arch-specific information */
2675 armv4_5_common_t *armv4_5 = target->arch_info;
2676 xscale_common_t *xscale = armv4_5->arch_info;
2677 int i;
2678 int section = -1;
2679 u32 size_read;
2680 u32 opcode;
2681 int retval;
2682
2683 if (!xscale->trace.image)
2684 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2685
2686 /* search for the section the current instruction belongs to */
2687 for (i = 0; i < xscale->trace.image->num_sections; i++)
2688 {
2689 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2690 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2691 {
2692 section = i;
2693 break;
2694 }
2695 }
2696
2697 if (section == -1)
2698 {
2699 /* current instruction couldn't be found in the image */
2700 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2701 }
2702
2703 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2704 {
2705 u8 buf[4];
2706 if ((retval = image_read_section(xscale->trace.image, section,
2707 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2708 4, buf, &size_read)) != ERROR_OK)
2709 {
2710 LOG_ERROR("error while reading instruction: %i", retval);
2711 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2712 }
2713 opcode = target_buffer_get_u32(target, buf);
2714 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2715 }
2716 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2717 {
2718 u8 buf[2];
2719 if ((retval = image_read_section(xscale->trace.image, section,
2720 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2721 2, buf, &size_read)) != ERROR_OK)
2722 {
2723 LOG_ERROR("error while reading instruction: %i", retval);
2724 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2725 }
2726 opcode = target_buffer_get_u16(target, buf);
2727 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2728 }
2729 else
2730 {
2731 LOG_ERROR("BUG: unknown core state encountered");
2732 exit(-1);
2733 }
2734
2735 return ERROR_OK;
2736 }
2737
2738 int xscale_branch_address(xscale_trace_data_t *trace_data, int i, u32 *target)
2739 {
2740 /* if there are fewer than four entries prior to the indirect branch message
2741 * we can't extract the address */
2742 if (i < 4)
2743 {
2744 return -1;
2745 }
2746
2747 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2748 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2749
2750 return 0;
2751 }
2752
2753 int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
2754 {
2755 /* get pointers to arch-specific information */
2756 armv4_5_common_t *armv4_5 = target->arch_info;
2757 xscale_common_t *xscale = armv4_5->arch_info;
2758 int next_pc_ok = 0;
2759 u32 next_pc = 0x0;
2760 xscale_trace_data_t *trace_data = xscale->trace.data;
2761 int retval;
2762
2763 while (trace_data)
2764 {
2765 int i, chkpt;
2766 int rollover;
2767 int branch;
2768 int exception;
2769 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2770
2771 chkpt = 0;
2772 rollover = 0;
2773
2774 for (i = 0; i < trace_data->depth; i++)
2775 {
2776 next_pc_ok = 0;
2777 branch = 0;
2778 exception = 0;
2779
2780 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2781 continue;
2782
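/* trace message format: the upper nibble encodes the message type, the lower nibble the number of instructions executed since the previous message (each roll-over message adds another 16) */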
2783 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2784 {
2785 case 0: /* Exceptions */
2786 case 1:
2787 case 2:
2788 case 3:
2789 case 4:
2790 case 5:
2791 case 6:
2792 case 7:
2793 exception = (trace_data->entries[i].data & 0x70) >> 4;
2794 next_pc_ok = 1;
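/* the (low) exception vector address is the exception number times 4 */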
2795 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2796 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2797 break;
2798 case 8: /* Direct Branch */
2799 branch = 1;
2800 break;
2801 case 9: /* Indirect Branch */
2802 branch = 1;
2803 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2804 {
2805 next_pc_ok = 1;
2806 }
2807 break;
2808 case 13: /* Checkpointed Indirect Branch */
2809 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2810 {
2811 next_pc_ok = 1;
2812 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2813 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2814 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2815 }
2816 /* explicit fall-through */
2817 case 12: /* Checkpointed Direct Branch */
2818 branch = 1;
2819 if (chkpt == 0)
2820 {
2821 next_pc_ok = 1;
2822 next_pc = trace_data->chkpt0;
2823 chkpt++;
2824 }
2825 else if (chkpt == 1)
2826 {
2827 next_pc_ok = 1;
2828 next_pc = trace_data->chkpt1;
2829 chkpt++;
2830 }
2831 else
2832 {
2833 LOG_WARNING("more than two checkpointed branches encountered");
2834 }
2835 break;
2836 case 15: /* Roll-over */
2837 rollover++;
2838 continue;
2839 default: /* Reserved */
2840 command_print(cmd_ctx, "--- reserved trace message ---");
2841 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2842 return ERROR_OK;
2843 }
2844
2845 if (xscale->trace.pc_ok)
2846 {
2847 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2848 arm_instruction_t instruction;
2849
2850 if ((exception == 6) || (exception == 7))
2851 {
2852 /* IRQ or FIQ exception, no instruction executed */
2853 executed -= 1;
2854 }
2855
2856 while (executed-- >= 0)
2857 {
2858 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2859 {
2860 /* can't continue tracing with no image available */
2861 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2862 {
2863 return retval;
2864 }
2865 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2866 {
2867 /* TODO: handle incomplete images */
2868 }
2869 }
2870
2871 /* a precise abort on a load to the PC is included in the incremental
2872 * word count, other instructions causing data aborts are not included
2873 */
2874 if ((executed == 0) && (exception == 4)
2875 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2876 {
2877 if ((instruction.type == ARM_LDM)
2878 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2879 {
2880 executed--;
2881 }
2882 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2883 && (instruction.info.load_store.Rd != 15))
2884 {
2885 executed--;
2886 }
2887 }
2888
2889 /* only the last instruction executed
2890 * (the one that caused the control flow change)
2891 * could be a taken branch
2892 */
2893 if (((executed == -1) && (branch == 1)) &&
2894 (((instruction.type == ARM_B) ||
2895 (instruction.type == ARM_BL) ||
2896 (instruction.type == ARM_BLX)) &&
2897 (instruction.info.b_bl_bx_blx.target_address != -1)))
2898 {
2899 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2900 }
2901 else
2902 {
2903 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2904 }
2905 command_print(cmd_ctx, "%s", instruction.text);
2906 }
2907
2908 rollover = 0;
2909 }
2910
2911 if (next_pc_ok)
2912 {
2913 xscale->trace.current_pc = next_pc;
2914 xscale->trace.pc_ok = 1;
2915 }
2916 }
2917
2918 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2919 {
2920 arm_instruction_t instruction;
2921 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2922 {
2923 /* can't continue tracing with no image available */
2924 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2925 {
2926 return retval;
2927 }
2928 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2929 {
2930 /* TODO: handle incomplete images */
2931 }
2932 }
2933 command_print(cmd_ctx, "%s", instruction.text);
2934 }
2935
2936 trace_data = trace_data->next;
2937 }
2938
2939 return ERROR_OK;
2940 }
2941
2942 void xscale_build_reg_cache(target_t *target)
2943 {
2944 /* get pointers to arch-specific information */
2945 armv4_5_common_t *armv4_5 = target->arch_info;
2946 xscale_common_t *xscale = armv4_5->arch_info;
2947
2948 reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
2949 xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
2950 int i;
2951 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
2952
2953 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2954 armv4_5->core_cache = (*cache_p);
2955
2956 /* register a register arch-type for XScale dbg registers only once */
2957 if (xscale_reg_arch_type == -1)
2958 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
2959
2960 (*cache_p)->next = malloc(sizeof(reg_cache_t));
2961 cache_p = &(*cache_p)->next;
2962
2963 /* fill in values for the xscale reg cache */
2964 (*cache_p)->name = "XScale registers";
2965 (*cache_p)->next = NULL;
2966 (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
2967 (*cache_p)->num_regs = num_regs;
2968
2969 for (i = 0; i < num_regs; i++)
2970 {
2971 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2972 (*cache_p)->reg_list[i].value = calloc(4, 1);
2973 (*cache_p)->reg_list[i].dirty = 0;
2974 (*cache_p)->reg_list[i].valid = 0;
2975 (*cache_p)->reg_list[i].size = 32;
2976 (*cache_p)->reg_list[i].bitfield_desc = NULL;
2977 (*cache_p)->reg_list[i].num_bitfields = 0;
2978 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2979 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
2980 arch_info[i] = xscale_reg_arch_info[i];
2981 arch_info[i].target = target;
2982 }
2983
2984 xscale->reg_cache = (*cache_p);
2985 }
2986
2987 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target)
2988 {
2989 return ERROR_OK;
2990 }
2991
2992 int xscale_quit(void)
2993 {
2994
2995 return ERROR_OK;
2996 }
2997
2998 int xscale_init_arch_info(target_t *target, xscale_common_t *xscale, int chain_pos, char *variant)
2999 {
3000 armv4_5_common_t *armv4_5;
3001 u32 high_reset_branch, low_reset_branch;
3002 int i;
3003
3004 armv4_5 = &xscale->armv4_5_common;
3005
3006 /* store architecture specific data (none so far) */
3007 xscale->arch_info = NULL;
3008 xscale->common_magic = XSCALE_COMMON_MAGIC;
3009
3010 /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
3011 xscale->variant = strdup(variant);
3012
3013 /* prepare JTAG information for the new target */
3014 xscale->jtag_info.chain_pos = chain_pos;
3015
3016 xscale->jtag_info.dbgrx = 0x02;
3017 xscale->jtag_info.dbgtx = 0x10;
3018 xscale->jtag_info.dcsr = 0x09;
3019 xscale->jtag_info.ldic = 0x07;
3020
3021 if ((strcmp(xscale->variant, "pxa250") == 0) ||
3022 (strcmp(xscale->variant, "pxa255") == 0) ||
3023 (strcmp(xscale->variant, "pxa26x") == 0))
3024 {
3025 xscale->jtag_info.ir_length = 5;
3026 }
3027 else if ((strcmp(xscale->variant, "pxa27x") == 0) ||
3028 (strcmp(xscale->variant, "ixp42x") == 0) ||
3029 (strcmp(xscale->variant, "ixp45x") == 0) ||
3030 (strcmp(xscale->variant, "ixp46x") == 0))
3031 {
3032 xscale->jtag_info.ir_length = 7;
3033 }
3034
3035 /* the debug handler isn't installed (and thus not running) at this time */
3036 xscale->handler_installed = 0;
3037 xscale->handler_running = 0;
3038 xscale->handler_address = 0xfe000800;
3039
3040 /* clear the vectors we keep locally for reference */
3041 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3042 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3043
3044 /* no user-specified vectors have been configured yet */
3045 xscale->static_low_vectors_set = 0x0;
3046 xscale->static_high_vectors_set = 0x0;
3047
3048 /* calculate branches to debug handler */
3049 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3050 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
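/* ARM B encoding: the offset is a signed word offset relative to the vector address + 8 (PC pipeline offset), hence the -0x8 */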
3051
3052 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3053 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3054
3055 for (i = 1; i <= 7; i++)
3056 {
3057 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3058 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3059 }
3060
3061 /* 64kB aligned region used for DCache cleaning */
3062 xscale->cache_clean_address = 0xfffe0000;
3063
3064 xscale->hold_rst = 0;
3065 xscale->external_debug_break = 0;
3066
3067 xscale->force_hw_bkpts = 1;
3068
3069 xscale->ibcr_available = 2;
3070 xscale->ibcr0_used = 0;
3071 xscale->ibcr1_used = 0;
3072
3073 xscale->dbr_available = 2;
3074 xscale->dbr0_used = 0;
3075 xscale->dbr1_used = 0;
3076
3077 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3078 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3079
3080 xscale->vector_catch = 0x1;
3081
3082 xscale->trace.capture_status = TRACE_IDLE;
3083 xscale->trace.data = NULL;
3084 xscale->trace.image = NULL;
3085 xscale->trace.buffer_enabled = 0;
3086 xscale->trace.buffer_fill = 0;
3087
3088 /* prepare ARMv4/5 specific information */
3089 armv4_5->arch_info = xscale;
3090 armv4_5->read_core_reg = xscale_read_core_reg;
3091 armv4_5->write_core_reg = xscale_write_core_reg;
3092 armv4_5->full_context = xscale_full_context;
3093
3094 armv4_5_init_arch_info(target, armv4_5);
3095
3096 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3097 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3098 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3099 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3100 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3101 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3102 xscale->armv4_5_mmu.has_tiny_pages = 1;
3103 xscale->armv4_5_mmu.mmu_enabled = 0;
3104
3105 return ERROR_OK;
3106 }
3107
3108 /* target xscale <endianness> <startup_mode> <chain_pos> <variant> */
3109 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target)
3110 {
3111 int chain_pos;
3112 char *variant = NULL;
3113 xscale_common_t *xscale = malloc(sizeof(xscale_common_t));
3114 memset(xscale, 0, sizeof(*xscale));
3115
3116 if (argc < 5)
3117 {
3118 LOG_ERROR("'target xscale' requires four arguments: <endianess> <startup_mode> <chain_pos> <variant>");
3119 return ERROR_OK;
3120 }
3121
3122 chain_pos = strtoul(args[3], NULL, 0);
3123
3124 variant = args[4];
3125
3126 xscale_init_arch_info(target, xscale, chain_pos, variant);
3127 xscale_build_reg_cache(target);
3128
3129 return ERROR_OK;
3130 }
3131
3132 int xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3133 {
3134 target_t *target = NULL;
3135 armv4_5_common_t *armv4_5;
3136 xscale_common_t *xscale;
3137
3138 u32 handler_address;
3139
3140 if (argc < 2)
3141 {
3142 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3143 return ERROR_OK;
3144 }
3145
3146 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3147 {
3148 LOG_ERROR("no target '%s' configured", args[0]);
3149 return ERROR_OK;
3150 }
3151
3152 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3153 {
3154 return ERROR_OK;
3155 }
3156
3157 handler_address = strtoul(args[1], NULL, 0);
3158
3159 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3160 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3161 {
3162 xscale->handler_address = handler_address;
3163 }
3164 else
3165 {
3166 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3167 }
3168
3169 return ERROR_OK;
3170 }
3171
3172 int xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3173 {
3174 target_t *target = NULL;
3175 armv4_5_common_t *armv4_5;
3176 xscale_common_t *xscale;
3177
3178 u32 cache_clean_address;
3179
3180 if (argc < 2)
3181 {
3182 LOG_ERROR("'xscale cache_clean_address <target#> <address>' command takes two required operands");
3183 return ERROR_OK;
3184 }
3185
3186 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3187 {
3188 LOG_ERROR("no target '%s' configured", args[0]);
3189 return ERROR_OK;
3190 }
3191
3192 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3193 {
3194 return ERROR_OK;
3195 }
3196
3197 cache_clean_address = strtoul(args[1], NULL, 0);
3198
3199 if (cache_clean_address & 0xffff)
3200 {
3201 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3202 }
3203 else
3204 {
3205 xscale->cache_clean_address = cache_clean_address;
3206 }
3207
3208 return ERROR_OK;
3209 }
3210
3211 int xscale_handle_cache_info_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3212 {
3213 target_t *target = get_current_target(cmd_ctx);
3214 armv4_5_common_t *armv4_5;
3215 xscale_common_t *xscale;
3216
3217 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3218 {
3219 return ERROR_OK;
3220 }
3221
3222 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3223 }
3224
3225 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical)
3226 {
3227 armv4_5_common_t *armv4_5;
3228 xscale_common_t *xscale;
3229 int retval;
3230 int type;
3231 u32 cb;
3232 int domain;
3233 u32 ap;
3234
3235
3236 if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
3237 {
3238 return retval;
3239 }
3240 u32 ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3241 if (type == -1)
3242 {
3243 return ret;
3244 }
3245 *physical = ret;
3246 return ERROR_OK;
3247 }
3248
3249 static int xscale_mmu(struct target_s *target, int *enabled)
3250 {
3251 armv4_5_common_t *armv4_5 = target->arch_info;
3252 xscale_common_t *xscale = armv4_5->arch_info;
3253
3254 if (target->state != TARGET_HALTED)
3255 {
3256 LOG_ERROR("Target not halted");
3257 return ERROR_TARGET_INVALID;
3258 }
3259 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3260 return ERROR_OK;
3261 }
3262
3263
3264 int xscale_handle_mmu_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3265 {
3266 target_t *target = get_current_target(cmd_ctx);
3267 armv4_5_common_t *armv4_5;
3268 xscale_common_t *xscale;
3269
3270 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3271 {
3272 return ERROR_OK;
3273 }
3274
3275 if (target->state != TARGET_HALTED)
3276 {
3277 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3278 return ERROR_OK;
3279 }
3280
3281 if (argc >= 1)
3282 {
3283 if (strcmp("enable", args[0]) == 0)
3284 {
3285 xscale_enable_mmu_caches(target, 1, 0, 0);
3286 xscale->armv4_5_mmu.mmu_enabled = 1;
3287 }
3288 else if (strcmp("disable", args[0]) == 0)
3289 {
3290 xscale_disable_mmu_caches(target, 1, 0, 0);
3291 xscale->armv4_5_mmu.mmu_enabled = 0;
3292 }
3293 }
3294
3295 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3296
3297 return ERROR_OK;
3298 }
3299
3300 int xscale_handle_idcache_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3301 {
3302 target_t *target = get_current_target(cmd_ctx);
3303 armv4_5_common_t *armv4_5;
3304 xscale_common_t *xscale;
3305 int icache = 0, dcache = 0;
3306
3307 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3308 {
3309 return ERROR_OK;
3310 }
3311
3312 if (target->state != TARGET_HALTED)
3313 {
3314 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3315 return ERROR_OK;
3316 }
3317
3318 if (strcmp(cmd, "icache") == 0)
3319 icache = 1;
3320 else if (strcmp(cmd, "dcache") == 0)
3321 dcache = 1;
3322
3323 if (argc >= 1)
3324 {
3325 if (strcmp("enable", args[0]) == 0)
3326 {
3327 xscale_enable_mmu_caches(target, 0, dcache, icache);
3328
3329 if (icache)
3330 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3331 else if (dcache)
3332 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3333 }
3334 else if (strcmp("disable", args[0]) == 0)
3335 {
3336 xscale_disable_mmu_caches(target, 0, dcache, icache);
3337
3338 if (icache)
3339 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3340 else if (dcache)
3341 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3342 }
3343 }
3344
3345 if (icache)
3346 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3347
3348 if (dcache)
3349 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3350
3351 return ERROR_OK;
3352 }
3353
3354 int xscale_handle_vector_catch_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3355 {
3356 target_t *target = get_current_target(cmd_ctx);
3357 armv4_5_common_t *armv4_5;
3358 xscale_common_t *xscale;
3359
3360 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3361 {
3362 return ERROR_OK;
3363 }
3364
3365 if (argc < 1)
3366 {
3367 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3368 }
3369 else
3370 {
3371 xscale->vector_catch = strtoul(args[0], NULL, 0);
3372 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3373 xscale_write_dcsr(target, -1, -1);
3374 }
3375
3376 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3377
3378 return ERROR_OK;
3379 }
3380
3381 int xscale_handle_force_hw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3382 {
3383 target_t *target = get_current_target(cmd_ctx);
3384 armv4_5_common_t *armv4_5;
3385 xscale_common_t *xscale;
3386
3387 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3388 {
3389 return ERROR_OK;
3390 }
3391
3392 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3393 {
3394 xscale->force_hw_bkpts = 1;
3395 }
3396 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3397 {
3398 xscale->force_hw_bkpts = 0;
3399 }
3400 else
3401 {
3402 command_print(cmd_ctx, "usage: xscale force_hw_bkpts <enable|disable>");
3403 }
3404
3405 command_print(cmd_ctx, "force hardware breakpoints %s", (xscale->force_hw_bkpts) ? "enabled" : "disabled");
3406
3407 return ERROR_OK;
3408 }
3409
3410 int xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3411 {
3412 target_t *target = get_current_target(cmd_ctx);
3413 armv4_5_common_t *armv4_5;
3414 xscale_common_t *xscale;
3415 u32 dcsr_value;
3416
3417 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3418 {
3419 return ERROR_OK;
3420 }
3421
3422 if (target->state != TARGET_HALTED)
3423 {
3424 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3425 return ERROR_OK;
3426 }
3427
3428 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3429 {
3430 xscale_trace_data_t *td, *next_td;
3431 xscale->trace.buffer_enabled = 1;
3432
3433 /* free old trace data */
3434 td = xscale->trace.data;
3435 while (td)
3436 {
3437 next_td = td->next;
3438
3439 if (td->entries)
3440 free(td->entries);
3441 free(td);
3442 td = next_td;
3443 }
3444 xscale->trace.data = NULL;
3445 }
3446 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3447 {
3448 xscale->trace.buffer_enabled = 0;
3449 }
3450
3451 if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
3452 {
3453 if (argc >= 3)
3454 xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
3455 else
3456 xscale->trace.buffer_fill = 1;
3457 }
3458 else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
3459 {
3460 xscale->trace.buffer_fill = -1;
3461 }
3462
3463 if (xscale->trace.buffer_enabled)
3464 {
3465 /* if we enable the trace buffer in fill-once
3466 * mode we know the address of the first instruction */
3467 xscale->trace.pc_ok = 1;
3468 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3469 }
3470 else
3471 {
3472 /* otherwise the address is unknown, and we have no known good PC */
3473 xscale->trace.pc_ok = 0;
3474 }
3475
3476 command_print(cmd_ctx, "trace buffer %s (%s)",
3477 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3478 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3479
3480 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3481 if (xscale->trace.buffer_fill >= 0)
3482 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3483 else
3484 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3485
3486 return ERROR_OK;
3487 }
3488
3489 int xscale_handle_trace_image_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3490 {
3491 target_t *target;
3492 armv4_5_common_t *armv4_5;
3493 xscale_common_t *xscale;
3494
3495 if (argc < 1)
3496 {
3497 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3498 return ERROR_OK;
3499 }
3500
3501 target = get_current_target(cmd_ctx);
3502
3503 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3504 {
3505 return ERROR_OK;
3506 }
3507
3508 if (xscale->trace.image)
3509 {
3510 image_close(xscale->trace.image);
3511 free(xscale->trace.image);
3512 command_print(cmd_ctx, "previously loaded image found and closed");
3513 }
3514
3515 xscale->trace.image = malloc(sizeof(image_t));
3516 xscale->trace.image->base_address_set = 0;
3517 xscale->trace.image->start_address_set = 0;
3518
3519 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3520 if (argc >= 2)
3521 {
3522 xscale->trace.image->base_address_set = 1;
3523 xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
3524 }
3525 else
3526 {
3527 xscale->trace.image->base_address_set = 0;
3528 }
3529
3530 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
3531 {
3532 free(xscale->trace.image);
3533 xscale->trace.image = NULL;
3534 return ERROR_OK;
3535 }
3536
3537 return ERROR_OK;
3538 }
3539
3540 int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3541 {
3542 target_t *target = get_current_target(cmd_ctx);
3543 armv4_5_common_t *armv4_5;
3544 xscale_common_t *xscale;
3545 xscale_trace_data_t *trace_data;
3546 fileio_t file;
3547
3548 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3549 {
3550 return ERROR_OK;
3551 }
3552
3553 if (target->state != TARGET_HALTED)
3554 {
3555 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3556 return ERROR_OK;
3557 }
3558
3559 if (argc < 1)
3560 {
3561 command_print(cmd_ctx, "usage: xscale dump_trace <file>");
3562 return ERROR_OK;
3563 }
3564
3565 trace_data = xscale->trace.data;
3566
3567 if (!trace_data)
3568 {
3569 command_print(cmd_ctx, "no trace data collected");
3570 return ERROR_OK;
3571 }
3572
3573 if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3574 {
3575 return ERROR_OK;
3576 }
3577
3578 while (trace_data)
3579 {
3580 int i;
3581
3582 fileio_write_u32(&file, trace_data->chkpt0);
3583 fileio_write_u32(&file, trace_data->chkpt1);
3584 fileio_write_u32(&file, trace_data->last_instruction);
3585 fileio_write_u32(&file, trace_data->depth);
3586
3587 for (i = 0; i < trace_data->depth; i++)
3588 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3589
3590 trace_data = trace_data->next;
3591 }
3592
3593 fileio_close(&file);
3594
3595 return ERROR_OK;
3596 }
3597
3598 int xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3599 {
3600 target_t *target = get_current_target(cmd_ctx);
3601 armv4_5_common_t *armv4_5;
3602 xscale_common_t *xscale;
3603
3604 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3605 {
3606 return ERROR_OK;
3607 }
3608
3609 xscale_analyze_trace(target, cmd_ctx);
3610
3611 return ERROR_OK;
3612 }
3613
3614 int xscale_handle_cp15(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3615 {
3616 target_t *target = get_current_target(cmd_ctx);
3617 armv4_5_common_t *armv4_5;
3618 xscale_common_t *xscale;
3619
3620 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3621 {
3622 return ERROR_OK;
3623 }
3624
3625 if (target->state != TARGET_HALTED)
3626 {
3627 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3628 return ERROR_OK;
3629 }
3630 u32 reg_no = 0;
3631 reg_t *reg = NULL;
3632 if(argc > 0)
3633 {
3634 reg_no = strtoul(args[0], NULL, 0);
3635 /*translate from xscale cp15 register no to openocd register*/
3636 switch(reg_no)
3637 {
3638 case 0:
3639 reg_no = XSCALE_MAINID;
3640 break;
3641 case 1:
3642 reg_no = XSCALE_CTRL;
3643 break;
3644 case 2:
3645 reg_no = XSCALE_TTB;
3646 break;
3647 case 3:
3648 reg_no = XSCALE_DAC;
3649 break;
3650 case 5:
3651 reg_no = XSCALE_FSR;
3652 break;
3653 case 6:
3654 reg_no = XSCALE_FAR;
3655 break;
3656 case 13:
3657 reg_no = XSCALE_PID;
3658 break;
3659 case 15:
3660 reg_no = XSCALE_CPACCESS;
3661 break;
3662 default:
3663 command_print(cmd_ctx, "invalid register number");
3664 return ERROR_INVALID_ARGUMENTS;
3665 }
3666 reg = &xscale->reg_cache->reg_list[reg_no];
3667
3668 }
3669 if(argc == 1)
3670 {
3671 u32 value;
3672
3673 /* read cp15 control register */
3674 xscale_get_reg(reg);
3675 value = buf_get_u32(reg->value, 0, 32);
3676 command_print(cmd_ctx, "%s (/%i): 0x%x", reg->name, reg->size, value);
3677 }
3678 else if(argc == 2)
3679 {
3680
3681 u32 value = strtoul(args[1], NULL, 0);
3682
3683 /* send CP write request (command 0x41) */
3684 xscale_send_u32(target, 0x41);
3685
3686 /* send CP register number */
3687 xscale_send_u32(target, reg_no);
3688
3689 /* send CP register value */
3690 xscale_send_u32(target, value);
3691
3692 /* execute cpwait to ensure outstanding operations complete */
3693 xscale_send_u32(target, 0x53);
3694 }
3695 else
3696 {
3697 command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
3698 }
3699
3700 return ERROR_OK;
3701 }
3702
3703 int xscale_register_commands(struct command_context_s *cmd_ctx)
3704 {
3705 command_t *xscale_cmd;
3706
3707 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3708
3709 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3710 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3711
3712 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3713 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3714 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3715 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3716
3717 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_idcache_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
3718
3719 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable|disable> ['fill' [n]|'wrap']");
3720
3721 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3722 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3723 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3724 COMMAND_EXEC, "load image from <file> [base address]");
3725
3726 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3727
3728 armv4_5_register_commands(cmd_ctx);
3729
3730 return ERROR_OK;
3731 }
