1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * This program is free software; you can redistribute it and/or modify *
9 * it under the terms of the GNU General Public License as published by *
10 * the Free Software Foundation; either version 2 of the License, or *
11 * (at your option) any later version. *
12 * *
13 * This program is distributed in the hope that it will be useful, *
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
16 * GNU General Public License for more details. *
17 * *
18 * You should have received a copy of the GNU General Public License *
19 * along with this program; if not, write to the *
20 * Free Software Foundation, Inc., *
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
22 ***************************************************************************/
23 #ifdef HAVE_CONFIG_H
24 #include "config.h"
25 #endif
26
27 #include "replacements.h"
28
29 #include "xscale.h"
30
31 #include "arm7_9_common.h"
32 #include "register.h"
33 #include "target.h"
34 #include "armv4_5.h"
35 #include "arm_simulator.h"
36 #include "arm_disassembler.h"
37 #include "log.h"
38 #include "jtag.h"
39 #include "binarybuffer.h"
40 #include "time_support.h"
41 #include "breakpoints.h"
42 #include "fileio.h"
43
44 #include <stdlib.h>
45 #include <string.h>
46
47 #include <sys/types.h>
48 #include <unistd.h>
49 #include <errno.h>
50
51
52 /* cli handling */
53 int xscale_register_commands(struct command_context_s *cmd_ctx);
54
55 /* forward declarations */
56 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target);
57 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target);
58 int xscale_quit();
59
60 int xscale_arch_state(struct target_s *target);
61 int xscale_poll(target_t *target);
62 int xscale_halt(target_t *target);
63 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution);
64 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints);
65 int xscale_debug_entry(target_t *target);
66 int xscale_restore_context(target_t *target);
67
68 int xscale_assert_reset(target_t *target);
69 int xscale_deassert_reset(target_t *target);
70 int xscale_soft_reset_halt(struct target_s *target);
71
72 int xscale_set_reg_u32(reg_t *reg, u32 value);
73
74 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode);
75 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value);
76
77 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
78 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
79 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer);
80
81 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
82 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
83 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
84 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
85 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
86 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
87 void xscale_enable_watchpoints(struct target_s *target);
88 void xscale_enable_breakpoints(struct target_s *target);
89 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical);
90 static int xscale_mmu(struct target_s *target, int *enabled);
91
92 int xscale_read_trace(target_t *target);
93
94 target_type_t xscale_target =
95 {
96 .name = "xscale",
97
98 .poll = xscale_poll,
99 .arch_state = xscale_arch_state,
100
101 .target_request_data = NULL,
102
103 .halt = xscale_halt,
104 .resume = xscale_resume,
105 .step = xscale_step,
106
107 .assert_reset = xscale_assert_reset,
108 .deassert_reset = xscale_deassert_reset,
109 .soft_reset_halt = xscale_soft_reset_halt,
110
111 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
112
113 .read_memory = xscale_read_memory,
114 .write_memory = xscale_write_memory,
115 .bulk_write_memory = xscale_bulk_write_memory,
116 .checksum_memory = arm7_9_checksum_memory,
117 .blank_check_memory = arm7_9_blank_check_memory,
118
119 .run_algorithm = armv4_5_run_algorithm,
120
121 .add_breakpoint = xscale_add_breakpoint,
122 .remove_breakpoint = xscale_remove_breakpoint,
123 .add_watchpoint = xscale_add_watchpoint,
124 .remove_watchpoint = xscale_remove_watchpoint,
125
126 .register_commands = xscale_register_commands,
127 .target_command = xscale_target_command,
128 .init_target = xscale_init_target,
129 .quit = xscale_quit,
130
131 .virt2phys = xscale_virt2phys,
132 .mmu = xscale_mmu
133 };
134
135 char* xscale_reg_list[] =
136 {
137 "XSCALE_MAINID", /* 0 */
138 "XSCALE_CACHETYPE",
139 "XSCALE_CTRL",
140 "XSCALE_AUXCTRL",
141 "XSCALE_TTB",
142 "XSCALE_DAC",
143 "XSCALE_FSR",
144 "XSCALE_FAR",
145 "XSCALE_PID",
146 "XSCALE_CPACCESS",
147 "XSCALE_IBCR0", /* 10 */
148 "XSCALE_IBCR1",
149 "XSCALE_DBR0",
150 "XSCALE_DBR1",
151 "XSCALE_DBCON",
152 "XSCALE_TBREG",
153 "XSCALE_CHKPT0",
154 "XSCALE_CHKPT1",
155 "XSCALE_DCSR",
156 "XSCALE_TX",
157 "XSCALE_RX", /* 20 */
158 "XSCALE_TXRXCTRL",
159 };
160
161 xscale_reg_t xscale_reg_arch_info[] =
162 {
163 {XSCALE_MAINID, NULL},
164 {XSCALE_CACHETYPE, NULL},
165 {XSCALE_CTRL, NULL},
166 {XSCALE_AUXCTRL, NULL},
167 {XSCALE_TTB, NULL},
168 {XSCALE_DAC, NULL},
169 {XSCALE_FSR, NULL},
170 {XSCALE_FAR, NULL},
171 {XSCALE_PID, NULL},
172 {XSCALE_CPACCESS, NULL},
173 {XSCALE_IBCR0, NULL},
174 {XSCALE_IBCR1, NULL},
175 {XSCALE_DBR0, NULL},
176 {XSCALE_DBR1, NULL},
177 {XSCALE_DBCON, NULL},
178 {XSCALE_TBREG, NULL},
179 {XSCALE_CHKPT0, NULL},
180 {XSCALE_CHKPT1, NULL},
181 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
182 {-1, NULL}, /* TX accessed via JTAG */
183 {-1, NULL}, /* RX accessed via JTAG */
184 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
185 };
186
187 int xscale_reg_arch_type = -1;
188
189 int xscale_get_reg(reg_t *reg);
190 int xscale_set_reg(reg_t *reg, u8 *buf);
191
192 int xscale_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
193 {
194 armv4_5_common_t *armv4_5 = target->arch_info;
195 xscale_common_t *xscale = armv4_5->arch_info;
196
197 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
198 {
199 LOG_ERROR("target isn't an XScale target");
200 return -1;
201 }
202
203 if (xscale->common_magic != XSCALE_COMMON_MAGIC)
204 {
205 LOG_ERROR("target isn't an XScale target");
206 return -1;
207 }
208
209 *armv4_5_p = armv4_5;
210 *xscale_p = xscale;
211
212 return ERROR_OK;
213 }
214
215 int xscale_jtag_set_instr(int chain_pos, u32 new_instr)
216 {
217 jtag_device_t *device = jtag_get_device(chain_pos);
218
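/* only load a new instruction into the IR if it differs from the one currently selected */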
219 if (buf_get_u32(device->cur_instr, 0, device->ir_length) != new_instr)
220 {
221 scan_field_t field;
222
223 field.device = chain_pos;
224 field.num_bits = device->ir_length;
225 field.out_value = calloc(CEIL(field.num_bits, 8), 1);
226 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
227 field.out_mask = NULL;
228 field.in_value = NULL;
229 jtag_set_check_value(&field, device->expected, device->expected_mask, NULL);
230
231 jtag_add_ir_scan(1, &field, -1);
232
233 free(field.out_value);
234 }
235
236 return ERROR_OK;
237 }
238
239 int xscale_read_dcsr(target_t *target)
240 {
241 armv4_5_common_t *armv4_5 = target->arch_info;
242 xscale_common_t *xscale = armv4_5->arch_info;
243
244 int retval;
245
246 scan_field_t fields[3];
247 u8 field0 = 0x0;
248 u8 field0_check_value = 0x2;
249 u8 field0_check_mask = 0x7;
250 u8 field2 = 0x0;
251 u8 field2_check_value = 0x0;
252 u8 field2_check_mask = 0x1;
253
254 jtag_add_end_state(TAP_PD);
255 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
256
257 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
258 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
259
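/* DCSR is scanned as 3 control/handshake bits (bit 1: hold_rst, bit 2: external debug break), the 32-bit DCSR value and one trailing bit */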
260 fields[0].device = xscale->jtag_info.chain_pos;
261 fields[0].num_bits = 3;
262 fields[0].out_value = &field0;
263 fields[0].out_mask = NULL;
264 fields[0].in_value = NULL;
265 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
266
267 fields[1].device = xscale->jtag_info.chain_pos;
268 fields[1].num_bits = 32;
269 fields[1].out_value = NULL;
270 fields[1].out_mask = NULL;
271 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
272 fields[1].in_handler = NULL;
273 fields[1].in_handler_priv = NULL;
274 fields[1].in_check_value = NULL;
275 fields[1].in_check_mask = NULL;
276
277 fields[2].device = xscale->jtag_info.chain_pos;
278 fields[2].num_bits = 1;
279 fields[2].out_value = &field2;
280 fields[2].out_mask = NULL;
281 fields[2].in_value = NULL;
282 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
283
284 jtag_add_dr_scan(3, fields, -1);
285
286 if ((retval = jtag_execute_queue()) != ERROR_OK)
287 {
288 LOG_ERROR("JTAG error while reading DCSR");
289 return retval;
290 }
291
292 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
293 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
294
295 /* write the register with the value we just read back;
296 * on this second pass, only the first bit of field0 is guaranteed to be 0
297 */
298 field0_check_mask = 0x1;
299 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
300 fields[1].in_value = NULL;
301
302 jtag_add_end_state(TAP_RTI);
303
304 jtag_add_dr_scan(3, fields, -1);
305
306 /* DANGER!!! this must be here. It makes sure that the arguments
307 * to jtag_set_check_value() do not go out of scope! */
308 return jtag_execute_queue();
309 }
310
311 int xscale_receive(target_t *target, u32 *buffer, int num_words)
312 {
313 if (num_words==0)
314 return ERROR_INVALID_ARGUMENTS;
315
316 int retval=ERROR_OK;
317 armv4_5_common_t *armv4_5 = target->arch_info;
318 xscale_common_t *xscale = armv4_5->arch_info;
319
320 enum tap_state path[3];
321 scan_field_t fields[3];
322
323 u8 *field0 = malloc(num_words * 1);
324 u8 field0_check_value = 0x2;
325 u8 field0_check_mask = 0x6;
326 u32 *field1 = malloc(num_words * 4);
327 u8 field2_check_value = 0x0;
328 u8 field2_check_mask = 0x1;
329 int words_done = 0;
330 int words_scheduled = 0;
331
332 int i;
333
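/* TAP path from Run-Test/Idle into Shift-DR: Select-DR-Scan, Capture-DR, Shift-DR */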
334 path[0] = TAP_SDS;
335 path[1] = TAP_CD;
336 path[2] = TAP_SD;
337
338 fields[0].device = xscale->jtag_info.chain_pos;
339 fields[0].num_bits = 3;
340 fields[0].out_value = NULL;
341 fields[0].out_mask = NULL;
342 fields[0].in_value = NULL;
343 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
344
345 fields[1].device = xscale->jtag_info.chain_pos;
346 fields[1].num_bits = 32;
347 fields[1].out_value = NULL;
348 fields[1].out_mask = NULL;
349 fields[1].in_value = NULL;
350 fields[1].in_handler = NULL;
351 fields[1].in_handler_priv = NULL;
352 fields[1].in_check_value = NULL;
353 fields[1].in_check_mask = NULL;
354
355
356
357 fields[2].device = xscale->jtag_info.chain_pos;
358 fields[2].num_bits = 1;
359 fields[2].out_value = NULL;
360 fields[2].out_mask = NULL;
361 fields[2].in_value = NULL;
362 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
363
364 jtag_add_end_state(TAP_RTI);
365 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
366 jtag_add_runtest(1, -1); /* ensures that we're in the TAP_RTI state as the above could be a no-op */
367
368 /* repeat until all words have been collected */
369 int attempts=0;
370 while (words_done < num_words)
371 {
372 /* schedule reads */
373 words_scheduled = 0;
374 for (i = words_done; i < num_words; i++)
375 {
376 fields[0].in_value = &field0[i];
377 fields[1].in_handler = buf_to_u32_handler;
378 fields[1].in_handler_priv = (u8*)&field1[i];
379
380 jtag_add_pathmove(3, path);
381 jtag_add_dr_scan(3, fields, TAP_RTI);
382 words_scheduled++;
383 }
384
385 if ((retval = jtag_execute_queue()) != ERROR_OK)
386 {
387 LOG_ERROR("JTAG error while receiving data from debug handler");
388 break;
389 }
390
391 /* examine results */
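/* a word whose ready bit (bit 0 of field0[i]) is clear was not delivered by the debug handler; drop it by shifting the remaining entries down so it is re-read on the next pass */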
392 for (i = words_done; i < num_words; i++)
393 {
394 if (!(field0[i] & 1))
395 {
396 /* move backwards if necessary */
397 int j;
398 for (j = i; j < num_words - 1; j++)
399 {
400 field0[j] = field0[j+1];
401 field1[j] = field1[j+1];
402 }
403 words_scheduled--;
404 }
405 }
406 if (words_scheduled==0)
407 {
408 if (attempts++==1000)
409 {
410 LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
411 retval=ERROR_TARGET_TIMEOUT;
412 break;
413 }
414 }
415
416 words_done += words_scheduled;
417 }
418
419 for (i = 0; i < num_words; i++)
420 *(buffer++) = buf_get_u32((u8*)&field1[i], 0, 32);
421
422 free(field1);
 free(field0); /* field0 is heap-allocated above as well */
423
424 return retval;
425 }
426
427 int xscale_read_tx(target_t *target, int consume)
428 {
429 armv4_5_common_t *armv4_5 = target->arch_info;
430 xscale_common_t *xscale = armv4_5->arch_info;
431 enum tap_state path[3];
432 enum tap_state noconsume_path[6];
433
434 int retval;
435 struct timeval timeout, now;
436
437 scan_field_t fields[3];
438 u8 field0_in = 0x0;
439 u8 field0_check_value = 0x2;
440 u8 field0_check_mask = 0x6;
441 u8 field2_check_value = 0x0;
442 u8 field2_check_mask = 0x1;
443
444 jtag_add_end_state(TAP_RTI);
445
446 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
447
448 path[0] = TAP_SDS;
449 path[1] = TAP_CD;
450 path[2] = TAP_SD;
451
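/* path that samples TX without clearing TX_READY: detour through Exit1-DR and Pause-DR instead of going straight from Capture-DR to Shift-DR */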
452 noconsume_path[0] = TAP_SDS;
453 noconsume_path[1] = TAP_CD;
454 noconsume_path[2] = TAP_E1D;
455 noconsume_path[3] = TAP_PD;
456 noconsume_path[4] = TAP_E2D;
457 noconsume_path[5] = TAP_SD;
458
459 fields[0].device = xscale->jtag_info.chain_pos;
460 fields[0].num_bits = 3;
461 fields[0].out_value = NULL;
462 fields[0].out_mask = NULL;
463 fields[0].in_value = &field0_in;
464 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
465
466 fields[1].device = xscale->jtag_info.chain_pos;
467 fields[1].num_bits = 32;
468 fields[1].out_value = NULL;
469 fields[1].out_mask = NULL;
470 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
471 fields[1].in_handler = NULL;
472 fields[1].in_handler_priv = NULL;
473 fields[1].in_check_value = NULL;
474 fields[1].in_check_mask = NULL;
475
476
477
478 fields[2].device = xscale->jtag_info.chain_pos;
479 fields[2].num_bits = 1;
480 fields[2].out_value = NULL;
481 fields[2].out_mask = NULL;
482 fields[2].in_value = NULL;
483 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
484
485 gettimeofday(&timeout, NULL);
486 timeval_add_time(&timeout, 1, 0);
487
488 for (;;)
489 {
490 int i;
491 for (i=0; i<100; i++)
492 {
493 /* if we want to consume the register content (i.e. clear TX_READY),
494 * we have to go straight from Capture-DR to Shift-DR
495 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
496 */
497 if (consume)
498 jtag_add_pathmove(3, path);
499 else
500 {
501 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
502 }
503
504 jtag_add_dr_scan(3, fields, TAP_RTI);
505
506 if ((retval = jtag_execute_queue()) != ERROR_OK)
507 {
508 LOG_ERROR("JTAG error while reading TX");
509 return ERROR_TARGET_TIMEOUT;
510 }
511
512 gettimeofday(&now, NULL);
513 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
514 {
515 LOG_ERROR("time out reading TX register");
516 return ERROR_TARGET_TIMEOUT;
517 }
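/* done once TX is ready (bit 0 set); a non-consuming read needs only a single scan either way */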
518 if (!((!(field0_in & 1)) && consume))
519 {
520 goto done;
521 }
522 }
523 LOG_DEBUG("waiting 10ms");
524 usleep(10*1000); /* avoid flooding the logs */
525 }
526 done:
527
528 if (!(field0_in & 1))
529 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
530
531 return ERROR_OK;
532 }
533
534 int xscale_write_rx(target_t *target)
535 {
536 armv4_5_common_t *armv4_5 = target->arch_info;
537 xscale_common_t *xscale = armv4_5->arch_info;
538
539 int retval;
540 struct timeval timeout, now;
541
542 scan_field_t fields[3];
543 u8 field0_out = 0x0;
544 u8 field0_in = 0x0;
545 u8 field0_check_value = 0x2;
546 u8 field0_check_mask = 0x6;
547 u8 field2 = 0x0;
548 u8 field2_check_value = 0x0;
549 u8 field2_check_mask = 0x1;
550
551 jtag_add_end_state(TAP_RTI);
552
553 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
554
555 fields[0].device = xscale->jtag_info.chain_pos;
556 fields[0].num_bits = 3;
557 fields[0].out_value = &field0_out;
558 fields[0].out_mask = NULL;
559 fields[0].in_value = &field0_in;
560 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
561
562 fields[1].device = xscale->jtag_info.chain_pos;
563 fields[1].num_bits = 32;
564 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
565 fields[1].out_mask = NULL;
566 fields[1].in_value = NULL;
567 fields[1].in_handler = NULL;
568 fields[1].in_handler_priv = NULL;
569 fields[1].in_check_value = NULL;
570 fields[1].in_check_mask = NULL;
571
572
573
574 fields[2].device = xscale->jtag_info.chain_pos;
575 fields[2].num_bits = 1;
576 fields[2].out_value = &field2;
577 fields[2].out_mask = NULL;
578 fields[2].in_value = NULL;
579 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
580
581 gettimeofday(&timeout, NULL);
582 timeval_add_time(&timeout, 1, 0);
583
584 /* poll until rx_read is low */
585 LOG_DEBUG("polling RX");
586 for (;;)
587 {
588 int i;
589 for (i=0; i<10; i++)
590 {
591 jtag_add_dr_scan(3, fields, TAP_RTI);
592
593 if ((retval = jtag_execute_queue()) != ERROR_OK)
594 {
595 LOG_ERROR("JTAG error while writing RX");
596 return retval;
597 }
598
599 gettimeofday(&now, NULL);
600 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
601 {
602 LOG_ERROR("time out writing RX register");
603 return ERROR_TARGET_TIMEOUT;
604 }
605 if (!(field0_in & 1))
606 goto done;
607 }
608 LOG_DEBUG("waiting 10ms");
609 usleep(10*1000); /* wait 10ms to avoid flooding the logs */
610 }
611 done:
612
613 /* set rx_valid */
614 field2 = 0x1;
615 jtag_add_dr_scan(3, fields, TAP_RTI);
616
617 if ((retval = jtag_execute_queue()) != ERROR_OK)
618 {
619 LOG_ERROR("JTAG error while writing RX");
620 return retval;
621 }
622
623 return ERROR_OK;
624 }
625
626 /* send <count> elements of <size> bytes each to the debug handler */
627 int xscale_send(target_t *target, u8 *buffer, int count, int size)
628 {
629 armv4_5_common_t *armv4_5 = target->arch_info;
630 xscale_common_t *xscale = armv4_5->arch_info;
631 u32 t[3];
632 int bits[3];
633
634 int retval;
635
636 int done_count = 0;
637
638 jtag_add_end_state(TAP_RTI);
639
640 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
641
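/* DBGRX scan layout: 3 handshake bits (driven 0), 32 data bits, and a final RX valid bit (driven 1) */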
642 bits[0]=3;
643 t[0]=0;
644 bits[1]=32;
645 t[2]=1;
646 bits[2]=1;
647 int endianness = target->endianness;
648 while (done_count++ < count)
649 {
650 switch (size)
651 {
652 case 4:
653 if (endianness == TARGET_LITTLE_ENDIAN)
654 {
655 t[1]=le_to_h_u32(buffer);
656 } else
657 {
658 t[1]=be_to_h_u32(buffer);
659 }
660 break;
661 case 2:
662 if (endianness == TARGET_LITTLE_ENDIAN)
663 {
664 t[1]=le_to_h_u16(buffer);
665 } else
666 {
667 t[1]=be_to_h_u16(buffer);
668 }
669 break;
670 case 1:
671 t[1]=buffer[0];
672 break;
673 default:
674 LOG_ERROR("BUG: size neither 4, 2 nor 1");
675 exit(-1);
676 }
677 jtag_add_dr_out(xscale->jtag_info.chain_pos,
678 3,
679 bits,
680 t,
681 TAP_RTI);
682 buffer += size;
683 }
684
685 if ((retval = jtag_execute_queue()) != ERROR_OK)
686 {
687 LOG_ERROR("JTAG error while sending data to debug handler");
688 return retval;
689 }
690
691 return ERROR_OK;
692 }
693
694 int xscale_send_u32(target_t *target, u32 value)
695 {
696 armv4_5_common_t *armv4_5 = target->arch_info;
697 xscale_common_t *xscale = armv4_5->arch_info;
698
699 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
700 return xscale_write_rx(target);
701 }
702
703 int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
704 {
705 armv4_5_common_t *armv4_5 = target->arch_info;
706 xscale_common_t *xscale = armv4_5->arch_info;
707
708 int retval;
709
710 scan_field_t fields[3];
711 u8 field0 = 0x0;
712 u8 field0_check_value = 0x2;
713 u8 field0_check_mask = 0x7;
714 u8 field2 = 0x0;
715 u8 field2_check_value = 0x0;
716 u8 field2_check_mask = 0x1;
717
718 if (hold_rst != -1)
719 xscale->hold_rst = hold_rst;
720
721 if (ext_dbg_brk != -1)
722 xscale->external_debug_break = ext_dbg_brk;
723
724 jtag_add_end_state(TAP_RTI);
725 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
726
727 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
728 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
729
730 fields[0].device = xscale->jtag_info.chain_pos;
731 fields[0].num_bits = 3;
732 fields[0].out_value = &field0;
733 fields[0].out_mask = NULL;
734 fields[0].in_value = NULL;
735 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
736
737 fields[1].device = xscale->jtag_info.chain_pos;
738 fields[1].num_bits = 32;
739 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
740 fields[1].out_mask = NULL;
741 fields[1].in_value = NULL;
742 fields[1].in_handler = NULL;
743 fields[1].in_handler_priv = NULL;
744 fields[1].in_check_value = NULL;
745 fields[1].in_check_mask = NULL;
746
747
748
749 fields[2].device = xscale->jtag_info.chain_pos;
750 fields[2].num_bits = 1;
751 fields[2].out_value = &field2;
752 fields[2].out_mask = NULL;
753 fields[2].in_value = NULL;
754 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
755
756 jtag_add_dr_scan(3, fields, -1);
757
758 if ((retval = jtag_execute_queue()) != ERROR_OK)
759 {
760 LOG_ERROR("JTAG error while writing DCSR");
761 return retval;
762 }
763
764 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
765 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
766
767 return ERROR_OK;
768 }
769
770 /* parity of a 32-bit word: returns 0 if the number of set bits is even, 1 if odd */
771 unsigned int parity (unsigned int v)
772 {
773 unsigned int ov = v;
774 v ^= v >> 16;
775 v ^= v >> 8;
776 v ^= v >> 4;
777 v &= 0xf;
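/* v now holds the XOR-fold of all eight nibbles; 0x6996 is a 16-entry parity lookup table (bit n is the parity of n) */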
778 LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
779 return (0x6996 >> v) & 1;
780 }
781
782 int xscale_load_ic(target_t *target, int mini, u32 va, u32 buffer[8])
783 {
784 armv4_5_common_t *armv4_5 = target->arch_info;
785 xscale_common_t *xscale = armv4_5->arch_info;
786 u8 packet[4];
787 u8 cmd;
788 int word;
789
790 scan_field_t fields[2];
791
792 LOG_DEBUG("loading miniIC at 0x%8.8x", va);
793
794 jtag_add_end_state(TAP_RTI);
795 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
796
797 /* CMD is b010 for Main IC and b011 for Mini IC */
798 if (mini)
799 buf_set_u32(&cmd, 0, 3, 0x3);
800 else
801 buf_set_u32(&cmd, 0, 3, 0x2);
802
803 buf_set_u32(&cmd, 3, 3, 0x0);
804
805 /* virtual address of desired cache line */
806 buf_set_u32(packet, 0, 27, va >> 5);
807
808 fields[0].device = xscale->jtag_info.chain_pos;
809 fields[0].num_bits = 6;
810 fields[0].out_value = &cmd;
811 fields[0].out_mask = NULL;
812 fields[0].in_value = NULL;
813 fields[0].in_check_value = NULL;
814 fields[0].in_check_mask = NULL;
815 fields[0].in_handler = NULL;
816 fields[0].in_handler_priv = NULL;
817
818 fields[1].device = xscale->jtag_info.chain_pos;
819 fields[1].num_bits = 27;
820 fields[1].out_value = packet;
821 fields[1].out_mask = NULL;
822 fields[1].in_value = NULL;
823 fields[1].in_check_value = NULL;
824 fields[1].in_check_mask = NULL;
825 fields[1].in_handler = NULL;
826 fields[1].in_handler_priv = NULL;
827
828 jtag_add_dr_scan(2, fields, -1);
829
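/* reuse the scan fields for the eight cache-line words: 32 data bits followed by 1 parity bit each */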
830 fields[0].num_bits = 32;
831 fields[0].out_value = packet;
832
833 fields[1].num_bits = 1;
834 fields[1].out_value = &cmd;
835
836 for (word = 0; word < 8; word++)
837 {
838 buf_set_u32(packet, 0, 32, buffer[word]);
839 cmd = parity(*((u32*)packet));
840 jtag_add_dr_scan(2, fields, -1);
841 }
842
843 jtag_execute_queue();
844
845 return ERROR_OK;
846 }
847
848 int xscale_invalidate_ic_line(target_t *target, u32 va)
849 {
850 armv4_5_common_t *armv4_5 = target->arch_info;
851 xscale_common_t *xscale = armv4_5->arch_info;
852 u8 packet[4];
853 u8 cmd;
854
855 scan_field_t fields[2];
856
857 jtag_add_end_state(TAP_RTI);
858 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
859
860 /* CMD for invalidate IC line b000, bits [6:4] b000 */
861 buf_set_u32(&cmd, 0, 6, 0x0);
862
863 /* virtual address of desired cache line */
864 buf_set_u32(packet, 0, 27, va >> 5);
865
866 fields[0].device = xscale->jtag_info.chain_pos;
867 fields[0].num_bits = 6;
868 fields[0].out_value = &cmd;
869 fields[0].out_mask = NULL;
870 fields[0].in_value = NULL;
871 fields[0].in_check_value = NULL;
872 fields[0].in_check_mask = NULL;
873 fields[0].in_handler = NULL;
874 fields[0].in_handler_priv = NULL;
875
876 fields[1].device = xscale->jtag_info.chain_pos;
877 fields[1].num_bits = 27;
878 fields[1].out_value = packet;
879 fields[1].out_mask = NULL;
880 fields[1].in_value = NULL;
881 fields[1].in_check_value = NULL;
882 fields[1].in_check_mask = NULL;
883 fields[1].in_handler = NULL;
884 fields[1].in_handler_priv = NULL;
885
886 jtag_add_dr_scan(2, fields, -1);
887
888 return ERROR_OK;
889 }
890
891 int xscale_update_vectors(target_t *target)
892 {
893 armv4_5_common_t *armv4_5 = target->arch_info;
894 xscale_common_t *xscale = armv4_5->arch_info;
895 int i;
896 int retval;
897
898 u32 low_reset_branch, high_reset_branch;
899
900 for (i = 1; i < 8; i++)
901 {
902 /* if there's a static vector specified for this exception, override */
903 if (xscale->static_high_vectors_set & (1 << i))
904 {
905 xscale->high_vectors[i] = xscale->static_high_vectors[i];
906 }
907 else
908 {
909 retval=target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
910 if (retval == ERROR_TARGET_TIMEOUT)
911 return retval;
912 if (retval!=ERROR_OK)
913 {
914 /* Some of these reads will fail as part of normal execution */
915 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
916 }
917 }
918 }
919
920 for (i = 1; i < 8; i++)
921 {
922 if (xscale->static_low_vectors_set & (1 << i))
923 {
924 xscale->low_vectors[i] = xscale->static_low_vectors[i];
925 }
926 else
927 {
928 retval=target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
929 if (retval == ERROR_TARGET_TIMEOUT)
930 return retval;
931 if (retval!=ERROR_OK)
932 {
933 /* Some of these reads will fail as part of normal execution */
934 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
935 }
936 }
937 }
938
939 /* calculate branches to debug handler */
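/* an ARM B instruction encodes (destination - vector_address - 8) / 4 as its 24-bit word offset; the reset vectors are made to branch to handler_address + 0x20 */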
940 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
941 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
942
943 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
944 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
945
946 /* invalidate and load exception vectors in mini i-cache */
947 xscale_invalidate_ic_line(target, 0x0);
948 xscale_invalidate_ic_line(target, 0xffff0000);
949
950 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
951 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
952
953 return ERROR_OK;
954 }
955
956 int xscale_arch_state(struct target_s *target)
957 {
958 armv4_5_common_t *armv4_5 = target->arch_info;
959 xscale_common_t *xscale = armv4_5->arch_info;
960
961 char *state[] =
962 {
963 "disabled", "enabled"
964 };
965
966 char *arch_dbg_reason[] =
967 {
968 "", "\n(processor reset)", "\n(trace buffer full)"
969 };
970
971 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
972 {
973 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
974 exit(-1);
975 }
976
977 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
978 "cpsr: 0x%8.8x pc: 0x%8.8x\n"
979 "MMU: %s, D-Cache: %s, I-Cache: %s"
980 "%s",
981 armv4_5_state_strings[armv4_5->core_state],
982 target_debug_reason_strings[target->debug_reason],
983 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
984 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
985 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
986 state[xscale->armv4_5_mmu.mmu_enabled],
987 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
988 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
989 arch_dbg_reason[xscale->arch_debug_reason]);
990
991 return ERROR_OK;
992 }
993
994 int xscale_poll(target_t *target)
995 {
996 int retval=ERROR_OK;
997 armv4_5_common_t *armv4_5 = target->arch_info;
998 xscale_common_t *xscale = armv4_5->arch_info;
999
1000 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
1001 {
1002 enum target_state previous_state = target->state;
1003 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
1004 {
1005
1006 /* there's data to read from the tx register, we entered debug state */
1007 xscale->handler_running = 1;
1008
1009 target->state = TARGET_HALTED;
1010
1011 /* process debug entry, fetching current mode regs */
1012 retval = xscale_debug_entry(target);
1013 }
1014 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1015 {
1016 LOG_USER("error while polling TX register, reset the CPU");
1017 /* here we "lie" so GDB won't get stuck and a reset can be performed */
1018 target->state = TARGET_HALTED;
1019 }
1020
1021 /* debug_entry could have overwritten target state (i.e. immediate resume)
1022 * don't signal event handlers in that case
1023 */
1024 if (target->state != TARGET_HALTED)
1025 return ERROR_OK;
1026
1027 /* if target was running, signal that we halted
1028 * otherwise we reentered from debug execution */
1029 if (previous_state == TARGET_RUNNING)
1030 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1031 else
1032 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
1033 }
1034
1035 return retval;
1036 }
1037
1038 int xscale_debug_entry(target_t *target)
1039 {
1040 armv4_5_common_t *armv4_5 = target->arch_info;
1041 xscale_common_t *xscale = armv4_5->arch_info;
1042 u32 pc;
1043 u32 buffer[10];
1044 int i;
1045 int retval;
1046
1047 u32 moe;
1048
1049 /* clear external dbg break (will be written on next DCSR read) */
1050 xscale->external_debug_break = 0;
1051 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1052 return retval;
1053
1054 /* get r0, pc, r1 to r7 and cpsr */
1055 if ((retval=xscale_receive(target, buffer, 10))!=ERROR_OK)
1056 return retval;
1057
1058 /* move r0 from buffer to register cache */
1059 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
1060 armv4_5->core_cache->reg_list[0].dirty = 1;
1061 armv4_5->core_cache->reg_list[0].valid = 1;
1062 LOG_DEBUG("r0: 0x%8.8x", buffer[0]);
1063
1064 /* move pc from buffer to register cache */
1065 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
1066 armv4_5->core_cache->reg_list[15].dirty = 1;
1067 armv4_5->core_cache->reg_list[15].valid = 1;
1068 LOG_DEBUG("pc: 0x%8.8x", buffer[1]);
1069
1070 /* move data from buffer to register cache */
1071 for (i = 1; i <= 7; i++)
1072 {
1073 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
1074 armv4_5->core_cache->reg_list[i].dirty = 1;
1075 armv4_5->core_cache->reg_list[i].valid = 1;
1076 LOG_DEBUG("r%i: 0x%8.8x", i, buffer[i + 1]);
1077 }
1078
1079 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
1080 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
1081 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
1082 LOG_DEBUG("cpsr: 0x%8.8x", buffer[9]);
1083
1084 armv4_5->core_mode = buffer[9] & 0x1f;
1085 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
1086 {
1087 target->state = TARGET_UNKNOWN;
1088 LOG_ERROR("cpsr contains invalid mode value - communication failure");
1089 return ERROR_TARGET_FAILURE;
1090 }
1091 LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
1092
1093 if (buffer[9] & 0x20)
1094 armv4_5->core_state = ARMV4_5_STATE_THUMB;
1095 else
1096 armv4_5->core_state = ARMV4_5_STATE_ARM;
1097
1098
1099 if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
1100 return ERROR_FAIL;
1101
1102 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1103 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
1104 {
1105 xscale_receive(target, buffer, 8);
1106 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1107 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
1108 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
1109 }
1110 else
1111 {
1112 /* r8 to r14, but no spsr */
1113 xscale_receive(target, buffer, 7);
1114 }
1115
1116 /* move data from buffer to register cache */
1117 for (i = 8; i <= 14; i++)
1118 {
1119 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
1120 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
1121 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
1122 }
1123
1124 /* examine debug reason */
1125 xscale_read_dcsr(target);
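/* the Method Of Entry (MOE) is encoded in DCSR bits [4:2] */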
1126 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1127
1128 /* stored PC (for calculating fixup) */
1129 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1130
1131 switch (moe)
1132 {
1133 case 0x0: /* Processor reset */
1134 target->debug_reason = DBG_REASON_DBGRQ;
1135 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1136 pc -= 4;
1137 break;
1138 case 0x1: /* Instruction breakpoint hit */
1139 target->debug_reason = DBG_REASON_BREAKPOINT;
1140 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1141 pc -= 4;
1142 break;
1143 case 0x2: /* Data breakpoint hit */
1144 target->debug_reason = DBG_REASON_WATCHPOINT;
1145 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1146 pc -= 4;
1147 break;
1148 case 0x3: /* BKPT instruction executed */
1149 target->debug_reason = DBG_REASON_BREAKPOINT;
1150 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1151 pc -= 4;
1152 break;
1153 case 0x4: /* Ext. debug event */
1154 target->debug_reason = DBG_REASON_DBGRQ;
1155 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1156 pc -= 4;
1157 break;
1158 case 0x5: /* Vector trap occurred */
1159 target->debug_reason = DBG_REASON_BREAKPOINT;
1160 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1161 pc -= 4;
1162 break;
1163 case 0x6: /* Trace buffer full break */
1164 target->debug_reason = DBG_REASON_DBGRQ;
1165 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1166 pc -= 4;
1167 break;
1168 case 0x7: /* Reserved */
1169 default:
1170 LOG_ERROR("Method of Entry is 'Reserved'");
1171 exit(-1);
1172 break;
1173 }
1174
1175 /* apply PC fixup */
1176 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1177
1178 /* on the first debug entry, identify cache type */
1179 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1180 {
1181 u32 cache_type_reg;
1182
1183 /* read cp15 cache type register */
1184 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1185 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1186
1187 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1188 }
1189
1190 /* examine MMU and Cache settings */
1191 /* read cp15 control register */
1192 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1193 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1194 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1195 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1196 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1197
1198 /* tracing enabled, read collected trace data */
1199 if (xscale->trace.buffer_enabled)
1200 {
1201 xscale_read_trace(target);
1202 xscale->trace.buffer_fill--;
1203
1204 /* resume if we're still collecting trace data */
1205 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1206 && (xscale->trace.buffer_fill > 0))
1207 {
1208 xscale_resume(target, 1, 0x0, 1, 0);
1209 }
1210 else
1211 {
1212 xscale->trace.buffer_enabled = 0;
1213 }
1214 }
1215
1216 return ERROR_OK;
1217 }
1218
1219 int xscale_halt(target_t *target)
1220 {
1221 armv4_5_common_t *armv4_5 = target->arch_info;
1222 xscale_common_t *xscale = armv4_5->arch_info;
1223
1224 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1225
1226 if (target->state == TARGET_HALTED)
1227 {
1228 LOG_DEBUG("target was already halted");
1229 return ERROR_OK;
1230 }
1231 else if (target->state == TARGET_UNKNOWN)
1232 {
1233 /* this must not happen for an XScale target */
1234 LOG_ERROR("target was in unknown state when halt was requested");
1235 return ERROR_TARGET_INVALID;
1236 }
1237 else if (target->state == TARGET_RESET)
1238 {
1239 LOG_DEBUG("target->state == TARGET_RESET");
1240 }
1241 else
1242 {
1243 /* assert external dbg break */
1244 xscale->external_debug_break = 1;
1245 xscale_read_dcsr(target);
1246
1247 target->debug_reason = DBG_REASON_DBGRQ;
1248 }
1249
1250 return ERROR_OK;
1251 }
1252
1253 int xscale_enable_single_step(struct target_s *target, u32 next_pc)
1254 {
1255 armv4_5_common_t *armv4_5 = target->arch_info;
1256 xscale_common_t *xscale= armv4_5->arch_info;
1257 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1258
1259 if (xscale->ibcr0_used)
1260 {
1261 breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1262
1263 if (ibcr0_bp)
1264 {
1265 xscale_unset_breakpoint(target, ibcr0_bp);
1266 }
1267 else
1268 {
1269 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1270 exit(-1);
1271 }
1272 }
1273
1274 xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1275
1276 return ERROR_OK;
1277 }
1278
1279 int xscale_disable_single_step(struct target_s *target)
1280 {
1281 armv4_5_common_t *armv4_5 = target->arch_info;
1282 xscale_common_t *xscale= armv4_5->arch_info;
1283 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1284
1285 xscale_set_reg_u32(ibcr0, 0x0);
1286
1287 return ERROR_OK;
1288 }
1289
1290 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
1291 {
1292 armv4_5_common_t *armv4_5 = target->arch_info;
1293 xscale_common_t *xscale= armv4_5->arch_info;
1294 breakpoint_t *breakpoint = target->breakpoints;
1295
1296 u32 current_pc;
1297
1298 int retval;
1299 int i;
1300
1301 LOG_DEBUG("-");
1302
1303 if (target->state != TARGET_HALTED)
1304 {
1305 LOG_WARNING("target not halted");
1306 return ERROR_TARGET_NOT_HALTED;
1307 }
1308
1309 if (!debug_execution)
1310 {
1311 target_free_all_working_areas(target);
1312 }
1313
1314 /* update vector tables */
1315 if ((retval=xscale_update_vectors(target))!=ERROR_OK)
1316 return retval;
1317
1318 /* current = 1: continue on current pc, otherwise continue at <address> */
1319 if (!current)
1320 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1321
1322 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1323
1324 /* if we're at the reset vector, we have to simulate the branch */
1325 if (current_pc == 0x0)
1326 {
1327 arm_simulate_step(target, NULL);
1328 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1329 }
1330
1331 /* the front-end may request us not to handle breakpoints */
1332 if (handle_breakpoints)
1333 {
1334 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1335 {
1336 u32 next_pc;
1337
1338 /* there's a breakpoint at the current PC, we have to step over it */
1339 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1340 xscale_unset_breakpoint(target, breakpoint);
1341
1342 /* calculate PC of next instruction */
1343 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1344 {
1345 u32 current_opcode;
1346 target_read_u32(target, current_pc, &current_opcode);
1347 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1348 }
1349
1350 LOG_DEBUG("enable single-step");
1351 xscale_enable_single_step(target, next_pc);
1352
1353 /* restore banked registers */
1354 xscale_restore_context(target);
1355
1356 /* send resume request (command 0x30 or 0x31)
1357 * clean the trace buffer if it is to be enabled (0x62) */
1358 if (xscale->trace.buffer_enabled)
1359 {
1360 xscale_send_u32(target, 0x62);
1361 xscale_send_u32(target, 0x31);
1362 }
1363 else
1364 xscale_send_u32(target, 0x30);
1365
1366 /* send CPSR */
1367 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1368 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1369
1370 for (i = 7; i >= 0; i--)
1371 {
1372 /* send register */
1373 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1374 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1375 }
1376
1377 /* send PC */
1378 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1379 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1380
1381 /* wait for and process debug entry */
1382 xscale_debug_entry(target);
1383
1384 LOG_DEBUG("disable single-step");
1385 xscale_disable_single_step(target);
1386
1387 LOG_DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
1388 xscale_set_breakpoint(target, breakpoint);
1389 }
1390 }
1391
1392 /* enable any pending breakpoints and watchpoints */
1393 xscale_enable_breakpoints(target);
1394 xscale_enable_watchpoints(target);
1395
1396 /* restore banked registers */
1397 xscale_restore_context(target);
1398
1399 /* send resume request (command 0x30 or 0x31)
1400 * clean the trace buffer if it is to be enabled (0x62) */
1401 if (xscale->trace.buffer_enabled)
1402 {
1403 xscale_send_u32(target, 0x62);
1404 xscale_send_u32(target, 0x31);
1405 }
1406 else
1407 xscale_send_u32(target, 0x30);
1408
1409 /* send CPSR */
1410 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1411 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1412
1413 for (i = 7; i >= 0; i--)
1414 {
1415 /* send register */
1416 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1417 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1418 }
1419
1420 /* send PC */
1421 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1422 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1423
1424 target->debug_reason = DBG_REASON_NOTHALTED;
1425
1426 if (!debug_execution)
1427 {
1428 /* registers are now invalid */
1429 armv4_5_invalidate_core_regs(target);
1430 target->state = TARGET_RUNNING;
1431 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1432 }
1433 else
1434 {
1435 target->state = TARGET_DEBUG_RUNNING;
1436 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1437 }
1438
1439 LOG_DEBUG("target resumed");
1440
1441 xscale->handler_running = 1;
1442
1443 return ERROR_OK;
1444 }
1445
1446 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
1447 {
1448 armv4_5_common_t *armv4_5 = target->arch_info;
1449 xscale_common_t *xscale = armv4_5->arch_info;
1450 breakpoint_t *breakpoint = target->breakpoints;
1451
1452 u32 current_pc, next_pc;
1453 int i;
1454 int retval;
1455
1456 if (target->state != TARGET_HALTED)
1457 {
1458 LOG_WARNING("target not halted");
1459 return ERROR_TARGET_NOT_HALTED;
1460 }
1461
1462 /* current = 1: continue on current pc, otherwise continue at <address> */
1463 if (!current)
1464 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1465
1466 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1467
1468 /* if we're at the reset vector, we have to simulate the step */
1469 if (current_pc == 0x0)
1470 {
1471 arm_simulate_step(target, NULL);
1472 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1473
1474 target->debug_reason = DBG_REASON_SINGLESTEP;
1475 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1476
1477 return ERROR_OK;
1478 }
1479
1480 /* the front-end may request us not to handle breakpoints */
1481 if (handle_breakpoints)
1482 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1483 {
1484 xscale_unset_breakpoint(target, breakpoint);
1485 }
1486
1487 target->debug_reason = DBG_REASON_SINGLESTEP;
1488
1489 /* calculate PC of next instruction */
1490 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1491 {
1492 u32 current_opcode;
1493 target_read_u32(target, current_pc, &current_opcode);
1494 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1495 }
1496
1497 LOG_DEBUG("enable single-step");
1498 xscale_enable_single_step(target, next_pc);
1499
1500 /* restore banked registers */
1501 xscale_restore_context(target);
1502
1503 /* send resume request (command 0x30 or 0x31)
1504 * clean the trace buffer if it is to be enabled (0x62) */
1505 if (xscale->trace.buffer_enabled)
1506 {
1507 xscale_send_u32(target, 0x62);
1508 xscale_send_u32(target, 0x31);
1509 }
1510 else
1511 xscale_send_u32(target, 0x30);
1512
1513 /* send CPSR */
1514 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1515 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1516
1517 for (i = 7; i >= 0; i--)
1518 {
1519 /* send register */
1520 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1521 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1522 }
1523
1524 /* send PC */
1525 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1526 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1527
1528 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1529
1530 /* registers are now invalid */
1531 armv4_5_invalidate_core_regs(target);
1532
1533 /* wait for and process debug entry */
1534 xscale_debug_entry(target);
1535
1536 LOG_DEBUG("disable single-step");
1537 xscale_disable_single_step(target);
1538
1539 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1540
1541 if (breakpoint)
1542 {
1543 xscale_set_breakpoint(target, breakpoint);
1544 }
1545
1546 LOG_DEBUG("target stepped");
1547
1548 return ERROR_OK;
1549
1550 }
1551
1552 int xscale_assert_reset(target_t *target)
1553 {
1554 armv4_5_common_t *armv4_5 = target->arch_info;
1555 xscale_common_t *xscale = armv4_5->arch_info;
1556
1557 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1558
1559 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1560 * end up in T-L-R, which would reset JTAG
1561 */
1562 jtag_add_end_state(TAP_RTI);
1563 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
1564
1565 /* set Hold reset, Halt mode and Trap Reset */
1566 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1567 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1568 xscale_write_dcsr(target, 1, 0);
1569
1570 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1571 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, 0x7f);
1572 jtag_execute_queue();
1573
1574 /* assert reset */
1575 jtag_add_reset(0, 1);
1576
1577 /* sleep 1ms, to be sure we fulfill any requirements */
1578 jtag_add_sleep(1000);
1579 jtag_execute_queue();
1580
1581 target->state = TARGET_RESET;
1582
1583 return ERROR_OK;
1584 }
1585
1586 int xscale_deassert_reset(target_t *target)
1587 {
1588 armv4_5_common_t *armv4_5 = target->arch_info;
1589 xscale_common_t *xscale = armv4_5->arch_info;
1590
1591 fileio_t debug_handler;
1592 u32 address;
1593 u32 binary_size;
1594
1595 u32 buf_cnt;
1596 int i;
1597 int retval;
1598
1599 breakpoint_t *breakpoint = target->breakpoints;
1600
1601 LOG_DEBUG("-");
1602
1603 xscale->ibcr_available = 2;
1604 xscale->ibcr0_used = 0;
1605 xscale->ibcr1_used = 0;
1606
1607 xscale->dbr_available = 2;
1608 xscale->dbr0_used = 0;
1609 xscale->dbr1_used = 0;
1610
1611 /* mark all hardware breakpoints as unset */
1612 while (breakpoint)
1613 {
1614 if (breakpoint->type == BKPT_HARD)
1615 {
1616 breakpoint->set = 0;
1617 }
1618 breakpoint = breakpoint->next;
1619 }
1620
1621 if (!xscale->handler_installed)
1622 {
1623 /* release SRST */
1624 jtag_add_reset(0, 0);
1625
1626 /* wait 300ms; 150 and 100ms were not enough */
1627 jtag_add_sleep(300*1000);
1628
1629 jtag_add_runtest(2030, TAP_RTI);
1630 jtag_execute_queue();
1631
1632 /* set Hold reset, Halt mode and Trap Reset */
1633 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1634 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1635 xscale_write_dcsr(target, 1, 0);
1636
1637 /* Load debug handler */
1638 if (fileio_open(&debug_handler, "xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
1639 {
 LOG_ERROR("xscale/debug_handler.bin: could not open file");
1640 return ERROR_OK;
1641 }
1642
1643 if ((binary_size = debug_handler.size) % 4)
1644 {
1645 LOG_ERROR("debug_handler.bin: size not a multiple of 4");
1646 exit(-1);
1647 }
1648
1649 if (binary_size > 0x800)
1650 {
1651 LOG_ERROR("debug_handler.bin: larger than 2kb");
1652 exit(-1);
1653 }
1654
1655 binary_size = CEIL(binary_size, 32) * 32;
1656
1657 address = xscale->handler_address;
1658 while (binary_size > 0)
1659 {
1660 u32 cache_line[8];
1661 u8 buffer[32];
1662
1663 if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
1664 {
1665 LOG_ERROR("reading debug handler failed");
 fileio_close(&debug_handler);
 return retval;
1666 }
1667
1668 for (i = 0; i < buf_cnt; i += 4)
1669 {
1670 /* convert LE buffer to host-endian u32 */
1671 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1672 }
1673
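/* pad a partial cache line with 0xe1a08008 ("mov r8, r8", effectively a NOP) */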
1674 for (; buf_cnt < 32; buf_cnt += 4)
1675 {
1676 cache_line[buf_cnt / 4] = 0xe1a08008;
1677 }
1678
1679 /* only load addresses other than the reset vectors */
1680 if ((address % 0x400) != 0x0)
1681 {
1682 xscale_load_ic(target, 1, address, cache_line);
1683 }
1684
1685 address += buf_cnt;
1686 binary_size -= buf_cnt;
1687 }
1688
1689 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
1690 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
1691
1692 jtag_add_runtest(30, TAP_RTI);
1693
1694 jtag_add_sleep(100000);
1695
1696 /* set Hold reset, Halt mode and Trap Reset */
1697 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1698 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1699 xscale_write_dcsr(target, 1, 0);
1700
1701 /* clear Hold reset to let the target run (should enter debug handler) */
1702 xscale_write_dcsr(target, 0, 1);
1703 target->state = TARGET_RUNNING;
1704
1705 if (!target->reset_halt)
1706 {
1707 jtag_add_sleep(10000);
1708
1709 /* we should have entered debug now */
1710 xscale_debug_entry(target);
1711 target->state = TARGET_HALTED;
1712
1713 /* resume the target */
1714 xscale_resume(target, 1, 0x0, 1, 0);
1715 }
1716
1717 fileio_close(&debug_handler);
1718 }
1719 else
1720 {
1721 jtag_add_reset(0, 0);
1722 }
1723
1724
1725 return ERROR_OK;
1726 }
1727
1728 int xscale_soft_reset_halt(struct target_s *target)
1729 {
1730
1731 return ERROR_OK;
1732 }
1733
1734 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
1735 {
1736
1737 return ERROR_OK;
1738 }
1739
1740 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
1741 {
1742
1743 return ERROR_OK;
1744 }
1745
1746 int xscale_full_context(target_t *target)
1747 {
1748 armv4_5_common_t *armv4_5 = target->arch_info;
1749
1750 u32 *buffer;
1751
1752 int i, j;
1753
1754 LOG_DEBUG("-");
1755
1756 if (target->state != TARGET_HALTED)
1757 {
1758 LOG_WARNING("target not halted");
1759 return ERROR_TARGET_NOT_HALTED;
1760 }
1761
1762 buffer = malloc(4 * 8);
1763
1764 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1765 * we can't enter User mode on an XScale (unpredictable),
1766 * but User shares registers with SYS
1767 */
1768 for(i = 1; i < 7; i++)
1769 {
1770 int valid = 1;
1771
1772 /* check if there are invalid registers in the current mode
1773 */
1774 for (j = 0; j <= 16; j++)
1775 {
1776 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1777 valid = 0;
1778 }
1779
1780 if (!valid)
1781 {
1782 u32 tmp_cpsr;
1783
1784 /* request banked registers */
1785 xscale_send_u32(target, 0x0);
1786
1787 tmp_cpsr = 0x0;
1788 tmp_cpsr |= armv4_5_number_to_mode(i);
1789 tmp_cpsr |= 0xc0; /* I/F bits */
1790
1791 /* send CPSR for desired mode */
1792 xscale_send_u32(target, tmp_cpsr);
1793
1794 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1795 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1796 {
1797 xscale_receive(target, buffer, 8);
1798 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1799 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1800 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1801 }
1802 else
1803 {
1804 xscale_receive(target, buffer, 7);
1805 }
1806
1807 /* move data from buffer to register cache */
1808 for (j = 8; j <= 14; j++)
1809 {
1810 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1811 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1812 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1813 }
1814 }
1815 }
1816
1817 free(buffer);
1818
1819 return ERROR_OK;
1820 }
1821
1822 int xscale_restore_context(target_t *target)
1823 {
1824 armv4_5_common_t *armv4_5 = target->arch_info;
1825
1826 int i, j;
1827
1828 LOG_DEBUG("-");
1829
1830 if (target->state != TARGET_HALTED)
1831 {
1832 LOG_WARNING("target not halted");
1833 return ERROR_TARGET_NOT_HALTED;
1834 }
1835
1836 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1837 * we can't enter User mode on an XScale (unpredictable),
1838 * but User shares registers with SYS
1839 */
1840 for(i = 1; i < 7; i++)
1841 {
1842 int dirty = 0;
1843
1844 /* check if there are invalid registers in the current mode
1845 */
1846 for (j = 8; j <= 14; j++)
1847 {
1848 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1849 dirty = 1;
1850 }
1851
1852 /* if not USR/SYS, check if the SPSR needs to be written */
1853 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1854 {
1855 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1856 dirty = 1;
1857 }
1858
1859 if (dirty)
1860 {
1861 u32 tmp_cpsr;
1862
1863 /* send banked registers */
1864 xscale_send_u32(target, 0x1);
1865
1866 tmp_cpsr = 0x0;
1867 tmp_cpsr |= armv4_5_number_to_mode(i);
1868 tmp_cpsr |= 0xc0; /* I/F bits */
1869
1870 /* send CPSR for desired mode */
1871 xscale_send_u32(target, tmp_cpsr);
1872
1873 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1874 for (j = 8; j <= 14; j++)
1875 {
1876 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1877 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1878 }
1879
1880 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1881 {
1882 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1883 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1884 }
1885 }
1886 }
1887
1888 return ERROR_OK;
1889 }
1890
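/* read target memory through the debug handler.
 * protocol: send 0x10|size, the base address and the word count, then
 * receive <count> 32-bit words and unpack them into the caller's byte
 * buffer in target endianness. a set Sticky Abort (SA) bit in the DCSR
 * indicates a data abort; it is cleared (command 0x60) and reported.
 */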
1891 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1892 {
1893 armv4_5_common_t *armv4_5 = target->arch_info;
1894 xscale_common_t *xscale = armv4_5->arch_info;
1895 u32 *buf32;
1896 int i;
1897 int retval;
1898
1899 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1900
1901 if (target->state != TARGET_HALTED)
1902 {
1903 LOG_WARNING("target not halted");
1904 return ERROR_TARGET_NOT_HALTED;
1905 }
1906
1907 /* sanitize arguments */
1908 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1909 return ERROR_INVALID_ARGUMENTS;
1910
1911 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1912 return ERROR_TARGET_UNALIGNED_ACCESS;
1913
1914 /* send memory read request (command 0x1n, n: access size) */
1915 if ((retval=xscale_send_u32(target, 0x10 | size))!=ERROR_OK)
1916 return retval;
1917
1918 /* send base address for read request */
1919 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
1920 return retval;
1921
1922 /* send number of requested data words */
1923 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
1924 return retval;
1925
1926 /* receive data from target (count times 32-bit words in host endianness) */
1927 buf32 = malloc(4 * count);
1928 if ((retval=xscale_receive(target, buf32, count))!=ERROR_OK)
1929 {
1929 free(buf32);
1929 return retval;
1929 }
1930
1931 /* extract data from host-endian buffer into byte stream */
1932 for (i = 0; i < count; i++)
1933 {
1934 switch (size)
1935 {
1936 case 4:
1937 target_buffer_set_u32(target, buffer, buf32[i]);
1938 buffer += 4;
1939 break;
1940 case 2:
1941 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1942 buffer += 2;
1943 break;
1944 case 1:
1945 *buffer++ = buf32[i] & 0xff;
1946 break;
1947 default:
1948 LOG_ERROR("should never get here");
1949 exit(-1);
1950 }
1951 }
1952
1953 free(buf32);
1954
1955 /* examine DCSR, to see if Sticky Abort (SA) got set */
1956 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1957 return retval;
1958 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1959 {
1960 /* clear SA bit */
1961 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
1962 return retval;
1963
1964 return ERROR_TARGET_DATA_ABORT;
1965 }
1966
1967 return ERROR_OK;
1968 }
1969
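/* write target memory through the debug handler.
 * protocol: send 0x20|size, the base address and the word count, followed
 * by the data itself. as with reads, the DCSR Sticky Abort (SA) bit is
 * checked afterwards to detect data aborts.
 */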
1970 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1971 {
1972 armv4_5_common_t *armv4_5 = target->arch_info;
1973 xscale_common_t *xscale = armv4_5->arch_info;
1974 int retval;
1975
1976 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1977
1978 if (target->state != TARGET_HALTED)
1979 {
1980 LOG_WARNING("target not halted");
1981 return ERROR_TARGET_NOT_HALTED;
1982 }
1983
1984 /* sanitize arguments */
1985 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1986 return ERROR_INVALID_ARGUMENTS;
1987
1988 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1989 return ERROR_TARGET_UNALIGNED_ACCESS;
1990
1991 /* send memory write request (command 0x2n, n: access size) */
1992 if ((retval=xscale_send_u32(target, 0x20 | size))!=ERROR_OK)
1993 return retval;
1994
1995 /* send base address for write request */
1996 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
1997 return retval;
1998
1999 /* send number of requested data words to be written */
2000 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
2001 return retval;
2002
2003 /* extract data from host-endian buffer into byte stream */
2004 #if 0
2005 for (i = 0; i < count; i++)
2006 {
2007 switch (size)
2008 {
2009 case 4:
2010 value = target_buffer_get_u32(target, buffer);
2011 xscale_send_u32(target, value);
2012 buffer += 4;
2013 break;
2014 case 2:
2015 value = target_buffer_get_u16(target, buffer);
2016 xscale_send_u32(target, value);
2017 buffer += 2;
2018 break;
2019 case 1:
2020 value = *buffer;
2021 xscale_send_u32(target, value);
2022 buffer += 1;
2023 break;
2024 default:
2025 LOG_ERROR("should never get here");
2026 exit(-1);
2027 }
2028 }
2029 #endif
2030 if ((retval=xscale_send(target, buffer, count, size))!=ERROR_OK)
2031 return retval;
2032
2033 /* examine DCSR, to see if Sticky Abort (SA) got set */
2034 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
2035 return retval;
2036 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2037 {
2038 /* clear SA bit */
2039 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
2040 return retval;
2041
2042 return ERROR_TARGET_DATA_ABORT;
2043 }
2044
2045 return ERROR_OK;
2046 }
2047
2048 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
2049 {
2050 return xscale_write_memory(target, address, 4, count, buffer);
2051 }
2052
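/* return the translation table base (XSCALE_TTB) from the register cache,
 * refreshing it from the target first */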
2053 u32 xscale_get_ttb(target_t *target)
2054 {
2055 armv4_5_common_t *armv4_5 = target->arch_info;
2056 xscale_common_t *xscale = armv4_5->arch_info;
2057 u32 ttb;
2058
2059 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2060 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2061
2062 return ttb;
2063 }
2064
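/* disable MMU and/or caches by clearing the corresponding cp15 control bits.
 * the DCache is cleaned (command 0x50, using the dedicated cache clean area)
 * and invalidated (0x51) before being turned off; the ICache is invalidated
 * with command 0x52. command 0x53 (cpwait) drains outstanding operations.
 */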
2065 void xscale_disable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2066 {
2067 armv4_5_common_t *armv4_5 = target->arch_info;
2068 xscale_common_t *xscale = armv4_5->arch_info;
2069 u32 cp15_control;
2070
2071 /* read cp15 control register */
2072 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2073 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2074
2075 if (mmu)
2076 cp15_control &= ~0x1U;
2077
2078 if (d_u_cache)
2079 {
2080 /* clean DCache */
2081 xscale_send_u32(target, 0x50);
2082 xscale_send_u32(target, xscale->cache_clean_address);
2083
2084 /* invalidate DCache */
2085 xscale_send_u32(target, 0x51);
2086
2087 cp15_control &= ~0x4U;
2088 }
2089
2090 if (i_cache)
2091 {
2092 /* invalidate ICache */
2093 xscale_send_u32(target, 0x52);
2094 cp15_control &= ~0x1000U;
2095 }
2096
2097 /* write new cp15 control register */
2098 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2099
2100 /* execute cpwait to ensure outstanding operations complete */
2101 xscale_send_u32(target, 0x53);
2102 }
2103
2104 void xscale_enable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2105 {
2106 armv4_5_common_t *armv4_5 = target->arch_info;
2107 xscale_common_t *xscale = armv4_5->arch_info;
2108 u32 cp15_control;
2109
2110 /* read cp15 control register */
2111 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2112 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2113
2114 if (mmu)
2115 cp15_control |= 0x1U;
2116
2117 if (d_u_cache)
2118 cp15_control |= 0x4U;
2119
2120 if (i_cache)
2121 cp15_control |= 0x1000U;
2122
2123 /* write new cp15 control register */
2124 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2125
2126 /* execute cpwait to ensure outstanding operations complete */
2127 xscale_send_u32(target, 0x53);
2128 }
2129
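/* program a breakpoint that was already accepted by xscale_add_breakpoint.
 * hardware breakpoints use one of the two instruction breakpoint registers
 * (IBCR0/IBCR1); writing address | 1 enables the comparator. software
 * breakpoints save the original opcode and replace it with a BKPT.
 */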
2130 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2131 {
2132 armv4_5_common_t *armv4_5 = target->arch_info;
2133 xscale_common_t *xscale = armv4_5->arch_info;
2134
2135 if (target->state != TARGET_HALTED)
2136 {
2137 LOG_WARNING("target not halted");
2138 return ERROR_TARGET_NOT_HALTED;
2139 }
2140
2141 if (xscale->force_hw_bkpts)
2142 breakpoint->type = BKPT_HARD;
2143
2144 if (breakpoint->set)
2145 {
2146 LOG_WARNING("breakpoint already set");
2147 return ERROR_OK;
2148 }
2149
2150 if (breakpoint->type == BKPT_HARD)
2151 {
2152 u32 value = breakpoint->address | 1;
2153 if (!xscale->ibcr0_used)
2154 {
2155 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2156 xscale->ibcr0_used = 1;
2157 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2158 }
2159 else if (!xscale->ibcr1_used)
2160 {
2161 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2162 xscale->ibcr1_used = 1;
2163 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2164 }
2165 else
2166 {
2167 LOG_ERROR("BUG: no hardware comparator available");
2168 return ERROR_OK;
2169 }
2170 }
2171 else if (breakpoint->type == BKPT_SOFT)
2172 {
2173 if (breakpoint->length == 4)
2174 {
2175 /* keep the original instruction in target endianness */
2176 target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2177 /* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
2178 target_write_u32(target, breakpoint->address, xscale->arm_bkpt);
2179 }
2180 else
2181 {
2182 /* keep the original instruction in target endianness */
2183 target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2184 /* write the bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
2185 target_write_u16(target, breakpoint->address, xscale->thumb_bkpt);
2186 }
2187 breakpoint->set = 1;
2188 }
2189
2190 return ERROR_OK;
2191
2192 }
2193
2194 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2195 {
2196 armv4_5_common_t *armv4_5 = target->arch_info;
2197 xscale_common_t *xscale = armv4_5->arch_info;
2198
2199 if (target->state != TARGET_HALTED)
2200 {
2201 LOG_WARNING("target not halted");
2202 return ERROR_TARGET_NOT_HALTED;
2203 }
2204
2205 if (xscale->force_hw_bkpts)
2206 {
2207 LOG_DEBUG("forcing use of hardware breakpoint at address 0x%8.8x", breakpoint->address);
2208 breakpoint->type = BKPT_HARD;
2209 }
2210
2211 if (breakpoint->type == BKPT_HARD)
2212 {
2213 if (xscale->ibcr_available < 1)
2214 {
2215 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2216 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2217 }
2218 xscale->ibcr_available--;
2219 }
2220
2221 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2222 {
2223 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2224 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2225 }
2226
2227 return ERROR_OK;
2228 }
2229
2230 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2231 {
2232 armv4_5_common_t *armv4_5 = target->arch_info;
2233 xscale_common_t *xscale = armv4_5->arch_info;
2234
2235 if (target->state != TARGET_HALTED)
2236 {
2237 LOG_WARNING("target not halted");
2238 return ERROR_TARGET_NOT_HALTED;
2239 }
2240
2241 if (!breakpoint->set)
2242 {
2243 LOG_WARNING("breakpoint not set");
2244 return ERROR_OK;
2245 }
2246
2247 if (breakpoint->type == BKPT_HARD)
2248 {
2249 if (breakpoint->set == 1)
2250 {
2251 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2252 xscale->ibcr0_used = 0;
2253 }
2254 else if (breakpoint->set == 2)
2255 {
2256 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2257 xscale->ibcr1_used = 0;
2258 }
2259 breakpoint->set = 0;
2260 }
2261 else
2262 {
2263 /* restore original instruction (kept in target endianness) */
2264 if (breakpoint->length == 4)
2265 {
2266 target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2267 }
2268 else
2269 {
2270 target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2271 }
2272 breakpoint->set = 0;
2273 }
2274
2275 return ERROR_OK;
2276 }
2277
2278 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2279 {
2280 armv4_5_common_t *armv4_5 = target->arch_info;
2281 xscale_common_t *xscale = armv4_5->arch_info;
2282
2283 if (target->state != TARGET_HALTED)
2284 {
2285 LOG_WARNING("target not halted");
2286 return ERROR_TARGET_NOT_HALTED;
2287 }
2288
2289 if (breakpoint->set)
2290 {
2291 xscale_unset_breakpoint(target, breakpoint);
2292 }
2293
2294 if (breakpoint->type == BKPT_HARD)
2295 xscale->ibcr_available++;
2296
2297 return ERROR_OK;
2298 }
2299
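/* program a data watchpoint: the address goes into DBR0 or DBR1, and the
 * matching enable bits (read/write/access) are set in the corresponding
 * DBCON field (bits 1:0 for DBR0, bits 3:2 for DBR1).
 */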
2300 int xscale_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2301 {
2302 armv4_5_common_t *armv4_5 = target->arch_info;
2303 xscale_common_t *xscale = armv4_5->arch_info;
2304 u8 enable=0;
2305 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2306 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2307
2308 if (target->state != TARGET_HALTED)
2309 {
2310 LOG_WARNING("target not halted");
2311 return ERROR_TARGET_NOT_HALTED;
2312 }
2313
2314 xscale_get_reg(dbcon);
2315
2316 switch (watchpoint->rw)
2317 {
2318 case WPT_READ:
2319 enable = 0x3;
2320 break;
2321 case WPT_ACCESS:
2322 enable = 0x2;
2323 break;
2324 case WPT_WRITE:
2325 enable = 0x1;
2326 break;
2327 default:
2328 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2329 }
2330
2331 if (!xscale->dbr0_used)
2332 {
2333 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2334 dbcon_value |= enable;
2335 xscale_set_reg_u32(dbcon, dbcon_value);
2336 watchpoint->set = 1;
2337 xscale->dbr0_used = 1;
2338 }
2339 else if (!xscale->dbr1_used)
2340 {
2341 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2342 dbcon_value |= enable << 2;
2343 xscale_set_reg_u32(dbcon, dbcon_value);
2344 watchpoint->set = 2;
2345 xscale->dbr1_used = 1;
2346 }
2347 else
2348 {
2349 LOG_ERROR("BUG: no hardware comparator available");
2350 return ERROR_OK;
2351 }
2352
2353 return ERROR_OK;
2354 }
2355
2356 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2357 {
2358 armv4_5_common_t *armv4_5 = target->arch_info;
2359 xscale_common_t *xscale = armv4_5->arch_info;
2360
2361 if (target->state != TARGET_HALTED)
2362 {
2363 LOG_WARNING("target not halted");
2364 return ERROR_TARGET_NOT_HALTED;
2365 }
2366
2367 if (xscale->dbr_available < 1)
2368 {
2369 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2370 }
2371
2372 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2373 {
2374 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2375 }
2376
2377 xscale->dbr_available--;
2378
2379 return ERROR_OK;
2380 }
2381
2382 int xscale_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2383 {
2384 armv4_5_common_t *armv4_5 = target->arch_info;
2385 xscale_common_t *xscale = armv4_5->arch_info;
2386 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2387 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2388
2389 if (target->state != TARGET_HALTED)
2390 {
2391 LOG_WARNING("target not halted");
2392 return ERROR_TARGET_NOT_HALTED;
2393 }
2394
2395 if (!watchpoint->set)
2396 {
2397 LOG_WARNING("breakpoint not set");
2398 return ERROR_OK;
2399 }
2400
2401 if (watchpoint->set == 1)
2402 {
2403 dbcon_value &= ~0x3;
2404 xscale_set_reg_u32(dbcon, dbcon_value);
2405 xscale->dbr0_used = 0;
2406 }
2407 else if (watchpoint->set == 2)
2408 {
2409 dbcon_value &= ~0xc;
2410 xscale_set_reg_u32(dbcon, dbcon_value);
2411 xscale->dbr1_used = 0;
2412 }
2413 watchpoint->set = 0;
2414
2415 return ERROR_OK;
2416 }
2417
2418 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2419 {
2420 armv4_5_common_t *armv4_5 = target->arch_info;
2421 xscale_common_t *xscale = armv4_5->arch_info;
2422
2423 if (target->state != TARGET_HALTED)
2424 {
2425 LOG_WARNING("target not halted");
2426 return ERROR_TARGET_NOT_HALTED;
2427 }
2428
2429 if (watchpoint->set)
2430 {
2431 xscale_unset_watchpoint(target, watchpoint);
2432 }
2433
2434 xscale->dbr_available++;
2435
2436 return ERROR_OK;
2437 }
2438
2439 void xscale_enable_watchpoints(struct target_s *target)
2440 {
2441 watchpoint_t *watchpoint = target->watchpoints;
2442
2443 while (watchpoint)
2444 {
2445 if (watchpoint->set == 0)
2446 xscale_set_watchpoint(target, watchpoint);
2447 watchpoint = watchpoint->next;
2448 }
2449 }
2450
2451 void xscale_enable_breakpoints(struct target_s *target)
2452 {
2453 breakpoint_t *breakpoint = target->breakpoints;
2454
2455 /* set any pending breakpoints */
2456 while (breakpoint)
2457 {
2458 if (breakpoint->set == 0)
2459 xscale_set_breakpoint(target, breakpoint);
2460 breakpoint = breakpoint->next;
2461 }
2462 }
2463
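/* read an XScale debug register. DCSR, TX and RX are directly accessible
 * via JTAG; the remaining registers are fetched through the debug handler
 * (command 0x40 plus the handler's register number, value returned in TX).
 */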
2464 int xscale_get_reg(reg_t *reg)
2465 {
2466 xscale_reg_t *arch_info = reg->arch_info;
2467 target_t *target = arch_info->target;
2468 armv4_5_common_t *armv4_5 = target->arch_info;
2469 xscale_common_t *xscale = armv4_5->arch_info;
2470
2471 /* DCSR, TX and RX are accessible via JTAG */
2472 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2473 {
2474 return xscale_read_dcsr(arch_info->target);
2475 }
2476 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2477 {
2478 /* 1 = consume register content */
2479 return xscale_read_tx(arch_info->target, 1);
2480 }
2481 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2482 {
2483 /* can't read from RX register (host -> debug handler) */
2484 return ERROR_OK;
2485 }
2486 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2487 {
2488 /* can't (explicitly) read from TXRXCTRL register */
2489 return ERROR_OK;
2490 }
2491 else /* Other DBG registers have to be transferred by the debug handler */
2492 {
2493 /* send CP read request (command 0x40) */
2494 xscale_send_u32(target, 0x40);
2495
2496 /* send CP register number */
2497 xscale_send_u32(target, arch_info->dbg_handler_number);
2498
2499 /* read register value */
2500 xscale_read_tx(target, 1);
2501 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2502
2503 reg->dirty = 0;
2504 reg->valid = 1;
2505 }
2506
2507 return ERROR_OK;
2508 }
2509
2510 int xscale_set_reg(reg_t *reg, u8* buf)
2511 {
2512 xscale_reg_t *arch_info = reg->arch_info;
2513 target_t *target = arch_info->target;
2514 armv4_5_common_t *armv4_5 = target->arch_info;
2515 xscale_common_t *xscale = armv4_5->arch_info;
2516 u32 value = buf_get_u32(buf, 0, 32);
2517
2518 /* DCSR, TX and RX are accessible via JTAG */
2519 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2520 {
2521 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2522 return xscale_write_dcsr(arch_info->target, -1, -1);
2523 }
2524 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2525 {
2526 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2527 return xscale_write_rx(arch_info->target);
2528 }
2529 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2530 {
2531 /* can't write to TX register (debug-handler -> host) */
2532 return ERROR_OK;
2533 }
2534 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2535 {
2536 /* can't (explicitly) write to TXRXCTRL register */
2537 return ERROR_OK;
2538 }
2539 else /* Other DBG registers have to be transferred by the debug handler */
2540 {
2541 /* send CP write request (command 0x41) */
2542 xscale_send_u32(target, 0x41);
2543
2544 /* send CP register number */
2545 xscale_send_u32(target, arch_info->dbg_handler_number);
2546
2547 /* send CP register value */
2548 xscale_send_u32(target, value);
2549 buf_set_u32(reg->value, 0, 32, value);
2550 }
2551
2552 return ERROR_OK;
2553 }
2554
2555 /* convenience wrapper to access XScale specific registers */
2556 int xscale_set_reg_u32(reg_t *reg, u32 value)
2557 {
2558 u8 buf[4];
2559
2560 buf_set_u32(buf, 0, 32, value);
2561
2562 return xscale_set_reg(reg, buf);
2563 }
2564
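/* write the DCSR via the debug handler (CP write command 0x41) rather than
 * the DCSR JTAG scan chain, keeping the cached value in sync */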
2565 int xscale_write_dcsr_sw(target_t *target, u32 value)
2566 {
2567 /* get pointers to arch-specific information */
2568 armv4_5_common_t *armv4_5 = target->arch_info;
2569 xscale_common_t *xscale = armv4_5->arch_info;
2570 reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2571 xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
2572
2573 /* send CP write request (command 0x41) */
2574 xscale_send_u32(target, 0x41);
2575
2576 /* send CP register number */
2577 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2578
2579 /* send CP register value */
2580 xscale_send_u32(target, value);
2581 buf_set_u32(dcsr->value, 0, 32, value);
2582
2583 return ERROR_OK;
2584 }
2585
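/* fetch the trace buffer from the debug handler (command 0x61): 256 trace
 * entries followed by the two checkpoint registers. the buffer is scanned
 * backwards so the four address bytes following an indirect branch message
 * can be tagged as addresses; the result is appended to the trace data list.
 */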
2586 int xscale_read_trace(target_t *target)
2587 {
2588 /* get pointers to arch-specific information */
2589 armv4_5_common_t *armv4_5 = target->arch_info;
2590 xscale_common_t *xscale = armv4_5->arch_info;
2591 xscale_trace_data_t **trace_data_p;
2592
2593 /* 258 words from debug handler
2594 * 256 trace buffer entries
2595 * 2 checkpoint addresses
2596 */
2597 u32 trace_buffer[258];
2598 int is_address[256];
2599 int i, j;
2600
2601 if (target->state != TARGET_HALTED)
2602 {
2603 LOG_WARNING("target must be stopped to read trace data");
2604 return ERROR_TARGET_NOT_HALTED;
2605 }
2606
2607 /* send read trace buffer command (command 0x61) */
2608 xscale_send_u32(target, 0x61);
2609
2610 /* receive trace buffer content */
2611 xscale_receive(target, trace_buffer, 258);
2612
2613 /* parse buffer backwards to identify address entries */
2614 for (i = 255; i >= 0; i--)
2615 {
2616 is_address[i] = 0;
2617 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2618 ((trace_buffer[i] & 0xf0) == 0xd0))
2619 {
2620 if (i >= 3)
2621 is_address[--i] = 1;
2622 if (i >= 2)
2623 is_address[--i] = 1;
2624 if (i >= 1)
2625 is_address[--i] = 1;
2626 if (i >= 0)
2627 is_address[--i] = 1;
2628 }
2629 }
2630
2631
2632 /* search first non-zero entry */
2633 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2634 ;
2635
2636 if (j == 256)
2637 {
2638 LOG_DEBUG("no trace data collected");
2639 return ERROR_XSCALE_NO_TRACE_DATA;
2640 }
2641
2642 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2643 ;
2644
2645 *trace_data_p = malloc(sizeof(xscale_trace_data_t));
2646 (*trace_data_p)->next = NULL;
2647 (*trace_data_p)->chkpt0 = trace_buffer[256];
2648 (*trace_data_p)->chkpt1 = trace_buffer[257];
2649 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2650 (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
2651 (*trace_data_p)->depth = 256 - j;
2652
2653 for (i = j; i < 256; i++)
2654 {
2655 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2656 if (is_address[i])
2657 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2658 else
2659 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2660 }
2661
2662 return ERROR_OK;
2663 }
2664
2665 int xscale_read_instruction(target_t *target, arm_instruction_t *instruction)
2666 {
2667 /* get pointers to arch-specific information */
2668 armv4_5_common_t *armv4_5 = target->arch_info;
2669 xscale_common_t *xscale = armv4_5->arch_info;
2670 int i;
2671 int section = -1;
2672 u32 size_read;
2673 u32 opcode;
2674 int retval;
2675
2676 if (!xscale->trace.image)
2677 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2678
2679 /* search for the section the current instruction belongs to */
2680 for (i = 0; i < xscale->trace.image->num_sections; i++)
2681 {
2682 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2683 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2684 {
2685 section = i;
2686 break;
2687 }
2688 }
2689
2690 if (section == -1)
2691 {
2692 /* current instruction couldn't be found in the image */
2693 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2694 }
2695
2696 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2697 {
2698 u8 buf[4];
2699 if ((retval = image_read_section(xscale->trace.image, section,
2700 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2701 4, buf, &size_read)) != ERROR_OK)
2702 {
2703 LOG_ERROR("error while reading instruction: %i", retval);
2704 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2705 }
2706 opcode = target_buffer_get_u32(target, buf);
2707 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2708 }
2709 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2710 {
2711 u8 buf[2];
2712 if ((retval = image_read_section(xscale->trace.image, section,
2713 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2714 2, buf, &size_read)) != ERROR_OK)
2715 {
2716 LOG_ERROR("error while reading instruction: %i", retval);
2717 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2718 }
2719 opcode = target_buffer_get_u16(target, buf);
2720 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2721 }
2722 else
2723 {
2724 LOG_ERROR("BUG: unknown core state encountered");
2725 exit(-1);
2726 }
2727
2728 return ERROR_OK;
2729 }
2730
2731 int xscale_branch_address(xscale_trace_data_t *trace_data, int i, u32 *target)
2732 {
2733 /* if there are less than four entries prior to the indirect branch message
2734 * we can't extract the address */
2735 if (i < 4)
2736 {
2737 return -1;
2738 }
2739
2740 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2741 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2742
2743 return 0;
2744 }
2745
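/* walk the collected trace data and reconstruct the instruction flow.
 * the upper nibble of each message selects its type (exception, direct or
 * indirect branch, checkpointed branch, roll-over); the lower nibble is the
 * count of instructions executed since the previous message. instructions
 * are read back from the loaded trace image and printed.
 */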
2746 int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
2747 {
2748 /* get pointers to arch-specific information */
2749 armv4_5_common_t *armv4_5 = target->arch_info;
2750 xscale_common_t *xscale = armv4_5->arch_info;
2751 int next_pc_ok = 0;
2752 u32 next_pc = 0x0;
2753 xscale_trace_data_t *trace_data = xscale->trace.data;
2754 int retval;
2755
2756 while (trace_data)
2757 {
2758 int i, chkpt;
2759 int rollover;
2760 int branch;
2761 int exception;
2762 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2763
2764 chkpt = 0;
2765 rollover = 0;
2766
2767 for (i = 0; i < trace_data->depth; i++)
2768 {
2769 next_pc_ok = 0;
2770 branch = 0;
2771 exception = 0;
2772
2773 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2774 continue;
2775
2776 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2777 {
2778 case 0: /* Exceptions */
2779 case 1:
2780 case 2:
2781 case 3:
2782 case 4:
2783 case 5:
2784 case 6:
2785 case 7:
2786 exception = (trace_data->entries[i].data & 0x70) >> 4;
2787 next_pc_ok = 1;
2788 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2789 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2790 break;
2791 case 8: /* Direct Branch */
2792 branch = 1;
2793 break;
2794 case 9: /* Indirect Branch */
2795 branch = 1;
2796 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2797 {
2798 next_pc_ok = 1;
2799 }
2800 break;
2801 case 13: /* Checkpointed Indirect Branch */
2802 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2803 {
2804 next_pc_ok = 1;
2805 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2806 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2807 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2808 }
2809 /* explicit fall-through */
2810 case 12: /* Checkpointed Direct Branch */
2811 branch = 1;
2812 if (chkpt == 0)
2813 {
2814 next_pc_ok = 1;
2815 next_pc = trace_data->chkpt0;
2816 chkpt++;
2817 }
2818 else if (chkpt == 1)
2819 {
2820 next_pc_ok = 1;
2821 next_pc = trace_data->chkpt1;
2822 chkpt++;
2823 }
2824 else
2825 {
2826 LOG_WARNING("more than two checkpointed branches encountered");
2827 }
2828 break;
2829 case 15: /* Roll-over */
2830 rollover++;
2831 continue;
2832 default: /* Reserved */
2833 command_print(cmd_ctx, "--- reserved trace message ---");
2834 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2835 return ERROR_OK;
2836 }
2837
2838 if (xscale->trace.pc_ok)
2839 {
2840 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2841 arm_instruction_t instruction;
2842
2843 if ((exception == 6) || (exception == 7))
2844 {
2845 /* IRQ or FIQ exception, no instruction executed */
2846 executed -= 1;
2847 }
2848
2849 while (executed-- >= 0)
2850 {
2851 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2852 {
2853 /* can't continue tracing with no image available */
2854 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2855 {
2856 return retval;
2857 }
2858 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2859 {
2860 /* TODO: handle incomplete images */
2861 }
2862 }
2863
2864 /* a precise abort on a load to the PC is included in the incremental
2865 * word count, other instructions causing data aborts are not included
2866 */
2867 if ((executed == 0) && (exception == 4)
2868 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2869 {
2870 if ((instruction.type == ARM_LDM)
2871 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2872 {
2873 executed--;
2874 }
2875 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2876 && (instruction.info.load_store.Rd != 15))
2877 {
2878 executed--;
2879 }
2880 }
2881
2882 /* only the last instruction executed
2883 * (the one that caused the control flow change)
2884 * could be a taken branch
2885 */
2886 if (((executed == -1) && (branch == 1)) &&
2887 (((instruction.type == ARM_B) ||
2888 (instruction.type == ARM_BL) ||
2889 (instruction.type == ARM_BLX)) &&
2890 (instruction.info.b_bl_bx_blx.target_address != -1)))
2891 {
2892 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2893 }
2894 else
2895 {
2896 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2897 }
2898 command_print(cmd_ctx, "%s", instruction.text);
2899 }
2900
2901 rollover = 0;
2902 }
2903
2904 if (next_pc_ok)
2905 {
2906 xscale->trace.current_pc = next_pc;
2907 xscale->trace.pc_ok = 1;
2908 }
2909 }
2910
2911 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2912 {
2913 arm_instruction_t instruction;
2914 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2915 {
2916 /* can't continue tracing with no image available */
2917 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2918 {
2919 return retval;
2920 }
2921 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2922 {
2923 /* TODO: handle incomplete images */
2924 }
2925 }
2926 command_print(cmd_ctx, "%s", instruction.text);
2927 }
2928
2929 trace_data = trace_data->next;
2930 }
2931
2932 return ERROR_OK;
2933 }
2934
2935 void xscale_build_reg_cache(target_t *target)
2936 {
2937 /* get pointers to arch-specific information */
2938 armv4_5_common_t *armv4_5 = target->arch_info;
2939 xscale_common_t *xscale = armv4_5->arch_info;
2940
2941 reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
2942 xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
2943 int i;
2944 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
2945
2946 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2947 armv4_5->core_cache = (*cache_p);
2948
2949 /* register a register arch-type for XScale dbg registers only once */
2950 if (xscale_reg_arch_type == -1)
2951 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
2952
2953 (*cache_p)->next = malloc(sizeof(reg_cache_t));
2954 cache_p = &(*cache_p)->next;
2955
2956 /* fill in values for the xscale reg cache */
2957 (*cache_p)->name = "XScale registers";
2958 (*cache_p)->next = NULL;
2959 (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
2960 (*cache_p)->num_regs = num_regs;
2961
2962 for (i = 0; i < num_regs; i++)
2963 {
2964 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2965 (*cache_p)->reg_list[i].value = calloc(4, 1);
2966 (*cache_p)->reg_list[i].dirty = 0;
2967 (*cache_p)->reg_list[i].valid = 0;
2968 (*cache_p)->reg_list[i].size = 32;
2969 (*cache_p)->reg_list[i].bitfield_desc = NULL;
2970 (*cache_p)->reg_list[i].num_bitfields = 0;
2971 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2972 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
2973 arch_info[i] = xscale_reg_arch_info[i];
2974 arch_info[i].target = target;
2975 }
2976
2977 xscale->reg_cache = (*cache_p);
2978 }
2979
2980 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target)
2981 {
2982 return ERROR_OK;
2983 }
2984
2985 int xscale_quit()
2986 {
2987
2988 return ERROR_OK;
2989 }
2990
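/* one-time setup of the xscale_common_t structure: JTAG instruction codes
 * and IR length for the selected variant, debug handler address, reset
 * vectors branching into the handler, breakpoint/watchpoint bookkeeping,
 * and the ARMv4/5 core and MMU glue functions.
 */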
2991 int xscale_init_arch_info(target_t *target, xscale_common_t *xscale, int chain_pos, char *variant)
2992 {
2993 armv4_5_common_t *armv4_5;
2994 u32 high_reset_branch, low_reset_branch;
2995 int i;
2996
2997 armv4_5 = &xscale->armv4_5_common;
2998
2999 /* store architecture specific data (none so far) */
3000 xscale->arch_info = NULL;
3001 xscale->common_magic = XSCALE_COMMON_MAGIC;
3002
3003 /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
3004 xscale->variant = strdup(variant);
3005
3006 /* prepare JTAG information for the new target */
3007 xscale->jtag_info.chain_pos = chain_pos;
3008
3009 xscale->jtag_info.dbgrx = 0x02;
3010 xscale->jtag_info.dbgtx = 0x10;
3011 xscale->jtag_info.dcsr = 0x09;
3012 xscale->jtag_info.ldic = 0x07;
3013
3014 if ((strcmp(xscale->variant, "pxa250") == 0) ||
3015 (strcmp(xscale->variant, "pxa255") == 0) ||
3016 (strcmp(xscale->variant, "pxa26x") == 0))
3017 {
3018 xscale->jtag_info.ir_length = 5;
3019 }
3020 else if ((strcmp(xscale->variant, "pxa27x") == 0) ||
3021 (strcmp(xscale->variant, "ixp42x") == 0) ||
3022 (strcmp(xscale->variant, "ixp45x") == 0) ||
3023 (strcmp(xscale->variant, "ixp46x") == 0))
3024 {
3025 xscale->jtag_info.ir_length = 7;
3026 }
3027
3028 /* the debug handler isn't installed (and thus not running) at this time */
3029 xscale->handler_installed = 0;
3030 xscale->handler_running = 0;
3031 xscale->handler_address = 0xfe000800;
3032
3033 /* clear the vectors we keep locally for reference */
3034 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3035 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3036
3037 /* no user-specified vectors have been configured yet */
3038 xscale->static_low_vectors_set = 0x0;
3039 xscale->static_high_vectors_set = 0x0;
3040
3041 /* calculate branches to debug handler */
3042 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3043 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
3044
3045 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3046 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3047
3048 for (i = 1; i <= 7; i++)
3049 {
3050 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3051 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3052 }
3053
3054 /* 64kB aligned region used for DCache cleaning */
3055 xscale->cache_clean_address = 0xfffe0000;
3056
3057 xscale->hold_rst = 0;
3058 xscale->external_debug_break = 0;
3059
3060 xscale->force_hw_bkpts = 1;
3061
3062 xscale->ibcr_available = 2;
3063 xscale->ibcr0_used = 0;
3064 xscale->ibcr1_used = 0;
3065
3066 xscale->dbr_available = 2;
3067 xscale->dbr0_used = 0;
3068 xscale->dbr1_used = 0;
3069
3070 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3071 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3072
3073 xscale->vector_catch = 0x1;
3074
3075 xscale->trace.capture_status = TRACE_IDLE;
3076 xscale->trace.data = NULL;
3077 xscale->trace.image = NULL;
3078 xscale->trace.buffer_enabled = 0;
3079 xscale->trace.buffer_fill = 0;
3080
3081 /* prepare ARMv4/5 specific information */
3082 armv4_5->arch_info = xscale;
3083 armv4_5->read_core_reg = xscale_read_core_reg;
3084 armv4_5->write_core_reg = xscale_write_core_reg;
3085 armv4_5->full_context = xscale_full_context;
3086
3087 armv4_5_init_arch_info(target, armv4_5);
3088
3089 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3090 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3091 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3092 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3093 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3094 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3095 xscale->armv4_5_mmu.has_tiny_pages = 1;
3096 xscale->armv4_5_mmu.mmu_enabled = 0;
3097
3098 return ERROR_OK;
3099 }
3100
3101 /* target xscale <endianness> <startup_mode> <chain_pos> <variant> */
3102 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target)
3103 {
3104 int chain_pos;
3105 char *variant = NULL;
3106 xscale_common_t *xscale = malloc(sizeof(xscale_common_t));
3107 memset(xscale, 0, sizeof(*xscale));
3108
3109 if (argc < 5)
3110 {
3111 LOG_ERROR("'target xscale' requires four arguments: <endianess> <startup_mode> <chain_pos> <variant>");
3112 return ERROR_OK;
3113 }
3114
3115 chain_pos = strtoul(args[3], NULL, 0);
3116
3117 variant = args[4];
3118
3119 xscale_init_arch_info(target, xscale, chain_pos, variant);
3120 xscale_build_reg_cache(target);
3121
3122 return ERROR_OK;
3123 }
3124
3125 int xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3126 {
3127 target_t *target = NULL;
3128 armv4_5_common_t *armv4_5;
3129 xscale_common_t *xscale;
3130
3131 u32 handler_address;
3132
3133 if (argc < 2)
3134 {
3135 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3136 return ERROR_OK;
3137 }
3138
3139 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3140 {
3141 LOG_ERROR("no target '%s' configured", args[0]);
3142 return ERROR_OK;
3143 }
3144
3145 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3146 {
3147 return ERROR_OK;
3148 }
3149
3150 handler_address = strtoul(args[1], NULL, 0);
3151
3152 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3153 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3154 {
3155 xscale->handler_address = handler_address;
3156 }
3157 else
3158 {
3159 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3160 }
3161
3162 return ERROR_OK;
3163 }
3164
3165 int xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3166 {
3167 target_t *target = NULL;
3168 armv4_5_common_t *armv4_5;
3169 xscale_common_t *xscale;
3170
3171 u32 cache_clean_address;
3172
3173 if (argc < 2)
3174 {
3175 LOG_ERROR("'xscale cache_clean_address <target#> <address>' command takes two required operands");
3176 return ERROR_OK;
3177 }
3178
3179 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3180 {
3181 LOG_ERROR("no target '%s' configured", args[0]);
3182 return ERROR_OK;
3183 }
3184
3185 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3186 {
3187 return ERROR_OK;
3188 }
3189
3190 cache_clean_address = strtoul(args[1], NULL, 0);
3191
3192 if (cache_clean_address & 0xffff)
3193 {
3194 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3195 }
3196 else
3197 {
3198 xscale->cache_clean_address = cache_clean_address;
3199 }
3200
3201 return ERROR_OK;
3202 }
3203
3204 int xscale_handle_cache_info_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3205 {
3206 target_t *target = get_current_target(cmd_ctx);
3207 armv4_5_common_t *armv4_5;
3208 xscale_common_t *xscale;
3209
3210 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3211 {
3212 return ERROR_OK;
3213 }
3214
3215 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3216 }
3217
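/* translate a virtual address via armv4_5_mmu_translate_va(); a type of -1
 * indicates a failed translation, otherwise the returned value is the
 * physical address */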
3218 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical)
3219 {
3220 armv4_5_common_t *armv4_5;
3221 xscale_common_t *xscale;
3222 int retval;
3223 int type;
3224 u32 cb;
3225 int domain;
3226 u32 ap;
3227
3228
3229 if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
3230 {
3231 return retval;
3232 }
3233 u32 ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3234 if (type == -1)
3235 {
3236 return ret;
3237 }
3238 *physical = ret;
3239 return ERROR_OK;
3240 }
3241
3242 static int xscale_mmu(struct target_s *target, int *enabled)
3243 {
3244 armv4_5_common_t *armv4_5 = target->arch_info;
3245 xscale_common_t *xscale = armv4_5->arch_info;
3246
3247 if (target->state != TARGET_HALTED)
3248 {
3249 LOG_ERROR("Target not halted");
3250 return ERROR_TARGET_INVALID;
3251 }
3252 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3253 return ERROR_OK;
3254 }
3255
3256
3257 int xscale_handle_mmu_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3258 {
3259 target_t *target = get_current_target(cmd_ctx);
3260 armv4_5_common_t *armv4_5;
3261 xscale_common_t *xscale;
3262
3263 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3264 {
3265 return ERROR_OK;
3266 }
3267
3268 if (target->state != TARGET_HALTED)
3269 {
3270 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3271 return ERROR_OK;
3272 }
3273
3274 if (argc >= 1)
3275 {
3276 if (strcmp("enable", args[0]) == 0)
3277 {
3278 xscale_enable_mmu_caches(target, 1, 0, 0);
3279 xscale->armv4_5_mmu.mmu_enabled = 1;
3280 }
3281 else if (strcmp("disable", args[0]) == 0)
3282 {
3283 xscale_disable_mmu_caches(target, 1, 0, 0);
3284 xscale->armv4_5_mmu.mmu_enabled = 0;
3285 }
3286 }
3287
3288 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3289
3290 return ERROR_OK;
3291 }
3292
3293 int xscale_handle_idcache_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3294 {
3295 target_t *target = get_current_target(cmd_ctx);
3296 armv4_5_common_t *armv4_5;
3297 xscale_common_t *xscale;
3298 int icache = 0, dcache = 0;
3299
3300 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3301 {
3302 return ERROR_OK;
3303 }
3304
3305 if (target->state != TARGET_HALTED)
3306 {
3307 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3308 return ERROR_OK;
3309 }
3310
3311 if (strcmp(cmd, "icache") == 0)
3312 icache = 1;
3313 else if (strcmp(cmd, "dcache") == 0)
3314 dcache = 1;
3315
3316 if (argc >= 1)
3317 {
3318 if (strcmp("enable", args[0]) == 0)
3319 {
3320 xscale_enable_mmu_caches(target, 0, dcache, icache);
3321
3322 if (icache)
3323 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3324 else if (dcache)
3325 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3326 }
3327 else if (strcmp("disable", args[0]) == 0)
3328 {
3329 xscale_disable_mmu_caches(target, 0, dcache, icache);
3330
3331 if (icache)
3332 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3333 else if (dcache)
3334 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3335 }
3336 }
3337
3338 if (icache)
3339 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3340
3341 if (dcache)
3342 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3343
3344 return ERROR_OK;
3345 }
3346
3347 int xscale_handle_vector_catch_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3348 {
3349 target_t *target = get_current_target(cmd_ctx);
3350 armv4_5_common_t *armv4_5;
3351 xscale_common_t *xscale;
3352
3353 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3354 {
3355 return ERROR_OK;
3356 }
3357
3358 if (argc < 1)
3359 {
3360 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3361 }
3362 else
3363 {
3364 xscale->vector_catch = strtoul(args[0], NULL, 0);
3365 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3366 xscale_write_dcsr(target, -1, -1);
3367 }
3368
3369 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3370
3371 return ERROR_OK;
3372 }
3373
3374 int xscale_handle_force_hw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3375 {
3376 target_t *target = get_current_target(cmd_ctx);
3377 armv4_5_common_t *armv4_5;
3378 xscale_common_t *xscale;
3379
3380 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3381 {
3382 return ERROR_OK;
3383 }
3384
3385 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3386 {
3387 xscale->force_hw_bkpts = 1;
3388 }
3389 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3390 {
3391 xscale->force_hw_bkpts = 0;
3392 }
3393 else
3394 {
3395 command_print(cmd_ctx, "usage: xscale force_hw_bkpts <enable|disable>");
3396 }
3397
3398 command_print(cmd_ctx, "force hardware breakpoints %s", (xscale->force_hw_bkpts) ? "enabled" : "disabled");
3399
3400 return ERROR_OK;
3401 }
3402
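/* 'xscale trace_buffer' configures trace collection, e.g.
 *   xscale trace_buffer enable fill 1
 *   xscale trace_buffer disable
 * enabling discards previously collected trace data and, as the target is
 * halted, records the current PC as a known starting point; 'fill' [n] vs.
 * 'wrap' selects how the low DCSR bits are programmed below.
 */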
3403 int xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3404 {
3405 target_t *target = get_current_target(cmd_ctx);
3406 armv4_5_common_t *armv4_5;
3407 xscale_common_t *xscale;
3408 u32 dcsr_value;
3409
3410 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3411 {
3412 return ERROR_OK;
3413 }
3414
3415 if (target->state != TARGET_HALTED)
3416 {
3417 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3418 return ERROR_OK;
3419 }
3420
3421 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3422 {
3423 xscale_trace_data_t *td, *next_td;
3424 xscale->trace.buffer_enabled = 1;
3425
3426 /* free old trace data */
3427 td = xscale->trace.data;
3428 while (td)
3429 {
3430 next_td = td->next;
3431
3432 if (td->entries)
3433 free(td->entries);
3434 free(td);
3435 td = next_td;
3436 }
3437 xscale->trace.data = NULL;
3438 }
3439 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3440 {
3441 xscale->trace.buffer_enabled = 0;
3442 }
3443
3444 if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
3445 {
3446 if (argc >= 3)
3447 xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
3448 else
3449 xscale->trace.buffer_fill = 1;
3450 }
3451 else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
3452 {
3453 xscale->trace.buffer_fill = -1;
3454 }
3455
3456 if (xscale->trace.buffer_enabled)
3457 {
3458 /* if we enable the trace buffer in fill-once
3459 * mode we know the address of the first instruction */
3460 xscale->trace.pc_ok = 1;
3461 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3462 }
3463 else
3464 {
3465 /* otherwise the address is unknown, and we have no known good PC */
3466 xscale->trace.pc_ok = 0;
3467 }
3468
3469 command_print(cmd_ctx, "trace buffer %s (%s)",
3470 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3471 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3472
3473 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3474 if (xscale->trace.buffer_fill >= 0)
3475 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3476 else
3477 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3478
3479 return ERROR_OK;
3480 }
3481
3482 int xscale_handle_trace_image_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3483 {
3484 target_t *target;
3485 armv4_5_common_t *armv4_5;
3486 xscale_common_t *xscale;
3487
3488 if (argc < 1)
3489 {
3490 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3491 return ERROR_OK;
3492 }
3493
3494 target = get_current_target(cmd_ctx);
3495
3496 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3497 {
3498 return ERROR_OK;
3499 }
3500
3501 if (xscale->trace.image)
3502 {
3503 image_close(xscale->trace.image);
3504 free(xscale->trace.image);
3505 command_print(cmd_ctx, "previously loaded image found and closed");
3506 }
3507
3508 xscale->trace.image = malloc(sizeof(image_t));
3509 xscale->trace.image->base_address_set = 0;
3510 xscale->trace.image->start_address_set = 0;
3511
3512 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3513 if (argc >= 2)
3514 {
3515 xscale->trace.image->base_address_set = 1;
3516 xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
3517 }
3518 else
3519 {
3520 xscale->trace.image->base_address_set = 0;
3521 }
3522
3523 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
3524 {
3525 free(xscale->trace.image);
3526 xscale->trace.image = NULL;
3527 return ERROR_OK;
3528 }
3529
3530 return ERROR_OK;
3531 }
3532
3533 int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3534 {
3535 target_t *target = get_current_target(cmd_ctx);
3536 armv4_5_common_t *armv4_5;
3537 xscale_common_t *xscale;
3538 xscale_trace_data_t *trace_data;
3539 fileio_t file;
3540
3541 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3542 {
3543 return ERROR_OK;
3544 }
3545
3546 if (target->state != TARGET_HALTED)
3547 {
3548 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3549 return ERROR_OK;
3550 }
3551
3552 if (argc < 1)
3553 {
3554 command_print(cmd_ctx, "usage: xscale dump_trace <file>");
3555 return ERROR_OK;
3556 }
3557
3558 trace_data = xscale->trace.data;
3559
3560 if (!trace_data)
3561 {
3562 command_print(cmd_ctx, "no trace data collected");
3563 return ERROR_OK;
3564 }
3565
3566 if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3567 {
3568 return ERROR_OK;
3569 }
3570
3571 while (trace_data)
3572 {
3573 int i;
3574
3575 fileio_write_u32(&file, trace_data->chkpt0);
3576 fileio_write_u32(&file, trace_data->chkpt1);
3577 fileio_write_u32(&file, trace_data->last_instruction);
3578 fileio_write_u32(&file, trace_data->depth);
3579
3580 for (i = 0; i < trace_data->depth; i++)
3581 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3582
3583 trace_data = trace_data->next;
3584 }
3585
3586 fileio_close(&file);
3587
3588 return ERROR_OK;
3589 }
3590
3591 int xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3592 {
3593 target_t *target = get_current_target(cmd_ctx);
3594 armv4_5_common_t *armv4_5;
3595 xscale_common_t *xscale;
3596
3597 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3598 {
3599 return ERROR_OK;
3600 }
3601
3602 xscale_analyze_trace(target, cmd_ctx);
3603
3604 return ERROR_OK;
3605 }
3606
3607 int xscale_handle_cp15(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3608 {
3609 target_t *target = get_current_target(cmd_ctx);
3610 armv4_5_common_t *armv4_5;
3611 xscale_common_t *xscale;
3612
3613 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3614 {
3615 return ERROR_OK;
3616 }
3617
3618 if (target->state != TARGET_HALTED)
3619 {
3620 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3621 return ERROR_OK;
3622 }
3623 u32 reg_no = 0;
3624 reg_t *reg = NULL;
3625 if(argc > 0)
3626 {
3627 reg_no = strtoul(args[0], NULL, 0);
3628 /*translate from xscale cp15 register no to openocd register*/
3629 switch(reg_no)
3630 {
3631 case 0:
3632 reg_no = XSCALE_MAINID;
3633 break;
3634 case 1:
3635 reg_no = XSCALE_CTRL;
3636 break;
3637 case 2:
3638 reg_no = XSCALE_TTB;
3639 break;
3640 case 3:
3641 reg_no = XSCALE_DAC;
3642 break;
3643 case 5:
3644 reg_no = XSCALE_FSR;
3645 break;
3646 case 6:
3647 reg_no = XSCALE_FAR;
3648 break;
3649 case 13:
3650 reg_no = XSCALE_PID;
3651 break;
3652 case 15:
3653 reg_no = XSCALE_CPACCESS;
3654 break;
3655 default:
3656 command_print(cmd_ctx, "invalid register number");
3657 return ERROR_INVALID_ARGUMENTS;
3658 }
3659 reg = &xscale->reg_cache->reg_list[reg_no];
3660
3661 }
3662 if(argc == 1)
3663 {
3664 u32 value;
3665
3666 /* read cp15 control register */
3667 xscale_get_reg(reg);
3668 value = buf_get_u32(reg->value, 0, 32);
3669 command_print(cmd_ctx, "%s (/%i): 0x%x", reg->name, reg->size, value);
3670 }
3671 else if(argc == 2)
3672 {
3673
3674 u32 value = strtoul(args[1], NULL, 0);
3675
3676 /* send CP write request (command 0x41) */
3677 xscale_send_u32(target, 0x41);
3678
3679 /* send CP register number */
3680 xscale_send_u32(target, reg_no);
3681
3682 /* send CP register value */
3683 xscale_send_u32(target, value);
3684
3685 /* execute cpwait to ensure outstanding operations complete */
3686 xscale_send_u32(target, 0x53);
3687 }
3688 else
3689 {
3690 command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
3691 }
3692
3693 return ERROR_OK;
3694 }
3695
3696 int xscale_register_commands(struct command_context_s *cmd_ctx)
3697 {
3698 command_t *xscale_cmd;
3699
3700 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3701
3702 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3703 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3704
3705 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3706 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3707 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3708 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3709
3710 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_idcache_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
3711
3712 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable|disable> ['fill' [n]|'wrap']");
3713
3714 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3715 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3716 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3717 COMMAND_EXEC, "load image from <file> [base address]");
3718
3719 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3720
3721 armv4_5_register_commands(cmd_ctx);
3722
3723 return ERROR_OK;
3724 }
