src/target/xscale.c (openocd.git)
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
19 ***************************************************************************/
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "replacements.h"
25
26 #include "xscale.h"
27
28 #include "register.h"
29 #include "target.h"
30 #include "armv4_5.h"
31 #include "arm_simulator.h"
32 #include "arm_disassembler.h"
33 #include "log.h"
34 #include "jtag.h"
35 #include "binarybuffer.h"
36 #include "time_support.h"
37 #include "breakpoints.h"
38 #include "fileio.h"
39
40 #include <stdlib.h>
41 #include <string.h>
42
43 #include <sys/types.h>
44 #include <unistd.h>
45 #include <errno.h>
46
47
48 /* cli handling */
49 int xscale_register_commands(struct command_context_s *cmd_ctx);
50
51 /* forward declarations */
52 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target);
53 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target);
54 int xscale_quit();
55
56 int xscale_arch_state(struct target_s *target);
57 int xscale_poll(target_t *target);
58 int xscale_halt(target_t *target);
59 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution);
60 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints);
61 int xscale_debug_entry(target_t *target);
62 int xscale_restore_context(target_t *target);
63
64 int xscale_assert_reset(target_t *target);
65 int xscale_deassert_reset(target_t *target);
66 int xscale_soft_reset_halt(struct target_s *target);
67 int xscale_prepare_reset_halt(struct target_s *target);
68
69 int xscale_set_reg_u32(reg_t *reg, u32 value);
70
71 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode);
72 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value);
73
74 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
75 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
76 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer);
77 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum);
78
79 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
80 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
81 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
82 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
83 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
84 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
85 void xscale_enable_watchpoints(struct target_s *target);
86 void xscale_enable_breakpoints(struct target_s *target);
87 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical);
88 static int xscale_mmu(struct target_s *target, int *enabled);
89
90 int xscale_read_trace(target_t *target);
91
92 target_type_t xscale_target =
93 {
94 .name = "xscale",
95
96 .poll = xscale_poll,
97 .arch_state = xscale_arch_state,
98
99 .target_request_data = NULL,
100
101 .halt = xscale_halt,
102 .resume = xscale_resume,
103 .step = xscale_step,
104
105 .assert_reset = xscale_assert_reset,
106 .deassert_reset = xscale_deassert_reset,
107 .soft_reset_halt = xscale_soft_reset_halt,
108 .prepare_reset_halt = xscale_prepare_reset_halt,
109
110 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
111
112 .read_memory = xscale_read_memory,
113 .write_memory = xscale_write_memory,
114 .bulk_write_memory = xscale_bulk_write_memory,
115 .checksum_memory = xscale_checksum_memory,
116
117 .run_algorithm = armv4_5_run_algorithm,
118
119 .add_breakpoint = xscale_add_breakpoint,
120 .remove_breakpoint = xscale_remove_breakpoint,
121 .add_watchpoint = xscale_add_watchpoint,
122 .remove_watchpoint = xscale_remove_watchpoint,
123
124 .register_commands = xscale_register_commands,
125 .target_command = xscale_target_command,
126 .init_target = xscale_init_target,
127 .quit = xscale_quit,
128
129 .virt2phys = xscale_virt2phys,
130 .mmu = xscale_mmu
131 };
132
133 char* xscale_reg_list[] =
134 {
135 "XSCALE_MAINID", /* 0 */
136 "XSCALE_CACHETYPE",
137 "XSCALE_CTRL",
138 "XSCALE_AUXCTRL",
139 "XSCALE_TTB",
140 "XSCALE_DAC",
141 "XSCALE_FSR",
142 "XSCALE_FAR",
143 "XSCALE_PID",
144 "XSCALE_CPACCESS",
145 "XSCALE_IBCR0", /* 10 */
146 "XSCALE_IBCR1",
147 "XSCALE_DBR0",
148 "XSCALE_DBR1",
149 "XSCALE_DBCON",
150 "XSCALE_TBREG",
151 "XSCALE_CHKPT0",
152 "XSCALE_CHKPT1",
153 "XSCALE_DCSR",
154 "XSCALE_TX",
155 "XSCALE_RX", /* 20 */
156 "XSCALE_TXRXCTRL",
157 };
158
159 xscale_reg_t xscale_reg_arch_info[] =
160 {
161 {XSCALE_MAINID, NULL},
162 {XSCALE_CACHETYPE, NULL},
163 {XSCALE_CTRL, NULL},
164 {XSCALE_AUXCTRL, NULL},
165 {XSCALE_TTB, NULL},
166 {XSCALE_DAC, NULL},
167 {XSCALE_FSR, NULL},
168 {XSCALE_FAR, NULL},
169 {XSCALE_PID, NULL},
170 {XSCALE_CPACCESS, NULL},
171 {XSCALE_IBCR0, NULL},
172 {XSCALE_IBCR1, NULL},
173 {XSCALE_DBR0, NULL},
174 {XSCALE_DBR1, NULL},
175 {XSCALE_DBCON, NULL},
176 {XSCALE_TBREG, NULL},
177 {XSCALE_CHKPT0, NULL},
178 {XSCALE_CHKPT1, NULL},
179 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
180 {-1, NULL}, /* TX accessed via JTAG */
181 {-1, NULL}, /* RX accessed via JTAG */
182 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
183 };
184
185 int xscale_reg_arch_type = -1;
186
187 int xscale_get_reg(reg_t *reg);
188 int xscale_set_reg(reg_t *reg, u8 *buf);
189
190 int xscale_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
191 {
192 armv4_5_common_t *armv4_5 = target->arch_info;
193 xscale_common_t *xscale = armv4_5->arch_info;
194
195 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
196 {
 197 ERROR("target isn't an ARMv4/5 target");
198 return -1;
199 }
200
201 if (xscale->common_magic != XSCALE_COMMON_MAGIC)
202 {
203 ERROR("target isn't an XScale target");
204 return -1;
205 }
206
207 *armv4_5_p = armv4_5;
208 *xscale_p = xscale;
209
210 return ERROR_OK;
211 }
212
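/* Queue an IR scan that selects new_instr on the XScale TAP, but only if it
 * differs from the instruction currently latched in the jtag_device_t state;
 * redundant IR scans are skipped to keep the JTAG command queue short. The
 * captured IR value is checked against the device's expected pattern via
 * jtag_set_check_value().
 */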
213 int xscale_jtag_set_instr(int chain_pos, u32 new_instr)
214 {
215 jtag_device_t *device = jtag_get_device(chain_pos);
216
217 if (buf_get_u32(device->cur_instr, 0, device->ir_length) != new_instr)
218 {
219 scan_field_t field;
220
221 field.device = chain_pos;
222 field.num_bits = device->ir_length;
223 field.out_value = calloc(CEIL(field.num_bits, 8), 1);
224 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
225 field.out_mask = NULL;
226 field.in_value = NULL;
227 jtag_set_check_value(&field, device->expected, device->expected_mask, NULL);
228
229 jtag_add_ir_scan(1, &field, -1);
230
231 free(field.out_value);
232 }
233
234 return ERROR_OK;
235 }
236
237 int xscale_jtag_callback(enum jtag_event event, void *priv)
238 {
239 switch (event)
240 {
241 case JTAG_TRST_ASSERTED:
242 break;
243 case JTAG_TRST_RELEASED:
244 break;
245 case JTAG_SRST_ASSERTED:
246 break;
247 case JTAG_SRST_RELEASED:
248 break;
249 default:
250 WARNING("unhandled JTAG event");
251 }
252
253 return ERROR_OK;
254 }
255
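/* Read the DCSR over JTAG. The data register is scanned as three fields:
 * a 3-bit status/control field (bit 1 = hold_rst, bit 2 = external_debug_break
 * as driven below), the 32-bit DCSR value, and one trailing bit. The value
 * just read is immediately written back in a second scan so that the hold_rst
 * and external_debug_break settings take effect without disturbing the DCSR
 * contents.
 */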
256 int xscale_read_dcsr(target_t *target)
257 {
258 armv4_5_common_t *armv4_5 = target->arch_info;
259 xscale_common_t *xscale = armv4_5->arch_info;
260
261 int retval;
262
263 scan_field_t fields[3];
264 u8 field0 = 0x0;
265 u8 field0_check_value = 0x2;
266 u8 field0_check_mask = 0x7;
267 u8 field2 = 0x0;
268 u8 field2_check_value = 0x0;
269 u8 field2_check_mask = 0x1;
270
271 jtag_add_end_state(TAP_PD);
272 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
273
274 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
275 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
276
277 fields[0].device = xscale->jtag_info.chain_pos;
278 fields[0].num_bits = 3;
279 fields[0].out_value = &field0;
280 fields[0].out_mask = NULL;
281 fields[0].in_value = NULL;
282 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
283
284 fields[1].device = xscale->jtag_info.chain_pos;
285 fields[1].num_bits = 32;
286 fields[1].out_value = NULL;
287 fields[1].out_mask = NULL;
288 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
289 fields[1].in_handler = NULL;
290 fields[1].in_handler_priv = NULL;
291 fields[1].in_check_value = NULL;
292 fields[1].in_check_mask = NULL;
293
294 fields[2].device = xscale->jtag_info.chain_pos;
295 fields[2].num_bits = 1;
296 fields[2].out_value = &field2;
297 fields[2].out_mask = NULL;
298 fields[2].in_value = NULL;
299 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
300
301 jtag_add_dr_scan(3, fields, -1);
302
303 if ((retval = jtag_execute_queue()) != ERROR_OK)
304 {
305 ERROR("JTAG error while reading DCSR");
306 return retval;
307 }
308
309 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
310 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
311
 312 /* write the register with the value we just read
 313 * on this second pass, only the first bit of field0 is guaranteed to be 0
 314 */
315 field0_check_mask = 0x1;
316 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
317 fields[1].in_value = NULL;
318
319 jtag_add_end_state(TAP_RTI);
320
321 jtag_add_dr_scan(3, fields, -1);
322
 323 /* DANGER!!! this must be here. It will make sure that the arguments
 324 * to jtag_set_check_value() do not go out of scope! */
325 return jtag_execute_queue();
326 }
327
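/* Collect num_words 32-bit words from the debug handler via the DBGTX data
 * register. Each scheduled scan walks Select-DR -> Capture-DR -> Shift-DR and
 * returns a 3-bit status field plus 32 bits of data; bit 0 of the status field
 * indicates whether the handler actually had a word ready. Words that were not
 * ready are compacted out of the result arrays and the remainder is retried,
 * giving up after 1000 empty rounds.
 */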
328 int xscale_receive(target_t *target, u32 *buffer, int num_words)
329 {
330 if (num_words==0)
331 return ERROR_INVALID_ARGUMENTS;
332
333 int retval=ERROR_OK;
334 armv4_5_common_t *armv4_5 = target->arch_info;
335 xscale_common_t *xscale = armv4_5->arch_info;
336
337 enum tap_state path[3];
338 scan_field_t fields[3];
339
340 u8 *field0 = malloc(num_words * 1);
341 u8 field0_check_value = 0x2;
342 u8 field0_check_mask = 0x6;
343 u32 *field1 = malloc(num_words * 4);
344 u8 field2_check_value = 0x0;
345 u8 field2_check_mask = 0x1;
346 int words_done = 0;
347 int words_scheduled = 0;
348
349 int i;
350
351 path[0] = TAP_SDS;
352 path[1] = TAP_CD;
353 path[2] = TAP_SD;
354
355 fields[0].device = xscale->jtag_info.chain_pos;
356 fields[0].num_bits = 3;
357 fields[0].out_value = NULL;
358 fields[0].out_mask = NULL;
359 fields[0].in_value = NULL;
360 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
361
362 fields[1].device = xscale->jtag_info.chain_pos;
363 fields[1].num_bits = 32;
364 fields[1].out_value = NULL;
365 fields[1].out_mask = NULL;
366 fields[1].in_value = NULL;
367 fields[1].in_handler = NULL;
368 fields[1].in_handler_priv = NULL;
369 fields[1].in_check_value = NULL;
370 fields[1].in_check_mask = NULL;
371
372
373
374 fields[2].device = xscale->jtag_info.chain_pos;
375 fields[2].num_bits = 1;
376 fields[2].out_value = NULL;
377 fields[2].out_mask = NULL;
378 fields[2].in_value = NULL;
379 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
380
381 jtag_add_end_state(TAP_RTI);
382 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
383 jtag_add_runtest(1, -1); /* ensures that we're in the TAP_RTI state as the above could be a no-op */
384
385 /* repeat until all words have been collected */
386 int attempts=0;
387 while (words_done < num_words)
388 {
389 /* schedule reads */
390 words_scheduled = 0;
391 for (i = words_done; i < num_words; i++)
392 {
393 fields[0].in_value = &field0[i];
394 fields[1].in_handler = buf_to_u32_handler;
395 fields[1].in_handler_priv = (u8*)&field1[i];
396
397 jtag_add_pathmove(3, path);
398 jtag_add_dr_scan(3, fields, TAP_RTI);
399 words_scheduled++;
400 }
401
402 if ((retval = jtag_execute_queue()) != ERROR_OK)
403 {
404 ERROR("JTAG error while receiving data from debug handler");
405 break;
406 }
407
408 /* examine results */
409 for (i = words_done; i < num_words; i++)
410 {
 411 if (!(field0[i] & 1))
412 {
413 /* move backwards if necessary */
414 int j;
415 for (j = i; j < num_words - 1; j++)
416 {
417 field0[j] = field0[j+1];
418 field1[j] = field1[j+1];
419 }
420 words_scheduled--;
421 }
422 }
423 if (words_scheduled==0)
424 {
425 if (attempts++==1000)
426 {
 427 ERROR("Failed to receive data from the debug handler after 1000 attempts");
428 retval=ERROR_JTAG_QUEUE_FAILED;
429 break;
430 }
431 }
432
433 words_done += words_scheduled;
434 }
435
436 for (i = 0; i < num_words; i++)
437 *(buffer++) = buf_get_u32((u8*)&field1[i], 0, 32);
438
 439 free(field1);
 free(field0);
440
441 return retval;
442 }
443
444 int xscale_read_tx(target_t *target, int consume)
445 {
446 armv4_5_common_t *armv4_5 = target->arch_info;
447 xscale_common_t *xscale = armv4_5->arch_info;
448 enum tap_state path[3];
449 enum tap_state noconsume_path[9];
450
451 int retval;
452 struct timeval timeout, now;
453
454 scan_field_t fields[3];
455 u8 field0_in = 0x0;
456 u8 field0_check_value = 0x2;
457 u8 field0_check_mask = 0x6;
458 u8 field2_check_value = 0x0;
459 u8 field2_check_mask = 0x1;
460
461 jtag_add_end_state(TAP_RTI);
462
463 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
464
465 path[0] = TAP_SDS;
466 path[1] = TAP_CD;
467 path[2] = TAP_SD;
468
469 noconsume_path[0] = TAP_SDS;
470 noconsume_path[1] = TAP_CD;
471 noconsume_path[2] = TAP_E1D;
472 noconsume_path[3] = TAP_PD;
473 noconsume_path[4] = TAP_E2D;
474 noconsume_path[5] = TAP_UD;
475 noconsume_path[6] = TAP_SDS;
476 noconsume_path[7] = TAP_CD;
477 noconsume_path[8] = TAP_SD;
478
479 fields[0].device = xscale->jtag_info.chain_pos;
480 fields[0].num_bits = 3;
481 fields[0].out_value = NULL;
482 fields[0].out_mask = NULL;
483 fields[0].in_value = &field0_in;
484 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
485
486 fields[1].device = xscale->jtag_info.chain_pos;
487 fields[1].num_bits = 32;
488 fields[1].out_value = NULL;
489 fields[1].out_mask = NULL;
490 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
491 fields[1].in_handler = NULL;
492 fields[1].in_handler_priv = NULL;
493 fields[1].in_check_value = NULL;
494 fields[1].in_check_mask = NULL;
495
496
497
498 fields[2].device = xscale->jtag_info.chain_pos;
499 fields[2].num_bits = 1;
500 fields[2].out_value = NULL;
501 fields[2].out_mask = NULL;
502 fields[2].in_value = NULL;
503 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
504
505 gettimeofday(&timeout, NULL);
506 timeval_add_time(&timeout, 5, 0);
507
508 do
509 {
510 /* if we want to consume the register content (i.e. clear TX_READY),
511 * we have to go straight from Capture-DR to Shift-DR
512 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
513 */
514 if (consume)
515 jtag_add_pathmove(3, path);
516 else
517 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
518
519 jtag_add_dr_scan(3, fields, TAP_RTI);
520
521 if ((retval = jtag_execute_queue()) != ERROR_OK)
522 {
523 ERROR("JTAG error while reading TX");
524 return ERROR_TARGET_TIMEOUT;
525 }
526
527 gettimeofday(&now, NULL);
528 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
529 {
530 ERROR("time out reading TX register");
531 return ERROR_TARGET_TIMEOUT;
532 }
533 } while ((!(field0_in & 1)) && consume);
534
535 if (!(field0_in & 1))
536 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
537
538 return ERROR_OK;
539 }
540
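/* Write the cached RX register value to the debug handler via DBGRX.
 * The scan is repeated until bit 0 of the returned status field reads 0
 * (i.e. the handler has drained the previous word), then a final scan with
 * the trailing bit set to 1 marks the new word as valid.
 */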
541 int xscale_write_rx(target_t *target)
542 {
543 armv4_5_common_t *armv4_5 = target->arch_info;
544 xscale_common_t *xscale = armv4_5->arch_info;
545
546 int retval;
547 struct timeval timeout, now;
548
549 scan_field_t fields[3];
550 u8 field0_out = 0x0;
551 u8 field0_in = 0x0;
552 u8 field0_check_value = 0x2;
553 u8 field0_check_mask = 0x6;
554 u8 field2 = 0x0;
555 u8 field2_check_value = 0x0;
556 u8 field2_check_mask = 0x1;
557
558 jtag_add_end_state(TAP_RTI);
559
560 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
561
562 fields[0].device = xscale->jtag_info.chain_pos;
563 fields[0].num_bits = 3;
564 fields[0].out_value = &field0_out;
565 fields[0].out_mask = NULL;
566 fields[0].in_value = &field0_in;
567 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
568
569 fields[1].device = xscale->jtag_info.chain_pos;
570 fields[1].num_bits = 32;
571 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
572 fields[1].out_mask = NULL;
573 fields[1].in_value = NULL;
574 fields[1].in_handler = NULL;
575 fields[1].in_handler_priv = NULL;
576 fields[1].in_check_value = NULL;
577 fields[1].in_check_mask = NULL;
578
579
580
581 fields[2].device = xscale->jtag_info.chain_pos;
582 fields[2].num_bits = 1;
583 fields[2].out_value = &field2;
584 fields[2].out_mask = NULL;
585 fields[2].in_value = NULL;
586 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
587
588 gettimeofday(&timeout, NULL);
589 timeval_add_time(&timeout, 5, 0);
590
591 /* poll until rx_read is low */
592 DEBUG("polling RX");
593 do
594 {
595 jtag_add_dr_scan(3, fields, TAP_RTI);
596
597 if ((retval = jtag_execute_queue()) != ERROR_OK)
598 {
599 ERROR("JTAG error while writing RX");
600 return retval;
601 }
602
603 gettimeofday(&now, NULL);
604 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
605 {
606 ERROR("time out writing RX register");
607 return ERROR_TARGET_TIMEOUT;
608 }
609 } while (field0_in & 1);
610
611 /* set rx_valid */
612 field2 = 0x1;
613 jtag_add_dr_scan(3, fields, TAP_RTI);
614
615 if ((retval = jtag_execute_queue()) != ERROR_OK)
616 {
617 ERROR("JTAG error while writing RX");
618 return retval;
619 }
620
621 return ERROR_OK;
622 }
623
 624 /* send count elements of size bytes each to the debug handler */
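/* When fast_memory_access is enabled, the per-scan handshake checks on
 * fields 0 and 2 (jtag_set_check_value below) are skipped, trading error
 * detection on each word for higher download throughput.
 */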
625 int xscale_send(target_t *target, u8 *buffer, int count, int size)
626 {
627 armv4_5_common_t *armv4_5 = target->arch_info;
628 xscale_common_t *xscale = armv4_5->arch_info;
629
630 int retval;
631
632 int done_count = 0;
633 u8 output[4] = {0, 0, 0, 0};
634
635 scan_field_t fields[3];
636 u8 field0_out = 0x0;
637 u8 field0_check_value = 0x2;
638 u8 field0_check_mask = 0x6;
639 u8 field2 = 0x1;
640 u8 field2_check_value = 0x0;
641 u8 field2_check_mask = 0x1;
642
643 jtag_add_end_state(TAP_RTI);
644
645 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
646
647 fields[0].device = xscale->jtag_info.chain_pos;
648 fields[0].num_bits = 3;
649 fields[0].out_value = &field0_out;
650 fields[0].out_mask = NULL;
651 fields[0].in_handler = NULL;
652 fields[0].in_value = NULL;
653 if (!xscale->fast_memory_access)
654 {
655 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
656 }
657
658 fields[1].device = xscale->jtag_info.chain_pos;
659 fields[1].num_bits = 32;
660 fields[1].out_value = output;
661 fields[1].out_mask = NULL;
662 fields[1].in_value = NULL;
663 fields[1].in_handler = NULL;
664 fields[1].in_handler_priv = NULL;
665 fields[1].in_check_value = NULL;
666 fields[1].in_check_mask = NULL;
667
668
669
670 fields[2].device = xscale->jtag_info.chain_pos;
671 fields[2].num_bits = 1;
672 fields[2].out_value = &field2;
673 fields[2].out_mask = NULL;
674 fields[2].in_value = NULL;
675 fields[2].in_handler = NULL;
676 if (!xscale->fast_memory_access)
677 {
678 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
679 }
680
681 if (size==4)
682 {
683 int endianness = target->endianness;
684 while (done_count++ < count)
685 {
686 if (endianness == TARGET_LITTLE_ENDIAN)
687 {
688 output[0]=buffer[0];
689 output[1]=buffer[1];
690 output[2]=buffer[2];
691 output[3]=buffer[3];
692 } else
693 {
694 output[0]=buffer[3];
695 output[1]=buffer[2];
696 output[2]=buffer[1];
697 output[3]=buffer[0];
698 }
699 jtag_add_dr_scan(3, fields, TAP_RTI);
700 buffer += size;
701 }
702
703 } else
704 {
705 while (done_count++ < count)
706 {
707 /* extract sized element from target-endian buffer, and put it
708 * into little-endian output buffer
709 */
710 switch (size)
711 {
712 case 2:
713 buf_set_u32(output, 0, 32, target_buffer_get_u16(target, buffer));
714 break;
715 case 1:
716 output[0] = *buffer;
717 break;
718 default:
719 ERROR("BUG: size neither 4, 2 nor 1");
720 exit(-1);
721 }
722
723 jtag_add_dr_scan(3, fields, TAP_RTI);
724 buffer += size;
725 }
726
727 }
728
729 if ((retval = jtag_execute_queue()) != ERROR_OK)
730 {
731 ERROR("JTAG error while sending data to debug handler");
732 return retval;
733 }
734
735 return ERROR_OK;
736 }
737
738 int xscale_send_u32(target_t *target, u32 value)
739 {
740 armv4_5_common_t *armv4_5 = target->arch_info;
741 xscale_common_t *xscale = armv4_5->arch_info;
742
743 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
744 return xscale_write_rx(target);
745 }
746
747 int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
748 {
749 armv4_5_common_t *armv4_5 = target->arch_info;
750 xscale_common_t *xscale = armv4_5->arch_info;
751
752 int retval;
753
754 scan_field_t fields[3];
755 u8 field0 = 0x0;
756 u8 field0_check_value = 0x2;
757 u8 field0_check_mask = 0x7;
758 u8 field2 = 0x0;
759 u8 field2_check_value = 0x0;
760 u8 field2_check_mask = 0x1;
761
762 if (hold_rst != -1)
763 xscale->hold_rst = hold_rst;
764
765 if (ext_dbg_brk != -1)
766 xscale->external_debug_break = ext_dbg_brk;
767
768 jtag_add_end_state(TAP_RTI);
769 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
770
771 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
772 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
773
774 fields[0].device = xscale->jtag_info.chain_pos;
775 fields[0].num_bits = 3;
776 fields[0].out_value = &field0;
777 fields[0].out_mask = NULL;
778 fields[0].in_value = NULL;
779 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
780
781 fields[1].device = xscale->jtag_info.chain_pos;
782 fields[1].num_bits = 32;
783 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
784 fields[1].out_mask = NULL;
785 fields[1].in_value = NULL;
786 fields[1].in_handler = NULL;
787 fields[1].in_handler_priv = NULL;
788 fields[1].in_check_value = NULL;
789 fields[1].in_check_mask = NULL;
790
791
792
793 fields[2].device = xscale->jtag_info.chain_pos;
794 fields[2].num_bits = 1;
795 fields[2].out_value = &field2;
796 fields[2].out_mask = NULL;
797 fields[2].in_value = NULL;
798 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
799
800 jtag_add_dr_scan(3, fields, -1);
801
802 if ((retval = jtag_execute_queue()) != ERROR_OK)
803 {
804 ERROR("JTAG error while writing DCSR");
805 return retval;
806 }
807
808 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
809 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
810
811 return ERROR_OK;
812 }
813
 814 /* compute the parity of a 32-bit word: returns 0 if the number of set bits is even, 1 if odd */
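/* The XOR folds below reduce the 32-bit word to a 4-bit value with the same
 * parity; 0x6996 acts as a 16-entry lookup table in which bit n holds the
 * parity of the nibble value n. Worked example: parity(0x00000007) folds to
 * v = 0x7, and (0x6996 >> 7) & 1 == 1, i.e. odd parity.
 */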
815 unsigned int parity (unsigned int v)
816 {
817 unsigned int ov = v;
818 v ^= v >> 16;
819 v ^= v >> 8;
820 v ^= v >> 4;
821 v &= 0xf;
822 DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
823 return (0x6996 >> v) & 1;
824 }
825
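/* Load one 8-word (32-byte) cache line into the instruction cache through the
 * LDIC JTAG data register. The first DR scan carries a 6-bit command (b011 for
 * the mini IC, b010 for the main IC) plus the 27 upper bits of the virtual
 * address; it is followed by eight scans of 32 data bits plus one parity bit
 * each, computed with parity() above.
 */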
826 int xscale_load_ic(target_t *target, int mini, u32 va, u32 buffer[8])
827 {
828 armv4_5_common_t *armv4_5 = target->arch_info;
829 xscale_common_t *xscale = armv4_5->arch_info;
830 u8 packet[4];
831 u8 cmd;
832 int word;
833
834 scan_field_t fields[2];
835
836 DEBUG("loading miniIC at 0x%8.8x", va);
837
838 jtag_add_end_state(TAP_RTI);
839 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
840
841 /* CMD is b010 for Main IC and b011 for Mini IC */
842 if (mini)
843 buf_set_u32(&cmd, 0, 3, 0x3);
844 else
845 buf_set_u32(&cmd, 0, 3, 0x2);
846
847 buf_set_u32(&cmd, 3, 3, 0x0);
848
849 /* virtual address of desired cache line */
850 buf_set_u32(packet, 0, 27, va >> 5);
851
852 fields[0].device = xscale->jtag_info.chain_pos;
853 fields[0].num_bits = 6;
854 fields[0].out_value = &cmd;
855 fields[0].out_mask = NULL;
856 fields[0].in_value = NULL;
857 fields[0].in_check_value = NULL;
858 fields[0].in_check_mask = NULL;
859 fields[0].in_handler = NULL;
860 fields[0].in_handler_priv = NULL;
861
862 fields[1].device = xscale->jtag_info.chain_pos;
863 fields[1].num_bits = 27;
864 fields[1].out_value = packet;
865 fields[1].out_mask = NULL;
866 fields[1].in_value = NULL;
867 fields[1].in_check_value = NULL;
868 fields[1].in_check_mask = NULL;
869 fields[1].in_handler = NULL;
870 fields[1].in_handler_priv = NULL;
871
872 jtag_add_dr_scan(2, fields, -1);
873
874 fields[0].num_bits = 32;
875 fields[0].out_value = packet;
876
877 fields[1].num_bits = 1;
878 fields[1].out_value = &cmd;
879
880 for (word = 0; word < 8; word++)
881 {
882 buf_set_u32(packet, 0, 32, buffer[word]);
883 cmd = parity(*((u32*)packet));
884 jtag_add_dr_scan(2, fields, -1);
885 }
886
887 jtag_execute_queue();
888
889 return ERROR_OK;
890 }
891
892 int xscale_invalidate_ic_line(target_t *target, u32 va)
893 {
894 armv4_5_common_t *armv4_5 = target->arch_info;
895 xscale_common_t *xscale = armv4_5->arch_info;
896 u8 packet[4];
897 u8 cmd;
898
899 scan_field_t fields[2];
900
901 jtag_add_end_state(TAP_RTI);
902 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
903
904 /* CMD for invalidate IC line b000, bits [6:4] b000 */
905 buf_set_u32(&cmd, 0, 6, 0x0);
906
907 /* virtual address of desired cache line */
908 buf_set_u32(packet, 0, 27, va >> 5);
909
910 fields[0].device = xscale->jtag_info.chain_pos;
911 fields[0].num_bits = 6;
912 fields[0].out_value = &cmd;
913 fields[0].out_mask = NULL;
914 fields[0].in_value = NULL;
915 fields[0].in_check_value = NULL;
916 fields[0].in_check_mask = NULL;
917 fields[0].in_handler = NULL;
918 fields[0].in_handler_priv = NULL;
919
920 fields[1].device = xscale->jtag_info.chain_pos;
921 fields[1].num_bits = 27;
922 fields[1].out_value = packet;
923 fields[1].out_mask = NULL;
924 fields[1].in_value = NULL;
925 fields[1].in_check_value = NULL;
926 fields[1].in_check_mask = NULL;
927 fields[1].in_handler = NULL;
928 fields[1].in_handler_priv = NULL;
929
930 jtag_add_dr_scan(2, fields, -1);
931
932 return ERROR_OK;
933 }
934
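/* Rebuild the exception vector tables that get loaded into the mini
 * instruction cache. Vectors 1..7 are taken from the user-configured static
 * vectors where set, otherwise read back from target memory (falling back to
 * a branch-to-self if the read fails). Vector 0 is always replaced with a
 * branch into the debug handler at handler_address + 0x20; the "- 0x8" in the
 * offset calculation accounts for the ARM convention that a branch offset is
 * relative to the branch address plus 8, and ">> 2" converts bytes to words.
 */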
935 int xscale_update_vectors(target_t *target)
936 {
937 armv4_5_common_t *armv4_5 = target->arch_info;
938 xscale_common_t *xscale = armv4_5->arch_info;
939 int i;
940
941 u32 low_reset_branch, high_reset_branch;
942
943 for (i = 1; i < 8; i++)
944 {
945 /* if there's a static vector specified for this exception, override */
946 if (xscale->static_high_vectors_set & (1 << i))
947 {
948 xscale->high_vectors[i] = xscale->static_high_vectors[i];
949 }
950 else
951 {
952 if (target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]) != ERROR_OK)
953 {
954 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
955 }
956 }
957 }
958
959 for (i = 1; i < 8; i++)
960 {
961 if (xscale->static_low_vectors_set & (1 << i))
962 {
963 xscale->low_vectors[i] = xscale->static_low_vectors[i];
964 }
965 else
966 {
967 if (target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]) != ERROR_OK)
968 {
969 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
970 }
971 }
972 }
973
974 /* calculate branches to debug handler */
975 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
976 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
977
978 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
979 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
980
981 /* invalidate and load exception vectors in mini i-cache */
982 xscale_invalidate_ic_line(target, 0x0);
983 xscale_invalidate_ic_line(target, 0xffff0000);
984
985 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
986 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
987
988 return ERROR_OK;
989 }
990
991 int xscale_arch_state(struct target_s *target)
992 {
993 armv4_5_common_t *armv4_5 = target->arch_info;
994 xscale_common_t *xscale = armv4_5->arch_info;
995
996 char *state[] =
997 {
998 "disabled", "enabled"
999 };
1000
1001 char *arch_dbg_reason[] =
1002 {
1003 "", "\n(processor reset)", "\n(trace buffer full)"
1004 };
1005
1006 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
1007 {
1008 ERROR("BUG: called for a non-ARMv4/5 target");
1009 exit(-1);
1010 }
1011
1012 USER("target halted in %s state due to %s, current mode: %s\n"
1013 "cpsr: 0x%8.8x pc: 0x%8.8x\n"
1014 "MMU: %s, D-Cache: %s, I-Cache: %s"
1015 "%s",
1016 armv4_5_state_strings[armv4_5->core_state],
1017 target_debug_reason_strings[target->debug_reason],
1018 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
1019 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
1020 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
1021 state[xscale->armv4_5_mmu.mmu_enabled],
1022 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
1023 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
1024 arch_dbg_reason[xscale->arch_debug_reason]);
1025
1026 return ERROR_OK;
1027 }
1028
1029 int xscale_poll(target_t *target)
1030 {
1031 int retval=ERROR_OK;
1032 armv4_5_common_t *armv4_5 = target->arch_info;
1033 xscale_common_t *xscale = armv4_5->arch_info;
1034
1035 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
1036 {
1037 enum target_state previous_state = target->state;
1038 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
1039 {
1040
1041 /* there's data to read from the tx register, we entered debug state */
1042 xscale->handler_running = 1;
1043
1044 target->state = TARGET_HALTED;
1045
1046 /* process debug entry, fetching current mode regs */
1047 retval = xscale_debug_entry(target);
1048 }
1049 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1050 {
1051 USER("error while polling TX register, reset CPU");
 1052 /* here we "lie" so GDB won't get stuck and a reset can be performed */
1053 target->state = TARGET_HALTED;
1054 }
1055
1056 /* debug_entry could have overwritten target state (i.e. immediate resume)
1057 * don't signal event handlers in that case
1058 */
1059 if (target->state != TARGET_HALTED)
1060 return ERROR_OK;
1061
1062 /* if target was running, signal that we halted
1063 * otherwise we reentered from debug execution */
1064 if (previous_state == TARGET_RUNNING)
1065 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1066 else
1067 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
1068 }
1069
1070 return retval;
1071 }
1072
1073 int xscale_debug_entry(target_t *target)
1074 {
1075 armv4_5_common_t *armv4_5 = target->arch_info;
1076 xscale_common_t *xscale = armv4_5->arch_info;
1077 u32 pc;
1078 u32 buffer[10];
1079 int i;
1080
1081 u32 moe;
1082
1083 /* clear external dbg break (will be written on next DCSR read) */
1084 xscale->external_debug_break = 0;
1085 xscale_read_dcsr(target);
1086
1087 /* get r0, pc, r1 to r7 and cpsr */
1088 xscale_receive(target, buffer, 10);
1089
1090 /* move r0 from buffer to register cache */
1091 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
 1092 armv4_5->core_cache->reg_list[0].dirty = 1;
 1093 armv4_5->core_cache->reg_list[0].valid = 1;
1094 DEBUG("r0: 0x%8.8x", buffer[0]);
1095
1096 /* move pc from buffer to register cache */
1097 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
1098 armv4_5->core_cache->reg_list[15].dirty = 1;
1099 armv4_5->core_cache->reg_list[15].valid = 1;
1100 DEBUG("pc: 0x%8.8x", buffer[1]);
1101
1102 /* move data from buffer to register cache */
1103 for (i = 1; i <= 7; i++)
1104 {
1105 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
1106 armv4_5->core_cache->reg_list[i].dirty = 1;
1107 armv4_5->core_cache->reg_list[i].valid = 1;
1108 DEBUG("r%i: 0x%8.8x", i, buffer[i + 1]);
1109 }
1110
1111 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
1112 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
1113 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
1114 DEBUG("cpsr: 0x%8.8x", buffer[9]);
1115
1116 armv4_5->core_mode = buffer[9] & 0x1f;
1117 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
1118 {
1119 target->state = TARGET_UNKNOWN;
1120 ERROR("cpsr contains invalid mode value - communication failure");
1121 return ERROR_TARGET_FAILURE;
1122 }
1123 DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
1124
1125 if (buffer[9] & 0x20)
1126 armv4_5->core_state = ARMV4_5_STATE_THUMB;
1127 else
1128 armv4_5->core_state = ARMV4_5_STATE_ARM;
1129
1130 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1131 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
1132 {
1133 xscale_receive(target, buffer, 8);
1134 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1135 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
1136 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
1137 }
1138 else
1139 {
1140 /* r8 to r14, but no spsr */
1141 xscale_receive(target, buffer, 7);
1142 }
1143
1144 /* move data from buffer to register cache */
1145 for (i = 8; i <= 14; i++)
1146 {
1147 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
1148 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
1149 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
1150 }
1151
1152 /* examine debug reason */
1153 xscale_read_dcsr(target);
1154 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1155
1156 /* stored PC (for calculating fixup) */
1157 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1158
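/* The method-of-entry (MOE) field is DCSR bits [4:2]; it is decoded below to
 * set the generic and XScale-specific debug reasons. Every entry cause handled
 * here applies the same -4 fixup to the PC reported by the debug handler so it
 * points back at the interrupted instruction.
 */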
1159 switch (moe)
1160 {
1161 case 0x0: /* Processor reset */
1162 target->debug_reason = DBG_REASON_DBGRQ;
1163 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1164 pc -= 4;
1165 break;
1166 case 0x1: /* Instruction breakpoint hit */
1167 target->debug_reason = DBG_REASON_BREAKPOINT;
1168 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1169 pc -= 4;
1170 break;
1171 case 0x2: /* Data breakpoint hit */
1172 target->debug_reason = DBG_REASON_WATCHPOINT;
1173 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1174 pc -= 4;
1175 break;
1176 case 0x3: /* BKPT instruction executed */
1177 target->debug_reason = DBG_REASON_BREAKPOINT;
1178 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1179 pc -= 4;
1180 break;
1181 case 0x4: /* Ext. debug event */
1182 target->debug_reason = DBG_REASON_DBGRQ;
1183 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1184 pc -= 4;
1185 break;
 1186 case 0x5: /* Vector trap occurred */
1187 target->debug_reason = DBG_REASON_BREAKPOINT;
1188 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1189 pc -= 4;
1190 break;
1191 case 0x6: /* Trace buffer full break */
1192 target->debug_reason = DBG_REASON_DBGRQ;
1193 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1194 pc -= 4;
1195 break;
1196 case 0x7: /* Reserved */
1197 default:
1198 ERROR("Method of Entry is 'Reserved'");
1199 exit(-1);
1200 break;
1201 }
1202
1203 /* apply PC fixup */
1204 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1205
1206 /* on the first debug entry, identify cache type */
1207 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1208 {
1209 u32 cache_type_reg;
1210
1211 /* read cp15 cache type register */
1212 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1213 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1214
1215 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1216 }
1217
1218 /* examine MMU and Cache settings */
1219 /* read cp15 control register */
1220 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1221 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1222 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1223 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1224 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1225
1226 /* tracing enabled, read collected trace data */
1227 if (xscale->trace.buffer_enabled)
1228 {
1229 xscale_read_trace(target);
1230 xscale->trace.buffer_fill--;
1231
1232 /* resume if we're still collecting trace data */
1233 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1234 && (xscale->trace.buffer_fill > 0))
1235 {
1236 xscale_resume(target, 1, 0x0, 1, 0);
1237 }
1238 else
1239 {
1240 xscale->trace.buffer_enabled = 0;
1241 }
1242 }
1243
1244 return ERROR_OK;
1245 }
1246
1247 int xscale_halt(target_t *target)
1248 {
1249 armv4_5_common_t *armv4_5 = target->arch_info;
1250 xscale_common_t *xscale = armv4_5->arch_info;
1251
1252 DEBUG("target->state: %s", target_state_strings[target->state]);
1253
1254 if (target->state == TARGET_HALTED)
1255 {
1256 WARNING("target was already halted");
1257 return ERROR_TARGET_ALREADY_HALTED;
1258 }
1259 else if (target->state == TARGET_UNKNOWN)
1260 {
 1261 /* this must not happen for an XScale target */
1262 ERROR("target was in unknown state when halt was requested");
1263 return ERROR_TARGET_INVALID;
1264 }
1265 else if (target->state == TARGET_RESET)
1266 {
1267 DEBUG("target->state == TARGET_RESET");
1268 }
1269 else
1270 {
1271 /* assert external dbg break */
1272 xscale->external_debug_break = 1;
1273 xscale_read_dcsr(target);
1274
1275 target->debug_reason = DBG_REASON_DBGRQ;
1276 }
1277
1278 return ERROR_OK;
1279 }
1280
1281 int xscale_enable_single_step(struct target_s *target, u32 next_pc)
1282 {
1283 armv4_5_common_t *armv4_5 = target->arch_info;
1284 xscale_common_t *xscale= armv4_5->arch_info;
1285 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1286
1287 if (xscale->ibcr0_used)
1288 {
1289 breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1290
1291 if (ibcr0_bp)
1292 {
1293 xscale_unset_breakpoint(target, ibcr0_bp);
1294 }
1295 else
1296 {
1297 ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1298 exit(-1);
1299 }
1300 }
1301
1302 xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1303
1304 return ERROR_OK;
1305 }
1306
1307 int xscale_disable_single_step(struct target_s *target)
1308 {
1309 armv4_5_common_t *armv4_5 = target->arch_info;
1310 xscale_common_t *xscale= armv4_5->arch_info;
1311 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1312
1313 xscale_set_reg_u32(ibcr0, 0x0);
1314
1315 return ERROR_OK;
1316 }
1317
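/* Resume protocol, as used here and in xscale_step(): optionally send 0x62 to
 * clean the trace buffer and 0x31 (resume with trace) instead of 0x30 (plain
 * resume), then hand the debug handler the CPSR, r7 down to r0, and finally
 * the PC. Dirty banked registers are written back beforehand via
 * xscale_restore_context().
 */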
1318 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
1319 {
1320 armv4_5_common_t *armv4_5 = target->arch_info;
1321 xscale_common_t *xscale= armv4_5->arch_info;
1322 breakpoint_t *breakpoint = target->breakpoints;
1323
1324 u32 current_pc;
1325
1326 int retval;
1327 int i;
1328
1329 DEBUG("-");
1330
1331 if (target->state != TARGET_HALTED)
1332 {
1333 WARNING("target not halted");
1334 return ERROR_TARGET_NOT_HALTED;
1335 }
1336
1337 if (!debug_execution)
1338 {
1339 target_free_all_working_areas(target);
1340 }
1341
1342 /* update vector tables */
1343 xscale_update_vectors(target);
1344
1345 /* current = 1: continue on current pc, otherwise continue at <address> */
1346 if (!current)
1347 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1348
1349 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1350
1351 /* if we're at the reset vector, we have to simulate the branch */
1352 if (current_pc == 0x0)
1353 {
1354 arm_simulate_step(target, NULL);
1355 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1356 }
1357
1358 /* the front-end may request us not to handle breakpoints */
1359 if (handle_breakpoints)
1360 {
1361 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1362 {
1363 u32 next_pc;
1364
1365 /* there's a breakpoint at the current PC, we have to step over it */
1366 DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1367 xscale_unset_breakpoint(target, breakpoint);
1368
1369 /* calculate PC of next instruction */
1370 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1371 {
1372 u32 current_opcode;
1373 target_read_u32(target, current_pc, &current_opcode);
1374 ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1375 }
1376
1377 DEBUG("enable single-step");
1378 xscale_enable_single_step(target, next_pc);
1379
1380 /* restore banked registers */
1381 xscale_restore_context(target);
1382
1383 /* send resume request (command 0x30 or 0x31)
1384 * clean the trace buffer if it is to be enabled (0x62) */
1385 if (xscale->trace.buffer_enabled)
1386 {
1387 xscale_send_u32(target, 0x62);
1388 xscale_send_u32(target, 0x31);
1389 }
1390 else
1391 xscale_send_u32(target, 0x30);
1392
1393 /* send CPSR */
1394 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1395 DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1396
1397 for (i = 7; i >= 0; i--)
1398 {
1399 /* send register */
1400 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1401 DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1402 }
1403
1404 /* send PC */
1405 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1406 DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1407
1408 /* wait for and process debug entry */
1409 xscale_debug_entry(target);
1410
1411 DEBUG("disable single-step");
1412 xscale_disable_single_step(target);
1413
1414 DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
1415 xscale_set_breakpoint(target, breakpoint);
1416 }
1417 }
1418
1419 /* enable any pending breakpoints and watchpoints */
1420 xscale_enable_breakpoints(target);
1421 xscale_enable_watchpoints(target);
1422
1423 /* restore banked registers */
1424 xscale_restore_context(target);
1425
1426 /* send resume request (command 0x30 or 0x31)
1427 * clean the trace buffer if it is to be enabled (0x62) */
1428 if (xscale->trace.buffer_enabled)
1429 {
1430 xscale_send_u32(target, 0x62);
1431 xscale_send_u32(target, 0x31);
1432 }
1433 else
1434 xscale_send_u32(target, 0x30);
1435
1436 /* send CPSR */
1437 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1438 DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1439
1440 for (i = 7; i >= 0; i--)
1441 {
1442 /* send register */
1443 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1444 DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1445 }
1446
1447 /* send PC */
1448 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1449 DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1450
1451 target->debug_reason = DBG_REASON_NOTHALTED;
1452
1453 if (!debug_execution)
1454 {
1455 /* registers are now invalid */
1456 armv4_5_invalidate_core_regs(target);
1457 target->state = TARGET_RUNNING;
1458 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1459 }
1460 else
1461 {
1462 target->state = TARGET_DEBUG_RUNNING;
1463 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1464 }
1465
1466 DEBUG("target resumed");
1467
1468 xscale->handler_running = 1;
1469
1470 return ERROR_OK;
1471 }
1472
1473 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
1474 {
1475 armv4_5_common_t *armv4_5 = target->arch_info;
1476 xscale_common_t *xscale = armv4_5->arch_info;
1477 breakpoint_t *breakpoint = target->breakpoints;
1478
1479 u32 current_pc, next_pc;
1480 int i;
1481 int retval;
1482
1483 if (target->state != TARGET_HALTED)
1484 {
1485 WARNING("target not halted");
1486 return ERROR_TARGET_NOT_HALTED;
1487 }
1488
1489 /* current = 1: continue on current pc, otherwise continue at <address> */
1490 if (!current)
1491 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1492
1493 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1494
1495 /* if we're at the reset vector, we have to simulate the step */
1496 if (current_pc == 0x0)
1497 {
1498 arm_simulate_step(target, NULL);
1499 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1500
1501 target->debug_reason = DBG_REASON_SINGLESTEP;
1502 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1503
1504 return ERROR_OK;
1505 }
1506
1507 /* the front-end may request us not to handle breakpoints */
1508 if (handle_breakpoints)
1509 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1510 {
1511 xscale_unset_breakpoint(target, breakpoint);
1512 }
1513
1514 target->debug_reason = DBG_REASON_SINGLESTEP;
1515
1516 /* calculate PC of next instruction */
1517 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1518 {
1519 u32 current_opcode;
1520 target_read_u32(target, current_pc, &current_opcode);
1521 ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1522 }
1523
1524 DEBUG("enable single-step");
1525 xscale_enable_single_step(target, next_pc);
1526
1527 /* restore banked registers */
1528 xscale_restore_context(target);
1529
1530 /* send resume request (command 0x30 or 0x31)
1531 * clean the trace buffer if it is to be enabled (0x62) */
1532 if (xscale->trace.buffer_enabled)
1533 {
1534 xscale_send_u32(target, 0x62);
1535 xscale_send_u32(target, 0x31);
1536 }
1537 else
1538 xscale_send_u32(target, 0x30);
1539
1540 /* send CPSR */
1541 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1542 DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1543
1544 for (i = 7; i >= 0; i--)
1545 {
1546 /* send register */
1547 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1548 DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1549 }
1550
1551 /* send PC */
1552 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1553 DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1554
1555 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1556
1557 /* registers are now invalid */
1558 armv4_5_invalidate_core_regs(target);
1559
1560 /* wait for and process debug entry */
1561 xscale_debug_entry(target);
1562
1563 DEBUG("disable single-step");
1564 xscale_disable_single_step(target);
1565
1566 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1567
1568 if (breakpoint)
1569 {
1570 xscale_set_breakpoint(target, breakpoint);
1571 }
1572
1573 DEBUG("target stepped");
1574
1575 return ERROR_OK;
1576
1577 }
1578
1579 int xscale_assert_reset(target_t *target)
1580 {
1581 armv4_5_common_t *armv4_5 = target->arch_info;
1582 xscale_common_t *xscale = armv4_5->arch_info;
1583
1584 DEBUG("target->state: %s", target_state_strings[target->state]);
1585
 1586 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
 1587 * end up in T-L-R, which would reset JTAG)
 1588 */
1589 jtag_add_end_state(TAP_RTI);
1590 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
1591
1592 /* set Hold reset, Halt mode and Trap Reset */
1593 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1594 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1595 xscale_write_dcsr(target, 1, 0);
1596
1597 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1598 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, 0x7f);
1599 jtag_execute_queue();
1600
1601 /* assert reset */
1602 jtag_add_reset(0, 1);
1603
1604 /* sleep 1ms, to be sure we fulfill any requirements */
1605 jtag_add_sleep(1000);
1606 jtag_execute_queue();
1607
1608 target->state = TARGET_RESET;
1609
1610 return ERROR_OK;
1611 }
1612
1613 int xscale_deassert_reset(target_t *target)
1614 {
1615 armv4_5_common_t *armv4_5 = target->arch_info;
1616 xscale_common_t *xscale = armv4_5->arch_info;
1617
1618 fileio_t debug_handler;
1619 u32 address;
1620 u32 binary_size;
1621
1622 u32 buf_cnt;
1623 int i;
1624 int retval;
1625
1626 breakpoint_t *breakpoint = target->breakpoints;
1627
1628 DEBUG("-");
1629
1630 xscale->ibcr_available = 2;
1631 xscale->ibcr0_used = 0;
1632 xscale->ibcr1_used = 0;
1633
1634 xscale->dbr_available = 2;
1635 xscale->dbr0_used = 0;
1636 xscale->dbr1_used = 0;
1637
1638 /* mark all hardware breakpoints as unset */
1639 while (breakpoint)
1640 {
1641 if (breakpoint->type == BKPT_HARD)
1642 {
1643 breakpoint->set = 0;
1644 }
1645 breakpoint = breakpoint->next;
1646 }
1647
1648 if (!xscale->handler_installed)
1649 {
1650 /* release SRST */
1651 jtag_add_reset(0, 0);
1652
1653 /* wait 300ms; 150 and 100ms were not enough */
1654 jtag_add_sleep(300*1000);
1655
1656 jtag_add_runtest(2030, TAP_RTI);
1657 jtag_execute_queue();
1658
1659 /* set Hold reset, Halt mode and Trap Reset */
1660 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1661 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1662 xscale_write_dcsr(target, 1, 0);
1663
1664 /* Load debug handler */
 665 if (fileio_open(&debug_handler, "xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
 666 {
 ERROR("unable to open 'xscale/debug_handler.bin'");
 667 return ERROR_OK;
 668 }
1669
1670 if ((binary_size = debug_handler.size) % 4)
1671 {
1672 ERROR("debug_handler.bin: size not a multiple of 4");
1673 exit(-1);
1674 }
1675
1676 if (binary_size > 0x800)
1677 {
1678 ERROR("debug_handler.bin: larger than 2kb");
1679 exit(-1);
1680 }
1681
1682 binary_size = CEIL(binary_size, 32) * 32;
1683
1684 address = xscale->handler_address;
1685 while (binary_size > 0)
1686 {
1687 u32 cache_line[8];
1688 u8 buffer[32];
1689
 690 if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
 691 {
 692 ERROR("reading debug handler failed");
 fileio_close(&debug_handler);
 return retval;
 693 }
1694
1695 for (i = 0; i < buf_cnt; i += 4)
1696 {
1697 /* convert LE buffer to host-endian u32 */
1698 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1699 }
1700
1701 for (; buf_cnt < 32; buf_cnt += 4)
1702 {
1703 cache_line[buf_cnt / 4] = 0xe1a08008;
1704 }
1705
1706 /* only load addresses other than the reset vectors */
1707 if ((address % 0x400) != 0x0)
1708 {
1709 xscale_load_ic(target, 1, address, cache_line);
1710 }
1711
1712 address += buf_cnt;
1713 binary_size -= buf_cnt;
1714 };
1715
1716 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
1717 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
1718
1719 jtag_add_runtest(30, TAP_RTI);
1720
1721 jtag_add_sleep(100000);
1722
1723 /* set Hold reset, Halt mode and Trap Reset */
1724 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1725 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1726 xscale_write_dcsr(target, 1, 0);
1727
1728 /* clear Hold reset to let the target run (should enter debug handler) */
1729 xscale_write_dcsr(target, 0, 1);
1730 target->state = TARGET_RUNNING;
1731
1732 if ((target->reset_mode != RESET_HALT) && (target->reset_mode != RESET_INIT))
1733 {
1734 jtag_add_sleep(10000);
1735
1736 /* we should have entered debug now */
1737 xscale_debug_entry(target);
1738 target->state = TARGET_HALTED;
1739
1740 /* resume the target */
1741 xscale_resume(target, 1, 0x0, 1, 0);
1742 }
1743
1744 fileio_close(&debug_handler);
1745 }
1746 else
1747 {
1748 jtag_add_reset(0, 0);
1749 }
1750
1751
1752 return ERROR_OK;
1753 }
1754
1755 int xscale_soft_reset_halt(struct target_s *target)
1756 {
1757
1758 return ERROR_OK;
1759 }
1760
1761 int xscale_prepare_reset_halt(struct target_s *target)
1762 {
1763 /* nothing to be done for reset_halt on XScale targets
1764 * we always halt after a reset to upload the debug handler
1765 */
1766 return ERROR_OK;
1767 }
1768
1769 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
1770 {
1771
1772 return ERROR_OK;
1773 }
1774
1775 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
1776 {
1777
1778 return ERROR_OK;
1779 }
1780
1781 int xscale_full_context(target_t *target)
1782 {
1783 armv4_5_common_t *armv4_5 = target->arch_info;
1784
1785 u32 *buffer;
1786
1787 int i, j;
1788
1789 DEBUG("-");
1790
1791 if (target->state != TARGET_HALTED)
1792 {
1793 WARNING("target not halted");
1794 return ERROR_TARGET_NOT_HALTED;
1795 }
1796
1797 buffer = malloc(4 * 8);
1798
1799 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1800 * we can't enter User mode on an XScale (unpredictable),
1801 * but User shares registers with SYS
1802 */
1803 for(i = 1; i < 7; i++)
1804 {
1805 int valid = 1;
1806
1807 /* check if there are invalid registers in the current mode
1808 */
1809 for (j = 0; j <= 16; j++)
1810 {
1811 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1812 valid = 0;
1813 }
1814
1815 if (!valid)
1816 {
1817 u32 tmp_cpsr;
1818
1819 /* request banked registers */
1820 xscale_send_u32(target, 0x0);
1821
1822 tmp_cpsr = 0x0;
1823 tmp_cpsr |= armv4_5_number_to_mode(i);
1824 tmp_cpsr |= 0xc0; /* I/F bits */
1825
1826 /* send CPSR for desired mode */
1827 xscale_send_u32(target, tmp_cpsr);
1828
1829 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1830 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1831 {
1832 xscale_receive(target, buffer, 8);
 1833 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1834 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1835 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1836 }
1837 else
1838 {
1839 xscale_receive(target, buffer, 7);
1840 }
1841
1842 /* move data from buffer to register cache */
1843 for (j = 8; j <= 14; j++)
1844 {
1845 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1846 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1847 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1848 }
1849 }
1850 }
1851
1852 free(buffer);
1853
1854 return ERROR_OK;
1855 }
1856
1857 int xscale_restore_context(target_t *target)
1858 {
1859 armv4_5_common_t *armv4_5 = target->arch_info;
1860
1861 int i, j;
1862
1863 DEBUG("-");
1864
1865 if (target->state != TARGET_HALTED)
1866 {
1867 WARNING("target not halted");
1868 return ERROR_TARGET_NOT_HALTED;
1869 }
1870
1871 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1872 * we can't enter User mode on an XScale (unpredictable),
1873 * but User shares registers with SYS
1874 */
1875 for(i = 1; i < 7; i++)
1876 {
1877 int dirty = 0;
1878
 1879 /* check if there are dirty registers in the current mode
1880 */
1881 for (j = 8; j <= 14; j++)
1882 {
1883 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1884 dirty = 1;
1885 }
1886
1887 /* if not USR/SYS, check if the SPSR needs to be written */
1888 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1889 {
1890 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1891 dirty = 1;
1892 }
1893
1894 if (dirty)
1895 {
1896 u32 tmp_cpsr;
1897
1898 /* send banked registers */
1899 xscale_send_u32(target, 0x1);
1900
1901 tmp_cpsr = 0x0;
1902 tmp_cpsr |= armv4_5_number_to_mode(i);
1903 tmp_cpsr |= 0xc0; /* I/F bits */
1904
1905 /* send CPSR for desired mode */
1906 xscale_send_u32(target, tmp_cpsr);
1907
1908 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1909 for (j = 8; j <= 14; j++)
1910 {
1911 				xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1912 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1913 }
1914
1915 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1916 {
1917 				xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1918 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1919 }
1920 }
1921 }
1922
1923 return ERROR_OK;
1924 }
1925
1926 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1927 {
1928 armv4_5_common_t *armv4_5 = target->arch_info;
1929 xscale_common_t *xscale = armv4_5->arch_info;
1930 u32 *buf32;
1931 int i;
1932
1933 DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1934
1935 if (target->state != TARGET_HALTED)
1936 {
1937 WARNING("target not halted");
1938 return ERROR_TARGET_NOT_HALTED;
1939 }
1940
1941 /* sanitize arguments */
1942 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1943 return ERROR_INVALID_ARGUMENTS;
1944
1945 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1946 return ERROR_TARGET_UNALIGNED_ACCESS;
1947
1948 /* send memory read request (command 0x1n, n: access size) */
1949 xscale_send_u32(target, 0x10 | size);
1950
1951 /* send base address for read request */
1952 xscale_send_u32(target, address);
1953
1954 /* send number of requested data words */
1955 xscale_send_u32(target, count);
1956
1957 /* receive data from target (count times 32-bit words in host endianness) */
1958 buf32 = malloc(4 * count);
1959 xscale_receive(target, buf32, count);
1960
1961 /* extract data from host-endian buffer into byte stream */
1962 for (i = 0; i < count; i++)
1963 {
1964 switch (size)
1965 {
1966 case 4:
1967 target_buffer_set_u32(target, buffer, buf32[i]);
1968 buffer += 4;
1969 break;
1970 case 2:
1971 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1972 buffer += 2;
1973 break;
1974 case 1:
1975 *buffer++ = buf32[i] & 0xff;
1976 break;
1977 default:
1978 ERROR("should never get here");
1979 exit(-1);
1980 }
1981 }
1982
1983 free(buf32);
1984
1985 /* examine DCSR, to see if Sticky Abort (SA) got set */
1986 xscale_read_dcsr(target);
1987 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1988 {
1989 /* clear SA bit */
1990 xscale_send_u32(target, 0x60);
1991
1992 return ERROR_TARGET_DATA_ABORT;
1993 }
1994
1995 return ERROR_OK;
1996 }
1997
1998 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1999 {
2000 armv4_5_common_t *armv4_5 = target->arch_info;
2001 xscale_common_t *xscale = armv4_5->arch_info;
2002
2003 DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
2004
2005 if (target->state != TARGET_HALTED)
2006 {
2007 WARNING("target not halted");
2008 return ERROR_TARGET_NOT_HALTED;
2009 }
2010
2011 /* sanitize arguments */
2012 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
2013 return ERROR_INVALID_ARGUMENTS;
2014
2015 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2016 return ERROR_TARGET_UNALIGNED_ACCESS;
2017
2018 /* send memory write request (command 0x2n, n: access size) */
2019 xscale_send_u32(target, 0x20 | size);
2020
2021 	/* send base address for write request */
2022 xscale_send_u32(target, address);
2023
2024 	/* send number of data words to be written */
2025 xscale_send_u32(target, count);
2026
2027 	/* word-by-word transmit loop, superseded by the bulk xscale_send() below */
2028 #if 0
2029 for (i = 0; i < count; i++)
2030 {
2031 switch (size)
2032 {
2033 case 4:
2034 value = target_buffer_get_u32(target, buffer);
2035 xscale_send_u32(target, value);
2036 buffer += 4;
2037 break;
2038 case 2:
2039 value = target_buffer_get_u16(target, buffer);
2040 xscale_send_u32(target, value);
2041 buffer += 2;
2042 break;
2043 case 1:
2044 value = *buffer;
2045 xscale_send_u32(target, value);
2046 buffer += 1;
2047 break;
2048 default:
2049 ERROR("should never get here");
2050 exit(-1);
2051 }
2052 }
2053 #endif
2054 xscale_send(target, buffer, count, size);
2055
2056 /* examine DCSR, to see if Sticky Abort (SA) got set */
2057 xscale_read_dcsr(target);
2058 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2059 {
2060 /* clear SA bit */
2061 xscale_send_u32(target, 0x60);
2062
2063 return ERROR_TARGET_DATA_ABORT;
2064 }
2065
2066 return ERROR_OK;
2067 }
2068
2069 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
2070 {
2071 return xscale_write_memory(target, address, 4, count, buffer);
2072 }
2073
2074 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum)
2075 {
2076 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2077 }
2078
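/* return the translation table base as cached in the XScale TTB register */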
2079 u32 xscale_get_ttb(target_t *target)
2080 {
2081 armv4_5_common_t *armv4_5 = target->arch_info;
2082 xscale_common_t *xscale = armv4_5->arch_info;
2083 u32 ttb;
2084
2085 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2086 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2087
2088 return ttb;
2089 }
2090
2091 void xscale_disable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2092 {
2093 armv4_5_common_t *armv4_5 = target->arch_info;
2094 xscale_common_t *xscale = armv4_5->arch_info;
2095 u32 cp15_control;
2096
2097 /* read cp15 control register */
2098 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2099 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2100
2101 if (mmu)
2102 cp15_control &= ~0x1U;
2103
2104 if (d_u_cache)
2105 {
2106 /* clean DCache */
2107 xscale_send_u32(target, 0x50);
2108 xscale_send_u32(target, xscale->cache_clean_address);
2109
2110 /* invalidate DCache */
2111 xscale_send_u32(target, 0x51);
2112
2113 cp15_control &= ~0x4U;
2114 }
2115
2116 if (i_cache)
2117 {
2118 /* invalidate ICache */
2119 xscale_send_u32(target, 0x52);
2120 cp15_control &= ~0x1000U;
2121 }
2122
2123 /* write new cp15 control register */
2124 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2125
2126 /* execute cpwait to ensure outstanding operations complete */
2127 xscale_send_u32(target, 0x53);
2128 }
2129
2130 void xscale_enable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2131 {
2132 armv4_5_common_t *armv4_5 = target->arch_info;
2133 xscale_common_t *xscale = armv4_5->arch_info;
2134 u32 cp15_control;
2135
2136 /* read cp15 control register */
2137 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2138 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2139
2140 if (mmu)
2141 cp15_control |= 0x1U;
2142
2143 if (d_u_cache)
2144 cp15_control |= 0x4U;
2145
2146 if (i_cache)
2147 cp15_control |= 0x1000U;
2148
2149 /* write new cp15 control register */
2150 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2151
2152 /* execute cpwait to ensure outstanding operations complete */
2153 xscale_send_u32(target, 0x53);
2154 }
2155
2156 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2157 {
2158 armv4_5_common_t *armv4_5 = target->arch_info;
2159 xscale_common_t *xscale = armv4_5->arch_info;
2160
2161 if (target->state != TARGET_HALTED)
2162 {
2163 WARNING("target not halted");
2164 return ERROR_TARGET_NOT_HALTED;
2165 }
2166
2167 if (xscale->force_hw_bkpts)
2168 breakpoint->type = BKPT_HARD;
2169
2170 if (breakpoint->set)
2171 {
2172 WARNING("breakpoint already set");
2173 return ERROR_OK;
2174 }
2175
2176 if (breakpoint->type == BKPT_HARD)
2177 {
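		/* IBCRx holds the breakpoint address; bit 0 enables the comparator */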
2178 u32 value = breakpoint->address | 1;
2179 if (!xscale->ibcr0_used)
2180 {
2181 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2182 xscale->ibcr0_used = 1;
2183 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2184 }
2185 else if (!xscale->ibcr1_used)
2186 {
2187 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2188 xscale->ibcr1_used = 1;
2189 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2190 }
2191 else
2192 {
2193 ERROR("BUG: no hardware comparator available");
2194 return ERROR_OK;
2195 }
2196 }
2197 else if (breakpoint->type == BKPT_SOFT)
2198 {
2199 if (breakpoint->length == 4)
2200 {
2201 /* keep the original instruction in target endianness */
2202 target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2203 			/* write the ARM breakpoint instruction (xscale->arm_bkpt is host endian) */
2204 target_write_u32(target, breakpoint->address, xscale->arm_bkpt);
2205 }
2206 else
2207 {
2208 /* keep the original instruction in target endianness */
2209 target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2210 			/* write the Thumb breakpoint instruction (xscale->thumb_bkpt is host endian) */
2211 			target_write_u16(target, breakpoint->address, xscale->thumb_bkpt);
2212 }
2213 breakpoint->set = 1;
2214 }
2215
2216 return ERROR_OK;
2217
2218 }
2219
2220 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2221 {
2222 armv4_5_common_t *armv4_5 = target->arch_info;
2223 xscale_common_t *xscale = armv4_5->arch_info;
2224
2225 if (target->state != TARGET_HALTED)
2226 {
2227 WARNING("target not halted");
2228 return ERROR_TARGET_NOT_HALTED;
2229 }
2230
2231 if (xscale->force_hw_bkpts)
2232 {
2233 DEBUG("forcing use of hardware breakpoint at address 0x%8.8x", breakpoint->address);
2234 breakpoint->type = BKPT_HARD;
2235 }
2236
2237 	if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2238 	{
2239 		INFO("no breakpoint unit available for hardware breakpoint");
2240 		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2241 	}
2242 
2243 	if ((breakpoint->length != 2) && (breakpoint->length != 4))
2244 	{
2245 		INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2246 		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2247 	}
2248 
2249 	/* only hardware breakpoints consume an IBCR comparator */
2250 	if (breakpoint->type == BKPT_HARD)
2251 		xscale->ibcr_available--;
2252
2253 return ERROR_OK;
2254 }
2255
2256 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2257 {
2258 armv4_5_common_t *armv4_5 = target->arch_info;
2259 xscale_common_t *xscale = armv4_5->arch_info;
2260
2261 if (target->state != TARGET_HALTED)
2262 {
2263 WARNING("target not halted");
2264 return ERROR_TARGET_NOT_HALTED;
2265 }
2266
2267 if (!breakpoint->set)
2268 {
2269 WARNING("breakpoint not set");
2270 return ERROR_OK;
2271 }
2272
2273 if (breakpoint->type == BKPT_HARD)
2274 {
2275 if (breakpoint->set == 1)
2276 {
2277 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2278 xscale->ibcr0_used = 0;
2279 }
2280 else if (breakpoint->set == 2)
2281 {
2282 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2283 xscale->ibcr1_used = 0;
2284 }
2285 breakpoint->set = 0;
2286 }
2287 else
2288 {
2289 /* restore original instruction (kept in target endianness) */
2290 if (breakpoint->length == 4)
2291 {
2292 target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2293 }
2294 else
2295 {
2296 target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2297 }
2298 breakpoint->set = 0;
2299 }
2300
2301 return ERROR_OK;
2302 }
2303
2304 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2305 {
2306 armv4_5_common_t *armv4_5 = target->arch_info;
2307 xscale_common_t *xscale = armv4_5->arch_info;
2308
2309 if (target->state != TARGET_HALTED)
2310 {
2311 WARNING("target not halted");
2312 return ERROR_TARGET_NOT_HALTED;
2313 }
2314
2315 if (breakpoint->set)
2316 {
2317 xscale_unset_breakpoint(target, breakpoint);
2318 }
2319
2320 if (breakpoint->type == BKPT_HARD)
2321 xscale->ibcr_available++;
2322
2323 return ERROR_OK;
2324 }
2325
2326 int xscale_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2327 {
2328 armv4_5_common_t *armv4_5 = target->arch_info;
2329 xscale_common_t *xscale = armv4_5->arch_info;
2330 u8 enable=0;
2331 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2332 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2333
2334 if (target->state != TARGET_HALTED)
2335 {
2336 WARNING("target not halted");
2337 return ERROR_TARGET_NOT_HALTED;
2338 }
2339
2340 xscale_get_reg(dbcon);
2341
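	/* DBCON enable field per data breakpoint register:
	 * 0x1 = store only, 0x2 = any access, 0x3 = load only */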
2342 switch (watchpoint->rw)
2343 {
2344 case WPT_READ:
2345 enable = 0x3;
2346 break;
2347 case WPT_ACCESS:
2348 enable = 0x2;
2349 break;
2350 case WPT_WRITE:
2351 enable = 0x1;
2352 break;
2353 default:
2354 ERROR("BUG: watchpoint->rw neither read, write nor access");
2355 }
2356
2357 if (!xscale->dbr0_used)
2358 {
2359 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2360 dbcon_value |= enable;
2361 xscale_set_reg_u32(dbcon, dbcon_value);
2362 watchpoint->set = 1;
2363 xscale->dbr0_used = 1;
2364 }
2365 else if (!xscale->dbr1_used)
2366 {
2367 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2368 dbcon_value |= enable << 2;
2369 xscale_set_reg_u32(dbcon, dbcon_value);
2370 watchpoint->set = 2;
2371 xscale->dbr1_used = 1;
2372 }
2373 else
2374 {
2375 ERROR("BUG: no hardware comparator available");
2376 return ERROR_OK;
2377 }
2378
2379 return ERROR_OK;
2380 }
2381
2382 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2383 {
2384 armv4_5_common_t *armv4_5 = target->arch_info;
2385 xscale_common_t *xscale = armv4_5->arch_info;
2386
2387 if (target->state != TARGET_HALTED)
2388 {
2389 WARNING("target not halted");
2390 return ERROR_TARGET_NOT_HALTED;
2391 }
2392
2393 if (xscale->dbr_available < 1)
2394 {
2395 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2396 }
2397
2398 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2399 {
2400 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2401 }
2402
2403 xscale->dbr_available--;
2404
2405 return ERROR_OK;
2406 }
2407
2408 int xscale_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2409 {
2410 armv4_5_common_t *armv4_5 = target->arch_info;
2411 xscale_common_t *xscale = armv4_5->arch_info;
2412 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2413 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2414
2415 if (target->state != TARGET_HALTED)
2416 {
2417 WARNING("target not halted");
2418 return ERROR_TARGET_NOT_HALTED;
2419 }
2420
2421 if (!watchpoint->set)
2422 {
2423 		WARNING("watchpoint not set");
2424 return ERROR_OK;
2425 }
2426
2427 if (watchpoint->set == 1)
2428 {
2429 dbcon_value &= ~0x3;
2430 xscale_set_reg_u32(dbcon, dbcon_value);
2431 xscale->dbr0_used = 0;
2432 }
2433 else if (watchpoint->set == 2)
2434 {
2435 dbcon_value &= ~0xc;
2436 xscale_set_reg_u32(dbcon, dbcon_value);
2437 xscale->dbr1_used = 0;
2438 }
2439 watchpoint->set = 0;
2440
2441 return ERROR_OK;
2442 }
2443
2444 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2445 {
2446 armv4_5_common_t *armv4_5 = target->arch_info;
2447 xscale_common_t *xscale = armv4_5->arch_info;
2448
2449 if (target->state != TARGET_HALTED)
2450 {
2451 WARNING("target not halted");
2452 return ERROR_TARGET_NOT_HALTED;
2453 }
2454
2455 if (watchpoint->set)
2456 {
2457 xscale_unset_watchpoint(target, watchpoint);
2458 }
2459
2460 xscale->dbr_available++;
2461
2462 return ERROR_OK;
2463 }
2464
2465 void xscale_enable_watchpoints(struct target_s *target)
2466 {
2467 watchpoint_t *watchpoint = target->watchpoints;
2468
2469 while (watchpoint)
2470 {
2471 if (watchpoint->set == 0)
2472 xscale_set_watchpoint(target, watchpoint);
2473 watchpoint = watchpoint->next;
2474 }
2475 }
2476
2477 void xscale_enable_breakpoints(struct target_s *target)
2478 {
2479 breakpoint_t *breakpoint = target->breakpoints;
2480
2481 /* set any pending breakpoints */
2482 while (breakpoint)
2483 {
2484 if (breakpoint->set == 0)
2485 xscale_set_breakpoint(target, breakpoint);
2486 breakpoint = breakpoint->next;
2487 }
2488 }
2489
2490 int xscale_get_reg(reg_t *reg)
2491 {
2492 xscale_reg_t *arch_info = reg->arch_info;
2493 target_t *target = arch_info->target;
2494 armv4_5_common_t *armv4_5 = target->arch_info;
2495 xscale_common_t *xscale = armv4_5->arch_info;
2496
2497 /* DCSR, TX and RX are accessible via JTAG */
2498 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2499 {
2500 return xscale_read_dcsr(arch_info->target);
2501 }
2502 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2503 {
2504 /* 1 = consume register content */
2505 return xscale_read_tx(arch_info->target, 1);
2506 }
2507 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2508 {
2509 /* can't read from RX register (host -> debug handler) */
2510 return ERROR_OK;
2511 }
2512 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2513 {
2514 /* can't (explicitly) read from TXRXCTRL register */
2515 return ERROR_OK;
2516 }
2517 	else /* Other DBG registers have to be transferred by the debug handler */
2518 {
2519 /* send CP read request (command 0x40) */
2520 xscale_send_u32(target, 0x40);
2521
2522 /* send CP register number */
2523 xscale_send_u32(target, arch_info->dbg_handler_number);
2524
2525 /* read register value */
2526 xscale_read_tx(target, 1);
2527 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2528
2529 reg->dirty = 0;
2530 reg->valid = 1;
2531 }
2532
2533 return ERROR_OK;
2534 }
2535
2536 int xscale_set_reg(reg_t *reg, u8* buf)
2537 {
2538 xscale_reg_t *arch_info = reg->arch_info;
2539 target_t *target = arch_info->target;
2540 armv4_5_common_t *armv4_5 = target->arch_info;
2541 xscale_common_t *xscale = armv4_5->arch_info;
2542 u32 value = buf_get_u32(buf, 0, 32);
2543
2544 /* DCSR, TX and RX are accessible via JTAG */
2545 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2546 {
2547 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2548 return xscale_write_dcsr(arch_info->target, -1, -1);
2549 }
2550 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2551 {
2552 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2553 return xscale_write_rx(arch_info->target);
2554 }
2555 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2556 {
2557 /* can't write to TX register (debug-handler -> host) */
2558 return ERROR_OK;
2559 }
2560 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2561 {
2562 /* can't (explicitly) write to TXRXCTRL register */
2563 return ERROR_OK;
2564 }
2565 	else /* Other DBG registers have to be transferred by the debug handler */
2566 {
2567 /* send CP write request (command 0x41) */
2568 xscale_send_u32(target, 0x41);
2569
2570 /* send CP register number */
2571 xscale_send_u32(target, arch_info->dbg_handler_number);
2572
2573 /* send CP register value */
2574 xscale_send_u32(target, value);
2575 buf_set_u32(reg->value, 0, 32, value);
2576 }
2577
2578 return ERROR_OK;
2579 }
2580
2581 /* convenience wrapper to access XScale specific registers */
2582 int xscale_set_reg_u32(reg_t *reg, u32 value)
2583 {
2584 u8 buf[4];
2585
2586 buf_set_u32(buf, 0, 32, value);
2587
2588 return xscale_set_reg(reg, buf);
2589 }
2590
2591 int xscale_write_dcsr_sw(target_t *target, u32 value)
2592 {
2593 /* get pointers to arch-specific information */
2594 armv4_5_common_t *armv4_5 = target->arch_info;
2595 xscale_common_t *xscale = armv4_5->arch_info;
2596 reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2597 xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
2598
2599 /* send CP write request (command 0x41) */
2600 xscale_send_u32(target, 0x41);
2601
2602 /* send CP register number */
2603 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2604
2605 /* send CP register value */
2606 xscale_send_u32(target, value);
2607 buf_set_u32(dcsr->value, 0, 32, value);
2608
2609 return ERROR_OK;
2610 }
2611
2612 int xscale_read_trace(target_t *target)
2613 {
2614 /* get pointers to arch-specific information */
2615 armv4_5_common_t *armv4_5 = target->arch_info;
2616 xscale_common_t *xscale = armv4_5->arch_info;
2617 xscale_trace_data_t **trace_data_p;
2618
2619 /* 258 words from debug handler
2620 * 256 trace buffer entries
2621 * 2 checkpoint addresses
2622 */
2623 u32 trace_buffer[258];
2624 int is_address[256];
2625 int i, j;
2626
2627 if (target->state != TARGET_HALTED)
2628 {
2629 WARNING("target must be stopped to read trace data");
2630 return ERROR_TARGET_NOT_HALTED;
2631 }
2632
2633 /* send read trace buffer command (command 0x61) */
2634 xscale_send_u32(target, 0x61);
2635
2636 /* receive trace buffer content */
2637 xscale_receive(target, trace_buffer, 258);
2638
2639 /* parse buffer backwards to identify address entries */
2640 for (i = 255; i >= 0; i--)
2641 {
2642 is_address[i] = 0;
2643 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2644 ((trace_buffer[i] & 0xf0) == 0xd0))
2645 {
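			/* the four entries preceding an indirect (0x9n) or checkpointed
			 * indirect (0xDn) branch message hold the branch target address */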
2646 			if (i > 0)
2647 				is_address[--i] = 1;
2648 			if (i > 0)
2649 				is_address[--i] = 1;
2650 			if (i > 0)
2651 				is_address[--i] = 1;
2652 			if (i > 0)
2653 				is_address[--i] = 1;
2654 }
2655 }
2656
2657
2658 /* search first non-zero entry */
2659 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2660 ;
2661
2662 if (j == 256)
2663 {
2664 DEBUG("no trace data collected");
2665 return ERROR_XSCALE_NO_TRACE_DATA;
2666 }
2667
2668 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2669 ;
2670
2671 *trace_data_p = malloc(sizeof(xscale_trace_data_t));
2672 (*trace_data_p)->next = NULL;
2673 (*trace_data_p)->chkpt0 = trace_buffer[256];
2674 (*trace_data_p)->chkpt1 = trace_buffer[257];
2675 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2676 (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
2677 (*trace_data_p)->depth = 256 - j;
2678
2679 for (i = j; i < 256; i++)
2680 {
2681 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2682 if (is_address[i])
2683 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2684 else
2685 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2686 }
2687
2688 return ERROR_OK;
2689 }
2690
2691 int xscale_read_instruction(target_t *target, arm_instruction_t *instruction)
2692 {
2693 /* get pointers to arch-specific information */
2694 armv4_5_common_t *armv4_5 = target->arch_info;
2695 xscale_common_t *xscale = armv4_5->arch_info;
2696 int i;
2697 int section = -1;
2698 u32 size_read;
2699 u32 opcode;
2700 int retval;
2701
2702 if (!xscale->trace.image)
2703 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2704
2705 /* search for the section the current instruction belongs to */
2706 for (i = 0; i < xscale->trace.image->num_sections; i++)
2707 {
2708 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2709 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2710 {
2711 section = i;
2712 break;
2713 }
2714 }
2715
2716 if (section == -1)
2717 {
2718 /* current instruction couldn't be found in the image */
2719 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2720 }
2721
2722 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2723 {
2724 u8 buf[4];
2725 if ((retval = image_read_section(xscale->trace.image, section,
2726 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2727 4, buf, &size_read)) != ERROR_OK)
2728 {
2729 ERROR("error while reading instruction: %i", retval);
2730 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2731 }
2732 opcode = target_buffer_get_u32(target, buf);
2733 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2734 }
2735 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2736 {
2737 u8 buf[2];
2738 if ((retval = image_read_section(xscale->trace.image, section,
2739 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2740 2, buf, &size_read)) != ERROR_OK)
2741 {
2742 ERROR("error while reading instruction: %i", retval);
2743 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2744 }
2745 opcode = target_buffer_get_u16(target, buf);
2746 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2747 }
2748 else
2749 {
2750 ERROR("BUG: unknown core state encountered");
2751 exit(-1);
2752 }
2753
2754 return ERROR_OK;
2755 }
2756
2757 int xscale_branch_address(xscale_trace_data_t *trace_data, int i, u32 *target)
2758 {
2759 /* if there are less than four entries prior to the indirect branch message
2760 * we can't extract the address */
2761 if (i < 4)
2762 {
2763 return -1;
2764 }
2765
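	/* reassemble the 32-bit target address; the entry directly preceding
	 * the branch message holds the least significant byte */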
2766 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2767 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2768
2769 return 0;
2770 }
2771
2772 int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
2773 {
2774 /* get pointers to arch-specific information */
2775 armv4_5_common_t *armv4_5 = target->arch_info;
2776 xscale_common_t *xscale = armv4_5->arch_info;
2777 int next_pc_ok = 0;
2778 u32 next_pc = 0x0;
2779 xscale_trace_data_t *trace_data = xscale->trace.data;
2780 int retval;
2781
2782 while (trace_data)
2783 {
2784 int i, chkpt;
2785 int rollover;
2786 int branch;
2787 int exception;
2788 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2789
2790 chkpt = 0;
2791 rollover = 0;
2792
2793 for (i = 0; i < trace_data->depth; i++)
2794 {
2795 next_pc_ok = 0;
2796 branch = 0;
2797 exception = 0;
2798
2799 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2800 continue;
2801
2802 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2803 {
2804 case 0: /* Exceptions */
2805 case 1:
2806 case 2:
2807 case 3:
2808 case 4:
2809 case 5:
2810 case 6:
2811 case 7:
2812 exception = (trace_data->entries[i].data & 0x70) >> 4;
2813 next_pc_ok = 1;
2814 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2815 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2816 break;
2817 case 8: /* Direct Branch */
2818 branch = 1;
2819 break;
2820 case 9: /* Indirect Branch */
2821 branch = 1;
2822 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2823 {
2824 next_pc_ok = 1;
2825 }
2826 break;
2827 case 13: /* Checkpointed Indirect Branch */
2828 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2829 {
2830 next_pc_ok = 1;
2831 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2832 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2833 WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2834 }
2835 /* explicit fall-through */
2836 case 12: /* Checkpointed Direct Branch */
2837 branch = 1;
2838 if (chkpt == 0)
2839 {
2840 next_pc_ok = 1;
2841 next_pc = trace_data->chkpt0;
2842 chkpt++;
2843 }
2844 else if (chkpt == 1)
2845 {
2846 next_pc_ok = 1;
2847 				next_pc = trace_data->chkpt1;
2848 chkpt++;
2849 }
2850 else
2851 {
2852 WARNING("more than two checkpointed branches encountered");
2853 }
2854 break;
2855 case 15: /* Roll-over */
2856 rollover++;
2857 continue;
2858 default: /* Reserved */
2859 command_print(cmd_ctx, "--- reserved trace message ---");
2860 ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2861 return ERROR_OK;
2862 }
2863
2864 if (xscale->trace.pc_ok)
2865 {
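			/* the low nibble of a trace message counts instructions executed
			 * since the previous message; each roll-over message adds 16 */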
2866 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2867 arm_instruction_t instruction;
2868
2869 if ((exception == 6) || (exception == 7))
2870 {
2871 /* IRQ or FIQ exception, no instruction executed */
2872 executed -= 1;
2873 }
2874
2875 while (executed-- >= 0)
2876 {
2877 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2878 {
2879 /* can't continue tracing with no image available */
2880 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2881 {
2882 return retval;
2883 }
2884 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2885 {
2886 /* TODO: handle incomplete images */
2887 }
2888 }
2889
2890 /* a precise abort on a load to the PC is included in the incremental
2891 * word count, other instructions causing data aborts are not included
2892 */
2893 if ((executed == 0) && (exception == 4)
2894 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2895 {
2896 if ((instruction.type == ARM_LDM)
2897 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2898 {
2899 executed--;
2900 }
2901 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2902 && (instruction.info.load_store.Rd != 15))
2903 {
2904 executed--;
2905 }
2906 }
2907
2908 /* only the last instruction executed
2909 * (the one that caused the control flow change)
2910 * could be a taken branch
2911 */
2912 if (((executed == -1) && (branch == 1)) &&
2913 (((instruction.type == ARM_B) ||
2914 (instruction.type == ARM_BL) ||
2915 (instruction.type == ARM_BLX)) &&
2916 (instruction.info.b_bl_bx_blx.target_address != -1)))
2917 {
2918 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2919 }
2920 else
2921 {
2922 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2923 }
2924 command_print(cmd_ctx, "%s", instruction.text);
2925 }
2926
2927 rollover = 0;
2928 }
2929
2930 if (next_pc_ok)
2931 {
2932 xscale->trace.current_pc = next_pc;
2933 xscale->trace.pc_ok = 1;
2934 }
2935 }
2936
2937 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2938 {
2939 arm_instruction_t instruction;
2940 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2941 {
2942 /* can't continue tracing with no image available */
2943 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2944 {
2945 return retval;
2946 }
2947 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2948 {
2949 /* TODO: handle incomplete images */
2950 }
2951 }
2952 command_print(cmd_ctx, "%s", instruction.text);
2953 }
2954
2955 trace_data = trace_data->next;
2956 }
2957
2958 return ERROR_OK;
2959 }
2960
2961 void xscale_build_reg_cache(target_t *target)
2962 {
2963 /* get pointers to arch-specific information */
2964 armv4_5_common_t *armv4_5 = target->arch_info;
2965 xscale_common_t *xscale = armv4_5->arch_info;
2966
2967 reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
2968 xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
2969 int i;
2970 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
2971
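	/* the first cache holds the ARMv4/5 core registers, the second one
	 * (appended below) the XScale debug registers */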
2972 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2973 armv4_5->core_cache = (*cache_p);
2974
2975 /* register a register arch-type for XScale dbg registers only once */
2976 if (xscale_reg_arch_type == -1)
2977 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
2978
2979 (*cache_p)->next = malloc(sizeof(reg_cache_t));
2980 cache_p = &(*cache_p)->next;
2981
2982 /* fill in values for the xscale reg cache */
2983 (*cache_p)->name = "XScale registers";
2984 (*cache_p)->next = NULL;
2985 (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
2986 (*cache_p)->num_regs = num_regs;
2987
2988 for (i = 0; i < num_regs; i++)
2989 {
2990 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2991 (*cache_p)->reg_list[i].value = calloc(4, 1);
2992 (*cache_p)->reg_list[i].dirty = 0;
2993 (*cache_p)->reg_list[i].valid = 0;
2994 (*cache_p)->reg_list[i].size = 32;
2995 (*cache_p)->reg_list[i].bitfield_desc = NULL;
2996 (*cache_p)->reg_list[i].num_bitfields = 0;
2997 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2998 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
2999 arch_info[i] = xscale_reg_arch_info[i];
3000 arch_info[i].target = target;
3001 }
3002
3003 xscale->reg_cache = (*cache_p);
3004 }
3005
3006 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target)
3007 {
3008 if (startup_mode != DAEMON_RESET)
3009 {
3010 ERROR("XScale target requires a reset");
3011 ERROR("Reset target to enable debug");
3012 }
3013
3014 /* assert TRST once during startup */
3015 jtag_add_reset(1, 0);
3016 jtag_add_sleep(5000);
3017 jtag_add_reset(0, 0);
3018 jtag_execute_queue();
3019
3020 return ERROR_OK;
3021 }
3022
3023 int xscale_quit()
3024 {
3025
3026 return ERROR_OK;
3027 }
3028
3029 int xscale_init_arch_info(target_t *target, xscale_common_t *xscale, int chain_pos, char *variant)
3030 {
3031 armv4_5_common_t *armv4_5;
3032 u32 high_reset_branch, low_reset_branch;
3033 int i;
3034
3035 armv4_5 = &xscale->armv4_5_common;
3036
3037 	/* store architecture specific data (none so far) */
3038 xscale->arch_info = NULL;
3039 xscale->common_magic = XSCALE_COMMON_MAGIC;
3040
3041 /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
3042 xscale->variant = strdup(variant);
3043
3044 /* prepare JTAG information for the new target */
3045 xscale->jtag_info.chain_pos = chain_pos;
3046 jtag_register_event_callback(xscale_jtag_callback, target);
3047
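	/* JTAG instruction opcodes of the XScale debug TAP (DBGRX, DBGTX, DCSR, LDIC) */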
3048 xscale->jtag_info.dbgrx = 0x02;
3049 xscale->jtag_info.dbgtx = 0x10;
3050 xscale->jtag_info.dcsr = 0x09;
3051 xscale->jtag_info.ldic = 0x07;
3052
3053 if ((strcmp(xscale->variant, "pxa250") == 0) ||
3054 (strcmp(xscale->variant, "pxa255") == 0) ||
3055 (strcmp(xscale->variant, "pxa26x") == 0))
3056 {
3057 xscale->jtag_info.ir_length = 5;
3058 }
3059 else if ((strcmp(xscale->variant, "pxa27x") == 0) ||
3060 (strcmp(xscale->variant, "ixp42x") == 0) ||
3061 (strcmp(xscale->variant, "ixp45x") == 0) ||
3062 (strcmp(xscale->variant, "ixp46x") == 0))
3063 {
3064 xscale->jtag_info.ir_length = 7;
3065 }
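	/* a variant not matched above leaves ir_length at its zero-initialized value */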
3066
3067 /* the debug handler isn't installed (and thus not running) at this time */
3068 xscale->handler_installed = 0;
3069 xscale->handler_running = 0;
3070 xscale->handler_address = 0xfe000800;
3071
3072 /* clear the vectors we keep locally for reference */
3073 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3074 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3075
3076 /* no user-specified vectors have been configured yet */
3077 xscale->static_low_vectors_set = 0x0;
3078 xscale->static_high_vectors_set = 0x0;
3079
3080 /* calculate branches to debug handler */
3081 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3082 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
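	/* ARM B encoding: signed word offset relative to the vector address + 8
	 * (pipeline), hence the - 0x8 and >> 2 above */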
3083
3084 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3085 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3086
3087 for (i = 1; i <= 7; i++)
3088 {
3089 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3090 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3091 }
3092
3093 /* 64kB aligned region used for DCache cleaning */
3094 xscale->cache_clean_address = 0xfffe0000;
3095
3096 xscale->hold_rst = 0;
3097 xscale->external_debug_break = 0;
3098
3099 xscale->force_hw_bkpts = 1;
3100
3101 xscale->ibcr_available = 2;
3102 xscale->ibcr0_used = 0;
3103 xscale->ibcr1_used = 0;
3104
3105 xscale->dbr_available = 2;
3106 xscale->dbr0_used = 0;
3107 xscale->dbr1_used = 0;
3108
3109 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3110 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3111
3112 xscale->vector_catch = 0x1;
3113
3114 xscale->trace.capture_status = TRACE_IDLE;
3115 xscale->trace.data = NULL;
3116 xscale->trace.image = NULL;
3117 xscale->trace.buffer_enabled = 0;
3118 xscale->trace.buffer_fill = 0;
3119
3120 /* prepare ARMv4/5 specific information */
3121 armv4_5->arch_info = xscale;
3122 armv4_5->read_core_reg = xscale_read_core_reg;
3123 armv4_5->write_core_reg = xscale_write_core_reg;
3124 armv4_5->full_context = xscale_full_context;
3125
3126 armv4_5_init_arch_info(target, armv4_5);
3127
3128 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3129 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3130 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3131 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3132 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3133 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3134 xscale->armv4_5_mmu.has_tiny_pages = 1;
3135 xscale->armv4_5_mmu.mmu_enabled = 0;
3136
3137 xscale->fast_memory_access = 0;
3138
3139 return ERROR_OK;
3140 }
3141
3142 /* target xscale <endianness> <startup_mode> <chain_pos> <variant> */
3143 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target)
3144 {
3145 int chain_pos;
3146 char *variant = NULL;
3147 xscale_common_t *xscale = malloc(sizeof(xscale_common_t));
3148 memset(xscale, 0, sizeof(*xscale));
3149
3150 if (argc < 5)
3151 {
3152 		ERROR("'target xscale' requires four arguments: <endianness> <startup_mode> <chain_pos> <variant>");
3153 return ERROR_OK;
3154 }
3155
3156 chain_pos = strtoul(args[3], NULL, 0);
3157
3158 variant = args[4];
3159
3160 xscale_init_arch_info(target, xscale, chain_pos, variant);
3161 xscale_build_reg_cache(target);
3162
3163 return ERROR_OK;
3164 }
3165
3166 int xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3167 {
3168 target_t *target = NULL;
3169 armv4_5_common_t *armv4_5;
3170 xscale_common_t *xscale;
3171
3172 u32 handler_address;
3173
3174 if (argc < 2)
3175 {
3176 ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3177 return ERROR_OK;
3178 }
3179
3180 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3181 {
3182 ERROR("no target '%s' configured", args[0]);
3183 return ERROR_OK;
3184 }
3185
3186 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3187 {
3188 return ERROR_OK;
3189 }
3190
3191 handler_address = strtoul(args[1], NULL, 0);
3192
3193 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3194 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3195 {
3196 xscale->handler_address = handler_address;
3197 }
3198 else
3199 {
3200 ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3201 }
3202
3203 return ERROR_OK;
3204 }
3205
3206 int xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3207 {
3208 target_t *target = NULL;
3209 armv4_5_common_t *armv4_5;
3210 xscale_common_t *xscale;
3211
3212 u32 cache_clean_address;
3213
3214 if (argc < 2)
3215 {
3216 ERROR("'xscale cache_clean_address <target#> <address>' command takes two required operands");
3217 return ERROR_OK;
3218 }
3219
3220 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3221 {
3222 ERROR("no target '%s' configured", args[0]);
3223 return ERROR_OK;
3224 }
3225
3226 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3227 {
3228 return ERROR_OK;
3229 }
3230
3231 cache_clean_address = strtoul(args[1], NULL, 0);
3232
3233 if (cache_clean_address & 0xffff)
3234 {
3235 ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3236 }
3237 else
3238 {
3239 xscale->cache_clean_address = cache_clean_address;
3240 }
3241
3242 return ERROR_OK;
3243 }
3244
3245 int xscale_handle_cache_info_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3246 {
3247 target_t *target = get_current_target(cmd_ctx);
3248 armv4_5_common_t *armv4_5;
3249 xscale_common_t *xscale;
3250
3251 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3252 {
3253 return ERROR_OK;
3254 }
3255
3256 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3257 }
3258
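/* translate a virtual address through the MMU page tables;
 * the physical address is returned in *physical */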
3259 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical)
3260 {
3261 armv4_5_common_t *armv4_5;
3262 xscale_common_t *xscale;
3263 int retval;
3264 int type;
3265 u32 cb;
3266 int domain;
3267 u32 ap;
3268
3269
3270 if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
3271 {
3272 return retval;
3273 }
3274 u32 ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3275 if (type == -1)
3276 {
3277 return ret;
3278 }
3279 *physical = ret;
3280 return ERROR_OK;
3281 }
3282
3283 static int xscale_mmu(struct target_s *target, int *enabled)
3284 {
3285 armv4_5_common_t *armv4_5 = target->arch_info;
3286 xscale_common_t *xscale = armv4_5->arch_info;
3287
3288 if (target->state != TARGET_HALTED)
3289 {
3290 ERROR("Target not halted");
3291 return ERROR_TARGET_INVALID;
3292 }
3293 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3294 return ERROR_OK;
3295 }
3296
3297
3298 int xscale_handle_mmu_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3299 {
3300 target_t *target = get_current_target(cmd_ctx);
3301 armv4_5_common_t *armv4_5;
3302 xscale_common_t *xscale;
3303
3304 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3305 {
3306 return ERROR_OK;
3307 }
3308
3309 if (target->state != TARGET_HALTED)
3310 {
3311 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3312 return ERROR_OK;
3313 }
3314
3315 if (argc >= 1)
3316 {
3317 if (strcmp("enable", args[0]) == 0)
3318 {
3319 xscale_enable_mmu_caches(target, 1, 0, 0);
3320 xscale->armv4_5_mmu.mmu_enabled = 1;
3321 }
3322 else if (strcmp("disable", args[0]) == 0)
3323 {
3324 xscale_disable_mmu_caches(target, 1, 0, 0);
3325 xscale->armv4_5_mmu.mmu_enabled = 0;
3326 }
3327 }
3328
3329 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3330
3331 return ERROR_OK;
3332 }
3333
3334 int xscale_handle_idcache_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3335 {
3336 target_t *target = get_current_target(cmd_ctx);
3337 armv4_5_common_t *armv4_5;
3338 xscale_common_t *xscale;
3339 int icache = 0, dcache = 0;
3340
3341 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3342 {
3343 return ERROR_OK;
3344 }
3345
3346 if (target->state != TARGET_HALTED)
3347 {
3348 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3349 return ERROR_OK;
3350 }
3351
3352 if (strcmp(cmd, "icache") == 0)
3353 icache = 1;
3354 else if (strcmp(cmd, "dcache") == 0)
3355 dcache = 1;
3356
3357 if (argc >= 1)
3358 {
3359 if (strcmp("enable", args[0]) == 0)
3360 {
3361 xscale_enable_mmu_caches(target, 0, dcache, icache);
3362
3363 if (icache)
3364 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3365 else if (dcache)
3366 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3367 }
3368 else if (strcmp("disable", args[0]) == 0)
3369 {
3370 xscale_disable_mmu_caches(target, 0, dcache, icache);
3371
3372 if (icache)
3373 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3374 else if (dcache)
3375 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3376 }
3377 }
3378
3379 if (icache)
3380 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3381
3382 if (dcache)
3383 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3384
3385 return ERROR_OK;
3386 }
3387
3388 int xscale_handle_vector_catch_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3389 {
3390 target_t *target = get_current_target(cmd_ctx);
3391 armv4_5_common_t *armv4_5;
3392 xscale_common_t *xscale;
3393
3394 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3395 {
3396 return ERROR_OK;
3397 }
3398
3399 if (argc < 1)
3400 {
3401 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3402 }
3403 else
3404 {
3405 xscale->vector_catch = strtoul(args[0], NULL, 0);
3406 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3407 xscale_write_dcsr(target, -1, -1);
3408 }
3409
3410 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3411
3412 return ERROR_OK;
3413 }
3414
3415 int xscale_handle_force_hw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3416 {
3417 target_t *target = get_current_target(cmd_ctx);
3418 armv4_5_common_t *armv4_5;
3419 xscale_common_t *xscale;
3420
3421 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3422 {
3423 return ERROR_OK;
3424 }
3425
3426 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3427 {
3428 xscale->force_hw_bkpts = 1;
3429 }
3430 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3431 {
3432 xscale->force_hw_bkpts = 0;
3433 }
3434 else
3435 {
3436 command_print(cmd_ctx, "usage: xscale force_hw_bkpts <enable|disable>");
3437 }
3438
3439 command_print(cmd_ctx, "force hardware breakpoints %s", (xscale->force_hw_bkpts) ? "enabled" : "disabled");
3440
3441 return ERROR_OK;
3442 }
3443
3444 int xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3445 {
3446 target_t *target = get_current_target(cmd_ctx);
3447 armv4_5_common_t *armv4_5;
3448 xscale_common_t *xscale;
3449 u32 dcsr_value;
3450
3451 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3452 {
3453 return ERROR_OK;
3454 }
3455
3456 if (target->state != TARGET_HALTED)
3457 {
3458 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3459 return ERROR_OK;
3460 }
3461
3462 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3463 {
3464 xscale_trace_data_t *td, *next_td;
3465 xscale->trace.buffer_enabled = 1;
3466
3467 /* free old trace data */
3468 td = xscale->trace.data;
3469 while (td)
3470 {
3471 next_td = td->next;
3472
3473 if (td->entries)
3474 free(td->entries);
3475 free(td);
3476 td = next_td;
3477 }
3478 xscale->trace.data = NULL;
3479 }
3480 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3481 {
3482 xscale->trace.buffer_enabled = 0;
3483 }
3484
3485 if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
3486 {
3487 if (argc >= 3)
3488 xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
3489 else
3490 xscale->trace.buffer_fill = 1;
3491 }
3492 else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
3493 {
3494 xscale->trace.buffer_fill = -1;
3495 }
3496
3497 if (xscale->trace.buffer_enabled)
3498 {
3499 		/* if we enable the trace buffer while the target is halted,
3500 		 * we know the address of the first traced instruction */
3501 xscale->trace.pc_ok = 1;
3502 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3503 }
3504 else
3505 {
3506 /* otherwise the address is unknown, and we have no known good PC */
3507 xscale->trace.pc_ok = 0;
3508 }
3509
3510 command_print(cmd_ctx, "trace buffer %s (%s)",
3511 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3512 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3513
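	/* bit 1 of the DCSR selects the trace buffer mode: set for fill-once,
	 * clear for wrap-around */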
3514 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3515 if (xscale->trace.buffer_fill >= 0)
3516 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3517 else
3518 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3519
3520 return ERROR_OK;
3521 }
3522
3523 int xscale_handle_trace_image_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3524 {
3525 target_t *target;
3526 armv4_5_common_t *armv4_5;
3527 xscale_common_t *xscale;
3528
3529 if (argc < 1)
3530 {
3531 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3532 return ERROR_OK;
3533 }
3534
3535 target = get_current_target(cmd_ctx);
3536
3537 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3538 {
3539 return ERROR_OK;
3540 }
3541
3542 if (xscale->trace.image)
3543 {
3544 image_close(xscale->trace.image);
3545 free(xscale->trace.image);
3546 command_print(cmd_ctx, "previously loaded image found and closed");
3547 }
3548
3549 xscale->trace.image = malloc(sizeof(image_t));
3550 xscale->trace.image->base_address_set = 0;
3551 xscale->trace.image->start_address_set = 0;
3552
3553 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3554 if (argc >= 2)
3555 {
3556 xscale->trace.image->base_address_set = 1;
3557 xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
3558 }
3559 else
3560 {
3561 xscale->trace.image->base_address_set = 0;
3562 }
3563
3564 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
3565 {
3566 free(xscale->trace.image);
3567 xscale->trace.image = NULL;
3568 return ERROR_OK;
3569 }
3570
3571 return ERROR_OK;
3572 }
3573
3574 int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3575 {
3576 target_t *target = get_current_target(cmd_ctx);
3577 armv4_5_common_t *armv4_5;
3578 xscale_common_t *xscale;
3579 xscale_trace_data_t *trace_data;
3580 fileio_t file;
3581
3582 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3583 {
3584 return ERROR_OK;
3585 }
3586
3587 if (target->state != TARGET_HALTED)
3588 {
3589 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3590 return ERROR_OK;
3591 }
3592
3593 if (argc < 1)
3594 {
3595 command_print(cmd_ctx, "usage: xscale dump_trace <file>");
3596 return ERROR_OK;
3597 }
3598
3599 trace_data = xscale->trace.data;
3600
3601 if (!trace_data)
3602 {
3603 command_print(cmd_ctx, "no trace data collected");
3604 return ERROR_OK;
3605 }
3606
3607 if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3608 {
3609 return ERROR_OK;
3610 }
3611
3612 while (trace_data)
3613 {
3614 int i;
3615
3616 fileio_write_u32(&file, trace_data->chkpt0);
3617 fileio_write_u32(&file, trace_data->chkpt1);
3618 fileio_write_u32(&file, trace_data->last_instruction);
3619 fileio_write_u32(&file, trace_data->depth);
3620
3621 for (i = 0; i < trace_data->depth; i++)
3622 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3623
3624 trace_data = trace_data->next;
3625 }
3626
3627 fileio_close(&file);
3628
3629 return ERROR_OK;
3630 }
3631
3632 int xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3633 {
3634 target_t *target = get_current_target(cmd_ctx);
3635 armv4_5_common_t *armv4_5;
3636 xscale_common_t *xscale;
3637
3638 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3639 {
3640 return ERROR_OK;
3641 }
3642
3643 xscale_analyze_trace(target, cmd_ctx);
3644
3645 return ERROR_OK;
3646 }
3647
3648 int xscale_handle_cp15(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3649 {
3650 target_t *target = get_current_target(cmd_ctx);
3651 armv4_5_common_t *armv4_5;
3652 xscale_common_t *xscale;
3653
3654 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3655 {
3656 return ERROR_OK;
3657 }
3658
3659 if (target->state != TARGET_HALTED)
3660 {
3661 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3662 return ERROR_OK;
3663 }
3664 u32 reg_no = 0;
3665 reg_t *reg = NULL;
3666 if(argc > 0)
3667 {
3668 reg_no = strtoul(args[0], NULL, 0);
3669 /*translate from xscale cp15 register no to openocd register*/
3670 switch(reg_no)
3671 {
3672 case 0:
3673 reg_no = XSCALE_MAINID;
3674 break;
3675 case 1:
3676 reg_no = XSCALE_CTRL;
3677 break;
3678 case 2:
3679 reg_no = XSCALE_TTB;
3680 break;
3681 case 3:
3682 reg_no = XSCALE_DAC;
3683 break;
3684 case 5:
3685 reg_no = XSCALE_FSR;
3686 break;
3687 case 6:
3688 reg_no = XSCALE_FAR;
3689 break;
3690 case 13:
3691 reg_no = XSCALE_PID;
3692 break;
3693 case 15:
3694 reg_no = XSCALE_CPACCESS;
3695 break;
3696 default:
3697 command_print(cmd_ctx, "invalid register number");
3698 return ERROR_INVALID_ARGUMENTS;
3699 }
3700 reg = &xscale->reg_cache->reg_list[reg_no];
3701
3702 }
3703 if(argc == 1)
3704 {
3705 u32 value;
3706
3707 /* read cp15 control register */
3708 xscale_get_reg(reg);
3709 value = buf_get_u32(reg->value, 0, 32);
3710 command_print(cmd_ctx, "%s (/%i): 0x%x", reg->name, reg->size, value);
3711 }
3712 else if(argc == 2)
3713 {
3714
3715 u32 value = strtoul(args[1], NULL, 0);
3716
3717 /* send CP write request (command 0x41) */
3718 xscale_send_u32(target, 0x41);
3719
3720 /* send CP register number */
3721 xscale_send_u32(target, reg_no);
3722
3723 /* send CP register value */
3724 xscale_send_u32(target, value);
3725
3726 /* execute cpwait to ensure outstanding operations complete */
3727 xscale_send_u32(target, 0x53);
3728 }
3729 else
3730 {
3731 		command_print(cmd_ctx, "usage: cp15 <register> [value]");
3732 }
3733
3734 return ERROR_OK;
3735 }
3736
3737 int handle_xscale_fast_memory_access_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3738 {
3739 target_t *target = get_current_target(cmd_ctx);
3740 armv4_5_common_t *armv4_5;
3741 xscale_common_t *xscale;
3742
3743 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3744 {
3745 return ERROR_OK;
3746 }
3747
3748 if (argc == 1)
3749 {
3750 if (strcmp("enable", args[0]) == 0)
3751 {
3752 xscale->fast_memory_access = 1;
3753 }
3754 else if (strcmp("disable", args[0]) == 0)
3755 {
3756 xscale->fast_memory_access = 0;
3757 }
3758 else
3759 {
3760 return ERROR_COMMAND_SYNTAX_ERROR;
3761 }
3762 	} else if (argc != 0)
3763 {
3764 return ERROR_COMMAND_SYNTAX_ERROR;
3765 }
3766
3767 command_print(cmd_ctx, "fast memory access is %s", (xscale->fast_memory_access) ? "enabled" : "disabled");
3768
3769 return ERROR_OK;
3770 }
3771
3772 int xscale_register_commands(struct command_context_s *cmd_ctx)
3773 {
3774 command_t *xscale_cmd;
3775
3776 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3777
3778 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3779 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3780
3781 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3782 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3783 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3784 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3785
3786 	register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be caught");
3787
3788 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable|disable> ['fill' [n]|'wrap']");
3789
3790 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3791 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3792 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3793 		COMMAND_EXEC, "load image from <file> [base address] [type]");
3794
3795 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3796 register_command(cmd_ctx, xscale_cmd, "fast_memory_access", handle_xscale_fast_memory_access_command,
3797 COMMAND_ANY, "use fast memory accesses instead of slower but potentially unsafe slow accesses <enable|disable>");
3798
3799
3800 armv4_5_register_commands(cmd_ctx);
3801
3802 return ERROR_OK;
3803 }
