482332433446aea9c1fa5b7359388ee022b793fa
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
19 ***************************************************************************/
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "replacements.h"
25
26 #include "xscale.h"
27
28 #include "register.h"
29 #include "target.h"
30 #include "armv4_5.h"
31 #include "arm_simulator.h"
32 #include "arm_disassembler.h"
33 #include "log.h"
34 #include "jtag.h"
35 #include "binarybuffer.h"
36 #include "time_support.h"
37 #include "breakpoints.h"
38 #include "fileio.h"
39
40 #include <stdlib.h>
41 #include <string.h>
42
43 #include <sys/types.h>
44 #include <unistd.h>
45 #include <errno.h>
46
47
48 /* cli handling */
49 int xscale_register_commands(struct command_context_s *cmd_ctx);
50
51 /* forward declarations */
52 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target);
53 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target);
54 int xscale_quit();
55
56 int xscale_arch_state(struct target_s *target);
57 int xscale_poll(target_t *target);
58 int xscale_halt(target_t *target);
59 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution);
60 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints);
61 int xscale_debug_entry(target_t *target);
62 int xscale_restore_context(target_t *target);
63
64 int xscale_assert_reset(target_t *target);
65 int xscale_deassert_reset(target_t *target);
66 int xscale_soft_reset_halt(struct target_s *target);
67
68 int xscale_set_reg_u32(reg_t *reg, u32 value);
69
70 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode);
71 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value);
72
73 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
74 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
75 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer);
76 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum);
77
78 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
79 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
80 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
81 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
82 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
83 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
84 void xscale_enable_watchpoints(struct target_s *target);
85 void xscale_enable_breakpoints(struct target_s *target);
86 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical);
87 static int xscale_mmu(struct target_s *target, int *enabled);
88
89 int xscale_read_trace(target_t *target);
90
91 target_type_t xscale_target =
92 {
93 .name = "xscale",
94
95 .poll = xscale_poll,
96 .arch_state = xscale_arch_state,
97
98 .target_request_data = NULL,
99
100 .halt = xscale_halt,
101 .resume = xscale_resume,
102 .step = xscale_step,
103
104 .assert_reset = xscale_assert_reset,
105 .deassert_reset = xscale_deassert_reset,
106 .soft_reset_halt = xscale_soft_reset_halt,
107
108 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
109
110 .read_memory = xscale_read_memory,
111 .write_memory = xscale_write_memory,
112 .bulk_write_memory = xscale_bulk_write_memory,
113 .checksum_memory = xscale_checksum_memory,
114
115 .run_algorithm = armv4_5_run_algorithm,
116
117 .add_breakpoint = xscale_add_breakpoint,
118 .remove_breakpoint = xscale_remove_breakpoint,
119 .add_watchpoint = xscale_add_watchpoint,
120 .remove_watchpoint = xscale_remove_watchpoint,
121
122 .register_commands = xscale_register_commands,
123 .target_command = xscale_target_command,
124 .init_target = xscale_init_target,
125 .quit = xscale_quit,
126
127 .virt2phys = xscale_virt2phys,
128 .mmu = xscale_mmu
129 };
130
131 char* xscale_reg_list[] =
132 {
133 "XSCALE_MAINID", /* 0 */
134 "XSCALE_CACHETYPE",
135 "XSCALE_CTRL",
136 "XSCALE_AUXCTRL",
137 "XSCALE_TTB",
138 "XSCALE_DAC",
139 "XSCALE_FSR",
140 "XSCALE_FAR",
141 "XSCALE_PID",
142 "XSCALE_CPACCESS",
143 "XSCALE_IBCR0", /* 10 */
144 "XSCALE_IBCR1",
145 "XSCALE_DBR0",
146 "XSCALE_DBR1",
147 "XSCALE_DBCON",
148 "XSCALE_TBREG",
149 "XSCALE_CHKPT0",
150 "XSCALE_CHKPT1",
151 "XSCALE_DCSR",
152 "XSCALE_TX",
153 "XSCALE_RX", /* 20 */
154 "XSCALE_TXRXCTRL",
155 };
156
157 xscale_reg_t xscale_reg_arch_info[] =
158 {
159 {XSCALE_MAINID, NULL},
160 {XSCALE_CACHETYPE, NULL},
161 {XSCALE_CTRL, NULL},
162 {XSCALE_AUXCTRL, NULL},
163 {XSCALE_TTB, NULL},
164 {XSCALE_DAC, NULL},
165 {XSCALE_FSR, NULL},
166 {XSCALE_FAR, NULL},
167 {XSCALE_PID, NULL},
168 {XSCALE_CPACCESS, NULL},
169 {XSCALE_IBCR0, NULL},
170 {XSCALE_IBCR1, NULL},
171 {XSCALE_DBR0, NULL},
172 {XSCALE_DBR1, NULL},
173 {XSCALE_DBCON, NULL},
174 {XSCALE_TBREG, NULL},
175 {XSCALE_CHKPT0, NULL},
176 {XSCALE_CHKPT1, NULL},
177 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
178 {-1, NULL}, /* TX accessed via JTAG */
179 {-1, NULL}, /* RX accessed via JTAG */
180 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
181 };
182
183 int xscale_reg_arch_type = -1;
184
185 int xscale_get_reg(reg_t *reg);
186 int xscale_set_reg(reg_t *reg, u8 *buf);
187
188 int xscale_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
189 {
190 armv4_5_common_t *armv4_5 = target->arch_info;
191 xscale_common_t *xscale = armv4_5->arch_info;
192
193 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
194 {
195 LOG_ERROR("target isn't an XScale target");
196 return -1;
197 }
198
199 if (xscale->common_magic != XSCALE_COMMON_MAGIC)
200 {
201 LOG_ERROR("target isn't an XScale target");
202 return -1;
203 }
204
205 *armv4_5_p = armv4_5;
206 *xscale_p = xscale;
207
208 return ERROR_OK;
209 }
210
211 int xscale_jtag_set_instr(int chain_pos, u32 new_instr)
212 {
213 jtag_device_t *device = jtag_get_device(chain_pos);
214
215 if (buf_get_u32(device->cur_instr, 0, device->ir_length) != new_instr)
216 {
217 scan_field_t field;
218
219 field.device = chain_pos;
220 field.num_bits = device->ir_length;
221 field.out_value = calloc(CEIL(field.num_bits, 8), 1);
222 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
223 field.out_mask = NULL;
224 field.in_value = NULL;
225 jtag_set_check_value(&field, device->expected, device->expected_mask, NULL);
226
227 jtag_add_ir_scan(1, &field, -1);
228
229 free(field.out_value);
230 }
231
232 return ERROR_OK;
233 }
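/* Note: callers select one of the XScale debug TAP instructions before queueing a
 * DR scan, typically as
 *
 *     jtag_add_end_state(TAP_RTI);
 *     xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
 *
 * Because the function compares against device->cur_instr, the IR scan is only
 * queued when the instruction register actually has to change, which keeps the
 * JTAG queue short when the same data register (DCSR, DBGTX, DBGRX or LDIC) is
 * accessed repeatedly.
 */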
234
235 int xscale_read_dcsr(target_t *target)
236 {
237 armv4_5_common_t *armv4_5 = target->arch_info;
238 xscale_common_t *xscale = armv4_5->arch_info;
239
240 int retval;
241
242 scan_field_t fields[3];
243 u8 field0 = 0x0;
244 u8 field0_check_value = 0x2;
245 u8 field0_check_mask = 0x7;
246 u8 field2 = 0x0;
247 u8 field2_check_value = 0x0;
248 u8 field2_check_mask = 0x1;
249
250 jtag_add_end_state(TAP_PD);
251 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
252
253 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
254 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
255
256 fields[0].device = xscale->jtag_info.chain_pos;
257 fields[0].num_bits = 3;
258 fields[0].out_value = &field0;
259 fields[0].out_mask = NULL;
260 fields[0].in_value = NULL;
261 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
262
263 fields[1].device = xscale->jtag_info.chain_pos;
264 fields[1].num_bits = 32;
265 fields[1].out_value = NULL;
266 fields[1].out_mask = NULL;
267 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
268 fields[1].in_handler = NULL;
269 fields[1].in_handler_priv = NULL;
270 fields[1].in_check_value = NULL;
271 fields[1].in_check_mask = NULL;
272
273 fields[2].device = xscale->jtag_info.chain_pos;
274 fields[2].num_bits = 1;
275 fields[2].out_value = &field2;
276 fields[2].out_mask = NULL;
277 fields[2].in_value = NULL;
278 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
279
280 jtag_add_dr_scan(3, fields, -1);
281
282 if ((retval = jtag_execute_queue()) != ERROR_OK)
283 {
284 LOG_ERROR("JTAG error while reading DCSR");
285 return retval;
286 }
287
288 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
289 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
290
291 /* write the register with the value we just read
 292 	 * on this second pass, only the first bit of field0 is guaranteed to be 0
293 */
294 field0_check_mask = 0x1;
295 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
296 fields[1].in_value = NULL;
297
298 jtag_add_end_state(TAP_RTI);
299
300 jtag_add_dr_scan(3, fields, -1);
301
302 /* DANGER!!! this must be here. It will make sure that the arguments
 303 	 * to jtag_set_check_value() do not go out of scope! */
304 return jtag_execute_queue();
305 }
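/* Note: every DCSR/DBGTX/DBGRX access in this file uses the same 36-bit data
 * register layout - a 3-bit handshake/status field (fields[0]), the 32-bit payload
 * (fields[1]) and one trailing control bit (fields[2]).  The values passed to
 * jtag_set_check_value() let the JTAG layer verify the status bits shifted out by
 * the core without scheduling a separate read-back.
 */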
306
307 int xscale_receive(target_t *target, u32 *buffer, int num_words)
308 {
309 if (num_words==0)
310 return ERROR_INVALID_ARGUMENTS;
311
312 int retval=ERROR_OK;
313 armv4_5_common_t *armv4_5 = target->arch_info;
314 xscale_common_t *xscale = armv4_5->arch_info;
315
316 enum tap_state path[3];
317 scan_field_t fields[3];
318
319 u8 *field0 = malloc(num_words * 1);
320 u8 field0_check_value = 0x2;
321 u8 field0_check_mask = 0x6;
322 u32 *field1 = malloc(num_words * 4);
323 u8 field2_check_value = 0x0;
324 u8 field2_check_mask = 0x1;
325 int words_done = 0;
326 int words_scheduled = 0;
327
328 int i;
329
330 path[0] = TAP_SDS;
331 path[1] = TAP_CD;
332 path[2] = TAP_SD;
333
334 fields[0].device = xscale->jtag_info.chain_pos;
335 fields[0].num_bits = 3;
336 fields[0].out_value = NULL;
337 fields[0].out_mask = NULL;
338 fields[0].in_value = NULL;
339 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
340
341 fields[1].device = xscale->jtag_info.chain_pos;
342 fields[1].num_bits = 32;
343 fields[1].out_value = NULL;
344 fields[1].out_mask = NULL;
345 fields[1].in_value = NULL;
346 fields[1].in_handler = NULL;
347 fields[1].in_handler_priv = NULL;
348 fields[1].in_check_value = NULL;
349 fields[1].in_check_mask = NULL;
350
351
352
353 fields[2].device = xscale->jtag_info.chain_pos;
354 fields[2].num_bits = 1;
355 fields[2].out_value = NULL;
356 fields[2].out_mask = NULL;
357 fields[2].in_value = NULL;
358 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
359
360 jtag_add_end_state(TAP_RTI);
361 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
362 jtag_add_runtest(1, -1); /* ensures that we're in the TAP_RTI state as the above could be a no-op */
363
364 /* repeat until all words have been collected */
365 int attempts=0;
366 while (words_done < num_words)
367 {
368 /* schedule reads */
369 words_scheduled = 0;
370 for (i = words_done; i < num_words; i++)
371 {
372 fields[0].in_value = &field0[i];
373 fields[1].in_handler = buf_to_u32_handler;
374 fields[1].in_handler_priv = (u8*)&field1[i];
375
376 jtag_add_pathmove(3, path);
377 jtag_add_dr_scan(3, fields, TAP_RTI);
378 words_scheduled++;
379 }
380
381 if ((retval = jtag_execute_queue()) != ERROR_OK)
382 {
383 LOG_ERROR("JTAG error while receiving data from debug handler");
384 break;
385 }
386
387 /* examine results */
388 for (i = words_done; i < num_words; i++)
389 {
 390 			if (!(field0[i] & 1))
391 {
392 /* move backwards if necessary */
393 int j;
394 for (j = i; j < num_words - 1; j++)
395 {
396 field0[j] = field0[j+1];
397 field1[j] = field1[j+1];
398 }
399 words_scheduled--;
400 }
401 }
402 if (words_scheduled==0)
403 {
404 if (attempts++==1000)
405 {
 406 			LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
407 retval=ERROR_TARGET_TIMEOUT;
408 break;
409 }
410 }
411
412 words_done += words_scheduled;
413 }
414
415 for (i = 0; i < num_words; i++)
416 *(buffer++) = buf_get_u32((u8*)&field1[i], 0, 32);
417
 418 	free(field0);
 	free(field1);
419
420 return retval;
421 }
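/* Note: xscale_receive() deliberately over-schedules - it queues a read for every
 * still-outstanding word, then discards results whose DBGTX valid bit (bit 0 of the
 * 3-bit status field) was clear, compacting field0[]/field1[] and retrying the
 * remainder.  Only passes that yield no valid word at all count towards the 1000
 * attempts after which the transfer is aborted with ERROR_TARGET_TIMEOUT.
 */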
422
423 int xscale_read_tx(target_t *target, int consume)
424 {
425 armv4_5_common_t *armv4_5 = target->arch_info;
426 xscale_common_t *xscale = armv4_5->arch_info;
427 enum tap_state path[3];
428 enum tap_state noconsume_path[6];
429
430 int retval;
431 struct timeval timeout, now;
432
433 scan_field_t fields[3];
434 u8 field0_in = 0x0;
435 u8 field0_check_value = 0x2;
436 u8 field0_check_mask = 0x6;
437 u8 field2_check_value = 0x0;
438 u8 field2_check_mask = 0x1;
439
440 jtag_add_end_state(TAP_RTI);
441
442 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
443
444 path[0] = TAP_SDS;
445 path[1] = TAP_CD;
446 path[2] = TAP_SD;
447
448 noconsume_path[0] = TAP_SDS;
449 noconsume_path[1] = TAP_CD;
450 noconsume_path[2] = TAP_E1D;
451 noconsume_path[3] = TAP_PD;
452 noconsume_path[4] = TAP_E2D;
453 noconsume_path[5] = TAP_SD;
454
455 fields[0].device = xscale->jtag_info.chain_pos;
456 fields[0].num_bits = 3;
457 fields[0].out_value = NULL;
458 fields[0].out_mask = NULL;
459 fields[0].in_value = &field0_in;
460 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
461
462 fields[1].device = xscale->jtag_info.chain_pos;
463 fields[1].num_bits = 32;
464 fields[1].out_value = NULL;
465 fields[1].out_mask = NULL;
466 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
467 fields[1].in_handler = NULL;
468 fields[1].in_handler_priv = NULL;
469 fields[1].in_check_value = NULL;
470 fields[1].in_check_mask = NULL;
471
472
473
474 fields[2].device = xscale->jtag_info.chain_pos;
475 fields[2].num_bits = 1;
476 fields[2].out_value = NULL;
477 fields[2].out_mask = NULL;
478 fields[2].in_value = NULL;
479 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
480
481 gettimeofday(&timeout, NULL);
482 timeval_add_time(&timeout, 1, 0);
483
484 for (;;)
485 {
486 int i;
487 for (i=0; i<100; i++)
488 {
489 /* if we want to consume the register content (i.e. clear TX_READY),
490 * we have to go straight from Capture-DR to Shift-DR
491 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
492 */
493 if (consume)
494 jtag_add_pathmove(3, path);
495 else
496 {
497 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
498 }
499
500 jtag_add_dr_scan(3, fields, TAP_RTI);
501
502 if ((retval = jtag_execute_queue()) != ERROR_OK)
503 {
504 LOG_ERROR("JTAG error while reading TX");
505 return ERROR_TARGET_TIMEOUT;
506 }
507
508 gettimeofday(&now, NULL);
509 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
510 {
511 LOG_ERROR("time out reading TX register");
512 return ERROR_TARGET_TIMEOUT;
513 }
514 if (!((!(field0_in & 1)) && consume))
515 {
516 goto done;
517 }
518 }
519 LOG_DEBUG("waiting 10ms");
520 usleep(10*1000); /* avoid flooding the logs */
521 }
522 done:
523
524 if (!(field0_in & 1))
525 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
526
527 return ERROR_OK;
528 }
529
530 int xscale_write_rx(target_t *target)
531 {
532 armv4_5_common_t *armv4_5 = target->arch_info;
533 xscale_common_t *xscale = armv4_5->arch_info;
534
535 int retval;
536 struct timeval timeout, now;
537
538 scan_field_t fields[3];
539 u8 field0_out = 0x0;
540 u8 field0_in = 0x0;
541 u8 field0_check_value = 0x2;
542 u8 field0_check_mask = 0x6;
543 u8 field2 = 0x0;
544 u8 field2_check_value = 0x0;
545 u8 field2_check_mask = 0x1;
546
547 jtag_add_end_state(TAP_RTI);
548
549 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
550
551 fields[0].device = xscale->jtag_info.chain_pos;
552 fields[0].num_bits = 3;
553 fields[0].out_value = &field0_out;
554 fields[0].out_mask = NULL;
555 fields[0].in_value = &field0_in;
556 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
557
558 fields[1].device = xscale->jtag_info.chain_pos;
559 fields[1].num_bits = 32;
560 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
561 fields[1].out_mask = NULL;
562 fields[1].in_value = NULL;
563 fields[1].in_handler = NULL;
564 fields[1].in_handler_priv = NULL;
565 fields[1].in_check_value = NULL;
566 fields[1].in_check_mask = NULL;
567
568
569
570 fields[2].device = xscale->jtag_info.chain_pos;
571 fields[2].num_bits = 1;
572 fields[2].out_value = &field2;
573 fields[2].out_mask = NULL;
574 fields[2].in_value = NULL;
575 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
576
577 gettimeofday(&timeout, NULL);
578 timeval_add_time(&timeout, 1, 0);
579
580 /* poll until rx_read is low */
581 LOG_DEBUG("polling RX");
582 for (;;)
583 {
584 int i;
585 for (i=0; i<10; i++)
586 {
587 jtag_add_dr_scan(3, fields, TAP_RTI);
588
589 if ((retval = jtag_execute_queue()) != ERROR_OK)
590 {
591 LOG_ERROR("JTAG error while writing RX");
592 return retval;
593 }
594
595 gettimeofday(&now, NULL);
596 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
597 {
598 LOG_ERROR("time out writing RX register");
599 return ERROR_TARGET_TIMEOUT;
600 }
601 if (!(field0_in & 1))
602 goto done;
603 }
604 LOG_DEBUG("waiting 10ms");
605 usleep(10*1000); /* wait 10ms to avoid flooding the logs */
606 }
607 done:
608
609 /* set rx_valid */
610 field2 = 0x1;
611 jtag_add_dr_scan(3, fields, TAP_RTI);
612
613 if ((retval = jtag_execute_queue()) != ERROR_OK)
614 {
615 LOG_ERROR("JTAG error while writing RX");
616 return retval;
617 }
618
619 return ERROR_OK;
620 }
621
 622 /* send <count> elements of <size> bytes each to the debug handler */
623 int xscale_send(target_t *target, u8 *buffer, int count, int size)
624 {
625 armv4_5_common_t *armv4_5 = target->arch_info;
626 xscale_common_t *xscale = armv4_5->arch_info;
627 u32 t[3];
628 int bits[3];
629
630 int retval;
631
632 int done_count = 0;
633
634 jtag_add_end_state(TAP_RTI);
635
636 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
637
638 bits[0]=3;
639 t[0]=0;
640 bits[1]=32;
641 t[2]=1;
642 bits[2]=1;
643 int endianness = target->endianness;
644 while (done_count++ < count)
645 {
646 switch (size)
647 {
648 case 4:
649 if (endianness == TARGET_LITTLE_ENDIAN)
650 {
651 t[1]=le_to_h_u32(buffer);
652 } else
653 {
654 t[1]=be_to_h_u32(buffer);
655 }
656 break;
657 case 2:
658 if (endianness == TARGET_LITTLE_ENDIAN)
659 {
660 t[1]=le_to_h_u16(buffer);
661 } else
662 {
663 t[1]=be_to_h_u16(buffer);
664 }
665 break;
666 case 1:
667 t[1]=buffer[0];
668 break;
669 default:
670 LOG_ERROR("BUG: size neither 4, 2 nor 1");
671 exit(-1);
672 }
673 jtag_add_dr_out(xscale->jtag_info.chain_pos,
674 3,
675 bits,
676 t,
677 TAP_RTI);
678 buffer += size;
679 }
680
681 if ((retval = jtag_execute_queue()) != ERROR_OK)
682 {
683 LOG_ERROR("JTAG error while sending data to debug handler");
684 return retval;
685 }
686
687 return ERROR_OK;
688 }
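/* Note: unlike xscale_write_rx(), xscale_send() performs no per-word handshake; it
 * streams <count> DR scans of 3 + 32 + 1 bits through jtag_add_dr_out(), with the
 * single trailing bit set just like the rx_valid flag in xscale_write_rx(), and
 * checks for JTAG errors only once after the whole queue has run.  This assumes the
 * debug handler drains RX fast enough to keep up with the download.
 */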
689
690 int xscale_send_u32(target_t *target, u32 value)
691 {
692 armv4_5_common_t *armv4_5 = target->arch_info;
693 xscale_common_t *xscale = armv4_5->arch_info;
694
695 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
696 return xscale_write_rx(target);
697 }
698
699 int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
700 {
701 armv4_5_common_t *armv4_5 = target->arch_info;
702 xscale_common_t *xscale = armv4_5->arch_info;
703
704 int retval;
705
706 scan_field_t fields[3];
707 u8 field0 = 0x0;
708 u8 field0_check_value = 0x2;
709 u8 field0_check_mask = 0x7;
710 u8 field2 = 0x0;
711 u8 field2_check_value = 0x0;
712 u8 field2_check_mask = 0x1;
713
714 if (hold_rst != -1)
715 xscale->hold_rst = hold_rst;
716
717 if (ext_dbg_brk != -1)
718 xscale->external_debug_break = ext_dbg_brk;
719
720 jtag_add_end_state(TAP_RTI);
721 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
722
723 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
724 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
725
726 fields[0].device = xscale->jtag_info.chain_pos;
727 fields[0].num_bits = 3;
728 fields[0].out_value = &field0;
729 fields[0].out_mask = NULL;
730 fields[0].in_value = NULL;
731 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
732
733 fields[1].device = xscale->jtag_info.chain_pos;
734 fields[1].num_bits = 32;
735 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
736 fields[1].out_mask = NULL;
737 fields[1].in_value = NULL;
738 fields[1].in_handler = NULL;
739 fields[1].in_handler_priv = NULL;
740 fields[1].in_check_value = NULL;
741 fields[1].in_check_mask = NULL;
742
743
744
745 fields[2].device = xscale->jtag_info.chain_pos;
746 fields[2].num_bits = 1;
747 fields[2].out_value = &field2;
748 fields[2].out_mask = NULL;
749 fields[2].in_value = NULL;
750 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
751
752 jtag_add_dr_scan(3, fields, -1);
753
754 if ((retval = jtag_execute_queue()) != ERROR_OK)
755 {
756 LOG_ERROR("JTAG error while writing DCSR");
757 return retval;
758 }
759
760 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
761 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
762
763 return ERROR_OK;
764 }
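/* Note: passing -1 for hold_rst or ext_dbg_brk leaves the corresponding cached
 * setting in xscale->hold_rst / xscale->external_debug_break untouched, so callers
 * can flip one of the two control bits without knowing the current state of the
 * other.
 */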
765
 766 /* parity of the number of set bits: 0 if even, 1 if odd. for 32 bit words */
767 unsigned int parity (unsigned int v)
768 {
769 unsigned int ov = v;
770 v ^= v >> 16;
771 v ^= v >> 8;
772 v ^= v >> 4;
773 v &= 0xf;
774 LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
775 return (0x6996 >> v) & 1;
776 }
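/* Worked example for parity(): the shifts fold all 32 bits into one nibble, and
 * 0x6996 acts as a 16-entry lookup table holding the parity of each nibble value.
 * For v = 0x0000000b (three bits set):
 *
 *     v ^= v >> 16;   v == 0x0000000b
 *     v ^= v >> 8;    v == 0x0000000b
 *     v ^= v >> 4;    v == 0x0000000b
 *     v &= 0xf;       v == 0xb
 *     (0x6996 >> 0xb) & 1 == 1   -> odd number of set bits
 */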
777
778 int xscale_load_ic(target_t *target, int mini, u32 va, u32 buffer[8])
779 {
780 armv4_5_common_t *armv4_5 = target->arch_info;
781 xscale_common_t *xscale = armv4_5->arch_info;
782 u8 packet[4];
783 u8 cmd;
784 int word;
785
786 scan_field_t fields[2];
787
788 LOG_DEBUG("loading miniIC at 0x%8.8x", va);
789
790 jtag_add_end_state(TAP_RTI);
791 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
792
793 /* CMD is b010 for Main IC and b011 for Mini IC */
794 if (mini)
795 buf_set_u32(&cmd, 0, 3, 0x3);
796 else
797 buf_set_u32(&cmd, 0, 3, 0x2);
798
799 buf_set_u32(&cmd, 3, 3, 0x0);
800
801 /* virtual address of desired cache line */
802 buf_set_u32(packet, 0, 27, va >> 5);
803
804 fields[0].device = xscale->jtag_info.chain_pos;
805 fields[0].num_bits = 6;
806 fields[0].out_value = &cmd;
807 fields[0].out_mask = NULL;
808 fields[0].in_value = NULL;
809 fields[0].in_check_value = NULL;
810 fields[0].in_check_mask = NULL;
811 fields[0].in_handler = NULL;
812 fields[0].in_handler_priv = NULL;
813
814 fields[1].device = xscale->jtag_info.chain_pos;
815 fields[1].num_bits = 27;
816 fields[1].out_value = packet;
817 fields[1].out_mask = NULL;
818 fields[1].in_value = NULL;
819 fields[1].in_check_value = NULL;
820 fields[1].in_check_mask = NULL;
821 fields[1].in_handler = NULL;
822 fields[1].in_handler_priv = NULL;
823
824 jtag_add_dr_scan(2, fields, -1);
825
826 fields[0].num_bits = 32;
827 fields[0].out_value = packet;
828
829 fields[1].num_bits = 1;
830 fields[1].out_value = &cmd;
831
832 for (word = 0; word < 8; word++)
833 {
834 buf_set_u32(packet, 0, 32, buffer[word]);
835 cmd = parity(*((u32*)packet));
836 jtag_add_dr_scan(2, fields, -1);
837 }
838
839 jtag_execute_queue();
840
841 return ERROR_OK;
842 }
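/* Note: a cache-line load through the LDIC JTAG instruction is a two-part
 * sequence - first a 6 + 27 bit scan carrying the command (b011 for the mini
 * instruction cache, b010 for the main one) and the line's virtual address >> 5,
 * then eight 32 + 1 bit scans, each data word accompanied by the parity bit
 * computed by parity() above.
 */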
843
844 int xscale_invalidate_ic_line(target_t *target, u32 va)
845 {
846 armv4_5_common_t *armv4_5 = target->arch_info;
847 xscale_common_t *xscale = armv4_5->arch_info;
848 u8 packet[4];
849 u8 cmd;
850
851 scan_field_t fields[2];
852
853 jtag_add_end_state(TAP_RTI);
854 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
855
856 /* CMD for invalidate IC line b000, bits [6:4] b000 */
857 buf_set_u32(&cmd, 0, 6, 0x0);
858
859 /* virtual address of desired cache line */
860 buf_set_u32(packet, 0, 27, va >> 5);
861
862 fields[0].device = xscale->jtag_info.chain_pos;
863 fields[0].num_bits = 6;
864 fields[0].out_value = &cmd;
865 fields[0].out_mask = NULL;
866 fields[0].in_value = NULL;
867 fields[0].in_check_value = NULL;
868 fields[0].in_check_mask = NULL;
869 fields[0].in_handler = NULL;
870 fields[0].in_handler_priv = NULL;
871
872 fields[1].device = xscale->jtag_info.chain_pos;
873 fields[1].num_bits = 27;
874 fields[1].out_value = packet;
875 fields[1].out_mask = NULL;
876 fields[1].in_value = NULL;
877 fields[1].in_check_value = NULL;
878 fields[1].in_check_mask = NULL;
879 fields[1].in_handler = NULL;
880 fields[1].in_handler_priv = NULL;
881
882 jtag_add_dr_scan(2, fields, -1);
883
884 return ERROR_OK;
885 }
886
887 int xscale_update_vectors(target_t *target)
888 {
889 armv4_5_common_t *armv4_5 = target->arch_info;
890 xscale_common_t *xscale = armv4_5->arch_info;
891 int i;
892 int retval;
893
894 u32 low_reset_branch, high_reset_branch;
895
896 for (i = 1; i < 8; i++)
897 {
898 /* if there's a static vector specified for this exception, override */
899 if (xscale->static_high_vectors_set & (1 << i))
900 {
901 xscale->high_vectors[i] = xscale->static_high_vectors[i];
902 }
903 else
904 {
905 retval=target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
906 if (retval == ERROR_TARGET_TIMEOUT)
907 return retval;
908 if (retval!=ERROR_OK)
909 {
910 /* Some of these reads will fail as part of normal execution */
911 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
912 }
913 }
914 }
915
916 for (i = 1; i < 8; i++)
917 {
918 if (xscale->static_low_vectors_set & (1 << i))
919 {
920 xscale->low_vectors[i] = xscale->static_low_vectors[i];
921 }
922 else
923 {
924 retval=target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
925 if (retval == ERROR_TARGET_TIMEOUT)
926 return retval;
927 if (retval!=ERROR_OK)
928 {
929 /* Some of these reads will fail as part of normal execution */
930 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
931 }
932 }
933 }
934
935 /* calculate branches to debug handler */
936 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
937 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
938
939 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
940 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
941
942 /* invalidate and load exception vectors in mini i-cache */
943 xscale_invalidate_ic_line(target, 0x0);
944 xscale_invalidate_ic_line(target, 0xffff0000);
945
946 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
947 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
948
949 return ERROR_OK;
950 }
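/* Note on the reset-branch arithmetic above: an ARM B instruction encodes a signed
 * 24-bit word offset relative to PC + 8, so the value placed into vector slot 0 is
 * (branch_target - vector_address - 8) >> 2, truncated to the 24-bit field by the
 * & 0xffffff.  The branch target is handler_address + 0x20, i.e. the reset entry
 * point is assumed to sit 0x20 bytes into the debug handler image.
 */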
951
952 int xscale_arch_state(struct target_s *target)
953 {
954 armv4_5_common_t *armv4_5 = target->arch_info;
955 xscale_common_t *xscale = armv4_5->arch_info;
956
957 char *state[] =
958 {
959 "disabled", "enabled"
960 };
961
962 char *arch_dbg_reason[] =
963 {
964 "", "\n(processor reset)", "\n(trace buffer full)"
965 };
966
967 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
968 {
969 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
970 exit(-1);
971 }
972
973 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
974 "cpsr: 0x%8.8x pc: 0x%8.8x\n"
975 "MMU: %s, D-Cache: %s, I-Cache: %s"
976 "%s",
977 armv4_5_state_strings[armv4_5->core_state],
978 target_debug_reason_strings[target->debug_reason],
979 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
980 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
981 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
982 state[xscale->armv4_5_mmu.mmu_enabled],
983 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
984 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
985 arch_dbg_reason[xscale->arch_debug_reason]);
986
987 return ERROR_OK;
988 }
989
990 int xscale_poll(target_t *target)
991 {
992 int retval=ERROR_OK;
993 armv4_5_common_t *armv4_5 = target->arch_info;
994 xscale_common_t *xscale = armv4_5->arch_info;
995
996 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
997 {
998 enum target_state previous_state = target->state;
999 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
1000 {
1001
1002 /* there's data to read from the tx register, we entered debug state */
1003 xscale->handler_running = 1;
1004
1005 target->state = TARGET_HALTED;
1006
1007 /* process debug entry, fetching current mode regs */
1008 retval = xscale_debug_entry(target);
1009 }
1010 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1011 {
1012 LOG_USER("error while polling TX register, reset CPU");
 1013 			/* here we "lie" so GDB won't get stuck and a reset can be performed */
1014 target->state = TARGET_HALTED;
1015 }
1016
1017 /* debug_entry could have overwritten target state (i.e. immediate resume)
1018 * don't signal event handlers in that case
1019 */
1020 if (target->state != TARGET_HALTED)
1021 return ERROR_OK;
1022
1023 /* if target was running, signal that we halted
1024 * otherwise we reentered from debug execution */
1025 if (previous_state == TARGET_RUNNING)
1026 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1027 else
1028 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
1029 }
1030
1031 return retval;
1032 }
1033
1034 int xscale_debug_entry(target_t *target)
1035 {
1036 armv4_5_common_t *armv4_5 = target->arch_info;
1037 xscale_common_t *xscale = armv4_5->arch_info;
1038 u32 pc;
1039 u32 buffer[10];
1040 int i;
1041 int retval;
1042
1043 u32 moe;
1044
1045 /* clear external dbg break (will be written on next DCSR read) */
1046 xscale->external_debug_break = 0;
1047 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1048 return retval;
1049
1050 /* get r0, pc, r1 to r7 and cpsr */
1051 if ((retval=xscale_receive(target, buffer, 10))!=ERROR_OK)
1052 return retval;
1053
1054 /* move r0 from buffer to register cache */
1055 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
 1056 	armv4_5->core_cache->reg_list[0].dirty = 1;
 1057 	armv4_5->core_cache->reg_list[0].valid = 1;
1058 LOG_DEBUG("r0: 0x%8.8x", buffer[0]);
1059
1060 /* move pc from buffer to register cache */
1061 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
1062 armv4_5->core_cache->reg_list[15].dirty = 1;
1063 armv4_5->core_cache->reg_list[15].valid = 1;
1064 LOG_DEBUG("pc: 0x%8.8x", buffer[1]);
1065
1066 /* move data from buffer to register cache */
1067 for (i = 1; i <= 7; i++)
1068 {
1069 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
1070 armv4_5->core_cache->reg_list[i].dirty = 1;
1071 armv4_5->core_cache->reg_list[i].valid = 1;
1072 LOG_DEBUG("r%i: 0x%8.8x", i, buffer[i + 1]);
1073 }
1074
1075 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
1076 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
1077 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
1078 LOG_DEBUG("cpsr: 0x%8.8x", buffer[9]);
1079
1080 armv4_5->core_mode = buffer[9] & 0x1f;
1081 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
1082 {
1083 target->state = TARGET_UNKNOWN;
1084 LOG_ERROR("cpsr contains invalid mode value - communication failure");
1085 return ERROR_TARGET_FAILURE;
1086 }
1087 LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
1088
1089 if (buffer[9] & 0x20)
1090 armv4_5->core_state = ARMV4_5_STATE_THUMB;
1091 else
1092 armv4_5->core_state = ARMV4_5_STATE_ARM;
1093
1094
1095 if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
1096 return ERROR_FAIL;
1097
1098 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1099 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
1100 {
1101 xscale_receive(target, buffer, 8);
1102 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1103 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
1104 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
1105 }
1106 else
1107 {
1108 /* r8 to r14, but no spsr */
1109 xscale_receive(target, buffer, 7);
1110 }
1111
1112 /* move data from buffer to register cache */
1113 for (i = 8; i <= 14; i++)
1114 {
1115 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
1116 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
1117 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
1118 }
1119
1120 /* examine debug reason */
1121 xscale_read_dcsr(target);
1122 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1123
1124 /* stored PC (for calculating fixup) */
1125 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1126
1127 switch (moe)
1128 {
1129 case 0x0: /* Processor reset */
1130 target->debug_reason = DBG_REASON_DBGRQ;
1131 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1132 pc -= 4;
1133 break;
1134 case 0x1: /* Instruction breakpoint hit */
1135 target->debug_reason = DBG_REASON_BREAKPOINT;
1136 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1137 pc -= 4;
1138 break;
1139 case 0x2: /* Data breakpoint hit */
1140 target->debug_reason = DBG_REASON_WATCHPOINT;
1141 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1142 pc -= 4;
1143 break;
1144 case 0x3: /* BKPT instruction executed */
1145 target->debug_reason = DBG_REASON_BREAKPOINT;
1146 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1147 pc -= 4;
1148 break;
1149 case 0x4: /* Ext. debug event */
1150 target->debug_reason = DBG_REASON_DBGRQ;
1151 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1152 pc -= 4;
1153 break;
 1154 		case 0x5: /* Vector trap occurred */
1155 target->debug_reason = DBG_REASON_BREAKPOINT;
1156 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1157 pc -= 4;
1158 break;
1159 case 0x6: /* Trace buffer full break */
1160 target->debug_reason = DBG_REASON_DBGRQ;
1161 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1162 pc -= 4;
1163 break;
1164 case 0x7: /* Reserved */
1165 default:
1166 LOG_ERROR("Method of Entry is 'Reserved'");
1167 exit(-1);
1168 break;
1169 }
1170
1171 /* apply PC fixup */
1172 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1173
1174 /* on the first debug entry, identify cache type */
1175 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1176 {
1177 u32 cache_type_reg;
1178
1179 /* read cp15 cache type register */
1180 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1181 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1182
1183 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1184 }
1185
1186 /* examine MMU and Cache settings */
1187 /* read cp15 control register */
1188 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1189 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1190 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1191 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1192 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1193
1194 /* tracing enabled, read collected trace data */
1195 if (xscale->trace.buffer_enabled)
1196 {
1197 xscale_read_trace(target);
1198 xscale->trace.buffer_fill--;
1199
1200 /* resume if we're still collecting trace data */
1201 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1202 && (xscale->trace.buffer_fill > 0))
1203 {
1204 xscale_resume(target, 1, 0x0, 1, 0);
1205 }
1206 else
1207 {
1208 xscale->trace.buffer_enabled = 0;
1209 }
1210 }
1211
1212 return ERROR_OK;
1213 }
1214
1215 int xscale_halt(target_t *target)
1216 {
1217 armv4_5_common_t *armv4_5 = target->arch_info;
1218 xscale_common_t *xscale = armv4_5->arch_info;
1219
1220 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1221
1222 if (target->state == TARGET_HALTED)
1223 {
1224 LOG_DEBUG("target was already halted");
1225 return ERROR_OK;
1226 }
1227 else if (target->state == TARGET_UNKNOWN)
1228 {
 1229 		/* this must not happen for an xscale target */
1230 LOG_ERROR("target was in unknown state when halt was requested");
1231 return ERROR_TARGET_INVALID;
1232 }
1233 else if (target->state == TARGET_RESET)
1234 {
1235 LOG_DEBUG("target->state == TARGET_RESET");
1236 }
1237 else
1238 {
1239 /* assert external dbg break */
1240 xscale->external_debug_break = 1;
1241 xscale_read_dcsr(target);
1242
1243 target->debug_reason = DBG_REASON_DBGRQ;
1244 }
1245
1246 return ERROR_OK;
1247 }
1248
1249 int xscale_enable_single_step(struct target_s *target, u32 next_pc)
1250 {
1251 armv4_5_common_t *armv4_5 = target->arch_info;
1252 xscale_common_t *xscale= armv4_5->arch_info;
1253 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1254
1255 if (xscale->ibcr0_used)
1256 {
1257 breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1258
1259 if (ibcr0_bp)
1260 {
1261 xscale_unset_breakpoint(target, ibcr0_bp);
1262 }
1263 else
1264 {
1265 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1266 exit(-1);
1267 }
1268 }
1269
1270 xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1271
1272 return ERROR_OK;
1273 }
1274
1275 int xscale_disable_single_step(struct target_s *target)
1276 {
1277 armv4_5_common_t *armv4_5 = target->arch_info;
1278 xscale_common_t *xscale= armv4_5->arch_info;
1279 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1280
1281 xscale_set_reg_u32(ibcr0, 0x0);
1282
1283 return ERROR_OK;
1284 }
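/* Note: the XScale has no dedicated single-step control, so stepping is emulated
 * with instruction breakpoint register IBCR0 - xscale_enable_single_step() arms it
 * on the predicted next PC (bit 0 is the enable bit), and it is cleared again once
 * the debug handler re-enters.  The predicted address comes from arm_simulate_step()
 * in xscale_resume() and xscale_step().
 */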
1285
1286 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
1287 {
1288 armv4_5_common_t *armv4_5 = target->arch_info;
1289 xscale_common_t *xscale= armv4_5->arch_info;
1290 breakpoint_t *breakpoint = target->breakpoints;
1291
1292 u32 current_pc;
1293
1294 int retval;
1295 int i;
1296
1297 LOG_DEBUG("-");
1298
1299 if (target->state != TARGET_HALTED)
1300 {
1301 LOG_WARNING("target not halted");
1302 return ERROR_TARGET_NOT_HALTED;
1303 }
1304
1305 if (!debug_execution)
1306 {
1307 target_free_all_working_areas(target);
1308 }
1309
1310 /* update vector tables */
1311 if ((retval=xscale_update_vectors(target))!=ERROR_OK)
1312 return retval;
1313
1314 /* current = 1: continue on current pc, otherwise continue at <address> */
1315 if (!current)
1316 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1317
1318 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1319
1320 /* if we're at the reset vector, we have to simulate the branch */
1321 if (current_pc == 0x0)
1322 {
1323 arm_simulate_step(target, NULL);
1324 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1325 }
1326
1327 /* the front-end may request us not to handle breakpoints */
1328 if (handle_breakpoints)
1329 {
1330 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1331 {
1332 u32 next_pc;
1333
1334 /* there's a breakpoint at the current PC, we have to step over it */
1335 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1336 xscale_unset_breakpoint(target, breakpoint);
1337
1338 /* calculate PC of next instruction */
1339 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1340 {
1341 u32 current_opcode;
1342 target_read_u32(target, current_pc, &current_opcode);
1343 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1344 }
1345
1346 LOG_DEBUG("enable single-step");
1347 xscale_enable_single_step(target, next_pc);
1348
1349 /* restore banked registers */
1350 xscale_restore_context(target);
1351
1352 /* send resume request (command 0x30 or 0x31)
1353 * clean the trace buffer if it is to be enabled (0x62) */
1354 if (xscale->trace.buffer_enabled)
1355 {
1356 xscale_send_u32(target, 0x62);
1357 xscale_send_u32(target, 0x31);
1358 }
1359 else
1360 xscale_send_u32(target, 0x30);
1361
1362 /* send CPSR */
1363 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1364 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1365
1366 for (i = 7; i >= 0; i--)
1367 {
1368 /* send register */
1369 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1370 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1371 }
1372
1373 /* send PC */
1374 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1375 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1376
1377 /* wait for and process debug entry */
1378 xscale_debug_entry(target);
1379
1380 LOG_DEBUG("disable single-step");
1381 xscale_disable_single_step(target);
1382
1383 LOG_DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
1384 xscale_set_breakpoint(target, breakpoint);
1385 }
1386 }
1387
1388 /* enable any pending breakpoints and watchpoints */
1389 xscale_enable_breakpoints(target);
1390 xscale_enable_watchpoints(target);
1391
1392 /* restore banked registers */
1393 xscale_restore_context(target);
1394
1395 /* send resume request (command 0x30 or 0x31)
1396 * clean the trace buffer if it is to be enabled (0x62) */
1397 if (xscale->trace.buffer_enabled)
1398 {
1399 xscale_send_u32(target, 0x62);
1400 xscale_send_u32(target, 0x31);
1401 }
1402 else
1403 xscale_send_u32(target, 0x30);
1404
1405 /* send CPSR */
1406 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1407 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1408
1409 for (i = 7; i >= 0; i--)
1410 {
1411 /* send register */
1412 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1413 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1414 }
1415
1416 /* send PC */
1417 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1418 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1419
1420 target->debug_reason = DBG_REASON_NOTHALTED;
1421
1422 if (!debug_execution)
1423 {
1424 /* registers are now invalid */
1425 armv4_5_invalidate_core_regs(target);
1426 target->state = TARGET_RUNNING;
1427 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1428 }
1429 else
1430 {
1431 target->state = TARGET_DEBUG_RUNNING;
1432 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1433 }
1434
1435 LOG_DEBUG("target resumed");
1436
1437 xscale->handler_running = 1;
1438
1439 return ERROR_OK;
1440 }
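/* Note: the resume handshake with the debug handler used above (and again in
 * xscale_step() below) is: command 0x62 to clean the trace buffer followed by 0x31
 * when tracing is enabled, otherwise plain 0x30; then the CPSR, r7 down to r0 and
 * finally the PC, each transferred with xscale_send_u32().
 */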
1441
1442 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
1443 {
1444 armv4_5_common_t *armv4_5 = target->arch_info;
1445 xscale_common_t *xscale = armv4_5->arch_info;
1446 breakpoint_t *breakpoint = target->breakpoints;
1447
1448 u32 current_pc, next_pc;
1449 int i;
1450 int retval;
1451
1452 if (target->state != TARGET_HALTED)
1453 {
1454 LOG_WARNING("target not halted");
1455 return ERROR_TARGET_NOT_HALTED;
1456 }
1457
1458 /* current = 1: continue on current pc, otherwise continue at <address> */
1459 if (!current)
1460 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1461
1462 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1463
1464 /* if we're at the reset vector, we have to simulate the step */
1465 if (current_pc == 0x0)
1466 {
1467 arm_simulate_step(target, NULL);
1468 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1469
1470 target->debug_reason = DBG_REASON_SINGLESTEP;
1471 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1472
1473 return ERROR_OK;
1474 }
1475
1476 /* the front-end may request us not to handle breakpoints */
1477 if (handle_breakpoints)
1478 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1479 {
1480 xscale_unset_breakpoint(target, breakpoint);
1481 }
1482
1483 target->debug_reason = DBG_REASON_SINGLESTEP;
1484
1485 /* calculate PC of next instruction */
1486 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1487 {
1488 u32 current_opcode;
1489 target_read_u32(target, current_pc, &current_opcode);
1490 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1491 }
1492
1493 LOG_DEBUG("enable single-step");
1494 xscale_enable_single_step(target, next_pc);
1495
1496 /* restore banked registers */
1497 xscale_restore_context(target);
1498
1499 /* send resume request (command 0x30 or 0x31)
1500 * clean the trace buffer if it is to be enabled (0x62) */
1501 if (xscale->trace.buffer_enabled)
1502 {
1503 xscale_send_u32(target, 0x62);
1504 xscale_send_u32(target, 0x31);
1505 }
1506 else
1507 xscale_send_u32(target, 0x30);
1508
1509 /* send CPSR */
1510 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1511 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1512
1513 for (i = 7; i >= 0; i--)
1514 {
1515 /* send register */
1516 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1517 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1518 }
1519
1520 /* send PC */
1521 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1522 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1523
1524 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1525
1526 /* registers are now invalid */
1527 armv4_5_invalidate_core_regs(target);
1528
1529 /* wait for and process debug entry */
1530 xscale_debug_entry(target);
1531
1532 LOG_DEBUG("disable single-step");
1533 xscale_disable_single_step(target);
1534
1535 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1536
1537 if (breakpoint)
1538 {
1539 xscale_set_breakpoint(target, breakpoint);
1540 }
1541
1542 LOG_DEBUG("target stepped");
1543
1544 return ERROR_OK;
1545
1546 }
1547
1548 int xscale_assert_reset(target_t *target)
1549 {
1550 armv4_5_common_t *armv4_5 = target->arch_info;
1551 xscale_common_t *xscale = armv4_5->arch_info;
1552
1553 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1554
1555 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
 1556 	 * end up in T-L-R, which would reset JTAG)
1557 */
1558 jtag_add_end_state(TAP_RTI);
1559 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
1560
1561 /* set Hold reset, Halt mode and Trap Reset */
1562 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1563 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1564 xscale_write_dcsr(target, 1, 0);
1565
1566 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1567 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, 0x7f);
1568 jtag_execute_queue();
1569
1570 /* assert reset */
1571 jtag_add_reset(0, 1);
1572
1573 /* sleep 1ms, to be sure we fulfill any requirements */
1574 jtag_add_sleep(1000);
1575 jtag_execute_queue();
1576
1577 target->state = TARGET_RESET;
1578
1579 return ERROR_OK;
1580 }
1581
1582 int xscale_deassert_reset(target_t *target)
1583 {
1584 armv4_5_common_t *armv4_5 = target->arch_info;
1585 xscale_common_t *xscale = armv4_5->arch_info;
1586
1587 fileio_t debug_handler;
1588 u32 address;
1589 u32 binary_size;
1590
1591 u32 buf_cnt;
1592 int i;
1593 int retval;
1594
1595 breakpoint_t *breakpoint = target->breakpoints;
1596
1597 LOG_DEBUG("-");
1598
1599 xscale->ibcr_available = 2;
1600 xscale->ibcr0_used = 0;
1601 xscale->ibcr1_used = 0;
1602
1603 xscale->dbr_available = 2;
1604 xscale->dbr0_used = 0;
1605 xscale->dbr1_used = 0;
1606
1607 /* mark all hardware breakpoints as unset */
1608 while (breakpoint)
1609 {
1610 if (breakpoint->type == BKPT_HARD)
1611 {
1612 breakpoint->set = 0;
1613 }
1614 breakpoint = breakpoint->next;
1615 }
1616
1617 if (!xscale->handler_installed)
1618 {
1619 /* release SRST */
1620 jtag_add_reset(0, 0);
1621
1622 /* wait 300ms; 150 and 100ms were not enough */
1623 jtag_add_sleep(300*1000);
1624
1625 jtag_add_runtest(2030, TAP_RTI);
1626 jtag_execute_queue();
1627
1628 /* set Hold reset, Halt mode and Trap Reset */
1629 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1630 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1631 xscale_write_dcsr(target, 1, 0);
1632
1633 /* Load debug handler */
1634 if (fileio_open(&debug_handler, "xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
1635 {
1636 return ERROR_OK;
1637 }
1638
1639 if ((binary_size = debug_handler.size) % 4)
1640 {
1641 LOG_ERROR("debug_handler.bin: size not a multiple of 4");
1642 exit(-1);
1643 }
1644
1645 if (binary_size > 0x800)
1646 {
1647 LOG_ERROR("debug_handler.bin: larger than 2kb");
1648 exit(-1);
1649 }
1650
1651 binary_size = CEIL(binary_size, 32) * 32;
1652
1653 address = xscale->handler_address;
1654 while (binary_size > 0)
1655 {
1656 u32 cache_line[8];
1657 u8 buffer[32];
1658
1659 if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
1660 {
 1661 				LOG_ERROR("reading debug handler failed");
 				fileio_close(&debug_handler);
 				return retval;
1662 }
1663
1664 for (i = 0; i < buf_cnt; i += 4)
1665 {
1666 /* convert LE buffer to host-endian u32 */
1667 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1668 }
1669
1670 for (; buf_cnt < 32; buf_cnt += 4)
1671 {
1672 cache_line[buf_cnt / 4] = 0xe1a08008;
1673 }
1674
1675 /* only load addresses other than the reset vectors */
1676 if ((address % 0x400) != 0x0)
1677 {
1678 xscale_load_ic(target, 1, address, cache_line);
1679 }
1680
1681 address += buf_cnt;
1682 binary_size -= buf_cnt;
 1683 		}
1684
1685 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
1686 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
1687
1688 jtag_add_runtest(30, TAP_RTI);
1689
1690 jtag_add_sleep(100000);
1691
1692 /* set Hold reset, Halt mode and Trap Reset */
1693 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1694 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1695 xscale_write_dcsr(target, 1, 0);
1696
1697 /* clear Hold reset to let the target run (should enter debug handler) */
1698 xscale_write_dcsr(target, 0, 1);
1699 target->state = TARGET_RUNNING;
1700
1701 if ((target->reset_mode != RESET_HALT) && (target->reset_mode != RESET_INIT))
1702 {
1703 jtag_add_sleep(10000);
1704
1705 /* we should have entered debug now */
1706 xscale_debug_entry(target);
1707 target->state = TARGET_HALTED;
1708
1709 /* resume the target */
1710 xscale_resume(target, 1, 0x0, 1, 0);
1711 }
1712
1713 fileio_close(&debug_handler);
1714 }
1715 else
1716 {
1717 jtag_add_reset(0, 0);
1718 }
1719
1720
1721 return ERROR_OK;
1722 }
1723
1724 int xscale_soft_reset_halt(struct target_s *target)
1725 {
1726
1727 return ERROR_OK;
1728 }
1729
1730 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
1731 {
1732
1733 return ERROR_OK;
1734 }
1735
1736 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
1737 {
1738
1739 return ERROR_OK;
1740 }
1741
1742 int xscale_full_context(target_t *target)
1743 {
1744 armv4_5_common_t *armv4_5 = target->arch_info;
1745
1746 u32 *buffer;
1747
1748 int i, j;
1749
1750 LOG_DEBUG("-");
1751
1752 if (target->state != TARGET_HALTED)
1753 {
1754 LOG_WARNING("target not halted");
1755 return ERROR_TARGET_NOT_HALTED;
1756 }
1757
1758 buffer = malloc(4 * 8);
1759
1760 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1761 * we can't enter User mode on an XScale (unpredictable),
1762 * but User shares registers with SYS
1763 */
1764 for(i = 1; i < 7; i++)
1765 {
1766 int valid = 1;
1767
1768 /* check if there are invalid registers in the current mode
1769 */
1770 for (j = 0; j <= 16; j++)
1771 {
1772 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1773 valid = 0;
1774 }
1775
1776 if (!valid)
1777 {
1778 u32 tmp_cpsr;
1779
1780 /* request banked registers */
1781 xscale_send_u32(target, 0x0);
1782
1783 tmp_cpsr = 0x0;
1784 tmp_cpsr |= armv4_5_number_to_mode(i);
1785 tmp_cpsr |= 0xc0; /* I/F bits */
1786
1787 /* send CPSR for desired mode */
1788 xscale_send_u32(target, tmp_cpsr);
1789
1790 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1791 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1792 {
1793 xscale_receive(target, buffer, 8);
 1794 				buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1795 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1796 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1797 }
1798 else
1799 {
1800 xscale_receive(target, buffer, 7);
1801 }
1802
1803 /* move data from buffer to register cache */
1804 for (j = 8; j <= 14; j++)
1805 {
1806 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1807 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1808 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1809 }
1810 }
1811 }
1812
1813 free(buffer);
1814
1815 return ERROR_OK;
1816 }
1817
1818 int xscale_restore_context(target_t *target)
1819 {
1820 armv4_5_common_t *armv4_5 = target->arch_info;
1821
1822 int i, j;
1823
1824 LOG_DEBUG("-");
1825
1826 if (target->state != TARGET_HALTED)
1827 {
1828 LOG_WARNING("target not halted");
1829 return ERROR_TARGET_NOT_HALTED;
1830 }
1831
1832 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1833 * we can't enter User mode on an XScale (unpredictable),
1834 * but User shares registers with SYS
1835 */
1836 for(i = 1; i < 7; i++)
1837 {
1838 int dirty = 0;
1839
 1840 		/* check if there are dirty registers in the current mode
1841 */
1842 for (j = 8; j <= 14; j++)
1843 {
1844 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1845 dirty = 1;
1846 }
1847
1848 /* if not USR/SYS, check if the SPSR needs to be written */
1849 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1850 {
1851 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1852 dirty = 1;
1853 }
1854
1855 if (dirty)
1856 {
1857 u32 tmp_cpsr;
1858
1859 /* send banked registers */
1860 xscale_send_u32(target, 0x1);
1861
1862 tmp_cpsr = 0x0;
1863 tmp_cpsr |= armv4_5_number_to_mode(i);
1864 tmp_cpsr |= 0xc0; /* I/F bits */
1865
1866 /* send CPSR for desired mode */
1867 xscale_send_u32(target, tmp_cpsr);
1868
1869 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1870 for (j = 8; j <= 14; j++)
1871 {
 1872 				xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1873 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1874 }
1875
1876 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1877 {
 1878 				xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1879 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1880 }
1881 }
1882 }
1883
1884 return ERROR_OK;
1885 }
1886
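/* xscale_read_memory: read target memory through the debug handler; the host sends the
 * read command (0x1n, n = access size), the base address and the element count, then
 * receives one 32-bit word per element and repacks it into the caller's byte stream */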
1887 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1888 {
1889 armv4_5_common_t *armv4_5 = target->arch_info;
1890 xscale_common_t *xscale = armv4_5->arch_info;
1891 u32 *buf32;
1892 int i;
1893 int retval;
1894
1895 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1896
1897 if (target->state != TARGET_HALTED)
1898 {
1899 LOG_WARNING("target not halted");
1900 return ERROR_TARGET_NOT_HALTED;
1901 }
1902
1903 /* sanitize arguments */
1904 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1905 return ERROR_INVALID_ARGUMENTS;
1906
1907 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1908 return ERROR_TARGET_UNALIGNED_ACCESS;
1909
1910 /* send memory read request (command 0x1n, n: access size) */
1911 if ((retval=xscale_send_u32(target, 0x10 | size))!=ERROR_OK)
1912 return retval;
1913
1914 /* send base address for read request */
1915 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
1916 return retval;
1917
1918 /* send number of requested data words */
1919 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
1920 return retval;
1921
1922 /* receive data from target (count times 32-bit words in host endianness) */
1923 buf32 = malloc(4 * count);
1924 if ((retval=xscale_receive(target, buf32, count))!=ERROR_OK)
1925 return retval;
1926
1927 /* extract data from host-endian buffer into byte stream */
1928 for (i = 0; i < count; i++)
1929 {
1930 switch (size)
1931 {
1932 case 4:
1933 target_buffer_set_u32(target, buffer, buf32[i]);
1934 buffer += 4;
1935 break;
1936 case 2:
1937 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1938 buffer += 2;
1939 break;
1940 case 1:
1941 *buffer++ = buf32[i] & 0xff;
1942 break;
1943 default:
1944 LOG_ERROR("should never get here");
1945 exit(-1);
1946 }
1947 }
1948
1949 free(buf32);
1950
1951 /* examine DCSR, to see if Sticky Abort (SA) got set */
1952 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1953 return retval;
1954 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1955 {
1956 /* clear SA bit */
1957 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
1958 return retval;
1959
1960 return ERROR_TARGET_DATA_ABORT;
1961 }
1962
1963 return ERROR_OK;
1964 }
1965
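/* xscale_write_memory: write target memory through the debug handler; the host sends the
 * write command (0x2n, n = access size), the base address, the element count and the data,
 * then checks the DCSR Sticky Abort bit to detect a data abort on the target */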
1966 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1967 {
1968 armv4_5_common_t *armv4_5 = target->arch_info;
1969 xscale_common_t *xscale = armv4_5->arch_info;
1970 int retval;
1971
1972 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1973
1974 if (target->state != TARGET_HALTED)
1975 {
1976 LOG_WARNING("target not halted");
1977 return ERROR_TARGET_NOT_HALTED;
1978 }
1979
1980 /* sanitize arguments */
1981 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1982 return ERROR_INVALID_ARGUMENTS;
1983
1984 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1985 return ERROR_TARGET_UNALIGNED_ACCESS;
1986
1987 /* send memory write request (command 0x2n, n: access size) */
1988 if ((retval=xscale_send_u32(target, 0x20 | size))!=ERROR_OK)
1989 return retval;
1990
1991 /* send base address for write request */
1992 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
1993 return retval;
1994
1995 /* send number of requested data words to be written */
1996 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
1997 return retval;
1998
1999 /* send data from the byte-stream buffer to the target (old per-word path below is disabled) */
2000 #if 0
2001 for (i = 0; i < count; i++)
2002 {
2003 switch (size)
2004 {
2005 case 4:
2006 value = target_buffer_get_u32(target, buffer);
2007 xscale_send_u32(target, value);
2008 buffer += 4;
2009 break;
2010 case 2:
2011 value = target_buffer_get_u16(target, buffer);
2012 xscale_send_u32(target, value);
2013 buffer += 2;
2014 break;
2015 case 1:
2016 value = *buffer;
2017 xscale_send_u32(target, value);
2018 buffer += 1;
2019 break;
2020 default:
2021 LOG_ERROR("should never get here");
2022 exit(-1);
2023 }
2024 }
2025 #endif
2026 if ((retval=xscale_send(target, buffer, count, size))!=ERROR_OK)
2027 return retval;
2028
2029 /* examine DCSR, to see if Sticky Abort (SA) got set */
2030 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
2031 return retval;
2032 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2033 {
2034 /* clear SA bit */
2035 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
2036 return retval;
2037
2038 return ERROR_TARGET_DATA_ABORT;
2039 }
2040
2041 return ERROR_OK;
2042 }
2043
2044 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
2045 {
2046 return xscale_write_memory(target, address, 4, count, buffer);
2047 }
2048
2049 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum)
2050 {
2051 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2052 }
2053
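/* return the translation table base (CP15 register 2), fetched through the debug handler */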
2054 u32 xscale_get_ttb(target_t *target)
2055 {
2056 armv4_5_common_t *armv4_5 = target->arch_info;
2057 xscale_common_t *xscale = armv4_5->arch_info;
2058 u32 ttb;
2059
2060 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2061 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2062
2063 return ttb;
2064 }
2065
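/* disable MMU and/or caches: the DCache is cleaned (0x50) and invalidated (0x51), the
 * ICache invalidated (0x52), the corresponding CP15 control bits are cleared, and a
 * CPWAIT (0x53) drains outstanding CP15 operations */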
2066 void xscale_disable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2067 {
2068 armv4_5_common_t *armv4_5 = target->arch_info;
2069 xscale_common_t *xscale = armv4_5->arch_info;
2070 u32 cp15_control;
2071
2072 /* read cp15 control register */
2073 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2074 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2075
2076 if (mmu)
2077 cp15_control &= ~0x1U;
2078
2079 if (d_u_cache)
2080 {
2081 /* clean DCache */
2082 xscale_send_u32(target, 0x50);
2083 xscale_send_u32(target, xscale->cache_clean_address);
2084
2085 /* invalidate DCache */
2086 xscale_send_u32(target, 0x51);
2087
2088 cp15_control &= ~0x4U;
2089 }
2090
2091 if (i_cache)
2092 {
2093 /* invalidate ICache */
2094 xscale_send_u32(target, 0x52);
2095 cp15_control &= ~0x1000U;
2096 }
2097
2098 /* write new cp15 control register */
2099 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2100
2101 /* execute cpwait to ensure outstanding operations complete */
2102 xscale_send_u32(target, 0x53);
2103 }
2104
2105 void xscale_enable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2106 {
2107 armv4_5_common_t *armv4_5 = target->arch_info;
2108 xscale_common_t *xscale = armv4_5->arch_info;
2109 u32 cp15_control;
2110
2111 /* read cp15 control register */
2112 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2113 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2114
2115 if (mmu)
2116 cp15_control |= 0x1U;
2117
2118 if (d_u_cache)
2119 cp15_control |= 0x4U;
2120
2121 if (i_cache)
2122 cp15_control |= 0x1000U;
2123
2124 /* write new cp15 control register */
2125 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2126
2127 /* execute cpwait to ensure outstanding operations complete */
2128 xscale_send_u32(target, 0x53);
2129 }
2130
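/* activate a breakpoint: hardware breakpoints are programmed into IBCR0/IBCR1, software
 * breakpoints replace the original instruction (saved in orig_instr) with a BKPT opcode */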
2131 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2132 {
2133 armv4_5_common_t *armv4_5 = target->arch_info;
2134 xscale_common_t *xscale = armv4_5->arch_info;
2135
2136 if (target->state != TARGET_HALTED)
2137 {
2138 LOG_WARNING("target not halted");
2139 return ERROR_TARGET_NOT_HALTED;
2140 }
2141
2142 if (xscale->force_hw_bkpts)
2143 breakpoint->type = BKPT_HARD;
2144
2145 if (breakpoint->set)
2146 {
2147 LOG_WARNING("breakpoint already set");
2148 return ERROR_OK;
2149 }
2150
2151 if (breakpoint->type == BKPT_HARD)
2152 {
2153 u32 value = breakpoint->address | 1;
2154 if (!xscale->ibcr0_used)
2155 {
2156 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2157 xscale->ibcr0_used = 1;
2158 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2159 }
2160 else if (!xscale->ibcr1_used)
2161 {
2162 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2163 xscale->ibcr1_used = 1;
2164 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2165 }
2166 else
2167 {
2168 LOG_ERROR("BUG: no hardware comparator available");
2169 return ERROR_OK;
2170 }
2171 }
2172 else if (breakpoint->type == BKPT_SOFT)
2173 {
2174 if (breakpoint->length == 4)
2175 {
2176 /* keep the original instruction in target endianness */
2177 target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2178 /* write the ARM breakpoint instruction in target endianness (xscale->arm_bkpt is host endian) */
2179 target_write_u32(target, breakpoint->address, xscale->arm_bkpt);
2180 }
2181 else
2182 {
2183 /* keep the original instruction in target endianness */
2184 target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2185 /* write the Thumb breakpoint instruction in target endianness (xscale->thumb_bkpt is host endian) */
2186 target_write_u16(target, breakpoint->address, xscale->thumb_bkpt);
2187 }
2188 breakpoint->set = 1;
2189 }
2190
2191 return ERROR_OK;
2192
2193 }
2194
2195 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2196 {
2197 armv4_5_common_t *armv4_5 = target->arch_info;
2198 xscale_common_t *xscale = armv4_5->arch_info;
2199
2200 if (target->state != TARGET_HALTED)
2201 {
2202 LOG_WARNING("target not halted");
2203 return ERROR_TARGET_NOT_HALTED;
2204 }
2205
2206 if (xscale->force_hw_bkpts)
2207 {
2208 LOG_DEBUG("forcing use of hardware breakpoint at address 0x%8.8x", breakpoint->address);
2209 breakpoint->type = BKPT_HARD;
2210 }
2211
2212 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2213 {
2214 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2215 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2216 }
2217 
2218 /* only a hardware breakpoint consumes an IBCR comparator */
2219 if (breakpoint->type == BKPT_HARD)
2220 xscale->ibcr_available--;
2221
2222 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2223 {
2224 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2225 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2226 }
2227
2228 return ERROR_OK;
2229 }
2230
2231 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2232 {
2233 armv4_5_common_t *armv4_5 = target->arch_info;
2234 xscale_common_t *xscale = armv4_5->arch_info;
2235
2236 if (target->state != TARGET_HALTED)
2237 {
2238 LOG_WARNING("target not halted");
2239 return ERROR_TARGET_NOT_HALTED;
2240 }
2241
2242 if (!breakpoint->set)
2243 {
2244 LOG_WARNING("breakpoint not set");
2245 return ERROR_OK;
2246 }
2247
2248 if (breakpoint->type == BKPT_HARD)
2249 {
2250 if (breakpoint->set == 1)
2251 {
2252 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2253 xscale->ibcr0_used = 0;
2254 }
2255 else if (breakpoint->set == 2)
2256 {
2257 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2258 xscale->ibcr1_used = 0;
2259 }
2260 breakpoint->set = 0;
2261 }
2262 else
2263 {
2264 /* restore original instruction (kept in target endianness) */
2265 if (breakpoint->length == 4)
2266 {
2267 target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2268 }
2269 else
2270 {
2271 target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2272 }
2273 breakpoint->set = 0;
2274 }
2275
2276 return ERROR_OK;
2277 }
2278
2279 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2280 {
2281 armv4_5_common_t *armv4_5 = target->arch_info;
2282 xscale_common_t *xscale = armv4_5->arch_info;
2283
2284 if (target->state != TARGET_HALTED)
2285 {
2286 LOG_WARNING("target not halted");
2287 return ERROR_TARGET_NOT_HALTED;
2288 }
2289
2290 if (breakpoint->set)
2291 {
2292 xscale_unset_breakpoint(target, breakpoint);
2293 }
2294
2295 if (breakpoint->type == BKPT_HARD)
2296 xscale->ibcr_available++;
2297
2298 return ERROR_OK;
2299 }
2300
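/* activate a watchpoint: program DBR0 or DBR1 with the address and set the matching
 * enable bits (read, write or any access) in DBCON */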
2301 int xscale_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2302 {
2303 armv4_5_common_t *armv4_5 = target->arch_info;
2304 xscale_common_t *xscale = armv4_5->arch_info;
2305 u8 enable=0;
2306 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2307 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2308
2309 if (target->state != TARGET_HALTED)
2310 {
2311 LOG_WARNING("target not halted");
2312 return ERROR_TARGET_NOT_HALTED;
2313 }
2314
2315 xscale_get_reg(dbcon);
2316
2317 switch (watchpoint->rw)
2318 {
2319 case WPT_READ:
2320 enable = 0x3;
2321 break;
2322 case WPT_ACCESS:
2323 enable = 0x2;
2324 break;
2325 case WPT_WRITE:
2326 enable = 0x1;
2327 break;
2328 default:
2329 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2330 }
2331
2332 if (!xscale->dbr0_used)
2333 {
2334 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2335 dbcon_value |= enable;
2336 xscale_set_reg_u32(dbcon, dbcon_value);
2337 watchpoint->set = 1;
2338 xscale->dbr0_used = 1;
2339 }
2340 else if (!xscale->dbr1_used)
2341 {
2342 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2343 dbcon_value |= enable << 2;
2344 xscale_set_reg_u32(dbcon, dbcon_value);
2345 watchpoint->set = 2;
2346 xscale->dbr1_used = 1;
2347 }
2348 else
2349 {
2350 LOG_ERROR("BUG: no hardware comparator available");
2351 return ERROR_OK;
2352 }
2353
2354 return ERROR_OK;
2355 }
2356
2357 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2358 {
2359 armv4_5_common_t *armv4_5 = target->arch_info;
2360 xscale_common_t *xscale = armv4_5->arch_info;
2361
2362 if (target->state != TARGET_HALTED)
2363 {
2364 LOG_WARNING("target not halted");
2365 return ERROR_TARGET_NOT_HALTED;
2366 }
2367
2368 if (xscale->dbr_available < 1)
2369 {
2370 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2371 }
2372
2373 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2374 {
2375 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2376 }
2377
2378 xscale->dbr_available--;
2379
2380 return ERROR_OK;
2381 }
2382
2383 int xscale_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2384 {
2385 armv4_5_common_t *armv4_5 = target->arch_info;
2386 xscale_common_t *xscale = armv4_5->arch_info;
2387 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2388 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2389
2390 if (target->state != TARGET_HALTED)
2391 {
2392 LOG_WARNING("target not halted");
2393 return ERROR_TARGET_NOT_HALTED;
2394 }
2395
2396 if (!watchpoint->set)
2397 {
2398 LOG_WARNING("watchpoint not set");
2399 return ERROR_OK;
2400 }
2401
2402 if (watchpoint->set == 1)
2403 {
2404 dbcon_value &= ~0x3;
2405 xscale_set_reg_u32(dbcon, dbcon_value);
2406 xscale->dbr0_used = 0;
2407 }
2408 else if (watchpoint->set == 2)
2409 {
2410 dbcon_value &= ~0xc;
2411 xscale_set_reg_u32(dbcon, dbcon_value);
2412 xscale->dbr1_used = 0;
2413 }
2414 watchpoint->set = 0;
2415
2416 return ERROR_OK;
2417 }
2418
2419 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2420 {
2421 armv4_5_common_t *armv4_5 = target->arch_info;
2422 xscale_common_t *xscale = armv4_5->arch_info;
2423
2424 if (target->state != TARGET_HALTED)
2425 {
2426 LOG_WARNING("target not halted");
2427 return ERROR_TARGET_NOT_HALTED;
2428 }
2429
2430 if (watchpoint->set)
2431 {
2432 xscale_unset_watchpoint(target, watchpoint);
2433 }
2434
2435 xscale->dbr_available++;
2436
2437 return ERROR_OK;
2438 }
2439
2440 void xscale_enable_watchpoints(struct target_s *target)
2441 {
2442 watchpoint_t *watchpoint = target->watchpoints;
2443
2444 while (watchpoint)
2445 {
2446 if (watchpoint->set == 0)
2447 xscale_set_watchpoint(target, watchpoint);
2448 watchpoint = watchpoint->next;
2449 }
2450 }
2451
2452 void xscale_enable_breakpoints(struct target_s *target)
2453 {
2454 breakpoint_t *breakpoint = target->breakpoints;
2455
2456 /* set any pending breakpoints */
2457 while (breakpoint)
2458 {
2459 if (breakpoint->set == 0)
2460 xscale_set_breakpoint(target, breakpoint);
2461 breakpoint = breakpoint->next;
2462 }
2463 }
2464
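/* read accessor for the XScale register cache (registered together with xscale_set_reg) */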
2465 int xscale_get_reg(reg_t *reg)
2466 {
2467 xscale_reg_t *arch_info = reg->arch_info;
2468 target_t *target = arch_info->target;
2469 armv4_5_common_t *armv4_5 = target->arch_info;
2470 xscale_common_t *xscale = armv4_5->arch_info;
2471
2472 /* DCSR, TX and RX are accessible via JTAG */
2473 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2474 {
2475 return xscale_read_dcsr(arch_info->target);
2476 }
2477 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2478 {
2479 /* 1 = consume register content */
2480 return xscale_read_tx(arch_info->target, 1);
2481 }
2482 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2483 {
2484 /* can't read from RX register (host -> debug handler) */
2485 return ERROR_OK;
2486 }
2487 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2488 {
2489 /* can't (explicitly) read from TXRXCTRL register */
2490 return ERROR_OK;
2491 }
2492 else /* Other DBG registers have to be transferred by the debug handler */
2493 {
2494 /* send CP read request (command 0x40) */
2495 xscale_send_u32(target, 0x40);
2496
2497 /* send CP register number */
2498 xscale_send_u32(target, arch_info->dbg_handler_number);
2499
2500 /* read register value */
2501 xscale_read_tx(target, 1);
2502 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2503
2504 reg->dirty = 0;
2505 reg->valid = 1;
2506 }
2507
2508 return ERROR_OK;
2509 }
2510
2511 int xscale_set_reg(reg_t *reg, u8* buf)
2512 {
2513 xscale_reg_t *arch_info = reg->arch_info;
2514 target_t *target = arch_info->target;
2515 armv4_5_common_t *armv4_5 = target->arch_info;
2516 xscale_common_t *xscale = armv4_5->arch_info;
2517 u32 value = buf_get_u32(buf, 0, 32);
2518
2519 /* DCSR, TX and RX are accessible via JTAG */
2520 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2521 {
2522 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2523 return xscale_write_dcsr(arch_info->target, -1, -1);
2524 }
2525 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2526 {
2527 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2528 return xscale_write_rx(arch_info->target);
2529 }
2530 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2531 {
2532 /* can't write to TX register (debug-handler -> host) */
2533 return ERROR_OK;
2534 }
2535 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2536 {
2537 /* can't (explicitly) write to TXRXCTRL register */
2538 return ERROR_OK;
2539 }
2540 else /* Other DBG registers have to be transferred by the debug handler */
2541 {
2542 /* send CP write request (command 0x41) */
2543 xscale_send_u32(target, 0x41);
2544
2545 /* send CP register number */
2546 xscale_send_u32(target, arch_info->dbg_handler_number);
2547
2548 /* send CP register value */
2549 xscale_send_u32(target, value);
2550 buf_set_u32(reg->value, 0, 32, value);
2551 }
2552
2553 return ERROR_OK;
2554 }
2555
2556 /* convenience wrapper to access XScale specific registers */
2557 int xscale_set_reg_u32(reg_t *reg, u32 value)
2558 {
2559 u8 buf[4];
2560
2561 buf_set_u32(buf, 0, 32, value);
2562
2563 return xscale_set_reg(reg, buf);
2564 }
2565
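/* write DCSR through the debug handler (CP write command 0x41) instead of the JTAG DCSR scan chain */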
2566 int xscale_write_dcsr_sw(target_t *target, u32 value)
2567 {
2568 /* get pointers to arch-specific information */
2569 armv4_5_common_t *armv4_5 = target->arch_info;
2570 xscale_common_t *xscale = armv4_5->arch_info;
2571 reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2572 xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
2573
2574 /* send CP write request (command 0x41) */
2575 xscale_send_u32(target, 0x41);
2576
2577 /* send CP register number */
2578 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2579
2580 /* send CP register value */
2581 xscale_send_u32(target, value);
2582 buf_set_u32(dcsr->value, 0, 32, value);
2583
2584 return ERROR_OK;
2585 }
2586
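/* read the 256-entry trace buffer plus the two checkpoint registers from the debug handler
 * (command 0x61), mark which entries carry address bytes, and append the result to the
 * target's list of trace data */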
2587 int xscale_read_trace(target_t *target)
2588 {
2589 /* get pointers to arch-specific information */
2590 armv4_5_common_t *armv4_5 = target->arch_info;
2591 xscale_common_t *xscale = armv4_5->arch_info;
2592 xscale_trace_data_t **trace_data_p;
2593
2594 /* 258 words from debug handler
2595 * 256 trace buffer entries
2596 * 2 checkpoint addresses
2597 */
2598 u32 trace_buffer[258];
2599 int is_address[256];
2600 int i, j;
2601
2602 if (target->state != TARGET_HALTED)
2603 {
2604 LOG_WARNING("target must be stopped to read trace data");
2605 return ERROR_TARGET_NOT_HALTED;
2606 }
2607
2608 /* send read trace buffer command (command 0x61) */
2609 xscale_send_u32(target, 0x61);
2610
2611 /* receive trace buffer content */
2612 xscale_receive(target, trace_buffer, 258);
2613
2614 /* parse buffer backwards to identify address entries */
2615 for (i = 255; i >= 0; i--)
2616 {
2617 is_address[i] = 0;
2618 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2619 ((trace_buffer[i] & 0xf0) == 0xd0))
2620 {
2621 if (i >= 3)
2622 is_address[--i] = 1;
2623 if (i >= 2)
2624 is_address[--i] = 1;
2625 if (i >= 1)
2626 is_address[--i] = 1;
2627 if (i >= 0)
2628 is_address[--i] = 1;
2629 }
2630 }
2631
2632
2633 /* search first non-zero entry */
2634 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2635 ;
2636
2637 if (j == 256)
2638 {
2639 LOG_DEBUG("no trace data collected");
2640 return ERROR_XSCALE_NO_TRACE_DATA;
2641 }
2642
2643 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2644 ;
2645
2646 *trace_data_p = malloc(sizeof(xscale_trace_data_t));
2647 (*trace_data_p)->next = NULL;
2648 (*trace_data_p)->chkpt0 = trace_buffer[256];
2649 (*trace_data_p)->chkpt1 = trace_buffer[257];
2650 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2651 (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
2652 (*trace_data_p)->depth = 256 - j;
2653
2654 for (i = j; i < 256; i++)
2655 {
2656 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2657 if (is_address[i])
2658 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2659 else
2660 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2661 }
2662
2663 return ERROR_OK;
2664 }
2665
2666 int xscale_read_instruction(target_t *target, arm_instruction_t *instruction)
2667 {
2668 /* get pointers to arch-specific information */
2669 armv4_5_common_t *armv4_5 = target->arch_info;
2670 xscale_common_t *xscale = armv4_5->arch_info;
2671 int i;
2672 int section = -1;
2673 u32 size_read;
2674 u32 opcode;
2675 int retval;
2676
2677 if (!xscale->trace.image)
2678 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2679
2680 /* search for the section the current instruction belongs to */
2681 for (i = 0; i < xscale->trace.image->num_sections; i++)
2682 {
2683 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2684 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2685 {
2686 section = i;
2687 break;
2688 }
2689 }
2690
2691 if (section == -1)
2692 {
2693 /* current instruction couldn't be found in the image */
2694 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2695 }
2696
2697 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2698 {
2699 u8 buf[4];
2700 if ((retval = image_read_section(xscale->trace.image, section,
2701 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2702 4, buf, &size_read)) != ERROR_OK)
2703 {
2704 LOG_ERROR("error while reading instruction: %i", retval);
2705 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2706 }
2707 opcode = target_buffer_get_u32(target, buf);
2708 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2709 }
2710 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2711 {
2712 u8 buf[2];
2713 if ((retval = image_read_section(xscale->trace.image, section,
2714 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2715 2, buf, &size_read)) != ERROR_OK)
2716 {
2717 LOG_ERROR("error while reading instruction: %i", retval);
2718 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2719 }
2720 opcode = target_buffer_get_u16(target, buf);
2721 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2722 }
2723 else
2724 {
2725 LOG_ERROR("BUG: unknown core state encountered");
2726 exit(-1);
2727 }
2728
2729 return ERROR_OK;
2730 }
2731
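/* reassemble an indirect-branch target address from the four trace entries preceding
 * message i (least significant byte closest to the message); returns -1 if fewer than
 * four entries are available */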
2732 int xscale_branch_address(xscale_trace_data_t *trace_data, int i, u32 *target)
2733 {
2734 /* if there are fewer than four entries prior to the indirect branch message
2735 * we can't extract the address */
2736 if (i < 4)
2737 {
2738 return -1;
2739 }
2740
2741 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2742 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2743
2744 return 0;
2745 }
2746
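/* walk the collected trace data, count the instructions executed per trace message and
 * print the reconstructed instruction stream using the loaded trace image */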
2747 int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
2748 {
2749 /* get pointers to arch-specific information */
2750 armv4_5_common_t *armv4_5 = target->arch_info;
2751 xscale_common_t *xscale = armv4_5->arch_info;
2752 int next_pc_ok = 0;
2753 u32 next_pc = 0x0;
2754 xscale_trace_data_t *trace_data = xscale->trace.data;
2755 int retval;
2756
2757 while (trace_data)
2758 {
2759 int i, chkpt;
2760 int rollover;
2761 int branch;
2762 int exception;
2763 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2764
2765 chkpt = 0;
2766 rollover = 0;
2767
2768 for (i = 0; i < trace_data->depth; i++)
2769 {
2770 next_pc_ok = 0;
2771 branch = 0;
2772 exception = 0;
2773
2774 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2775 continue;
2776
2777 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2778 {
2779 case 0: /* Exceptions */
2780 case 1:
2781 case 2:
2782 case 3:
2783 case 4:
2784 case 5:
2785 case 6:
2786 case 7:
2787 exception = (trace_data->entries[i].data & 0x70) >> 4;
2788 next_pc_ok = 1;
2789 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2790 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2791 break;
2792 case 8: /* Direct Branch */
2793 branch = 1;
2794 break;
2795 case 9: /* Indirect Branch */
2796 branch = 1;
2797 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2798 {
2799 next_pc_ok = 1;
2800 }
2801 break;
2802 case 13: /* Checkpointed Indirect Branch */
2803 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2804 {
2805 next_pc_ok = 1;
2806 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2807 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2808 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2809 }
2810 /* explicit fall-through */
2811 case 12: /* Checkpointed Direct Branch */
2812 branch = 1;
2813 if (chkpt == 0)
2814 {
2815 next_pc_ok = 1;
2816 next_pc = trace_data->chkpt0;
2817 chkpt++;
2818 }
2819 else if (chkpt == 1)
2820 {
2821 next_pc_ok = 1;
2822 next_pc = trace_data->chkpt1;
2823 chkpt++;
2824 }
2825 else
2826 {
2827 LOG_WARNING("more than two checkpointed branches encountered");
2828 }
2829 break;
2830 case 15: /* Roll-over */
2831 rollover++;
2832 continue;
2833 default: /* Reserved */
2834 command_print(cmd_ctx, "--- reserved trace message ---");
2835 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2836 return ERROR_OK;
2837 }
2838
2839 if (xscale->trace.pc_ok)
2840 {
2841 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2842 arm_instruction_t instruction;
2843
2844 if ((exception == 6) || (exception == 7))
2845 {
2846 /* IRQ or FIQ exception, no instruction executed */
2847 executed -= 1;
2848 }
2849
2850 while (executed-- >= 0)
2851 {
2852 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2853 {
2854 /* can't continue tracing with no image available */
2855 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2856 {
2857 return retval;
2858 }
2859 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2860 {
2861 /* TODO: handle incomplete images */
2862 }
2863 }
2864
2865 /* a precise abort on a load to the PC is included in the incremental
2866 * word count, other instructions causing data aborts are not included
2867 */
2868 if ((executed == 0) && (exception == 4)
2869 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2870 {
2871 if ((instruction.type == ARM_LDM)
2872 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2873 {
2874 executed--;
2875 }
2876 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2877 && (instruction.info.load_store.Rd != 15))
2878 {
2879 executed--;
2880 }
2881 }
2882
2883 /* only the last instruction executed
2884 * (the one that caused the control flow change)
2885 * could be a taken branch
2886 */
2887 if (((executed == -1) && (branch == 1)) &&
2888 (((instruction.type == ARM_B) ||
2889 (instruction.type == ARM_BL) ||
2890 (instruction.type == ARM_BLX)) &&
2891 (instruction.info.b_bl_bx_blx.target_address != -1)))
2892 {
2893 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2894 }
2895 else
2896 {
2897 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2898 }
2899 command_print(cmd_ctx, "%s", instruction.text);
2900 }
2901
2902 rollover = 0;
2903 }
2904
2905 if (next_pc_ok)
2906 {
2907 xscale->trace.current_pc = next_pc;
2908 xscale->trace.pc_ok = 1;
2909 }
2910 }
2911
2912 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2913 {
2914 arm_instruction_t instruction;
2915 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2916 {
2917 /* can't continue tracing with no image available */
2918 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2919 {
2920 return retval;
2921 }
2922 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2923 {
2924 /* TODO: handle incomplete images */
2925 }
2926 }
2927 command_print(cmd_ctx, "%s", instruction.text);
2928 }
2929
2930 trace_data = trace_data->next;
2931 }
2932
2933 return ERROR_OK;
2934 }
2935
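/* build the common ARMv4/5 core register cache and chain the XScale-specific cache
 * (DCSR, TX/RX and the CP15/debug registers) behind it */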
2936 void xscale_build_reg_cache(target_t *target)
2937 {
2938 /* get pointers to arch-specific information */
2939 armv4_5_common_t *armv4_5 = target->arch_info;
2940 xscale_common_t *xscale = armv4_5->arch_info;
2941
2942 reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
2943 xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
2944 int i;
2945 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
2946
2947 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2948 armv4_5->core_cache = (*cache_p);
2949
2950 /* register a register arch-type for XScale dbg registers only once */
2951 if (xscale_reg_arch_type == -1)
2952 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
2953
2954 (*cache_p)->next = malloc(sizeof(reg_cache_t));
2955 cache_p = &(*cache_p)->next;
2956
2957 /* fill in values for the xscale reg cache */
2958 (*cache_p)->name = "XScale registers";
2959 (*cache_p)->next = NULL;
2960 (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
2961 (*cache_p)->num_regs = num_regs;
2962
2963 for (i = 0; i < num_regs; i++)
2964 {
2965 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2966 (*cache_p)->reg_list[i].value = calloc(4, 1);
2967 (*cache_p)->reg_list[i].dirty = 0;
2968 (*cache_p)->reg_list[i].valid = 0;
2969 (*cache_p)->reg_list[i].size = 32;
2970 (*cache_p)->reg_list[i].bitfield_desc = NULL;
2971 (*cache_p)->reg_list[i].num_bitfields = 0;
2972 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2973 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
2974 arch_info[i] = xscale_reg_arch_info[i];
2975 arch_info[i].target = target;
2976 }
2977
2978 xscale->reg_cache = (*cache_p);
2979 }
2980
2981 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target)
2982 {
2983 return ERROR_OK;
2984 }
2985
2986 int xscale_quit()
2987 {
2988
2989 return ERROR_OK;
2990 }
2991
2992 int xscale_init_arch_info(target_t *target, xscale_common_t *xscale, int chain_pos, char *variant)
2993 {
2994 armv4_5_common_t *armv4_5;
2995 u32 high_reset_branch, low_reset_branch;
2996 int i;
2997
2998 armv4_5 = &xscale->armv4_5_common;
2999
3000 /* store architecture specific data (none so far) */
3001 xscale->arch_info = NULL;
3002 xscale->common_magic = XSCALE_COMMON_MAGIC;
3003
3004 /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
3005 xscale->variant = strdup(variant);
3006
3007 /* prepare JTAG information for the new target */
3008 xscale->jtag_info.chain_pos = chain_pos;
3009
3010 xscale->jtag_info.dbgrx = 0x02;
3011 xscale->jtag_info.dbgtx = 0x10;
3012 xscale->jtag_info.dcsr = 0x09;
3013 xscale->jtag_info.ldic = 0x07;
3014
3015 if ((strcmp(xscale->variant, "pxa250") == 0) ||
3016 (strcmp(xscale->variant, "pxa255") == 0) ||
3017 (strcmp(xscale->variant, "pxa26x") == 0))
3018 {
3019 xscale->jtag_info.ir_length = 5;
3020 }
3021 else if ((strcmp(xscale->variant, "pxa27x") == 0) ||
3022 (strcmp(xscale->variant, "ixp42x") == 0) ||
3023 (strcmp(xscale->variant, "ixp45x") == 0) ||
3024 (strcmp(xscale->variant, "ixp46x") == 0))
3025 {
3026 xscale->jtag_info.ir_length = 7;
3027 }
3028
3029 /* the debug handler isn't installed (and thus not running) at this time */
3030 xscale->handler_installed = 0;
3031 xscale->handler_running = 0;
3032 xscale->handler_address = 0xfe000800;
3033
3034 /* clear the vectors we keep locally for reference */
3035 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3036 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3037
3038 /* no user-specified vectors have been configured yet */
3039 xscale->static_low_vectors_set = 0x0;
3040 xscale->static_high_vectors_set = 0x0;
3041
3042 /* calculate branches to debug handler */
3043 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3044 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
3045
3046 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3047 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3048
3049 for (i = 1; i <= 7; i++)
3050 {
3051 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3052 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3053 }
3054
3055 /* 64kB aligned region used for DCache cleaning */
3056 xscale->cache_clean_address = 0xfffe0000;
3057
3058 xscale->hold_rst = 0;
3059 xscale->external_debug_break = 0;
3060
3061 xscale->force_hw_bkpts = 1;
3062
3063 xscale->ibcr_available = 2;
3064 xscale->ibcr0_used = 0;
3065 xscale->ibcr1_used = 0;
3066
3067 xscale->dbr_available = 2;
3068 xscale->dbr0_used = 0;
3069 xscale->dbr1_used = 0;
3070
3071 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3072 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3073
3074 xscale->vector_catch = 0x1;
3075
3076 xscale->trace.capture_status = TRACE_IDLE;
3077 xscale->trace.data = NULL;
3078 xscale->trace.image = NULL;
3079 xscale->trace.buffer_enabled = 0;
3080 xscale->trace.buffer_fill = 0;
3081
3082 /* prepare ARMv4/5 specific information */
3083 armv4_5->arch_info = xscale;
3084 armv4_5->read_core_reg = xscale_read_core_reg;
3085 armv4_5->write_core_reg = xscale_write_core_reg;
3086 armv4_5->full_context = xscale_full_context;
3087
3088 armv4_5_init_arch_info(target, armv4_5);
3089
3090 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3091 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3092 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3093 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3094 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3095 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3096 xscale->armv4_5_mmu.has_tiny_pages = 1;
3097 xscale->armv4_5_mmu.mmu_enabled = 0;
3098
3099 return ERROR_OK;
3100 }
3101
3102 /* target xscale <endianness> <startup_mode> <chain_pos> <variant> */
3103 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target)
3104 {
3105 int chain_pos;
3106 char *variant = NULL;
3107 xscale_common_t *xscale = malloc(sizeof(xscale_common_t));
3108 memset(xscale, 0, sizeof(*xscale));
3109
3110 if (argc < 5)
3111 {
3112 LOG_ERROR("'target xscale' requires four arguments: <endianness> <startup_mode> <chain_pos> <variant>");
3113 return ERROR_OK;
3114 }
3115
3116 chain_pos = strtoul(args[3], NULL, 0);
3117
3118 variant = args[4];
3119
3120 xscale_init_arch_info(target, xscale, chain_pos, variant);
3121 xscale_build_reg_cache(target);
3122
3123 return ERROR_OK;
3124 }
3125
3126 int xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3127 {
3128 target_t *target = NULL;
3129 armv4_5_common_t *armv4_5;
3130 xscale_common_t *xscale;
3131
3132 u32 handler_address;
3133
3134 if (argc < 2)
3135 {
3136 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3137 return ERROR_OK;
3138 }
3139
3140 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3141 {
3142 LOG_ERROR("no target '%s' configured", args[0]);
3143 return ERROR_OK;
3144 }
3145
3146 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3147 {
3148 return ERROR_OK;
3149 }
3150
3151 handler_address = strtoul(args[1], NULL, 0);
3152
3153 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3154 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3155 {
3156 xscale->handler_address = handler_address;
3157 }
3158 else
3159 {
3160 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3161 }
3162
3163 return ERROR_OK;
3164 }
3165
3166 int xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3167 {
3168 target_t *target = NULL;
3169 armv4_5_common_t *armv4_5;
3170 xscale_common_t *xscale;
3171
3172 u32 cache_clean_address;
3173
3174 if (argc < 2)
3175 {
3176 LOG_ERROR("'xscale cache_clean_address <target#> <address>' command takes two required operands");
3177 return ERROR_OK;
3178 }
3179
3180 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3181 {
3182 LOG_ERROR("no target '%s' configured", args[0]);
3183 return ERROR_OK;
3184 }
3185
3186 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3187 {
3188 return ERROR_OK;
3189 }
3190
3191 cache_clean_address = strtoul(args[1], NULL, 0);
3192
3193 if (cache_clean_address & 0xffff)
3194 {
3195 LOG_ERROR("xscale cache_clean_address <address> must be 64kB aligned");
3196 }
3197 else
3198 {
3199 xscale->cache_clean_address = cache_clean_address;
3200 }
3201
3202 return ERROR_OK;
3203 }
3204
3205 int xscale_handle_cache_info_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3206 {
3207 target_t *target = get_current_target(cmd_ctx);
3208 armv4_5_common_t *armv4_5;
3209 xscale_common_t *xscale;
3210
3211 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3212 {
3213 return ERROR_OK;
3214 }
3215
3216 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3217 }
3218
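/* translate a virtual address to a physical address by walking the page tables with the
 * generic ARMv4/5 MMU helper */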
3219 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical)
3220 {
3221 armv4_5_common_t *armv4_5;
3222 xscale_common_t *xscale;
3223 int retval;
3224 int type;
3225 u32 cb;
3226 int domain;
3227 u32 ap;
3228
3229
3230 if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
3231 {
3232 return retval;
3233 }
3234 u32 ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3235 if (type == -1)
3236 {
3237 return ret;
3238 }
3239 *physical = ret;
3240 return ERROR_OK;
3241 }
3242
3243 static int xscale_mmu(struct target_s *target, int *enabled)
3244 {
3245 armv4_5_common_t *armv4_5 = target->arch_info;
3246 xscale_common_t *xscale = armv4_5->arch_info;
3247
3248 if (target->state != TARGET_HALTED)
3249 {
3250 LOG_ERROR("Target not halted");
3251 return ERROR_TARGET_INVALID;
3252 }
3253 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3254 return ERROR_OK;
3255 }
3256
3257
3258 int xscale_handle_mmu_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3259 {
3260 target_t *target = get_current_target(cmd_ctx);
3261 armv4_5_common_t *armv4_5;
3262 xscale_common_t *xscale;
3263
3264 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3265 {
3266 return ERROR_OK;
3267 }
3268
3269 if (target->state != TARGET_HALTED)
3270 {
3271 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3272 return ERROR_OK;
3273 }
3274
3275 if (argc >= 1)
3276 {
3277 if (strcmp("enable", args[0]) == 0)
3278 {
3279 xscale_enable_mmu_caches(target, 1, 0, 0);
3280 xscale->armv4_5_mmu.mmu_enabled = 1;
3281 }
3282 else if (strcmp("disable", args[0]) == 0)
3283 {
3284 xscale_disable_mmu_caches(target, 1, 0, 0);
3285 xscale->armv4_5_mmu.mmu_enabled = 0;
3286 }
3287 }
3288
3289 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3290
3291 return ERROR_OK;
3292 }
3293
3294 int xscale_handle_idcache_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3295 {
3296 target_t *target = get_current_target(cmd_ctx);
3297 armv4_5_common_t *armv4_5;
3298 xscale_common_t *xscale;
3299 int icache = 0, dcache = 0;
3300
3301 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3302 {
3303 return ERROR_OK;
3304 }
3305
3306 if (target->state != TARGET_HALTED)
3307 {
3308 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3309 return ERROR_OK;
3310 }
3311
3312 if (strcmp(cmd, "icache") == 0)
3313 icache = 1;
3314 else if (strcmp(cmd, "dcache") == 0)
3315 dcache = 1;
3316
3317 if (argc >= 1)
3318 {
3319 if (strcmp("enable", args[0]) == 0)
3320 {
3321 xscale_enable_mmu_caches(target, 0, dcache, icache);
3322
3323 if (icache)
3324 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3325 else if (dcache)
3326 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3327 }
3328 else if (strcmp("disable", args[0]) == 0)
3329 {
3330 xscale_disable_mmu_caches(target, 0, dcache, icache);
3331
3332 if (icache)
3333 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3334 else if (dcache)
3335 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3336 }
3337 }
3338
3339 if (icache)
3340 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3341
3342 if (dcache)
3343 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3344
3345 return ERROR_OK;
3346 }
3347
3348 int xscale_handle_vector_catch_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3349 {
3350 target_t *target = get_current_target(cmd_ctx);
3351 armv4_5_common_t *armv4_5;
3352 xscale_common_t *xscale;
3353
3354 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3355 {
3356 return ERROR_OK;
3357 }
3358
3359 if (argc < 1)
3360 {
3361 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3362 }
3363 else
3364 {
3365 xscale->vector_catch = strtoul(args[0], NULL, 0);
3366 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3367 xscale_write_dcsr(target, -1, -1);
3368 }
3369
3370 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3371
3372 return ERROR_OK;
3373 }
3374
3375 int xscale_handle_force_hw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3376 {
3377 target_t *target = get_current_target(cmd_ctx);
3378 armv4_5_common_t *armv4_5;
3379 xscale_common_t *xscale;
3380
3381 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3382 {
3383 return ERROR_OK;
3384 }
3385
3386 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3387 {
3388 xscale->force_hw_bkpts = 1;
3389 }
3390 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3391 {
3392 xscale->force_hw_bkpts = 0;
3393 }
3394 else
3395 {
3396 command_print(cmd_ctx, "usage: xscale force_hw_bkpts <enable|disable>");
3397 }
3398
3399 command_print(cmd_ctx, "force hardware breakpoints %s", (xscale->force_hw_bkpts) ? "enabled" : "disabled");
3400
3401 return ERROR_OK;
3402 }
3403
3404 int xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3405 {
3406 target_t *target = get_current_target(cmd_ctx);
3407 armv4_5_common_t *armv4_5;
3408 xscale_common_t *xscale;
3409 u32 dcsr_value;
3410
3411 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3412 {
3413 return ERROR_OK;
3414 }
3415
3416 if (target->state != TARGET_HALTED)
3417 {
3418 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3419 return ERROR_OK;
3420 }
3421
3422 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3423 {
3424 xscale_trace_data_t *td, *next_td;
3425 xscale->trace.buffer_enabled = 1;
3426
3427 /* free old trace data */
3428 td = xscale->trace.data;
3429 while (td)
3430 {
3431 next_td = td->next;
3432
3433 if (td->entries)
3434 free(td->entries);
3435 free(td);
3436 td = next_td;
3437 }
3438 xscale->trace.data = NULL;
3439 }
3440 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3441 {
3442 xscale->trace.buffer_enabled = 0;
3443 }
3444
3445 if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
3446 {
3447 if (argc >= 3)
3448 xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
3449 else
3450 xscale->trace.buffer_fill = 1;
3451 }
3452 else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
3453 {
3454 xscale->trace.buffer_fill = -1;
3455 }
3456
3457 if (xscale->trace.buffer_enabled)
3458 {
3459 /* if we enable the trace buffer in fill-once
3460 * mode we know the address of the first instruction */
3461 xscale->trace.pc_ok = 1;
3462 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3463 }
3464 else
3465 {
3466 /* otherwise the address is unknown, and we have no known good PC */
3467 xscale->trace.pc_ok = 0;
3468 }
3469
3470 command_print(cmd_ctx, "trace buffer %s (%s)",
3471 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3472 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3473
3474 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3475 if (xscale->trace.buffer_fill >= 0)
3476 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3477 else
3478 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3479
3480 return ERROR_OK;
3481 }
3482
3483 int xscale_handle_trace_image_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3484 {
3485 target_t *target;
3486 armv4_5_common_t *armv4_5;
3487 xscale_common_t *xscale;
3488
3489 if (argc < 1)
3490 {
3491 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3492 return ERROR_OK;
3493 }
3494
3495 target = get_current_target(cmd_ctx);
3496
3497 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3498 {
3499 return ERROR_OK;
3500 }
3501
3502 if (xscale->trace.image)
3503 {
3504 image_close(xscale->trace.image);
3505 free(xscale->trace.image);
3506 command_print(cmd_ctx, "previously loaded image found and closed");
3507 }
3508
3509 xscale->trace.image = malloc(sizeof(image_t));
3510 xscale->trace.image->base_address_set = 0;
3511 xscale->trace.image->start_address_set = 0;
3512
3513 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3514 if (argc >= 2)
3515 {
3516 xscale->trace.image->base_address_set = 1;
3517 xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
3518 }
3519 else
3520 {
3521 xscale->trace.image->base_address_set = 0;
3522 }
3523
3524 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
3525 {
3526 free(xscale->trace.image);
3527 xscale->trace.image = NULL;
3528 return ERROR_OK;
3529 }
3530
3531 return ERROR_OK;
3532 }
3533
3534 int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3535 {
3536 target_t *target = get_current_target(cmd_ctx);
3537 armv4_5_common_t *armv4_5;
3538 xscale_common_t *xscale;
3539 xscale_trace_data_t *trace_data;
3540 fileio_t file;
3541
3542 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3543 {
3544 return ERROR_OK;
3545 }
3546
3547 if (target->state != TARGET_HALTED)
3548 {
3549 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3550 return ERROR_OK;
3551 }
3552
3553 if (argc < 1)
3554 {
3555 command_print(cmd_ctx, "usage: xscale dump_trace <file>");
3556 return ERROR_OK;
3557 }
3558
3559 trace_data = xscale->trace.data;
3560
3561 if (!trace_data)
3562 {
3563 command_print(cmd_ctx, "no trace data collected");
3564 return ERROR_OK;
3565 }
3566
3567 if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3568 {
3569 return ERROR_OK;
3570 }
3571
3572 while (trace_data)
3573 {
3574 int i;
3575
3576 fileio_write_u32(&file, trace_data->chkpt0);
3577 fileio_write_u32(&file, trace_data->chkpt1);
3578 fileio_write_u32(&file, trace_data->last_instruction);
3579 fileio_write_u32(&file, trace_data->depth);
3580
3581 for (i = 0; i < trace_data->depth; i++)
3582 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3583
3584 trace_data = trace_data->next;
3585 }
3586
3587 fileio_close(&file);
3588
3589 return ERROR_OK;
3590 }
3591
3592 int xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3593 {
3594 target_t *target = get_current_target(cmd_ctx);
3595 armv4_5_common_t *armv4_5;
3596 xscale_common_t *xscale;
3597
3598 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3599 {
3600 return ERROR_OK;
3601 }
3602
3603 xscale_analyze_trace(target, cmd_ctx);
3604
3605 return ERROR_OK;
3606 }
3607
3608 int xscale_handle_cp15(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3609 {
3610 target_t *target = get_current_target(cmd_ctx);
3611 armv4_5_common_t *armv4_5;
3612 xscale_common_t *xscale;
3613
3614 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3615 {
3616 return ERROR_OK;
3617 }
3618
3619 if (target->state != TARGET_HALTED)
3620 {
3621 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3622 return ERROR_OK;
3623 }
3624 u32 reg_no = 0;
3625 reg_t *reg = NULL;
3626 if(argc > 0)
3627 {
3628 reg_no = strtoul(args[0], NULL, 0);
3629 /* translate from XScale cp15 register number to the OpenOCD register cache index */
3630 switch(reg_no)
3631 {
3632 case 0:
3633 reg_no = XSCALE_MAINID;
3634 break;
3635 case 1:
3636 reg_no = XSCALE_CTRL;
3637 break;
3638 case 2:
3639 reg_no = XSCALE_TTB;
3640 break;
3641 case 3:
3642 reg_no = XSCALE_DAC;
3643 break;
3644 case 5:
3645 reg_no = XSCALE_FSR;
3646 break;
3647 case 6:
3648 reg_no = XSCALE_FAR;
3649 break;
3650 case 13:
3651 reg_no = XSCALE_PID;
3652 break;
3653 case 15:
3654 reg_no = XSCALE_CPACCESS;
3655 break;
3656 default:
3657 command_print(cmd_ctx, "invalid register number");
3658 return ERROR_INVALID_ARGUMENTS;
3659 }
3660 reg = &xscale->reg_cache->reg_list[reg_no];
3661
3662 }
3663 if(argc == 1)
3664 {
3665 u32 value;
3666
3667 /* read the selected cp15 register */
3668 xscale_get_reg(reg);
3669 value = buf_get_u32(reg->value, 0, 32);
3670 command_print(cmd_ctx, "%s (/%i): 0x%x", reg->name, reg->size, value);
3671 }
3672 else if(argc == 2)
3673 {
3674
3675 u32 value = strtoul(args[1], NULL, 0);
3676
3677 /* send CP write request (command 0x41) */
3678 xscale_send_u32(target, 0x41);
3679
3680 /* send CP register number */
3681 xscale_send_u32(target, reg_no);
3682
3683 /* send CP register value */
3684 xscale_send_u32(target, value);
3685
3686 /* execute cpwait to ensure outstanding operations complete */
3687 xscale_send_u32(target, 0x53);
3688 }
3689 else
3690 {
3691 command_print(cmd_ctx, "usage: cp15 <register> [value]");
3692 }
3693
3694 return ERROR_OK;
3695 }
3696
3697 int xscale_register_commands(struct command_context_s *cmd_ctx)
3698 {
3699 command_t *xscale_cmd;
3700
3701 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3702
3703 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3704 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3705
3706 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3707 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3708 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3709 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3710
3711 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be caught");
3712
3713 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable|disable> ['fill' [n]|'wrap']");
3714
3715 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3716 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3717 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3718 COMMAND_EXEC, "load image from <file> [base address]");
3719
3720 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3721
3722 armv4_5_register_commands(cmd_ctx);
3723
3724 return ERROR_OK;
3725 }
