1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * This program is free software; you can redistribute it and/or modify *
9 * it under the terms of the GNU General Public License as published by *
10 * the Free Software Foundation; either version 2 of the License, or *
11 * (at your option) any later version. *
12 * *
13 * This program is distributed in the hope that it will be useful, *
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
16 * GNU General Public License for more details. *
17 * *
18 * You should have received a copy of the GNU General Public License *
19 * along with this program; if not, write to the *
20 * Free Software Foundation, Inc., *
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
22 ***************************************************************************/
23 #ifdef HAVE_CONFIG_H
24 #include "config.h"
25 #endif
26
27 #include "replacements.h"
28
29 #include "xscale.h"
30
31 #include "arm7_9_common.h"
32 #include "register.h"
33 #include "target.h"
34 #include "armv4_5.h"
35 #include "arm_simulator.h"
36 #include "arm_disassembler.h"
37 #include "log.h"
38 #include "jtag.h"
39 #include "binarybuffer.h"
40 #include "time_support.h"
41 #include "breakpoints.h"
42 #include "fileio.h"
43
44 #include <stdlib.h>
45 #include <string.h>
46
47 #include <sys/types.h>
48 #include <unistd.h>
49 #include <errno.h>
50
51
52 /* cli handling */
53 int xscale_register_commands(struct command_context_s *cmd_ctx);
54
55 /* forward declarations */
56 int xscale_target_create(struct target_s *target, Jim_Interp *interp);
57 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target);
58 int xscale_quit(void);
59
60 int xscale_arch_state(struct target_s *target);
61 int xscale_poll(target_t *target);
62 int xscale_halt(target_t *target);
63 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution);
64 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints);
65 int xscale_debug_entry(target_t *target);
66 int xscale_restore_context(target_t *target);
67
68 int xscale_assert_reset(target_t *target);
69 int xscale_deassert_reset(target_t *target);
70 int xscale_soft_reset_halt(struct target_s *target);
71
72 int xscale_set_reg_u32(reg_t *reg, u32 value);
73
74 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode);
75 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value);
76
77 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
78 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
79 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer);
80
81 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
82 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
83 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
84 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
85 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
86 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
87 void xscale_enable_watchpoints(struct target_s *target);
88 void xscale_enable_breakpoints(struct target_s *target);
89 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical);
90 static int xscale_mmu(struct target_s *target, int *enabled);
91
92 int xscale_read_trace(target_t *target);
93
94 target_type_t xscale_target =
95 {
96 .name = "xscale",
97
98 .poll = xscale_poll,
99 .arch_state = xscale_arch_state,
100
101 .target_request_data = NULL,
102
103 .halt = xscale_halt,
104 .resume = xscale_resume,
105 .step = xscale_step,
106
107 .assert_reset = xscale_assert_reset,
108 .deassert_reset = xscale_deassert_reset,
109 .soft_reset_halt = xscale_soft_reset_halt,
110
111 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
112
113 .read_memory = xscale_read_memory,
114 .write_memory = xscale_write_memory,
115 .bulk_write_memory = xscale_bulk_write_memory,
116 .checksum_memory = arm7_9_checksum_memory,
117 .blank_check_memory = arm7_9_blank_check_memory,
118
119 .run_algorithm = armv4_5_run_algorithm,
120
121 .add_breakpoint = xscale_add_breakpoint,
122 .remove_breakpoint = xscale_remove_breakpoint,
123 .add_watchpoint = xscale_add_watchpoint,
124 .remove_watchpoint = xscale_remove_watchpoint,
125
126 .register_commands = xscale_register_commands,
127 .target_create = xscale_target_create,
128 .init_target = xscale_init_target,
129 .quit = xscale_quit,
130
131 .virt2phys = xscale_virt2phys,
132 .mmu = xscale_mmu
133 };
134
135 char* xscale_reg_list[] =
136 {
137 "XSCALE_MAINID", /* 0 */
138 "XSCALE_CACHETYPE",
139 "XSCALE_CTRL",
140 "XSCALE_AUXCTRL",
141 "XSCALE_TTB",
142 "XSCALE_DAC",
143 "XSCALE_FSR",
144 "XSCALE_FAR",
145 "XSCALE_PID",
146 "XSCALE_CPACCESS",
147 "XSCALE_IBCR0", /* 10 */
148 "XSCALE_IBCR1",
149 "XSCALE_DBR0",
150 "XSCALE_DBR1",
151 "XSCALE_DBCON",
152 "XSCALE_TBREG",
153 "XSCALE_CHKPT0",
154 "XSCALE_CHKPT1",
155 "XSCALE_DCSR",
156 "XSCALE_TX",
157 "XSCALE_RX", /* 20 */
158 "XSCALE_TXRXCTRL",
159 };
160
161 xscale_reg_t xscale_reg_arch_info[] =
162 {
163 {XSCALE_MAINID, NULL},
164 {XSCALE_CACHETYPE, NULL},
165 {XSCALE_CTRL, NULL},
166 {XSCALE_AUXCTRL, NULL},
167 {XSCALE_TTB, NULL},
168 {XSCALE_DAC, NULL},
169 {XSCALE_FSR, NULL},
170 {XSCALE_FAR, NULL},
171 {XSCALE_PID, NULL},
172 {XSCALE_CPACCESS, NULL},
173 {XSCALE_IBCR0, NULL},
174 {XSCALE_IBCR1, NULL},
175 {XSCALE_DBR0, NULL},
176 {XSCALE_DBR1, NULL},
177 {XSCALE_DBCON, NULL},
178 {XSCALE_TBREG, NULL},
179 {XSCALE_CHKPT0, NULL},
180 {XSCALE_CHKPT1, NULL},
181 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
182 {-1, NULL}, /* TX accessed via JTAG */
183 {-1, NULL}, /* RX accessed via JTAG */
184 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
185 };
186
187 int xscale_reg_arch_type = -1;
188
189 int xscale_get_reg(reg_t *reg);
190 int xscale_set_reg(reg_t *reg, u8 *buf);
191
192 int xscale_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
193 {
194 armv4_5_common_t *armv4_5 = target->arch_info;
195 xscale_common_t *xscale = armv4_5->arch_info;
196
197 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
198 {
199 LOG_ERROR("target isn't an XScale target");
200 return -1;
201 }
202
203 if (xscale->common_magic != XSCALE_COMMON_MAGIC)
204 {
205 LOG_ERROR("target isn't an XScale target");
206 return -1;
207 }
208
209 *armv4_5_p = armv4_5;
210 *xscale_p = xscale;
211
212 return ERROR_OK;
213 }
214
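/* Load an instruction into the XScale TAP's IR, but only if it differs from
 * the instruction currently latched, so redundant IR scans are avoided. */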
215 int xscale_jtag_set_instr(int chain_pos, u32 new_instr)
216 {
217 jtag_device_t *device = jtag_get_device(chain_pos);
218
219 if (buf_get_u32(device->cur_instr, 0, device->ir_length) != new_instr)
220 {
221 scan_field_t field;
222
223 field.device = chain_pos;
224 field.num_bits = device->ir_length;
225 field.out_value = calloc(CEIL(field.num_bits, 8), 1);
226 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
227 field.out_mask = NULL;
228 field.in_value = NULL;
229 jtag_set_check_value(&field, device->expected, device->expected_mask, NULL);
230
231 jtag_add_ir_scan(1, &field, -1);
232
233 free(field.out_value);
234 }
235
236 return ERROR_OK;
237 }
238
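/* Read the DCSR via JTAG: one 36-bit DR scan (3-bit status, 32-bit DCSR, 1 trailing bit)
 * into the register cache, then write the same value straight back so the DCSR
 * content is left unchanged. */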
239 int xscale_read_dcsr(target_t *target)
240 {
241 armv4_5_common_t *armv4_5 = target->arch_info;
242 xscale_common_t *xscale = armv4_5->arch_info;
243
244 int retval;
245
246 scan_field_t fields[3];
247 u8 field0 = 0x0;
248 u8 field0_check_value = 0x2;
249 u8 field0_check_mask = 0x7;
250 u8 field2 = 0x0;
251 u8 field2_check_value = 0x0;
252 u8 field2_check_mask = 0x1;
253
254 jtag_add_end_state(TAP_PD);
255 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
256
257 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
258 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
259
260 fields[0].device = xscale->jtag_info.chain_pos;
261 fields[0].num_bits = 3;
262 fields[0].out_value = &field0;
263 fields[0].out_mask = NULL;
264 fields[0].in_value = NULL;
265 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
266
267 fields[1].device = xscale->jtag_info.chain_pos;
268 fields[1].num_bits = 32;
269 fields[1].out_value = NULL;
270 fields[1].out_mask = NULL;
271 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
272 fields[1].in_handler = NULL;
273 fields[1].in_handler_priv = NULL;
274 fields[1].in_check_value = NULL;
275 fields[1].in_check_mask = NULL;
276
277 fields[2].device = xscale->jtag_info.chain_pos;
278 fields[2].num_bits = 1;
279 fields[2].out_value = &field2;
280 fields[2].out_mask = NULL;
281 fields[2].in_value = NULL;
282 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
283
284 jtag_add_dr_scan(3, fields, -1);
285
286 if ((retval = jtag_execute_queue()) != ERROR_OK)
287 {
288 LOG_ERROR("JTAG error while reading DCSR");
289 return retval;
290 }
291
292 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
293 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
294
295 /* write the register with the value we just read
296 * on this second pass, only the first bit of field0 is guaranteed to be 0
297 */
298 field0_check_mask = 0x1;
299 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
300 fields[1].in_value = NULL;
301
302 jtag_add_end_state(TAP_RTI);
303
304 jtag_add_dr_scan(3, fields, -1);
305
306 /* DANGER!!! this must be here. It will make sure that the arguments
307 * to jtag_set_check_value() do not go out of scope! */
308 return jtag_execute_queue();
309 }
310
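/* Collect num_words 32-bit words from the debug handler via DBGTX. Each DR scan
 * returns a 3-bit status and a 32-bit word; bit 0 of the status indicates whether
 * TX held valid data. Words flagged invalid are dropped and re-read on the next pass. */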
311 int xscale_receive(target_t *target, u32 *buffer, int num_words)
312 {
313 if (num_words==0)
314 return ERROR_INVALID_ARGUMENTS;
315
316 int retval=ERROR_OK;
317 armv4_5_common_t *armv4_5 = target->arch_info;
318 xscale_common_t *xscale = armv4_5->arch_info;
319
320 enum tap_state path[3];
321 scan_field_t fields[3];
322
323 u8 *field0 = malloc(num_words * 1);
324 u8 field0_check_value = 0x2;
325 u8 field0_check_mask = 0x6;
326 u32 *field1 = malloc(num_words * 4);
327 u8 field2_check_value = 0x0;
328 u8 field2_check_mask = 0x1;
329 int words_done = 0;
330 int words_scheduled = 0;
331
332 int i;
333
334 path[0] = TAP_SDS;
335 path[1] = TAP_CD;
336 path[2] = TAP_SD;
337
338 fields[0].device = xscale->jtag_info.chain_pos;
339 fields[0].num_bits = 3;
340 fields[0].out_value = NULL;
341 fields[0].out_mask = NULL;
342 fields[0].in_value = NULL;
343 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
344
345 fields[1].device = xscale->jtag_info.chain_pos;
346 fields[1].num_bits = 32;
347 fields[1].out_value = NULL;
348 fields[1].out_mask = NULL;
349 fields[1].in_value = NULL;
350 fields[1].in_handler = NULL;
351 fields[1].in_handler_priv = NULL;
352 fields[1].in_check_value = NULL;
353 fields[1].in_check_mask = NULL;
354
355
356
357 fields[2].device = xscale->jtag_info.chain_pos;
358 fields[2].num_bits = 1;
359 fields[2].out_value = NULL;
360 fields[2].out_mask = NULL;
361 fields[2].in_value = NULL;
362 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
363
364 jtag_add_end_state(TAP_RTI);
365 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
366 jtag_add_runtest(1, -1); /* ensures that we're in the TAP_RTI state as the above could be a no-op */
367
368 /* repeat until all words have been collected */
369 int attempts=0;
370 while (words_done < num_words)
371 {
372 /* schedule reads */
373 words_scheduled = 0;
374 for (i = words_done; i < num_words; i++)
375 {
376 fields[0].in_value = &field0[i];
377 fields[1].in_handler = buf_to_u32_handler;
378 fields[1].in_handler_priv = (u8*)&field1[i];
379
380 jtag_add_pathmove(3, path);
381 jtag_add_dr_scan(3, fields, TAP_RTI);
382 words_scheduled++;
383 }
384
385 if ((retval = jtag_execute_queue()) != ERROR_OK)
386 {
387 LOG_ERROR("JTAG error while receiving data from debug handler");
388 break;
389 }
390
391 /* examine results */
392 for (i = words_done; i < num_words; i++)
393 {
394 if (!(field0[i] & 1))
395 {
396 /* move backwards if necessary */
397 int j;
398 for (j = i; j < num_words - 1; j++)
399 {
400 field0[j] = field0[j+1];
401 field1[j] = field1[j+1];
402 }
403 words_scheduled--;
404 }
405 }
406 if (words_scheduled==0)
407 {
408 if (attempts++==1000)
409 {
410 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
411 retval=ERROR_TARGET_TIMEOUT;
412 break;
413 }
414 }
415
416 words_done += words_scheduled;
417 }
418
419 for (i = 0; i < num_words; i++)
420 *(buffer++) = buf_get_u32((u8*)&field1[i], 0, 32);
421
422 free(field0);
free(field1);
423
424 return retval;
425 }
426
427 int xscale_read_tx(target_t *target, int consume)
428 {
429 armv4_5_common_t *armv4_5 = target->arch_info;
430 xscale_common_t *xscale = armv4_5->arch_info;
431 enum tap_state path[3];
432 enum tap_state noconsume_path[6];
433
434 int retval;
435 struct timeval timeout, now;
436
437 scan_field_t fields[3];
438 u8 field0_in = 0x0;
439 u8 field0_check_value = 0x2;
440 u8 field0_check_mask = 0x6;
441 u8 field2_check_value = 0x0;
442 u8 field2_check_mask = 0x1;
443
444 jtag_add_end_state(TAP_RTI);
445
446 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
447
448 path[0] = TAP_SDS;
449 path[1] = TAP_CD;
450 path[2] = TAP_SD;
451
452 noconsume_path[0] = TAP_SDS;
453 noconsume_path[1] = TAP_CD;
454 noconsume_path[2] = TAP_E1D;
455 noconsume_path[3] = TAP_PD;
456 noconsume_path[4] = TAP_E2D;
457 noconsume_path[5] = TAP_SD;
458
459 fields[0].device = xscale->jtag_info.chain_pos;
460 fields[0].num_bits = 3;
461 fields[0].out_value = NULL;
462 fields[0].out_mask = NULL;
463 fields[0].in_value = &field0_in;
464 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
465
466 fields[1].device = xscale->jtag_info.chain_pos;
467 fields[1].num_bits = 32;
468 fields[1].out_value = NULL;
469 fields[1].out_mask = NULL;
470 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
471 fields[1].in_handler = NULL;
472 fields[1].in_handler_priv = NULL;
473 fields[1].in_check_value = NULL;
474 fields[1].in_check_mask = NULL;
475
476
477
478 fields[2].device = xscale->jtag_info.chain_pos;
479 fields[2].num_bits = 1;
480 fields[2].out_value = NULL;
481 fields[2].out_mask = NULL;
482 fields[2].in_value = NULL;
483 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
484
485 gettimeofday(&timeout, NULL);
486 timeval_add_time(&timeout, 1, 0);
487
488 for (;;)
489 {
490 /* if we want to consume the register content (i.e. clear TX_READY),
491 * we have to go straight from Capture-DR to Shift-DR
492 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
493 */
494 if (consume)
495 jtag_add_pathmove(3, path);
496 else
497 {
498 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
499 }
500
501 jtag_add_dr_scan(3, fields, TAP_RTI);
502
503 if ((retval = jtag_execute_queue()) != ERROR_OK)
504 {
505 LOG_ERROR("JTAG error while reading TX");
506 return ERROR_TARGET_TIMEOUT;
507 }
508
509 gettimeofday(&now, NULL);
510 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
511 {
512 LOG_ERROR("time out reading TX register");
513 return ERROR_TARGET_TIMEOUT;
514 }
515 if (!((!(field0_in & 1)) && consume))
516 {
517 goto done;
518 }
519 if (debug_level>=3)
520 {
521 LOG_DEBUG("waiting 100ms");
522 alive_sleep(100); /* avoid flooding the logs */
523 } else
524 {
525 keep_alive();
526 }
527 }
528 done:
529
530 if (!(field0_in & 1))
531 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
532
533 return ERROR_OK;
534 }
535
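/* Hand one 32-bit word (the cached RX register value) to the debug handler via DBGRX:
 * poll until the handshake bit (bit 0 of the returned status) reads low, then rescan
 * with the rx_valid flag set so the handler picks up the new word. */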
536 int xscale_write_rx(target_t *target)
537 {
538 armv4_5_common_t *armv4_5 = target->arch_info;
539 xscale_common_t *xscale = armv4_5->arch_info;
540
541 int retval;
542 struct timeval timeout, now;
543
544 scan_field_t fields[3];
545 u8 field0_out = 0x0;
546 u8 field0_in = 0x0;
547 u8 field0_check_value = 0x2;
548 u8 field0_check_mask = 0x6;
549 u8 field2 = 0x0;
550 u8 field2_check_value = 0x0;
551 u8 field2_check_mask = 0x1;
552
553 jtag_add_end_state(TAP_RTI);
554
555 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
556
557 fields[0].device = xscale->jtag_info.chain_pos;
558 fields[0].num_bits = 3;
559 fields[0].out_value = &field0_out;
560 fields[0].out_mask = NULL;
561 fields[0].in_value = &field0_in;
562 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
563
564 fields[1].device = xscale->jtag_info.chain_pos;
565 fields[1].num_bits = 32;
566 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
567 fields[1].out_mask = NULL;
568 fields[1].in_value = NULL;
569 fields[1].in_handler = NULL;
570 fields[1].in_handler_priv = NULL;
571 fields[1].in_check_value = NULL;
572 fields[1].in_check_mask = NULL;
573
574
575
576 fields[2].device = xscale->jtag_info.chain_pos;
577 fields[2].num_bits = 1;
578 fields[2].out_value = &field2;
579 fields[2].out_mask = NULL;
580 fields[2].in_value = NULL;
581 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
582
583 gettimeofday(&timeout, NULL);
584 timeval_add_time(&timeout, 1, 0);
585
586 /* poll until rx_read is low */
587 LOG_DEBUG("polling RX");
588 for (;;)
589 {
590 jtag_add_dr_scan(3, fields, TAP_RTI);
591
592 if ((retval = jtag_execute_queue()) != ERROR_OK)
593 {
594 LOG_ERROR("JTAG error while writing RX");
595 return retval;
596 }
597
598 gettimeofday(&now, NULL);
599 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
600 {
601 LOG_ERROR("time out writing RX register");
602 return ERROR_TARGET_TIMEOUT;
603 }
604 if (!(field0_in & 1))
605 goto done;
606 if (debug_level>=3)
607 {
608 LOG_DEBUG("waiting 100ms");
609 alive_sleep(100); /* avoid flooding the logs */
610 } else
611 {
612 keep_alive();
613 }
614 }
615 done:
616
617 /* set rx_valid */
618 field2 = 0x1;
619 jtag_add_dr_scan(3, fields, TAP_RTI);
620
621 if ((retval = jtag_execute_queue()) != ERROR_OK)
622 {
623 LOG_ERROR("JTAG error while writing RX");
624 return retval;
625 }
626
627 return ERROR_OK;
628 }
629
630 /* send count elements of size byte to the debug handler */
631 int xscale_send(target_t *target, u8 *buffer, int count, int size)
632 {
633 armv4_5_common_t *armv4_5 = target->arch_info;
634 xscale_common_t *xscale = armv4_5->arch_info;
635 u32 t[3];
636 int bits[3];
637
638 int retval;
639
640 int done_count = 0;
641
642 jtag_add_end_state(TAP_RTI);
643
644 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
645
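/* each word goes out as one 36-bit DR scan: a 3-bit status field driven to 0,
 * the 32-bit data word, and a 1-bit rx_valid flag driven to 1; unlike
 * xscale_write_rx() there is no per-word handshake polling, which makes this
 * the fast download path */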
646 bits[0]=3;
647 t[0]=0;
648 bits[1]=32;
649 t[2]=1;
650 bits[2]=1;
651 int endianness = target->endianness;
652 while (done_count++ < count)
653 {
654 switch (size)
655 {
656 case 4:
657 if (endianness == TARGET_LITTLE_ENDIAN)
658 {
659 t[1]=le_to_h_u32(buffer);
660 } else
661 {
662 t[1]=be_to_h_u32(buffer);
663 }
664 break;
665 case 2:
666 if (endianness == TARGET_LITTLE_ENDIAN)
667 {
668 t[1]=le_to_h_u16(buffer);
669 } else
670 {
671 t[1]=be_to_h_u16(buffer);
672 }
673 break;
674 case 1:
675 t[1]=buffer[0];
676 break;
677 default:
678 LOG_ERROR("BUG: size neither 4, 2 nor 1");
679 exit(-1);
680 }
681 jtag_add_dr_out(xscale->jtag_info.chain_pos,
682 3,
683 bits,
684 t,
685 TAP_RTI);
686 buffer += size;
687 }
688
689 if ((retval = jtag_execute_queue()) != ERROR_OK)
690 {
691 LOG_ERROR("JTAG error while sending data to debug handler");
692 return retval;
693 }
694
695 return ERROR_OK;
696 }
697
698 int xscale_send_u32(target_t *target, u32 value)
699 {
700 armv4_5_common_t *armv4_5 = target->arch_info;
701 xscale_common_t *xscale = armv4_5->arch_info;
702
703 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
704 return xscale_write_rx(target);
705 }
706
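/* Write the cached DCSR value to the target. hold_rst and ext_dbg_brk update the
 * cached "hold reset" and "external debug break" flags when not -1; both flags are
 * driven in the 3-bit status field of the scan, not in the 32-bit DCSR value itself. */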
707 int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
708 {
709 armv4_5_common_t *armv4_5 = target->arch_info;
710 xscale_common_t *xscale = armv4_5->arch_info;
711
712 int retval;
713
714 scan_field_t fields[3];
715 u8 field0 = 0x0;
716 u8 field0_check_value = 0x2;
717 u8 field0_check_mask = 0x7;
718 u8 field2 = 0x0;
719 u8 field2_check_value = 0x0;
720 u8 field2_check_mask = 0x1;
721
722 if (hold_rst != -1)
723 xscale->hold_rst = hold_rst;
724
725 if (ext_dbg_brk != -1)
726 xscale->external_debug_break = ext_dbg_brk;
727
728 jtag_add_end_state(TAP_RTI);
729 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
730
731 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
732 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
733
734 fields[0].device = xscale->jtag_info.chain_pos;
735 fields[0].num_bits = 3;
736 fields[0].out_value = &field0;
737 fields[0].out_mask = NULL;
738 fields[0].in_value = NULL;
739 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
740
741 fields[1].device = xscale->jtag_info.chain_pos;
742 fields[1].num_bits = 32;
743 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
744 fields[1].out_mask = NULL;
745 fields[1].in_value = NULL;
746 fields[1].in_handler = NULL;
747 fields[1].in_handler_priv = NULL;
748 fields[1].in_check_value = NULL;
749 fields[1].in_check_mask = NULL;
750
751
752
753 fields[2].device = xscale->jtag_info.chain_pos;
754 fields[2].num_bits = 1;
755 fields[2].out_value = &field2;
756 fields[2].out_mask = NULL;
757 fields[2].in_value = NULL;
758 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
759
760 jtag_add_dr_scan(3, fields, -1);
761
762 if ((retval = jtag_execute_queue()) != ERROR_OK)
763 {
764 LOG_ERROR("JTAG error while writing DCSR");
765 return retval;
766 }
767
768 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
769 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
770
771 return ERROR_OK;
772 }
773
774 /* parity of a 32-bit word: returns 0 if the number of set bits is even, 1 if odd */
775 unsigned int parity (unsigned int v)
776 {
777 unsigned int ov = v;
778 v ^= v >> 16;
779 v ^= v >> 8;
780 v ^= v >> 4;
781 v &= 0xf;
782 LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
783 return (0x6996 >> v) & 1;
784 }
785
786 int xscale_load_ic(target_t *target, int mini, u32 va, u32 buffer[8])
787 {
788 armv4_5_common_t *armv4_5 = target->arch_info;
789 xscale_common_t *xscale = armv4_5->arch_info;
790 u8 packet[4];
791 u8 cmd;
792 int word;
793
794 scan_field_t fields[2];
795
796 LOG_DEBUG("loading miniIC at 0x%8.8x", va);
797
798 jtag_add_end_state(TAP_RTI);
799 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
800
801 /* CMD is b010 for Main IC and b011 for Mini IC */
802 if (mini)
803 buf_set_u32(&cmd, 0, 3, 0x3);
804 else
805 buf_set_u32(&cmd, 0, 3, 0x2);
806
807 buf_set_u32(&cmd, 3, 3, 0x0);
808
809 /* virtual address of desired cache line */
810 buf_set_u32(packet, 0, 27, va >> 5);
811
812 fields[0].device = xscale->jtag_info.chain_pos;
813 fields[0].num_bits = 6;
814 fields[0].out_value = &cmd;
815 fields[0].out_mask = NULL;
816 fields[0].in_value = NULL;
817 fields[0].in_check_value = NULL;
818 fields[0].in_check_mask = NULL;
819 fields[0].in_handler = NULL;
820 fields[0].in_handler_priv = NULL;
821
822 fields[1].device = xscale->jtag_info.chain_pos;
823 fields[1].num_bits = 27;
824 fields[1].out_value = packet;
825 fields[1].out_mask = NULL;
826 fields[1].in_value = NULL;
827 fields[1].in_check_value = NULL;
828 fields[1].in_check_mask = NULL;
829 fields[1].in_handler = NULL;
830 fields[1].in_handler_priv = NULL;
831
832 jtag_add_dr_scan(2, fields, -1);
833
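/* reuse the two fields for the eight data words: 32 data bits plus one parity bit each */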
834 fields[0].num_bits = 32;
835 fields[0].out_value = packet;
836
837 fields[1].num_bits = 1;
838 fields[1].out_value = &cmd;
839
840 for (word = 0; word < 8; word++)
841 {
842 buf_set_u32(packet, 0, 32, buffer[word]);
843 cmd = parity(*((u32*)packet));
844 jtag_add_dr_scan(2, fields, -1);
845 }
846
847 jtag_execute_queue();
848
849 return ERROR_OK;
850 }
851
852 int xscale_invalidate_ic_line(target_t *target, u32 va)
853 {
854 armv4_5_common_t *armv4_5 = target->arch_info;
855 xscale_common_t *xscale = armv4_5->arch_info;
856 u8 packet[4];
857 u8 cmd;
858
859 scan_field_t fields[2];
860
861 jtag_add_end_state(TAP_RTI);
862 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
863
864 /* CMD for invalidate IC line b000, bits [6:4] b000 */
865 buf_set_u32(&cmd, 0, 6, 0x0);
866
867 /* virtual address of desired cache line */
868 buf_set_u32(packet, 0, 27, va >> 5);
869
870 fields[0].device = xscale->jtag_info.chain_pos;
871 fields[0].num_bits = 6;
872 fields[0].out_value = &cmd;
873 fields[0].out_mask = NULL;
874 fields[0].in_value = NULL;
875 fields[0].in_check_value = NULL;
876 fields[0].in_check_mask = NULL;
877 fields[0].in_handler = NULL;
878 fields[0].in_handler_priv = NULL;
879
880 fields[1].device = xscale->jtag_info.chain_pos;
881 fields[1].num_bits = 27;
882 fields[1].out_value = packet;
883 fields[1].out_mask = NULL;
884 fields[1].in_value = NULL;
885 fields[1].in_check_value = NULL;
886 fields[1].in_check_mask = NULL;
887 fields[1].in_handler = NULL;
888 fields[1].in_handler_priv = NULL;
889
890 jtag_add_dr_scan(2, fields, -1);
891
892 return ERROR_OK;
893 }
894
895 int xscale_update_vectors(target_t *target)
896 {
897 armv4_5_common_t *armv4_5 = target->arch_info;
898 xscale_common_t *xscale = armv4_5->arch_info;
899 int i;
900 int retval;
901
902 u32 low_reset_branch, high_reset_branch;
903
904 for (i = 1; i < 8; i++)
905 {
906 /* if there's a static vector specified for this exception, override */
907 if (xscale->static_high_vectors_set & (1 << i))
908 {
909 xscale->high_vectors[i] = xscale->static_high_vectors[i];
910 }
911 else
912 {
913 retval=target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
914 if (retval == ERROR_TARGET_TIMEOUT)
915 return retval;
916 if (retval!=ERROR_OK)
917 {
918 /* Some of these reads will fail as part of normal execution */
919 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
920 }
921 }
922 }
923
924 for (i = 1; i < 8; i++)
925 {
926 if (xscale->static_low_vectors_set & (1 << i))
927 {
928 xscale->low_vectors[i] = xscale->static_low_vectors[i];
929 }
930 else
931 {
932 retval=target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
933 if (retval == ERROR_TARGET_TIMEOUT)
934 return retval;
935 if (retval!=ERROR_OK)
936 {
937 /* Some of these reads will fail as part of normal execution */
938 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
939 }
940 }
941 }
942
943 /* calculate branches to debug handler */
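/* ARM B-instruction offset = (destination - vector address - 8) >> 2;
 * the vectors branch into the debug handler at handler_address + 0x20 */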
944 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
945 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
946
947 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
948 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
949
950 /* invalidate and load exception vectors in mini i-cache */
951 xscale_invalidate_ic_line(target, 0x0);
952 xscale_invalidate_ic_line(target, 0xffff0000);
953
954 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
955 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
956
957 return ERROR_OK;
958 }
959
960 int xscale_arch_state(struct target_s *target)
961 {
962 armv4_5_common_t *armv4_5 = target->arch_info;
963 xscale_common_t *xscale = armv4_5->arch_info;
964
965 char *state[] =
966 {
967 "disabled", "enabled"
968 };
969
970 char *arch_dbg_reason[] =
971 {
972 "", "\n(processor reset)", "\n(trace buffer full)"
973 };
974
975 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
976 {
977 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
978 exit(-1);
979 }
980
981 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
982 "cpsr: 0x%8.8x pc: 0x%8.8x\n"
983 "MMU: %s, D-Cache: %s, I-Cache: %s"
984 "%s",
985 armv4_5_state_strings[armv4_5->core_state],
986 Jim_Nvp_value2name_simple( nvp_target_debug_reason, target->debug_reason )->name ,
987 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
988 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
989 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
990 state[xscale->armv4_5_mmu.mmu_enabled],
991 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
992 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
993 arch_dbg_reason[xscale->arch_debug_reason]);
994
995 return ERROR_OK;
996 }
997
998 int xscale_poll(target_t *target)
999 {
1000 int retval=ERROR_OK;
1001 armv4_5_common_t *armv4_5 = target->arch_info;
1002 xscale_common_t *xscale = armv4_5->arch_info;
1003
1004 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
1005 {
1006 enum target_state previous_state = target->state;
1007 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
1008 {
1009
1010 /* there's data to read from the tx register, we entered debug state */
1011 xscale->handler_running = 1;
1012
1013 target->state = TARGET_HALTED;
1014
1015 /* process debug entry, fetching current mode regs */
1016 retval = xscale_debug_entry(target);
1017 }
1018 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1019 {
1020 LOG_USER("error while polling TX register, reset CPU");
1021 /* here we "lie" so GDB won't get stuck and a reset can be performed */
1022 target->state = TARGET_HALTED;
1023 }
1024
1025 /* debug_entry could have overwritten target state (i.e. immediate resume)
1026 * don't signal event handlers in that case
1027 */
1028 if (target->state != TARGET_HALTED)
1029 return ERROR_OK;
1030
1031 /* if target was running, signal that we halted
1032 * otherwise we reentered from debug execution */
1033 if (previous_state == TARGET_RUNNING)
1034 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1035 else
1036 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
1037 }
1038
1039 return retval;
1040 }
1041
1042 int xscale_debug_entry(target_t *target)
1043 {
1044 armv4_5_common_t *armv4_5 = target->arch_info;
1045 xscale_common_t *xscale = armv4_5->arch_info;
1046 u32 pc;
1047 u32 buffer[10];
1048 int i;
1049 int retval;
1050
1051 u32 moe;
1052
1053 /* clear external dbg break (will be written on next DCSR read) */
1054 xscale->external_debug_break = 0;
1055 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1056 return retval;
1057
1058 /* get r0, pc, r1 to r7 and cpsr */
1059 if ((retval=xscale_receive(target, buffer, 10))!=ERROR_OK)
1060 return retval;
1061
1062 /* move r0 from buffer to register cache */
1063 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
1064 armv4_5->core_cache->reg_list[0].dirty = 1;
1065 armv4_5->core_cache->reg_list[0].valid = 1;
1066 LOG_DEBUG("r0: 0x%8.8x", buffer[0]);
1067
1068 /* move pc from buffer to register cache */
1069 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
1070 armv4_5->core_cache->reg_list[15].dirty = 1;
1071 armv4_5->core_cache->reg_list[15].valid = 1;
1072 LOG_DEBUG("pc: 0x%8.8x", buffer[1]);
1073
1074 /* move data from buffer to register cache */
1075 for (i = 1; i <= 7; i++)
1076 {
1077 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
1078 armv4_5->core_cache->reg_list[i].dirty = 1;
1079 armv4_5->core_cache->reg_list[i].valid = 1;
1080 LOG_DEBUG("r%i: 0x%8.8x", i, buffer[i + 1]);
1081 }
1082
1083 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
1084 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
1085 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
1086 LOG_DEBUG("cpsr: 0x%8.8x", buffer[9]);
1087
1088 armv4_5->core_mode = buffer[9] & 0x1f;
1089 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
1090 {
1091 target->state = TARGET_UNKNOWN;
1092 LOG_ERROR("cpsr contains invalid mode value - communication failure");
1093 return ERROR_TARGET_FAILURE;
1094 }
1095 LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
1096
1097 if (buffer[9] & 0x20)
1098 armv4_5->core_state = ARMV4_5_STATE_THUMB;
1099 else
1100 armv4_5->core_state = ARMV4_5_STATE_ARM;
1101
1102
1103 if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
1104 return ERROR_FAIL;
1105
1106 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1107 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
1108 {
1109 xscale_receive(target, buffer, 8);
1110 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1111 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
1112 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
1113 }
1114 else
1115 {
1116 /* r8 to r14, but no spsr */
1117 xscale_receive(target, buffer, 7);
1118 }
1119
1120 /* move data from buffer to register cache */
1121 for (i = 8; i <= 14; i++)
1122 {
1123 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
1124 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
1125 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
1126 }
1127
1128 /* examine debug reason */
1129 xscale_read_dcsr(target);
1130 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
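/* MOE = "method of entry", DCSR bits [4:2] */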
1131
1132 /* stored PC (for calculating fixup) */
1133 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1134
1135 switch (moe)
1136 {
1137 case 0x0: /* Processor reset */
1138 target->debug_reason = DBG_REASON_DBGRQ;
1139 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1140 pc -= 4;
1141 break;
1142 case 0x1: /* Instruction breakpoint hit */
1143 target->debug_reason = DBG_REASON_BREAKPOINT;
1144 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1145 pc -= 4;
1146 break;
1147 case 0x2: /* Data breakpoint hit */
1148 target->debug_reason = DBG_REASON_WATCHPOINT;
1149 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1150 pc -= 4;
1151 break;
1152 case 0x3: /* BKPT instruction executed */
1153 target->debug_reason = DBG_REASON_BREAKPOINT;
1154 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1155 pc -= 4;
1156 break;
1157 case 0x4: /* Ext. debug event */
1158 target->debug_reason = DBG_REASON_DBGRQ;
1159 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1160 pc -= 4;
1161 break;
1162 case 0x5: /* Vector trap occurred */
1163 target->debug_reason = DBG_REASON_BREAKPOINT;
1164 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1165 pc -= 4;
1166 break;
1167 case 0x6: /* Trace buffer full break */
1168 target->debug_reason = DBG_REASON_DBGRQ;
1169 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1170 pc -= 4;
1171 break;
1172 case 0x7: /* Reserved */
1173 default:
1174 LOG_ERROR("Method of Entry is 'Reserved'");
1175 exit(-1);
1176 break;
1177 }
1178
1179 /* apply PC fixup */
1180 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1181
1182 /* on the first debug entry, identify cache type */
1183 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1184 {
1185 u32 cache_type_reg;
1186
1187 /* read cp15 cache type register */
1188 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1189 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1190
1191 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1192 }
1193
1194 /* examine MMU and Cache settings */
1195 /* read cp15 control register */
1196 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1197 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1198 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1199 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1200 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1201
1202 /* tracing enabled, read collected trace data */
1203 if (xscale->trace.buffer_enabled)
1204 {
1205 xscale_read_trace(target);
1206 xscale->trace.buffer_fill--;
1207
1208 /* resume if we're still collecting trace data */
1209 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1210 && (xscale->trace.buffer_fill > 0))
1211 {
1212 xscale_resume(target, 1, 0x0, 1, 0);
1213 }
1214 else
1215 {
1216 xscale->trace.buffer_enabled = 0;
1217 }
1218 }
1219
1220 return ERROR_OK;
1221 }
1222
1223 int xscale_halt(target_t *target)
1224 {
1225 armv4_5_common_t *armv4_5 = target->arch_info;
1226 xscale_common_t *xscale = armv4_5->arch_info;
1227
1228 LOG_DEBUG("target->state: %s",
1229 Jim_Nvp_value2name_simple( nvp_target_state, target->state )->name);
1230
1231 if (target->state == TARGET_HALTED)
1232 {
1233 LOG_DEBUG("target was already halted");
1234 return ERROR_OK;
1235 }
1236 else if (target->state == TARGET_UNKNOWN)
1237 {
1238 /* this must not happen for an XScale target */
1239 LOG_ERROR("target was in unknown state when halt was requested");
1240 return ERROR_TARGET_INVALID;
1241 }
1242 else if (target->state == TARGET_RESET)
1243 {
1244 LOG_DEBUG("target->state == TARGET_RESET");
1245 }
1246 else
1247 {
1248 /* assert external dbg break */
1249 xscale->external_debug_break = 1;
1250 xscale_read_dcsr(target);
1251
1252 target->debug_reason = DBG_REASON_DBGRQ;
1253 }
1254
1255 return ERROR_OK;
1256 }
1257
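/* Single-stepping is implemented with the IBCR0 instruction breakpoint: program it
 * with the predicted address of the next instruction (bit 0 = enable); the core
 * halts again when execution reaches that address. */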
1258 int xscale_enable_single_step(struct target_s *target, u32 next_pc)
1259 {
1260 armv4_5_common_t *armv4_5 = target->arch_info;
1261 xscale_common_t *xscale= armv4_5->arch_info;
1262 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1263
1264 if (xscale->ibcr0_used)
1265 {
1266 breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1267
1268 if (ibcr0_bp)
1269 {
1270 xscale_unset_breakpoint(target, ibcr0_bp);
1271 }
1272 else
1273 {
1274 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1275 exit(-1);
1276 }
1277 }
1278
1279 xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1280
1281 return ERROR_OK;
1282 }
1283
1284 int xscale_disable_single_step(struct target_s *target)
1285 {
1286 armv4_5_common_t *armv4_5 = target->arch_info;
1287 xscale_common_t *xscale= armv4_5->arch_info;
1288 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1289
1290 xscale_set_reg_u32(ibcr0, 0x0);
1291
1292 return ERROR_OK;
1293 }
1294
1295 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
1296 {
1297 armv4_5_common_t *armv4_5 = target->arch_info;
1298 xscale_common_t *xscale= armv4_5->arch_info;
1299 breakpoint_t *breakpoint = target->breakpoints;
1300
1301 u32 current_pc;
1302
1303 int retval;
1304 int i;
1305
1306 LOG_DEBUG("-");
1307
1308 if (target->state != TARGET_HALTED)
1309 {
1310 LOG_WARNING("target not halted");
1311 return ERROR_TARGET_NOT_HALTED;
1312 }
1313
1314 if (!debug_execution)
1315 {
1316 target_free_all_working_areas(target);
1317 }
1318
1319 /* update vector tables */
1320 if ((retval=xscale_update_vectors(target))!=ERROR_OK)
1321 return retval;
1322
1323 /* current = 1: continue on current pc, otherwise continue at <address> */
1324 if (!current)
1325 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1326
1327 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1328
1329 /* if we're at the reset vector, we have to simulate the branch */
1330 if (current_pc == 0x0)
1331 {
1332 arm_simulate_step(target, NULL);
1333 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1334 }
1335
1336 /* the front-end may request us not to handle breakpoints */
1337 if (handle_breakpoints)
1338 {
1339 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1340 {
1341 u32 next_pc;
1342
1343 /* there's a breakpoint at the current PC, we have to step over it */
1344 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1345 xscale_unset_breakpoint(target, breakpoint);
1346
1347 /* calculate PC of next instruction */
1348 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1349 {
1350 u32 current_opcode;
1351 target_read_u32(target, current_pc, &current_opcode);
1352 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1353 }
1354
1355 LOG_DEBUG("enable single-step");
1356 xscale_enable_single_step(target, next_pc);
1357
1358 /* restore banked registers */
1359 xscale_restore_context(target);
1360
1361 /* send resume request (command 0x30 or 0x31)
1362 * clean the trace buffer if it is to be enabled (0x62) */
1363 if (xscale->trace.buffer_enabled)
1364 {
1365 xscale_send_u32(target, 0x62);
1366 xscale_send_u32(target, 0x31);
1367 }
1368 else
1369 xscale_send_u32(target, 0x30);
1370
1371 /* send CPSR */
1372 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1373 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1374
1375 for (i = 7; i >= 0; i--)
1376 {
1377 /* send register */
1378 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1379 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1380 }
1381
1382 /* send PC */
1383 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1384 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1385
1386 /* wait for and process debug entry */
1387 xscale_debug_entry(target);
1388
1389 LOG_DEBUG("disable single-step");
1390 xscale_disable_single_step(target);
1391
1392 LOG_DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
1393 xscale_set_breakpoint(target, breakpoint);
1394 }
1395 }
1396
1397 /* enable any pending breakpoints and watchpoints */
1398 xscale_enable_breakpoints(target);
1399 xscale_enable_watchpoints(target);
1400
1401 /* restore banked registers */
1402 xscale_restore_context(target);
1403
1404 /* send resume request (command 0x30 or 0x31)
1405 * clean the trace buffer if it is to be enabled (0x62) */
1406 if (xscale->trace.buffer_enabled)
1407 {
1408 xscale_send_u32(target, 0x62);
1409 xscale_send_u32(target, 0x31);
1410 }
1411 else
1412 xscale_send_u32(target, 0x30);
1413
1414 /* send CPSR */
1415 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1416 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1417
1418 for (i = 7; i >= 0; i--)
1419 {
1420 /* send register */
1421 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1422 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1423 }
1424
1425 /* send PC */
1426 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1427 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1428
1429 target->debug_reason = DBG_REASON_NOTHALTED;
1430
1431 if (!debug_execution)
1432 {
1433 /* registers are now invalid */
1434 armv4_5_invalidate_core_regs(target);
1435 target->state = TARGET_RUNNING;
1436 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1437 }
1438 else
1439 {
1440 target->state = TARGET_DEBUG_RUNNING;
1441 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1442 }
1443
1444 LOG_DEBUG("target resumed");
1445
1446 xscale->handler_running = 1;
1447
1448 return ERROR_OK;
1449 }
1450
1451 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
1452 {
1453 armv4_5_common_t *armv4_5 = target->arch_info;
1454 xscale_common_t *xscale = armv4_5->arch_info;
1455 breakpoint_t *breakpoint = target->breakpoints;
1456
1457 u32 current_pc, next_pc;
1458 int i;
1459 int retval;
1460
1461 if (target->state != TARGET_HALTED)
1462 {
1463 LOG_WARNING("target not halted");
1464 return ERROR_TARGET_NOT_HALTED;
1465 }
1466
1467 /* current = 1: continue on current pc, otherwise continue at <address> */
1468 if (!current)
1469 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1470
1471 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1472
1473 /* if we're at the reset vector, we have to simulate the step */
1474 if (current_pc == 0x0)
1475 {
1476 arm_simulate_step(target, NULL);
1477 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1478
1479 target->debug_reason = DBG_REASON_SINGLESTEP;
1480 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1481
1482 return ERROR_OK;
1483 }
1484
1485 /* the front-end may request us not to handle breakpoints */
1486 if (handle_breakpoints)
1487 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1488 {
1489 xscale_unset_breakpoint(target, breakpoint);
1490 }
1491
1492 target->debug_reason = DBG_REASON_SINGLESTEP;
1493
1494 /* calculate PC of next instruction */
1495 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1496 {
1497 u32 current_opcode;
1498 target_read_u32(target, current_pc, &current_opcode);
1499 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1500 }
1501
1502 LOG_DEBUG("enable single-step");
1503 xscale_enable_single_step(target, next_pc);
1504
1505 /* restore banked registers */
1506 xscale_restore_context(target);
1507
1508 /* send resume request (command 0x30 or 0x31)
1509 * clean the trace buffer if it is to be enabled (0x62) */
1510 if (xscale->trace.buffer_enabled)
1511 {
1512 xscale_send_u32(target, 0x62);
1513 xscale_send_u32(target, 0x31);
1514 }
1515 else
1516 xscale_send_u32(target, 0x30);
1517
1518 /* send CPSR */
1519 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1520 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1521
1522 for (i = 7; i >= 0; i--)
1523 {
1524 /* send register */
1525 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1526 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1527 }
1528
1529 /* send PC */
1530 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1531 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1532
1533 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1534
1535 /* registers are now invalid */
1536 armv4_5_invalidate_core_regs(target);
1537
1538 /* wait for and process debug entry */
1539 xscale_debug_entry(target);
1540
1541 LOG_DEBUG("disable single-step");
1542 xscale_disable_single_step(target);
1543
1544 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1545
1546 if (breakpoint)
1547 {
1548 xscale_set_breakpoint(target, breakpoint);
1549 }
1550
1551 LOG_DEBUG("target stepped");
1552
1553 return ERROR_OK;
1554
1555 }
1556
1557 int xscale_assert_reset(target_t *target)
1558 {
1559 armv4_5_common_t *armv4_5 = target->arch_info;
1560 xscale_common_t *xscale = armv4_5->arch_info;
1561
1562 LOG_DEBUG("target->state: %s",
1563 Jim_Nvp_value2name_simple( nvp_target_state, target->state )->name);
1564
1565 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1566 * end up in T-L-R, which would reset JTAG
1567 */
1568 jtag_add_end_state(TAP_RTI);
1569 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
1570
1571 /* set Hold reset, Halt mode and Trap Reset */
1572 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1573 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1574 xscale_write_dcsr(target, 1, 0);
1575
1576 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1577 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, 0x7f);
1578 jtag_execute_queue();
1579
1580 /* assert reset */
1581 jtag_add_reset(0, 1);
1582
1583 /* sleep 1ms, to be sure we fulfill any requirements */
1584 jtag_add_sleep(1000);
1585 jtag_execute_queue();
1586
1587 target->state = TARGET_RESET;
1588
1589 if (target->reset_halt)
1590 {
1591 int retval;
1592 if ((retval = target_halt(target))!=ERROR_OK)
1593 return retval;
1594 }
1595
1596 return ERROR_OK;
1597 }
1598
1599 int xscale_deassert_reset(target_t *target)
1600 {
1601 armv4_5_common_t *armv4_5 = target->arch_info;
1602 xscale_common_t *xscale = armv4_5->arch_info;
1603
1604 fileio_t debug_handler;
1605 u32 address;
1606 u32 binary_size;
1607
1608 u32 buf_cnt;
1609 int i;
1610 int retval;
1611
1612 breakpoint_t *breakpoint = target->breakpoints;
1613
1614 LOG_DEBUG("-");
1615
1616 xscale->ibcr_available = 2;
1617 xscale->ibcr0_used = 0;
1618 xscale->ibcr1_used = 0;
1619
1620 xscale->dbr_available = 2;
1621 xscale->dbr0_used = 0;
1622 xscale->dbr1_used = 0;
1623
1624 /* mark all hardware breakpoints as unset */
1625 while (breakpoint)
1626 {
1627 if (breakpoint->type == BKPT_HARD)
1628 {
1629 breakpoint->set = 0;
1630 }
1631 breakpoint = breakpoint->next;
1632 }
1633
1634 if (!xscale->handler_installed)
1635 {
1636 /* release SRST */
1637 jtag_add_reset(0, 0);
1638
1639 /* wait 300ms; 150 and 100ms were not enough */
1640 jtag_add_sleep(300*1000);
1641
1642 jtag_add_runtest(2030, TAP_RTI);
1643 jtag_execute_queue();
1644
1645 /* set Hold reset, Halt mode and Trap Reset */
1646 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1647 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1648 xscale_write_dcsr(target, 1, 0);
1649
1650 /* Load debug handler */
1651 if (fileio_open(&debug_handler, "xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
1652 {
1653 return ERROR_OK;
1654 }
1655
1656 if ((binary_size = debug_handler.size) % 4)
1657 {
1658 LOG_ERROR("debug_handler.bin: size not a multiple of 4");
1659 exit(-1);
1660 }
1661
1662 if (binary_size > 0x800)
1663 {
1664 LOG_ERROR("debug_handler.bin: larger than 2kb");
1665 exit(-1);
1666 }
1667
1668 binary_size = CEIL(binary_size, 32) * 32;
1669
1670 address = xscale->handler_address;
1671 while (binary_size > 0)
1672 {
1673 u32 cache_line[8];
1674 u8 buffer[32];
1675
1676 if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
1677 {
1678 fileio_close(&debug_handler);
1679 return retval;
}
1680
1681 for (i = 0; i < buf_cnt; i += 4)
1682 {
1683 /* convert LE buffer to host-endian u32 */
1684 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1685 }
1686
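/* pad the rest of the cache line with 0xe1a08008 (mov r8, r8), an ARM NOP */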
1687 for (; buf_cnt < 32; buf_cnt += 4)
1688 {
1689 cache_line[buf_cnt / 4] = 0xe1a08008;
1690 }
1691
1692 /* only load addresses other than the reset vectors */
1693 if ((address % 0x400) != 0x0)
1694 {
1695 xscale_load_ic(target, 1, address, cache_line);
1696 }
1697
1698 address += buf_cnt;
1699 binary_size -= buf_cnt;
1700 }
1701
1702 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
1703 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
1704
1705 jtag_add_runtest(30, TAP_RTI);
1706
1707 jtag_add_sleep(100000);
1708
1709 /* set Hold reset, Halt mode and Trap Reset */
1710 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1711 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1712 xscale_write_dcsr(target, 1, 0);
1713
1714 /* clear Hold reset to let the target run (should enter debug handler) */
1715 xscale_write_dcsr(target, 0, 1);
1716 target->state = TARGET_RUNNING;
1717
1718 if (!target->reset_halt)
1719 {
1720 jtag_add_sleep(10000);
1721
1722 /* we should have entered debug now */
1723 xscale_debug_entry(target);
1724 target->state = TARGET_HALTED;
1725
1726 /* resume the target */
1727 xscale_resume(target, 1, 0x0, 1, 0);
1728 }
1729
1730 fileio_close(&debug_handler);
1731 }
1732 else
1733 {
1734 jtag_add_reset(0, 0);
1735 }
1736
1737
1738 return ERROR_OK;
1739 }
1740
1741 int xscale_soft_reset_halt(struct target_s *target)
1742 {
1743
1744 return ERROR_OK;
1745 }
1746
1747 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
1748 {
1749
1750 return ERROR_OK;
1751 }
1752
1753 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
1754 {
1755
1756 return ERROR_OK;
1757 }
1758
1759 int xscale_full_context(target_t *target)
1760 {
1761 armv4_5_common_t *armv4_5 = target->arch_info;
1762
1763 u32 *buffer;
1764
1765 int i, j;
1766
1767 LOG_DEBUG("-");
1768
1769 if (target->state != TARGET_HALTED)
1770 {
1771 LOG_WARNING("target not halted");
1772 return ERROR_TARGET_NOT_HALTED;
1773 }
1774
1775 buffer = malloc(4 * 8);
1776
1777 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1778 * we can't enter User mode on an XScale (unpredictable),
1779 * but User shares registers with SYS
1780 */
1781 for(i = 1; i < 7; i++)
1782 {
1783 int valid = 1;
1784
1785 /* check if there are invalid registers in the current mode
1786 */
1787 for (j = 0; j <= 16; j++)
1788 {
1789 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1790 valid = 0;
1791 }
1792
1793 if (!valid)
1794 {
1795 u32 tmp_cpsr;
1796
1797 /* request banked registers */
1798 xscale_send_u32(target, 0x0);
1799
1800 tmp_cpsr = 0x0;
1801 tmp_cpsr |= armv4_5_number_to_mode(i);
1802 tmp_cpsr |= 0xc0; /* I/F bits */
1803
1804 /* send CPSR for desired mode */
1805 xscale_send_u32(target, tmp_cpsr);
1806
1807 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1808 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1809 {
1810 xscale_receive(target, buffer, 8);
1811 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1812 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1813 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1814 }
1815 else
1816 {
1817 xscale_receive(target, buffer, 7);
1818 }
1819
1820 /* move data from buffer to register cache */
1821 for (j = 8; j <= 14; j++)
1822 {
1823 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1824 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1825 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1826 }
1827 }
1828 }
1829
1830 free(buffer);
1831
1832 return ERROR_OK;
1833 }
1834
1835 int xscale_restore_context(target_t *target)
1836 {
1837 armv4_5_common_t *armv4_5 = target->arch_info;
1838
1839 int i, j;
1840
1841 LOG_DEBUG("-");
1842
1843 if (target->state != TARGET_HALTED)
1844 {
1845 LOG_WARNING("target not halted");
1846 return ERROR_TARGET_NOT_HALTED;
1847 }
1848
1849 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1850 * we can't enter User mode on an XScale (unpredictable),
1851 * but User shares registers with SYS
1852 */
1853 for(i = 1; i < 7; i++)
1854 {
1855 int dirty = 0;
1856
1857 /* check if there are invalid registers in the current mode
1858 */
1859 for (j = 8; j <= 14; j++)
1860 {
1861 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1862 dirty = 1;
1863 }
1864
1865 /* if not USR/SYS, check if the SPSR needs to be written */
1866 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1867 {
1868 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1869 dirty = 1;
1870 }
1871
1872 if (dirty)
1873 {
1874 u32 tmp_cpsr;
1875
1876 /* send banked registers */
1877 xscale_send_u32(target, 0x1);
1878
1879 tmp_cpsr = 0x0;
1880 tmp_cpsr |= armv4_5_number_to_mode(i);
1881 tmp_cpsr |= 0xc0; /* I/F bits */
1882
1883 /* send CPSR for desired mode */
1884 xscale_send_u32(target, tmp_cpsr);
1885
1886 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1887 for (j = 8; j <= 14; j++)
1888 {
1889 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1890 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1891 }
1892
1893 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1894 {
1895 				xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1896 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1897 }
1898 }
1899 }
1900
1901 return ERROR_OK;
1902 }
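/* Illustrative sketch (not part of the driver): how the CPSR word sent to the
 * debug handler in the two routines above is composed. The mode encodings are
 * the architectural ARM values (e.g. 0x11 FIQ, 0x13 SVC, 0x1f SYS); 0xc0 sets
 * the I and F bits so interrupts stay masked while the handler switches modes.
 * The helper name is hypothetical and shown for reference only.
 */
#if 0
u32 example_handler_cpsr(int mode_bits)	/* e.g. 0x11 for FIQ, 0x13 for SVC */
{
	u32 tmp_cpsr = 0x0;
	tmp_cpsr |= mode_bits;	/* target processor mode */
	tmp_cpsr |= 0xc0;	/* mask IRQ (0x80) and FIQ (0x40) */
	return tmp_cpsr;	/* FIQ: 0xd1, SVC: 0xd3, SYS: 0xdf, ... */
}
#endif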
1903
1904 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1905 {
1906 armv4_5_common_t *armv4_5 = target->arch_info;
1907 xscale_common_t *xscale = armv4_5->arch_info;
1908 u32 *buf32;
1909 int i;
1910 int retval;
1911
1912 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1913
1914 if (target->state != TARGET_HALTED)
1915 {
1916 LOG_WARNING("target not halted");
1917 return ERROR_TARGET_NOT_HALTED;
1918 }
1919
1920 /* sanitize arguments */
1921 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1922 return ERROR_INVALID_ARGUMENTS;
1923
1924 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1925 return ERROR_TARGET_UNALIGNED_ACCESS;
1926
1927 /* send memory read request (command 0x1n, n: access size) */
1928 if ((retval=xscale_send_u32(target, 0x10 | size))!=ERROR_OK)
1929 return retval;
1930
1931 /* send base address for read request */
1932 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
1933 return retval;
1934
1935 /* send number of requested data words */
1936 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
1937 return retval;
1938
1939 /* receive data from target (count times 32-bit words in host endianness) */
1940 buf32 = malloc(4 * count);
1941 	if ((retval=xscale_receive(target, buf32, count))!=ERROR_OK)
	{
		free(buf32);
		return retval;
	}
1943
1944 /* extract data from host-endian buffer into byte stream */
1945 for (i = 0; i < count; i++)
1946 {
1947 switch (size)
1948 {
1949 case 4:
1950 target_buffer_set_u32(target, buffer, buf32[i]);
1951 buffer += 4;
1952 break;
1953 case 2:
1954 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1955 buffer += 2;
1956 break;
1957 case 1:
1958 *buffer++ = buf32[i] & 0xff;
1959 break;
1960 default:
1961 LOG_ERROR("should never get here");
1962 				free(buf32);
				return ERROR_INVALID_ARGUMENTS;
1963 }
1964 }
1965
1966 free(buf32);
1967
1968 /* examine DCSR, to see if Sticky Abort (SA) got set */
1969 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1970 return retval;
1971 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1972 {
1973 /* clear SA bit */
1974 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
1975 return retval;
1976
1977 return ERROR_TARGET_DATA_ABORT;
1978 }
1979
1980 return ERROR_OK;
1981 }
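/* Illustrative sketch (not part of the driver): what the size switch above does
 * for a single 32-bit word received from the debug handler, assuming a
 * little-endian target. target_buffer_set_u32()/u16() perform the same packing
 * while also honouring the configured target endianness. The helper name is
 * hypothetical.
 */
#if 0
void example_unpack_word(u8 *buffer, u32 word, u32 size)
{
	switch (size)
	{
		case 4:
			buffer[0] = word & 0xff;
			buffer[1] = (word >> 8) & 0xff;
			buffer[2] = (word >> 16) & 0xff;
			buffer[3] = (word >> 24) & 0xff;
			break;
		case 2:
			buffer[0] = word & 0xff;
			buffer[1] = (word >> 8) & 0xff;
			break;
		case 1:
			buffer[0] = word & 0xff;
			break;
	}
}
#endif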
1982
1983 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1984 {
1985 armv4_5_common_t *armv4_5 = target->arch_info;
1986 xscale_common_t *xscale = armv4_5->arch_info;
1987 int retval;
1988
1989 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1990
1991 if (target->state != TARGET_HALTED)
1992 {
1993 LOG_WARNING("target not halted");
1994 return ERROR_TARGET_NOT_HALTED;
1995 }
1996
1997 /* sanitize arguments */
1998 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1999 return ERROR_INVALID_ARGUMENTS;
2000
2001 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2002 return ERROR_TARGET_UNALIGNED_ACCESS;
2003
2004 /* send memory write request (command 0x2n, n: access size) */
2005 if ((retval=xscale_send_u32(target, 0x20 | size))!=ERROR_OK)
2006 return retval;
2007
2008 	/* send base address for write request */
2009 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
2010 return retval;
2011
2012 	/* send number of data words to be written */
2013 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
2014 return retval;
2015
2016 	/* word-by-word transmit variant kept for reference (disabled); xscale_send() below streams the whole buffer */
2017 #if 0
2018 for (i = 0; i < count; i++)
2019 {
2020 switch (size)
2021 {
2022 case 4:
2023 value = target_buffer_get_u32(target, buffer);
2024 xscale_send_u32(target, value);
2025 buffer += 4;
2026 break;
2027 case 2:
2028 value = target_buffer_get_u16(target, buffer);
2029 xscale_send_u32(target, value);
2030 buffer += 2;
2031 break;
2032 case 1:
2033 value = *buffer;
2034 xscale_send_u32(target, value);
2035 buffer += 1;
2036 break;
2037 default:
2038 LOG_ERROR("should never get here");
2039 exit(-1);
2040 }
2041 }
2042 #endif
2043 if ((retval=xscale_send(target, buffer, count, size))!=ERROR_OK)
2044 return retval;
2045
2046 /* examine DCSR, to see if Sticky Abort (SA) got set */
2047 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
2048 return retval;
2049 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2050 {
2051 /* clear SA bit */
2052 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
2053 return retval;
2054
2055 return ERROR_TARGET_DATA_ABORT;
2056 }
2057
2058 return ERROR_OK;
2059 }
2060
2061 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
2062 {
2063 return xscale_write_memory(target, address, 4, count, buffer);
2064 }
2065
2066 u32 xscale_get_ttb(target_t *target)
2067 {
2068 armv4_5_common_t *armv4_5 = target->arch_info;
2069 xscale_common_t *xscale = armv4_5->arch_info;
2070 u32 ttb;
2071
2072 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2073 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2074
2075 return ttb;
2076 }
2077
2078 void xscale_disable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2079 {
2080 armv4_5_common_t *armv4_5 = target->arch_info;
2081 xscale_common_t *xscale = armv4_5->arch_info;
2082 u32 cp15_control;
2083
2084 /* read cp15 control register */
2085 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2086 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2087
2088 if (mmu)
2089 cp15_control &= ~0x1U;
2090
2091 if (d_u_cache)
2092 {
2093 /* clean DCache */
2094 xscale_send_u32(target, 0x50);
2095 xscale_send_u32(target, xscale->cache_clean_address);
2096
2097 /* invalidate DCache */
2098 xscale_send_u32(target, 0x51);
2099
2100 cp15_control &= ~0x4U;
2101 }
2102
2103 if (i_cache)
2104 {
2105 /* invalidate ICache */
2106 xscale_send_u32(target, 0x52);
2107 cp15_control &= ~0x1000U;
2108 }
2109
2110 /* write new cp15 control register */
2111 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2112
2113 /* execute cpwait to ensure outstanding operations complete */
2114 xscale_send_u32(target, 0x53);
2115 }
2116
2117 void xscale_enable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2118 {
2119 armv4_5_common_t *armv4_5 = target->arch_info;
2120 xscale_common_t *xscale = armv4_5->arch_info;
2121 u32 cp15_control;
2122
2123 /* read cp15 control register */
2124 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2125 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2126
2127 if (mmu)
2128 cp15_control |= 0x1U;
2129
2130 if (d_u_cache)
2131 cp15_control |= 0x4U;
2132
2133 if (i_cache)
2134 cp15_control |= 0x1000U;
2135
2136 /* write new cp15 control register */
2137 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2138
2139 /* execute cpwait to ensure outstanding operations complete */
2140 xscale_send_u32(target, 0x53);
2141 }
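/* Illustrative sketch (not part of the driver): the cp15 control register bits
 * toggled by the two routines above. 0x1, 0x4 and 0x1000 correspond to the ARM
 * M (MMU), C (data/unified cache) and I (instruction cache) enable bits. The
 * macro and function names are hypothetical, for reference only.
 */
#if 0
#define XSCALE_CP15_CTRL_MMU_EN		0x1U	/* bit 0 */
#define XSCALE_CP15_CTRL_DCACHE_EN	0x4U	/* bit 2 */
#define XSCALE_CP15_CTRL_ICACHE_EN	0x1000U	/* bit 12 */

u32 example_cp15_control(u32 cp15_control, int mmu, int d_u_cache, int i_cache)
{
	/* set the requested enable bits; disabling clears the same bits with &= ~ */
	if (mmu)
		cp15_control |= XSCALE_CP15_CTRL_MMU_EN;
	if (d_u_cache)
		cp15_control |= XSCALE_CP15_CTRL_DCACHE_EN;
	if (i_cache)
		cp15_control |= XSCALE_CP15_CTRL_ICACHE_EN;
	return cp15_control;
}
#endif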
2142
2143 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2144 {
2145 int retval;
2146 armv4_5_common_t *armv4_5 = target->arch_info;
2147 xscale_common_t *xscale = armv4_5->arch_info;
2148
2149 if (target->state != TARGET_HALTED)
2150 {
2151 LOG_WARNING("target not halted");
2152 return ERROR_TARGET_NOT_HALTED;
2153 }
2154
2155 if (xscale->force_hw_bkpts)
2156 breakpoint->type = BKPT_HARD;
2157
2158 if (breakpoint->set)
2159 {
2160 LOG_WARNING("breakpoint already set");
2161 return ERROR_OK;
2162 }
2163
2164 if (breakpoint->type == BKPT_HARD)
2165 {
2166 u32 value = breakpoint->address | 1;
2167 if (!xscale->ibcr0_used)
2168 {
2169 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2170 xscale->ibcr0_used = 1;
2171 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2172 }
2173 else if (!xscale->ibcr1_used)
2174 {
2175 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2176 xscale->ibcr1_used = 1;
2177 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2178 }
2179 else
2180 {
2181 LOG_ERROR("BUG: no hardware comparator available");
2182 return ERROR_OK;
2183 }
2184 }
2185 else if (breakpoint->type == BKPT_SOFT)
2186 {
2187 if (breakpoint->length == 4)
2188 {
2189 /* keep the original instruction in target endianness */
2190 if((retval = target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2191 {
2192 return retval;
2193 }
2194 			/* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
2195 if((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2196 {
2197 return retval;
2198 }
2199 }
2200 else
2201 {
2202 /* keep the original instruction in target endianness */
2203 if((retval = target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2204 {
2205 return retval;
2206 }
2207 			/* write the bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
2208 if((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2209 {
2210 return retval;
2211 }
2212 }
2213 breakpoint->set = 1;
2214 }
2215
2216 return ERROR_OK;
2217
2218 }
2219
2220 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2221 {
2222 armv4_5_common_t *armv4_5 = target->arch_info;
2223 xscale_common_t *xscale = armv4_5->arch_info;
2224
2225 if (target->state != TARGET_HALTED)
2226 {
2227 LOG_WARNING("target not halted");
2228 return ERROR_TARGET_NOT_HALTED;
2229 }
2230
2231 if (xscale->force_hw_bkpts)
2232 {
2233 LOG_DEBUG("forcing use of hardware breakpoint at address 0x%8.8x", breakpoint->address);
2234 breakpoint->type = BKPT_HARD;
2235 }
2236
2237 	if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2238 	{
2239 		LOG_INFO("no breakpoint unit available for hardware breakpoint");
2240 		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2241 	}
2242 	else if (breakpoint->type == BKPT_HARD)
2243 	{
2244 		xscale->ibcr_available--;
2245 	}
2246
2247 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2248 {
2249 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2250 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2251 }
2252
2253 return ERROR_OK;
2254 }
2255
2256 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2257 {
2258 int retval;
2259 armv4_5_common_t *armv4_5 = target->arch_info;
2260 xscale_common_t *xscale = armv4_5->arch_info;
2261
2262 if (target->state != TARGET_HALTED)
2263 {
2264 LOG_WARNING("target not halted");
2265 return ERROR_TARGET_NOT_HALTED;
2266 }
2267
2268 if (!breakpoint->set)
2269 {
2270 LOG_WARNING("breakpoint not set");
2271 return ERROR_OK;
2272 }
2273
2274 if (breakpoint->type == BKPT_HARD)
2275 {
2276 if (breakpoint->set == 1)
2277 {
2278 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2279 xscale->ibcr0_used = 0;
2280 }
2281 else if (breakpoint->set == 2)
2282 {
2283 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2284 xscale->ibcr1_used = 0;
2285 }
2286 breakpoint->set = 0;
2287 }
2288 else
2289 {
2290 /* restore original instruction (kept in target endianness) */
2291 if (breakpoint->length == 4)
2292 {
2293 if((retval = target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2294 {
2295 return retval;
2296 }
2297 }
2298 else
2299 {
2300 if((retval = target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2301 {
2302 return retval;
2303 }
2304 }
2305 breakpoint->set = 0;
2306 }
2307
2308 return ERROR_OK;
2309 }
2310
2311 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2312 {
2313 armv4_5_common_t *armv4_5 = target->arch_info;
2314 xscale_common_t *xscale = armv4_5->arch_info;
2315
2316 if (target->state != TARGET_HALTED)
2317 {
2318 LOG_WARNING("target not halted");
2319 return ERROR_TARGET_NOT_HALTED;
2320 }
2321
2322 if (breakpoint->set)
2323 {
2324 xscale_unset_breakpoint(target, breakpoint);
2325 }
2326
2327 if (breakpoint->type == BKPT_HARD)
2328 xscale->ibcr_available++;
2329
2330 return ERROR_OK;
2331 }
2332
2333 int xscale_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2334 {
2335 armv4_5_common_t *armv4_5 = target->arch_info;
2336 xscale_common_t *xscale = armv4_5->arch_info;
2337 u8 enable=0;
2338 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2339 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2340
2341 if (target->state != TARGET_HALTED)
2342 {
2343 LOG_WARNING("target not halted");
2344 return ERROR_TARGET_NOT_HALTED;
2345 }
2346
2347 xscale_get_reg(dbcon);
2348
2349 switch (watchpoint->rw)
2350 {
2351 case WPT_READ:
2352 enable = 0x3;
2353 break;
2354 case WPT_ACCESS:
2355 enable = 0x2;
2356 break;
2357 case WPT_WRITE:
2358 enable = 0x1;
2359 break;
2360 default:
2361 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2362 }
2363
2364 if (!xscale->dbr0_used)
2365 {
2366 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2367 dbcon_value |= enable;
2368 xscale_set_reg_u32(dbcon, dbcon_value);
2369 watchpoint->set = 1;
2370 xscale->dbr0_used = 1;
2371 }
2372 else if (!xscale->dbr1_used)
2373 {
2374 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2375 dbcon_value |= enable << 2;
2376 xscale_set_reg_u32(dbcon, dbcon_value);
2377 watchpoint->set = 2;
2378 xscale->dbr1_used = 1;
2379 }
2380 else
2381 {
2382 LOG_ERROR("BUG: no hardware comparator available");
2383 return ERROR_OK;
2384 }
2385
2386 return ERROR_OK;
2387 }
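/* Illustrative sketch (not part of the driver): how the DBCON value above is
 * composed. Each data breakpoint register gets a two-bit enable field, DBR0 in
 * bits [1:0] and DBR1 in bits [3:2]; 0x1 = store only, 0x2 = any access,
 * 0x3 = load only, matching the WPT_WRITE/WPT_ACCESS/WPT_READ cases above.
 * The helper name is hypothetical.
 */
#if 0
u32 example_dbcon(u32 dbcon_value, int dbr, u8 enable)
{
	if (dbr == 0)
		dbcon_value |= enable;		/* DBR0 enable field, bits [1:0] */
	else
		dbcon_value |= enable << 2;	/* DBR1 enable field, bits [3:2] */
	return dbcon_value;
}
#endif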
2388
2389 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2390 {
2391 armv4_5_common_t *armv4_5 = target->arch_info;
2392 xscale_common_t *xscale = armv4_5->arch_info;
2393
2394 if (target->state != TARGET_HALTED)
2395 {
2396 LOG_WARNING("target not halted");
2397 return ERROR_TARGET_NOT_HALTED;
2398 }
2399
2400 if (xscale->dbr_available < 1)
2401 {
2402 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2403 }
2404
2405 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2406 {
2407 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2408 }
2409
2410 xscale->dbr_available--;
2411
2412 return ERROR_OK;
2413 }
2414
2415 int xscale_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2416 {
2417 armv4_5_common_t *armv4_5 = target->arch_info;
2418 xscale_common_t *xscale = armv4_5->arch_info;
2419 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2420 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2421
2422 if (target->state != TARGET_HALTED)
2423 {
2424 LOG_WARNING("target not halted");
2425 return ERROR_TARGET_NOT_HALTED;
2426 }
2427
2428 if (!watchpoint->set)
2429 {
2430 		LOG_WARNING("watchpoint not set");
2431 return ERROR_OK;
2432 }
2433
2434 if (watchpoint->set == 1)
2435 {
2436 dbcon_value &= ~0x3;
2437 xscale_set_reg_u32(dbcon, dbcon_value);
2438 xscale->dbr0_used = 0;
2439 }
2440 else if (watchpoint->set == 2)
2441 {
2442 dbcon_value &= ~0xc;
2443 xscale_set_reg_u32(dbcon, dbcon_value);
2444 xscale->dbr1_used = 0;
2445 }
2446 watchpoint->set = 0;
2447
2448 return ERROR_OK;
2449 }
2450
2451 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2452 {
2453 armv4_5_common_t *armv4_5 = target->arch_info;
2454 xscale_common_t *xscale = armv4_5->arch_info;
2455
2456 if (target->state != TARGET_HALTED)
2457 {
2458 LOG_WARNING("target not halted");
2459 return ERROR_TARGET_NOT_HALTED;
2460 }
2461
2462 if (watchpoint->set)
2463 {
2464 xscale_unset_watchpoint(target, watchpoint);
2465 }
2466
2467 xscale->dbr_available++;
2468
2469 return ERROR_OK;
2470 }
2471
2472 void xscale_enable_watchpoints(struct target_s *target)
2473 {
2474 watchpoint_t *watchpoint = target->watchpoints;
2475
2476 while (watchpoint)
2477 {
2478 if (watchpoint->set == 0)
2479 xscale_set_watchpoint(target, watchpoint);
2480 watchpoint = watchpoint->next;
2481 }
2482 }
2483
2484 void xscale_enable_breakpoints(struct target_s *target)
2485 {
2486 breakpoint_t *breakpoint = target->breakpoints;
2487
2488 /* set any pending breakpoints */
2489 while (breakpoint)
2490 {
2491 if (breakpoint->set == 0)
2492 xscale_set_breakpoint(target, breakpoint);
2493 breakpoint = breakpoint->next;
2494 }
2495 }
2496
2497 int xscale_get_reg(reg_t *reg)
2498 {
2499 xscale_reg_t *arch_info = reg->arch_info;
2500 target_t *target = arch_info->target;
2501 armv4_5_common_t *armv4_5 = target->arch_info;
2502 xscale_common_t *xscale = armv4_5->arch_info;
2503
2504 /* DCSR, TX and RX are accessible via JTAG */
2505 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2506 {
2507 return xscale_read_dcsr(arch_info->target);
2508 }
2509 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2510 {
2511 /* 1 = consume register content */
2512 return xscale_read_tx(arch_info->target, 1);
2513 }
2514 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2515 {
2516 /* can't read from RX register (host -> debug handler) */
2517 return ERROR_OK;
2518 }
2519 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2520 {
2521 /* can't (explicitly) read from TXRXCTRL register */
2522 return ERROR_OK;
2523 }
2524 	else /* Other DBG registers have to be transferred by the debug handler */
2525 {
2526 /* send CP read request (command 0x40) */
2527 xscale_send_u32(target, 0x40);
2528
2529 /* send CP register number */
2530 xscale_send_u32(target, arch_info->dbg_handler_number);
2531
2532 /* read register value */
2533 xscale_read_tx(target, 1);
2534 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2535
2536 reg->dirty = 0;
2537 reg->valid = 1;
2538 }
2539
2540 return ERROR_OK;
2541 }
2542
2543 int xscale_set_reg(reg_t *reg, u8* buf)
2544 {
2545 xscale_reg_t *arch_info = reg->arch_info;
2546 target_t *target = arch_info->target;
2547 armv4_5_common_t *armv4_5 = target->arch_info;
2548 xscale_common_t *xscale = armv4_5->arch_info;
2549 u32 value = buf_get_u32(buf, 0, 32);
2550
2551 /* DCSR, TX and RX are accessible via JTAG */
2552 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2553 {
2554 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2555 return xscale_write_dcsr(arch_info->target, -1, -1);
2556 }
2557 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2558 {
2559 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2560 return xscale_write_rx(arch_info->target);
2561 }
2562 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2563 {
2564 /* can't write to TX register (debug-handler -> host) */
2565 return ERROR_OK;
2566 }
2567 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2568 {
2569 /* can't (explicitly) write to TXRXCTRL register */
2570 return ERROR_OK;
2571 }
2572 	else /* Other DBG registers have to be transferred by the debug handler */
2573 {
2574 /* send CP write request (command 0x41) */
2575 xscale_send_u32(target, 0x41);
2576
2577 /* send CP register number */
2578 xscale_send_u32(target, arch_info->dbg_handler_number);
2579
2580 /* send CP register value */
2581 xscale_send_u32(target, value);
2582 buf_set_u32(reg->value, 0, 32, value);
2583 }
2584
2585 return ERROR_OK;
2586 }
2587
2588 /* convenience wrapper to access XScale specific registers */
2589 int xscale_set_reg_u32(reg_t *reg, u32 value)
2590 {
2591 u8 buf[4];
2592
2593 buf_set_u32(buf, 0, 32, value);
2594
2595 return xscale_set_reg(reg, buf);
2596 }
2597
2598 int xscale_write_dcsr_sw(target_t *target, u32 value)
2599 {
2600 /* get pointers to arch-specific information */
2601 armv4_5_common_t *armv4_5 = target->arch_info;
2602 xscale_common_t *xscale = armv4_5->arch_info;
2603 reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2604 xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
2605
2606 /* send CP write request (command 0x41) */
2607 xscale_send_u32(target, 0x41);
2608
2609 /* send CP register number */
2610 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2611
2612 /* send CP register value */
2613 xscale_send_u32(target, value);
2614 buf_set_u32(dcsr->value, 0, 32, value);
2615
2616 return ERROR_OK;
2617 }
2618
2619 int xscale_read_trace(target_t *target)
2620 {
2621 /* get pointers to arch-specific information */
2622 armv4_5_common_t *armv4_5 = target->arch_info;
2623 xscale_common_t *xscale = armv4_5->arch_info;
2624 xscale_trace_data_t **trace_data_p;
2625
2626 /* 258 words from debug handler
2627 * 256 trace buffer entries
2628 * 2 checkpoint addresses
2629 */
2630 u32 trace_buffer[258];
2631 int is_address[256];
2632 int i, j;
2633
2634 if (target->state != TARGET_HALTED)
2635 {
2636 LOG_WARNING("target must be stopped to read trace data");
2637 return ERROR_TARGET_NOT_HALTED;
2638 }
2639
2640 /* send read trace buffer command (command 0x61) */
2641 xscale_send_u32(target, 0x61);
2642
2643 /* receive trace buffer content */
2644 xscale_receive(target, trace_buffer, 258);
2645
2646 /* parse buffer backwards to identify address entries */
2647 for (i = 255; i >= 0; i--)
2648 {
2649 is_address[i] = 0;
2650 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2651 ((trace_buffer[i] & 0xf0) == 0xd0))
2652 {
2653 			/* mark up to four preceding entries as address bytes, without underrunning the buffer */
			if (i >= 1)
				is_address[--i] = 1;
			if (i >= 1)
				is_address[--i] = 1;
			if (i >= 1)
				is_address[--i] = 1;
			if (i >= 1)
				is_address[--i] = 1;
2661 }
2662 }
2663
2664
2665 /* search first non-zero entry */
2666 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2667 ;
2668
2669 if (j == 256)
2670 {
2671 LOG_DEBUG("no trace data collected");
2672 return ERROR_XSCALE_NO_TRACE_DATA;
2673 }
2674
2675 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2676 ;
2677
2678 *trace_data_p = malloc(sizeof(xscale_trace_data_t));
2679 (*trace_data_p)->next = NULL;
2680 (*trace_data_p)->chkpt0 = trace_buffer[256];
2681 (*trace_data_p)->chkpt1 = trace_buffer[257];
2682 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2683 (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
2684 (*trace_data_p)->depth = 256 - j;
2685
2686 for (i = j; i < 256; i++)
2687 {
2688 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2689 if (is_address[i])
2690 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2691 else
2692 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2693 }
2694
2695 return ERROR_OK;
2696 }
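/* Illustrative sketch (not part of the driver): layout of one trace buffer
 * message byte as decoded by xscale_analyze_trace() below. The upper nibble
 * selects the message type, the lower nibble carries the incremental count of
 * instructions executed since the previous message. The helper name is
 * hypothetical.
 */
#if 0
void example_decode_trace_byte(u8 msg, int *type, int *count)
{
	*type = (msg & 0xf0) >> 4;	/* 0..7 exception, 8 direct branch, 9 indirect,
					 * 12/13 checkpointed branch, 15 roll-over */
	*count = msg & 0xf;		/* instructions executed before this message */
}
#endif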
2697
2698 int xscale_read_instruction(target_t *target, arm_instruction_t *instruction)
2699 {
2700 /* get pointers to arch-specific information */
2701 armv4_5_common_t *armv4_5 = target->arch_info;
2702 xscale_common_t *xscale = armv4_5->arch_info;
2703 int i;
2704 int section = -1;
2705 u32 size_read;
2706 u32 opcode;
2707 int retval;
2708
2709 if (!xscale->trace.image)
2710 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2711
2712 /* search for the section the current instruction belongs to */
2713 for (i = 0; i < xscale->trace.image->num_sections; i++)
2714 {
2715 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2716 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2717 {
2718 section = i;
2719 break;
2720 }
2721 }
2722
2723 if (section == -1)
2724 {
2725 /* current instruction couldn't be found in the image */
2726 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2727 }
2728
2729 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2730 {
2731 u8 buf[4];
2732 if ((retval = image_read_section(xscale->trace.image, section,
2733 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2734 4, buf, &size_read)) != ERROR_OK)
2735 {
2736 LOG_ERROR("error while reading instruction: %i", retval);
2737 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2738 }
2739 opcode = target_buffer_get_u32(target, buf);
2740 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2741 }
2742 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2743 {
2744 u8 buf[2];
2745 if ((retval = image_read_section(xscale->trace.image, section,
2746 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2747 2, buf, &size_read)) != ERROR_OK)
2748 {
2749 LOG_ERROR("error while reading instruction: %i", retval);
2750 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2751 }
2752 opcode = target_buffer_get_u16(target, buf);
2753 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2754 }
2755 else
2756 {
2757 LOG_ERROR("BUG: unknown core state encountered");
2758 exit(-1);
2759 }
2760
2761 return ERROR_OK;
2762 }
2763
2764 int xscale_branch_address(xscale_trace_data_t *trace_data, int i, u32 *target)
2765 {
2766 /* if there are less than four entries prior to the indirect branch message
2767 * we can't extract the address */
2768 if (i < 4)
2769 {
2770 return -1;
2771 }
2772
2773 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2774 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2775
2776 return 0;
2777 }
2778
2779 int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
2780 {
2781 /* get pointers to arch-specific information */
2782 armv4_5_common_t *armv4_5 = target->arch_info;
2783 xscale_common_t *xscale = armv4_5->arch_info;
2784 int next_pc_ok = 0;
2785 u32 next_pc = 0x0;
2786 xscale_trace_data_t *trace_data = xscale->trace.data;
2787 int retval;
2788
2789 while (trace_data)
2790 {
2791 int i, chkpt;
2792 int rollover;
2793 int branch;
2794 int exception;
2795 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2796
2797 chkpt = 0;
2798 rollover = 0;
2799
2800 for (i = 0; i < trace_data->depth; i++)
2801 {
2802 next_pc_ok = 0;
2803 branch = 0;
2804 exception = 0;
2805
2806 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2807 continue;
2808
2809 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2810 {
2811 case 0: /* Exceptions */
2812 case 1:
2813 case 2:
2814 case 3:
2815 case 4:
2816 case 5:
2817 case 6:
2818 case 7:
2819 exception = (trace_data->entries[i].data & 0x70) >> 4;
2820 next_pc_ok = 1;
2821 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2822 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2823 break;
2824 case 8: /* Direct Branch */
2825 branch = 1;
2826 break;
2827 case 9: /* Indirect Branch */
2828 branch = 1;
2829 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2830 {
2831 next_pc_ok = 1;
2832 }
2833 break;
2834 case 13: /* Checkpointed Indirect Branch */
2835 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2836 {
2837 next_pc_ok = 1;
2838 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2839 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2840 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2841 }
2842 /* explicit fall-through */
2843 case 12: /* Checkpointed Direct Branch */
2844 branch = 1;
2845 if (chkpt == 0)
2846 {
2847 next_pc_ok = 1;
2848 next_pc = trace_data->chkpt0;
2849 chkpt++;
2850 }
2851 else if (chkpt == 1)
2852 {
2853 next_pc_ok = 1;
2854 						next_pc = trace_data->chkpt1;
2855 chkpt++;
2856 }
2857 else
2858 {
2859 LOG_WARNING("more than two checkpointed branches encountered");
2860 }
2861 break;
2862 case 15: /* Roll-over */
2863 rollover++;
2864 continue;
2865 default: /* Reserved */
2866 command_print(cmd_ctx, "--- reserved trace message ---");
2867 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2868 return ERROR_OK;
2869 }
2870
2871 if (xscale->trace.pc_ok)
2872 {
2873 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2874 arm_instruction_t instruction;
2875
2876 if ((exception == 6) || (exception == 7))
2877 {
2878 /* IRQ or FIQ exception, no instruction executed */
2879 executed -= 1;
2880 }
2881
2882 while (executed-- >= 0)
2883 {
2884 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2885 {
2886 /* can't continue tracing with no image available */
2887 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2888 {
2889 return retval;
2890 }
2891 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2892 {
2893 /* TODO: handle incomplete images */
2894 }
2895 }
2896
2897 /* a precise abort on a load to the PC is included in the incremental
2898 * word count, other instructions causing data aborts are not included
2899 */
2900 if ((executed == 0) && (exception == 4)
2901 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2902 {
2903 if ((instruction.type == ARM_LDM)
2904 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2905 {
2906 executed--;
2907 }
2908 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2909 && (instruction.info.load_store.Rd != 15))
2910 {
2911 executed--;
2912 }
2913 }
2914
2915 /* only the last instruction executed
2916 * (the one that caused the control flow change)
2917 * could be a taken branch
2918 */
2919 if (((executed == -1) && (branch == 1)) &&
2920 (((instruction.type == ARM_B) ||
2921 (instruction.type == ARM_BL) ||
2922 (instruction.type == ARM_BLX)) &&
2923 (instruction.info.b_bl_bx_blx.target_address != -1)))
2924 {
2925 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2926 }
2927 else
2928 {
2929 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2930 }
2931 command_print(cmd_ctx, "%s", instruction.text);
2932 }
2933
2934 rollover = 0;
2935 }
2936
2937 if (next_pc_ok)
2938 {
2939 xscale->trace.current_pc = next_pc;
2940 xscale->trace.pc_ok = 1;
2941 }
2942 }
2943
2944 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2945 {
2946 arm_instruction_t instruction;
2947 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2948 {
2949 /* can't continue tracing with no image available */
2950 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2951 {
2952 return retval;
2953 }
2954 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2955 {
2956 /* TODO: handle incomplete images */
2957 }
2958 }
2959 command_print(cmd_ctx, "%s", instruction.text);
2960 }
2961
2962 trace_data = trace_data->next;
2963 }
2964
2965 return ERROR_OK;
2966 }
2967
2968 void xscale_build_reg_cache(target_t *target)
2969 {
2970 /* get pointers to arch-specific information */
2971 armv4_5_common_t *armv4_5 = target->arch_info;
2972 xscale_common_t *xscale = armv4_5->arch_info;
2973
2974 reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
2975 xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
2976 int i;
2977 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
2978
2979 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2980 armv4_5->core_cache = (*cache_p);
2981
2982 /* register a register arch-type for XScale dbg registers only once */
2983 if (xscale_reg_arch_type == -1)
2984 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
2985
2986 (*cache_p)->next = malloc(sizeof(reg_cache_t));
2987 cache_p = &(*cache_p)->next;
2988
2989 /* fill in values for the xscale reg cache */
2990 (*cache_p)->name = "XScale registers";
2991 (*cache_p)->next = NULL;
2992 (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
2993 (*cache_p)->num_regs = num_regs;
2994
2995 for (i = 0; i < num_regs; i++)
2996 {
2997 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2998 (*cache_p)->reg_list[i].value = calloc(4, 1);
2999 (*cache_p)->reg_list[i].dirty = 0;
3000 (*cache_p)->reg_list[i].valid = 0;
3001 (*cache_p)->reg_list[i].size = 32;
3002 (*cache_p)->reg_list[i].bitfield_desc = NULL;
3003 (*cache_p)->reg_list[i].num_bitfields = 0;
3004 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
3005 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
3006 arch_info[i] = xscale_reg_arch_info[i];
3007 arch_info[i].target = target;
3008 }
3009
3010 xscale->reg_cache = (*cache_p);
3011 }
3012
3013 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target)
3014 {
3015 return ERROR_OK;
3016 }
3017
3018 int xscale_quit(void)
3019 {
3020
3021 return ERROR_OK;
3022 }
3023
3024 int xscale_init_arch_info(target_t *target, xscale_common_t *xscale, int chain_pos, const char *variant)
3025 {
3026 armv4_5_common_t *armv4_5;
3027 u32 high_reset_branch, low_reset_branch;
3028 int i;
3029
3030 armv4_5 = &xscale->armv4_5_common;
3031
3032 	/* store architecture specific data (none so far) */
3033 xscale->arch_info = NULL;
3034 xscale->common_magic = XSCALE_COMMON_MAGIC;
3035
3036 /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
3037 xscale->variant = strdup(variant);
3038
3039 /* prepare JTAG information for the new target */
3040 xscale->jtag_info.chain_pos = chain_pos;
3041
3042 xscale->jtag_info.dbgrx = 0x02;
3043 xscale->jtag_info.dbgtx = 0x10;
3044 xscale->jtag_info.dcsr = 0x09;
3045 xscale->jtag_info.ldic = 0x07;
3046
3047 if ((strcmp(xscale->variant, "pxa250") == 0) ||
3048 (strcmp(xscale->variant, "pxa255") == 0) ||
3049 (strcmp(xscale->variant, "pxa26x") == 0))
3050 {
3051 xscale->jtag_info.ir_length = 5;
3052 }
3053 else if ((strcmp(xscale->variant, "pxa27x") == 0) ||
3054 (strcmp(xscale->variant, "ixp42x") == 0) ||
3055 (strcmp(xscale->variant, "ixp45x") == 0) ||
3056 (strcmp(xscale->variant, "ixp46x") == 0))
3057 {
3058 xscale->jtag_info.ir_length = 7;
3059 	}
	else
	{
		LOG_ERROR("xscale variant \"%s\" unknown, IR length not set", variant);
	}
3060
3061 /* the debug handler isn't installed (and thus not running) at this time */
3062 xscale->handler_installed = 0;
3063 xscale->handler_running = 0;
3064 xscale->handler_address = 0xfe000800;
3065
3066 /* clear the vectors we keep locally for reference */
3067 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3068 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3069
3070 /* no user-specified vectors have been configured yet */
3071 xscale->static_low_vectors_set = 0x0;
3072 xscale->static_high_vectors_set = 0x0;
3073
3074 /* calculate branches to debug handler */
3075 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3076 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
3077
3078 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3079 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3080
3081 for (i = 1; i <= 7; i++)
3082 {
3083 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3084 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3085 }
3086
3087 /* 64kB aligned region used for DCache cleaning */
3088 xscale->cache_clean_address = 0xfffe0000;
3089
3090 xscale->hold_rst = 0;
3091 xscale->external_debug_break = 0;
3092
3093 xscale->force_hw_bkpts = 1;
3094
3095 xscale->ibcr_available = 2;
3096 xscale->ibcr0_used = 0;
3097 xscale->ibcr1_used = 0;
3098
3099 xscale->dbr_available = 2;
3100 xscale->dbr0_used = 0;
3101 xscale->dbr1_used = 0;
3102
3103 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3104 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3105
3106 xscale->vector_catch = 0x1;
3107
3108 xscale->trace.capture_status = TRACE_IDLE;
3109 xscale->trace.data = NULL;
3110 xscale->trace.image = NULL;
3111 xscale->trace.buffer_enabled = 0;
3112 xscale->trace.buffer_fill = 0;
3113
3114 /* prepare ARMv4/5 specific information */
3115 armv4_5->arch_info = xscale;
3116 armv4_5->read_core_reg = xscale_read_core_reg;
3117 armv4_5->write_core_reg = xscale_write_core_reg;
3118 armv4_5->full_context = xscale_full_context;
3119
3120 armv4_5_init_arch_info(target, armv4_5);
3121
3122 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3123 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3124 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3125 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3126 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3127 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3128 xscale->armv4_5_mmu.has_tiny_pages = 1;
3129 xscale->armv4_5_mmu.mmu_enabled = 0;
3130
3131 return ERROR_OK;
3132 }
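/* Illustrative sketch (not part of the driver): the branch offset computed for
 * the reset vectors above. An ARM B instruction encodes a signed word offset
 * relative to the vector address plus 8 (pipeline prefetch), so the handler
 * entry at handler_address + 0x20 is reached via
 * (destination - vector - 8) >> 2, masked to 24 bits as ARMV4_5_B() expects.
 * The helper name is hypothetical.
 */
#if 0
u32 example_reset_branch_offset(u32 handler_address, u32 vector_base)
{
	u32 destination = handler_address + 0x20;
	return ((destination - vector_base - 0x8) >> 2) & 0xffffff;
}
#endif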
3133
3134 /* target xscale <endianness> <startup_mode> <chain_pos> <variant> */
3135 int xscale_target_create(struct target_s *target, Jim_Interp *interp)
3136 {
3137 xscale_common_t *xscale = calloc(1,sizeof(xscale_common_t));
3138
3139 xscale_init_arch_info(target, xscale, target->chain_position, target->variant);
3140 xscale_build_reg_cache(target);
3141
3142 return ERROR_OK;
3143 }
3144
3145 int xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3146 {
3147 target_t *target = NULL;
3148 armv4_5_common_t *armv4_5;
3149 xscale_common_t *xscale;
3150
3151 u32 handler_address;
3152
3153 if (argc < 2)
3154 {
3155 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3156 return ERROR_OK;
3157 }
3158
3159 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3160 {
3161 LOG_ERROR("no target '%s' configured", args[0]);
3162 return ERROR_OK;
3163 }
3164
3165 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3166 {
3167 return ERROR_OK;
3168 }
3169
3170 handler_address = strtoul(args[1], NULL, 0);
3171
3172 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3173 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3174 {
3175 xscale->handler_address = handler_address;
3176 }
3177 else
3178 {
3179 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3180 }
3181
3182 return ERROR_OK;
3183 }
3184
3185 int xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3186 {
3187 target_t *target = NULL;
3188 armv4_5_common_t *armv4_5;
3189 xscale_common_t *xscale;
3190
3191 u32 cache_clean_address;
3192
3193 if (argc < 2)
3194 {
3195 LOG_ERROR("'xscale cache_clean_address <target#> <address>' command takes two required operands");
3196 return ERROR_OK;
3197 }
3198
3199 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3200 {
3201 LOG_ERROR("no target '%s' configured", args[0]);
3202 return ERROR_OK;
3203 }
3204
3205 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3206 {
3207 return ERROR_OK;
3208 }
3209
3210 cache_clean_address = strtoul(args[1], NULL, 0);
3211
3212 if (cache_clean_address & 0xffff)
3213 {
3214 		LOG_ERROR("xscale cache_clean_address <address> must be 64kB aligned");
3215 }
3216 else
3217 {
3218 xscale->cache_clean_address = cache_clean_address;
3219 }
3220
3221 return ERROR_OK;
3222 }
3223
3224 int xscale_handle_cache_info_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3225 {
3226 target_t *target = get_current_target(cmd_ctx);
3227 armv4_5_common_t *armv4_5;
3228 xscale_common_t *xscale;
3229
3230 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3231 {
3232 return ERROR_OK;
3233 }
3234
3235 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3236 }
3237
3238 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical)
3239 {
3240 armv4_5_common_t *armv4_5;
3241 xscale_common_t *xscale;
3242 int retval;
3243 int type;
3244 u32 cb;
3245 int domain;
3246 u32 ap;
3247
3248
3249 if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
3250 {
3251 return retval;
3252 }
3253 u32 ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3254 if (type == -1)
3255 {
3256 return ret;
3257 }
3258 *physical = ret;
3259 return ERROR_OK;
3260 }
3261
3262 static int xscale_mmu(struct target_s *target, int *enabled)
3263 {
3264 armv4_5_common_t *armv4_5 = target->arch_info;
3265 xscale_common_t *xscale = armv4_5->arch_info;
3266
3267 if (target->state != TARGET_HALTED)
3268 {
3269 LOG_ERROR("Target not halted");
3270 return ERROR_TARGET_INVALID;
3271 }
3272 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3273 return ERROR_OK;
3274 }
3275
3276
3277 int xscale_handle_mmu_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3278 {
3279 target_t *target = get_current_target(cmd_ctx);
3280 armv4_5_common_t *armv4_5;
3281 xscale_common_t *xscale;
3282
3283 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3284 {
3285 return ERROR_OK;
3286 }
3287
3288 if (target->state != TARGET_HALTED)
3289 {
3290 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3291 return ERROR_OK;
3292 }
3293
3294 if (argc >= 1)
3295 {
3296 if (strcmp("enable", args[0]) == 0)
3297 {
3298 xscale_enable_mmu_caches(target, 1, 0, 0);
3299 xscale->armv4_5_mmu.mmu_enabled = 1;
3300 }
3301 else if (strcmp("disable", args[0]) == 0)
3302 {
3303 xscale_disable_mmu_caches(target, 1, 0, 0);
3304 xscale->armv4_5_mmu.mmu_enabled = 0;
3305 }
3306 }
3307
3308 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3309
3310 return ERROR_OK;
3311 }
3312
3313 int xscale_handle_idcache_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3314 {
3315 target_t *target = get_current_target(cmd_ctx);
3316 armv4_5_common_t *armv4_5;
3317 xscale_common_t *xscale;
3318 int icache = 0, dcache = 0;
3319
3320 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3321 {
3322 return ERROR_OK;
3323 }
3324
3325 if (target->state != TARGET_HALTED)
3326 {
3327 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3328 return ERROR_OK;
3329 }
3330
3331 if (strcmp(cmd, "icache") == 0)
3332 icache = 1;
3333 else if (strcmp(cmd, "dcache") == 0)
3334 dcache = 1;
3335
3336 if (argc >= 1)
3337 {
3338 if (strcmp("enable", args[0]) == 0)
3339 {
3340 xscale_enable_mmu_caches(target, 0, dcache, icache);
3341
3342 if (icache)
3343 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3344 else if (dcache)
3345 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3346 }
3347 else if (strcmp("disable", args[0]) == 0)
3348 {
3349 xscale_disable_mmu_caches(target, 0, dcache, icache);
3350
3351 if (icache)
3352 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3353 else if (dcache)
3354 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3355 }
3356 }
3357
3358 if (icache)
3359 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3360
3361 if (dcache)
3362 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3363
3364 return ERROR_OK;
3365 }
3366
3367 int xscale_handle_vector_catch_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3368 {
3369 target_t *target = get_current_target(cmd_ctx);
3370 armv4_5_common_t *armv4_5;
3371 xscale_common_t *xscale;
3372
3373 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3374 {
3375 return ERROR_OK;
3376 }
3377
3378 if (argc < 1)
3379 {
3380 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3381 }
3382 else
3383 {
3384 xscale->vector_catch = strtoul(args[0], NULL, 0);
3385 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3386 xscale_write_dcsr(target, -1, -1);
3387 }
3388
3389 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3390
3391 return ERROR_OK;
3392 }
3393
3394 int xscale_handle_force_hw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3395 {
3396 target_t *target = get_current_target(cmd_ctx);
3397 armv4_5_common_t *armv4_5;
3398 xscale_common_t *xscale;
3399
3400 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3401 {
3402 return ERROR_OK;
3403 }
3404
3405 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3406 {
3407 xscale->force_hw_bkpts = 1;
3408 }
3409 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3410 {
3411 xscale->force_hw_bkpts = 0;
3412 }
3413 else
3414 {
3415 command_print(cmd_ctx, "usage: xscale force_hw_bkpts <enable|disable>");
3416 }
3417
3418 command_print(cmd_ctx, "force hardware breakpoints %s", (xscale->force_hw_bkpts) ? "enabled" : "disabled");
3419
3420 return ERROR_OK;
3421 }
3422
3423 int xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3424 {
3425 target_t *target = get_current_target(cmd_ctx);
3426 armv4_5_common_t *armv4_5;
3427 xscale_common_t *xscale;
3428 u32 dcsr_value;
3429
3430 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3431 {
3432 return ERROR_OK;
3433 }
3434
3435 if (target->state != TARGET_HALTED)
3436 {
3437 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3438 return ERROR_OK;
3439 }
3440
3441 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3442 {
3443 xscale_trace_data_t *td, *next_td;
3444 xscale->trace.buffer_enabled = 1;
3445
3446 /* free old trace data */
3447 td = xscale->trace.data;
3448 while (td)
3449 {
3450 next_td = td->next;
3451
3452 if (td->entries)
3453 free(td->entries);
3454 free(td);
3455 td = next_td;
3456 }
3457 xscale->trace.data = NULL;
3458 }
3459 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3460 {
3461 xscale->trace.buffer_enabled = 0;
3462 }
3463
3464 if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
3465 {
3466 if (argc >= 3)
3467 xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
3468 else
3469 xscale->trace.buffer_fill = 1;
3470 }
3471 else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
3472 {
3473 xscale->trace.buffer_fill = -1;
3474 }
3475
3476 if (xscale->trace.buffer_enabled)
3477 {
3478 /* if we enable the trace buffer in fill-once
3479 * mode we know the address of the first instruction */
3480 xscale->trace.pc_ok = 1;
3481 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3482 }
3483 else
3484 {
3485 /* otherwise the address is unknown, and we have no known good PC */
3486 xscale->trace.pc_ok = 0;
3487 }
3488
3489 command_print(cmd_ctx, "trace buffer %s (%s)",
3490 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3491 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3492
3493 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3494 if (xscale->trace.buffer_fill >= 0)
3495 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3496 else
3497 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3498
3499 return ERROR_OK;
3500 }
3501
3502 int xscale_handle_trace_image_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3503 {
3504 target_t *target;
3505 armv4_5_common_t *armv4_5;
3506 xscale_common_t *xscale;
3507
3508 if (argc < 1)
3509 {
3510 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3511 return ERROR_OK;
3512 }
3513
3514 target = get_current_target(cmd_ctx);
3515
3516 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3517 {
3518 return ERROR_OK;
3519 }
3520
3521 if (xscale->trace.image)
3522 {
3523 image_close(xscale->trace.image);
3524 free(xscale->trace.image);
3525 command_print(cmd_ctx, "previously loaded image found and closed");
3526 }
3527
3528 xscale->trace.image = malloc(sizeof(image_t));
3529 xscale->trace.image->base_address_set = 0;
3530 xscale->trace.image->start_address_set = 0;
3531
3532 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3533 if (argc >= 2)
3534 {
3535 xscale->trace.image->base_address_set = 1;
3536 xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
3537 }
3538 else
3539 {
3540 xscale->trace.image->base_address_set = 0;
3541 }
3542
3543 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
3544 {
3545 free(xscale->trace.image);
3546 xscale->trace.image = NULL;
3547 return ERROR_OK;
3548 }
3549
3550 return ERROR_OK;
3551 }
3552
3553 int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3554 {
3555 target_t *target = get_current_target(cmd_ctx);
3556 armv4_5_common_t *armv4_5;
3557 xscale_common_t *xscale;
3558 xscale_trace_data_t *trace_data;
3559 fileio_t file;
3560
3561 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3562 {
3563 return ERROR_OK;
3564 }
3565
3566 if (target->state != TARGET_HALTED)
3567 {
3568 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3569 return ERROR_OK;
3570 }
3571
3572 if (argc < 1)
3573 {
3574 command_print(cmd_ctx, "usage: xscale dump_trace <file>");
3575 return ERROR_OK;
3576 }
3577
3578 trace_data = xscale->trace.data;
3579
3580 if (!trace_data)
3581 {
3582 command_print(cmd_ctx, "no trace data collected");
3583 return ERROR_OK;
3584 }
3585
3586 if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3587 {
3588 return ERROR_OK;
3589 }
3590
3591 while (trace_data)
3592 {
3593 int i;
3594
3595 fileio_write_u32(&file, trace_data->chkpt0);
3596 fileio_write_u32(&file, trace_data->chkpt1);
3597 fileio_write_u32(&file, trace_data->last_instruction);
3598 fileio_write_u32(&file, trace_data->depth);
3599
3600 for (i = 0; i < trace_data->depth; i++)
3601 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3602
3603 trace_data = trace_data->next;
3604 }
3605
3606 fileio_close(&file);
3607
3608 return ERROR_OK;
3609 }
3610
3611 int xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3612 {
3613 target_t *target = get_current_target(cmd_ctx);
3614 armv4_5_common_t *armv4_5;
3615 xscale_common_t *xscale;
3616
3617 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3618 {
3619 return ERROR_OK;
3620 }
3621
3622 xscale_analyze_trace(target, cmd_ctx);
3623
3624 return ERROR_OK;
3625 }
3626
3627 int xscale_handle_cp15(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3628 {
3629 target_t *target = get_current_target(cmd_ctx);
3630 armv4_5_common_t *armv4_5;
3631 xscale_common_t *xscale;
3632
3633 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3634 {
3635 return ERROR_OK;
3636 }
3637
3638 if (target->state != TARGET_HALTED)
3639 {
3640 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3641 return ERROR_OK;
3642 }
3643 u32 reg_no = 0;
3644 reg_t *reg = NULL;
3645 if(argc > 0)
3646 {
3647 reg_no = strtoul(args[0], NULL, 0);
3648 /*translate from xscale cp15 register no to openocd register*/
3649 switch(reg_no)
3650 {
3651 case 0:
3652 reg_no = XSCALE_MAINID;
3653 break;
3654 case 1:
3655 reg_no = XSCALE_CTRL;
3656 break;
3657 case 2:
3658 reg_no = XSCALE_TTB;
3659 break;
3660 case 3:
3661 reg_no = XSCALE_DAC;
3662 break;
3663 case 5:
3664 reg_no = XSCALE_FSR;
3665 break;
3666 case 6:
3667 reg_no = XSCALE_FAR;
3668 break;
3669 case 13:
3670 reg_no = XSCALE_PID;
3671 break;
3672 case 15:
3673 reg_no = XSCALE_CPACCESS;
3674 break;
3675 default:
3676 command_print(cmd_ctx, "invalid register number");
3677 return ERROR_INVALID_ARGUMENTS;
3678 }
3679 reg = &xscale->reg_cache->reg_list[reg_no];
3680
3681 }
3682 if(argc == 1)
3683 {
3684 u32 value;
3685
3686 /* read cp15 control register */
3687 xscale_get_reg(reg);
3688 value = buf_get_u32(reg->value, 0, 32);
3689 command_print(cmd_ctx, "%s (/%i): 0x%x", reg->name, reg->size, value);
3690 }
3691 else if(argc == 2)
3692 {
3693
3694 u32 value = strtoul(args[1], NULL, 0);
3695
3696 /* send CP write request (command 0x41) */
3697 xscale_send_u32(target, 0x41);
3698
3699 /* send CP register number */
3700 xscale_send_u32(target, reg_no);
3701
3702 /* send CP register value */
3703 xscale_send_u32(target, value);
3704
3705 /* execute cpwait to ensure outstanding operations complete */
3706 xscale_send_u32(target, 0x53);
3707 }
3708 else
3709 {
3710 command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
3711 }
3712
3713 return ERROR_OK;
3714 }
3715
3716 int xscale_register_commands(struct command_context_s *cmd_ctx)
3717 {
3718 command_t *xscale_cmd;
3719
3720 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3721
3722 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3723 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3724
3725 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3726 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3727 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3728 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3729
3730 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_idcache_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
3731
3732 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable|disable> ['fill' [n]|'wrap']");
3733
3734 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3735 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3736 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3737 COMMAND_EXEC, "load image from <file> [base address]");
3738
3739 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3740
3741 armv4_5_register_commands(cmd_ctx);
3742
3743 return ERROR_OK;
3744 }
