1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
19 ***************************************************************************/
24 #include "replacements.h"
31 #include "arm_simulator.h"
32 #include "arm_disassembler.h"
35 #include "binarybuffer.h"
36 #include "time_support.h"
37 #include "breakpoints.h"
43 #include <sys/types.h>
49 int xscale_register_commands(struct command_context_s
*cmd_ctx
);
51 /* forward declarations */
52 int xscale_target_command(struct command_context_s
*cmd_ctx
, char *cmd
, char **args
, int argc
, struct target_s
*target
);
53 int xscale_init_target(struct command_context_s
*cmd_ctx
, struct target_s
*target
);
56 int xscale_arch_state(struct target_s
*target
);
57 int xscale_poll(target_t
*target
);
58 int xscale_halt(target_t
*target
);
59 int xscale_resume(struct target_s
*target
, int current
, u32 address
, int handle_breakpoints
, int debug_execution
);
60 int xscale_step(struct target_s
*target
, int current
, u32 address
, int handle_breakpoints
);
61 int xscale_debug_entry(target_t
*target
);
62 int xscale_restore_context(target_t
*target
);
64 int xscale_assert_reset(target_t
*target
);
65 int xscale_deassert_reset(target_t
*target
);
66 int xscale_soft_reset_halt(struct target_s
*target
);
67 int xscale_prepare_reset_halt(struct target_s
*target
);
69 int xscale_set_reg_u32(reg_t
*reg
, u32 value
);
71 int xscale_read_core_reg(struct target_s
*target
, int num
, enum armv4_5_mode mode
);
72 int xscale_write_core_reg(struct target_s
*target
, int num
, enum armv4_5_mode mode
, u32 value
);
74 int xscale_read_memory(struct target_s
*target
, u32 address
, u32 size
, u32 count
, u8
*buffer
);
75 int xscale_write_memory(struct target_s
*target
, u32 address
, u32 size
, u32 count
, u8
*buffer
);
76 int xscale_bulk_write_memory(target_t
*target
, u32 address
, u32 count
, u8
*buffer
);
77 int xscale_checksum_memory(struct target_s
*target
, u32 address
, u32 count
, u32
* checksum
);
79 int xscale_add_breakpoint(struct target_s
*target
, breakpoint_t
*breakpoint
);
80 int xscale_remove_breakpoint(struct target_s
*target
, breakpoint_t
*breakpoint
);
81 int xscale_set_breakpoint(struct target_s
*target
, breakpoint_t
*breakpoint
);
82 int xscale_unset_breakpoint(struct target_s
*target
, breakpoint_t
*breakpoint
);
83 int xscale_add_watchpoint(struct target_s
*target
, watchpoint_t
*watchpoint
);
84 int xscale_remove_watchpoint(struct target_s
*target
, watchpoint_t
*watchpoint
);
85 void xscale_enable_watchpoints(struct target_s
*target
);
86 void xscale_enable_breakpoints(struct target_s
*target
);
87 static int xscale_virt2phys(struct target_s
*target
, u32
virtual, u32
*physical
);
88 static int xscale_mmu(struct target_s
*target
, int *enabled
);
90 int xscale_read_trace(target_t
*target
);
92 target_type_t xscale_target
=
97 .arch_state
= xscale_arch_state
,
99 .target_request_data
= NULL
,
102 .resume
= xscale_resume
,
105 .assert_reset
= xscale_assert_reset
,
106 .deassert_reset
= xscale_deassert_reset
,
107 .soft_reset_halt
= xscale_soft_reset_halt
,
108 .prepare_reset_halt
= xscale_prepare_reset_halt
,
110 .get_gdb_reg_list
= armv4_5_get_gdb_reg_list
,
112 .read_memory
= xscale_read_memory
,
113 .write_memory
= xscale_write_memory
,
114 .bulk_write_memory
= xscale_bulk_write_memory
,
115 .checksum_memory
= xscale_checksum_memory
,
117 .run_algorithm
= armv4_5_run_algorithm
,
119 .add_breakpoint
= xscale_add_breakpoint
,
120 .remove_breakpoint
= xscale_remove_breakpoint
,
121 .add_watchpoint
= xscale_add_watchpoint
,
122 .remove_watchpoint
= xscale_remove_watchpoint
,
124 .register_commands
= xscale_register_commands
,
125 .target_command
= xscale_target_command
,
126 .init_target
= xscale_init_target
,
129 .virt2phys
= xscale_virt2phys
,
133 char* xscale_reg_list
[] =
135 "XSCALE_MAINID", /* 0 */
145 "XSCALE_IBCR0", /* 10 */
155 "XSCALE_RX", /* 20 */
159 xscale_reg_t xscale_reg_arch_info
[] =
161 {XSCALE_MAINID
, NULL
},
162 {XSCALE_CACHETYPE
, NULL
},
164 {XSCALE_AUXCTRL
, NULL
},
170 {XSCALE_CPACCESS
, NULL
},
171 {XSCALE_IBCR0
, NULL
},
172 {XSCALE_IBCR1
, NULL
},
175 {XSCALE_DBCON
, NULL
},
176 {XSCALE_TBREG
, NULL
},
177 {XSCALE_CHKPT0
, NULL
},
178 {XSCALE_CHKPT1
, NULL
},
179 {XSCALE_DCSR
, NULL
}, /* DCSR accessed via JTAG or SW */
180 {-1, NULL
}, /* TX accessed via JTAG */
181 {-1, NULL
}, /* RX accessed via JTAG */
182 {-1, NULL
}, /* TXRXCTRL implicit access via JTAG */
/* register-arch-type handle; -1 marks "not yet registered with the reg framework" */
int xscale_reg_arch_type = -1;
187 int xscale_get_reg(reg_t
*reg
);
188 int xscale_set_reg(reg_t
*reg
, u8
*buf
);
190 int xscale_get_arch_pointers(target_t
*target
, armv4_5_common_t
**armv4_5_p
, xscale_common_t
**xscale_p
)
192 armv4_5_common_t
*armv4_5
= target
->arch_info
;
193 xscale_common_t
*xscale
= armv4_5
->arch_info
;
195 if (armv4_5
->common_magic
!= ARMV4_5_COMMON_MAGIC
)
197 ERROR("target isn't an XScale target");
201 if (xscale
->common_magic
!= XSCALE_COMMON_MAGIC
)
203 ERROR("target isn't an XScale target");
207 *armv4_5_p
= armv4_5
;
213 int xscale_jtag_set_instr(int chain_pos
, u32 new_instr
)
215 jtag_device_t
*device
= jtag_get_device(chain_pos
);
217 if (buf_get_u32(device
->cur_instr
, 0, device
->ir_length
) != new_instr
)
221 field
.device
= chain_pos
;
222 field
.num_bits
= device
->ir_length
;
223 field
.out_value
= calloc(CEIL(field
.num_bits
, 8), 1);
224 buf_set_u32(field
.out_value
, 0, field
.num_bits
, new_instr
);
225 field
.out_mask
= NULL
;
226 field
.in_value
= NULL
;
227 jtag_set_check_value(&field
, device
->expected
, device
->expected_mask
, NULL
);
229 jtag_add_ir_scan(1, &field
, -1);
231 free(field
.out_value
);
237 int xscale_jtag_callback(enum jtag_event event
, void *priv
)
241 case JTAG_TRST_ASSERTED
:
243 case JTAG_TRST_RELEASED
:
245 case JTAG_SRST_ASSERTED
:
247 case JTAG_SRST_RELEASED
:
250 WARNING("unhandled JTAG event");
256 int xscale_read_dcsr(target_t
*target
)
258 armv4_5_common_t
*armv4_5
= target
->arch_info
;
259 xscale_common_t
*xscale
= armv4_5
->arch_info
;
263 scan_field_t fields
[3];
265 u8 field0_check_value
= 0x2;
266 u8 field0_check_mask
= 0x7;
268 u8 field2_check_value
= 0x0;
269 u8 field2_check_mask
= 0x1;
271 jtag_add_end_state(TAP_PD
);
272 xscale_jtag_set_instr(xscale
->jtag_info
.chain_pos
, xscale
->jtag_info
.dcsr
);
274 buf_set_u32(&field0
, 1, 1, xscale
->hold_rst
);
275 buf_set_u32(&field0
, 2, 1, xscale
->external_debug_break
);
277 fields
[0].device
= xscale
->jtag_info
.chain_pos
;
278 fields
[0].num_bits
= 3;
279 fields
[0].out_value
= &field0
;
280 fields
[0].out_mask
= NULL
;
281 fields
[0].in_value
= NULL
;
282 jtag_set_check_value(fields
+0, &field0_check_value
, &field0_check_mask
, NULL
);
284 fields
[1].device
= xscale
->jtag_info
.chain_pos
;
285 fields
[1].num_bits
= 32;
286 fields
[1].out_value
= NULL
;
287 fields
[1].out_mask
= NULL
;
288 fields
[1].in_value
= xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].value
;
289 fields
[1].in_handler
= NULL
;
290 fields
[1].in_handler_priv
= NULL
;
291 fields
[1].in_check_value
= NULL
;
292 fields
[1].in_check_mask
= NULL
;
294 fields
[2].device
= xscale
->jtag_info
.chain_pos
;
295 fields
[2].num_bits
= 1;
296 fields
[2].out_value
= &field2
;
297 fields
[2].out_mask
= NULL
;
298 fields
[2].in_value
= NULL
;
299 jtag_set_check_value(fields
+2, &field2_check_value
, &field2_check_mask
, NULL
);
301 jtag_add_dr_scan(3, fields
, -1);
303 if ((retval
= jtag_execute_queue()) != ERROR_OK
)
305 ERROR("JTAG error while reading DCSR");
309 xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].dirty
= 0;
310 xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].valid
= 1;
312 /* write the register with the value we just read
313 * on this second pass, only the first bit of field0 is guaranteed to be 0)
315 field0_check_mask
= 0x1;
316 fields
[1].out_value
= xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].value
;
317 fields
[1].in_value
= NULL
;
319 jtag_add_end_state(TAP_RTI
);
321 jtag_add_dr_scan(3, fields
, -1);
323 /* DANGER!!! this must be here. It will make sure that the arguments
324 * to jtag_set_check_value() does not go out of scope! */
325 return jtag_execute_queue();
328 int xscale_receive(target_t
*target
, u32
*buffer
, int num_words
)
331 return ERROR_INVALID_ARGUMENTS
;
334 armv4_5_common_t
*armv4_5
= target
->arch_info
;
335 xscale_common_t
*xscale
= armv4_5
->arch_info
;
337 enum tap_state path
[3];
338 scan_field_t fields
[3];
340 u8
*field0
= malloc(num_words
* 1);
341 u8 field0_check_value
= 0x2;
342 u8 field0_check_mask
= 0x6;
343 u32
*field1
= malloc(num_words
* 4);
344 u8 field2_check_value
= 0x0;
345 u8 field2_check_mask
= 0x1;
347 int words_scheduled
= 0;
355 fields
[0].device
= xscale
->jtag_info
.chain_pos
;
356 fields
[0].num_bits
= 3;
357 fields
[0].out_value
= NULL
;
358 fields
[0].out_mask
= NULL
;
359 fields
[0].in_value
= NULL
;
360 jtag_set_check_value(fields
+0, &field0_check_value
, &field0_check_mask
, NULL
);
362 fields
[1].device
= xscale
->jtag_info
.chain_pos
;
363 fields
[1].num_bits
= 32;
364 fields
[1].out_value
= NULL
;
365 fields
[1].out_mask
= NULL
;
366 fields
[1].in_value
= NULL
;
367 fields
[1].in_handler
= NULL
;
368 fields
[1].in_handler_priv
= NULL
;
369 fields
[1].in_check_value
= NULL
;
370 fields
[1].in_check_mask
= NULL
;
374 fields
[2].device
= xscale
->jtag_info
.chain_pos
;
375 fields
[2].num_bits
= 1;
376 fields
[2].out_value
= NULL
;
377 fields
[2].out_mask
= NULL
;
378 fields
[2].in_value
= NULL
;
379 jtag_set_check_value(fields
+2, &field2_check_value
, &field2_check_mask
, NULL
);
381 jtag_add_end_state(TAP_RTI
);
382 xscale_jtag_set_instr(xscale
->jtag_info
.chain_pos
, xscale
->jtag_info
.dbgtx
);
383 jtag_add_runtest(1, -1); /* ensures that we're in the TAP_RTI state as the above could be a no-op */
385 /* repeat until all words have been collected */
387 while (words_done
< num_words
)
391 for (i
= words_done
; i
< num_words
; i
++)
393 fields
[0].in_value
= &field0
[i
];
394 fields
[1].in_handler
= buf_to_u32_handler
;
395 fields
[1].in_handler_priv
= (u8
*)&field1
[i
];
397 jtag_add_pathmove(3, path
);
398 jtag_add_dr_scan(3, fields
, TAP_RTI
);
402 if ((retval
= jtag_execute_queue()) != ERROR_OK
)
404 ERROR("JTAG error while receiving data from debug handler");
408 /* examine results */
409 for (i
= words_done
; i
< num_words
; i
++)
411 if (!(field0
[0] & 1))
413 /* move backwards if necessary */
415 for (j
= i
; j
< num_words
- 1; j
++)
417 field0
[j
] = field0
[j
+1];
418 field1
[j
] = field1
[j
+1];
423 if (words_scheduled
==0)
425 if (attempts
++==1000)
427 ERROR("Failed to receiving data from debug handler after 1000 attempts");
428 retval
=ERROR_TARGET_TIMEOUT
;
433 words_done
+= words_scheduled
;
436 for (i
= 0; i
< num_words
; i
++)
437 *(buffer
++) = buf_get_u32((u8
*)&field1
[i
], 0, 32);
444 int xscale_read_tx(target_t
*target
, int consume
)
446 armv4_5_common_t
*armv4_5
= target
->arch_info
;
447 xscale_common_t
*xscale
= armv4_5
->arch_info
;
448 enum tap_state path
[3];
449 enum tap_state noconsume_path
[9];
452 struct timeval timeout
, now
;
454 scan_field_t fields
[3];
456 u8 field0_check_value
= 0x2;
457 u8 field0_check_mask
= 0x6;
458 u8 field2_check_value
= 0x0;
459 u8 field2_check_mask
= 0x1;
461 jtag_add_end_state(TAP_RTI
);
463 xscale_jtag_set_instr(xscale
->jtag_info
.chain_pos
, xscale
->jtag_info
.dbgtx
);
469 noconsume_path
[0] = TAP_SDS
;
470 noconsume_path
[1] = TAP_CD
;
471 noconsume_path
[2] = TAP_E1D
;
472 noconsume_path
[3] = TAP_PD
;
473 noconsume_path
[4] = TAP_E2D
;
474 noconsume_path
[5] = TAP_UD
;
475 noconsume_path
[6] = TAP_SDS
;
476 noconsume_path
[7] = TAP_CD
;
477 noconsume_path
[8] = TAP_SD
;
479 fields
[0].device
= xscale
->jtag_info
.chain_pos
;
480 fields
[0].num_bits
= 3;
481 fields
[0].out_value
= NULL
;
482 fields
[0].out_mask
= NULL
;
483 fields
[0].in_value
= &field0_in
;
484 jtag_set_check_value(fields
+0, &field0_check_value
, &field0_check_mask
, NULL
);
486 fields
[1].device
= xscale
->jtag_info
.chain_pos
;
487 fields
[1].num_bits
= 32;
488 fields
[1].out_value
= NULL
;
489 fields
[1].out_mask
= NULL
;
490 fields
[1].in_value
= xscale
->reg_cache
->reg_list
[XSCALE_TX
].value
;
491 fields
[1].in_handler
= NULL
;
492 fields
[1].in_handler_priv
= NULL
;
493 fields
[1].in_check_value
= NULL
;
494 fields
[1].in_check_mask
= NULL
;
498 fields
[2].device
= xscale
->jtag_info
.chain_pos
;
499 fields
[2].num_bits
= 1;
500 fields
[2].out_value
= NULL
;
501 fields
[2].out_mask
= NULL
;
502 fields
[2].in_value
= NULL
;
503 jtag_set_check_value(fields
+2, &field2_check_value
, &field2_check_mask
, NULL
);
505 gettimeofday(&timeout
, NULL
);
506 timeval_add_time(&timeout
, 5, 0);
510 /* if we want to consume the register content (i.e. clear TX_READY),
511 * we have to go straight from Capture-DR to Shift-DR
512 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
515 jtag_add_pathmove(3, path
);
517 jtag_add_pathmove(sizeof(noconsume_path
)/sizeof(*noconsume_path
), noconsume_path
);
519 jtag_add_dr_scan(3, fields
, TAP_RTI
);
521 if ((retval
= jtag_execute_queue()) != ERROR_OK
)
523 ERROR("JTAG error while reading TX");
524 return ERROR_TARGET_TIMEOUT
;
527 gettimeofday(&now
, NULL
);
528 if ((now
.tv_sec
> timeout
.tv_sec
) || ((now
.tv_sec
== timeout
.tv_sec
)&& (now
.tv_usec
> timeout
.tv_usec
)))
530 ERROR("time out reading TX register");
531 return ERROR_TARGET_TIMEOUT
;
533 if (!((!(field0_in
& 1)) && consume
))
537 usleep(500*1000); /* avoid flooding the logs */
540 if (!(field0_in
& 1))
541 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
546 int xscale_write_rx(target_t
*target
)
548 armv4_5_common_t
*armv4_5
= target
->arch_info
;
549 xscale_common_t
*xscale
= armv4_5
->arch_info
;
552 struct timeval timeout
, now
;
554 scan_field_t fields
[3];
557 u8 field0_check_value
= 0x2;
558 u8 field0_check_mask
= 0x6;
560 u8 field2_check_value
= 0x0;
561 u8 field2_check_mask
= 0x1;
563 jtag_add_end_state(TAP_RTI
);
565 xscale_jtag_set_instr(xscale
->jtag_info
.chain_pos
, xscale
->jtag_info
.dbgrx
);
567 fields
[0].device
= xscale
->jtag_info
.chain_pos
;
568 fields
[0].num_bits
= 3;
569 fields
[0].out_value
= &field0_out
;
570 fields
[0].out_mask
= NULL
;
571 fields
[0].in_value
= &field0_in
;
572 jtag_set_check_value(fields
+0, &field0_check_value
, &field0_check_mask
, NULL
);
574 fields
[1].device
= xscale
->jtag_info
.chain_pos
;
575 fields
[1].num_bits
= 32;
576 fields
[1].out_value
= xscale
->reg_cache
->reg_list
[XSCALE_RX
].value
;
577 fields
[1].out_mask
= NULL
;
578 fields
[1].in_value
= NULL
;
579 fields
[1].in_handler
= NULL
;
580 fields
[1].in_handler_priv
= NULL
;
581 fields
[1].in_check_value
= NULL
;
582 fields
[1].in_check_mask
= NULL
;
586 fields
[2].device
= xscale
->jtag_info
.chain_pos
;
587 fields
[2].num_bits
= 1;
588 fields
[2].out_value
= &field2
;
589 fields
[2].out_mask
= NULL
;
590 fields
[2].in_value
= NULL
;
591 jtag_set_check_value(fields
+2, &field2_check_value
, &field2_check_mask
, NULL
);
593 gettimeofday(&timeout
, NULL
);
594 timeval_add_time(&timeout
, 5, 0);
596 /* poll until rx_read is low */
600 jtag_add_dr_scan(3, fields
, TAP_RTI
);
602 if ((retval
= jtag_execute_queue()) != ERROR_OK
)
604 ERROR("JTAG error while writing RX");
608 gettimeofday(&now
, NULL
);
609 if ((now
.tv_sec
> timeout
.tv_sec
) || ((now
.tv_sec
== timeout
.tv_sec
)&& (now
.tv_usec
> timeout
.tv_usec
)))
611 ERROR("time out writing RX register");
612 return ERROR_TARGET_TIMEOUT
;
614 if (!(field0_in
& 1))
616 usleep(500*1000); /* wait 500ms to avoid flooding the logs */
621 jtag_add_dr_scan(3, fields
, TAP_RTI
);
623 if ((retval
= jtag_execute_queue()) != ERROR_OK
)
625 ERROR("JTAG error while writing RX");
632 /* send count elements of size byte to the debug handler */
633 int xscale_send(target_t
*target
, u8
*buffer
, int count
, int size
)
635 armv4_5_common_t
*armv4_5
= target
->arch_info
;
636 xscale_common_t
*xscale
= armv4_5
->arch_info
;
641 u8 output
[4] = {0, 0, 0, 0};
643 scan_field_t fields
[3];
645 u8 field0_check_value
= 0x2;
646 u8 field0_check_mask
= 0x6;
648 u8 field2_check_value
= 0x0;
649 u8 field2_check_mask
= 0x1;
651 jtag_add_end_state(TAP_RTI
);
653 xscale_jtag_set_instr(xscale
->jtag_info
.chain_pos
, xscale
->jtag_info
.dbgrx
);
655 fields
[0].device
= xscale
->jtag_info
.chain_pos
;
656 fields
[0].num_bits
= 3;
657 fields
[0].out_value
= &field0_out
;
658 fields
[0].out_mask
= NULL
;
659 fields
[0].in_handler
= NULL
;
660 fields
[0].in_value
= NULL
;
661 if (!xscale
->fast_memory_access
)
663 jtag_set_check_value(fields
+0, &field0_check_value
, &field0_check_mask
, NULL
);
666 fields
[1].device
= xscale
->jtag_info
.chain_pos
;
667 fields
[1].num_bits
= 32;
668 fields
[1].out_value
= output
;
669 fields
[1].out_mask
= NULL
;
670 fields
[1].in_value
= NULL
;
671 fields
[1].in_handler
= NULL
;
672 fields
[1].in_handler_priv
= NULL
;
673 fields
[1].in_check_value
= NULL
;
674 fields
[1].in_check_mask
= NULL
;
678 fields
[2].device
= xscale
->jtag_info
.chain_pos
;
679 fields
[2].num_bits
= 1;
680 fields
[2].out_value
= &field2
;
681 fields
[2].out_mask
= NULL
;
682 fields
[2].in_value
= NULL
;
683 fields
[2].in_handler
= NULL
;
684 if (!xscale
->fast_memory_access
)
686 jtag_set_check_value(fields
+2, &field2_check_value
, &field2_check_mask
, NULL
);
691 int endianness
= target
->endianness
;
692 while (done_count
++ < count
)
694 if (endianness
== TARGET_LITTLE_ENDIAN
)
707 jtag_add_dr_scan(3, fields
, TAP_RTI
);
713 while (done_count
++ < count
)
715 /* extract sized element from target-endian buffer, and put it
716 * into little-endian output buffer
721 buf_set_u32(output
, 0, 32, target_buffer_get_u16(target
, buffer
));
727 ERROR("BUG: size neither 4, 2 nor 1");
731 jtag_add_dr_scan(3, fields
, TAP_RTI
);
737 if ((retval
= jtag_execute_queue()) != ERROR_OK
)
739 ERROR("JTAG error while sending data to debug handler");
746 int xscale_send_u32(target_t
*target
, u32 value
)
748 armv4_5_common_t
*armv4_5
= target
->arch_info
;
749 xscale_common_t
*xscale
= armv4_5
->arch_info
;
751 buf_set_u32(xscale
->reg_cache
->reg_list
[XSCALE_RX
].value
, 0, 32, value
);
752 return xscale_write_rx(target
);
755 int xscale_write_dcsr(target_t
*target
, int hold_rst
, int ext_dbg_brk
)
757 armv4_5_common_t
*armv4_5
= target
->arch_info
;
758 xscale_common_t
*xscale
= armv4_5
->arch_info
;
762 scan_field_t fields
[3];
764 u8 field0_check_value
= 0x2;
765 u8 field0_check_mask
= 0x7;
767 u8 field2_check_value
= 0x0;
768 u8 field2_check_mask
= 0x1;
771 xscale
->hold_rst
= hold_rst
;
773 if (ext_dbg_brk
!= -1)
774 xscale
->external_debug_break
= ext_dbg_brk
;
776 jtag_add_end_state(TAP_RTI
);
777 xscale_jtag_set_instr(xscale
->jtag_info
.chain_pos
, xscale
->jtag_info
.dcsr
);
779 buf_set_u32(&field0
, 1, 1, xscale
->hold_rst
);
780 buf_set_u32(&field0
, 2, 1, xscale
->external_debug_break
);
782 fields
[0].device
= xscale
->jtag_info
.chain_pos
;
783 fields
[0].num_bits
= 3;
784 fields
[0].out_value
= &field0
;
785 fields
[0].out_mask
= NULL
;
786 fields
[0].in_value
= NULL
;
787 jtag_set_check_value(fields
+0, &field0_check_value
, &field0_check_mask
, NULL
);
789 fields
[1].device
= xscale
->jtag_info
.chain_pos
;
790 fields
[1].num_bits
= 32;
791 fields
[1].out_value
= xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].value
;
792 fields
[1].out_mask
= NULL
;
793 fields
[1].in_value
= NULL
;
794 fields
[1].in_handler
= NULL
;
795 fields
[1].in_handler_priv
= NULL
;
796 fields
[1].in_check_value
= NULL
;
797 fields
[1].in_check_mask
= NULL
;
801 fields
[2].device
= xscale
->jtag_info
.chain_pos
;
802 fields
[2].num_bits
= 1;
803 fields
[2].out_value
= &field2
;
804 fields
[2].out_mask
= NULL
;
805 fields
[2].in_value
= NULL
;
806 jtag_set_check_value(fields
+2, &field2_check_value
, &field2_check_mask
, NULL
);
808 jtag_add_dr_scan(3, fields
, -1);
810 if ((retval
= jtag_execute_queue()) != ERROR_OK
)
812 ERROR("JTAG error while writing DCSR");
816 xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].dirty
= 0;
817 xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].valid
= 1;
/* parity of the number of bits 0 if even; 1 if odd. for 32 bit words */
/* Folds the 32-bit word down to 4 bits by repeated xor-shift, then uses
 * 0x6996 (the 16-entry parity lookup table packed into one constant) to
 * extract the parity bit.
 * NOTE(review): the xor-fold body was elided in this extract and is
 * reconstructed from the visible lookup expression — confirm against the
 * pristine source.
 */
unsigned int parity (unsigned int v)
{
	unsigned int ov = v;        /* keep the original value for the log line */
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v &= 0xf;
	DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
	return (0x6996 >> v) & 1;
}
834 int xscale_load_ic(target_t
*target
, int mini
, u32 va
, u32 buffer
[8])
836 armv4_5_common_t
*armv4_5
= target
->arch_info
;
837 xscale_common_t
*xscale
= armv4_5
->arch_info
;
842 scan_field_t fields
[2];
844 DEBUG("loading miniIC at 0x%8.8x", va
);
846 jtag_add_end_state(TAP_RTI
);
847 xscale_jtag_set_instr(xscale
->jtag_info
.chain_pos
, xscale
->jtag_info
.ldic
); /* LDIC */
849 /* CMD is b010 for Main IC and b011 for Mini IC */
851 buf_set_u32(&cmd
, 0, 3, 0x3);
853 buf_set_u32(&cmd
, 0, 3, 0x2);
855 buf_set_u32(&cmd
, 3, 3, 0x0);
857 /* virtual address of desired cache line */
858 buf_set_u32(packet
, 0, 27, va
>> 5);
860 fields
[0].device
= xscale
->jtag_info
.chain_pos
;
861 fields
[0].num_bits
= 6;
862 fields
[0].out_value
= &cmd
;
863 fields
[0].out_mask
= NULL
;
864 fields
[0].in_value
= NULL
;
865 fields
[0].in_check_value
= NULL
;
866 fields
[0].in_check_mask
= NULL
;
867 fields
[0].in_handler
= NULL
;
868 fields
[0].in_handler_priv
= NULL
;
870 fields
[1].device
= xscale
->jtag_info
.chain_pos
;
871 fields
[1].num_bits
= 27;
872 fields
[1].out_value
= packet
;
873 fields
[1].out_mask
= NULL
;
874 fields
[1].in_value
= NULL
;
875 fields
[1].in_check_value
= NULL
;
876 fields
[1].in_check_mask
= NULL
;
877 fields
[1].in_handler
= NULL
;
878 fields
[1].in_handler_priv
= NULL
;
880 jtag_add_dr_scan(2, fields
, -1);
882 fields
[0].num_bits
= 32;
883 fields
[0].out_value
= packet
;
885 fields
[1].num_bits
= 1;
886 fields
[1].out_value
= &cmd
;
888 for (word
= 0; word
< 8; word
++)
890 buf_set_u32(packet
, 0, 32, buffer
[word
]);
891 cmd
= parity(*((u32
*)packet
));
892 jtag_add_dr_scan(2, fields
, -1);
895 jtag_execute_queue();
900 int xscale_invalidate_ic_line(target_t
*target
, u32 va
)
902 armv4_5_common_t
*armv4_5
= target
->arch_info
;
903 xscale_common_t
*xscale
= armv4_5
->arch_info
;
907 scan_field_t fields
[2];
909 jtag_add_end_state(TAP_RTI
);
910 xscale_jtag_set_instr(xscale
->jtag_info
.chain_pos
, xscale
->jtag_info
.ldic
); /* LDIC */
912 /* CMD for invalidate IC line b000, bits [6:4] b000 */
913 buf_set_u32(&cmd
, 0, 6, 0x0);
915 /* virtual address of desired cache line */
916 buf_set_u32(packet
, 0, 27, va
>> 5);
918 fields
[0].device
= xscale
->jtag_info
.chain_pos
;
919 fields
[0].num_bits
= 6;
920 fields
[0].out_value
= &cmd
;
921 fields
[0].out_mask
= NULL
;
922 fields
[0].in_value
= NULL
;
923 fields
[0].in_check_value
= NULL
;
924 fields
[0].in_check_mask
= NULL
;
925 fields
[0].in_handler
= NULL
;
926 fields
[0].in_handler_priv
= NULL
;
928 fields
[1].device
= xscale
->jtag_info
.chain_pos
;
929 fields
[1].num_bits
= 27;
930 fields
[1].out_value
= packet
;
931 fields
[1].out_mask
= NULL
;
932 fields
[1].in_value
= NULL
;
933 fields
[1].in_check_value
= NULL
;
934 fields
[1].in_check_mask
= NULL
;
935 fields
[1].in_handler
= NULL
;
936 fields
[1].in_handler_priv
= NULL
;
938 jtag_add_dr_scan(2, fields
, -1);
943 int xscale_update_vectors(target_t
*target
)
945 armv4_5_common_t
*armv4_5
= target
->arch_info
;
946 xscale_common_t
*xscale
= armv4_5
->arch_info
;
950 u32 low_reset_branch
, high_reset_branch
;
952 for (i
= 1; i
< 8; i
++)
954 /* if there's a static vector specified for this exception, override */
955 if (xscale
->static_high_vectors_set
& (1 << i
))
957 xscale
->high_vectors
[i
] = xscale
->static_high_vectors
[i
];
961 retval
=target_read_u32(target
, 0xffff0000 + 4*i
, &xscale
->high_vectors
[i
]);
962 if (retval
== ERROR_TARGET_TIMEOUT
)
964 if (retval
!=ERROR_OK
)
966 /* Some of these reads will fail as part of normal execution */
967 xscale
->high_vectors
[i
] = ARMV4_5_B(0xfffffe, 0);
972 for (i
= 1; i
< 8; i
++)
974 if (xscale
->static_low_vectors_set
& (1 << i
))
976 xscale
->low_vectors
[i
] = xscale
->static_low_vectors
[i
];
980 retval
=target_read_u32(target
, 0x0 + 4*i
, &xscale
->low_vectors
[i
]);
981 if (retval
== ERROR_TARGET_TIMEOUT
)
983 if (retval
!=ERROR_OK
)
985 /* Some of these reads will fail as part of normal execution */
986 xscale
->low_vectors
[i
] = ARMV4_5_B(0xfffffe, 0);
991 /* calculate branches to debug handler */
992 low_reset_branch
= (xscale
->handler_address
+ 0x20 - 0x0 - 0x8) >> 2;
993 high_reset_branch
= (xscale
->handler_address
+ 0x20 - 0xffff0000 - 0x8) >> 2;
995 xscale
->low_vectors
[0] = ARMV4_5_B((low_reset_branch
& 0xffffff), 0);
996 xscale
->high_vectors
[0] = ARMV4_5_B((high_reset_branch
& 0xffffff), 0);
998 /* invalidate and load exception vectors in mini i-cache */
999 xscale_invalidate_ic_line(target
, 0x0);
1000 xscale_invalidate_ic_line(target
, 0xffff0000);
1002 xscale_load_ic(target
, 1, 0x0, xscale
->low_vectors
);
1003 xscale_load_ic(target
, 1, 0xffff0000, xscale
->high_vectors
);
1008 int xscale_arch_state(struct target_s
*target
)
1010 armv4_5_common_t
*armv4_5
= target
->arch_info
;
1011 xscale_common_t
*xscale
= armv4_5
->arch_info
;
1015 "disabled", "enabled"
1018 char *arch_dbg_reason
[] =
1020 "", "\n(processor reset)", "\n(trace buffer full)"
1023 if (armv4_5
->common_magic
!= ARMV4_5_COMMON_MAGIC
)
1025 ERROR("BUG: called for a non-ARMv4/5 target");
1029 USER("target halted in %s state due to %s, current mode: %s\n"
1030 "cpsr: 0x%8.8x pc: 0x%8.8x\n"
1031 "MMU: %s, D-Cache: %s, I-Cache: %s"
1033 armv4_5_state_strings
[armv4_5
->core_state
],
1034 target_debug_reason_strings
[target
->debug_reason
],
1035 armv4_5_mode_strings
[armv4_5_mode_to_number(armv4_5
->core_mode
)],
1036 buf_get_u32(armv4_5
->core_cache
->reg_list
[ARMV4_5_CPSR
].value
, 0, 32),
1037 buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32),
1038 state
[xscale
->armv4_5_mmu
.mmu_enabled
],
1039 state
[xscale
->armv4_5_mmu
.armv4_5_cache
.d_u_cache_enabled
],
1040 state
[xscale
->armv4_5_mmu
.armv4_5_cache
.i_cache_enabled
],
1041 arch_dbg_reason
[xscale
->arch_debug_reason
]);
1046 int xscale_poll(target_t
*target
)
1048 int retval
=ERROR_OK
;
1049 armv4_5_common_t
*armv4_5
= target
->arch_info
;
1050 xscale_common_t
*xscale
= armv4_5
->arch_info
;
1052 if ((target
->state
== TARGET_RUNNING
) || (target
->state
== TARGET_DEBUG_RUNNING
))
1054 enum target_state previous_state
= target
->state
;
1055 if ((retval
= xscale_read_tx(target
, 0)) == ERROR_OK
)
1058 /* there's data to read from the tx register, we entered debug state */
1059 xscale
->handler_running
= 1;
1061 target
->state
= TARGET_HALTED
;
1063 /* process debug entry, fetching current mode regs */
1064 retval
= xscale_debug_entry(target
);
1066 else if (retval
!= ERROR_TARGET_RESOURCE_NOT_AVAILABLE
)
1068 USER("error while polling TX register, reset CPU");
1069 /* here we "lie" so GDB won't get stuck and a reset can be perfomed */
1070 target
->state
= TARGET_HALTED
;
1073 /* debug_entry could have overwritten target state (i.e. immediate resume)
1074 * don't signal event handlers in that case
1076 if (target
->state
!= TARGET_HALTED
)
1079 /* if target was running, signal that we halted
1080 * otherwise we reentered from debug execution */
1081 if (previous_state
== TARGET_RUNNING
)
1082 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
1084 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
1090 int xscale_debug_entry(target_t
*target
)
1092 armv4_5_common_t
*armv4_5
= target
->arch_info
;
1093 xscale_common_t
*xscale
= armv4_5
->arch_info
;
1101 /* clear external dbg break (will be written on next DCSR read) */
1102 xscale
->external_debug_break
= 0;
1103 if ((retval
=xscale_read_dcsr(target
))!=ERROR_OK
)
1106 /* get r0, pc, r1 to r7 and cpsr */
1107 if ((retval
=xscale_receive(target
, buffer
, 10))!=ERROR_OK
)
1110 /* move r0 from buffer to register cache */
1111 buf_set_u32(armv4_5
->core_cache
->reg_list
[0].value
, 0, 32, buffer
[0]);
1112 armv4_5
->core_cache
->reg_list
[15].dirty
= 1;
1113 armv4_5
->core_cache
->reg_list
[15].valid
= 1;
1114 DEBUG("r0: 0x%8.8x", buffer
[0]);
1116 /* move pc from buffer to register cache */
1117 buf_set_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32, buffer
[1]);
1118 armv4_5
->core_cache
->reg_list
[15].dirty
= 1;
1119 armv4_5
->core_cache
->reg_list
[15].valid
= 1;
1120 DEBUG("pc: 0x%8.8x", buffer
[1]);
1122 /* move data from buffer to register cache */
1123 for (i
= 1; i
<= 7; i
++)
1125 buf_set_u32(armv4_5
->core_cache
->reg_list
[i
].value
, 0, 32, buffer
[1 + i
]);
1126 armv4_5
->core_cache
->reg_list
[i
].dirty
= 1;
1127 armv4_5
->core_cache
->reg_list
[i
].valid
= 1;
1128 DEBUG("r%i: 0x%8.8x", i
, buffer
[i
+ 1]);
1131 buf_set_u32(armv4_5
->core_cache
->reg_list
[ARMV4_5_CPSR
].value
, 0, 32, buffer
[9]);
1132 armv4_5
->core_cache
->reg_list
[ARMV4_5_CPSR
].dirty
= 1;
1133 armv4_5
->core_cache
->reg_list
[ARMV4_5_CPSR
].valid
= 1;
1134 DEBUG("cpsr: 0x%8.8x", buffer
[9]);
1136 armv4_5
->core_mode
= buffer
[9] & 0x1f;
1137 if (armv4_5_mode_to_number(armv4_5
->core_mode
) == -1)
1139 target
->state
= TARGET_UNKNOWN
;
1140 ERROR("cpsr contains invalid mode value - communication failure");
1141 return ERROR_TARGET_FAILURE
;
1143 DEBUG("target entered debug state in %s mode", armv4_5_mode_strings
[armv4_5_mode_to_number(armv4_5
->core_mode
)]);
1145 if (buffer
[9] & 0x20)
1146 armv4_5
->core_state
= ARMV4_5_STATE_THUMB
;
1148 armv4_5
->core_state
= ARMV4_5_STATE_ARM
;
1150 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1151 if ((armv4_5
->core_mode
!= ARMV4_5_MODE_USR
) && (armv4_5
->core_mode
!= ARMV4_5_MODE_SYS
))
1153 xscale_receive(target
, buffer
, 8);
1154 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5
->core_mode
, 16).value
, 0, 32, buffer
[7]);
1155 ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5
->core_mode
, 16).dirty
= 0;
1156 ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5
->core_mode
, 16).valid
= 1;
1160 /* r8 to r14, but no spsr */
1161 xscale_receive(target
, buffer
, 7);
1164 /* move data from buffer to register cache */
1165 for (i
= 8; i
<= 14; i
++)
1167 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5
->core_mode
, i
).value
, 0, 32, buffer
[i
- 8]);
1168 ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5
->core_mode
, i
).dirty
= 0;
1169 ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5
->core_mode
, i
).valid
= 1;
1172 /* examine debug reason */
1173 xscale_read_dcsr(target
);
1174 moe
= buf_get_u32(xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].value
, 2, 3);
1176 /* stored PC (for calculating fixup) */
1177 pc
= buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32);
1181 case 0x0: /* Processor reset */
1182 target
->debug_reason
= DBG_REASON_DBGRQ
;
1183 xscale
->arch_debug_reason
= XSCALE_DBG_REASON_RESET
;
1186 case 0x1: /* Instruction breakpoint hit */
1187 target
->debug_reason
= DBG_REASON_BREAKPOINT
;
1188 xscale
->arch_debug_reason
= XSCALE_DBG_REASON_GENERIC
;
1191 case 0x2: /* Data breakpoint hit */
1192 target
->debug_reason
= DBG_REASON_WATCHPOINT
;
1193 xscale
->arch_debug_reason
= XSCALE_DBG_REASON_GENERIC
;
1196 case 0x3: /* BKPT instruction executed */
1197 target
->debug_reason
= DBG_REASON_BREAKPOINT
;
1198 xscale
->arch_debug_reason
= XSCALE_DBG_REASON_GENERIC
;
1201 case 0x4: /* Ext. debug event */
1202 target
->debug_reason
= DBG_REASON_DBGRQ
;
1203 xscale
->arch_debug_reason
= XSCALE_DBG_REASON_GENERIC
;
1206 case 0x5: /* Vector trap occured */
1207 target
->debug_reason
= DBG_REASON_BREAKPOINT
;
1208 xscale
->arch_debug_reason
= XSCALE_DBG_REASON_GENERIC
;
1211 case 0x6: /* Trace buffer full break */
1212 target
->debug_reason
= DBG_REASON_DBGRQ
;
1213 xscale
->arch_debug_reason
= XSCALE_DBG_REASON_TB_FULL
;
1216 case 0x7: /* Reserved */
1218 ERROR("Method of Entry is 'Reserved'");
1223 /* apply PC fixup */
1224 buf_set_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32, pc
);
1226 /* on the first debug entry, identify cache type */
1227 if (xscale
->armv4_5_mmu
.armv4_5_cache
.ctype
== -1)
1231 /* read cp15 cache type register */
1232 xscale_get_reg(&xscale
->reg_cache
->reg_list
[XSCALE_CACHETYPE
]);
1233 cache_type_reg
= buf_get_u32(xscale
->reg_cache
->reg_list
[XSCALE_CACHETYPE
].value
, 0, 32);
1235 armv4_5_identify_cache(cache_type_reg
, &xscale
->armv4_5_mmu
.armv4_5_cache
);
1238 /* examine MMU and Cache settings */
1239 /* read cp15 control register */
1240 xscale_get_reg(&xscale
->reg_cache
->reg_list
[XSCALE_CTRL
]);
1241 xscale
->cp15_control_reg
= buf_get_u32(xscale
->reg_cache
->reg_list
[XSCALE_CTRL
].value
, 0, 32);
1242 xscale
->armv4_5_mmu
.mmu_enabled
= (xscale
->cp15_control_reg
& 0x1U
) ? 1 : 0;
1243 xscale
->armv4_5_mmu
.armv4_5_cache
.d_u_cache_enabled
= (xscale
->cp15_control_reg
& 0x4U
) ? 1 : 0;
1244 xscale
->armv4_5_mmu
.armv4_5_cache
.i_cache_enabled
= (xscale
->cp15_control_reg
& 0x1000U
) ? 1 : 0;
1246 /* tracing enabled, read collected trace data */
1247 if (xscale
->trace
.buffer_enabled
)
1249 xscale_read_trace(target
);
1250 xscale
->trace
.buffer_fill
--;
1252 /* resume if we're still collecting trace data */
1253 if ((xscale
->arch_debug_reason
== XSCALE_DBG_REASON_TB_FULL
)
1254 && (xscale
->trace
.buffer_fill
> 0))
1256 xscale_resume(target
, 1, 0x0, 1, 0);
1260 xscale
->trace
.buffer_enabled
= 0;
1267 int xscale_halt(target_t
*target
)
1269 armv4_5_common_t
*armv4_5
= target
->arch_info
;
1270 xscale_common_t
*xscale
= armv4_5
->arch_info
;
1272 DEBUG("target->state: %s", target_state_strings
[target
->state
]);
1274 if (target
->state
== TARGET_HALTED
)
1276 WARNING("target was already halted");
1277 return ERROR_TARGET_ALREADY_HALTED
;
1279 else if (target
->state
== TARGET_UNKNOWN
)
1281 /* this must not happen for a xscale target */
1282 ERROR("target was in unknown state when halt was requested");
1283 return ERROR_TARGET_INVALID
;
1285 else if (target
->state
== TARGET_RESET
)
1287 DEBUG("target->state == TARGET_RESET");
1291 /* assert external dbg break */
1292 xscale
->external_debug_break
= 1;
1293 xscale_read_dcsr(target
);
1295 target
->debug_reason
= DBG_REASON_DBGRQ
;
1301 int xscale_enable_single_step(struct target_s
*target
, u32 next_pc
)
1303 armv4_5_common_t
*armv4_5
= target
->arch_info
;
1304 xscale_common_t
*xscale
= armv4_5
->arch_info
;
1305 reg_t
*ibcr0
= &xscale
->reg_cache
->reg_list
[XSCALE_IBCR0
];
1307 if (xscale
->ibcr0_used
)
1309 breakpoint_t
*ibcr0_bp
= breakpoint_find(target
, buf_get_u32(ibcr0
->value
, 0, 32) & 0xfffffffe);
1313 xscale_unset_breakpoint(target
, ibcr0_bp
);
1317 ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1322 xscale_set_reg_u32(ibcr0
, next_pc
| 0x1);
1327 int xscale_disable_single_step(struct target_s
*target
)
1329 armv4_5_common_t
*armv4_5
= target
->arch_info
;
1330 xscale_common_t
*xscale
= armv4_5
->arch_info
;
1331 reg_t
*ibcr0
= &xscale
->reg_cache
->reg_list
[XSCALE_IBCR0
];
1333 xscale_set_reg_u32(ibcr0
, 0x0);
1338 int xscale_resume(struct target_s
*target
, int current
, u32 address
, int handle_breakpoints
, int debug_execution
)
1340 armv4_5_common_t
*armv4_5
= target
->arch_info
;
1341 xscale_common_t
*xscale
= armv4_5
->arch_info
;
1342 breakpoint_t
*breakpoint
= target
->breakpoints
;
1351 if (target
->state
!= TARGET_HALTED
)
1353 WARNING("target not halted");
1354 return ERROR_TARGET_NOT_HALTED
;
1357 if (!debug_execution
)
1359 target_free_all_working_areas(target
);
1362 /* update vector tables */
1363 if ((retval
=xscale_update_vectors(target
))!=ERROR_OK
)
1366 /* current = 1: continue on current pc, otherwise continue at <address> */
1368 buf_set_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32, address
);
1370 current_pc
= buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32);
1372 /* if we're at the reset vector, we have to simulate the branch */
1373 if (current_pc
== 0x0)
1375 arm_simulate_step(target
, NULL
);
1376 current_pc
= buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32);
1379 /* the front-end may request us not to handle breakpoints */
1380 if (handle_breakpoints
)
1382 if ((breakpoint
= breakpoint_find(target
, buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32))))
1386 /* there's a breakpoint at the current PC, we have to step over it */
1387 DEBUG("unset breakpoint at 0x%8.8x", breakpoint
->address
);
1388 xscale_unset_breakpoint(target
, breakpoint
);
1390 /* calculate PC of next instruction */
1391 if ((retval
= arm_simulate_step(target
, &next_pc
)) != ERROR_OK
)
1394 target_read_u32(target
, current_pc
, ¤t_opcode
);
1395 ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode
);
1398 DEBUG("enable single-step");
1399 xscale_enable_single_step(target
, next_pc
);
1401 /* restore banked registers */
1402 xscale_restore_context(target
);
1404 /* send resume request (command 0x30 or 0x31)
1405 * clean the trace buffer if it is to be enabled (0x62) */
1406 if (xscale
->trace
.buffer_enabled
)
1408 xscale_send_u32(target
, 0x62);
1409 xscale_send_u32(target
, 0x31);
1412 xscale_send_u32(target
, 0x30);
1415 xscale_send_u32(target
, buf_get_u32(armv4_5
->core_cache
->reg_list
[ARMV4_5_CPSR
].value
, 0, 32));
1416 DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5
->core_cache
->reg_list
[ARMV4_5_CPSR
].value
, 0, 32));
1418 for (i
= 7; i
>= 0; i
--)
1421 xscale_send_u32(target
, buf_get_u32(armv4_5
->core_cache
->reg_list
[i
].value
, 0, 32));
1422 DEBUG("writing r%i with value 0x%8.8x", i
, buf_get_u32(armv4_5
->core_cache
->reg_list
[i
].value
, 0, 32));
1426 xscale_send_u32(target
, buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32));
1427 DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32));
1429 /* wait for and process debug entry */
1430 xscale_debug_entry(target
);
1432 DEBUG("disable single-step");
1433 xscale_disable_single_step(target
);
1435 DEBUG("set breakpoint at 0x%8.8x", breakpoint
->address
);
1436 xscale_set_breakpoint(target
, breakpoint
);
1440 /* enable any pending breakpoints and watchpoints */
1441 xscale_enable_breakpoints(target
);
1442 xscale_enable_watchpoints(target
);
1444 /* restore banked registers */
1445 xscale_restore_context(target
);
1447 /* send resume request (command 0x30 or 0x31)
1448 * clean the trace buffer if it is to be enabled (0x62) */
1449 if (xscale
->trace
.buffer_enabled
)
1451 xscale_send_u32(target
, 0x62);
1452 xscale_send_u32(target
, 0x31);
1455 xscale_send_u32(target
, 0x30);
1458 xscale_send_u32(target
, buf_get_u32(armv4_5
->core_cache
->reg_list
[ARMV4_5_CPSR
].value
, 0, 32));
1459 DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5
->core_cache
->reg_list
[ARMV4_5_CPSR
].value
, 0, 32));
1461 for (i
= 7; i
>= 0; i
--)
1464 xscale_send_u32(target
, buf_get_u32(armv4_5
->core_cache
->reg_list
[i
].value
, 0, 32));
1465 DEBUG("writing r%i with value 0x%8.8x", i
, buf_get_u32(armv4_5
->core_cache
->reg_list
[i
].value
, 0, 32));
1469 xscale_send_u32(target
, buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32));
1470 DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32));
1472 target
->debug_reason
= DBG_REASON_NOTHALTED
;
1474 if (!debug_execution
)
1476 /* registers are now invalid */
1477 armv4_5_invalidate_core_regs(target
);
1478 target
->state
= TARGET_RUNNING
;
1479 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1483 target
->state
= TARGET_DEBUG_RUNNING
;
1484 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
1487 DEBUG("target resumed");
1489 xscale
->handler_running
= 1;
1494 int xscale_step(struct target_s
*target
, int current
, u32 address
, int handle_breakpoints
)
1496 armv4_5_common_t
*armv4_5
= target
->arch_info
;
1497 xscale_common_t
*xscale
= armv4_5
->arch_info
;
1498 breakpoint_t
*breakpoint
= target
->breakpoints
;
1500 u32 current_pc
, next_pc
;
1504 if (target
->state
!= TARGET_HALTED
)
1506 WARNING("target not halted");
1507 return ERROR_TARGET_NOT_HALTED
;
1510 /* current = 1: continue on current pc, otherwise continue at <address> */
1512 buf_set_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32, address
);
1514 current_pc
= buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32);
1516 /* if we're at the reset vector, we have to simulate the step */
1517 if (current_pc
== 0x0)
1519 arm_simulate_step(target
, NULL
);
1520 current_pc
= buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32);
1522 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1523 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
1528 /* the front-end may request us not to handle breakpoints */
1529 if (handle_breakpoints
)
1530 if ((breakpoint
= breakpoint_find(target
, buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32))))
1532 xscale_unset_breakpoint(target
, breakpoint
);
1535 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1537 /* calculate PC of next instruction */
1538 if ((retval
= arm_simulate_step(target
, &next_pc
)) != ERROR_OK
)
1541 target_read_u32(target
, current_pc
, ¤t_opcode
);
1542 ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode
);
1545 DEBUG("enable single-step");
1546 xscale_enable_single_step(target
, next_pc
);
1548 /* restore banked registers */
1549 xscale_restore_context(target
);
1551 /* send resume request (command 0x30 or 0x31)
1552 * clean the trace buffer if it is to be enabled (0x62) */
1553 if (xscale
->trace
.buffer_enabled
)
1555 xscale_send_u32(target
, 0x62);
1556 xscale_send_u32(target
, 0x31);
1559 xscale_send_u32(target
, 0x30);
1562 xscale_send_u32(target
, buf_get_u32(armv4_5
->core_cache
->reg_list
[ARMV4_5_CPSR
].value
, 0, 32));
1563 DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5
->core_cache
->reg_list
[ARMV4_5_CPSR
].value
, 0, 32));
1565 for (i
= 7; i
>= 0; i
--)
1568 xscale_send_u32(target
, buf_get_u32(armv4_5
->core_cache
->reg_list
[i
].value
, 0, 32));
1569 DEBUG("writing r%i with value 0x%8.8x", i
, buf_get_u32(armv4_5
->core_cache
->reg_list
[i
].value
, 0, 32));
1573 xscale_send_u32(target
, buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32));
1574 DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32));
1576 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1578 /* registers are now invalid */
1579 armv4_5_invalidate_core_regs(target
);
1581 /* wait for and process debug entry */
1582 xscale_debug_entry(target
);
1584 DEBUG("disable single-step");
1585 xscale_disable_single_step(target
);
1587 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
1591 xscale_set_breakpoint(target
, breakpoint
);
1594 DEBUG("target stepped");
1600 int xscale_assert_reset(target_t
*target
)
1602 armv4_5_common_t
*armv4_5
= target
->arch_info
;
1603 xscale_common_t
*xscale
= armv4_5
->arch_info
;
1605 DEBUG("target->state: %s", target_state_strings
[target
->state
]);
1607 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1608 * end up in T-L-R, which would reset JTAG
1610 jtag_add_end_state(TAP_RTI
);
1611 xscale_jtag_set_instr(xscale
->jtag_info
.chain_pos
, xscale
->jtag_info
.dcsr
);
1613 /* set Hold reset, Halt mode and Trap Reset */
1614 buf_set_u32(xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].value
, 30, 1, 0x1);
1615 buf_set_u32(xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].value
, 16, 1, 0x1);
1616 xscale_write_dcsr(target
, 1, 0);
1618 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1619 xscale_jtag_set_instr(xscale
->jtag_info
.chain_pos
, 0x7f);
1620 jtag_execute_queue();
1623 jtag_add_reset(0, 1);
1625 /* sleep 1ms, to be sure we fulfill any requirements */
1626 jtag_add_sleep(1000);
1627 jtag_execute_queue();
1629 target
->state
= TARGET_RESET
;
1634 int xscale_deassert_reset(target_t
*target
)
1636 armv4_5_common_t
*armv4_5
= target
->arch_info
;
1637 xscale_common_t
*xscale
= armv4_5
->arch_info
;
1639 fileio_t debug_handler
;
1647 breakpoint_t
*breakpoint
= target
->breakpoints
;
1651 xscale
->ibcr_available
= 2;
1652 xscale
->ibcr0_used
= 0;
1653 xscale
->ibcr1_used
= 0;
1655 xscale
->dbr_available
= 2;
1656 xscale
->dbr0_used
= 0;
1657 xscale
->dbr1_used
= 0;
1659 /* mark all hardware breakpoints as unset */
1662 if (breakpoint
->type
== BKPT_HARD
)
1664 breakpoint
->set
= 0;
1666 breakpoint
= breakpoint
->next
;
1669 if (!xscale
->handler_installed
)
1672 jtag_add_reset(0, 0);
1674 /* wait 300ms; 150 and 100ms were not enough */
1675 jtag_add_sleep(300*1000);
1677 jtag_add_runtest(2030, TAP_RTI
);
1678 jtag_execute_queue();
1680 /* set Hold reset, Halt mode and Trap Reset */
1681 buf_set_u32(xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].value
, 30, 1, 0x1);
1682 buf_set_u32(xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].value
, 16, 1, 0x1);
1683 xscale_write_dcsr(target
, 1, 0);
1685 /* Load debug handler */
1686 if (fileio_open(&debug_handler
, "xscale/debug_handler.bin", FILEIO_READ
, FILEIO_BINARY
) != ERROR_OK
)
1691 if ((binary_size
= debug_handler
.size
) % 4)
1693 ERROR("debug_handler.bin: size not a multiple of 4");
1697 if (binary_size
> 0x800)
1699 ERROR("debug_handler.bin: larger than 2kb");
1703 binary_size
= CEIL(binary_size
, 32) * 32;
1705 address
= xscale
->handler_address
;
1706 while (binary_size
> 0)
1711 if ((retval
= fileio_read(&debug_handler
, 32, buffer
, &buf_cnt
)) != ERROR_OK
)
1716 for (i
= 0; i
< buf_cnt
; i
+= 4)
1718 /* convert LE buffer to host-endian u32 */
1719 cache_line
[i
/ 4] = le_to_h_u32(&buffer
[i
]);
1722 for (; buf_cnt
< 32; buf_cnt
+= 4)
1724 cache_line
[buf_cnt
/ 4] = 0xe1a08008;
1727 /* only load addresses other than the reset vectors */
1728 if ((address
% 0x400) != 0x0)
1730 xscale_load_ic(target
, 1, address
, cache_line
);
1734 binary_size
-= buf_cnt
;
1737 xscale_load_ic(target
, 1, 0x0, xscale
->low_vectors
);
1738 xscale_load_ic(target
, 1, 0xffff0000, xscale
->high_vectors
);
1740 jtag_add_runtest(30, TAP_RTI
);
1742 jtag_add_sleep(100000);
1744 /* set Hold reset, Halt mode and Trap Reset */
1745 buf_set_u32(xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].value
, 30, 1, 0x1);
1746 buf_set_u32(xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].value
, 16, 1, 0x1);
1747 xscale_write_dcsr(target
, 1, 0);
1749 /* clear Hold reset to let the target run (should enter debug handler) */
1750 xscale_write_dcsr(target
, 0, 1);
1751 target
->state
= TARGET_RUNNING
;
1753 if ((target
->reset_mode
!= RESET_HALT
) && (target
->reset_mode
!= RESET_INIT
))
1755 jtag_add_sleep(10000);
1757 /* we should have entered debug now */
1758 xscale_debug_entry(target
);
1759 target
->state
= TARGET_HALTED
;
1761 /* resume the target */
1762 xscale_resume(target
, 1, 0x0, 1, 0);
1765 fileio_close(&debug_handler
);
1769 jtag_add_reset(0, 0);
1776 int xscale_soft_reset_halt(struct target_s
*target
)
1782 int xscale_prepare_reset_halt(struct target_s
*target
)
1784 /* nothing to be done for reset_halt on XScale targets
1785 * we always halt after a reset to upload the debug handler
1790 int xscale_read_core_reg(struct target_s
*target
, int num
, enum armv4_5_mode mode
)
1796 int xscale_write_core_reg(struct target_s
*target
, int num
, enum armv4_5_mode mode
, u32 value
)
1802 int xscale_full_context(target_t
*target
)
1804 armv4_5_common_t
*armv4_5
= target
->arch_info
;
1812 if (target
->state
!= TARGET_HALTED
)
1814 WARNING("target not halted");
1815 return ERROR_TARGET_NOT_HALTED
;
1818 buffer
= malloc(4 * 8);
1820 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1821 * we can't enter User mode on an XScale (unpredictable),
1822 * but User shares registers with SYS
1824 for(i
= 1; i
< 7; i
++)
1828 /* check if there are invalid registers in the current mode
1830 for (j
= 0; j
<= 16; j
++)
1832 if (ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5_number_to_mode(i
), j
).valid
== 0)
1840 /* request banked registers */
1841 xscale_send_u32(target
, 0x0);
1844 tmp_cpsr
|= armv4_5_number_to_mode(i
);
1845 tmp_cpsr
|= 0xc0; /* I/F bits */
1847 /* send CPSR for desired mode */
1848 xscale_send_u32(target
, tmp_cpsr
);
1850 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1851 if ((armv4_5_number_to_mode(i
) != ARMV4_5_MODE_USR
) && (armv4_5_number_to_mode(i
) != ARMV4_5_MODE_SYS
))
1853 xscale_receive(target
, buffer
, 8);
1854 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5
->core_mode
, 16).value
, 0, 32, buffer
[7]);
1855 ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5_number_to_mode(i
), 16).dirty
= 0;
1856 ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5_number_to_mode(i
), 16).valid
= 1;
1860 xscale_receive(target
, buffer
, 7);
1863 /* move data from buffer to register cache */
1864 for (j
= 8; j
<= 14; j
++)
1866 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5_number_to_mode(i
), j
).value
, 0, 32, buffer
[j
- 8]);
1867 ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5_number_to_mode(i
), j
).dirty
= 0;
1868 ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5_number_to_mode(i
), j
).valid
= 1;
1878 int xscale_restore_context(target_t
*target
)
1880 armv4_5_common_t
*armv4_5
= target
->arch_info
;
1886 if (target
->state
!= TARGET_HALTED
)
1888 WARNING("target not halted");
1889 return ERROR_TARGET_NOT_HALTED
;
1892 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1893 * we can't enter User mode on an XScale (unpredictable),
1894 * but User shares registers with SYS
1896 for(i
= 1; i
< 7; i
++)
1900 /* check if there are invalid registers in the current mode
1902 for (j
= 8; j
<= 14; j
++)
1904 if (ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5_number_to_mode(i
), j
).dirty
== 1)
1908 /* if not USR/SYS, check if the SPSR needs to be written */
1909 if ((armv4_5_number_to_mode(i
) != ARMV4_5_MODE_USR
) && (armv4_5_number_to_mode(i
) != ARMV4_5_MODE_SYS
))
1911 if (ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5_number_to_mode(i
), 16).dirty
== 1)
1919 /* send banked registers */
1920 xscale_send_u32(target
, 0x1);
1923 tmp_cpsr
|= armv4_5_number_to_mode(i
);
1924 tmp_cpsr
|= 0xc0; /* I/F bits */
1926 /* send CPSR for desired mode */
1927 xscale_send_u32(target
, tmp_cpsr
);
1929 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1930 for (j
= 8; j
<= 14; j
++)
1932 xscale_send_u32(target
, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5
->core_mode
, j
).value
, 0, 32));
1933 ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5_number_to_mode(i
), j
).dirty
= 0;
1936 if ((armv4_5_number_to_mode(i
) != ARMV4_5_MODE_USR
) && (armv4_5_number_to_mode(i
) != ARMV4_5_MODE_SYS
))
1938 xscale_send_u32(target
, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5
->core_mode
, 16).value
, 0, 32));
1939 ARMV4_5_CORE_REG_MODE(armv4_5
->core_cache
, armv4_5_number_to_mode(i
), 16).dirty
= 0;
1947 int xscale_read_memory(struct target_s
*target
, u32 address
, u32 size
, u32 count
, u8
*buffer
)
1949 armv4_5_common_t
*armv4_5
= target
->arch_info
;
1950 xscale_common_t
*xscale
= armv4_5
->arch_info
;
1955 DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address
, size
, count
);
1957 if (target
->state
!= TARGET_HALTED
)
1959 WARNING("target not halted");
1960 return ERROR_TARGET_NOT_HALTED
;
1963 /* sanitize arguments */
1964 if (((size
!= 4) && (size
!= 2) && (size
!= 1)) || (count
== 0) || !(buffer
))
1965 return ERROR_INVALID_ARGUMENTS
;
1967 if (((size
== 4) && (address
& 0x3u
)) || ((size
== 2) && (address
& 0x1u
)))
1968 return ERROR_TARGET_UNALIGNED_ACCESS
;
1970 /* send memory read request (command 0x1n, n: access size) */
1971 if ((retval
=xscale_send_u32(target
, 0x10 | size
))!=ERROR_OK
)
1974 /* send base address for read request */
1975 if ((retval
=xscale_send_u32(target
, address
))!=ERROR_OK
)
1978 /* send number of requested data words */
1979 if ((retval
=xscale_send_u32(target
, count
))!=ERROR_OK
)
1982 /* receive data from target (count times 32-bit words in host endianness) */
1983 buf32
= malloc(4 * count
);
1984 if ((retval
=xscale_receive(target
, buf32
, count
))!=ERROR_OK
)
1987 /* extract data from host-endian buffer into byte stream */
1988 for (i
= 0; i
< count
; i
++)
1993 target_buffer_set_u32(target
, buffer
, buf32
[i
]);
1997 target_buffer_set_u16(target
, buffer
, buf32
[i
] & 0xffff);
2001 *buffer
++ = buf32
[i
] & 0xff;
2004 ERROR("should never get here");
2011 /* examine DCSR, to see if Sticky Abort (SA) got set */
2012 if ((retval
=xscale_read_dcsr(target
))!=ERROR_OK
)
2014 if (buf_get_u32(xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].value
, 5, 1) == 1)
2017 if ((retval
=xscale_send_u32(target
, 0x60))!=ERROR_OK
)
2020 return ERROR_TARGET_DATA_ABORT
;
2026 int xscale_write_memory(struct target_s
*target
, u32 address
, u32 size
, u32 count
, u8
*buffer
)
2028 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2029 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2032 DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address
, size
, count
);
2034 if (target
->state
!= TARGET_HALTED
)
2036 WARNING("target not halted");
2037 return ERROR_TARGET_NOT_HALTED
;
2040 /* sanitize arguments */
2041 if (((size
!= 4) && (size
!= 2) && (size
!= 1)) || (count
== 0) || !(buffer
))
2042 return ERROR_INVALID_ARGUMENTS
;
2044 if (((size
== 4) && (address
& 0x3u
)) || ((size
== 2) && (address
& 0x1u
)))
2045 return ERROR_TARGET_UNALIGNED_ACCESS
;
2047 /* send memory write request (command 0x2n, n: access size) */
2048 if ((retval
=xscale_send_u32(target
, 0x20 | size
))!=ERROR_OK
)
2051 /* send base address for read request */
2052 if ((retval
=xscale_send_u32(target
, address
))!=ERROR_OK
)
2055 /* send number of requested data words to be written*/
2056 if ((retval
=xscale_send_u32(target
, count
))!=ERROR_OK
)
2059 /* extract data from host-endian buffer into byte stream */
2061 for (i
= 0; i
< count
; i
++)
2066 value
= target_buffer_get_u32(target
, buffer
);
2067 xscale_send_u32(target
, value
);
2071 value
= target_buffer_get_u16(target
, buffer
);
2072 xscale_send_u32(target
, value
);
2077 xscale_send_u32(target
, value
);
2081 ERROR("should never get here");
2086 if ((retval
=xscale_send(target
, buffer
, count
, size
))!=ERROR_OK
)
2089 /* examine DCSR, to see if Sticky Abort (SA) got set */
2090 if ((retval
=xscale_read_dcsr(target
))!=ERROR_OK
)
2092 if (buf_get_u32(xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].value
, 5, 1) == 1)
2095 if ((retval
=xscale_send_u32(target
, 0x60))!=ERROR_OK
)
2098 return ERROR_TARGET_DATA_ABORT
;
2104 int xscale_bulk_write_memory(target_t
*target
, u32 address
, u32 count
, u8
*buffer
)
2106 return xscale_write_memory(target
, address
, 4, count
, buffer
);
2109 int xscale_checksum_memory(struct target_s
*target
, u32 address
, u32 count
, u32
* checksum
)
2111 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2114 u32
xscale_get_ttb(target_t
*target
)
2116 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2117 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2120 xscale_get_reg(&xscale
->reg_cache
->reg_list
[XSCALE_TTB
]);
2121 ttb
= buf_get_u32(xscale
->reg_cache
->reg_list
[XSCALE_TTB
].value
, 0, 32);
2126 void xscale_disable_mmu_caches(target_t
*target
, int mmu
, int d_u_cache
, int i_cache
)
2128 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2129 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2132 /* read cp15 control register */
2133 xscale_get_reg(&xscale
->reg_cache
->reg_list
[XSCALE_CTRL
]);
2134 cp15_control
= buf_get_u32(xscale
->reg_cache
->reg_list
[XSCALE_CTRL
].value
, 0, 32);
2137 cp15_control
&= ~0x1U
;
2142 xscale_send_u32(target
, 0x50);
2143 xscale_send_u32(target
, xscale
->cache_clean_address
);
2145 /* invalidate DCache */
2146 xscale_send_u32(target
, 0x51);
2148 cp15_control
&= ~0x4U
;
2153 /* invalidate ICache */
2154 xscale_send_u32(target
, 0x52);
2155 cp15_control
&= ~0x1000U
;
2158 /* write new cp15 control register */
2159 xscale_set_reg_u32(&xscale
->reg_cache
->reg_list
[XSCALE_CTRL
], cp15_control
);
2161 /* execute cpwait to ensure outstanding operations complete */
2162 xscale_send_u32(target
, 0x53);
2165 void xscale_enable_mmu_caches(target_t
*target
, int mmu
, int d_u_cache
, int i_cache
)
2167 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2168 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2171 /* read cp15 control register */
2172 xscale_get_reg(&xscale
->reg_cache
->reg_list
[XSCALE_CTRL
]);
2173 cp15_control
= buf_get_u32(xscale
->reg_cache
->reg_list
[XSCALE_CTRL
].value
, 0, 32);
2176 cp15_control
|= 0x1U
;
2179 cp15_control
|= 0x4U
;
2182 cp15_control
|= 0x1000U
;
2184 /* write new cp15 control register */
2185 xscale_set_reg_u32(&xscale
->reg_cache
->reg_list
[XSCALE_CTRL
], cp15_control
);
2187 /* execute cpwait to ensure outstanding operations complete */
2188 xscale_send_u32(target
, 0x53);
2191 int xscale_set_breakpoint(struct target_s
*target
, breakpoint_t
*breakpoint
)
2193 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2194 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2196 if (target
->state
!= TARGET_HALTED
)
2198 WARNING("target not halted");
2199 return ERROR_TARGET_NOT_HALTED
;
2202 if (xscale
->force_hw_bkpts
)
2203 breakpoint
->type
= BKPT_HARD
;
2205 if (breakpoint
->set
)
2207 WARNING("breakpoint already set");
2211 if (breakpoint
->type
== BKPT_HARD
)
2213 u32 value
= breakpoint
->address
| 1;
2214 if (!xscale
->ibcr0_used
)
2216 xscale_set_reg_u32(&xscale
->reg_cache
->reg_list
[XSCALE_IBCR0
], value
);
2217 xscale
->ibcr0_used
= 1;
2218 breakpoint
->set
= 1; /* breakpoint set on first breakpoint register */
2220 else if (!xscale
->ibcr1_used
)
2222 xscale_set_reg_u32(&xscale
->reg_cache
->reg_list
[XSCALE_IBCR1
], value
);
2223 xscale
->ibcr1_used
= 1;
2224 breakpoint
->set
= 2; /* breakpoint set on second breakpoint register */
2228 ERROR("BUG: no hardware comparator available");
2232 else if (breakpoint
->type
== BKPT_SOFT
)
2234 if (breakpoint
->length
== 4)
2236 /* keep the original instruction in target endianness */
2237 target
->type
->read_memory(target
, breakpoint
->address
, 4, 1, breakpoint
->orig_instr
);
2238 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2239 target_write_u32(target
, breakpoint
->address
, xscale
->arm_bkpt
);
2243 /* keep the original instruction in target endianness */
2244 target
->type
->read_memory(target
, breakpoint
->address
, 2, 1, breakpoint
->orig_instr
);
2245 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2246 target_write_u32(target
, breakpoint
->address
, xscale
->thumb_bkpt
);
2248 breakpoint
->set
= 1;
2255 int xscale_add_breakpoint(struct target_s
*target
, breakpoint_t
*breakpoint
)
2257 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2258 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2260 if (target
->state
!= TARGET_HALTED
)
2262 WARNING("target not halted");
2263 return ERROR_TARGET_NOT_HALTED
;
2266 if (xscale
->force_hw_bkpts
)
2268 DEBUG("forcing use of hardware breakpoint at address 0x%8.8x", breakpoint
->address
);
2269 breakpoint
->type
= BKPT_HARD
;
2272 if ((breakpoint
->type
== BKPT_HARD
) && (xscale
->ibcr_available
< 1))
2274 INFO("no breakpoint unit available for hardware breakpoint");
2275 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2279 xscale
->ibcr_available
--;
2282 if ((breakpoint
->length
!= 2) && (breakpoint
->length
!= 4))
2284 INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2285 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2291 int xscale_unset_breakpoint(struct target_s
*target
, breakpoint_t
*breakpoint
)
2293 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2294 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2296 if (target
->state
!= TARGET_HALTED
)
2298 WARNING("target not halted");
2299 return ERROR_TARGET_NOT_HALTED
;
2302 if (!breakpoint
->set
)
2304 WARNING("breakpoint not set");
2308 if (breakpoint
->type
== BKPT_HARD
)
2310 if (breakpoint
->set
== 1)
2312 xscale_set_reg_u32(&xscale
->reg_cache
->reg_list
[XSCALE_IBCR0
], 0x0);
2313 xscale
->ibcr0_used
= 0;
2315 else if (breakpoint
->set
== 2)
2317 xscale_set_reg_u32(&xscale
->reg_cache
->reg_list
[XSCALE_IBCR1
], 0x0);
2318 xscale
->ibcr1_used
= 0;
2320 breakpoint
->set
= 0;
2324 /* restore original instruction (kept in target endianness) */
2325 if (breakpoint
->length
== 4)
2327 target
->type
->write_memory(target
, breakpoint
->address
, 4, 1, breakpoint
->orig_instr
);
2331 target
->type
->write_memory(target
, breakpoint
->address
, 2, 1, breakpoint
->orig_instr
);
2333 breakpoint
->set
= 0;
2339 int xscale_remove_breakpoint(struct target_s
*target
, breakpoint_t
*breakpoint
)
2341 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2342 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2344 if (target
->state
!= TARGET_HALTED
)
2346 WARNING("target not halted");
2347 return ERROR_TARGET_NOT_HALTED
;
2350 if (breakpoint
->set
)
2352 xscale_unset_breakpoint(target
, breakpoint
);
2355 if (breakpoint
->type
== BKPT_HARD
)
2356 xscale
->ibcr_available
++;
2361 int xscale_set_watchpoint(struct target_s
*target
, watchpoint_t
*watchpoint
)
2363 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2364 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2366 reg_t
*dbcon
= &xscale
->reg_cache
->reg_list
[XSCALE_DBCON
];
2367 u32 dbcon_value
= buf_get_u32(dbcon
->value
, 0, 32);
2369 if (target
->state
!= TARGET_HALTED
)
2371 WARNING("target not halted");
2372 return ERROR_TARGET_NOT_HALTED
;
2375 xscale_get_reg(dbcon
);
2377 switch (watchpoint
->rw
)
2389 ERROR("BUG: watchpoint->rw neither read, write nor access");
2392 if (!xscale
->dbr0_used
)
2394 xscale_set_reg_u32(&xscale
->reg_cache
->reg_list
[XSCALE_DBR0
], watchpoint
->address
);
2395 dbcon_value
|= enable
;
2396 xscale_set_reg_u32(dbcon
, dbcon_value
);
2397 watchpoint
->set
= 1;
2398 xscale
->dbr0_used
= 1;
2400 else if (!xscale
->dbr1_used
)
2402 xscale_set_reg_u32(&xscale
->reg_cache
->reg_list
[XSCALE_DBR1
], watchpoint
->address
);
2403 dbcon_value
|= enable
<< 2;
2404 xscale_set_reg_u32(dbcon
, dbcon_value
);
2405 watchpoint
->set
= 2;
2406 xscale
->dbr1_used
= 1;
2410 ERROR("BUG: no hardware comparator available");
2417 int xscale_add_watchpoint(struct target_s
*target
, watchpoint_t
*watchpoint
)
2419 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2420 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2422 if (target
->state
!= TARGET_HALTED
)
2424 WARNING("target not halted");
2425 return ERROR_TARGET_NOT_HALTED
;
2428 if (xscale
->dbr_available
< 1)
2430 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2433 if ((watchpoint
->length
!= 1) && (watchpoint
->length
!= 2) && (watchpoint
->length
!= 4))
2435 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2438 xscale
->dbr_available
--;
2443 int xscale_unset_watchpoint(struct target_s
*target
, watchpoint_t
*watchpoint
)
2445 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2446 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2447 reg_t
*dbcon
= &xscale
->reg_cache
->reg_list
[XSCALE_DBCON
];
2448 u32 dbcon_value
= buf_get_u32(dbcon
->value
, 0, 32);
2450 if (target
->state
!= TARGET_HALTED
)
2452 WARNING("target not halted");
2453 return ERROR_TARGET_NOT_HALTED
;
2456 if (!watchpoint
->set
)
2458 WARNING("breakpoint not set");
2462 if (watchpoint
->set
== 1)
2464 dbcon_value
&= ~0x3;
2465 xscale_set_reg_u32(dbcon
, dbcon_value
);
2466 xscale
->dbr0_used
= 0;
2468 else if (watchpoint
->set
== 2)
2470 dbcon_value
&= ~0xc;
2471 xscale_set_reg_u32(dbcon
, dbcon_value
);
2472 xscale
->dbr1_used
= 0;
2474 watchpoint
->set
= 0;
2479 int xscale_remove_watchpoint(struct target_s
*target
, watchpoint_t
*watchpoint
)
2481 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2482 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2484 if (target
->state
!= TARGET_HALTED
)
2486 WARNING("target not halted");
2487 return ERROR_TARGET_NOT_HALTED
;
2490 if (watchpoint
->set
)
2492 xscale_unset_watchpoint(target
, watchpoint
);
2495 xscale
->dbr_available
++;
2500 void xscale_enable_watchpoints(struct target_s
*target
)
2502 watchpoint_t
*watchpoint
= target
->watchpoints
;
2506 if (watchpoint
->set
== 0)
2507 xscale_set_watchpoint(target
, watchpoint
);
2508 watchpoint
= watchpoint
->next
;
2512 void xscale_enable_breakpoints(struct target_s
*target
)
2514 breakpoint_t
*breakpoint
= target
->breakpoints
;
2516 /* set any pending breakpoints */
2519 if (breakpoint
->set
== 0)
2520 xscale_set_breakpoint(target
, breakpoint
);
2521 breakpoint
= breakpoint
->next
;
2525 int xscale_get_reg(reg_t
*reg
)
2527 xscale_reg_t
*arch_info
= reg
->arch_info
;
2528 target_t
*target
= arch_info
->target
;
2529 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2530 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2532 /* DCSR, TX and RX are accessible via JTAG */
2533 if (strcmp(reg
->name
, "XSCALE_DCSR") == 0)
2535 return xscale_read_dcsr(arch_info
->target
);
2537 else if (strcmp(reg
->name
, "XSCALE_TX") == 0)
2539 /* 1 = consume register content */
2540 return xscale_read_tx(arch_info
->target
, 1);
2542 else if (strcmp(reg
->name
, "XSCALE_RX") == 0)
2544 /* can't read from RX register (host -> debug handler) */
2547 else if (strcmp(reg
->name
, "XSCALE_TXRXCTRL") == 0)
2549 /* can't (explicitly) read from TXRXCTRL register */
2552 else /* Other DBG registers have to be transfered by the debug handler */
2554 /* send CP read request (command 0x40) */
2555 xscale_send_u32(target
, 0x40);
2557 /* send CP register number */
2558 xscale_send_u32(target
, arch_info
->dbg_handler_number
);
2560 /* read register value */
2561 xscale_read_tx(target
, 1);
2562 buf_cpy(xscale
->reg_cache
->reg_list
[XSCALE_TX
].value
, reg
->value
, 32);
2571 int xscale_set_reg(reg_t
*reg
, u8
* buf
)
2573 xscale_reg_t
*arch_info
= reg
->arch_info
;
2574 target_t
*target
= arch_info
->target
;
2575 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2576 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2577 u32 value
= buf_get_u32(buf
, 0, 32);
2579 /* DCSR, TX and RX are accessible via JTAG */
2580 if (strcmp(reg
->name
, "XSCALE_DCSR") == 0)
2582 buf_set_u32(xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].value
, 0, 32, value
);
2583 return xscale_write_dcsr(arch_info
->target
, -1, -1);
2585 else if (strcmp(reg
->name
, "XSCALE_RX") == 0)
2587 buf_set_u32(xscale
->reg_cache
->reg_list
[XSCALE_RX
].value
, 0, 32, value
);
2588 return xscale_write_rx(arch_info
->target
);
2590 else if (strcmp(reg
->name
, "XSCALE_TX") == 0)
2592 /* can't write to TX register (debug-handler -> host) */
2595 else if (strcmp(reg
->name
, "XSCALE_TXRXCTRL") == 0)
2597 /* can't (explicitly) write to TXRXCTRL register */
2600 else /* Other DBG registers have to be transfered by the debug handler */
2602 /* send CP write request (command 0x41) */
2603 xscale_send_u32(target
, 0x41);
2605 /* send CP register number */
2606 xscale_send_u32(target
, arch_info
->dbg_handler_number
);
2608 /* send CP register value */
2609 xscale_send_u32(target
, value
);
2610 buf_set_u32(reg
->value
, 0, 32, value
);
2616 /* convenience wrapper to access XScale specific registers */
2617 int xscale_set_reg_u32(reg_t
*reg
, u32 value
)
2621 buf_set_u32(buf
, 0, 32, value
);
2623 return xscale_set_reg(reg
, buf
);
2626 int xscale_write_dcsr_sw(target_t
*target
, u32 value
)
2628 /* get pointers to arch-specific information */
2629 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2630 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2631 reg_t
*dcsr
= &xscale
->reg_cache
->reg_list
[XSCALE_DCSR
];
2632 xscale_reg_t
*dcsr_arch_info
= dcsr
->arch_info
;
2634 /* send CP write request (command 0x41) */
2635 xscale_send_u32(target
, 0x41);
2637 /* send CP register number */
2638 xscale_send_u32(target
, dcsr_arch_info
->dbg_handler_number
);
2640 /* send CP register value */
2641 xscale_send_u32(target
, value
);
2642 buf_set_u32(dcsr
->value
, 0, 32, value
);
2647 int xscale_read_trace(target_t
*target
)
2649 /* get pointers to arch-specific information */
2650 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2651 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2652 xscale_trace_data_t
**trace_data_p
;
2654 /* 258 words from debug handler
2655 * 256 trace buffer entries
2656 * 2 checkpoint addresses
2658 u32 trace_buffer
[258];
2659 int is_address
[256];
2662 if (target
->state
!= TARGET_HALTED
)
2664 WARNING("target must be stopped to read trace data");
2665 return ERROR_TARGET_NOT_HALTED
;
2668 /* send read trace buffer command (command 0x61) */
2669 xscale_send_u32(target
, 0x61);
2671 /* receive trace buffer content */
2672 xscale_receive(target
, trace_buffer
, 258);
2674 /* parse buffer backwards to identify address entries */
2675 for (i
= 255; i
>= 0; i
--)
2678 if (((trace_buffer
[i
] & 0xf0) == 0x90) ||
2679 ((trace_buffer
[i
] & 0xf0) == 0xd0))
2682 is_address
[--i
] = 1;
2684 is_address
[--i
] = 1;
2686 is_address
[--i
] = 1;
2688 is_address
[--i
] = 1;
2693 /* search first non-zero entry */
2694 for (j
= 0; (j
< 256) && (trace_buffer
[j
] == 0) && (!is_address
[j
]); j
++)
2699 DEBUG("no trace data collected");
2700 return ERROR_XSCALE_NO_TRACE_DATA
;
2703 for (trace_data_p
= &xscale
->trace
.data
; *trace_data_p
; trace_data_p
= &(*trace_data_p
)->next
)
2706 *trace_data_p
= malloc(sizeof(xscale_trace_data_t
));
2707 (*trace_data_p
)->next
= NULL
;
2708 (*trace_data_p
)->chkpt0
= trace_buffer
[256];
2709 (*trace_data_p
)->chkpt1
= trace_buffer
[257];
2710 (*trace_data_p
)->last_instruction
= buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32);
2711 (*trace_data_p
)->entries
= malloc(sizeof(xscale_trace_entry_t
) * (256 - j
));
2712 (*trace_data_p
)->depth
= 256 - j
;
2714 for (i
= j
; i
< 256; i
++)
2716 (*trace_data_p
)->entries
[i
- j
].data
= trace_buffer
[i
];
2718 (*trace_data_p
)->entries
[i
- j
].type
= XSCALE_TRACE_ADDRESS
;
2720 (*trace_data_p
)->entries
[i
- j
].type
= XSCALE_TRACE_MESSAGE
;
2726 int xscale_read_instruction(target_t
*target
, arm_instruction_t
*instruction
)
2728 /* get pointers to arch-specific information */
2729 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2730 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2737 if (!xscale
->trace
.image
)
2738 return ERROR_TRACE_IMAGE_UNAVAILABLE
;
2740 /* search for the section the current instruction belongs to */
2741 for (i
= 0; i
< xscale
->trace
.image
->num_sections
; i
++)
2743 if ((xscale
->trace
.image
->sections
[i
].base_address
<= xscale
->trace
.current_pc
) &&
2744 (xscale
->trace
.image
->sections
[i
].base_address
+ xscale
->trace
.image
->sections
[i
].size
> xscale
->trace
.current_pc
))
2753 /* current instruction couldn't be found in the image */
2754 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE
;
2757 if (xscale
->trace
.core_state
== ARMV4_5_STATE_ARM
)
2760 if ((retval
= image_read_section(xscale
->trace
.image
, section
,
2761 xscale
->trace
.current_pc
- xscale
->trace
.image
->sections
[section
].base_address
,
2762 4, buf
, &size_read
)) != ERROR_OK
)
2764 ERROR("error while reading instruction: %i", retval
);
2765 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE
;
2767 opcode
= target_buffer_get_u32(target
, buf
);
2768 arm_evaluate_opcode(opcode
, xscale
->trace
.current_pc
, instruction
);
2770 else if (xscale
->trace
.core_state
== ARMV4_5_STATE_THUMB
)
2773 if ((retval
= image_read_section(xscale
->trace
.image
, section
,
2774 xscale
->trace
.current_pc
- xscale
->trace
.image
->sections
[section
].base_address
,
2775 2, buf
, &size_read
)) != ERROR_OK
)
2777 ERROR("error while reading instruction: %i", retval
);
2778 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE
;
2780 opcode
= target_buffer_get_u16(target
, buf
);
2781 thumb_evaluate_opcode(opcode
, xscale
->trace
.current_pc
, instruction
);
2785 ERROR("BUG: unknown core state encountered");
2792 int xscale_branch_address(xscale_trace_data_t
*trace_data
, int i
, u32
*target
)
2794 /* if there are less than four entries prior to the indirect branch message
2795 * we can't extract the address */
2801 *target
= (trace_data
->entries
[i
-1].data
) | (trace_data
->entries
[i
-2].data
<< 8) |
2802 (trace_data
->entries
[i
-3].data
<< 16) | (trace_data
->entries
[i
-4].data
<< 24);
2807 int xscale_analyze_trace(target_t
*target
, command_context_t
*cmd_ctx
)
2809 /* get pointers to arch-specific information */
2810 armv4_5_common_t
*armv4_5
= target
->arch_info
;
2811 xscale_common_t
*xscale
= armv4_5
->arch_info
;
2814 xscale_trace_data_t
*trace_data
= xscale
->trace
.data
;
2823 xscale
->trace
.core_state
= ARMV4_5_STATE_ARM
;
2828 for (i
= 0; i
< trace_data
->depth
; i
++)
2834 if (trace_data
->entries
[i
].type
== XSCALE_TRACE_ADDRESS
)
2837 switch ((trace_data
->entries
[i
].data
& 0xf0) >> 4)
2839 case 0: /* Exceptions */
2847 exception
= (trace_data
->entries
[i
].data
& 0x70) >> 4;
2849 next_pc
= (trace_data
->entries
[i
].data
& 0xf0) >> 2;
2850 command_print(cmd_ctx
, "--- exception %i ---", (trace_data
->entries
[i
].data
& 0xf0) >> 4);
2852 case 8: /* Direct Branch */
2855 case 9: /* Indirect Branch */
2857 if (xscale_branch_address(trace_data
, i
, &next_pc
) == 0)
2862 case 13: /* Checkpointed Indirect Branch */
2863 if (xscale_branch_address(trace_data
, i
, &next_pc
) == 0)
2866 if (((chkpt
== 0) && (next_pc
!= trace_data
->chkpt0
))
2867 || ((chkpt
== 1) && (next_pc
!= trace_data
->chkpt1
)))
2868 WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2870 /* explicit fall-through */
2871 case 12: /* Checkpointed Direct Branch */
2876 next_pc
= trace_data
->chkpt0
;
2879 else if (chkpt
== 1)
2882 next_pc
= trace_data
->chkpt0
;
2887 WARNING("more than two checkpointed branches encountered");
2890 case 15: /* Roll-over */
2893 default: /* Reserved */
2894 command_print(cmd_ctx
, "--- reserved trace message ---");
2895 ERROR("BUG: trace message %i is reserved", (trace_data
->entries
[i
].data
& 0xf0) >> 4);
2899 if (xscale
->trace
.pc_ok
)
2901 int executed
= (trace_data
->entries
[i
].data
& 0xf) + rollover
* 16;
2902 arm_instruction_t instruction
;
2904 if ((exception
== 6) || (exception
== 7))
2906 /* IRQ or FIQ exception, no instruction executed */
2910 while (executed
-- >= 0)
2912 if ((retval
= xscale_read_instruction(target
, &instruction
)) != ERROR_OK
)
2914 /* can't continue tracing with no image available */
2915 if (retval
== ERROR_TRACE_IMAGE_UNAVAILABLE
)
2919 else if (retval
== ERROR_TRACE_INSTRUCTION_UNAVAILABLE
)
2921 /* TODO: handle incomplete images */
2925 /* a precise abort on a load to the PC is included in the incremental
2926 * word count, other instructions causing data aborts are not included
2928 if ((executed
== 0) && (exception
== 4)
2929 && ((instruction
.type
>= ARM_LDR
) && (instruction
.type
<= ARM_LDM
)))
2931 if ((instruction
.type
== ARM_LDM
)
2932 && ((instruction
.info
.load_store_multiple
.register_list
& 0x8000) == 0))
2936 else if (((instruction
.type
>= ARM_LDR
) && (instruction
.type
<= ARM_LDRSH
))
2937 && (instruction
.info
.load_store
.Rd
!= 15))
2943 /* only the last instruction executed
2944 * (the one that caused the control flow change)
2945 * could be a taken branch
2947 if (((executed
== -1) && (branch
== 1)) &&
2948 (((instruction
.type
== ARM_B
) ||
2949 (instruction
.type
== ARM_BL
) ||
2950 (instruction
.type
== ARM_BLX
)) &&
2951 (instruction
.info
.b_bl_bx_blx
.target_address
!= -1)))
2953 xscale
->trace
.current_pc
= instruction
.info
.b_bl_bx_blx
.target_address
;
2957 xscale
->trace
.current_pc
+= (xscale
->trace
.core_state
== ARMV4_5_STATE_ARM
) ? 4 : 2;
2959 command_print(cmd_ctx
, "%s", instruction
.text
);
2967 xscale
->trace
.current_pc
= next_pc
;
2968 xscale
->trace
.pc_ok
= 1;
2972 for (; xscale
->trace
.current_pc
< trace_data
->last_instruction
; xscale
->trace
.current_pc
+= (xscale
->trace
.core_state
== ARMV4_5_STATE_ARM
) ? 4 : 2)
2974 arm_instruction_t instruction
;
2975 if ((retval
= xscale_read_instruction(target
, &instruction
)) != ERROR_OK
)
2977 /* can't continue tracing with no image available */
2978 if (retval
== ERROR_TRACE_IMAGE_UNAVAILABLE
)
2982 else if (retval
== ERROR_TRACE_INSTRUCTION_UNAVAILABLE
)
2984 /* TODO: handle incomplete images */
2987 command_print(cmd_ctx
, "%s", instruction
.text
);
2990 trace_data
= trace_data
->next
;
2996 void xscale_build_reg_cache(target_t
*target
)
2998 /* get pointers to arch-specific information */
2999 armv4_5_common_t
*armv4_5
= target
->arch_info
;
3000 xscale_common_t
*xscale
= armv4_5
->arch_info
;
3002 reg_cache_t
**cache_p
= register_get_last_cache_p(&target
->reg_cache
);
3003 xscale_reg_t
*arch_info
= malloc(sizeof(xscale_reg_arch_info
));
3005 int num_regs
= sizeof(xscale_reg_arch_info
) / sizeof(xscale_reg_t
);
3007 (*cache_p
) = armv4_5_build_reg_cache(target
, armv4_5
);
3008 armv4_5
->core_cache
= (*cache_p
);
3010 /* register a register arch-type for XScale dbg registers only once */
3011 if (xscale_reg_arch_type
== -1)
3012 xscale_reg_arch_type
= register_reg_arch_type(xscale_get_reg
, xscale_set_reg
);
3014 (*cache_p
)->next
= malloc(sizeof(reg_cache_t
));
3015 cache_p
= &(*cache_p
)->next
;
3017 /* fill in values for the xscale reg cache */
3018 (*cache_p
)->name
= "XScale registers";
3019 (*cache_p
)->next
= NULL
;
3020 (*cache_p
)->reg_list
= malloc(num_regs
* sizeof(reg_t
));
3021 (*cache_p
)->num_regs
= num_regs
;
3023 for (i
= 0; i
< num_regs
; i
++)
3025 (*cache_p
)->reg_list
[i
].name
= xscale_reg_list
[i
];
3026 (*cache_p
)->reg_list
[i
].value
= calloc(4, 1);
3027 (*cache_p
)->reg_list
[i
].dirty
= 0;
3028 (*cache_p
)->reg_list
[i
].valid
= 0;
3029 (*cache_p
)->reg_list
[i
].size
= 32;
3030 (*cache_p
)->reg_list
[i
].bitfield_desc
= NULL
;
3031 (*cache_p
)->reg_list
[i
].num_bitfields
= 0;
3032 (*cache_p
)->reg_list
[i
].arch_info
= &arch_info
[i
];
3033 (*cache_p
)->reg_list
[i
].arch_type
= xscale_reg_arch_type
;
3034 arch_info
[i
] = xscale_reg_arch_info
[i
];
3035 arch_info
[i
].target
= target
;
3038 xscale
->reg_cache
= (*cache_p
);
3041 int xscale_init_target(struct command_context_s
*cmd_ctx
, struct target_s
*target
)
3043 if (startup_mode
!= DAEMON_RESET
)
3045 ERROR("XScale target requires a reset");
3046 ERROR("Reset target to enable debug");
3049 /* assert TRST once during startup */
3050 jtag_add_reset(1, 0);
3051 jtag_add_sleep(5000);
3052 jtag_add_reset(0, 0);
3053 jtag_execute_queue();
3064 int xscale_init_arch_info(target_t
*target
, xscale_common_t
*xscale
, int chain_pos
, char *variant
)
3066 armv4_5_common_t
*armv4_5
;
3067 u32 high_reset_branch
, low_reset_branch
;
3070 armv4_5
= &xscale
->armv4_5_common
;
3072 /* store architecture specfic data (none so far) */
3073 xscale
->arch_info
= NULL
;
3074 xscale
->common_magic
= XSCALE_COMMON_MAGIC
;
3076 /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
3077 xscale
->variant
= strdup(variant
);
3079 /* prepare JTAG information for the new target */
3080 xscale
->jtag_info
.chain_pos
= chain_pos
;
3081 jtag_register_event_callback(xscale_jtag_callback
, target
);
3083 xscale
->jtag_info
.dbgrx
= 0x02;
3084 xscale
->jtag_info
.dbgtx
= 0x10;
3085 xscale
->jtag_info
.dcsr
= 0x09;
3086 xscale
->jtag_info
.ldic
= 0x07;
3088 if ((strcmp(xscale
->variant
, "pxa250") == 0) ||
3089 (strcmp(xscale
->variant
, "pxa255") == 0) ||
3090 (strcmp(xscale
->variant
, "pxa26x") == 0))
3092 xscale
->jtag_info
.ir_length
= 5;
3094 else if ((strcmp(xscale
->variant
, "pxa27x") == 0) ||
3095 (strcmp(xscale
->variant
, "ixp42x") == 0) ||
3096 (strcmp(xscale
->variant
, "ixp45x") == 0) ||
3097 (strcmp(xscale
->variant
, "ixp46x") == 0))
3099 xscale
->jtag_info
.ir_length
= 7;
3102 /* the debug handler isn't installed (and thus not running) at this time */
3103 xscale
->handler_installed
= 0;
3104 xscale
->handler_running
= 0;
3105 xscale
->handler_address
= 0xfe000800;
3107 /* clear the vectors we keep locally for reference */
3108 memset(xscale
->low_vectors
, 0, sizeof(xscale
->low_vectors
));
3109 memset(xscale
->high_vectors
, 0, sizeof(xscale
->high_vectors
));
3111 /* no user-specified vectors have been configured yet */
3112 xscale
->static_low_vectors_set
= 0x0;
3113 xscale
->static_high_vectors_set
= 0x0;
3115 /* calculate branches to debug handler */
3116 low_reset_branch
= (xscale
->handler_address
+ 0x20 - 0x0 - 0x8) >> 2;
3117 high_reset_branch
= (xscale
->handler_address
+ 0x20 - 0xffff0000 - 0x8) >> 2;
3119 xscale
->low_vectors
[0] = ARMV4_5_B((low_reset_branch
& 0xffffff), 0);
3120 xscale
->high_vectors
[0] = ARMV4_5_B((high_reset_branch
& 0xffffff), 0);
3122 for (i
= 1; i
<= 7; i
++)
3124 xscale
->low_vectors
[i
] = ARMV4_5_B(0xfffffe, 0);
3125 xscale
->high_vectors
[i
] = ARMV4_5_B(0xfffffe, 0);
3128 /* 64kB aligned region used for DCache cleaning */
3129 xscale
->cache_clean_address
= 0xfffe0000;
3131 xscale
->hold_rst
= 0;
3132 xscale
->external_debug_break
= 0;
3134 xscale
->force_hw_bkpts
= 1;
3136 xscale
->ibcr_available
= 2;
3137 xscale
->ibcr0_used
= 0;
3138 xscale
->ibcr1_used
= 0;
3140 xscale
->dbr_available
= 2;
3141 xscale
->dbr0_used
= 0;
3142 xscale
->dbr1_used
= 0;
3144 xscale
->arm_bkpt
= ARMV5_BKPT(0x0);
3145 xscale
->thumb_bkpt
= ARMV5_T_BKPT(0x0) & 0xffff;
3147 xscale
->vector_catch
= 0x1;
3149 xscale
->trace
.capture_status
= TRACE_IDLE
;
3150 xscale
->trace
.data
= NULL
;
3151 xscale
->trace
.image
= NULL
;
3152 xscale
->trace
.buffer_enabled
= 0;
3153 xscale
->trace
.buffer_fill
= 0;
3155 /* prepare ARMv4/5 specific information */
3156 armv4_5
->arch_info
= xscale
;
3157 armv4_5
->read_core_reg
= xscale_read_core_reg
;
3158 armv4_5
->write_core_reg
= xscale_write_core_reg
;
3159 armv4_5
->full_context
= xscale_full_context
;
3161 armv4_5_init_arch_info(target
, armv4_5
);
3163 xscale
->armv4_5_mmu
.armv4_5_cache
.ctype
= -1;
3164 xscale
->armv4_5_mmu
.get_ttb
= xscale_get_ttb
;
3165 xscale
->armv4_5_mmu
.read_memory
= xscale_read_memory
;
3166 xscale
->armv4_5_mmu
.write_memory
= xscale_write_memory
;
3167 xscale
->armv4_5_mmu
.disable_mmu_caches
= xscale_disable_mmu_caches
;
3168 xscale
->armv4_5_mmu
.enable_mmu_caches
= xscale_enable_mmu_caches
;
3169 xscale
->armv4_5_mmu
.has_tiny_pages
= 1;
3170 xscale
->armv4_5_mmu
.mmu_enabled
= 0;
3172 xscale
->fast_memory_access
= 0;
3177 /* target xscale <endianess> <startup_mode> <chain_pos> <variant> */
3178 int xscale_target_command(struct command_context_s
*cmd_ctx
, char *cmd
, char **args
, int argc
, struct target_s
*target
)
3181 char *variant
= NULL
;
3182 xscale_common_t
*xscale
= malloc(sizeof(xscale_common_t
));
3183 memset(xscale
, 0, sizeof(*xscale
));
3187 ERROR("'target xscale' requires four arguments: <endianess> <startup_mode> <chain_pos> <variant>");
3191 chain_pos
= strtoul(args
[3], NULL
, 0);
3195 xscale_init_arch_info(target
, xscale
, chain_pos
, variant
);
3196 xscale_build_reg_cache(target
);
3201 int xscale_handle_debug_handler_command(struct command_context_s
*cmd_ctx
, char *cmd
, char **args
, int argc
)
3203 target_t
*target
= NULL
;
3204 armv4_5_common_t
*armv4_5
;
3205 xscale_common_t
*xscale
;
3207 u32 handler_address
;
3211 ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3215 if ((target
= get_target_by_num(strtoul(args
[0], NULL
, 0))) == NULL
)
3217 ERROR("no target '%s' configured", args
[0]);
3221 if (xscale_get_arch_pointers(target
, &armv4_5
, &xscale
) != ERROR_OK
)
3226 handler_address
= strtoul(args
[1], NULL
, 0);
3228 if (((handler_address
>= 0x800) && (handler_address
<= 0x1fef800)) ||
3229 ((handler_address
>= 0xfe000800) && (handler_address
<= 0xfffff800)))
3231 xscale
->handler_address
= handler_address
;
3235 ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3241 int xscale_handle_cache_clean_address_command(struct command_context_s
*cmd_ctx
, char *cmd
, char **args
, int argc
)
3243 target_t
*target
= NULL
;
3244 armv4_5_common_t
*armv4_5
;
3245 xscale_common_t
*xscale
;
3247 u32 cache_clean_address
;
3251 ERROR("'xscale cache_clean_address <target#> <address>' command takes two required operands");
3255 if ((target
= get_target_by_num(strtoul(args
[0], NULL
, 0))) == NULL
)
3257 ERROR("no target '%s' configured", args
[0]);
3261 if (xscale_get_arch_pointers(target
, &armv4_5
, &xscale
) != ERROR_OK
)
3266 cache_clean_address
= strtoul(args
[1], NULL
, 0);
3268 if (cache_clean_address
& 0xffff)
3270 ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3274 xscale
->cache_clean_address
= cache_clean_address
;
3280 int xscale_handle_cache_info_command(struct command_context_s
*cmd_ctx
, char *cmd
, char **args
, int argc
)
3282 target_t
*target
= get_current_target(cmd_ctx
);
3283 armv4_5_common_t
*armv4_5
;
3284 xscale_common_t
*xscale
;
3286 if (xscale_get_arch_pointers(target
, &armv4_5
, &xscale
) != ERROR_OK
)
3291 return armv4_5_handle_cache_info_command(cmd_ctx
, &xscale
->armv4_5_mmu
.armv4_5_cache
);
3294 static int xscale_virt2phys(struct target_s
*target
, u32
virtual, u32
*physical
)
3296 armv4_5_common_t
*armv4_5
;
3297 xscale_common_t
*xscale
;
3305 if ((retval
= xscale_get_arch_pointers(target
, &armv4_5
, &xscale
)) != ERROR_OK
)
3309 u32 ret
= armv4_5_mmu_translate_va(target
, &xscale
->armv4_5_mmu
, virtual, &type
, &cb
, &domain
, &ap
);
3318 static int xscale_mmu(struct target_s
*target
, int *enabled
)
3320 armv4_5_common_t
*armv4_5
= target
->arch_info
;
3321 xscale_common_t
*xscale
= armv4_5
->arch_info
;
3323 if (target
->state
!= TARGET_HALTED
)
3325 ERROR("Target not halted");
3326 return ERROR_TARGET_INVALID
;
3328 *enabled
= xscale
->armv4_5_mmu
.mmu_enabled
;
3333 int xscale_handle_mmu_command(command_context_t
*cmd_ctx
, char *cmd
, char **args
, int argc
)
3335 target_t
*target
= get_current_target(cmd_ctx
);
3336 armv4_5_common_t
*armv4_5
;
3337 xscale_common_t
*xscale
;
3339 if (xscale_get_arch_pointers(target
, &armv4_5
, &xscale
) != ERROR_OK
)
3344 if (target
->state
!= TARGET_HALTED
)
3346 command_print(cmd_ctx
, "target must be stopped for \"%s\" command", cmd
);
3352 if (strcmp("enable", args
[0]) == 0)
3354 xscale_enable_mmu_caches(target
, 1, 0, 0);
3355 xscale
->armv4_5_mmu
.mmu_enabled
= 1;
3357 else if (strcmp("disable", args
[0]) == 0)
3359 xscale_disable_mmu_caches(target
, 1, 0, 0);
3360 xscale
->armv4_5_mmu
.mmu_enabled
= 0;
3364 command_print(cmd_ctx
, "mmu %s", (xscale
->armv4_5_mmu
.mmu_enabled
) ? "enabled" : "disabled");
3369 int xscale_handle_idcache_command(command_context_t
*cmd_ctx
, char *cmd
, char **args
, int argc
)
3371 target_t
*target
= get_current_target(cmd_ctx
);
3372 armv4_5_common_t
*armv4_5
;
3373 xscale_common_t
*xscale
;
3374 int icache
= 0, dcache
= 0;
3376 if (xscale_get_arch_pointers(target
, &armv4_5
, &xscale
) != ERROR_OK
)
3381 if (target
->state
!= TARGET_HALTED
)
3383 command_print(cmd_ctx
, "target must be stopped for \"%s\" command", cmd
);
3387 if (strcmp(cmd
, "icache") == 0)
3389 else if (strcmp(cmd
, "dcache") == 0)
3394 if (strcmp("enable", args
[0]) == 0)
3396 xscale_enable_mmu_caches(target
, 0, dcache
, icache
);
3399 xscale
->armv4_5_mmu
.armv4_5_cache
.i_cache_enabled
= 1;
3401 xscale
->armv4_5_mmu
.armv4_5_cache
.d_u_cache_enabled
= 1;
3403 else if (strcmp("disable", args
[0]) == 0)
3405 xscale_disable_mmu_caches(target
, 0, dcache
, icache
);
3408 xscale
->armv4_5_mmu
.armv4_5_cache
.i_cache_enabled
= 0;
3410 xscale
->armv4_5_mmu
.armv4_5_cache
.d_u_cache_enabled
= 0;
3415 command_print(cmd_ctx
, "icache %s", (xscale
->armv4_5_mmu
.armv4_5_cache
.i_cache_enabled
) ? "enabled" : "disabled");
3418 command_print(cmd_ctx
, "dcache %s", (xscale
->armv4_5_mmu
.armv4_5_cache
.d_u_cache_enabled
) ? "enabled" : "disabled");
3423 int xscale_handle_vector_catch_command(command_context_t
*cmd_ctx
, char *cmd
, char **args
, int argc
)
3425 target_t
*target
= get_current_target(cmd_ctx
);
3426 armv4_5_common_t
*armv4_5
;
3427 xscale_common_t
*xscale
;
3429 if (xscale_get_arch_pointers(target
, &armv4_5
, &xscale
) != ERROR_OK
)
3436 command_print(cmd_ctx
, "usage: xscale vector_catch [mask]");
3440 xscale
->vector_catch
= strtoul(args
[0], NULL
, 0);
3441 buf_set_u32(xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].value
, 16, 8, xscale
->vector_catch
);
3442 xscale_write_dcsr(target
, -1, -1);
3445 command_print(cmd_ctx
, "vector catch mask: 0x%2.2x", xscale
->vector_catch
);
3450 int xscale_handle_force_hw_bkpts_command(struct command_context_s
*cmd_ctx
, char *cmd
, char **args
, int argc
)
3452 target_t
*target
= get_current_target(cmd_ctx
);
3453 armv4_5_common_t
*armv4_5
;
3454 xscale_common_t
*xscale
;
3456 if (xscale_get_arch_pointers(target
, &armv4_5
, &xscale
) != ERROR_OK
)
3461 if ((argc
>= 1) && (strcmp("enable", args
[0]) == 0))
3463 xscale
->force_hw_bkpts
= 1;
3465 else if ((argc
>= 1) && (strcmp("disable", args
[0]) == 0))
3467 xscale
->force_hw_bkpts
= 0;
3471 command_print(cmd_ctx
, "usage: xscale force_hw_bkpts <enable|disable>");
3474 command_print(cmd_ctx
, "force hardware breakpoints %s", (xscale
->force_hw_bkpts
) ? "enabled" : "disabled");
3479 int xscale_handle_trace_buffer_command(struct command_context_s
*cmd_ctx
, char *cmd
, char **args
, int argc
)
3481 target_t
*target
= get_current_target(cmd_ctx
);
3482 armv4_5_common_t
*armv4_5
;
3483 xscale_common_t
*xscale
;
3486 if (xscale_get_arch_pointers(target
, &armv4_5
, &xscale
) != ERROR_OK
)
3491 if (target
->state
!= TARGET_HALTED
)
3493 command_print(cmd_ctx
, "target must be stopped for \"%s\" command", cmd
);
3497 if ((argc
>= 1) && (strcmp("enable", args
[0]) == 0))
3499 xscale_trace_data_t
*td
, *next_td
;
3500 xscale
->trace
.buffer_enabled
= 1;
3502 /* free old trace data */
3503 td
= xscale
->trace
.data
;
3513 xscale
->trace
.data
= NULL
;
3515 else if ((argc
>= 1) && (strcmp("disable", args
[0]) == 0))
3517 xscale
->trace
.buffer_enabled
= 0;
3520 if ((argc
>= 2) && (strcmp("fill", args
[1]) == 0))
3523 xscale
->trace
.buffer_fill
= strtoul(args
[2], NULL
, 0);
3525 xscale
->trace
.buffer_fill
= 1;
3527 else if ((argc
>= 2) && (strcmp("wrap", args
[1]) == 0))
3529 xscale
->trace
.buffer_fill
= -1;
3532 if (xscale
->trace
.buffer_enabled
)
3534 /* if we enable the trace buffer in fill-once
3535 * mode we know the address of the first instruction */
3536 xscale
->trace
.pc_ok
= 1;
3537 xscale
->trace
.current_pc
= buf_get_u32(armv4_5
->core_cache
->reg_list
[15].value
, 0, 32);
3541 /* otherwise the address is unknown, and we have no known good PC */
3542 xscale
->trace
.pc_ok
= 0;
3545 command_print(cmd_ctx
, "trace buffer %s (%s)",
3546 (xscale
->trace
.buffer_enabled
) ? "enabled" : "disabled",
3547 (xscale
->trace
.buffer_fill
> 0) ? "fill" : "wrap");
3549 dcsr_value
= buf_get_u32(xscale
->reg_cache
->reg_list
[XSCALE_DCSR
].value
, 0, 32);
3550 if (xscale
->trace
.buffer_fill
>= 0)
3551 xscale_write_dcsr_sw(target
, (dcsr_value
& 0xfffffffc) | 2);
3553 xscale_write_dcsr_sw(target
, dcsr_value
& 0xfffffffc);
3558 int xscale_handle_trace_image_command(struct command_context_s
*cmd_ctx
, char *cmd
, char **args
, int argc
)
3561 armv4_5_common_t
*armv4_5
;
3562 xscale_common_t
*xscale
;
3566 command_print(cmd_ctx
, "usage: xscale trace_image <file> [base address] [type]");
3570 target
= get_current_target(cmd_ctx
);
3572 if (xscale_get_arch_pointers(target
, &armv4_5
, &xscale
) != ERROR_OK
)
3577 if (xscale
->trace
.image
)
3579 image_close(xscale
->trace
.image
);
3580 free(xscale
->trace
.image
);
3581 command_print(cmd_ctx
, "previously loaded image found and closed");
3584 xscale
->trace
.image
= malloc(sizeof(image_t
));
3585 xscale
->trace
.image
->base_address_set
= 0;
3586 xscale
->trace
.image
->start_address_set
= 0;
3588 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3591 xscale
->trace
.image
->base_address_set
= 1;
3592 xscale
->trace
.image
->base_address
= strtoul(args
[1], NULL
, 0);
3596 xscale
->trace
.image
->base_address_set
= 0;
3599 if (image_open(xscale
->trace
.image
, args
[0], (argc
>= 3) ? args
[2] : NULL
) != ERROR_OK
)
3601 free(xscale
->trace
.image
);
3602 xscale
->trace
.image
= NULL
;
3609 int xscale_handle_dump_trace_command(struct command_context_s
*cmd_ctx
, char *cmd
, char **args
, int argc
)
3611 target_t
*target
= get_current_target(cmd_ctx
);
3612 armv4_5_common_t
*armv4_5
;
3613 xscale_common_t
*xscale
;
3614 xscale_trace_data_t
*trace_data
;
3617 if (xscale_get_arch_pointers(target
, &armv4_5
, &xscale
) != ERROR_OK
)
3622 if (target
->state
!= TARGET_HALTED
)
3624 command_print(cmd_ctx
, "target must be stopped for \"%s\" command", cmd
);
3630 command_print(cmd_ctx
, "usage: xscale dump_trace <file>");
3634 trace_data
= xscale
->trace
.data
;
3638 command_print(cmd_ctx
, "no trace data collected");
3642 if (fileio_open(&file
, args
[0], FILEIO_WRITE
, FILEIO_BINARY
) != ERROR_OK
)
3651 fileio_write_u32(&file
, trace_data
->chkpt0
);
3652 fileio_write_u32(&file
, trace_data
->chkpt1
);
3653 fileio_write_u32(&file
, trace_data
->last_instruction
);
3654 fileio_write_u32(&file
, trace_data
->depth
);
3656 for (i
= 0; i
< trace_data
->depth
; i
++)
3657 fileio_write_u32(&file
, trace_data
->entries
[i
].data
| ((trace_data
->entries
[i
].type
& 0xffff) << 16));
3659 trace_data
= trace_data
->next
;
3662 fileio_close(&file
);
3667 int xscale_handle_analyze_trace_buffer_command(struct command_context_s
*cmd_ctx
, char *cmd
, char **args
, int argc
)
3669 target_t
*target
= get_current_target(cmd_ctx
);
3670 armv4_5_common_t
*armv4_5
;
3671 xscale_common_t
*xscale
;
3673 if (xscale_get_arch_pointers(target
, &armv4_5
, &xscale
) != ERROR_OK
)
3678 xscale_analyze_trace(target
, cmd_ctx
);
3683 int xscale_handle_cp15(command_context_t
*cmd_ctx
, char *cmd
, char **args
, int argc
)
3685 target_t
*target
= get_current_target(cmd_ctx
);
3686 armv4_5_common_t
*armv4_5
;
3687 xscale_common_t
*xscale
;
3689 if (xscale_get_arch_pointers(target
, &armv4_5
, &xscale
) != ERROR_OK
)
3694 if (target
->state
!= TARGET_HALTED
)
3696 command_print(cmd_ctx
, "target must be stopped for \"%s\" command", cmd
);
3703 reg_no
= strtoul(args
[0], NULL
, 0);
3704 /*translate from xscale cp15 register no to openocd register*/
3708 reg_no
= XSCALE_MAINID
;
3711 reg_no
= XSCALE_CTRL
;
3714 reg_no
= XSCALE_TTB
;
3717 reg_no
= XSCALE_DAC
;
3720 reg_no
= XSCALE_FSR
;
3723 reg_no
= XSCALE_FAR
;
3726 reg_no
= XSCALE_PID
;
3729 reg_no
= XSCALE_CPACCESS
;
3732 command_print(cmd_ctx
, "invalid register number");
3733 return ERROR_INVALID_ARGUMENTS
;
3735 reg
= &xscale
->reg_cache
->reg_list
[reg_no
];
3742 /* read cp15 control register */
3743 xscale_get_reg(reg
);
3744 value
= buf_get_u32(reg
->value
, 0, 32);
3745 command_print(cmd_ctx
, "%s (/%i): 0x%x", reg
->name
, reg
->size
, value
);
3750 u32 value
= strtoul(args
[1], NULL
, 0);
3752 /* send CP write request (command 0x41) */
3753 xscale_send_u32(target
, 0x41);
3755 /* send CP register number */
3756 xscale_send_u32(target
, reg_no
);
3758 /* send CP register value */
3759 xscale_send_u32(target
, value
);
3761 /* execute cpwait to ensure outstanding operations complete */
3762 xscale_send_u32(target
, 0x53);
3766 command_print(cmd_ctx
, "usage: cp15 [register]<, [value]>");
3772 int handle_xscale_fast_memory_access_command(struct command_context_s
*cmd_ctx
, char *cmd
, char **args
, int argc
)
3774 target_t
*target
= get_current_target(cmd_ctx
);
3775 armv4_5_common_t
*armv4_5
;
3776 xscale_common_t
*xscale
;
3778 if (xscale_get_arch_pointers(target
, &armv4_5
, &xscale
) != ERROR_OK
)
3785 if (strcmp("enable", args
[0]) == 0)
3787 xscale
->fast_memory_access
= 1;
3789 else if (strcmp("disable", args
[0]) == 0)
3791 xscale
->fast_memory_access
= 0;
3795 return ERROR_COMMAND_SYNTAX_ERROR
;
3799 return ERROR_COMMAND_SYNTAX_ERROR
;
3802 command_print(cmd_ctx
, "fast memory access is %s", (xscale
->fast_memory_access
) ? "enabled" : "disabled");
3807 int xscale_register_commands(struct command_context_s
*cmd_ctx
)
3809 command_t
*xscale_cmd
;
3811 xscale_cmd
= register_command(cmd_ctx
, NULL
, "xscale", NULL
, COMMAND_ANY
, "xscale specific commands");
3813 register_command(cmd_ctx
, xscale_cmd
, "debug_handler", xscale_handle_debug_handler_command
, COMMAND_ANY
, "'xscale debug_handler <target#> <address>' command takes two required operands");
3814 register_command(cmd_ctx
, xscale_cmd
, "cache_clean_address", xscale_handle_cache_clean_address_command
, COMMAND_ANY
, NULL
);
3816 register_command(cmd_ctx
, xscale_cmd
, "cache_info", xscale_handle_cache_info_command
, COMMAND_EXEC
, NULL
);
3817 register_command(cmd_ctx
, xscale_cmd
, "mmu", xscale_handle_mmu_command
, COMMAND_EXEC
, "['enable'|'disable'] the MMU");
3818 register_command(cmd_ctx
, xscale_cmd
, "icache", xscale_handle_idcache_command
, COMMAND_EXEC
, "['enable'|'disable'] the ICache");
3819 register_command(cmd_ctx
, xscale_cmd
, "dcache", xscale_handle_idcache_command
, COMMAND_EXEC
, "['enable'|'disable'] the DCache");
3821 register_command(cmd_ctx
, xscale_cmd
, "vector_catch", xscale_handle_idcache_command
, COMMAND_EXEC
, "<mask> of vectors that should be catched");
3823 register_command(cmd_ctx
, xscale_cmd
, "trace_buffer", xscale_handle_trace_buffer_command
, COMMAND_EXEC
, "<enable|disable> ['fill' [n]|'wrap']");
3825 register_command(cmd_ctx
, xscale_cmd
, "dump_trace", xscale_handle_dump_trace_command
, COMMAND_EXEC
, "dump content of trace buffer to <file>");
3826 register_command(cmd_ctx
, xscale_cmd
, "analyze_trace", xscale_handle_analyze_trace_buffer_command
, COMMAND_EXEC
, "analyze content of trace buffer");
3827 register_command(cmd_ctx
, xscale_cmd
, "trace_image", xscale_handle_trace_image_command
,
3828 COMMAND_EXEC
, "load image from <file> [base address]");
3830 register_command(cmd_ctx
, xscale_cmd
, "cp15", xscale_handle_cp15
, COMMAND_EXEC
, "access coproc 15 <register> [value]");
3831 register_command(cmd_ctx
, xscale_cmd
, "fast_memory_access", handle_xscale_fast_memory_access_command
,
3832 COMMAND_ANY
, "use fast memory accesses instead of slower but potentially unsafe slow accesses <enable|disable>");
3835 armv4_5_register_commands(cmd_ctx
);
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)