1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
19 ***************************************************************************/
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "replacements.h"
25
26 #include "xscale.h"
27
28 #include "register.h"
29 #include "target.h"
30 #include "armv4_5.h"
31 #include "arm_simulator.h"
32 #include "arm_disassembler.h"
33 #include "log.h"
34 #include "jtag.h"
35 #include "binarybuffer.h"
36 #include "time_support.h"
37 #include "breakpoints.h"
38 #include "fileio.h"
39
40 #include <stdlib.h>
41 #include <string.h>
42
43 #include <sys/types.h>
44 #include <unistd.h>
45 #include <errno.h>
46
47
48 /* cli handling */
49 int xscale_register_commands(struct command_context_s *cmd_ctx);
50
51 /* forward declarations */
52 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target);
53 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target);
54 int xscale_quit();
55
56 int xscale_arch_state(struct target_s *target);
57 int xscale_poll(target_t *target);
58 int xscale_halt(target_t *target);
59 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution);
60 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints);
61 int xscale_debug_entry(target_t *target);
62 int xscale_restore_context(target_t *target);
63
64 int xscale_assert_reset(target_t *target);
65 int xscale_deassert_reset(target_t *target);
66 int xscale_soft_reset_halt(struct target_s *target);
67
68 int xscale_set_reg_u32(reg_t *reg, u32 value);
69
70 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode);
71 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value);
72
73 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
74 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
75 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer);
76 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum);
77
78 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
79 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
80 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
81 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
82 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
83 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
84 void xscale_enable_watchpoints(struct target_s *target);
85 void xscale_enable_breakpoints(struct target_s *target);
86 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical);
87 static int xscale_mmu(struct target_s *target, int *enabled);
88
89 int xscale_read_trace(target_t *target);
90
91 target_type_t xscale_target =
92 {
93 .name = "xscale",
94
95 .poll = xscale_poll,
96 .arch_state = xscale_arch_state,
97
98 .target_request_data = NULL,
99
100 .halt = xscale_halt,
101 .resume = xscale_resume,
102 .step = xscale_step,
103
104 .assert_reset = xscale_assert_reset,
105 .deassert_reset = xscale_deassert_reset,
106 .soft_reset_halt = xscale_soft_reset_halt,
107
108 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
109
110 .read_memory = xscale_read_memory,
111 .write_memory = xscale_write_memory,
112 .bulk_write_memory = xscale_bulk_write_memory,
113 .checksum_memory = xscale_checksum_memory,
114
115 .run_algorithm = armv4_5_run_algorithm,
116
117 .add_breakpoint = xscale_add_breakpoint,
118 .remove_breakpoint = xscale_remove_breakpoint,
119 .add_watchpoint = xscale_add_watchpoint,
120 .remove_watchpoint = xscale_remove_watchpoint,
121
122 .register_commands = xscale_register_commands,
123 .target_command = xscale_target_command,
124 .init_target = xscale_init_target,
125 .quit = xscale_quit,
126
127 .virt2phys = xscale_virt2phys,
128 .mmu = xscale_mmu
129 };
130
131 char* xscale_reg_list[] =
132 {
133 "XSCALE_MAINID", /* 0 */
134 "XSCALE_CACHETYPE",
135 "XSCALE_CTRL",
136 "XSCALE_AUXCTRL",
137 "XSCALE_TTB",
138 "XSCALE_DAC",
139 "XSCALE_FSR",
140 "XSCALE_FAR",
141 "XSCALE_PID",
142 "XSCALE_CPACCESS",
143 "XSCALE_IBCR0", /* 10 */
144 "XSCALE_IBCR1",
145 "XSCALE_DBR0",
146 "XSCALE_DBR1",
147 "XSCALE_DBCON",
148 "XSCALE_TBREG",
149 "XSCALE_CHKPT0",
150 "XSCALE_CHKPT1",
151 "XSCALE_DCSR",
152 "XSCALE_TX",
153 "XSCALE_RX", /* 20 */
154 "XSCALE_TXRXCTRL",
155 };
156
157 xscale_reg_t xscale_reg_arch_info[] =
158 {
159 {XSCALE_MAINID, NULL},
160 {XSCALE_CACHETYPE, NULL},
161 {XSCALE_CTRL, NULL},
162 {XSCALE_AUXCTRL, NULL},
163 {XSCALE_TTB, NULL},
164 {XSCALE_DAC, NULL},
165 {XSCALE_FSR, NULL},
166 {XSCALE_FAR, NULL},
167 {XSCALE_PID, NULL},
168 {XSCALE_CPACCESS, NULL},
169 {XSCALE_IBCR0, NULL},
170 {XSCALE_IBCR1, NULL},
171 {XSCALE_DBR0, NULL},
172 {XSCALE_DBR1, NULL},
173 {XSCALE_DBCON, NULL},
174 {XSCALE_TBREG, NULL},
175 {XSCALE_CHKPT0, NULL},
176 {XSCALE_CHKPT1, NULL},
177 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
178 {-1, NULL}, /* TX accessed via JTAG */
179 {-1, NULL}, /* RX accessed via JTAG */
180 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
181 };
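/* NOTE: the two tables above are indexed by the XSCALE_* register constants
 * used throughout this file (e.g. reg_list[XSCALE_DCSR]), so their order must
 * be kept in sync with that enumeration. */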
182
183 int xscale_reg_arch_type = -1;
184
185 int xscale_get_reg(reg_t *reg);
186 int xscale_set_reg(reg_t *reg, u8 *buf);
187
188 int xscale_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
189 {
190 armv4_5_common_t *armv4_5 = target->arch_info;
191 xscale_common_t *xscale = armv4_5->arch_info;
192
193 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
194 {
195 LOG_ERROR("target isn't an XScale target");
196 return -1;
197 }
198
199 if (xscale->common_magic != XSCALE_COMMON_MAGIC)
200 {
201 LOG_ERROR("target isn't an XScale target");
202 return -1;
203 }
204
205 *armv4_5_p = armv4_5;
206 *xscale_p = xscale;
207
208 return ERROR_OK;
209 }
210
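/* Load a new instruction into the XScale TAP IR, but only if it differs from
 * the instruction currently latched, so superfluous IR scans are avoided. */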
211 int xscale_jtag_set_instr(int chain_pos, u32 new_instr)
212 {
213 jtag_device_t *device = jtag_get_device(chain_pos);
214
215 if (buf_get_u32(device->cur_instr, 0, device->ir_length) != new_instr)
216 {
217 scan_field_t field;
218
219 field.device = chain_pos;
220 field.num_bits = device->ir_length;
221 field.out_value = calloc(CEIL(field.num_bits, 8), 1);
222 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
223 field.out_mask = NULL;
224 field.in_value = NULL;
225 jtag_set_check_value(&field, device->expected, device->expected_mask, NULL);
226
227 jtag_add_ir_scan(1, &field, -1);
228
229 free(field.out_value);
230 }
231
232 return ERROR_OK;
233 }
234
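/* Debug-register scans on the XScale TAP consist of three fields: a 3-bit
 * status/control field, the 32-bit data word, and a trailing single bit.
 * The check values registered below verify the status bits returned by the core. */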
235 int xscale_read_dcsr(target_t *target)
236 {
237 armv4_5_common_t *armv4_5 = target->arch_info;
238 xscale_common_t *xscale = armv4_5->arch_info;
239
240 int retval;
241
242 scan_field_t fields[3];
243 u8 field0 = 0x0;
244 u8 field0_check_value = 0x2;
245 u8 field0_check_mask = 0x7;
246 u8 field2 = 0x0;
247 u8 field2_check_value = 0x0;
248 u8 field2_check_mask = 0x1;
249
250 jtag_add_end_state(TAP_PD);
251 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
252
253 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
254 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
255
256 fields[0].device = xscale->jtag_info.chain_pos;
257 fields[0].num_bits = 3;
258 fields[0].out_value = &field0;
259 fields[0].out_mask = NULL;
260 fields[0].in_value = NULL;
261 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
262
263 fields[1].device = xscale->jtag_info.chain_pos;
264 fields[1].num_bits = 32;
265 fields[1].out_value = NULL;
266 fields[1].out_mask = NULL;
267 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
268 fields[1].in_handler = NULL;
269 fields[1].in_handler_priv = NULL;
270 fields[1].in_check_value = NULL;
271 fields[1].in_check_mask = NULL;
272
273 fields[2].device = xscale->jtag_info.chain_pos;
274 fields[2].num_bits = 1;
275 fields[2].out_value = &field2;
276 fields[2].out_mask = NULL;
277 fields[2].in_value = NULL;
278 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
279
280 jtag_add_dr_scan(3, fields, -1);
281
282 if ((retval = jtag_execute_queue()) != ERROR_OK)
283 {
284 LOG_ERROR("JTAG error while reading DCSR");
285 return retval;
286 }
287
288 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
289 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
290
291 /* write the register with the value we just read
292 * on this second pass, only the first bit of field0 is guaranteed to be 0
293 */
294 field0_check_mask = 0x1;
295 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
296 fields[1].in_value = NULL;
297
298 jtag_add_end_state(TAP_RTI);
299
300 jtag_add_dr_scan(3, fields, -1);
301
302 /* DANGER!!! this must be here. It will make sure that the arguments
303 * to jtag_set_check_value() do not go out of scope! */
304 return jtag_execute_queue();
305 }
306
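/* Read num_words 32-bit words from the debug handler via DBGTX. Bit 0 of the
 * 3-bit status field tells us whether the scanned word was valid; invalid
 * words are dropped and the remaining reads are re-scheduled until all words
 * have been collected, or until 1000 empty rounds have passed. */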
307 int xscale_receive(target_t *target, u32 *buffer, int num_words)
308 {
309 if (num_words==0)
310 return ERROR_INVALID_ARGUMENTS;
311
312 int retval=ERROR_OK;
313 armv4_5_common_t *armv4_5 = target->arch_info;
314 xscale_common_t *xscale = armv4_5->arch_info;
315
316 enum tap_state path[3];
317 scan_field_t fields[3];
318
319 u8 *field0 = malloc(num_words * 1);
320 u8 field0_check_value = 0x2;
321 u8 field0_check_mask = 0x6;
322 u32 *field1 = malloc(num_words * 4);
323 u8 field2_check_value = 0x0;
324 u8 field2_check_mask = 0x1;
325 int words_done = 0;
326 int words_scheduled = 0;
327
328 int i;
329
330 path[0] = TAP_SDS;
331 path[1] = TAP_CD;
332 path[2] = TAP_SD;
333
334 fields[0].device = xscale->jtag_info.chain_pos;
335 fields[0].num_bits = 3;
336 fields[0].out_value = NULL;
337 fields[0].out_mask = NULL;
338 fields[0].in_value = NULL;
339 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
340
341 fields[1].device = xscale->jtag_info.chain_pos;
342 fields[1].num_bits = 32;
343 fields[1].out_value = NULL;
344 fields[1].out_mask = NULL;
345 fields[1].in_value = NULL;
346 fields[1].in_handler = NULL;
347 fields[1].in_handler_priv = NULL;
348 fields[1].in_check_value = NULL;
349 fields[1].in_check_mask = NULL;
350
351
352
353 fields[2].device = xscale->jtag_info.chain_pos;
354 fields[2].num_bits = 1;
355 fields[2].out_value = NULL;
356 fields[2].out_mask = NULL;
357 fields[2].in_value = NULL;
358 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
359
360 jtag_add_end_state(TAP_RTI);
361 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
362 jtag_add_runtest(1, -1); /* ensures that we're in the TAP_RTI state as the above could be a no-op */
363
364 /* repeat until all words have been collected */
365 int attempts=0;
366 while (words_done < num_words)
367 {
368 /* schedule reads */
369 words_scheduled = 0;
370 for (i = words_done; i < num_words; i++)
371 {
372 fields[0].in_value = &field0[i];
373 fields[1].in_handler = buf_to_u32_handler;
374 fields[1].in_handler_priv = (u8*)&field1[i];
375
376 jtag_add_pathmove(3, path);
377 jtag_add_dr_scan(3, fields, TAP_RTI);
378 words_scheduled++;
379 }
380
381 if ((retval = jtag_execute_queue()) != ERROR_OK)
382 {
383 LOG_ERROR("JTAG error while receiving data from debug handler");
384 break;
385 }
386
387 /* examine results */
388 for (i = words_done; i < num_words; i++)
389 {
390 if (!(field0[i] & 1))
391 {
392 /* move backwards if necessary */
393 int j;
394 for (j = i; j < num_words - 1; j++)
395 {
396 field0[j] = field0[j+1];
397 field1[j] = field1[j+1];
398 }
399 words_scheduled--;
400 }
401 }
402 if (words_scheduled==0)
403 {
404 if (attempts++==1000)
405 {
406 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
407 retval=ERROR_TARGET_TIMEOUT;
408 break;
409 }
410 }
411
412 words_done += words_scheduled;
413 }
414
415 for (i = 0; i < num_words; i++)
416 *(buffer++) = buf_get_u32((u8*)&field1[i], 0, 32);
417
 free(field0);
418 free(field1);
419
420 return retval;
421 }
422
423 int xscale_read_tx(target_t *target, int consume)
424 {
425 armv4_5_common_t *armv4_5 = target->arch_info;
426 xscale_common_t *xscale = armv4_5->arch_info;
427 enum tap_state path[3];
428 enum tap_state noconsume_path[6];
429
430 int retval;
431 struct timeval timeout, now;
432
433 scan_field_t fields[3];
434 u8 field0_in = 0x0;
435 u8 field0_check_value = 0x2;
436 u8 field0_check_mask = 0x6;
437 u8 field2_check_value = 0x0;
438 u8 field2_check_mask = 0x1;
439
440 jtag_add_end_state(TAP_RTI);
441
442 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
443
444 path[0] = TAP_SDS;
445 path[1] = TAP_CD;
446 path[2] = TAP_SD;
447
448 noconsume_path[0] = TAP_SDS;
449 noconsume_path[1] = TAP_CD;
450 noconsume_path[2] = TAP_E1D;
451 noconsume_path[3] = TAP_PD;
452 noconsume_path[4] = TAP_E2D;
453 noconsume_path[5] = TAP_SD;
454
455 fields[0].device = xscale->jtag_info.chain_pos;
456 fields[0].num_bits = 3;
457 fields[0].out_value = NULL;
458 fields[0].out_mask = NULL;
459 fields[0].in_value = &field0_in;
460 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
461
462 fields[1].device = xscale->jtag_info.chain_pos;
463 fields[1].num_bits = 32;
464 fields[1].out_value = NULL;
465 fields[1].out_mask = NULL;
466 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
467 fields[1].in_handler = NULL;
468 fields[1].in_handler_priv = NULL;
469 fields[1].in_check_value = NULL;
470 fields[1].in_check_mask = NULL;
471
472
473
474 fields[2].device = xscale->jtag_info.chain_pos;
475 fields[2].num_bits = 1;
476 fields[2].out_value = NULL;
477 fields[2].out_mask = NULL;
478 fields[2].in_value = NULL;
479 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
480
481 gettimeofday(&timeout, NULL);
482 timeval_add_time(&timeout, 1, 0);
483
484 for (;;)
485 {
486 int i;
487 for (i=0; i<100; i++)
488 {
489 /* if we want to consume the register content (i.e. clear TX_READY),
490 * we have to go straight from Capture-DR to Shift-DR
491 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
492 */
493 if (consume)
494 jtag_add_pathmove(3, path);
495 else
496 {
497 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
498 }
499
500 jtag_add_dr_scan(3, fields, TAP_RTI);
501
502 if ((retval = jtag_execute_queue()) != ERROR_OK)
503 {
504 LOG_ERROR("JTAG error while reading TX");
505 return ERROR_TARGET_TIMEOUT;
506 }
507
508 gettimeofday(&now, NULL);
509 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
510 {
511 LOG_ERROR("time out reading TX register");
512 return ERROR_TARGET_TIMEOUT;
513 }
514 if (!((!(field0_in & 1)) && consume))
515 {
516 goto done;
517 }
518 }
519 LOG_DEBUG("waiting 10ms");
520 usleep(10*1000); /* avoid flooding the logs */
521 }
522 done:
523
524 if (!(field0_in & 1))
525 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
526
527 return ERROR_OK;
528 }
529
530 int xscale_write_rx(target_t *target)
531 {
532 armv4_5_common_t *armv4_5 = target->arch_info;
533 xscale_common_t *xscale = armv4_5->arch_info;
534
535 int retval;
536 struct timeval timeout, now;
537
538 scan_field_t fields[3];
539 u8 field0_out = 0x0;
540 u8 field0_in = 0x0;
541 u8 field0_check_value = 0x2;
542 u8 field0_check_mask = 0x6;
543 u8 field2 = 0x0;
544 u8 field2_check_value = 0x0;
545 u8 field2_check_mask = 0x1;
546
547 jtag_add_end_state(TAP_RTI);
548
549 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
550
551 fields[0].device = xscale->jtag_info.chain_pos;
552 fields[0].num_bits = 3;
553 fields[0].out_value = &field0_out;
554 fields[0].out_mask = NULL;
555 fields[0].in_value = &field0_in;
556 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
557
558 fields[1].device = xscale->jtag_info.chain_pos;
559 fields[1].num_bits = 32;
560 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
561 fields[1].out_mask = NULL;
562 fields[1].in_value = NULL;
563 fields[1].in_handler = NULL;
564 fields[1].in_handler_priv = NULL;
565 fields[1].in_check_value = NULL;
566 fields[1].in_check_mask = NULL;
567
568
569
570 fields[2].device = xscale->jtag_info.chain_pos;
571 fields[2].num_bits = 1;
572 fields[2].out_value = &field2;
573 fields[2].out_mask = NULL;
574 fields[2].in_value = NULL;
575 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
576
577 gettimeofday(&timeout, NULL);
578 timeval_add_time(&timeout, 1, 0);
579
580 /* poll until rx_read is low */
581 LOG_DEBUG("polling RX");
582 for (;;)
583 {
584 int i;
585 for (i=0; i<10; i++)
586 {
587 jtag_add_dr_scan(3, fields, TAP_RTI);
588
589 if ((retval = jtag_execute_queue()) != ERROR_OK)
590 {
591 LOG_ERROR("JTAG error while writing RX");
592 return retval;
593 }
594
595 gettimeofday(&now, NULL);
596 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
597 {
598 LOG_ERROR("time out writing RX register");
599 return ERROR_TARGET_TIMEOUT;
600 }
601 if (!(field0_in & 1))
602 goto done;
603 }
604 LOG_DEBUG("waiting 10ms");
605 usleep(10*1000); /* wait 10ms to avoid flooding the logs */
606 }
607 done:
608
609 /* set rx_valid */
610 field2 = 0x1;
611 jtag_add_dr_scan(3, fields, TAP_RTI);
612
613 if ((retval = jtag_execute_queue()) != ERROR_OK)
614 {
615 LOG_ERROR("JTAG error while writing RX");
616 return retval;
617 }
618
619 return ERROR_OK;
620 }
621
622 /* send count elements, each of size bytes, to the debug handler */
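/* When xscale->fast_memory_access is enabled the per-word status checks
 * (jtag_set_check_value) are skipped; the words are streamed with
 * jtag_add_dr_out and any error only surfaces at the final jtag_execute_queue(). */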
623 int xscale_send(target_t *target, u8 *buffer, int count, int size)
624 {
625 armv4_5_common_t *armv4_5 = target->arch_info;
626 xscale_common_t *xscale = armv4_5->arch_info;
627 u32 t[3];
628 int bits[3];
629
630 int retval;
631
632 int done_count = 0;
633 u8 output[4] = {0, 0, 0, 0};
634
635 scan_field_t fields[3];
636 u8 field0_out = 0x0;
637 u8 field0_check_value = 0x2;
638 u8 field0_check_mask = 0x6;
639 u8 field2 = 0x1;
640 u8 field2_check_value = 0x0;
641 u8 field2_check_mask = 0x1;
642
643 jtag_add_end_state(TAP_RTI);
644
645 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
646
647 fields[0].device = xscale->jtag_info.chain_pos;
648 fields[0].num_bits = 3;
649 fields[0].out_value = &field0_out;
650 fields[0].out_mask = NULL;
651 fields[0].in_handler = NULL;
652 fields[0].in_value = NULL;
653 if (!xscale->fast_memory_access)
654 {
655 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
656 }
657
658 fields[1].device = xscale->jtag_info.chain_pos;
659 fields[1].num_bits = 32;
660 fields[1].out_value = output;
661 fields[1].out_mask = NULL;
662 fields[1].in_value = NULL;
663 fields[1].in_handler = NULL;
664 fields[1].in_handler_priv = NULL;
665 fields[1].in_check_value = NULL;
666 fields[1].in_check_mask = NULL;
667
668
669
670 fields[2].device = xscale->jtag_info.chain_pos;
671 fields[2].num_bits = 1;
672 fields[2].out_value = &field2;
673 fields[2].out_mask = NULL;
674 fields[2].in_value = NULL;
675 fields[2].in_handler = NULL;
676 if (!xscale->fast_memory_access)
677 {
678 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
679 }
680
681 /* stream out the data words; the switch below handles element sizes 4, 2 and 1 */
683 bits[0]=3;
684 t[0]=0;
685 bits[1]=32;
686 t[2]=1;
687 bits[2]=1;
688 int endianness = target->endianness;
689 while (done_count++ < count)
690 {
691 switch (size)
692 {
693 case 4:
694 if (endianness == TARGET_LITTLE_ENDIAN)
695 {
696 t[1]=le_to_h_u32(buffer);
697 } else
698 {
699 t[1]=be_to_h_u32(buffer);
700 }
701 break;
702 case 2:
703 if (endianness == TARGET_LITTLE_ENDIAN)
704 {
705 t[1]=le_to_h_u16(buffer);
706 } else
707 {
708 t[1]=be_to_h_u16(buffer);
709 }
710 break;
711 case 1:
712 t[1]=buffer[0];
713 break;
714 default:
715 LOG_ERROR("BUG: size neither 4, 2 nor 1");
716 exit(-1);
717 }
718 jtag_add_dr_out(xscale->jtag_info.chain_pos,
719 3,
720 bits,
721 t,
722 TAP_RTI);
723 buffer += size;
724 }
725
727
728 if ((retval = jtag_execute_queue()) != ERROR_OK)
729 {
730 LOG_ERROR("JTAG error while sending data to debug handler");
731 return retval;
732 }
733
734 return ERROR_OK;
735 }
736
737 int xscale_send_u32(target_t *target, u32 value)
738 {
739 armv4_5_common_t *armv4_5 = target->arch_info;
740 xscale_common_t *xscale = armv4_5->arch_info;
741
742 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
743 return xscale_write_rx(target);
744 }
745
746 int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
747 {
748 armv4_5_common_t *armv4_5 = target->arch_info;
749 xscale_common_t *xscale = armv4_5->arch_info;
750
751 int retval;
752
753 scan_field_t fields[3];
754 u8 field0 = 0x0;
755 u8 field0_check_value = 0x2;
756 u8 field0_check_mask = 0x7;
757 u8 field2 = 0x0;
758 u8 field2_check_value = 0x0;
759 u8 field2_check_mask = 0x1;
760
761 if (hold_rst != -1)
762 xscale->hold_rst = hold_rst;
763
764 if (ext_dbg_brk != -1)
765 xscale->external_debug_break = ext_dbg_brk;
766
767 jtag_add_end_state(TAP_RTI);
768 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
769
770 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
771 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
772
773 fields[0].device = xscale->jtag_info.chain_pos;
774 fields[0].num_bits = 3;
775 fields[0].out_value = &field0;
776 fields[0].out_mask = NULL;
777 fields[0].in_value = NULL;
778 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
779
780 fields[1].device = xscale->jtag_info.chain_pos;
781 fields[1].num_bits = 32;
782 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
783 fields[1].out_mask = NULL;
784 fields[1].in_value = NULL;
785 fields[1].in_handler = NULL;
786 fields[1].in_handler_priv = NULL;
787 fields[1].in_check_value = NULL;
788 fields[1].in_check_mask = NULL;
789
790
791
792 fields[2].device = xscale->jtag_info.chain_pos;
793 fields[2].num_bits = 1;
794 fields[2].out_value = &field2;
795 fields[2].out_mask = NULL;
796 fields[2].in_value = NULL;
797 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
798
799 jtag_add_dr_scan(3, fields, -1);
800
801 if ((retval = jtag_execute_queue()) != ERROR_OK)
802 {
803 LOG_ERROR("JTAG error while writing DCSR");
804 return retval;
805 }
806
807 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
808 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
809
810 return ERROR_OK;
811 }
812
813 /* parity of the number of bits set: 0 if even, 1 if odd (for 32-bit words) */
814 unsigned int parity (unsigned int v)
815 {
816 unsigned int ov = v;
817 v ^= v >> 16;
818 v ^= v >> 8;
819 v ^= v >> 4;
820 v &= 0xf;
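 /* bit v of the constant 0x6996 holds the parity of the 4-bit value v,
 * i.e. it acts as a 16-entry parity lookup table */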
821 LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
822 return (0x6996 >> v) & 1;
823 }
824
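/* Load a 32-byte line into the (mini) instruction cache via the LDIC JTAG
 * instruction: the first DR scan sends a 6-bit command plus the 27-bit virtual
 * address of the cache line (va >> 5), followed by eight scans of one 32-bit
 * word plus its parity bit each. */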
825 int xscale_load_ic(target_t *target, int mini, u32 va, u32 buffer[8])
826 {
827 armv4_5_common_t *armv4_5 = target->arch_info;
828 xscale_common_t *xscale = armv4_5->arch_info;
829 u8 packet[4];
830 u8 cmd;
831 int word;
832
833 scan_field_t fields[2];
834
835 LOG_DEBUG("loading miniIC at 0x%8.8x", va);
836
837 jtag_add_end_state(TAP_RTI);
838 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
839
840 /* CMD is b010 for Main IC and b011 for Mini IC */
841 if (mini)
842 buf_set_u32(&cmd, 0, 3, 0x3);
843 else
844 buf_set_u32(&cmd, 0, 3, 0x2);
845
846 buf_set_u32(&cmd, 3, 3, 0x0);
847
848 /* virtual address of desired cache line */
849 buf_set_u32(packet, 0, 27, va >> 5);
850
851 fields[0].device = xscale->jtag_info.chain_pos;
852 fields[0].num_bits = 6;
853 fields[0].out_value = &cmd;
854 fields[0].out_mask = NULL;
855 fields[0].in_value = NULL;
856 fields[0].in_check_value = NULL;
857 fields[0].in_check_mask = NULL;
858 fields[0].in_handler = NULL;
859 fields[0].in_handler_priv = NULL;
860
861 fields[1].device = xscale->jtag_info.chain_pos;
862 fields[1].num_bits = 27;
863 fields[1].out_value = packet;
864 fields[1].out_mask = NULL;
865 fields[1].in_value = NULL;
866 fields[1].in_check_value = NULL;
867 fields[1].in_check_mask = NULL;
868 fields[1].in_handler = NULL;
869 fields[1].in_handler_priv = NULL;
870
871 jtag_add_dr_scan(2, fields, -1);
872
873 fields[0].num_bits = 32;
874 fields[0].out_value = packet;
875
876 fields[1].num_bits = 1;
877 fields[1].out_value = &cmd;
878
879 for (word = 0; word < 8; word++)
880 {
881 buf_set_u32(packet, 0, 32, buffer[word]);
882 cmd = parity(*((u32*)packet));
883 jtag_add_dr_scan(2, fields, -1);
884 }
885
886 jtag_execute_queue();
887
888 return ERROR_OK;
889 }
890
891 int xscale_invalidate_ic_line(target_t *target, u32 va)
892 {
893 armv4_5_common_t *armv4_5 = target->arch_info;
894 xscale_common_t *xscale = armv4_5->arch_info;
895 u8 packet[4];
896 u8 cmd;
897
898 scan_field_t fields[2];
899
900 jtag_add_end_state(TAP_RTI);
901 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
902
903 /* CMD for invalidate IC line b000, bits [6:4] b000 */
904 buf_set_u32(&cmd, 0, 6, 0x0);
905
906 /* virtual address of desired cache line */
907 buf_set_u32(packet, 0, 27, va >> 5);
908
909 fields[0].device = xscale->jtag_info.chain_pos;
910 fields[0].num_bits = 6;
911 fields[0].out_value = &cmd;
912 fields[0].out_mask = NULL;
913 fields[0].in_value = NULL;
914 fields[0].in_check_value = NULL;
915 fields[0].in_check_mask = NULL;
916 fields[0].in_handler = NULL;
917 fields[0].in_handler_priv = NULL;
918
919 fields[1].device = xscale->jtag_info.chain_pos;
920 fields[1].num_bits = 27;
921 fields[1].out_value = packet;
922 fields[1].out_mask = NULL;
923 fields[1].in_value = NULL;
924 fields[1].in_check_value = NULL;
925 fields[1].in_check_mask = NULL;
926 fields[1].in_handler = NULL;
927 fields[1].in_handler_priv = NULL;
928
929 jtag_add_dr_scan(2, fields, -1);
930
931 return ERROR_OK;
932 }
933
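/* Rebuild the exception vector tables kept resident in the mini I-cache:
 * vectors 1..7 come either from user-supplied static vectors or from target
 * memory, while vector 0 (reset) is replaced by a branch into the debug
 * handler at handler_address + 0x20, for both the low (0x0) and high
 * (0xffff0000) vector locations. */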
934 int xscale_update_vectors(target_t *target)
935 {
936 armv4_5_common_t *armv4_5 = target->arch_info;
937 xscale_common_t *xscale = armv4_5->arch_info;
938 int i;
939 int retval;
940
941 u32 low_reset_branch, high_reset_branch;
942
943 for (i = 1; i < 8; i++)
944 {
945 /* if there's a static vector specified for this exception, override */
946 if (xscale->static_high_vectors_set & (1 << i))
947 {
948 xscale->high_vectors[i] = xscale->static_high_vectors[i];
949 }
950 else
951 {
952 retval=target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
953 if (retval == ERROR_TARGET_TIMEOUT)
954 return retval;
955 if (retval!=ERROR_OK)
956 {
957 /* Some of these reads will fail as part of normal execution */
958 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
959 }
960 }
961 }
962
963 for (i = 1; i < 8; i++)
964 {
965 if (xscale->static_low_vectors_set & (1 << i))
966 {
967 xscale->low_vectors[i] = xscale->static_low_vectors[i];
968 }
969 else
970 {
971 retval=target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
972 if (retval == ERROR_TARGET_TIMEOUT)
973 return retval;
974 if (retval!=ERROR_OK)
975 {
976 /* Some of these reads will fail as part of normal execution */
977 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
978 }
979 }
980 }
981
982 /* calculate branches to debug handler */
983 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
984 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
985
986 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
987 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
988
989 /* invalidate and load exception vectors in mini i-cache */
990 xscale_invalidate_ic_line(target, 0x0);
991 xscale_invalidate_ic_line(target, 0xffff0000);
992
993 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
994 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
995
996 return ERROR_OK;
997 }
998
999 int xscale_arch_state(struct target_s *target)
1000 {
1001 armv4_5_common_t *armv4_5 = target->arch_info;
1002 xscale_common_t *xscale = armv4_5->arch_info;
1003
1004 char *state[] =
1005 {
1006 "disabled", "enabled"
1007 };
1008
1009 char *arch_dbg_reason[] =
1010 {
1011 "", "\n(processor reset)", "\n(trace buffer full)"
1012 };
1013
1014 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
1015 {
1016 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
1017 exit(-1);
1018 }
1019
1020 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
1021 "cpsr: 0x%8.8x pc: 0x%8.8x\n"
1022 "MMU: %s, D-Cache: %s, I-Cache: %s"
1023 "%s",
1024 armv4_5_state_strings[armv4_5->core_state],
1025 target_debug_reason_strings[target->debug_reason],
1026 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
1027 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
1028 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
1029 state[xscale->armv4_5_mmu.mmu_enabled],
1030 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
1031 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
1032 arch_dbg_reason[xscale->arch_debug_reason]);
1033
1034 return ERROR_OK;
1035 }
1036
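/* Polling is done with a non-consuming read of the TX register: if the debug
 * handler has placed data in TX, the core has entered debug state and the
 * debug entry is processed. */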
1037 int xscale_poll(target_t *target)
1038 {
1039 int retval=ERROR_OK;
1040 armv4_5_common_t *armv4_5 = target->arch_info;
1041 xscale_common_t *xscale = armv4_5->arch_info;
1042
1043 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
1044 {
1045 enum target_state previous_state = target->state;
1046 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
1047 {
1048
1049 /* there's data to read from the tx register, we entered debug state */
1050 xscale->handler_running = 1;
1051
1052 target->state = TARGET_HALTED;
1053
1054 /* process debug entry, fetching current mode regs */
1055 retval = xscale_debug_entry(target);
1056 }
1057 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1058 {
1059 LOG_USER("error while polling TX register, reset CPU");
1060 /* here we "lie" so GDB won't get stuck and a reset can be performed */
1061 target->state = TARGET_HALTED;
1062 }
1063
1064 /* debug_entry could have overwritten target state (i.e. immediate resume)
1065 * don't signal event handlers in that case
1066 */
1067 if (target->state != TARGET_HALTED)
1068 return ERROR_OK;
1069
1070 /* if target was running, signal that we halted
1071 * otherwise we reentered from debug execution */
1072 if (previous_state == TARGET_RUNNING)
1073 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1074 else
1075 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
1076 }
1077
1078 return retval;
1079 }
1080
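/* On debug entry the handler first sends r0, pc, r1-r7 and the cpsr; once the
 * mode is known, the banked r8-r14 (plus spsr for exception modes) follow.
 * The DCSR method-of-entry (MOE) bits then tell us why we stopped, and the
 * saved PC is adjusted accordingly. */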
1081 int xscale_debug_entry(target_t *target)
1082 {
1083 armv4_5_common_t *armv4_5 = target->arch_info;
1084 xscale_common_t *xscale = armv4_5->arch_info;
1085 u32 pc;
1086 u32 buffer[10];
1087 int i;
1088 int retval;
1089
1090 u32 moe;
1091
1092 /* clear external dbg break (will be written on next DCSR read) */
1093 xscale->external_debug_break = 0;
1094 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1095 return retval;
1096
1097 /* get r0, pc, r1 to r7 and cpsr */
1098 if ((retval=xscale_receive(target, buffer, 10))!=ERROR_OK)
1099 return retval;
1100
1101 /* move r0 from buffer to register cache */
1102 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
1103 armv4_5->core_cache->reg_list[0].dirty = 1;
1104 armv4_5->core_cache->reg_list[0].valid = 1;
1105 LOG_DEBUG("r0: 0x%8.8x", buffer[0]);
1106
1107 /* move pc from buffer to register cache */
1108 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
1109 armv4_5->core_cache->reg_list[15].dirty = 1;
1110 armv4_5->core_cache->reg_list[15].valid = 1;
1111 LOG_DEBUG("pc: 0x%8.8x", buffer[1]);
1112
1113 /* move data from buffer to register cache */
1114 for (i = 1; i <= 7; i++)
1115 {
1116 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
1117 armv4_5->core_cache->reg_list[i].dirty = 1;
1118 armv4_5->core_cache->reg_list[i].valid = 1;
1119 LOG_DEBUG("r%i: 0x%8.8x", i, buffer[i + 1]);
1120 }
1121
1122 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
1123 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
1124 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
1125 LOG_DEBUG("cpsr: 0x%8.8x", buffer[9]);
1126
1127 armv4_5->core_mode = buffer[9] & 0x1f;
1128 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
1129 {
1130 target->state = TARGET_UNKNOWN;
1131 LOG_ERROR("cpsr contains invalid mode value - communication failure");
1132 return ERROR_TARGET_FAILURE;
1133 }
1134 LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
1135
1136 if (buffer[9] & 0x20)
1137 armv4_5->core_state = ARMV4_5_STATE_THUMB;
1138 else
1139 armv4_5->core_state = ARMV4_5_STATE_ARM;
1140
1141 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1142 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
1143 {
1144 xscale_receive(target, buffer, 8);
1145 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1146 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
1147 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
1148 }
1149 else
1150 {
1151 /* r8 to r14, but no spsr */
1152 xscale_receive(target, buffer, 7);
1153 }
1154
1155 /* move data from buffer to register cache */
1156 for (i = 8; i <= 14; i++)
1157 {
1158 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
1159 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
1160 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
1161 }
1162
1163 /* examine debug reason */
1164 xscale_read_dcsr(target);
1165 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1166
1167 /* stored PC (for calculating fixup) */
1168 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1169
1170 switch (moe)
1171 {
1172 case 0x0: /* Processor reset */
1173 target->debug_reason = DBG_REASON_DBGRQ;
1174 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1175 pc -= 4;
1176 break;
1177 case 0x1: /* Instruction breakpoint hit */
1178 target->debug_reason = DBG_REASON_BREAKPOINT;
1179 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1180 pc -= 4;
1181 break;
1182 case 0x2: /* Data breakpoint hit */
1183 target->debug_reason = DBG_REASON_WATCHPOINT;
1184 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1185 pc -= 4;
1186 break;
1187 case 0x3: /* BKPT instruction executed */
1188 target->debug_reason = DBG_REASON_BREAKPOINT;
1189 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1190 pc -= 4;
1191 break;
1192 case 0x4: /* Ext. debug event */
1193 target->debug_reason = DBG_REASON_DBGRQ;
1194 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1195 pc -= 4;
1196 break;
1197 case 0x5: /* Vector trap occurred */
1198 target->debug_reason = DBG_REASON_BREAKPOINT;
1199 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1200 pc -= 4;
1201 break;
1202 case 0x6: /* Trace buffer full break */
1203 target->debug_reason = DBG_REASON_DBGRQ;
1204 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1205 pc -= 4;
1206 break;
1207 case 0x7: /* Reserved */
1208 default:
1209 LOG_ERROR("Method of Entry is 'Reserved'");
1210 exit(-1);
1211 break;
1212 }
1213
1214 /* apply PC fixup */
1215 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1216
1217 /* on the first debug entry, identify cache type */
1218 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1219 {
1220 u32 cache_type_reg;
1221
1222 /* read cp15 cache type register */
1223 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1224 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1225
1226 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1227 }
1228
1229 /* examine MMU and Cache settings */
1230 /* read cp15 control register */
1231 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1232 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1233 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1234 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1235 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1236
1237 /* tracing enabled, read collected trace data */
1238 if (xscale->trace.buffer_enabled)
1239 {
1240 xscale_read_trace(target);
1241 xscale->trace.buffer_fill--;
1242
1243 /* resume if we're still collecting trace data */
1244 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1245 && (xscale->trace.buffer_fill > 0))
1246 {
1247 xscale_resume(target, 1, 0x0, 1, 0);
1248 }
1249 else
1250 {
1251 xscale->trace.buffer_enabled = 0;
1252 }
1253 }
1254
1255 return ERROR_OK;
1256 }
1257
1258 int xscale_halt(target_t *target)
1259 {
1260 armv4_5_common_t *armv4_5 = target->arch_info;
1261 xscale_common_t *xscale = armv4_5->arch_info;
1262
1263 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1264
1265 if (target->state == TARGET_HALTED)
1266 {
1267 LOG_DEBUG("target was already halted");
1268 return ERROR_OK;
1269 }
1270 else if (target->state == TARGET_UNKNOWN)
1271 {
1272 /* this must not happen for an XScale target */
1273 LOG_ERROR("target was in unknown state when halt was requested");
1274 return ERROR_TARGET_INVALID;
1275 }
1276 else if (target->state == TARGET_RESET)
1277 {
1278 LOG_DEBUG("target->state == TARGET_RESET");
1279 }
1280 else
1281 {
1282 /* assert external dbg break */
1283 xscale->external_debug_break = 1;
1284 xscale_read_dcsr(target);
1285
1286 target->debug_reason = DBG_REASON_DBGRQ;
1287 }
1288
1289 return ERROR_OK;
1290 }
1291
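/* The XScale has no hardware single-step, so stepping is emulated by placing
 * an instruction breakpoint (IBCR0) on the predicted next PC; if IBCR0 is
 * already used by a regular breakpoint, that breakpoint is temporarily unset. */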
1292 int xscale_enable_single_step(struct target_s *target, u32 next_pc)
1293 {
1294 armv4_5_common_t *armv4_5 = target->arch_info;
1295 xscale_common_t *xscale= armv4_5->arch_info;
1296 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1297
1298 if (xscale->ibcr0_used)
1299 {
1300 breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1301
1302 if (ibcr0_bp)
1303 {
1304 xscale_unset_breakpoint(target, ibcr0_bp);
1305 }
1306 else
1307 {
1308 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1309 exit(-1);
1310 }
1311 }
1312
1313 xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1314
1315 return ERROR_OK;
1316 }
1317
1318 int xscale_disable_single_step(struct target_s *target)
1319 {
1320 armv4_5_common_t *armv4_5 = target->arch_info;
1321 xscale_common_t *xscale= armv4_5->arch_info;
1322 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1323
1324 xscale_set_reg_u32(ibcr0, 0x0);
1325
1326 return ERROR_OK;
1327 }
1328
1329 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
1330 {
1331 armv4_5_common_t *armv4_5 = target->arch_info;
1332 xscale_common_t *xscale= armv4_5->arch_info;
1333 breakpoint_t *breakpoint = target->breakpoints;
1334
1335 u32 current_pc;
1336
1337 int retval;
1338 int i;
1339
1340 LOG_DEBUG("-");
1341
1342 if (target->state != TARGET_HALTED)
1343 {
1344 LOG_WARNING("target not halted");
1345 return ERROR_TARGET_NOT_HALTED;
1346 }
1347
1348 if (!debug_execution)
1349 {
1350 target_free_all_working_areas(target);
1351 }
1352
1353 /* update vector tables */
1354 if ((retval=xscale_update_vectors(target))!=ERROR_OK)
1355 return retval;
1356
1357 /* current = 1: continue on current pc, otherwise continue at <address> */
1358 if (!current)
1359 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1360
1361 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1362
1363 /* if we're at the reset vector, we have to simulate the branch */
1364 if (current_pc == 0x0)
1365 {
1366 arm_simulate_step(target, NULL);
1367 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1368 }
1369
1370 /* the front-end may request us not to handle breakpoints */
1371 if (handle_breakpoints)
1372 {
1373 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1374 {
1375 u32 next_pc;
1376
1377 /* there's a breakpoint at the current PC, we have to step over it */
1378 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1379 xscale_unset_breakpoint(target, breakpoint);
1380
1381 /* calculate PC of next instruction */
1382 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1383 {
1384 u32 current_opcode;
1385 target_read_u32(target, current_pc, &current_opcode);
1386 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1387 }
1388
1389 LOG_DEBUG("enable single-step");
1390 xscale_enable_single_step(target, next_pc);
1391
1392 /* restore banked registers */
1393 xscale_restore_context(target);
1394
1395 /* send resume request (command 0x30 or 0x31)
1396 * clean the trace buffer if it is to be enabled (0x62) */
1397 if (xscale->trace.buffer_enabled)
1398 {
1399 xscale_send_u32(target, 0x62);
1400 xscale_send_u32(target, 0x31);
1401 }
1402 else
1403 xscale_send_u32(target, 0x30);
1404
1405 /* send CPSR */
1406 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1407 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1408
1409 for (i = 7; i >= 0; i--)
1410 {
1411 /* send register */
1412 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1413 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1414 }
1415
1416 /* send PC */
1417 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1418 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1419
1420 /* wait for and process debug entry */
1421 xscale_debug_entry(target);
1422
1423 LOG_DEBUG("disable single-step");
1424 xscale_disable_single_step(target);
1425
1426 LOG_DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
1427 xscale_set_breakpoint(target, breakpoint);
1428 }
1429 }
1430
1431 /* enable any pending breakpoints and watchpoints */
1432 xscale_enable_breakpoints(target);
1433 xscale_enable_watchpoints(target);
1434
1435 /* restore banked registers */
1436 xscale_restore_context(target);
1437
1438 /* send resume request (command 0x30 or 0x31)
1439 * clean the trace buffer if it is to be enabled (0x62) */
1440 if (xscale->trace.buffer_enabled)
1441 {
1442 xscale_send_u32(target, 0x62);
1443 xscale_send_u32(target, 0x31);
1444 }
1445 else
1446 xscale_send_u32(target, 0x30);
1447
1448 /* send CPSR */
1449 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1450 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1451
1452 for (i = 7; i >= 0; i--)
1453 {
1454 /* send register */
1455 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1456 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1457 }
1458
1459 /* send PC */
1460 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1461 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1462
1463 target->debug_reason = DBG_REASON_NOTHALTED;
1464
1465 if (!debug_execution)
1466 {
1467 /* registers are now invalid */
1468 armv4_5_invalidate_core_regs(target);
1469 target->state = TARGET_RUNNING;
1470 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1471 }
1472 else
1473 {
1474 target->state = TARGET_DEBUG_RUNNING;
1475 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1476 }
1477
1478 LOG_DEBUG("target resumed");
1479
1480 xscale->handler_running = 1;
1481
1482 return ERROR_OK;
1483 }
1484
1485 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
1486 {
1487 armv4_5_common_t *armv4_5 = target->arch_info;
1488 xscale_common_t *xscale = armv4_5->arch_info;
1489 breakpoint_t *breakpoint = target->breakpoints;
1490
1491 u32 current_pc, next_pc;
1492 int i;
1493 int retval;
1494
1495 if (target->state != TARGET_HALTED)
1496 {
1497 LOG_WARNING("target not halted");
1498 return ERROR_TARGET_NOT_HALTED;
1499 }
1500
1501 /* current = 1: continue on current pc, otherwise continue at <address> */
1502 if (!current)
1503 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1504
1505 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1506
1507 /* if we're at the reset vector, we have to simulate the step */
1508 if (current_pc == 0x0)
1509 {
1510 arm_simulate_step(target, NULL);
1511 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1512
1513 target->debug_reason = DBG_REASON_SINGLESTEP;
1514 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1515
1516 return ERROR_OK;
1517 }
1518
1519 /* the front-end may request us not to handle breakpoints */
1520 if (handle_breakpoints)
1521 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1522 {
1523 xscale_unset_breakpoint(target, breakpoint);
1524 }
1525
1526 target->debug_reason = DBG_REASON_SINGLESTEP;
1527
1528 /* calculate PC of next instruction */
1529 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1530 {
1531 u32 current_opcode;
1532 target_read_u32(target, current_pc, &current_opcode);
1533 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1534 }
1535
1536 LOG_DEBUG("enable single-step");
1537 xscale_enable_single_step(target, next_pc);
1538
1539 /* restore banked registers */
1540 xscale_restore_context(target);
1541
1542 /* send resume request (command 0x30 or 0x31)
1543 * clean the trace buffer if it is to be enabled (0x62) */
1544 if (xscale->trace.buffer_enabled)
1545 {
1546 xscale_send_u32(target, 0x62);
1547 xscale_send_u32(target, 0x31);
1548 }
1549 else
1550 xscale_send_u32(target, 0x30);
1551
1552 /* send CPSR */
1553 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1554 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1555
1556 for (i = 7; i >= 0; i--)
1557 {
1558 /* send register */
1559 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1560 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1561 }
1562
1563 /* send PC */
1564 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1565 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1566
1567 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1568
1569 /* registers are now invalid */
1570 armv4_5_invalidate_core_regs(target);
1571
1572 /* wait for and process debug entry */
1573 xscale_debug_entry(target);
1574
1575 LOG_DEBUG("disable single-step");
1576 xscale_disable_single_step(target);
1577
1578 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1579
1580 if (breakpoint)
1581 {
1582 xscale_set_breakpoint(target, breakpoint);
1583 }
1584
1585 LOG_DEBUG("target stepped");
1586
1587 return ERROR_OK;
1588
1589 }
1590
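/* Before asserting SRST, program the DCSR with hold_rst and the reset vector
 * trap so the core stops in the debug handler once reset is released; BYPASS
 * is then selected while SRST is driven. */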
1591 int xscale_assert_reset(target_t *target)
1592 {
1593 armv4_5_common_t *armv4_5 = target->arch_info;
1594 xscale_common_t *xscale = armv4_5->arch_info;
1595
1596 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1597
1598 /* TRST every time. We want to be able to support daemon_startup attach */
1599 jtag_add_reset(1, 0);
1600 jtag_add_sleep(5000);
1601 jtag_add_reset(0, 0);
1602 jtag_add_sleep(5000);
1603 jtag_execute_queue();
1604
1605
1606
1607 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1608 * end up in T-L-R, which would reset JTAG)
1609 */
1610 jtag_add_end_state(TAP_RTI);
1611 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
1612
1613 /* set Hold reset, Halt mode and Trap Reset */
1614 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1615 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1616 xscale_write_dcsr(target, 1, 0);
1617
1618 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1619 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, 0x7f);
1620 jtag_execute_queue();
1621
1622 /* assert reset */
1623 jtag_add_reset(0, 1);
1624
1625 /* sleep 1ms, to be sure we fulfill any requirements */
1626 jtag_add_sleep(1000);
1627 jtag_execute_queue();
1628
1629 target->state = TARGET_RESET;
1630
1631 return ERROR_OK;
1632 }
1633
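/* On the first reset deassertion the debug handler binary
 * (xscale/debug_handler.bin) is loaded into the mini I-cache, 32 bytes at a
 * time, while the core is still held in reset via DCSR hold_rst; afterwards
 * hold_rst is cleared so the core runs into the debug handler. */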
1634 int xscale_deassert_reset(target_t *target)
1635 {
1636 armv4_5_common_t *armv4_5 = target->arch_info;
1637 xscale_common_t *xscale = armv4_5->arch_info;
1638
1639 fileio_t debug_handler;
1640 u32 address;
1641 u32 binary_size;
1642
1643 u32 buf_cnt;
1644 int i;
1645 int retval;
1646
1647 breakpoint_t *breakpoint = target->breakpoints;
1648
1649 LOG_DEBUG("-");
1650
1651 xscale->ibcr_available = 2;
1652 xscale->ibcr0_used = 0;
1653 xscale->ibcr1_used = 0;
1654
1655 xscale->dbr_available = 2;
1656 xscale->dbr0_used = 0;
1657 xscale->dbr1_used = 0;
1658
1659 /* mark all hardware breakpoints as unset */
1660 while (breakpoint)
1661 {
1662 if (breakpoint->type == BKPT_HARD)
1663 {
1664 breakpoint->set = 0;
1665 }
1666 breakpoint = breakpoint->next;
1667 }
1668
1669 if (!xscale->handler_installed)
1670 {
1671 /* release SRST */
1672 jtag_add_reset(0, 0);
1673
1674 /* wait 300ms; 150 and 100ms were not enough */
1675 jtag_add_sleep(300*1000);
1676
1677 jtag_add_runtest(2030, TAP_RTI);
1678 jtag_execute_queue();
1679
1680 /* set Hold reset, Halt mode and Trap Reset */
1681 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1682 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1683 xscale_write_dcsr(target, 1, 0);
1684
1685 /* Load debug handler */
1686 if (fileio_open(&debug_handler, "xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
1687 {
1688 return ERROR_OK;
1689 }
1690
1691 if ((binary_size = debug_handler.size) % 4)
1692 {
1693 LOG_ERROR("debug_handler.bin: size not a multiple of 4");
1694 exit(-1);
1695 }
1696
1697 if (binary_size > 0x800)
1698 {
1699 LOG_ERROR("debug_handler.bin: larger than 2kb");
1700 exit(-1);
1701 }
1702
1703 binary_size = CEIL(binary_size, 32) * 32;
1704
1705 address = xscale->handler_address;
1706 while (binary_size > 0)
1707 {
1708 u32 cache_line[8];
1709 u8 buffer[32];
1710
1711 if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
1712 {
1713 LOG_ERROR("reading debug handler failed");
 fileio_close(&debug_handler);
 return retval;
1714 }
1715
1716 for (i = 0; i < buf_cnt; i += 4)
1717 {
1718 /* convert LE buffer to host-endian u32 */
1719 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1720 }
1721
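/* pad a short final block with 0xe1a08008 ("mov r8, r8", effectively a nop)
 * so that a full 32-byte cache line is always loaded */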
1722 for (; buf_cnt < 32; buf_cnt += 4)
1723 {
1724 cache_line[buf_cnt / 4] = 0xe1a08008;
1725 }
1726
1727 /* only load addresses other than the reset vectors */
1728 if ((address % 0x400) != 0x0)
1729 {
1730 xscale_load_ic(target, 1, address, cache_line);
1731 }
1732
1733 address += buf_cnt;
1734 binary_size -= buf_cnt;
1735 };
1736
1737 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
1738 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
1739
1740 jtag_add_runtest(30, TAP_RTI);
1741
1742 jtag_add_sleep(100000);
1743
1744 /* set Hold reset, Halt mode and Trap Reset */
1745 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1746 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1747 xscale_write_dcsr(target, 1, 0);
1748
1749 /* clear Hold reset to let the target run (should enter debug handler) */
1750 xscale_write_dcsr(target, 0, 1);
1751 target->state = TARGET_RUNNING;
1752
1753 if ((target->reset_mode != RESET_HALT) && (target->reset_mode != RESET_INIT))
1754 {
1755 jtag_add_sleep(10000);
1756
1757 /* we should have entered debug now */
1758 xscale_debug_entry(target);
1759 target->state = TARGET_HALTED;
1760
1761 /* resume the target */
1762 xscale_resume(target, 1, 0x0, 1, 0);
1763 }
1764
1765 fileio_close(&debug_handler);
1766 }
1767 else
1768 {
1769 jtag_add_reset(0, 0);
1770 }
1771
1772
1773 return ERROR_OK;
1774 }
1775
1776 int xscale_soft_reset_halt(struct target_s *target)
1777 {
1778
1779 return ERROR_OK;
1780 }
1781
1782 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
1783 {
1784
1785 return ERROR_OK;
1786 }
1787
1788 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
1789 {
1790
1791 return ERROR_OK;
1792 }
1793
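/* Fetch the banked registers of all privileged modes: for each mode with
 * invalid cache entries, send command 0x0 followed by a CPSR selecting that
 * mode (with IRQ/FIQ masked); the handler answers with r8-r14 and, for
 * exception modes, the SPSR. */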
1794 int xscale_full_context(target_t *target)
1795 {
1796 armv4_5_common_t *armv4_5 = target->arch_info;
1797
1798 u32 *buffer;
1799
1800 int i, j;
1801
1802 LOG_DEBUG("-");
1803
1804 if (target->state != TARGET_HALTED)
1805 {
1806 LOG_WARNING("target not halted");
1807 return ERROR_TARGET_NOT_HALTED;
1808 }
1809
1810 buffer = malloc(4 * 8);
1811
1812 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1813 * we can't enter User mode on an XScale (unpredictable),
1814 * but User shares registers with SYS
1815 */
1816 for(i = 1; i < 7; i++)
1817 {
1818 int valid = 1;
1819
1820 /* check if there are invalid registers in the current mode
1821 */
1822 for (j = 0; j <= 16; j++)
1823 {
1824 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1825 valid = 0;
1826 }
1827
1828 if (!valid)
1829 {
1830 u32 tmp_cpsr;
1831
1832 /* request banked registers */
1833 xscale_send_u32(target, 0x0);
1834
1835 tmp_cpsr = 0x0;
1836 tmp_cpsr |= armv4_5_number_to_mode(i);
1837 tmp_cpsr |= 0xc0; /* I/F bits */
1838
1839 /* send CPSR for desired mode */
1840 xscale_send_u32(target, tmp_cpsr);
1841
1842 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1843 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1844 {
1845 xscale_receive(target, buffer, 8);
1846 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1847 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1848 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1849 }
1850 else
1851 {
1852 xscale_receive(target, buffer, 7);
1853 }
1854
1855 /* move data from buffer to register cache */
1856 for (j = 8; j <= 14; j++)
1857 {
1858 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1859 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1860 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1861 }
1862 }
1863 }
1864
1865 free(buffer);
1866
1867 return ERROR_OK;
1868 }
1869
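/* Write back dirty banked registers: for each mode with dirty entries, send
 * command 0x1, a CPSR selecting that mode, then r8-r14 and, for exception
 * modes, the SPSR. */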
1870 int xscale_restore_context(target_t *target)
1871 {
1872 armv4_5_common_t *armv4_5 = target->arch_info;
1873
1874 int i, j;
1875
1876 LOG_DEBUG("-");
1877
1878 if (target->state != TARGET_HALTED)
1879 {
1880 LOG_WARNING("target not halted");
1881 return ERROR_TARGET_NOT_HALTED;
1882 }
1883
1884 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1885 * we can't enter User mode on an XScale (unpredictable),
1886 * but User shares registers with SYS
1887 */
1888 for(i = 1; i < 7; i++)
1889 {
1890 int dirty = 0;
1891
1892 /* check if there are dirty registers in this mode */
1894 for (j = 8; j <= 14; j++)
1895 {
1896 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1897 dirty = 1;
1898 }
1899
1900 /* if not USR/SYS, check if the SPSR needs to be written */
1901 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1902 {
1903 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1904 dirty = 1;
1905 }
1906
1907 if (dirty)
1908 {
1909 u32 tmp_cpsr;
1910
1911 /* send banked registers */
1912 xscale_send_u32(target, 0x1);
1913
1914 tmp_cpsr = 0x0;
1915 tmp_cpsr |= armv4_5_number_to_mode(i);
1916 tmp_cpsr |= 0xc0; /* I/F bits */
1917
1918 /* send CPSR for desired mode */
1919 xscale_send_u32(target, tmp_cpsr);
1920
1921 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1922 for (j = 8; j <= 14; j++)
1923 {
1924 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1925 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1926 }
1927
1928 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1929 {
1930 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1931 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1932 }
1933 }
1934 }
1935
1936 return ERROR_OK;
1937 }
1938
1939 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1940 {
1941 armv4_5_common_t *armv4_5 = target->arch_info;
1942 xscale_common_t *xscale = armv4_5->arch_info;
1943 u32 *buf32;
1944 int i;
1945 int retval;
1946
1947 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1948
1949 if (target->state != TARGET_HALTED)
1950 {
1951 LOG_WARNING("target not halted");
1952 return ERROR_TARGET_NOT_HALTED;
1953 }
1954
1955 /* sanitize arguments */
1956 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1957 return ERROR_INVALID_ARGUMENTS;
1958
1959 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1960 return ERROR_TARGET_UNALIGNED_ACCESS;
1961
1962 /* send memory read request (command 0x1n, n: access size) */
1963 if ((retval=xscale_send_u32(target, 0x10 | size))!=ERROR_OK)
1964 return retval;
1965
1966 /* send base address for read request */
1967 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
1968 return retval;
1969
1970 /* send number of requested data words */
1971 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
1972 return retval;
1973
1974 /* receive data from target (count times 32-bit words in host endianness) */
1975 buf32 = malloc(4 * count);
1976 if ((retval=xscale_receive(target, buf32, count))!=ERROR_OK)
{
free(buf32); /* don't leak the temporary buffer on a failed receive */
return retval;
}
1978
1979 /* extract data from host-endian buffer into byte stream */
1980 for (i = 0; i < count; i++)
1981 {
1982 switch (size)
1983 {
1984 case 4:
1985 target_buffer_set_u32(target, buffer, buf32[i]);
1986 buffer += 4;
1987 break;
1988 case 2:
1989 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1990 buffer += 2;
1991 break;
1992 case 1:
1993 *buffer++ = buf32[i] & 0xff;
1994 break;
1995 default:
1996 LOG_ERROR("should never get here");
1997 exit(-1);
1998 }
1999 }
2000
2001 free(buf32);
2002
2003 /* examine DCSR, to see if Sticky Abort (SA) got set */
2004 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
2005 return retval;
2006 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2007 {
2008 /* clear SA bit */
2009 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
2010 return retval;
2011
2012 return ERROR_TARGET_DATA_ABORT;
2013 }
2014
2015 return ERROR_OK;
2016 }
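/* Hedged usage sketch (not part of the driver): reading one 32-bit word through
 * the debug handler protocol implemented above (command 0x10 | size, then the
 * base address, then the word count, then the data stream back).
 * example_read_one_word() is a hypothetical wrapper; real callers normally go
 * through the generic target API.
 */
#if 0
int example_read_one_word(target_t *target, u32 address, u32 *value)
{
	u8 buf[4];
	int retval = xscale_read_memory(target, address, 4, 1, buf);
	if (retval != ERROR_OK)
		return retval;
	*value = target_buffer_get_u32(target, buf);	/* buf holds target-endian data */
	return ERROR_OK;
}
#endif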
2017
2018 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
2019 {
2020 armv4_5_common_t *armv4_5 = target->arch_info;
2021 xscale_common_t *xscale = armv4_5->arch_info;
2022 int retval;
2023
2024 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
2025
2026 if (target->state != TARGET_HALTED)
2027 {
2028 LOG_WARNING("target not halted");
2029 return ERROR_TARGET_NOT_HALTED;
2030 }
2031
2032 /* sanitize arguments */
2033 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
2034 return ERROR_INVALID_ARGUMENTS;
2035
2036 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2037 return ERROR_TARGET_UNALIGNED_ACCESS;
2038
2039 /* send memory write request (command 0x2n, n: access size) */
2040 if ((retval=xscale_send_u32(target, 0x20 | size))!=ERROR_OK)
2041 return retval;
2042
2043 /* send base address for write request */
2044 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
2045 return retval;
2046
2047 /* send number of data words to be written */
2048 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
2049 return retval;
2050
2051 /* extract data from host-endian buffer into byte stream */
2052 #if 0
2053 for (i = 0; i < count; i++)
2054 {
2055 switch (size)
2056 {
2057 case 4:
2058 value = target_buffer_get_u32(target, buffer);
2059 xscale_send_u32(target, value);
2060 buffer += 4;
2061 break;
2062 case 2:
2063 value = target_buffer_get_u16(target, buffer);
2064 xscale_send_u32(target, value);
2065 buffer += 2;
2066 break;
2067 case 1:
2068 value = *buffer;
2069 xscale_send_u32(target, value);
2070 buffer += 1;
2071 break;
2072 default:
2073 LOG_ERROR("should never get here");
2074 exit(-1);
2075 }
2076 }
2077 #endif
2078 if ((retval=xscale_send(target, buffer, count, size))!=ERROR_OK)
2079 return retval;
2080
2081 /* examine DCSR, to see if Sticky Abort (SA) got set */
2082 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
2083 return retval;
2084 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2085 {
2086 /* clear SA bit */
2087 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
2088 return retval;
2089
2090 return ERROR_TARGET_DATA_ABORT;
2091 }
2092
2093 return ERROR_OK;
2094 }
2095
2096 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
2097 {
2098 return xscale_write_memory(target, address, 4, count, buffer);
2099 }
2100
2101 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum)
2102 {
2103 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2104 }
2105
2106 u32 xscale_get_ttb(target_t *target)
2107 {
2108 armv4_5_common_t *armv4_5 = target->arch_info;
2109 xscale_common_t *xscale = armv4_5->arch_info;
2110 u32 ttb;
2111
2112 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2113 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2114
2115 return ttb;
2116 }
2117
2118 void xscale_disable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2119 {
2120 armv4_5_common_t *armv4_5 = target->arch_info;
2121 xscale_common_t *xscale = armv4_5->arch_info;
2122 u32 cp15_control;
2123
2124 /* read cp15 control register */
2125 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2126 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2127
2128 if (mmu)
2129 cp15_control &= ~0x1U;
2130
2131 if (d_u_cache)
2132 {
2133 /* clean DCache */
2134 xscale_send_u32(target, 0x50);
2135 xscale_send_u32(target, xscale->cache_clean_address);
2136
2137 /* invalidate DCache */
2138 xscale_send_u32(target, 0x51);
2139
2140 cp15_control &= ~0x4U;
2141 }
2142
2143 if (i_cache)
2144 {
2145 /* invalidate ICache */
2146 xscale_send_u32(target, 0x52);
2147 cp15_control &= ~0x1000U;
2148 }
2149
2150 /* write new cp15 control register */
2151 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2152
2153 /* execute cpwait to ensure outstanding operations complete */
2154 xscale_send_u32(target, 0x53);
2155 }
2156
2157 void xscale_enable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2158 {
2159 armv4_5_common_t *armv4_5 = target->arch_info;
2160 xscale_common_t *xscale = armv4_5->arch_info;
2161 u32 cp15_control;
2162
2163 /* read cp15 control register */
2164 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2165 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2166
2167 if (mmu)
2168 cp15_control |= 0x1U;
2169
2170 if (d_u_cache)
2171 cp15_control |= 0x4U;
2172
2173 if (i_cache)
2174 cp15_control |= 0x1000U;
2175
2176 /* write new cp15 control register */
2177 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2178
2179 /* execute cpwait to ensure outstanding operations complete */
2180 xscale_send_u32(target, 0x53);
2181 }
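/* The two helpers above toggle bits in the cp15 control register: bit 0 enables
 * the MMU, bit 2 the data/unified cache and bit 12 the instruction cache.  The
 * EXAMPLE_* names below are hypothetical, added only to document the masks used
 * by xscale_enable_mmu_caches()/xscale_disable_mmu_caches().
 */
#if 0
#define EXAMPLE_CP15_CTRL_MMU_ENABLE	0x00000001U	/* bit 0  (M) */
#define EXAMPLE_CP15_CTRL_DCACHE_ENABLE	0x00000004U	/* bit 2  (C) */
#define EXAMPLE_CP15_CTRL_ICACHE_ENABLE	0x00001000U	/* bit 12 (I) */
#endif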
2182
2183 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2184 {
2185 armv4_5_common_t *armv4_5 = target->arch_info;
2186 xscale_common_t *xscale = armv4_5->arch_info;
2187
2188 if (target->state != TARGET_HALTED)
2189 {
2190 LOG_WARNING("target not halted");
2191 return ERROR_TARGET_NOT_HALTED;
2192 }
2193
2194 if (xscale->force_hw_bkpts)
2195 breakpoint->type = BKPT_HARD;
2196
2197 if (breakpoint->set)
2198 {
2199 LOG_WARNING("breakpoint already set");
2200 return ERROR_OK;
2201 }
2202
2203 if (breakpoint->type == BKPT_HARD)
2204 {
2205 u32 value = breakpoint->address | 1;
2206 if (!xscale->ibcr0_used)
2207 {
2208 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2209 xscale->ibcr0_used = 1;
2210 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2211 }
2212 else if (!xscale->ibcr1_used)
2213 {
2214 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2215 xscale->ibcr1_used = 1;
2216 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2217 }
2218 else
2219 {
2220 LOG_ERROR("BUG: no hardware comparator available");
2221 return ERROR_OK;
2222 }
2223 }
2224 else if (breakpoint->type == BKPT_SOFT)
2225 {
2226 if (breakpoint->length == 4)
2227 {
2228 /* keep the original instruction in target endianness */
2229 target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2230 /* replace it with the ARM breakpoint instruction (xscale->arm_bkpt is host endian) */
2231 target_write_u32(target, breakpoint->address, xscale->arm_bkpt);
2232 }
2233 else
2234 {
2235 /* keep the original instruction in target endianness */
2236 target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2237 /* replace it with the Thumb breakpoint instruction (xscale->thumb_bkpt is host endian) */
2238 target_write_u16(target, breakpoint->address, xscale->thumb_bkpt);
2239 }
2240 breakpoint->set = 1;
2241 }
2242
2243 return ERROR_OK;
2244
2245 }
2246
2247 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2248 {
2249 armv4_5_common_t *armv4_5 = target->arch_info;
2250 xscale_common_t *xscale = armv4_5->arch_info;
2251
2252 if (target->state != TARGET_HALTED)
2253 {
2254 LOG_WARNING("target not halted");
2255 return ERROR_TARGET_NOT_HALTED;
2256 }
2257
2258 if (xscale->force_hw_bkpts)
2259 {
2260 LOG_DEBUG("forcing use of hardware breakpoint at address 0x%8.8x", breakpoint->address);
2261 breakpoint->type = BKPT_HARD;
2262 }
2263
2264 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2265 {
2266 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2267 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2268 }
2269 else if (breakpoint->type == BKPT_HARD)
2270 {
2271 xscale->ibcr_available--; /* only hardware breakpoints consume an IBCR comparator */
2272 }
2273
2274 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2275 {
2276 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2277 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2278 }
2279
2280 return ERROR_OK;
2281 }
2282
2283 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2284 {
2285 armv4_5_common_t *armv4_5 = target->arch_info;
2286 xscale_common_t *xscale = armv4_5->arch_info;
2287
2288 if (target->state != TARGET_HALTED)
2289 {
2290 LOG_WARNING("target not halted");
2291 return ERROR_TARGET_NOT_HALTED;
2292 }
2293
2294 if (!breakpoint->set)
2295 {
2296 LOG_WARNING("breakpoint not set");
2297 return ERROR_OK;
2298 }
2299
2300 if (breakpoint->type == BKPT_HARD)
2301 {
2302 if (breakpoint->set == 1)
2303 {
2304 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2305 xscale->ibcr0_used = 0;
2306 }
2307 else if (breakpoint->set == 2)
2308 {
2309 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2310 xscale->ibcr1_used = 0;
2311 }
2312 breakpoint->set = 0;
2313 }
2314 else
2315 {
2316 /* restore original instruction (kept in target endianness) */
2317 if (breakpoint->length == 4)
2318 {
2319 target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2320 }
2321 else
2322 {
2323 target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2324 }
2325 breakpoint->set = 0;
2326 }
2327
2328 return ERROR_OK;
2329 }
2330
2331 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2332 {
2333 armv4_5_common_t *armv4_5 = target->arch_info;
2334 xscale_common_t *xscale = armv4_5->arch_info;
2335
2336 if (target->state != TARGET_HALTED)
2337 {
2338 LOG_WARNING("target not halted");
2339 return ERROR_TARGET_NOT_HALTED;
2340 }
2341
2342 if (breakpoint->set)
2343 {
2344 xscale_unset_breakpoint(target, breakpoint);
2345 }
2346
2347 if (breakpoint->type == BKPT_HARD)
2348 xscale->ibcr_available++;
2349
2350 return ERROR_OK;
2351 }
2352
2353 int xscale_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2354 {
2355 armv4_5_common_t *armv4_5 = target->arch_info;
2356 xscale_common_t *xscale = armv4_5->arch_info;
2357 u8 enable=0;
2358 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2359 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2360
2361 if (target->state != TARGET_HALTED)
2362 {
2363 LOG_WARNING("target not halted");
2364 return ERROR_TARGET_NOT_HALTED;
2365 }
2366
2367 xscale_get_reg(dbcon);
2368
2369 switch (watchpoint->rw)
2370 {
2371 case WPT_READ:
2372 enable = 0x3;
2373 break;
2374 case WPT_ACCESS:
2375 enable = 0x2;
2376 break;
2377 case WPT_WRITE:
2378 enable = 0x1;
2379 break;
2380 default:
2381 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2382 }
2383
2384 if (!xscale->dbr0_used)
2385 {
2386 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2387 dbcon_value |= enable;
2388 xscale_set_reg_u32(dbcon, dbcon_value);
2389 watchpoint->set = 1;
2390 xscale->dbr0_used = 1;
2391 }
2392 else if (!xscale->dbr1_used)
2393 {
2394 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2395 dbcon_value |= enable << 2;
2396 xscale_set_reg_u32(dbcon, dbcon_value);
2397 watchpoint->set = 2;
2398 xscale->dbr1_used = 1;
2399 }
2400 else
2401 {
2402 LOG_ERROR("BUG: no hardware comparator available");
2403 return ERROR_OK;
2404 }
2405
2406 return ERROR_OK;
2407 }
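/* Hedged reference for the DBCON packing used above: bits [1:0] enable the DBR0
 * comparator and bits [3:2] enable DBR1, with 0x1 = break on writes, 0x2 = break
 * on any access and 0x3 = break on reads, matching the switch on watchpoint->rw.
 * example_dbcon_pack() is a hypothetical helper that restates the arithmetic.
 */
#if 0
u32 example_dbcon_pack(u32 dbcon_value, int comparator, u8 enable)
{
	int shift = (comparator == 0) ? 0 : 2;	/* DBR0 -> bits 1:0, DBR1 -> bits 3:2 */
	dbcon_value &= ~(0x3 << shift);		/* clear the previous enable field */
	dbcon_value |= (enable & 0x3) << shift;	/* install the new enable value */
	return dbcon_value;
}
#endif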
2408
2409 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2410 {
2411 armv4_5_common_t *armv4_5 = target->arch_info;
2412 xscale_common_t *xscale = armv4_5->arch_info;
2413
2414 if (target->state != TARGET_HALTED)
2415 {
2416 LOG_WARNING("target not halted");
2417 return ERROR_TARGET_NOT_HALTED;
2418 }
2419
2420 if (xscale->dbr_available < 1)
2421 {
2422 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2423 }
2424
2425 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2426 {
2427 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2428 }
2429
2430 xscale->dbr_available--;
2431
2432 return ERROR_OK;
2433 }
2434
2435 int xscale_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2436 {
2437 armv4_5_common_t *armv4_5 = target->arch_info;
2438 xscale_common_t *xscale = armv4_5->arch_info;
2439 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2440 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2441
2442 if (target->state != TARGET_HALTED)
2443 {
2444 LOG_WARNING("target not halted");
2445 return ERROR_TARGET_NOT_HALTED;
2446 }
2447
2448 if (!watchpoint->set)
2449 {
2450 LOG_WARNING("breakpoint not set");
2451 return ERROR_OK;
2452 }
2453
2454 if (watchpoint->set == 1)
2455 {
2456 dbcon_value &= ~0x3;
2457 xscale_set_reg_u32(dbcon, dbcon_value);
2458 xscale->dbr0_used = 0;
2459 }
2460 else if (watchpoint->set == 2)
2461 {
2462 dbcon_value &= ~0xc;
2463 xscale_set_reg_u32(dbcon, dbcon_value);
2464 xscale->dbr1_used = 0;
2465 }
2466 watchpoint->set = 0;
2467
2468 return ERROR_OK;
2469 }
2470
2471 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2472 {
2473 armv4_5_common_t *armv4_5 = target->arch_info;
2474 xscale_common_t *xscale = armv4_5->arch_info;
2475
2476 if (target->state != TARGET_HALTED)
2477 {
2478 LOG_WARNING("target not halted");
2479 return ERROR_TARGET_NOT_HALTED;
2480 }
2481
2482 if (watchpoint->set)
2483 {
2484 xscale_unset_watchpoint(target, watchpoint);
2485 }
2486
2487 xscale->dbr_available++;
2488
2489 return ERROR_OK;
2490 }
2491
2492 void xscale_enable_watchpoints(struct target_s *target)
2493 {
2494 watchpoint_t *watchpoint = target->watchpoints;
2495
2496 while (watchpoint)
2497 {
2498 if (watchpoint->set == 0)
2499 xscale_set_watchpoint(target, watchpoint);
2500 watchpoint = watchpoint->next;
2501 }
2502 }
2503
2504 void xscale_enable_breakpoints(struct target_s *target)
2505 {
2506 breakpoint_t *breakpoint = target->breakpoints;
2507
2508 /* set any pending breakpoints */
2509 while (breakpoint)
2510 {
2511 if (breakpoint->set == 0)
2512 xscale_set_breakpoint(target, breakpoint);
2513 breakpoint = breakpoint->next;
2514 }
2515 }
2516
2517 int xscale_get_reg(reg_t *reg)
2518 {
2519 xscale_reg_t *arch_info = reg->arch_info;
2520 target_t *target = arch_info->target;
2521 armv4_5_common_t *armv4_5 = target->arch_info;
2522 xscale_common_t *xscale = armv4_5->arch_info;
2523
2524 /* DCSR, TX and RX are accessible via JTAG */
2525 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2526 {
2527 return xscale_read_dcsr(arch_info->target);
2528 }
2529 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2530 {
2531 /* 1 = consume register content */
2532 return xscale_read_tx(arch_info->target, 1);
2533 }
2534 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2535 {
2536 /* can't read from RX register (host -> debug handler) */
2537 return ERROR_OK;
2538 }
2539 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2540 {
2541 /* can't (explicitly) read from TXRXCTRL register */
2542 return ERROR_OK;
2543 }
2544 else /* Other DBG registers have to be transferred by the debug handler */
2545 {
2546 /* send CP read request (command 0x40) */
2547 xscale_send_u32(target, 0x40);
2548
2549 /* send CP register number */
2550 xscale_send_u32(target, arch_info->dbg_handler_number);
2551
2552 /* read register value */
2553 xscale_read_tx(target, 1);
2554 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2555
2556 reg->dirty = 0;
2557 reg->valid = 1;
2558 }
2559
2560 return ERROR_OK;
2561 }
2562
2563 int xscale_set_reg(reg_t *reg, u8* buf)
2564 {
2565 xscale_reg_t *arch_info = reg->arch_info;
2566 target_t *target = arch_info->target;
2567 armv4_5_common_t *armv4_5 = target->arch_info;
2568 xscale_common_t *xscale = armv4_5->arch_info;
2569 u32 value = buf_get_u32(buf, 0, 32);
2570
2571 /* DCSR, TX and RX are accessible via JTAG */
2572 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2573 {
2574 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2575 return xscale_write_dcsr(arch_info->target, -1, -1);
2576 }
2577 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2578 {
2579 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2580 return xscale_write_rx(arch_info->target);
2581 }
2582 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2583 {
2584 /* can't write to TX register (debug-handler -> host) */
2585 return ERROR_OK;
2586 }
2587 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2588 {
2589 /* can't (explicitly) write to TXRXCTRL register */
2590 return ERROR_OK;
2591 }
2592 else /* Other DBG registers have to be transferred by the debug handler */
2593 {
2594 /* send CP write request (command 0x41) */
2595 xscale_send_u32(target, 0x41);
2596
2597 /* send CP register number */
2598 xscale_send_u32(target, arch_info->dbg_handler_number);
2599
2600 /* send CP register value */
2601 xscale_send_u32(target, value);
2602 buf_set_u32(reg->value, 0, 32, value);
2603 }
2604
2605 return ERROR_OK;
2606 }
2607
2608 /* convenience wrapper to access XScale specific registers */
2609 int xscale_set_reg_u32(reg_t *reg, u32 value)
2610 {
2611 u8 buf[4];
2612
2613 buf_set_u32(buf, 0, 32, value);
2614
2615 return xscale_set_reg(reg, buf);
2616 }
2617
2618 int xscale_write_dcsr_sw(target_t *target, u32 value)
2619 {
2620 /* get pointers to arch-specific information */
2621 armv4_5_common_t *armv4_5 = target->arch_info;
2622 xscale_common_t *xscale = armv4_5->arch_info;
2623 reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2624 xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
2625
2626 /* send CP write request (command 0x41) */
2627 xscale_send_u32(target, 0x41);
2628
2629 /* send CP register number */
2630 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2631
2632 /* send CP register value */
2633 xscale_send_u32(target, value);
2634 buf_set_u32(dcsr->value, 0, 32, value);
2635
2636 return ERROR_OK;
2637 }
2638
2639 int xscale_read_trace(target_t *target)
2640 {
2641 /* get pointers to arch-specific information */
2642 armv4_5_common_t *armv4_5 = target->arch_info;
2643 xscale_common_t *xscale = armv4_5->arch_info;
2644 xscale_trace_data_t **trace_data_p;
2645
2646 /* 258 words from debug handler
2647 * 256 trace buffer entries
2648 * 2 checkpoint addresses
2649 */
2650 u32 trace_buffer[258];
2651 int is_address[256];
2652 int i, j;
2653
2654 if (target->state != TARGET_HALTED)
2655 {
2656 LOG_WARNING("target must be stopped to read trace data");
2657 return ERROR_TARGET_NOT_HALTED;
2658 }
2659
2660 /* send read trace buffer command (command 0x61) */
2661 xscale_send_u32(target, 0x61);
2662
2663 /* receive trace buffer content */
2664 xscale_receive(target, trace_buffer, 258);
2665
2666 /* parse buffer backwards to identify address entries */
2667 for (i = 255; i >= 0; i--)
2668 {
2669 is_address[i] = 0;
2670 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2671 ((trace_buffer[i] & 0xf0) == 0xd0))
2672 {
2673 if (i > 0) /* don't run off the start of the buffer */
2674 is_address[--i] = 1;
2675 if (i > 0)
2676 is_address[--i] = 1;
2677 if (i > 0)
2678 is_address[--i] = 1;
2679 if (i > 0)
2680 is_address[--i] = 1;
2681 }
2682 }
2683
2684
2685 /* search first non-zero entry */
2686 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2687 ;
2688
2689 if (j == 256)
2690 {
2691 LOG_DEBUG("no trace data collected");
2692 return ERROR_XSCALE_NO_TRACE_DATA;
2693 }
2694
2695 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2696 ;
2697
2698 *trace_data_p = malloc(sizeof(xscale_trace_data_t));
2699 (*trace_data_p)->next = NULL;
2700 (*trace_data_p)->chkpt0 = trace_buffer[256];
2701 (*trace_data_p)->chkpt1 = trace_buffer[257];
2702 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2703 (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
2704 (*trace_data_p)->depth = 256 - j;
2705
2706 for (i = j; i < 256; i++)
2707 {
2708 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2709 if (is_address[i])
2710 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2711 else
2712 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2713 }
2714
2715 return ERROR_OK;
2716 }
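/* Hedged summary of the trace buffer layout parsed above: each entry is one
 * byte whose upper nibble is the message type (0x0-0x7 exception, 0x8 direct
 * branch, 0x9 indirect branch, 0xc/0xd checkpointed branches, 0xf roll-over)
 * and whose lower nibble is an incremental instruction count.  An indirect or
 * checkpointed indirect branch message is preceded in the buffer by four
 * target-address bytes (most significant first), which is why the backwards
 * scan marks those entries as addresses instead of messages.
 */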
2717
2718 int xscale_read_instruction(target_t *target, arm_instruction_t *instruction)
2719 {
2720 /* get pointers to arch-specific information */
2721 armv4_5_common_t *armv4_5 = target->arch_info;
2722 xscale_common_t *xscale = armv4_5->arch_info;
2723 int i;
2724 int section = -1;
2725 u32 size_read;
2726 u32 opcode;
2727 int retval;
2728
2729 if (!xscale->trace.image)
2730 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2731
2732 /* search for the section the current instruction belongs to */
2733 for (i = 0; i < xscale->trace.image->num_sections; i++)
2734 {
2735 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2736 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2737 {
2738 section = i;
2739 break;
2740 }
2741 }
2742
2743 if (section == -1)
2744 {
2745 /* current instruction couldn't be found in the image */
2746 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2747 }
2748
2749 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2750 {
2751 u8 buf[4];
2752 if ((retval = image_read_section(xscale->trace.image, section,
2753 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2754 4, buf, &size_read)) != ERROR_OK)
2755 {
2756 LOG_ERROR("error while reading instruction: %i", retval);
2757 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2758 }
2759 opcode = target_buffer_get_u32(target, buf);
2760 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2761 }
2762 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2763 {
2764 u8 buf[2];
2765 if ((retval = image_read_section(xscale->trace.image, section,
2766 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2767 2, buf, &size_read)) != ERROR_OK)
2768 {
2769 LOG_ERROR("error while reading instruction: %i", retval);
2770 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2771 }
2772 opcode = target_buffer_get_u16(target, buf);
2773 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2774 }
2775 else
2776 {
2777 LOG_ERROR("BUG: unknown core state encountered");
2778 exit(-1);
2779 }
2780
2781 return ERROR_OK;
2782 }
2783
2784 int xscale_branch_address(xscale_trace_data_t *trace_data, int i, u32 *target)
2785 {
2786 /* if there are less than four entries prior to the indirect branch message
2787 * we can't extract the address */
2788 if (i < 4)
2789 {
2790 return -1;
2791 }
2792
2793 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2794 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2795
2796 return 0;
2797 }
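/* Worked example (hedged, values made up purely for illustration): with
 * entries[i-4].data = 0xa0, entries[i-3].data = 0x01, entries[i-2].data = 0x23
 * and entries[i-1].data = 0x44, the reconstruction above yields the indirect
 * branch target shown below.
 */
#if 0
u32 example_target = 0x44 | (0x23 << 8) | (0x01 << 16) | (0xa0 << 24);	/* == 0xa0012344 */
#endif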
2798
2799 int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
2800 {
2801 /* get pointers to arch-specific information */
2802 armv4_5_common_t *armv4_5 = target->arch_info;
2803 xscale_common_t *xscale = armv4_5->arch_info;
2804 int next_pc_ok = 0;
2805 u32 next_pc = 0x0;
2806 xscale_trace_data_t *trace_data = xscale->trace.data;
2807 int retval;
2808
2809 while (trace_data)
2810 {
2811 int i, chkpt;
2812 int rollover;
2813 int branch;
2814 int exception;
2815 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2816
2817 chkpt = 0;
2818 rollover = 0;
2819
2820 for (i = 0; i < trace_data->depth; i++)
2821 {
2822 next_pc_ok = 0;
2823 branch = 0;
2824 exception = 0;
2825
2826 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2827 continue;
2828
2829 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2830 {
2831 case 0: /* Exceptions */
2832 case 1:
2833 case 2:
2834 case 3:
2835 case 4:
2836 case 5:
2837 case 6:
2838 case 7:
2839 exception = (trace_data->entries[i].data & 0x70) >> 4;
2840 next_pc_ok = 1;
2841 next_pc = (trace_data->entries[i].data & 0xf0) >> 2; /* exception number * 4, i.e. the vector address */
2842 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2843 break;
2844 case 8: /* Direct Branch */
2845 branch = 1;
2846 break;
2847 case 9: /* Indirect Branch */
2848 branch = 1;
2849 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2850 {
2851 next_pc_ok = 1;
2852 }
2853 break;
2854 case 13: /* Checkpointed Indirect Branch */
2855 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2856 {
2857 next_pc_ok = 1;
2858 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2859 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2860 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2861 }
2862 /* explicit fall-through */
2863 case 12: /* Checkpointed Direct Branch */
2864 branch = 1;
2865 if (chkpt == 0)
2866 {
2867 next_pc_ok = 1;
2868 next_pc = trace_data->chkpt0;
2869 chkpt++;
2870 }
2871 else if (chkpt == 1)
2872 {
2873 next_pc_ok = 1;
2874 next_pc = trace_data->chkpt1;
2875 chkpt++;
2876 }
2877 else
2878 {
2879 LOG_WARNING("more than two checkpointed branches encountered");
2880 }
2881 break;
2882 case 15: /* Roll-over */
2883 rollover++;
2884 continue;
2885 default: /* Reserved */
2886 command_print(cmd_ctx, "--- reserved trace message ---");
2887 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2888 return ERROR_OK;
2889 }
2890
2891 if (xscale->trace.pc_ok)
2892 {
2893 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2894 arm_instruction_t instruction;
2895
2896 if ((exception == 6) || (exception == 7))
2897 {
2898 /* IRQ or FIQ exception, no instruction executed */
2899 executed -= 1;
2900 }
2901
2902 while (executed-- >= 0)
2903 {
2904 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2905 {
2906 /* can't continue tracing with no image available */
2907 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2908 {
2909 return retval;
2910 }
2911 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2912 {
2913 /* TODO: handle incomplete images */
2914 }
2915 }
2916
2917 /* a precise abort on a load to the PC is included in the incremental
2918 * word count, other instructions causing data aborts are not included
2919 */
2920 if ((executed == 0) && (exception == 4)
2921 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2922 {
2923 if ((instruction.type == ARM_LDM)
2924 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2925 {
2926 executed--;
2927 }
2928 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2929 && (instruction.info.load_store.Rd != 15))
2930 {
2931 executed--;
2932 }
2933 }
2934
2935 /* only the last instruction executed
2936 * (the one that caused the control flow change)
2937 * could be a taken branch
2938 */
2939 if (((executed == -1) && (branch == 1)) &&
2940 (((instruction.type == ARM_B) ||
2941 (instruction.type == ARM_BL) ||
2942 (instruction.type == ARM_BLX)) &&
2943 (instruction.info.b_bl_bx_blx.target_address != -1)))
2944 {
2945 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2946 }
2947 else
2948 {
2949 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2950 }
2951 command_print(cmd_ctx, "%s", instruction.text);
2952 }
2953
2954 rollover = 0;
2955 }
2956
2957 if (next_pc_ok)
2958 {
2959 xscale->trace.current_pc = next_pc;
2960 xscale->trace.pc_ok = 1;
2961 }
2962 }
2963
2964 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2965 {
2966 arm_instruction_t instruction;
2967 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2968 {
2969 /* can't continue tracing with no image available */
2970 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2971 {
2972 return retval;
2973 }
2974 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2975 {
2976 /* TODO: handle incomplete images */
2977 }
2978 }
2979 command_print(cmd_ctx, "%s", instruction.text);
2980 }
2981
2982 trace_data = trace_data->next;
2983 }
2984
2985 return ERROR_OK;
2986 }
2987
2988 void xscale_build_reg_cache(target_t *target)
2989 {
2990 /* get pointers to arch-specific information */
2991 armv4_5_common_t *armv4_5 = target->arch_info;
2992 xscale_common_t *xscale = armv4_5->arch_info;
2993
2994 reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
2995 xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
2996 int i;
2997 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
2998
2999 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
3000 armv4_5->core_cache = (*cache_p);
3001
3002 /* register a register arch-type for XScale dbg registers only once */
3003 if (xscale_reg_arch_type == -1)
3004 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
3005
3006 (*cache_p)->next = malloc(sizeof(reg_cache_t));
3007 cache_p = &(*cache_p)->next;
3008
3009 /* fill in values for the xscale reg cache */
3010 (*cache_p)->name = "XScale registers";
3011 (*cache_p)->next = NULL;
3012 (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
3013 (*cache_p)->num_regs = num_regs;
3014
3015 for (i = 0; i < num_regs; i++)
3016 {
3017 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
3018 (*cache_p)->reg_list[i].value = calloc(4, 1);
3019 (*cache_p)->reg_list[i].dirty = 0;
3020 (*cache_p)->reg_list[i].valid = 0;
3021 (*cache_p)->reg_list[i].size = 32;
3022 (*cache_p)->reg_list[i].bitfield_desc = NULL;
3023 (*cache_p)->reg_list[i].num_bitfields = 0;
3024 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
3025 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
3026 arch_info[i] = xscale_reg_arch_info[i];
3027 arch_info[i].target = target;
3028 }
3029
3030 xscale->reg_cache = (*cache_p);
3031 }
3032
3033 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target)
3034 {
3035 return ERROR_OK;
3036 }
3037
3038 int xscale_quit()
3039 {
3040
3041 return ERROR_OK;
3042 }
3043
3044 int xscale_init_arch_info(target_t *target, xscale_common_t *xscale, int chain_pos, char *variant)
3045 {
3046 armv4_5_common_t *armv4_5;
3047 u32 high_reset_branch, low_reset_branch;
3048 int i;
3049
3050 armv4_5 = &xscale->armv4_5_common;
3051
3052 /* store architecture specific data (none so far) */
3053 xscale->arch_info = NULL;
3054 xscale->common_magic = XSCALE_COMMON_MAGIC;
3055
3056 /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
3057 xscale->variant = strdup(variant);
3058
3059 /* prepare JTAG information for the new target */
3060 xscale->jtag_info.chain_pos = chain_pos;
3061
3062 xscale->jtag_info.dbgrx = 0x02;
3063 xscale->jtag_info.dbgtx = 0x10;
3064 xscale->jtag_info.dcsr = 0x09;
3065 xscale->jtag_info.ldic = 0x07;
3066
3067 if ((strcmp(xscale->variant, "pxa250") == 0) ||
3068 (strcmp(xscale->variant, "pxa255") == 0) ||
3069 (strcmp(xscale->variant, "pxa26x") == 0))
3070 {
3071 xscale->jtag_info.ir_length = 5;
3072 }
3073 else if ((strcmp(xscale->variant, "pxa27x") == 0) ||
3074 (strcmp(xscale->variant, "ixp42x") == 0) ||
3075 (strcmp(xscale->variant, "ixp45x") == 0) ||
3076 (strcmp(xscale->variant, "ixp46x") == 0))
3077 {
3078 xscale->jtag_info.ir_length = 7;
3079 }
3080
3081 /* the debug handler isn't installed (and thus not running) at this time */
3082 xscale->handler_installed = 0;
3083 xscale->handler_running = 0;
3084 xscale->handler_address = 0xfe000800;
3085
3086 /* clear the vectors we keep locally for reference */
3087 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3088 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3089
3090 /* no user-specified vectors have been configured yet */
3091 xscale->static_low_vectors_set = 0x0;
3092 xscale->static_high_vectors_set = 0x0;
3093
3094 /* calculate branches to debug handler */
3095 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3096 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
3097
3098 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3099 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3100
3101 for (i = 1; i <= 7; i++)
3102 {
3103 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3104 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3105 }
3106
3107 /* 64kB aligned region used for DCache cleaning */
3108 xscale->cache_clean_address = 0xfffe0000;
3109
3110 xscale->hold_rst = 0;
3111 xscale->external_debug_break = 0;
3112
3113 xscale->force_hw_bkpts = 1;
3114
3115 xscale->ibcr_available = 2;
3116 xscale->ibcr0_used = 0;
3117 xscale->ibcr1_used = 0;
3118
3119 xscale->dbr_available = 2;
3120 xscale->dbr0_used = 0;
3121 xscale->dbr1_used = 0;
3122
3123 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3124 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3125
3126 xscale->vector_catch = 0x1;
3127
3128 xscale->trace.capture_status = TRACE_IDLE;
3129 xscale->trace.data = NULL;
3130 xscale->trace.image = NULL;
3131 xscale->trace.buffer_enabled = 0;
3132 xscale->trace.buffer_fill = 0;
3133
3134 /* prepare ARMv4/5 specific information */
3135 armv4_5->arch_info = xscale;
3136 armv4_5->read_core_reg = xscale_read_core_reg;
3137 armv4_5->write_core_reg = xscale_write_core_reg;
3138 armv4_5->full_context = xscale_full_context;
3139
3140 armv4_5_init_arch_info(target, armv4_5);
3141
3142 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3143 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3144 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3145 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3146 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3147 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3148 xscale->armv4_5_mmu.has_tiny_pages = 1;
3149 xscale->armv4_5_mmu.mmu_enabled = 0;
3150
3151 xscale->fast_memory_access = 0;
3152
3153 return ERROR_OK;
3154 }
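/* Hedged sketch: the reset vectors computed above use the ARM B encoding, i.e.
 * a signed 24-bit word offset relative to the vector address plus 8 bytes of
 * pipeline prefetch, which is where the "- 0x8" and ">> 2" come from.
 * example_branch_offset() is a hypothetical helper restating that arithmetic.
 */
#if 0
u32 example_branch_offset(u32 vector_address, u32 destination)
{
	/* word offset, adjusted for the two-instruction (8 byte) prefetch */
	return ((destination - vector_address - 0x8) >> 2) & 0xffffff;
}
#endif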
3155
3156 /* target xscale <endianness> <startup_mode> <chain_pos> <variant> */
3157 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target)
3158 {
3159 int chain_pos;
3160 char *variant = NULL;
3161 xscale_common_t *xscale = malloc(sizeof(xscale_common_t));
3162 memset(xscale, 0, sizeof(*xscale));
3163
3164 if (argc < 5)
3165 {
3166 LOG_ERROR("'target xscale' requires four arguments: <endianess> <startup_mode> <chain_pos> <variant>");
3167 return ERROR_OK;
3168 }
3169
3170 chain_pos = strtoul(args[3], NULL, 0);
3171
3172 variant = args[4];
3173
3174 xscale_init_arch_info(target, xscale, chain_pos, variant);
3175 xscale_build_reg_cache(target);
3176
3177 return ERROR_OK;
3178 }
3179
3180 int xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3181 {
3182 target_t *target = NULL;
3183 armv4_5_common_t *armv4_5;
3184 xscale_common_t *xscale;
3185
3186 u32 handler_address;
3187
3188 if (argc < 2)
3189 {
3190 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3191 return ERROR_OK;
3192 }
3193
3194 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3195 {
3196 LOG_ERROR("no target '%s' configured", args[0]);
3197 return ERROR_OK;
3198 }
3199
3200 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3201 {
3202 return ERROR_OK;
3203 }
3204
3205 handler_address = strtoul(args[1], NULL, 0);
3206
3207 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3208 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3209 {
3210 xscale->handler_address = handler_address;
3211 }
3212 else
3213 {
3214 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3215 }
3216
3217 return ERROR_OK;
3218 }
3219
3220 int xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3221 {
3222 target_t *target = NULL;
3223 armv4_5_common_t *armv4_5;
3224 xscale_common_t *xscale;
3225
3226 u32 cache_clean_address;
3227
3228 if (argc < 2)
3229 {
3230 LOG_ERROR("'xscale cache_clean_address <target#> <address>' command takes two required operands");
3231 return ERROR_OK;
3232 }
3233
3234 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3235 {
3236 LOG_ERROR("no target '%s' configured", args[0]);
3237 return ERROR_OK;
3238 }
3239
3240 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3241 {
3242 return ERROR_OK;
3243 }
3244
3245 cache_clean_address = strtoul(args[1], NULL, 0);
3246
3247 if (cache_clean_address & 0xffff)
3248 {
3249 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3250 }
3251 else
3252 {
3253 xscale->cache_clean_address = cache_clean_address;
3254 }
3255
3256 return ERROR_OK;
3257 }
3258
3259 int xscale_handle_cache_info_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3260 {
3261 target_t *target = get_current_target(cmd_ctx);
3262 armv4_5_common_t *armv4_5;
3263 xscale_common_t *xscale;
3264
3265 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3266 {
3267 return ERROR_OK;
3268 }
3269
3270 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3271 }
3272
3273 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical)
3274 {
3275 armv4_5_common_t *armv4_5;
3276 xscale_common_t *xscale;
3277 int retval;
3278 int type;
3279 u32 cb;
3280 int domain;
3281 u32 ap;
3282
3283
3284 if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
3285 {
3286 return retval;
3287 }
3288 u32 ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3289 if (type == -1)
3290 {
3291 return ret;
3292 }
3293 *physical = ret;
3294 return ERROR_OK;
3295 }
3296
3297 static int xscale_mmu(struct target_s *target, int *enabled)
3298 {
3299 armv4_5_common_t *armv4_5 = target->arch_info;
3300 xscale_common_t *xscale = armv4_5->arch_info;
3301
3302 if (target->state != TARGET_HALTED)
3303 {
3304 LOG_ERROR("Target not halted");
3305 return ERROR_TARGET_INVALID;
3306 }
3307 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3308 return ERROR_OK;
3309 }
3310
3311
3312 int xscale_handle_mmu_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3313 {
3314 target_t *target = get_current_target(cmd_ctx);
3315 armv4_5_common_t *armv4_5;
3316 xscale_common_t *xscale;
3317
3318 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3319 {
3320 return ERROR_OK;
3321 }
3322
3323 if (target->state != TARGET_HALTED)
3324 {
3325 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3326 return ERROR_OK;
3327 }
3328
3329 if (argc >= 1)
3330 {
3331 if (strcmp("enable", args[0]) == 0)
3332 {
3333 xscale_enable_mmu_caches(target, 1, 0, 0);
3334 xscale->armv4_5_mmu.mmu_enabled = 1;
3335 }
3336 else if (strcmp("disable", args[0]) == 0)
3337 {
3338 xscale_disable_mmu_caches(target, 1, 0, 0);
3339 xscale->armv4_5_mmu.mmu_enabled = 0;
3340 }
3341 }
3342
3343 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3344
3345 return ERROR_OK;
3346 }
3347
3348 int xscale_handle_idcache_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3349 {
3350 target_t *target = get_current_target(cmd_ctx);
3351 armv4_5_common_t *armv4_5;
3352 xscale_common_t *xscale;
3353 int icache = 0, dcache = 0;
3354
3355 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3356 {
3357 return ERROR_OK;
3358 }
3359
3360 if (target->state != TARGET_HALTED)
3361 {
3362 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3363 return ERROR_OK;
3364 }
3365
3366 if (strcmp(cmd, "icache") == 0)
3367 icache = 1;
3368 else if (strcmp(cmd, "dcache") == 0)
3369 dcache = 1;
3370
3371 if (argc >= 1)
3372 {
3373 if (strcmp("enable", args[0]) == 0)
3374 {
3375 xscale_enable_mmu_caches(target, 0, dcache, icache);
3376
3377 if (icache)
3378 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3379 else if (dcache)
3380 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3381 }
3382 else if (strcmp("disable", args[0]) == 0)
3383 {
3384 xscale_disable_mmu_caches(target, 0, dcache, icache);
3385
3386 if (icache)
3387 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3388 else if (dcache)
3389 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3390 }
3391 }
3392
3393 if (icache)
3394 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3395
3396 if (dcache)
3397 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3398
3399 return ERROR_OK;
3400 }
3401
3402 int xscale_handle_vector_catch_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3403 {
3404 target_t *target = get_current_target(cmd_ctx);
3405 armv4_5_common_t *armv4_5;
3406 xscale_common_t *xscale;
3407
3408 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3409 {
3410 return ERROR_OK;
3411 }
3412
3413 if (argc < 1)
3414 {
3415 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3416 }
3417 else
3418 {
3419 xscale->vector_catch = strtoul(args[0], NULL, 0);
3420 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3421 xscale_write_dcsr(target, -1, -1);
3422 }
3423
3424 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3425
3426 return ERROR_OK;
3427 }
3428
3429 int xscale_handle_force_hw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3430 {
3431 target_t *target = get_current_target(cmd_ctx);
3432 armv4_5_common_t *armv4_5;
3433 xscale_common_t *xscale;
3434
3435 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3436 {
3437 return ERROR_OK;
3438 }
3439
3440 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3441 {
3442 xscale->force_hw_bkpts = 1;
3443 }
3444 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3445 {
3446 xscale->force_hw_bkpts = 0;
3447 }
3448 else
3449 {
3450 command_print(cmd_ctx, "usage: xscale force_hw_bkpts <enable|disable>");
3451 }
3452
3453 command_print(cmd_ctx, "force hardware breakpoints %s", (xscale->force_hw_bkpts) ? "enabled" : "disabled");
3454
3455 return ERROR_OK;
3456 }
3457
3458 int xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3459 {
3460 target_t *target = get_current_target(cmd_ctx);
3461 armv4_5_common_t *armv4_5;
3462 xscale_common_t *xscale;
3463 u32 dcsr_value;
3464
3465 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3466 {
3467 return ERROR_OK;
3468 }
3469
3470 if (target->state != TARGET_HALTED)
3471 {
3472 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3473 return ERROR_OK;
3474 }
3475
3476 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3477 {
3478 xscale_trace_data_t *td, *next_td;
3479 xscale->trace.buffer_enabled = 1;
3480
3481 /* free old trace data */
3482 td = xscale->trace.data;
3483 while (td)
3484 {
3485 next_td = td->next;
3486
3487 if (td->entries)
3488 free(td->entries);
3489 free(td);
3490 td = next_td;
3491 }
3492 xscale->trace.data = NULL;
3493 }
3494 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3495 {
3496 xscale->trace.buffer_enabled = 0;
3497 }
3498
3499 if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
3500 {
3501 if (argc >= 3)
3502 xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
3503 else
3504 xscale->trace.buffer_fill = 1;
3505 }
3506 else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
3507 {
3508 xscale->trace.buffer_fill = -1;
3509 }
3510
3511 if (xscale->trace.buffer_enabled)
3512 {
3513 /* if we enable the trace buffer in fill-once
3514 * mode we know the address of the first instruction */
3515 xscale->trace.pc_ok = 1;
3516 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3517 }
3518 else
3519 {
3520 /* otherwise the address is unknown, and we have no known good PC */
3521 xscale->trace.pc_ok = 0;
3522 }
3523
3524 command_print(cmd_ctx, "trace buffer %s (%s)",
3525 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3526 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3527
3528 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3529 if (xscale->trace.buffer_fill >= 0)
3530 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3531 else
3532 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3533
3534 return ERROR_OK;
3535 }
3536
3537 int xscale_handle_trace_image_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3538 {
3539 target_t *target;
3540 armv4_5_common_t *armv4_5;
3541 xscale_common_t *xscale;
3542
3543 if (argc < 1)
3544 {
3545 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3546 return ERROR_OK;
3547 }
3548
3549 target = get_current_target(cmd_ctx);
3550
3551 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3552 {
3553 return ERROR_OK;
3554 }
3555
3556 if (xscale->trace.image)
3557 {
3558 image_close(xscale->trace.image);
3559 free(xscale->trace.image);
3560 command_print(cmd_ctx, "previously loaded image found and closed");
3561 }
3562
3563 xscale->trace.image = malloc(sizeof(image_t));
3564 xscale->trace.image->base_address_set = 0;
3565 xscale->trace.image->start_address_set = 0;
3566
3567 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3568 if (argc >= 2)
3569 {
3570 xscale->trace.image->base_address_set = 1;
3571 xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
3572 }
3573 else
3574 {
3575 xscale->trace.image->base_address_set = 0;
3576 }
3577
3578 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
3579 {
3580 free(xscale->trace.image);
3581 xscale->trace.image = NULL;
3582 return ERROR_OK;
3583 }
3584
3585 return ERROR_OK;
3586 }
3587
3588 int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3589 {
3590 target_t *target = get_current_target(cmd_ctx);
3591 armv4_5_common_t *armv4_5;
3592 xscale_common_t *xscale;
3593 xscale_trace_data_t *trace_data;
3594 fileio_t file;
3595
3596 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3597 {
3598 return ERROR_OK;
3599 }
3600
3601 if (target->state != TARGET_HALTED)
3602 {
3603 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3604 return ERROR_OK;
3605 }
3606
3607 if (argc < 1)
3608 {
3609 command_print(cmd_ctx, "usage: xscale dump_trace <file>");
3610 return ERROR_OK;
3611 }
3612
3613 trace_data = xscale->trace.data;
3614
3615 if (!trace_data)
3616 {
3617 command_print(cmd_ctx, "no trace data collected");
3618 return ERROR_OK;
3619 }
3620
3621 if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3622 {
3623 return ERROR_OK;
3624 }
3625
3626 while (trace_data)
3627 {
3628 int i;
3629
3630 fileio_write_u32(&file, trace_data->chkpt0);
3631 fileio_write_u32(&file, trace_data->chkpt1);
3632 fileio_write_u32(&file, trace_data->last_instruction);
3633 fileio_write_u32(&file, trace_data->depth);
3634
3635 for (i = 0; i < trace_data->depth; i++)
3636 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3637
3638 trace_data = trace_data->next;
3639 }
3640
3641 fileio_close(&file);
3642
3643 return ERROR_OK;
3644 }
3645
3646 int xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3647 {
3648 target_t *target = get_current_target(cmd_ctx);
3649 armv4_5_common_t *armv4_5;
3650 xscale_common_t *xscale;
3651
3652 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3653 {
3654 return ERROR_OK;
3655 }
3656
3657 xscale_analyze_trace(target, cmd_ctx);
3658
3659 return ERROR_OK;
3660 }
3661
3662 int xscale_handle_cp15(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3663 {
3664 target_t *target = get_current_target(cmd_ctx);
3665 armv4_5_common_t *armv4_5;
3666 xscale_common_t *xscale;
3667
3668 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3669 {
3670 return ERROR_OK;
3671 }
3672
3673 if (target->state != TARGET_HALTED)
3674 {
3675 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3676 return ERROR_OK;
3677 }
3678 u32 reg_no = 0;
3679 reg_t *reg = NULL;
3680 if(argc > 0)
3681 {
3682 reg_no = strtoul(args[0], NULL, 0);
3683 /*translate from xscale cp15 register no to openocd register*/
3684 switch(reg_no)
3685 {
3686 case 0:
3687 reg_no = XSCALE_MAINID;
3688 break;
3689 case 1:
3690 reg_no = XSCALE_CTRL;
3691 break;
3692 case 2:
3693 reg_no = XSCALE_TTB;
3694 break;
3695 case 3:
3696 reg_no = XSCALE_DAC;
3697 break;
3698 case 5:
3699 reg_no = XSCALE_FSR;
3700 break;
3701 case 6:
3702 reg_no = XSCALE_FAR;
3703 break;
3704 case 13:
3705 reg_no = XSCALE_PID;
3706 break;
3707 case 15:
3708 reg_no = XSCALE_CPACCESS;
3709 break;
3710 default:
3711 command_print(cmd_ctx, "invalid register number");
3712 return ERROR_INVALID_ARGUMENTS;
3713 }
3714 reg = &xscale->reg_cache->reg_list[reg_no];
3715
3716 }
3717 if(argc == 1)
3718 {
3719 u32 value;
3720
3721 /* read cp15 control register */
3722 xscale_get_reg(reg);
3723 value = buf_get_u32(reg->value, 0, 32);
3724 command_print(cmd_ctx, "%s (/%i): 0x%x", reg->name, reg->size, value);
3725 }
3726 else if(argc == 2)
3727 {
3728
3729 u32 value = strtoul(args[1], NULL, 0);
3730
3731 /* send CP write request (command 0x41) */
3732 xscale_send_u32(target, 0x41);
3733
3734 /* send CP register number */
3735 xscale_send_u32(target, reg_no);
3736
3737 /* send CP register value */
3738 xscale_send_u32(target, value);
3739
3740 /* execute cpwait to ensure outstanding operations complete */
3741 xscale_send_u32(target, 0x53);
3742 }
3743 else
3744 {
3745 command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
3746 }
3747
3748 return ERROR_OK;
3749 }
3750
3751 int handle_xscale_fast_memory_access_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3752 {
3753 target_t *target = get_current_target(cmd_ctx);
3754 armv4_5_common_t *armv4_5;
3755 xscale_common_t *xscale;
3756
3757 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3758 {
3759 return ERROR_OK;
3760 }
3761
3762 if (argc == 1)
3763 {
3764 if (strcmp("enable", args[0]) == 0)
3765 {
3766 xscale->fast_memory_access = 1;
3767 }
3768 else if (strcmp("disable", args[0]) == 0)
3769 {
3770 xscale->fast_memory_access = 0;
3771 }
3772 else
3773 {
3774 return ERROR_COMMAND_SYNTAX_ERROR;
3775 }
3776 } else if (argc!=0)
3777 {
3778 return ERROR_COMMAND_SYNTAX_ERROR;
3779 }
3780
3781 command_print(cmd_ctx, "fast memory access is %s", (xscale->fast_memory_access) ? "enabled" : "disabled");
3782
3783 return ERROR_OK;
3784 }
3785
3786 int xscale_register_commands(struct command_context_s *cmd_ctx)
3787 {
3788 command_t *xscale_cmd;
3789
3790 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3791
3792 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3793 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3794
3795 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3796 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3797 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3798 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3799
3800 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_idcache_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
3801
3802 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable|disable> ['fill' [n]|'wrap']");
3803
3804 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3805 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3806 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3807 COMMAND_EXEC, "load image from <file> [base address]");
3808
3809 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3810 register_command(cmd_ctx, xscale_cmd, "fast_memory_access", handle_xscale_fast_memory_access_command,
3811 COMMAND_ANY, "use fast memory accesses instead of slower but potentially unsafe slow accesses <enable|disable>");
3812
3813
3814 armv4_5_register_commands(cmd_ctx);
3815
3816 return ERROR_OK;
3817 }
