explode tcl_target_func into many handlers
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2009 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 ***************************************************************************/
32 #ifdef HAVE_CONFIG_H
33 #include "config.h"
34 #endif
35
36 #include "target.h"
37 #include "target_type.h"
38 #include "target_request.h"
39 #include "breakpoints.h"
40 #include "time_support.h"
41 #include "register.h"
42 #include "trace.h"
43 #include "image.h"
44 #include "jtag.h"
45
46
47 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj *const *argv);
48
49 static int target_array2mem(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv);
50 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv);
51
52 /* targets */
53 extern struct target_type arm7tdmi_target;
54 extern struct target_type arm720t_target;
55 extern struct target_type arm9tdmi_target;
56 extern struct target_type arm920t_target;
57 extern struct target_type arm966e_target;
58 extern struct target_type arm926ejs_target;
59 extern struct target_type fa526_target;
60 extern struct target_type feroceon_target;
61 extern struct target_type dragonite_target;
62 extern struct target_type xscale_target;
63 extern struct target_type cortexm3_target;
64 extern struct target_type cortexa8_target;
65 extern struct target_type arm11_target;
66 extern struct target_type mips_m4k_target;
67 extern struct target_type avr_target;
68 extern struct target_type testee_target;
69
70 struct target_type *target_types[] =
71 {
72 &arm7tdmi_target,
73 &arm9tdmi_target,
74 &arm920t_target,
75 &arm720t_target,
76 &arm966e_target,
77 &arm926ejs_target,
78 &fa526_target,
79 &feroceon_target,
80 &dragonite_target,
81 &xscale_target,
82 &cortexm3_target,
83 &cortexa8_target,
84 &arm11_target,
85 &mips_m4k_target,
86 &avr_target,
87 &testee_target,
88 NULL,
89 };
90
91 struct target *all_targets = NULL;
92 struct target_event_callback *target_event_callbacks = NULL;
93 struct target_timer_callback *target_timer_callbacks = NULL;
94
95 const Jim_Nvp nvp_assert[] = {
96 { .name = "assert", NVP_ASSERT },
97 { .name = "deassert", NVP_DEASSERT },
98 { .name = "T", NVP_ASSERT },
99 { .name = "F", NVP_DEASSERT },
100 { .name = "t", NVP_ASSERT },
101 { .name = "f", NVP_DEASSERT },
102 { .name = NULL, .value = -1 }
103 };
104
105 const Jim_Nvp nvp_error_target[] = {
106 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
107 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
108 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
109 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
110 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
111 { .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
112 { .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
113 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
114 { .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
115 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
116 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
117 { .value = -1, .name = NULL }
118 };
119
120 const char *target_strerror_safe(int err)
121 {
122 const Jim_Nvp *n;
123
124 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
125 if (n->name == NULL) {
126 return "unknown";
127 } else {
128 return n->name;
129 }
130 }
131
132 static const Jim_Nvp nvp_target_event[] = {
133 { .value = TARGET_EVENT_OLD_gdb_program_config , .name = "old-gdb_program_config" },
134 { .value = TARGET_EVENT_OLD_pre_resume , .name = "old-pre_resume" },
135
136 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
137 { .value = TARGET_EVENT_HALTED, .name = "halted" },
138 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
139 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
140 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
141
142 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
143 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
144
145 /* historical name */
146
147 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
148
149 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
150 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
151 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
152 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
153 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
154 { .value = TARGET_EVENT_RESET_HALT_PRE, .name = "reset-halt-pre" },
155 { .value = TARGET_EVENT_RESET_HALT_POST, .name = "reset-halt-post" },
156 { .value = TARGET_EVENT_RESET_WAIT_PRE, .name = "reset-wait-pre" },
157 { .value = TARGET_EVENT_RESET_WAIT_POST, .name = "reset-wait-post" },
158 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
159 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
160
161 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
162 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
163
164 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
165 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
166
167 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
168 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
169
170 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
171 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },
172
173 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
174 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },
175
176 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
177 { .value = TARGET_EVENT_RESUMED , .name = "resume-ok" },
178 { .value = TARGET_EVENT_RESUME_END , .name = "resume-end" },
179
180 { .name = NULL, .value = -1 }
181 };
182
183 const Jim_Nvp nvp_target_state[] = {
184 { .name = "unknown", .value = TARGET_UNKNOWN },
185 { .name = "running", .value = TARGET_RUNNING },
186 { .name = "halted", .value = TARGET_HALTED },
187 { .name = "reset", .value = TARGET_RESET },
188 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
189 { .name = NULL, .value = -1 },
190 };
191
192 const Jim_Nvp nvp_target_debug_reason [] = {
193 { .name = "debug-request" , .value = DBG_REASON_DBGRQ },
194 { .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT },
195 { .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT },
196 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
197 { .name = "single-step" , .value = DBG_REASON_SINGLESTEP },
198 { .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED },
199 { .name = "undefined" , .value = DBG_REASON_UNDEFINED },
200 { .name = NULL, .value = -1 },
201 };
202
203 const Jim_Nvp nvp_target_endian[] = {
204 { .name = "big", .value = TARGET_BIG_ENDIAN },
205 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
206 { .name = "be", .value = TARGET_BIG_ENDIAN },
207 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
208 { .name = NULL, .value = -1 },
209 };
210
211 const Jim_Nvp nvp_reset_modes[] = {
212 { .name = "unknown", .value = RESET_UNKNOWN },
213 { .name = "run" , .value = RESET_RUN },
214 { .name = "halt" , .value = RESET_HALT },
215 { .name = "init" , .value = RESET_INIT },
216 { .name = NULL , .value = -1 },
217 };
218
219 const char *
220 target_state_name( struct target *t )
221 {
222 const char *cp;
223 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
224 if( !cp ){
225 LOG_ERROR("Invalid target state: %d", (int)(t->state));
226 cp = "(*BUG*unknown*BUG*)";
227 }
228 return cp;
229 }
230
231 /* determine the number of the new target */
232 static int new_target_number(void)
233 {
234 struct target *t;
235 int x;
236
237 /* number is 0 based */
238 x = -1;
239 t = all_targets;
240 while (t) {
241 if (x < t->target_number) {
242 x = t->target_number;
243 }
244 t = t->next;
245 }
246 return x + 1;
247 }
248
249 /* read a uint32_t from a buffer in target memory endianness */
250 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
251 {
252 if (target->endianness == TARGET_LITTLE_ENDIAN)
253 return le_to_h_u32(buffer);
254 else
255 return be_to_h_u32(buffer);
256 }
257
258 /* read a uint16_t from a buffer in target memory endianness */
259 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
260 {
261 if (target->endianness == TARGET_LITTLE_ENDIAN)
262 return le_to_h_u16(buffer);
263 else
264 return be_to_h_u16(buffer);
265 }
266
267 /* read a uint8_t from a buffer in target memory endianness */
268 uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
269 {
270 return *buffer & 0x0ff;
271 }
272
273 /* write a uint32_t to a buffer in target memory endianness */
274 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
275 {
276 if (target->endianness == TARGET_LITTLE_ENDIAN)
277 h_u32_to_le(buffer, value);
278 else
279 h_u32_to_be(buffer, value);
280 }
281
282 /* write a uint16_t to a buffer in target memory endianness */
283 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
284 {
285 if (target->endianness == TARGET_LITTLE_ENDIAN)
286 h_u16_to_le(buffer, value);
287 else
288 h_u16_to_be(buffer, value);
289 }
290
291 /* write a uint8_t to a buffer in target memory endianness */
292 void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
293 {
294 *buffer = value;
295 }
296
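/* Usage sketch for the accessors above: pack and unpack a word in target
 * byte order without caring whether the target is big or little endian.
 * Assumes 'target' is a valid, configured struct target pointer.
 *
 *   uint8_t buf[4];
 *   target_buffer_set_u32(target, buf, 0x12345678);   // host value -> target order
 *   uint32_t v = target_buffer_get_u32(target, buf);  // target order -> host value
 *   // v == 0x12345678 regardless of target->endianness
 */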
297 /* return a pointer to a configured target; id is name or number */
298 struct target *get_target(const char *id)
299 {
300 struct target *target;
301
302 /* try as Tcl target name */
303 for (target = all_targets; target; target = target->next) {
304 if (target->cmd_name == NULL)
305 continue;
306 if (strcmp(id, target->cmd_name) == 0)
307 return target;
308 }
309
310 /* It's OK to remove this fallback sometime after August 2010 or so */
311
312 /* no match, try as number */
313 unsigned num;
314 if (parse_uint(id, &num) != ERROR_OK)
315 return NULL;
316
317 for (target = all_targets; target; target = target->next) {
318 if (target->target_number == (int)num) {
319 LOG_WARNING("use '%s' as target identifier, not '%u'",
320 target->cmd_name, num);
321 return target;
322 }
323 }
324
325 return NULL;
326 }
327
328 /* returns a pointer to the n-th configured target */
329 static struct target *get_target_by_num(int num)
330 {
331 struct target *target = all_targets;
332
333 while (target) {
334 if (target->target_number == num) {
335 return target;
336 }
337 target = target->next;
338 }
339
340 return NULL;
341 }
342
343 struct target* get_current_target(struct command_context *cmd_ctx)
344 {
345 struct target *target = get_target_by_num(cmd_ctx->current_target);
346
347 if (target == NULL)
348 {
349 LOG_ERROR("BUG: current_target out of bounds");
350 exit(-1);
351 }
352
353 return target;
354 }
355
356 int target_poll(struct target *target)
357 {
358 int retval;
359
360 /* We can't poll until after examine */
361 if (!target_was_examined(target))
362 {
363 /* Fail silently lest we pollute the log */
364 return ERROR_FAIL;
365 }
366
367 retval = target->type->poll(target);
368 if (retval != ERROR_OK)
369 return retval;
370
371 if (target->halt_issued)
372 {
373 if (target->state == TARGET_HALTED)
374 {
375 target->halt_issued = false;
376 } else
377 {
378 long long t = timeval_ms() - target->halt_issued_time;
379 if (t>1000)
380 {
381 target->halt_issued = false;
382 LOG_INFO("Halt timed out, wake up GDB.");
383 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
384 }
385 }
386 }
387
388 return ERROR_OK;
389 }
390
391 int target_halt(struct target *target)
392 {
393 int retval;
394 /* We can't poll until after examine */
395 if (!target_was_examined(target))
396 {
397 LOG_ERROR("Target not examined yet");
398 return ERROR_FAIL;
399 }
400
401 retval = target->type->halt(target);
402 if (retval != ERROR_OK)
403 return retval;
404
405 target->halt_issued = true;
406 target->halt_issued_time = timeval_ms();
407
408 return ERROR_OK;
409 }
410
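/* Sketch of how callers typically combine the halt request above with the
 * wait helper declared later in this file: issue the asynchronous halt, then
 * poll until the target reports TARGET_HALTED or the timeout (in ms) expires.
 *
 *   if (target_halt(target) == ERROR_OK)
 *       retval = target_wait_state(target, TARGET_HALTED, 500);
 */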
411 int target_resume(struct target *target, int current, uint32_t address, int handle_breakpoints, int debug_execution)
412 {
413 int retval;
414
415 /* We can't poll until after examine */
416 if (!target_was_examined(target))
417 {
418 LOG_ERROR("Target not examined yet");
419 return ERROR_FAIL;
420 }
421
422 /* note that resume *must* be asynchronous. The CPU can halt before we poll. The CPU can
423 * even halt at the current PC as a result of a software breakpoint inserted by the
424 * application itself (possibly a bug).
425 */
426 retval = target->type->resume(target, current, address,
427 handle_breakpoints, debug_execution);
428
429 return retval;
430 }
431
432 int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
433 {
434 char buf[100];
435 int retval;
436 Jim_Nvp *n;
437 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
438 if (n->name == NULL) {
439 LOG_ERROR("invalid reset mode");
440 return ERROR_FAIL;
441 }
442
443 /* disable polling during reset to make reset event scripts
444 * more predictable, i.e. dr/irscan & pathmove in events will
445 * not have JTAG operations injected into the middle of a sequence.
446 */
447 bool save_poll = jtag_poll_get_enabled();
448
449 jtag_poll_set_enabled(false);
450
451 sprintf(buf, "ocd_process_reset %s", n->name);
452 retval = Jim_Eval(interp, buf);
453
454 jtag_poll_set_enabled(save_poll);
455
456 if (retval != JIM_OK) {
457 Jim_PrintErrorMessage(interp);
458 return ERROR_FAIL;
459 }
460
461 /* We want any events to be processed before the prompt */
462 retval = target_call_timer_callbacks_now();
463
464 return retval;
465 }
466
467 static int identity_virt2phys(struct target *target,
468 uint32_t virtual, uint32_t *physical)
469 {
470 *physical = virtual;
471 return ERROR_OK;
472 }
473
474 static int no_mmu(struct target *target, int *enabled)
475 {
476 *enabled = 0;
477 return ERROR_OK;
478 }
479
480 static int default_examine(struct target *target)
481 {
482 target_set_examined(target);
483 return ERROR_OK;
484 }
485
486 int target_examine_one(struct target *target)
487 {
488 return target->type->examine(target);
489 }
490
491 static int jtag_enable_callback(enum jtag_event event, void *priv)
492 {
493 struct target *target = priv;
494
495 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
496 return ERROR_OK;
497
498 jtag_unregister_event_callback(jtag_enable_callback, target);
499 return target_examine_one(target);
500 }
501
502
503 /* Targets that correctly implement init + examine, i.e.
504 * no communication with target during init:
505 *
506 * XScale
507 */
508 int target_examine(void)
509 {
510 int retval = ERROR_OK;
511 struct target *target;
512
513 for (target = all_targets; target; target = target->next)
514 {
515 /* defer examination, but don't skip it */
516 if (!target->tap->enabled) {
517 jtag_register_event_callback(jtag_enable_callback,
518 target);
519 continue;
520 }
521 if ((retval = target_examine_one(target)) != ERROR_OK)
522 return retval;
523 }
524 return retval;
525 }
526 const char *target_type_name(struct target *target)
527 {
528 return target->type->name;
529 }
530
531 static int target_write_memory_imp(struct target *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
532 {
533 if (!target_was_examined(target))
534 {
535 LOG_ERROR("Target not examined yet");
536 return ERROR_FAIL;
537 }
538 return target->type->write_memory_imp(target, address, size, count, buffer);
539 }
540
541 static int target_read_memory_imp(struct target *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
542 {
543 if (!target_was_examined(target))
544 {
545 LOG_ERROR("Target not examined yet");
546 return ERROR_FAIL;
547 }
548 return target->type->read_memory_imp(target, address, size, count, buffer);
549 }
550
551 static int target_soft_reset_halt_imp(struct target *target)
552 {
553 if (!target_was_examined(target))
554 {
555 LOG_ERROR("Target not examined yet");
556 return ERROR_FAIL;
557 }
558 if (!target->type->soft_reset_halt_imp) {
559 LOG_ERROR("Target %s does not support soft_reset_halt",
560 target_name(target));
561 return ERROR_FAIL;
562 }
563 return target->type->soft_reset_halt_imp(target);
564 }
565
566 static int target_run_algorithm_imp(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_param, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info)
567 {
568 if (!target_was_examined(target))
569 {
570 LOG_ERROR("Target not examined yet");
571 return ERROR_FAIL;
572 }
573 return target->type->run_algorithm_imp(target, num_mem_params, mem_params, num_reg_params, reg_param, entry_point, exit_point, timeout_ms, arch_info);
574 }
575
576 int target_read_memory(struct target *target,
577 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
578 {
579 return target->type->read_memory(target, address, size, count, buffer);
580 }
581
582 int target_read_phys_memory(struct target *target,
583 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
584 {
585 return target->type->read_phys_memory(target, address, size, count, buffer);
586 }
587
588 int target_write_memory(struct target *target,
589 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
590 {
591 return target->type->write_memory(target, address, size, count, buffer);
592 }
593
594 int target_write_phys_memory(struct target *target,
595 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
596 {
597 return target->type->write_phys_memory(target, address, size, count, buffer);
598 }
599
600 int target_bulk_write_memory(struct target *target,
601 uint32_t address, uint32_t count, uint8_t *buffer)
602 {
603 return target->type->bulk_write_memory(target, address, count, buffer);
604 }
605
606 int target_add_breakpoint(struct target *target,
607 struct breakpoint *breakpoint)
608 {
609 if (target->state != TARGET_HALTED) {
610 LOG_WARNING("target %s is not halted", target->cmd_name);
611 return ERROR_TARGET_NOT_HALTED;
612 }
613 return target->type->add_breakpoint(target, breakpoint);
614 }
615 int target_remove_breakpoint(struct target *target,
616 struct breakpoint *breakpoint)
617 {
618 return target->type->remove_breakpoint(target, breakpoint);
619 }
620
621 int target_add_watchpoint(struct target *target,
622 struct watchpoint *watchpoint)
623 {
624 if (target->state != TARGET_HALTED) {
625 LOG_WARNING("target %s is not halted", target->cmd_name);
626 return ERROR_TARGET_NOT_HALTED;
627 }
628 return target->type->add_watchpoint(target, watchpoint);
629 }
630 int target_remove_watchpoint(struct target *target,
631 struct watchpoint *watchpoint)
632 {
633 return target->type->remove_watchpoint(target, watchpoint);
634 }
635
636 int target_get_gdb_reg_list(struct target *target,
637 struct reg **reg_list[], int *reg_list_size)
638 {
639 return target->type->get_gdb_reg_list(target, reg_list, reg_list_size);
640 }
641 int target_step(struct target *target,
642 int current, uint32_t address, int handle_breakpoints)
643 {
644 return target->type->step(target, current, address, handle_breakpoints);
645 }
646
647
648 int target_run_algorithm(struct target *target,
649 int num_mem_params, struct mem_param *mem_params,
650 int num_reg_params, struct reg_param *reg_param,
651 uint32_t entry_point, uint32_t exit_point,
652 int timeout_ms, void *arch_info)
653 {
654 return target->type->run_algorithm(target,
655 num_mem_params, mem_params, num_reg_params, reg_param,
656 entry_point, exit_point, timeout_ms, arch_info);
657 }
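/* Rough sketch of driving an algorithm through the wrapper above. The
 * parameter helpers (init_reg_param, destroy_reg_param, buf_set_u32) are
 * assumed from the algorithm/binarybuffer helpers; register name, timeout
 * and addresses are illustrative, not taken from this file.
 *
 *   struct reg_param reg_params[1];
 *   init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
 *   buf_set_u32(reg_params[0].value, 0, 32, argument);
 *   retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
 *           entry_point, exit_point, 1000, arch_info);
 *   destroy_reg_param(&reg_params[0]);
 */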
658
659 /**
660 * Reset the @c examined flag for the given target.
661 * Pure paranoia -- targets are zeroed on allocation.
662 */
663 static void target_reset_examined(struct target *target)
664 {
665 target->examined = false;
666 }
667
668
669
670 static int default_mrc(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm, uint32_t *value)
671 {
672 LOG_ERROR("Not implemented: %s", __func__);
673 return ERROR_FAIL;
674 }
675
676 static int default_mcr(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm, uint32_t value)
677 {
678 LOG_ERROR("Not implemented: %s", __func__);
679 return ERROR_FAIL;
680 }
681
682 static int arm_cp_check(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm)
683 {
684 /* basic check */
685 if (!target_was_examined(target))
686 {
687 LOG_ERROR("Target not examined yet");
688 return ERROR_FAIL;
689 }
690
691 if ((cpnum < 0) || (cpnum > 15))
692 {
693 LOG_ERROR("Illegal co-processor %d", cpnum);
694 return ERROR_FAIL;
695 }
696
697 if (op1 > 7)
698 {
699 LOG_ERROR("Illegal op1");
700 return ERROR_FAIL;
701 }
702
703 if (op2 > 7)
704 {
705 LOG_ERROR("Illegal op2");
706 return ERROR_FAIL;
707 }
708
709 if (CRn > 15)
710 {
711 LOG_ERROR("Illegal CRn");
712 return ERROR_FAIL;
713 }
714
715 if (CRm > 15)
716 {
717 LOG_ERROR("Illegal CRm");
718 return ERROR_FAIL;
719 }
720
721 return ERROR_OK;
722 }
723
724 int target_mrc(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm, uint32_t *value)
725 {
726 int retval;
727
728 retval = arm_cp_check(target, cpnum, op1, op2, CRn, CRm);
729 if (retval != ERROR_OK)
730 return retval;
731
732 return target->type->mrc(target, cpnum, op1, op2, CRn, CRm, value);
733 }
734
735 int target_mcr(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm, uint32_t value)
736 {
737 int retval;
738
739 retval = arm_cp_check(target, cpnum, op1, op2, CRn, CRm);
740 if (retval != ERROR_OK)
741 return retval;
742
743 return target->type->mcr(target, cpnum, op1, op2, CRn, CRm, value);
744 }
745
746 static int
747 err_read_phys_memory(struct target *target, uint32_t address,
748 uint32_t size, uint32_t count, uint8_t *buffer)
749 {
750 LOG_ERROR("Not implemented: %s", __func__);
751 return ERROR_FAIL;
752 }
753
754 static int
755 err_write_phys_memory(struct target *target, uint32_t address,
756 uint32_t size, uint32_t count, uint8_t *buffer)
757 {
758 LOG_ERROR("Not implemented: %s", __func__);
759 return ERROR_FAIL;
760 }
761
762 int target_init(struct command_context *cmd_ctx)
763 {
764 struct target *target;
765 int retval;
766
767 for (target = all_targets; target; target = target->next) {
768 struct target_type *type = target->type;
769
770 target_reset_examined(target);
771 if (target->type->examine == NULL)
772 {
773 target->type->examine = default_examine;
774 }
775
776 if ((retval = target->type->init_target(cmd_ctx, target)) != ERROR_OK)
777 {
778 LOG_ERROR("target '%s' init failed", target_name(target));
779 return retval;
780 }
781
782 /**
783 * @todo MCR/MRC are ARM-specific; don't require them in
784 * all targets, or for ARMs without coprocessors.
785 */
786 if (target->type->mcr == NULL)
787 {
788 target->type->mcr = default_mcr;
789 } else
790 {
791 const struct command_registration mcr_cmd = {
792 .name = "mcr",
793 .mode = COMMAND_EXEC,
794 .jim_handler = &jim_mcrmrc,
795 .help = "write coprocessor",
796 .usage = "<cpnum> <op1> <op2> <CRn> <CRm> <value>",
797 };
798 register_command(cmd_ctx, NULL, &mcr_cmd);
799 }
800
801 if (target->type->mrc == NULL)
802 {
803 target->type->mrc = default_mrc;
804 } else
805 {
806 const struct command_registration mrc_cmd = {
807 .name = "mrc",
808 .jim_handler = &jim_mcrmrc,
809 .help = "read coprocessor",
810 .usage = "<cpnum> <op1> <op2> <CRn> <CRm>",
811 };
812 register_command(cmd_ctx, NULL, &mrc_cmd);
813 }
814
815
816 /**
817 * @todo get rid of those *memory_imp() methods, now that all
818 * callers are using target_*_memory() accessors ... and make
819 * sure the "physical" paths handle the same issues.
820 */
821
822 /* a non-invasive way (in terms of patches) to add some code that
823 * runs before the type->write/read_memory implementation
824 */
825 target->type->write_memory_imp = target->type->write_memory;
826 target->type->write_memory = target_write_memory_imp;
827 target->type->read_memory_imp = target->type->read_memory;
828 target->type->read_memory = target_read_memory_imp;
829 target->type->soft_reset_halt_imp = target->type->soft_reset_halt;
830 target->type->soft_reset_halt = target_soft_reset_halt_imp;
831 target->type->run_algorithm_imp = target->type->run_algorithm;
832 target->type->run_algorithm = target_run_algorithm_imp;
833
834 /* Sanity-check MMU support ... stub in what we must, to help
835 * implement it in stages, but warn if we need to do so.
836 */
837 if (type->mmu) {
838 if (type->write_phys_memory == NULL) {
839 LOG_ERROR("type '%s' is missing %s",
840 type->name,
841 "write_phys_memory");
842 type->write_phys_memory = err_write_phys_memory;
843 }
844 if (type->read_phys_memory == NULL) {
845 LOG_ERROR("type '%s' is missing %s",
846 type->name,
847 "read_phys_memory");
848 type->read_phys_memory = err_read_phys_memory;
849 }
850 if (type->virt2phys == NULL) {
851 LOG_ERROR("type '%s' is missing %s",
852 type->name,
853 "virt2phys");
854 type->virt2phys = identity_virt2phys;
855 }
856
857 /* Make sure no-MMU targets all behave the same: make no
858 * distinction between physical and virtual addresses, and
859 * ensure that virt2phys() is always an identity mapping.
860 */
861 } else {
862 if (type->write_phys_memory
863 || type->read_phys_memory
864 || type->virt2phys)
865 LOG_WARNING("type '%s' has broken MMU hooks",
866 type->name);
867
868 type->mmu = no_mmu;
869 type->write_phys_memory = type->write_memory;
870 type->read_phys_memory = type->read_memory;
871 type->virt2phys = identity_virt2phys;
872 }
873 }
874
875 if (all_targets)
876 {
877 if ((retval = target_register_user_commands(cmd_ctx)) != ERROR_OK)
878 return retval;
879 if ((retval = target_register_timer_callback(handle_target, 100, 1, NULL)) != ERROR_OK)
880 return retval;
881 }
882
883 return ERROR_OK;
884 }
885
886 int target_register_event_callback(int (*callback)(struct target *target, enum target_event event, void *priv), void *priv)
887 {
888 struct target_event_callback **callbacks_p = &target_event_callbacks;
889
890 if (callback == NULL)
891 {
892 return ERROR_INVALID_ARGUMENTS;
893 }
894
895 if (*callbacks_p)
896 {
897 while ((*callbacks_p)->next)
898 callbacks_p = &((*callbacks_p)->next);
899 callbacks_p = &((*callbacks_p)->next);
900 }
901
902 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
903 (*callbacks_p)->callback = callback;
904 (*callbacks_p)->priv = priv;
905 (*callbacks_p)->next = NULL;
906
907 return ERROR_OK;
908 }
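/* Sketch of hooking target events via the registration above. The callback
 * signature matches the function pointer taken by the registration;
 * 'my_event_handler' is an illustrative name, not part of this file.
 *
 *   static int my_event_handler(struct target *target,
 *           enum target_event event, void *priv)
 *   {
 *       if (event == TARGET_EVENT_HALTED)
 *           LOG_INFO("target %s halted", target_name(target));
 *       return ERROR_OK;
 *   }
 *   ...
 *   target_register_event_callback(my_event_handler, NULL);
 */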
909
910 int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
911 {
912 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
913 struct timeval now;
914
915 if (callback == NULL)
916 {
917 return ERROR_INVALID_ARGUMENTS;
918 }
919
920 if (*callbacks_p)
921 {
922 while ((*callbacks_p)->next)
923 callbacks_p = &((*callbacks_p)->next);
924 callbacks_p = &((*callbacks_p)->next);
925 }
926
927 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
928 (*callbacks_p)->callback = callback;
929 (*callbacks_p)->periodic = periodic;
930 (*callbacks_p)->time_ms = time_ms;
931
932 gettimeofday(&now, NULL);
933 (*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000;
934 time_ms -= (time_ms % 1000);
935 (*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000);
936 if ((*callbacks_p)->when.tv_usec > 1000000)
937 {
938 (*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000;
939 (*callbacks_p)->when.tv_sec += 1;
940 }
941
942 (*callbacks_p)->priv = priv;
943 (*callbacks_p)->next = NULL;
944
945 return ERROR_OK;
946 }
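/* Example registration, mirroring what target_init() does further down in
 * this file: run handle_target() every 100 ms as a periodic callback. A
 * one-shot callback would pass periodic = 0 instead.
 *
 *   target_register_timer_callback(handle_target, 100, 1, NULL);
 */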
947
948 int target_unregister_event_callback(int (*callback)(struct target *target, enum target_event event, void *priv), void *priv)
949 {
950 struct target_event_callback **p = &target_event_callbacks;
951 struct target_event_callback *c = target_event_callbacks;
952
953 if (callback == NULL)
954 {
955 return ERROR_INVALID_ARGUMENTS;
956 }
957
958 while (c)
959 {
960 struct target_event_callback *next = c->next;
961 if ((c->callback == callback) && (c->priv == priv))
962 {
963 *p = next;
964 free(c);
965 return ERROR_OK;
966 }
967 else
968 p = &(c->next);
969 c = next;
970 }
971
972 return ERROR_OK;
973 }
974
975 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
976 {
977 struct target_timer_callback **p = &target_timer_callbacks;
978 struct target_timer_callback *c = target_timer_callbacks;
979
980 if (callback == NULL)
981 {
982 return ERROR_INVALID_ARGUMENTS;
983 }
984
985 while (c)
986 {
987 struct target_timer_callback *next = c->next;
988 if ((c->callback == callback) && (c->priv == priv))
989 {
990 *p = next;
991 free(c);
992 return ERROR_OK;
993 }
994 else
995 p = &(c->next);
996 c = next;
997 }
998
999 return ERROR_OK;
1000 }
1001
1002 int target_call_event_callbacks(struct target *target, enum target_event event)
1003 {
1004 struct target_event_callback *callback = target_event_callbacks;
1005 struct target_event_callback *next_callback;
1006
1007 if (event == TARGET_EVENT_HALTED)
1008 {
1009 /* execute early halted first */
1010 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1011 }
1012
1013 LOG_DEBUG("target event %i (%s)",
1014 event,
1015 Jim_Nvp_value2name_simple(nvp_target_event, event)->name);
1016
1017 target_handle_event(target, event);
1018
1019 while (callback)
1020 {
1021 next_callback = callback->next;
1022 callback->callback(target, event, callback->priv);
1023 callback = next_callback;
1024 }
1025
1026 return ERROR_OK;
1027 }
1028
1029 static int target_timer_callback_periodic_restart(
1030 struct target_timer_callback *cb, struct timeval *now)
1031 {
1032 int time_ms = cb->time_ms;
1033 cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
1034 time_ms -= (time_ms % 1000);
1035 cb->when.tv_sec = now->tv_sec + time_ms / 1000;
1036 if (cb->when.tv_usec > 1000000)
1037 {
1038 cb->when.tv_usec = cb->when.tv_usec - 1000000;
1039 cb->when.tv_sec += 1;
1040 }
1041 return ERROR_OK;
1042 }
1043
1044 static int target_call_timer_callback(struct target_timer_callback *cb,
1045 struct timeval *now)
1046 {
1047 cb->callback(cb->priv);
1048
1049 if (cb->periodic)
1050 return target_timer_callback_periodic_restart(cb, now);
1051
1052 return target_unregister_timer_callback(cb->callback, cb->priv);
1053 }
1054
1055 static int target_call_timer_callbacks_check_time(int checktime)
1056 {
1057 keep_alive();
1058
1059 struct timeval now;
1060 gettimeofday(&now, NULL);
1061
1062 struct target_timer_callback *callback = target_timer_callbacks;
1063 while (callback)
1064 {
1065 // cleaning up may unregister and free this callback
1066 struct target_timer_callback *next_callback = callback->next;
1067
1068 bool call_it = callback->callback &&
1069 ((!checktime && callback->periodic) ||
1070 now.tv_sec > callback->when.tv_sec ||
1071 (now.tv_sec == callback->when.tv_sec &&
1072 now.tv_usec >= callback->when.tv_usec));
1073
1074 if (call_it)
1075 {
1076 int retval = target_call_timer_callback(callback, &now);
1077 if (retval != ERROR_OK)
1078 return retval;
1079 }
1080
1081 callback = next_callback;
1082 }
1083
1084 return ERROR_OK;
1085 }
1086
1087 int target_call_timer_callbacks(void)
1088 {
1089 return target_call_timer_callbacks_check_time(1);
1090 }
1091
1092 /* invoke periodic callbacks immediately */
1093 int target_call_timer_callbacks_now(void)
1094 {
1095 return target_call_timer_callbacks_check_time(0);
1096 }
1097
1098 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1099 {
1100 struct working_area *c = target->working_areas;
1101 struct working_area *new_wa = NULL;
1102
1103 /* Reevaluate working area address based on MMU state*/
1104 if (target->working_areas == NULL)
1105 {
1106 int retval;
1107 int enabled;
1108
1109 retval = target->type->mmu(target, &enabled);
1110 if (retval != ERROR_OK)
1111 {
1112 return retval;
1113 }
1114
1115 if (!enabled) {
1116 if (target->working_area_phys_spec) {
1117 LOG_DEBUG("MMU disabled, using physical "
1118 "address for working memory 0x%08x",
1119 (unsigned)target->working_area_phys);
1120 target->working_area = target->working_area_phys;
1121 } else {
1122 LOG_ERROR("No working memory available. "
1123 "Specify -work-area-phys to target.");
1124 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1125 }
1126 } else {
1127 if (target->working_area_virt_spec) {
1128 LOG_DEBUG("MMU enabled, using virtual "
1129 "address for working memory 0x%08x",
1130 (unsigned)target->working_area_virt);
1131 target->working_area = target->working_area_virt;
1132 } else {
1133 LOG_ERROR("No working memory available. "
1134 "Specify -work-area-virt to target.");
1135 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1136 }
1137 }
1138 }
1139
1140 /* only allocate multiples of 4 bytes */
1141 if (size % 4)
1142 {
1143 LOG_ERROR("BUG: code tried to allocate unaligned number of bytes (0x%08x), padding", ((unsigned)(size)));
1144 size = (size + 3) & (~3);
1145 }
1146
1147 /* see if there's already a matching working area */
1148 while (c)
1149 {
1150 if ((c->free) && (c->size == size))
1151 {
1152 new_wa = c;
1153 break;
1154 }
1155 c = c->next;
1156 }
1157
1158 /* if not, allocate a new one */
1159 if (!new_wa)
1160 {
1161 struct working_area **p = &target->working_areas;
1162 uint32_t first_free = target->working_area;
1163 uint32_t free_size = target->working_area_size;
1164
1165 c = target->working_areas;
1166 while (c)
1167 {
1168 first_free += c->size;
1169 free_size -= c->size;
1170 p = &c->next;
1171 c = c->next;
1172 }
1173
1174 if (free_size < size)
1175 {
1176 LOG_WARNING("not enough working area available (requested %u, free %u)",
1177 (unsigned)(size), (unsigned)(free_size));
1178 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1179 }
1180
1181 LOG_DEBUG("allocated new working area at address 0x%08x", (unsigned)first_free);
1182
1183 new_wa = malloc(sizeof(struct working_area));
1184 new_wa->next = NULL;
1185 new_wa->size = size;
1186 new_wa->address = first_free;
1187
1188 if (target->backup_working_area)
1189 {
1190 int retval;
1191 new_wa->backup = malloc(new_wa->size);
1192 if ((retval = target_read_memory(target, new_wa->address, 4, new_wa->size / 4, new_wa->backup)) != ERROR_OK)
1193 {
1194 free(new_wa->backup);
1195 free(new_wa);
1196 return retval;
1197 }
1198 }
1199 else
1200 {
1201 new_wa->backup = NULL;
1202 }
1203
1204 /* put new entry in list */
1205 *p = new_wa;
1206 }
1207
1208 /* mark as used, and return the new (reused) area */
1209 new_wa->free = 0;
1210 *area = new_wa;
1211
1212 /* user pointer */
1213 new_wa->user = area;
1214
1215 return ERROR_OK;
1216 }
1217
1218 int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1219 {
1220 if (area->free)
1221 return ERROR_OK;
1222
1223 if (restore && target->backup_working_area)
1224 {
1225 int retval;
1226 if ((retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup)) != ERROR_OK)
1227 return retval;
1228 }
1229
1230 area->free = 1;
1231
1232 /* mark user pointer invalid */
1233 *area->user = NULL;
1234 area->user = NULL;
1235
1236 return ERROR_OK;
1237 }
1238
1239 int target_free_working_area(struct target *target, struct working_area *area)
1240 {
1241 return target_free_working_area_restore(target, area, 1);
1242 }
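/* Working-area life cycle sketch (assumes the target configuration supplied
 * -work-area-phys or -work-area-virt plus -work-area-size; 'code' and the
 * 256-byte size are illustrative values):
 *
 *   struct working_area *wa;
 *   if (target_alloc_working_area(target, 256, &wa) != ERROR_OK)
 *       return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *   retval = target_write_buffer(target, wa->address, 256, code);
 *   ...
 *   target_free_working_area(target, wa);  // restores backed-up memory if enabled
 */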
1243
1244 /* free resources and restore memory, if restoring memory fails,
1245 * free up resources anyway
1246 */
1247 void target_free_all_working_areas_restore(struct target *target, int restore)
1248 {
1249 struct working_area *c = target->working_areas;
1250
1251 while (c)
1252 {
1253 struct working_area *next = c->next;
1254 target_free_working_area_restore(target, c, restore);
1255
1256 if (c->backup)
1257 free(c->backup);
1258
1259 free(c);
1260
1261 c = next;
1262 }
1263
1264 target->working_areas = NULL;
1265 }
1266
1267 void target_free_all_working_areas(struct target *target)
1268 {
1269 target_free_all_working_areas_restore(target, 1);
1270 }
1271
1272 int target_arch_state(struct target *target)
1273 {
1274 int retval;
1275 if (target == NULL)
1276 {
1277 LOG_USER("No target has been configured");
1278 return ERROR_OK;
1279 }
1280
1281 LOG_USER("target state: %s", target_state_name( target ));
1282
1283 if (target->state != TARGET_HALTED)
1284 return ERROR_OK;
1285
1286 retval = target->type->arch_state(target);
1287 return retval;
1288 }
1289
1290 /* Write a buffer to target memory. Single aligned 16- or 32-bit words are
1291 * guaranteed to use 16- or 32-bit access respectively; otherwise data is
1292 * transferred as quickly as possible
1293 */
1294 int target_write_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
1295 {
1296 int retval;
1297 LOG_DEBUG("writing buffer of %i bytes at 0x%8.8x",
1298 (int)size, (unsigned)address);
1299
1300 if (!target_was_examined(target))
1301 {
1302 LOG_ERROR("Target not examined yet");
1303 return ERROR_FAIL;
1304 }
1305
1306 if (size == 0) {
1307 return ERROR_OK;
1308 }
1309
1310 if ((address + size - 1) < address)
1311 {
1312 /* GDB can request this when e.g. PC is 0xfffffffc*/
1313 LOG_ERROR("address + size wrapped(0x%08x, 0x%08x)",
1314 (unsigned)address,
1315 (unsigned)size);
1316 return ERROR_FAIL;
1317 }
1318
1319 if (((address % 2) == 0) && (size == 2))
1320 {
1321 return target_write_memory(target, address, 2, 1, buffer);
1322 }
1323
1324 /* handle unaligned head bytes */
1325 if (address % 4)
1326 {
1327 uint32_t unaligned = 4 - (address % 4);
1328
1329 if (unaligned > size)
1330 unaligned = size;
1331
1332 if ((retval = target_write_memory(target, address, 1, unaligned, buffer)) != ERROR_OK)
1333 return retval;
1334
1335 buffer += unaligned;
1336 address += unaligned;
1337 size -= unaligned;
1338 }
1339
1340 /* handle aligned words */
1341 if (size >= 4)
1342 {
1343 int aligned = size - (size % 4);
1344
1345 /* use bulk writes above a certain limit. This may have to be changed */
1346 if (aligned > 128)
1347 {
1348 if ((retval = target->type->bulk_write_memory(target, address, aligned / 4, buffer)) != ERROR_OK)
1349 return retval;
1350 }
1351 else
1352 {
1353 if ((retval = target_write_memory(target, address, 4, aligned / 4, buffer)) != ERROR_OK)
1354 return retval;
1355 }
1356
1357 buffer += aligned;
1358 address += aligned;
1359 size -= aligned;
1360 }
1361
1362 /* handle tail writes of less than 4 bytes */
1363 if (size > 0)
1364 {
1365 if ((retval = target_write_memory(target, address, 1, size, buffer)) != ERROR_OK)
1366 return retval;
1367 }
1368
1369 return ERROR_OK;
1370 }
1371
1372 /* Read a buffer from target memory. Single aligned 16- or 32-bit words are
1373 * guaranteed to use 16- or 32-bit access respectively; otherwise data is
1374 * transferred as quickly as possible
1375 */
1376 int target_read_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
1377 {
1378 int retval;
1379 LOG_DEBUG("reading buffer of %i bytes at 0x%8.8x",
1380 (int)size, (unsigned)address);
1381
1382 if (!target_was_examined(target))
1383 {
1384 LOG_ERROR("Target not examined yet");
1385 return ERROR_FAIL;
1386 }
1387
1388 if (size == 0) {
1389 return ERROR_OK;
1390 }
1391
1392 if ((address + size - 1) < address)
1393 {
1394 /* GDB can request this when e.g. PC is 0xfffffffc*/
1395 LOG_ERROR("address + size wrapped(0x%08" PRIx32 ", 0x%08" PRIx32 ")",
1396 address,
1397 size);
1398 return ERROR_FAIL;
1399 }
1400
1401 if (((address % 2) == 0) && (size == 2))
1402 {
1403 return target_read_memory(target, address, 2, 1, buffer);
1404 }
1405
1406 /* handle unaligned head bytes */
1407 if (address % 4)
1408 {
1409 uint32_t unaligned = 4 - (address % 4);
1410
1411 if (unaligned > size)
1412 unaligned = size;
1413
1414 if ((retval = target_read_memory(target, address, 1, unaligned, buffer)) != ERROR_OK)
1415 return retval;
1416
1417 buffer += unaligned;
1418 address += unaligned;
1419 size -= unaligned;
1420 }
1421
1422 /* handle aligned words */
1423 if (size >= 4)
1424 {
1425 int aligned = size - (size % 4);
1426
1427 if ((retval = target_read_memory(target, address, 4, aligned / 4, buffer)) != ERROR_OK)
1428 return retval;
1429
1430 buffer += aligned;
1431 address += aligned;
1432 size -= aligned;
1433 }
1434
1435 /* use halfword accesses for the remainder where possible, to avoid AHB byte-access limitations on some targets */
1436 if (size >= 2)
1437 {
1438 int aligned = size - (size%2);
1439 retval = target_read_memory(target, address, 2, aligned / 2, buffer);
1440 if (retval != ERROR_OK)
1441 return retval;
1442
1443 buffer += aligned;
1444 address += aligned;
1445 size -= aligned;
1446 }
1447 /* handle a remaining tail byte, if any */
1448 if (size > 0)
1449 {
1450 if ((retval = target_read_memory(target, address, 1, size, buffer)) != ERROR_OK)
1451 return retval;
1452 }
1453
1454 return ERROR_OK;
1455 }
1456
1457 int target_checksum_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* crc)
1458 {
1459 uint8_t *buffer;
1460 int retval;
1461 uint32_t i;
1462 uint32_t checksum = 0;
1463 if (!target_was_examined(target))
1464 {
1465 LOG_ERROR("Target not examined yet");
1466 return ERROR_FAIL;
1467 }
1468
1469 if ((retval = target->type->checksum_memory(target, address,
1470 size, &checksum)) != ERROR_OK)
1471 {
1472 buffer = malloc(size);
1473 if (buffer == NULL)
1474 {
1475 LOG_ERROR("error allocating buffer for section (%d bytes)", (int)size);
1476 return ERROR_INVALID_ARGUMENTS;
1477 }
1478 retval = target_read_buffer(target, address, size, buffer);
1479 if (retval != ERROR_OK)
1480 {
1481 free(buffer);
1482 return retval;
1483 }
1484
1485 /* convert to target endianness */
1486 for (i = 0; i < (size/sizeof(uint32_t)); i++)
1487 {
1488 uint32_t target_data;
1489 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
1490 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
1491 }
1492
1493 retval = image_calculate_checksum(buffer, size, &checksum);
1494 free(buffer);
1495 }
1496
1497 *crc = checksum;
1498
1499 return retval;
1500 }
1501
1502 int target_blank_check_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* blank)
1503 {
1504 int retval;
1505 if (!target_was_examined(target))
1506 {
1507 LOG_ERROR("Target not examined yet");
1508 return ERROR_FAIL;
1509 }
1510
1511 if (target->type->blank_check_memory == 0)
1512 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1513
1514 retval = target->type->blank_check_memory(target, address, size, blank);
1515
1516 return retval;
1517 }
1518
1519 int target_read_u32(struct target *target, uint32_t address, uint32_t *value)
1520 {
1521 uint8_t value_buf[4];
1522 if (!target_was_examined(target))
1523 {
1524 LOG_ERROR("Target not examined yet");
1525 return ERROR_FAIL;
1526 }
1527
1528 int retval = target_read_memory(target, address, 4, 1, value_buf);
1529
1530 if (retval == ERROR_OK)
1531 {
1532 *value = target_buffer_get_u32(target, value_buf);
1533 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1534 address,
1535 *value);
1536 }
1537 else
1538 {
1539 *value = 0x0;
1540 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1541 address);
1542 }
1543
1544 return retval;
1545 }
1546
1547 int target_read_u16(struct target *target, uint32_t address, uint16_t *value)
1548 {
1549 uint8_t value_buf[2];
1550 if (!target_was_examined(target))
1551 {
1552 LOG_ERROR("Target not examined yet");
1553 return ERROR_FAIL;
1554 }
1555
1556 int retval = target_read_memory(target, address, 2, 1, value_buf);
1557
1558 if (retval == ERROR_OK)
1559 {
1560 *value = target_buffer_get_u16(target, value_buf);
1561 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
1562 address,
1563 *value);
1564 }
1565 else
1566 {
1567 *value = 0x0;
1568 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1569 address);
1570 }
1571
1572 return retval;
1573 }
1574
1575 int target_read_u8(struct target *target, uint32_t address, uint8_t *value)
1576 {
1577 if (!target_was_examined(target))
1578 {
1579 LOG_ERROR("Target not examined yet");
1580 return ERROR_FAIL;
1581 }
1582 int retval = target_read_memory(target, address, 1, 1, value);
1583
1584 if (retval == ERROR_OK)
1585 {
1586 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
1587 address,
1588 *value);
1589 }
1590 else
1591 {
1592 *value = 0x0;
1593 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1594 address);
1595 }
1596
1597 return retval;
1598 }
1599
1600 int target_write_u32(struct target *target, uint32_t address, uint32_t value)
1601 {
1602 int retval;
1603 uint8_t value_buf[4];
1604 if (!target_was_examined(target))
1605 {
1606 LOG_ERROR("Target not examined yet");
1607 return ERROR_FAIL;
1608 }
1609
1610 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1611 address,
1612 value);
1613
1614 target_buffer_set_u32(target, value_buf, value);
1615 if ((retval = target_write_memory(target, address, 4, 1, value_buf)) != ERROR_OK)
1616 {
1617 LOG_DEBUG("failed: %i", retval);
1618 }
1619
1620 return retval;
1621 }
1622
1623 int target_write_u16(struct target *target, uint32_t address, uint16_t value)
1624 {
1625 int retval;
1626 uint8_t value_buf[2];
1627 if (!target_was_examined(target))
1628 {
1629 LOG_ERROR("Target not examined yet");
1630 return ERROR_FAIL;
1631 }
1632
1633 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
1634 address,
1635 value);
1636
1637 target_buffer_set_u16(target, value_buf, value);
1638 if ((retval = target_write_memory(target, address, 2, 1, value_buf)) != ERROR_OK)
1639 {
1640 LOG_DEBUG("failed: %i", retval);
1641 }
1642
1643 return retval;
1644 }
1645
1646 int target_write_u8(struct target *target, uint32_t address, uint8_t value)
1647 {
1648 int retval;
1649 if (!target_was_examined(target))
1650 {
1651 LOG_ERROR("Target not examined yet");
1652 return ERROR_FAIL;
1653 }
1654
1655 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
1656 address, value);
1657
1658 if ((retval = target_write_memory(target, address, 1, 1, &value)) != ERROR_OK)
1659 {
1660 LOG_DEBUG("failed: %i", retval);
1661 }
1662
1663 return retval;
1664 }
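/* Convenience-accessor sketch: read-modify-write a 32-bit memory-mapped
 * register using the helpers above. The address 0x40000000 is a made-up
 * example, not a real peripheral referenced by this file.
 *
 *   uint32_t val;
 *   if (target_read_u32(target, 0x40000000, &val) == ERROR_OK)
 *       retval = target_write_u32(target, 0x40000000, val | 0x1);
 */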
1665
1666 COMMAND_HANDLER(handle_targets_command)
1667 {
1668 struct target *target = all_targets;
1669
1670 if (CMD_ARGC == 1)
1671 {
1672 target = get_target(CMD_ARGV[0]);
1673 if (target == NULL) {
1674 command_print(CMD_CTX,"Target: %s is unknown, try one of:\n", CMD_ARGV[0]);
1675 goto DumpTargets;
1676 }
1677 if (!target->tap->enabled) {
1678 command_print(CMD_CTX,"Target: TAP %s is disabled, "
1679 "can't be the current target\n",
1680 target->tap->dotted_name);
1681 return ERROR_FAIL;
1682 }
1683
1684 CMD_CTX->current_target = target->target_number;
1685 return ERROR_OK;
1686 }
1687 DumpTargets:
1688
1689 target = all_targets;
1690 command_print(CMD_CTX, "    TargetName         Type       Endian TapName            State       ");
1691 command_print(CMD_CTX, "--  ------------------ ---------- ------ ------------------ ------------");
1692 while (target)
1693 {
1694 const char *state;
1695 char marker = ' ';
1696
1697 if (target->tap->enabled)
1698 state = target_state_name( target );
1699 else
1700 state = "tap-disabled";
1701
1702 if (CMD_CTX->current_target == target->target_number)
1703 marker = '*';
1704
1705 /* keep columns lined up to match the headers above */
1706 command_print(CMD_CTX, "%2d%c %-18s %-10s %-6s %-18s %s",
1707 target->target_number,
1708 marker,
1709 target_name(target),
1710 target_type_name(target),
1711 Jim_Nvp_value2name_simple(nvp_target_endian,
1712 target->endianness)->name,
1713 target->tap->dotted_name,
1714 state);
1715 target = target->next;
1716 }
1717
1718 return ERROR_OK;
1719 }
1720
1721 /* every 100ms (the interval registered in target_init) we check for reset & powerdropout and issue a "reset halt" if so. */
1722
1723 static int powerDropout;
1724 static int srstAsserted;
1725
1726 static int runPowerRestore;
1727 static int runPowerDropout;
1728 static int runSrstAsserted;
1729 static int runSrstDeasserted;
1730
1731 static int sense_handler(void)
1732 {
1733 static int prevSrstAsserted = 0;
1734 static int prevPowerdropout = 0;
1735
1736 int retval;
1737 if ((retval = jtag_power_dropout(&powerDropout)) != ERROR_OK)
1738 return retval;
1739
1740 int powerRestored;
1741 powerRestored = prevPowerdropout && !powerDropout;
1742 if (powerRestored)
1743 {
1744 runPowerRestore = 1;
1745 }
1746
1747 long long current = timeval_ms();
1748 static long long lastPower = 0;
1749 int waitMore = lastPower + 2000 > current;
1750 if (powerDropout && !waitMore)
1751 {
1752 runPowerDropout = 1;
1753 lastPower = current;
1754 }
1755
1756 if ((retval = jtag_srst_asserted(&srstAsserted)) != ERROR_OK)
1757 return retval;
1758
1759 int srstDeasserted;
1760 srstDeasserted = prevSrstAsserted && !srstAsserted;
1761
1762 static long long lastSrst = 0;
1763 waitMore = lastSrst + 2000 > current;
1764 if (srstDeasserted && !waitMore)
1765 {
1766 runSrstDeasserted = 1;
1767 lastSrst = current;
1768 }
1769
1770 if (!prevSrstAsserted && srstAsserted)
1771 {
1772 runSrstAsserted = 1;
1773 }
1774
1775 prevSrstAsserted = srstAsserted;
1776 prevPowerdropout = powerDropout;
1777
1778 if (srstDeasserted || powerRestored)
1779 {
1780 /* Other than logging the event we can't do anything here.
1781 * Issuing a reset is a particularly bad idea as we might
1782 * be inside a reset already.
1783 */
1784 }
1785
1786 return ERROR_OK;
1787 }
1788
1789 static void target_call_event_callbacks_all(enum target_event e) {
1790 struct target *target;
1791 target = all_targets;
1792 while (target) {
1793 target_call_event_callbacks(target, e);
1794 target = target->next;
1795 }
1796 }
1797
1798 /* process target state changes */
1799 int handle_target(void *priv)
1800 {
1801 int retval = ERROR_OK;
1802
1803 /* we do not want to recurse here... */
1804 static int recursive = 0;
1805 if (! recursive)
1806 {
1807 recursive = 1;
1808 sense_handler();
1809 /* danger! running these procedures can trigger srst assertions and power dropouts.
1810 * We need to avoid an infinite loop/recursion here and we do that by
1811 * clearing the flags after running these events.
1812 */
1813 int did_something = 0;
1814 if (runSrstAsserted)
1815 {
1816 target_call_event_callbacks_all(TARGET_EVENT_GDB_HALT);
1817 Jim_Eval(interp, "srst_asserted");
1818 did_something = 1;
1819 }
1820 if (runSrstDeasserted)
1821 {
1822 Jim_Eval(interp, "srst_deasserted");
1823 did_something = 1;
1824 }
1825 if (runPowerDropout)
1826 {
1827 target_call_event_callbacks_all(TARGET_EVENT_GDB_HALT);
1828 Jim_Eval(interp, "power_dropout");
1829 did_something = 1;
1830 }
1831 if (runPowerRestore)
1832 {
1833 Jim_Eval(interp, "power_restore");
1834 did_something = 1;
1835 }
1836
1837 if (did_something)
1838 {
1839 /* clear detect flags */
1840 sense_handler();
1841 }
1842
1843 /* clear action flags */
1844
1845 runSrstAsserted = 0;
1846 runSrstDeasserted = 0;
1847 runPowerRestore = 0;
1848 runPowerDropout = 0;
1849
1850 recursive = 0;
1851 }
1852
1853 /* Poll targets for state changes unless that's globally disabled.
1854 * Skip targets that are currently disabled.
1855 */
1856 for (struct target *target = all_targets;
1857 is_jtag_poll_safe() && target;
1858 target = target->next)
1859 {
1860 if (!target->tap->enabled)
1861 continue;
1862
1863 /* only poll target if we've got power and srst isn't asserted */
1864 if (!powerDropout && !srstAsserted)
1865 {
1866 /* polling may fail silently until the target has been examined */
1867 if ((retval = target_poll(target)) != ERROR_OK)
1868 {
1869 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1870 return retval;
1871 }
1872 }
1873 }
1874
1875 return retval;
1876 }
1877
1878 COMMAND_HANDLER(handle_reg_command)
1879 {
1880 struct target *target;
1881 struct reg *reg = NULL;
1882 unsigned count = 0;
1883 char *value;
1884
1885 LOG_DEBUG("-");
1886
1887 target = get_current_target(CMD_CTX);
1888
1889 /* list all available registers for the current target */
1890 if (CMD_ARGC == 0)
1891 {
1892 struct reg_cache *cache = target->reg_cache;
1893
1894 count = 0;
1895 while (cache)
1896 {
1897 unsigned i;
1898
1899 command_print(CMD_CTX, "===== %s", cache->name);
1900
1901 for (i = 0, reg = cache->reg_list;
1902 i < cache->num_regs;
1903 i++, reg++, count++)
1904 {
1905 /* only print cached values if they are valid */
1906 if (reg->valid) {
1907 value = buf_to_str(reg->value,
1908 reg->size, 16);
1909 command_print(CMD_CTX,
1910 "(%i) %s (/%" PRIu32 "): 0x%s%s",
1911 count, reg->name,
1912 reg->size, value,
1913 reg->dirty
1914 ? " (dirty)"
1915 : "");
1916 free(value);
1917 } else {
1918 command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
1919 count, reg->name,
1920 reg->size) ;
1921 }
1922 }
1923 cache = cache->next;
1924 }
1925
1926 return ERROR_OK;
1927 }
1928
1929 /* access a single register by its ordinal number */
1930 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9'))
1931 {
1932 unsigned num;
1933 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
1934
1935 struct reg_cache *cache = target->reg_cache;
1936 count = 0;
1937 while (cache)
1938 {
1939 unsigned i;
1940 for (i = 0; i < cache->num_regs; i++)
1941 {
1942 if (count++ == num)
1943 {
1944 reg = &cache->reg_list[i];
1945 break;
1946 }
1947 }
1948 if (reg)
1949 break;
1950 cache = cache->next;
1951 }
1952
1953 if (!reg)
1954 {
1955 command_print(CMD_CTX, "%i is out of bounds, the current target has only %i registers (0 - %i)", num, count, count - 1);
1956 return ERROR_OK;
1957 }
1958 } else /* access a single register by its name */
1959 {
1960 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
1961
1962 if (!reg)
1963 {
1964 command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
1965 return ERROR_OK;
1966 }
1967 }
1968
1969 /* display a register */
1970 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0') && (CMD_ARGV[1][0] <= '9'))))
1971 {
1972 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
1973 reg->valid = 0;
1974
1975 if (reg->valid == 0)
1976 {
1977 reg->type->get(reg);
1978 }
1979 value = buf_to_str(reg->value, reg->size, 16);
1980 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
1981 free(value);
1982 return ERROR_OK;
1983 }
1984
1985 /* set register value */
1986 if (CMD_ARGC == 2)
1987 {
1988 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
1989 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
1990
1991 reg->type->set(reg, buf);
1992
1993 value = buf_to_str(reg->value, reg->size, 16);
1994 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
1995 free(value);
1996
1997 free(buf);
1998
1999 return ERROR_OK;
2000 }
2001
2002 command_print(CMD_CTX, "usage: reg <#|name> [value]");
2003
2004 return ERROR_OK;
2005 }
2006
2007 COMMAND_HANDLER(handle_poll_command)
2008 {
2009 int retval = ERROR_OK;
2010 struct target *target = get_current_target(CMD_CTX);
2011
2012 if (CMD_ARGC == 0)
2013 {
2014 command_print(CMD_CTX, "background polling: %s",
2015 jtag_poll_get_enabled() ? "on" : "off");
2016 command_print(CMD_CTX, "TAP: %s (%s)",
2017 target->tap->dotted_name,
2018 target->tap->enabled ? "enabled" : "disabled");
2019 if (!target->tap->enabled)
2020 return ERROR_OK;
2021 if ((retval = target_poll(target)) != ERROR_OK)
2022 return retval;
2023 if ((retval = target_arch_state(target)) != ERROR_OK)
2024 return retval;
2025 }
2026 else if (CMD_ARGC == 1)
2027 {
2028 bool enable;
2029 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2030 jtag_poll_set_enabled(enable);
2031 }
2032 else
2033 {
2034 return ERROR_COMMAND_SYNTAX_ERROR;
2035 }
2036
2037 return retval;
2038 }
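
/* Illustrative usage of the 'poll' handler above:
 *   poll          - report background polling and TAP state, then poll once
 *   poll on|off   - enable or disable background polling
 */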
2039
2040 COMMAND_HANDLER(handle_wait_halt_command)
2041 {
2042 if (CMD_ARGC > 1)
2043 return ERROR_COMMAND_SYNTAX_ERROR;
2044
2045 unsigned ms = 5000;
2046 if (1 == CMD_ARGC)
2047 {
2048 int retval = parse_uint(CMD_ARGV[0], &ms);
2049 if (ERROR_OK != retval)
2050 {
2051 command_print(CMD_CTX, "usage: %s [seconds]", CMD_NAME);
2052 return ERROR_COMMAND_SYNTAX_ERROR;
2053 }
2054 // convert seconds (given) to milliseconds (needed)
2055 ms *= 1000;
2056 }
2057
2058 struct target *target = get_current_target(CMD_CTX);
2059 return target_wait_state(target, TARGET_HALTED, ms);
2060 }
2061
2062 /* wait for target state to change. The trick here is to have a low
2063 * latency for short waits and not to suck up all the CPU time
2064 * on longer waits.
2065 *
2066 * After 500ms, keep_alive() is invoked
2067 */
2068 int target_wait_state(struct target *target, enum target_state state, int ms)
2069 {
2070 int retval;
2071 long long then = 0, cur;
2072 int once = 1;
2073
2074 for (;;)
2075 {
2076 if ((retval = target_poll(target)) != ERROR_OK)
2077 return retval;
2078 if (target->state == state)
2079 {
2080 break;
2081 }
2082 cur = timeval_ms();
2083 if (once)
2084 {
2085 once = 0;
2086 then = timeval_ms();
2087 LOG_DEBUG("waiting for target %s...",
2088 Jim_Nvp_value2name_simple(nvp_target_state,state)->name);
2089 }
2090
2091 if (cur-then > 500)
2092 {
2093 keep_alive();
2094 }
2095
2096 if ((cur-then) > ms)
2097 {
2098 LOG_ERROR("timed out while waiting for target %s",
2099 Jim_Nvp_value2name_simple(nvp_target_state,state)->name);
2100 return ERROR_FAIL;
2101 }
2102 }
2103
2104 return ERROR_OK;
2105 }
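
/* A minimal caller sketch (not taken from this file) showing how the
 * handlers above use target_wait_state():
 *
 *   int retval = target_wait_state(target, TARGET_HALTED, 5000);
 *   if (retval != ERROR_OK)
 *       return retval;   // poll failed or the 5000 ms timeout expired
 *
 * Short waits return with low latency; waits longer than 500 ms start
 * calling keep_alive() so host-side connections are not starved.
 */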
2106
2107 COMMAND_HANDLER(handle_halt_command)
2108 {
2109 LOG_DEBUG("-");
2110
2111 struct target *target = get_current_target(CMD_CTX);
2112 int retval = target_halt(target);
2113 if (ERROR_OK != retval)
2114 return retval;
2115
2116 if (CMD_ARGC == 1)
2117 {
2118 unsigned wait;
2119 retval = parse_uint(CMD_ARGV[0], &wait);
2120 if (ERROR_OK != retval)
2121 return ERROR_COMMAND_SYNTAX_ERROR;
2122 if (!wait)
2123 return ERROR_OK;
2124 }
2125
2126 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2127 }
2128
2129 COMMAND_HANDLER(handle_soft_reset_halt_command)
2130 {
2131 struct target *target = get_current_target(CMD_CTX);
2132
2133 LOG_USER("requesting target halt and executing a soft reset");
2134
2135 target->type->soft_reset_halt(target);
2136
2137 return ERROR_OK;
2138 }
2139
2140 COMMAND_HANDLER(handle_reset_command)
2141 {
2142 if (CMD_ARGC > 1)
2143 return ERROR_COMMAND_SYNTAX_ERROR;
2144
2145 enum target_reset_mode reset_mode = RESET_RUN;
2146 if (CMD_ARGC == 1)
2147 {
2148 const Jim_Nvp *n;
2149 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
2150 if ((n->name == NULL) || (n->value == RESET_UNKNOWN)) {
2151 return ERROR_COMMAND_SYNTAX_ERROR;
2152 }
2153 reset_mode = n->value;
2154 }
2155
2156 /* reset *all* targets */
2157 return target_process_reset(CMD_CTX, reset_mode);
2158 }
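
/* Illustrative usage; the accepted mode names come from nvp_reset_modes:
 *   reset          - same as 'reset run'
 *   reset run      - reset and let all targets run
 *   reset halt     - reset and halt all targets
 *   reset init     - reset, halt, and run the reset-init event handlers
 */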
2159
2160
2161 COMMAND_HANDLER(handle_resume_command)
2162 {
2163 int current = 1;
2164 if (CMD_ARGC > 1)
2165 return ERROR_COMMAND_SYNTAX_ERROR;
2166
2167 struct target *target = get_current_target(CMD_CTX);
2168 target_handle_event(target, TARGET_EVENT_OLD_pre_resume);
2169
2170 /* with no CMD_ARGV, resume from current pc, addr = 0,
2171 * with one argument, addr = CMD_ARGV[0],
2172 * handle breakpoints, not debugging */
2173 uint32_t addr = 0;
2174 if (CMD_ARGC == 1)
2175 {
2176 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2177 current = 0;
2178 }
2179
2180 return target_resume(target, current, addr, 1, 0);
2181 }
2182
2183 COMMAND_HANDLER(handle_step_command)
2184 {
2185 if (CMD_ARGC > 1)
2186 return ERROR_COMMAND_SYNTAX_ERROR;
2187
2188 LOG_DEBUG("-");
2189
2190 /* with no CMD_ARGV, step from current pc, addr = 0,
2191 * with one argument, addr = CMD_ARGV[0],
2192 * handle breakpoints, debugging */
2193 uint32_t addr = 0;
2194 int current_pc = 1;
2195 if (CMD_ARGC == 1)
2196 {
2197 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2198 current_pc = 0;
2199 }
2200
2201 struct target *target = get_current_target(CMD_CTX);
2202
2203 return target->type->step(target, current_pc, addr, 1);
2204 }
2205
2206 static void handle_md_output(struct command_context *cmd_ctx,
2207 struct target *target, uint32_t address, unsigned size,
2208 unsigned count, const uint8_t *buffer)
2209 {
2210 const unsigned line_bytecnt = 32;
2211 unsigned line_modulo = line_bytecnt / size;
2212
2213 char output[line_bytecnt * 4 + 1];
2214 unsigned output_len = 0;
2215
2216 const char *value_fmt;
2217 switch (size) {
2218 case 4: value_fmt = "%8.8x "; break;
2219 case 2: value_fmt = "%4.4x "; break;
2220 case 1: value_fmt = "%2.2x "; break;
2221 default:
2222 LOG_ERROR("invalid memory read size: %u", size);
2223 exit(-1);
2224 }
2225
2226 for (unsigned i = 0; i < count; i++)
2227 {
2228 if (i % line_modulo == 0)
2229 {
2230 output_len += snprintf(output + output_len,
2231 sizeof(output) - output_len,
2232 "0x%8.8x: ",
2233 (unsigned)(address + (i*size)));
2234 }
2235
2236 uint32_t value = 0;
2237 const uint8_t *value_ptr = buffer + i * size;
2238 switch (size) {
2239 case 4: value = target_buffer_get_u32(target, value_ptr); break;
2240 case 2: value = target_buffer_get_u16(target, value_ptr); break;
2241 case 1: value = *value_ptr; break;
2242 }
2243 output_len += snprintf(output + output_len,
2244 sizeof(output) - output_len,
2245 value_fmt, value);
2246
2247 if ((i % line_modulo == line_modulo - 1) || (i == count - 1))
2248 {
2249 command_print(cmd_ctx, "%s", output);
2250 output_len = 0;
2251 }
2252 }
2253 }
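
/* Example of the output format produced above for size == 4
 * (values are illustrative):
 *
 *   0x20000000: deadbeef 00000001 00000002 00000004 ...
 *
 * Each printed line covers line_bytecnt (32) bytes, i.e. 8 words,
 * 16 half-words, or 32 bytes depending on the access size.
 */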
2254
2255 COMMAND_HANDLER(handle_md_command)
2256 {
2257 if (CMD_ARGC < 1)
2258 return ERROR_COMMAND_SYNTAX_ERROR;
2259
2260 unsigned size = 0;
2261 switch (CMD_NAME[2]) {
2262 case 'w': size = 4; break;
2263 case 'h': size = 2; break;
2264 case 'b': size = 1; break;
2265 default: return ERROR_COMMAND_SYNTAX_ERROR;
2266 }
2267
2268 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2269 int (*fn)(struct target *target,
2270 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
2271 if (physical)
2272 {
2273 CMD_ARGC--;
2274 CMD_ARGV++;
2275 fn = target_read_phys_memory;
2276 } else
2277 {
2278 fn = target_read_memory;
2279 }
2280 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
2281 {
2282 return ERROR_COMMAND_SYNTAX_ERROR;
2283 }
2284
2285 uint32_t address;
2286 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2287
2288 unsigned count = 1;
2289 if (CMD_ARGC == 2)
2290 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
2291
2292 uint8_t *buffer = calloc(count, size);
2293 if (buffer == NULL) return ERROR_FAIL;
2294 struct target *target = get_current_target(CMD_CTX);
2295 int retval = fn(target, address, size, count, buffer);
2296 if (ERROR_OK == retval)
2297 handle_md_output(CMD_CTX, target, address, size, count, buffer);
2298
2299 free(buffer);
2300
2301 return retval;
2302 }
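
/* Illustrative usage; the access size comes from the command name's
 * third letter (CMD_NAME[2]):
 *   mdw 0x20000000 8         - display 8 words
 *   mdh 0x20000000 4         - display 4 half-words
 *   mdb phys 0x00000000 16   - display 16 bytes via physical addressing
 */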
2303
2304 COMMAND_HANDLER(handle_mw_command)
2305 {
2306 if (CMD_ARGC < 2)
2307 {
2308 return ERROR_COMMAND_SYNTAX_ERROR;
2309 }
2310 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2311 int (*fn)(struct target *target,
2312 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
2313 if (physical)
2314 {
2315 CMD_ARGC--;
2316 CMD_ARGV++;
2317 fn = target_write_phys_memory;
2318 } else
2319 {
2320 fn = target_write_memory;
2321 }
2322 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
2323 return ERROR_COMMAND_SYNTAX_ERROR;
2324
2325 uint32_t address;
2326 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2327
2328 uint32_t value;
2329 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2330
2331 unsigned count = 1;
2332 if (CMD_ARGC == 3)
2333 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
2334
2335 struct target *target = get_current_target(CMD_CTX);
2336 unsigned wordsize;
2337 uint8_t value_buf[4];
2338 switch (CMD_NAME[2])
2339 {
2340 case 'w':
2341 wordsize = 4;
2342 target_buffer_set_u32(target, value_buf, value);
2343 break;
2344 case 'h':
2345 wordsize = 2;
2346 target_buffer_set_u16(target, value_buf, value);
2347 break;
2348 case 'b':
2349 wordsize = 1;
2350 value_buf[0] = value;
2351 break;
2352 default:
2353 return ERROR_COMMAND_SYNTAX_ERROR;
2354 }
2355 for (unsigned i = 0; i < count; i++)
2356 {
2357 int retval = fn(target,
2358 address + i * wordsize, wordsize, 1, value_buf);
2359 if (ERROR_OK != retval)
2360 return retval;
2361 keep_alive();
2362 }
2363
2364 return ERROR_OK;
2365
2366 }
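
/* Illustrative usage; like 'md', the word size is taken from CMD_NAME[2]:
 *   mww 0x20000000 0xdeadbeef     - write one 32-bit word
 *   mww 0x20000000 0x0 16         - fill 16 consecutive words with zero
 *   mwb phys 0x00000000 0xff      - write one byte via physical addressing
 */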
2367
2368 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
2369 uint32_t *min_address, uint32_t *max_address)
2370 {
2371 if (CMD_ARGC < 1 || CMD_ARGC > 5)
2372 return ERROR_COMMAND_SYNTAX_ERROR;
2373
2374 /* a base address isn't always necessary,
2375 * default to 0x0 (i.e. don't relocate) */
2376 if (CMD_ARGC >= 2)
2377 {
2378 uint32_t addr;
2379 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
2380 image->base_address = addr;
2381 image->base_address_set = 1;
2382 }
2383 else
2384 image->base_address_set = 0;
2385
2386 image->start_address_set = 0;
2387
2388 if (CMD_ARGC >= 4)
2389 {
2390 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], *min_address);
2391 }
2392 if (CMD_ARGC == 5)
2393 {
2394 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], *max_address);
2395 // use size (given) to find max (required)
2396 *max_address += *min_address;
2397 }
2398
2399 if (*min_address > *max_address)
2400 return ERROR_COMMAND_SYNTAX_ERROR;
2401
2402 return ERROR_OK;
2403 }
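
/* Argument layout parsed by the helper above (illustrative):
 *   load_image <file> [base_address [type [min_address [size]]]]
 * 'size' is turned into max_address = min_address + size; sections
 * falling outside [min_address, max_address) are clipped by the
 * handler below.
 */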
2404
2405 COMMAND_HANDLER(handle_load_image_command)
2406 {
2407 uint8_t *buffer;
2408 size_t buf_cnt;
2409 uint32_t image_size;
2410 uint32_t min_address = 0;
2411 uint32_t max_address = 0xffffffff;
2412 int i;
2413 struct image image;
2414
2415 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
2416 &image, &min_address, &max_address);
2417 if (ERROR_OK != retval)
2418 return retval;
2419
2420 struct target *target = get_current_target(CMD_CTX);
2421
2422 struct duration bench;
2423 duration_start(&bench);
2424
2425 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
2426 {
2427 return ERROR_OK;
2428 }
2429
2430 image_size = 0x0;
2431 retval = ERROR_OK;
2432 for (i = 0; i < image.num_sections; i++)
2433 {
2434 buffer = malloc(image.sections[i].size);
2435 if (buffer == NULL)
2436 {
2437 command_print(CMD_CTX,
2438 "error allocating buffer for section (%d bytes)",
2439 (int)(image.sections[i].size));
2440 break;
2441 }
2442
2443 if ((retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt)) != ERROR_OK)
2444 {
2445 free(buffer);
2446 break;
2447 }
2448
2449 uint32_t offset = 0;
2450 uint32_t length = buf_cnt;
2451
2452 /* DANGER!!! beware of unsigned comparison here!!! */
2453
2454 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
2455 (image.sections[i].base_address < max_address))
2456 {
2457 if (image.sections[i].base_address < min_address)
2458 {
2459 /* clip addresses below */
2460 offset += min_address-image.sections[i].base_address;
2461 length -= offset;
2462 }
2463
2464 if (image.sections[i].base_address + buf_cnt > max_address)
2465 {
2466 length -= (image.sections[i].base_address + buf_cnt)-max_address;
2467 }
2468
2469 if ((retval = target_write_buffer(target, image.sections[i].base_address + offset, length, buffer + offset)) != ERROR_OK)
2470 {
2471 free(buffer);
2472 break;
2473 }
2474 image_size += length;
2475 command_print(CMD_CTX, "%u bytes written at address 0x%8.8" PRIx32 "",
2476 (unsigned int)length,
2477 image.sections[i].base_address + offset);
2478 }
2479
2480 free(buffer);
2481 }
2482
2483 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK))
2484 {
2485 command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
2486 "in %fs (%0.3f kb/s)", image_size,
2487 duration_elapsed(&bench), duration_kbps(&bench, image_size));
2488 }
2489
2490 image_close(&image);
2491
2492 return retval;
2493
2494 }
2495
2496 COMMAND_HANDLER(handle_dump_image_command)
2497 {
2498 struct fileio fileio;
2499
2500 uint8_t buffer[560];
2501 int retvaltemp;
2502
2503
2504 struct target *target = get_current_target(CMD_CTX);
2505
2506 if (CMD_ARGC != 3)
2507 {
2508 command_print(CMD_CTX, "usage: dump_image <filename> <address> <size>");
2509 return ERROR_OK;
2510 }
2511
2512 uint32_t address;
2513 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], address);
2514 uint32_t size;
2515 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], size);
2516
2517 if (fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
2518 {
2519 return ERROR_OK;
2520 }
2521
2522 struct duration bench;
2523 duration_start(&bench);
2524
2525 int retval = ERROR_OK;
2526 while (size > 0)
2527 {
2528 size_t size_written;
2529 uint32_t this_run_size = (size > 560) ? 560 : size;
2530 retval = target_read_buffer(target, address, this_run_size, buffer);
2531 if (retval != ERROR_OK)
2532 {
2533 break;
2534 }
2535
2536 retval = fileio_write(&fileio, this_run_size, buffer, &size_written);
2537 if (retval != ERROR_OK)
2538 {
2539 break;
2540 }
2541
2542 size -= this_run_size;
2543 address += this_run_size;
2544 }
2545
2546 if ((retvaltemp = fileio_close(&fileio)) != ERROR_OK)
2547 return retvaltemp;
2548
2549 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK))
2550 {
2551 command_print(CMD_CTX,
2552 "dumped %zu bytes in %fs (%0.3f kb/s)", fileio.size,
2553 duration_elapsed(&bench), duration_kbps(&bench, fileio.size));
2554 }
2555
2556 return retval;
2557 }
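
/* Illustrative usage:
 *   dump_image sram.bin 0x20000000 0x4000
 * reads 0x4000 bytes starting at 0x20000000 in 560-byte chunks
 * (the size of the on-stack buffer) and writes them to sram.bin.
 */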
2558
2559 static COMMAND_HELPER(handle_verify_image_command_internal, int verify)
2560 {
2561 uint8_t *buffer;
2562 size_t buf_cnt;
2563 uint32_t image_size;
2564 int i;
2565 int retval;
2566 uint32_t checksum = 0;
2567 uint32_t mem_checksum = 0;
2568
2569 struct image image;
2570
2571 struct target *target = get_current_target(CMD_CTX);
2572
2573 if (CMD_ARGC < 1)
2574 {
2575 return ERROR_COMMAND_SYNTAX_ERROR;
2576 }
2577
2578 if (!target)
2579 {
2580 LOG_ERROR("no target selected");
2581 return ERROR_FAIL;
2582 }
2583
2584 struct duration bench;
2585 duration_start(&bench);
2586
2587 if (CMD_ARGC >= 2)
2588 {
2589 uint32_t addr;
2590 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
2591 image.base_address = addr;
2592 image.base_address_set = 1;
2593 }
2594 else
2595 {
2596 image.base_address_set = 0;
2597 image.base_address = 0x0;
2598 }
2599
2600 image.start_address_set = 0;
2601
2602 if ((retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL)) != ERROR_OK)
2603 {
2604 return retval;
2605 }
2606
2607 image_size = 0x0;
2608 retval = ERROR_OK;
2609 for (i = 0; i < image.num_sections; i++)
2610 {
2611 buffer = malloc(image.sections[i].size);
2612 if (buffer == NULL)
2613 {
2614 command_print(CMD_CTX,
2615 "error allocating buffer for section (%d bytes)",
2616 (int)(image.sections[i].size));
2617 break;
2618 }
2619 if ((retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt)) != ERROR_OK)
2620 {
2621 free(buffer);
2622 break;
2623 }
2624
2625 if (verify)
2626 {
2627 /* calculate checksum of image */
2628 image_calculate_checksum(buffer, buf_cnt, &checksum);
2629
2630 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
2631 if (retval != ERROR_OK)
2632 {
2633 free(buffer);
2634 break;
2635 }
2636
2637 if (checksum != mem_checksum)
2638 {
2639 /* failed crc checksum, fall back to a binary compare */
2640 uint8_t *data;
2641
2642 command_print(CMD_CTX, "checksum mismatch - attempting binary compare");
2643
2644 data = (uint8_t*)malloc(buf_cnt);
2645 if (data == NULL) { free(buffer); retval = ERROR_FAIL; break; }
2646 /* Can we use 32bit word accesses? */
2647 int size = 1;
2648 int count = buf_cnt;
2649 if ((count % 4) == 0)
2650 {
2651 size *= 4;
2652 count /= 4;
2653 }
2654 retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
2655 if (retval == ERROR_OK)
2656 {
2657 uint32_t t;
2658 for (t = 0; t < buf_cnt; t++)
2659 {
2660 if (data[t] != buffer[t])
2661 {
2662 command_print(CMD_CTX,
2663 "Verify operation failed address 0x%08x. Was 0x%02x instead of 0x%02x\n",
2664 (unsigned)(t + image.sections[i].base_address),
2665 data[t],
2666 buffer[t]);
2667 free(data);
2668 free(buffer);
2669 retval = ERROR_FAIL;
2670 goto done;
2671 }
2672 if ((t%16384) == 0)
2673 {
2674 keep_alive();
2675 }
2676 }
2677 }
2678
2679 free(data);
2680 }
2681 } else
2682 {
2683 command_print(CMD_CTX, "address 0x%08" PRIx32 " length 0x%08zx",
2684 image.sections[i].base_address,
2685 buf_cnt);
2686 }
2687
2688 free(buffer);
2689 image_size += buf_cnt;
2690 }
2691 done:
2692 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK))
2693 {
2694 command_print(CMD_CTX, "verified %" PRIu32 " bytes "
2695 "in %fs (%0.3f kb/s)", image_size,
2696 duration_elapsed(&bench), duration_kbps(&bench, image_size));
2697 }
2698
2699 image_close(&image);
2700
2701 return retval;
2702 }
2703
2704 COMMAND_HANDLER(handle_verify_image_command)
2705 {
2706 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 1);
2707 }
2708
2709 COMMAND_HANDLER(handle_test_image_command)
2710 {
2711 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 0);
2712 }
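
/* Both commands share handle_verify_image_command_internal (illustrative):
 *   verify_image <file> [address [type]] - checksum each section against
 *       target memory, falling back to a byte-by-byte compare on mismatch
 *   test_image <file> [address [type]]   - only parse the image and print
 *       each section's address and length
 */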
2713
2714 static int handle_bp_command_list(struct command_context *cmd_ctx)
2715 {
2716 struct target *target = get_current_target(cmd_ctx);
2717 struct breakpoint *breakpoint = target->breakpoints;
2718 while (breakpoint)
2719 {
2720 if (breakpoint->type == BKPT_SOFT)
2721 {
2722 char* buf = buf_to_str(breakpoint->orig_instr,
2723 breakpoint->length, 16);
2724 command_print(cmd_ctx, "0x%8.8" PRIx32 ", 0x%x, %i, 0x%s",
2725 breakpoint->address,
2726 breakpoint->length,
2727 breakpoint->set, buf);
2728 free(buf);
2729 }
2730 else
2731 {
2732 command_print(cmd_ctx, "0x%8.8" PRIx32 ", 0x%x, %i",
2733 breakpoint->address,
2734 breakpoint->length, breakpoint->set);
2735 }
2736
2737 breakpoint = breakpoint->next;
2738 }
2739 return ERROR_OK;
2740 }
2741
2742 static int handle_bp_command_set(struct command_context *cmd_ctx,
2743 uint32_t addr, uint32_t length, int hw)
2744 {
2745 struct target *target = get_current_target(cmd_ctx);
2746 int retval = breakpoint_add(target, addr, length, hw);
2747 if (ERROR_OK == retval)
2748 command_print(cmd_ctx, "breakpoint set at 0x%8.8" PRIx32 "", addr);
2749 else
2750 LOG_ERROR("Failure setting breakpoint");
2751 return retval;
2752 }
2753
2754 COMMAND_HANDLER(handle_bp_command)
2755 {
2756 if (CMD_ARGC == 0)
2757 return handle_bp_command_list(CMD_CTX);
2758
2759 if (CMD_ARGC < 2 || CMD_ARGC > 3)
2760 {
2761 command_print(CMD_CTX, "usage: bp <address> <length> ['hw']");
2762 return ERROR_COMMAND_SYNTAX_ERROR;
2763 }
2764
2765 uint32_t addr;
2766 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2767 uint32_t length;
2768 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
2769
2770 int hw = BKPT_SOFT;
2771 if (CMD_ARGC == 3)
2772 {
2773 if (strcmp(CMD_ARGV[2], "hw") == 0)
2774 hw = BKPT_HARD;
2775 else
2776 return ERROR_COMMAND_SYNTAX_ERROR;
2777 }
2778
2779 return handle_bp_command_set(CMD_CTX, addr, length, hw);
2780 }
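
/* Illustrative usage of the breakpoint commands:
 *   bp                   - list current breakpoints
 *   bp 0x00000100 4      - set a software breakpoint, length 4
 *   bp 0x00000100 4 hw   - set a hardware breakpoint instead
 *   rbp 0x00000100       - remove the breakpoint at that address
 */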
2781
2782 COMMAND_HANDLER(handle_rbp_command)
2783 {
2784 if (CMD_ARGC != 1)
2785 return ERROR_COMMAND_SYNTAX_ERROR;
2786
2787 uint32_t addr;
2788 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2789
2790 struct target *target = get_current_target(CMD_CTX);
2791 breakpoint_remove(target, addr);
2792
2793 return ERROR_OK;
2794 }
2795
2796 COMMAND_HANDLER(handle_wp_command)
2797 {
2798 struct target *target = get_current_target(CMD_CTX);
2799
2800 if (CMD_ARGC == 0)
2801 {
2802 struct watchpoint *watchpoint = target->watchpoints;
2803
2804 while (watchpoint)
2805 {
2806 command_print(CMD_CTX, "address: 0x%8.8" PRIx32
2807 ", len: 0x%8.8" PRIx32
2808 ", r/w/a: %i, value: 0x%8.8" PRIx32
2809 ", mask: 0x%8.8" PRIx32,
2810 watchpoint->address,
2811 watchpoint->length,
2812 (int)watchpoint->rw,
2813 watchpoint->value,
2814 watchpoint->mask);
2815 watchpoint = watchpoint->next;
2816 }
2817 return ERROR_OK;
2818 }
2819
2820 enum watchpoint_rw type = WPT_ACCESS;
2821 uint32_t addr = 0;
2822 uint32_t length = 0;
2823 uint32_t data_value = 0x0;
2824 uint32_t data_mask = 0xffffffff;
2825
2826 switch (CMD_ARGC)
2827 {
2828 case 5:
2829 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
2830 // fall through
2831 case 4:
2832 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
2833 // fall through
2834 case 3:
2835 switch (CMD_ARGV[2][0])
2836 {
2837 case 'r':
2838 type = WPT_READ;
2839 break;
2840 case 'w':
2841 type = WPT_WRITE;
2842 break;
2843 case 'a':
2844 type = WPT_ACCESS;
2845 break;
2846 default:
2847 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
2848 return ERROR_COMMAND_SYNTAX_ERROR;
2849 }
2850 // fall through
2851 case 2:
2852 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
2853 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2854 break;
2855
2856 default:
2857 command_print(CMD_CTX, "usage: wp [address length "
2858 "[(r|w|a) [value [mask]]]]");
2859 return ERROR_COMMAND_SYNTAX_ERROR;
2860 }
2861
2862 int retval = watchpoint_add(target, addr, length, type,
2863 data_value, data_mask);
2864 if (ERROR_OK != retval)
2865 LOG_ERROR("Failure setting watchpoints");
2866
2867 return retval;
2868 }
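
/* Illustrative usage of the watchpoint command:
 *   wp                            - list current watchpoints
 *   wp 0x20000400 4               - watch 4 bytes for any access
 *   wp 0x20000400 4 w             - watch writes only
 *   wp 0x20000400 4 r 0x55 0xff   - optionally match a data value/mask
 *                                   (interpretation is target-specific)
 */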
2869
2870 COMMAND_HANDLER(handle_rwp_command)
2871 {
2872 if (CMD_ARGC != 1)
2873 return ERROR_COMMAND_SYNTAX_ERROR;
2874
2875 uint32_t addr;
2876 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2877
2878 struct target *target = get_current_target(CMD_CTX);
2879 watchpoint_remove(target, addr);
2880
2881 return ERROR_OK;
2882 }
2883
2884
2885 /**
2886 * Translate a virtual address to a physical address.
2887 *
2888 * If the translation fails, the low-level target implementation must have
2889 * logged a detailed error, which is forwarded to the telnet/GDB session.
2890 */
2891 COMMAND_HANDLER(handle_virt2phys_command)
2892 {
2893 if (CMD_ARGC != 1)
2894 return ERROR_COMMAND_SYNTAX_ERROR;
2895
2896 uint32_t va;
2897 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], va);
2898 uint32_t pa;
2899
2900 struct target *target = get_current_target(CMD_CTX);
2901 int retval = target->type->virt2phys(target, va, &pa);
2902 if (retval == ERROR_OK)
2903 command_print(CMD_CTX, "Physical address 0x%08" PRIx32 "", pa);
2904
2905 return retval;
2906 }
2907
2908 static void writeData(FILE *f, const void *data, size_t len)
2909 {
2910 size_t written = fwrite(data, 1, len, f);
2911 if (written != len)
2912 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
2913 }
2914
2915 static void writeLong(FILE *f, int l)
2916 {
2917 int i;
2918 for (i = 0; i < 4; i++)
2919 {
2920 char c = (l >> (i*8)) & 0xff;
2921 writeData(f, &c, 1);
2922 }
2923
2924 }
2925
2926 static void writeString(FILE *f, char *s)
2927 {
2928 writeData(f, s, strlen(s));
2929 }
2930
2931 /* Dump a gmon.out histogram file. */
2932 static void writeGmon(uint32_t *samples, uint32_t sampleNum, const char *filename)
2933 {
2934 uint32_t i;
2935 FILE *f = fopen(filename, "w");
2936 if (f == NULL)
2937 return;
2938 writeString(f, "gmon");
2939 writeLong(f, 0x00000001); /* Version */
2940 writeLong(f, 0); /* padding */
2941 writeLong(f, 0); /* padding */
2942 writeLong(f, 0); /* padding */
2943
2944 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
2945 writeData(f, &zero, 1);
2946
2947 /* figure out bucket size */
2948 uint32_t min = samples[0];
2949 uint32_t max = samples[0];
2950 for (i = 0; i < sampleNum; i++)
2951 {
2952 if (min > samples[i])
2953 {
2954 min = samples[i];
2955 }
2956 if (max < samples[i])
2957 {
2958 max = samples[i];
2959 }
2960 }
2961
2962 int addressSpace = (max-min + 1);
2963
2964 static const uint32_t maxBuckets = 256 * 1024; /* maximum buckets. */
2965 uint32_t length = addressSpace;
2966 if (length > maxBuckets)
2967 {
2968 length = maxBuckets;
2969 }
2970 int *buckets = malloc(sizeof(int)*length);
2971 if (buckets == NULL)
2972 {
2973 fclose(f);
2974 return;
2975 }
2976 memset(buckets, 0, sizeof(int)*length);
2977 for (i = 0; i < sampleNum; i++)
2978 {
2979 uint32_t address = samples[i];
2980 long long a = address-min;
2981 long long b = length-1;
2982 long long c = addressSpace-1;
2983 int index = (a*b)/c; /* long long math, since a*b can overflow 32 bits */
2984 buckets[index]++;
2985 }
2986
2987 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
2988 writeLong(f, min); /* low_pc */
2989 writeLong(f, max); /* high_pc */
2990 writeLong(f, length); /* # of samples */
2991 writeLong(f, 64000000); /* 64MHz */
2992 writeString(f, "seconds");
2993 for (i = 0; i < (15-strlen("seconds")); i++)
2994 writeData(f, &zero, 1);
2995 writeString(f, "s");
2996
2997 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
2998
2999 char *data = malloc(2*length);
3000 if (data != NULL)
3001 {
3002 for (i = 0; i < length; i++)
3003 {
3004 int val;
3005 val = buckets[i];
3006 if (val > 65535)
3007 {
3008 val = 65535;
3009 }
3010 data[i*2] = val & 0xff;
3011 data[i*2 + 1] = (val >> 8) & 0xff;
3012 }
3013 free(buckets);
3014 writeData(f, data, length * 2);
3015 free(data);
3016 } else
3017 {
3018 free(buckets);
3019 }
3020
3021 fclose(f);
3022 }
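
/* Layout of the gmon.out file written above, for reference:
 *   "gmon", version word 1, three zero padding words;
 *   a zero tag byte (GMON_TAG_TIME_HIST) followed by the histogram header:
 *     low_pc (min), high_pc (max), number of buckets, profiling rate
 *     (64000000 here), the dimension string "seconds" padded to 15 bytes,
 *     and the abbreviation 's';
 *   then one little-endian 16-bit counter per bucket, clamped at 65535.
 * gprof maps each bucket back onto its slice of [low_pc, high_pc).
 */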
3023
3024 /* Profiling samples the CPU PC as quickly as OpenOCD can, giving an approximately random sampling of the PC. */
3025 COMMAND_HANDLER(handle_profile_command)
3026 {
3027 struct target *target = get_current_target(CMD_CTX);
3028 struct timeval timeout, now;
3029
3030 gettimeofday(&timeout, NULL);
3031 if (CMD_ARGC != 2)
3032 {
3033 return ERROR_COMMAND_SYNTAX_ERROR;
3034 }
3035 unsigned offset;
3036 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], offset);
3037
3038 timeval_add_time(&timeout, offset, 0);
3039
3040 command_print(CMD_CTX, "Starting profiling. Halting and resuming the target as often as we can...");
3041
3042 static const int maxSample = 10000;
3043 uint32_t *samples = malloc(sizeof(uint32_t)*maxSample);
3044 if (samples == NULL)
3045 return ERROR_OK;
3046
3047 int numSamples = 0;
3048 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
3049 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
3050
3051 for (;;)
3052 {
3053 int retval;
3054 target_poll(target);
3055 if (target->state == TARGET_HALTED)
3056 {
3057 uint32_t t = *((uint32_t *)reg->value);
3058 samples[numSamples++] = t;
3059 retval = target_resume(target, 1, 0, 0, 0); /* current pc, addr = 0, do not handle breakpoints, not debugging */
3060 target_poll(target);
3061 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
3062 } else if (target->state == TARGET_RUNNING)
3063 {
3064 /* We want to quickly sample the PC. */
3065 if ((retval = target_halt(target)) != ERROR_OK)
3066 {
3067 free(samples);
3068 return retval;
3069 }
3070 } else
3071 {
3072 command_print(CMD_CTX, "Target not halted or running");
3073 retval = ERROR_OK;
3074 break;
3075 }
3076 if (retval != ERROR_OK)
3077 {
3078 break;
3079 }
3080
3081 gettimeofday(&now, NULL);
3082 if ((numSamples >= maxSample) || (now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec >= timeout.tv_usec)))
3083 {
3084 command_print(CMD_CTX, "Profiling completed. %d samples.", numSamples);
3085 if ((retval = target_poll(target)) != ERROR_OK)
3086 {
3087 free(samples);
3088 return retval;
3089 }
3090 if (target->state == TARGET_HALTED)
3091 {
3092 target_resume(target, 1, 0, 0, 0); /* current pc, addr = 0, do not handle breakpoints, not debugging */
3093 }
3094 if ((retval = target_poll(target)) != ERROR_OK)
3095 {
3096 free(samples);
3097 return retval;
3098 }
3099 writeGmon(samples, numSamples, CMD_ARGV[1]);
3100 command_print(CMD_CTX, "Wrote %s", CMD_ARGV[1]);
3101 break;
3102 }
3103 }
3104 free(samples);
3105
3106 return ERROR_OK;
3107 }
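
/* Illustrative usage:
 *   profile 30 gmon.out
 * samples the PC for roughly 30 seconds (or until 10000 samples have been
 * collected), then writes a gmon.out histogram for offline analysis.
 */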
3108
3109 static int new_int_array_element(Jim_Interp * interp, const char *varname, int idx, uint32_t val)
3110 {
3111 char *namebuf;
3112 Jim_Obj *nameObjPtr, *valObjPtr;
3113 int result;
3114
3115 namebuf = alloc_printf("%s(%d)", varname, idx);
3116 if (!namebuf)
3117 return JIM_ERR;
3118
3119 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
3120 valObjPtr = Jim_NewIntObj(interp, val);
3121 if (!nameObjPtr || !valObjPtr)
3122 {
3123 free(namebuf);
3124 return JIM_ERR;
3125 }
3126
3127 Jim_IncrRefCount(nameObjPtr);
3128 Jim_IncrRefCount(valObjPtr);
3129 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
3130 Jim_DecrRefCount(interp, nameObjPtr);
3131 Jim_DecrRefCount(interp, valObjPtr);
3132 free(namebuf);
3133 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
3134 return result;
3135 }
3136
3137 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
3138 {
3139 struct command_context *context;
3140 struct target *target;
3141
3142 context = Jim_GetAssocData(interp, "context");
3143 if (context == NULL)
3144 {
3145 LOG_ERROR("mem2array: no command context");
3146 return JIM_ERR;
3147 }
3148 target = get_current_target(context);
3149 if (target == NULL)
3150 {
3151 LOG_ERROR("mem2array: no current target");
3152 return JIM_ERR;
3153 }
3154
3155 return target_mem2array(interp, target, argc-1, argv + 1);
3156 }
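
/* Illustrative Tcl-level usage of the command dispatched above:
 *   mem2array myarray 32 0x20000000 16
 * reads 16 elements of 32 bits each starting at 0x20000000 into
 * myarray(0) .. myarray(15).
 */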
3157
3158 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
3159 {
3160 long l;
3161 uint32_t width;
3162 int len;
3163 uint32_t addr;
3164 uint32_t count;
3165 uint32_t v;
3166 const char *varname;
3167 int n, e, retval;
3168 uint32_t i;
3169
3170 /* argv[1] = name of array to receive the data
3171 * argv[2] = desired width
3172 * argv[3] = memory address
3173 * argv[4] = count of times to read
3174 */
3175 if (argc != 4) {
3176 Jim_WrongNumArgs(interp, 1, argv, "varname width addr nelems");
3177 return JIM_ERR;
3178 }
3179 varname = Jim_GetString(argv[0], &len);
3180 /* given "foo" get space for worst case "foo(%d)" .. add 20 */
3181
3182 e = Jim_GetLong(interp, argv[1], &l);
3183 width = l;
3184 if (e != JIM_OK) {
3185 return e;
3186 }
3187
3188 e = Jim_GetLong(interp, argv[2], &l);
3189 addr = l;
3190 if (e != JIM_OK) {
3191 return e;
3192 }
3193 e = Jim_GetLong(interp, argv[3], &l);
3194 len = l;
3195 if (e != JIM_OK) {
3196 return e;
3197 }
3198 switch (width) {
3199 case 8:
3200 width = 1;
3201 break;