e964f522488c6bc41f807088bb554660b6b301b8
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 √ėyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net> *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
40 ***************************************************************************/
41
42 #ifdef HAVE_CONFIG_H
43 #include "config.h"
44 #endif
45
46 #include <helper/time_support.h>
47 #include <jtag/jtag.h>
48 #include <flash/nor/core.h>
49
50 #include "target.h"
51 #include "target_type.h"
52 #include "target_request.h"
53 #include "breakpoints.h"
54 #include "register.h"
55 #include "trace.h"
56 #include "image.h"
57 #include "rtos/rtos.h"
58
59 static int target_read_buffer_default(struct target *target, uint32_t address,
60 uint32_t size, uint8_t *buffer);
61 static int target_write_buffer_default(struct target *target, uint32_t address,
62 uint32_t size, const uint8_t *buffer);
63 static int target_array2mem(Jim_Interp *interp, struct target *target,
64 int argc, Jim_Obj * const *argv);
65 static int target_mem2array(Jim_Interp *interp, struct target *target,
66 int argc, Jim_Obj * const *argv);
67 static int target_register_user_commands(struct command_context *cmd_ctx);
68
69 /* targets */
70 extern struct target_type arm7tdmi_target;
71 extern struct target_type arm720t_target;
72 extern struct target_type arm9tdmi_target;
73 extern struct target_type arm920t_target;
74 extern struct target_type arm966e_target;
75 extern struct target_type arm946e_target;
76 extern struct target_type arm926ejs_target;
77 extern struct target_type fa526_target;
78 extern struct target_type feroceon_target;
79 extern struct target_type dragonite_target;
80 extern struct target_type xscale_target;
81 extern struct target_type cortexm3_target;
82 extern struct target_type cortexa8_target;
83 extern struct target_type arm11_target;
84 extern struct target_type mips_m4k_target;
85 extern struct target_type avr_target;
86 extern struct target_type dsp563xx_target;
87 extern struct target_type dsp5680xx_target;
88 extern struct target_type testee_target;
89 extern struct target_type avr32_ap7k_target;
90 extern struct target_type hla_target;
91
/* Table of all target type drivers compiled into this build.
 * NULL-terminated so it can be scanned without a separate count. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm3_target,
	&cortexa8_target,
	&arm11_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	NULL,
};
116
/* Head of the singly linked list of all configured targets. */
struct target *all_targets;
/* Registered callbacks fired on target events (halt, resume, reset, ...). */
static struct target_event_callback *target_event_callbacks;
/* Registered periodic/one-shot timer callbacks. */
static struct target_timer_callback *target_timer_callbacks;
/* Background polling period, in milliseconds. */
static const int polling_interval = 100;

/* Keyword table mapping assert/deassert spellings (including the Tcl-ish
 * T/F and t/f shorthands) to NVP_ASSERT/NVP_DEASSERT. */
static const Jim_Nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};

/* Maps ERROR_TARGET_* codes to short printable names (see
 * target_strerror_safe()). */
static const Jim_Nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
146
147 static const char *target_strerror_safe(int err)
148 {
149 const Jim_Nvp *n;
150
151 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
152 if (n->name == NULL)
153 return "unknown";
154 else
155 return n->name;
156 }
157
/* Maps TARGET_EVENT_* codes to the event names usable from Tcl
 * (e.g. "reset-init" scripts). */
static const Jim_Nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_HALT_PRE, .name = "reset-halt-pre" },
	{ .value = TARGET_EVENT_RESET_HALT_POST, .name = "reset-halt-post" },
	{ .value = TARGET_EVENT_RESET_WAIT_PRE, .name = "reset-wait-pre" },
	{ .value = TARGET_EVENT_RESET_WAIT_POST, .name = "reset-wait-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },

	{ .name = NULL, .value = -1 }
};

/* Maps TARGET_* run-state codes to printable names (see target_state_name()). */
static const Jim_Nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};

/* Maps DBG_REASON_* codes to printable names (see debug_reason_name()). */
static const Jim_Nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request" , .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step" , .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED },
	{ .name = "undefined" , .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};

/* Endianness keywords; both long ("big"/"little") and short ("be"/"le")
 * spellings are accepted. */
static const Jim_Nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};

/* Reset mode keywords for the "reset" command. */
static const Jim_Nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run" , .value = RESET_RUN },
	{ .name = "halt" , .value = RESET_HALT },
	{ .name = "init" , .value = RESET_INIT },
	{ .name = NULL , .value = -1 },
};
235
236 const char *debug_reason_name(struct target *t)
237 {
238 const char *cp;
239
240 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
241 t->debug_reason)->name;
242 if (!cp) {
243 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
244 cp = "(*BUG*unknown*BUG*)";
245 }
246 return cp;
247 }
248
249 const char *target_state_name(struct target *t)
250 {
251 const char *cp;
252 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
253 if (!cp) {
254 LOG_ERROR("Invalid target state: %d", (int)(t->state));
255 cp = "(*BUG*unknown*BUG*)";
256 }
257 return cp;
258 }
259
260 /* determine the number of the new target */
261 static int new_target_number(void)
262 {
263 struct target *t;
264 int x;
265
266 /* number is 0 based */
267 x = -1;
268 t = all_targets;
269 while (t) {
270 if (x < t->target_number)
271 x = t->target_number;
272 t = t->next;
273 }
274 return x + 1;
275 }
276
/* read a uint32_t from a buffer in target memory endianness */
uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u32(buffer);
	else
		return be_to_h_u32(buffer);
}

/* read a 24-bit unsigned value from a buffer in target memory endianness */
uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u24(buffer);
	else
		return be_to_h_u24(buffer);
}

/* read a uint16_t from a buffer in target memory endianness */
uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u16(buffer);
	else
		return be_to_h_u16(buffer);
}

/* read a uint8_t from a buffer; single bytes have no endianness,
 * the target parameter is only kept for interface symmetry */
static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
{
	return *buffer & 0x0ff;
}

/* write a uint32_t to a buffer in target memory endianness */
void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u32_to_le(buffer, value);
	else
		h_u32_to_be(buffer, value);
}

/* write a 24-bit unsigned value to a buffer in target memory endianness */
void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u24_to_le(buffer, value);
	else
		h_u24_to_be(buffer, value);
}

/* write a uint16_t to a buffer in target memory endianness */
void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u16_to_le(buffer, value);
	else
		h_u16_to_be(buffer, value);
}

/* write a uint8_t to a buffer; single bytes have no endianness */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	*buffer = value;
}
342
/* read a uint32_t array from a buffer in target memory endianness
 * (original comment said "write" — these two are the read direction) */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
}

/* read a uint16_t array from a buffer in target memory endianness */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
}

/* write a uint32_t array to a buffer in target memory endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, uint32_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
}

/* write a uint16_t array to a buffer in target memory endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, uint16_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
}
374
375 /* return a pointer to a configured target; id is name or number */
376 struct target *get_target(const char *id)
377 {
378 struct target *target;
379
380 /* try as tcltarget name */
381 for (target = all_targets; target; target = target->next) {
382 if (target_name(target) == NULL)
383 continue;
384 if (strcmp(id, target_name(target)) == 0)
385 return target;
386 }
387
388 /* It's OK to remove this fallback sometime after August 2010 or so */
389
390 /* no match, try as number */
391 unsigned num;
392 if (parse_uint(id, &num) != ERROR_OK)
393 return NULL;
394
395 for (target = all_targets; target; target = target->next) {
396 if (target->target_number == (int)num) {
397 LOG_WARNING("use '%s' as target identifier, not '%u'",
398 target_name(target), num);
399 return target;
400 }
401 }
402
403 return NULL;
404 }
405
406 /* returns a pointer to the n-th configured target */
407 static struct target *get_target_by_num(int num)
408 {
409 struct target *target = all_targets;
410
411 while (target) {
412 if (target->target_number == num)
413 return target;
414 target = target->next;
415 }
416
417 return NULL;
418 }
419
/* Return the target currently selected in the command context.
 * The stored current_target number must always resolve to a real
 * target; if it doesn't, internal state is corrupt and we abort
 * the process rather than continue with a bogus target. */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_target_by_num(cmd_ctx->current_target);

	if (target == NULL) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return target;
}
431
432 int target_poll(struct target *target)
433 {
434 int retval;
435
436 /* We can't poll until after examine */
437 if (!target_was_examined(target)) {
438 /* Fail silently lest we pollute the log */
439 return ERROR_FAIL;
440 }
441
442 retval = target->type->poll(target);
443 if (retval != ERROR_OK)
444 return retval;
445
446 if (target->halt_issued) {
447 if (target->state == TARGET_HALTED)
448 target->halt_issued = false;
449 else {
450 long long t = timeval_ms() - target->halt_issued_time;
451 if (t > 1000) {
452 target->halt_issued = false;
453 LOG_INFO("Halt timed out, wake up GDB.");
454 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
455 }
456 }
457 }
458
459 return ERROR_OK;
460 }
461
462 int target_halt(struct target *target)
463 {
464 int retval;
465 /* We can't poll until after examine */
466 if (!target_was_examined(target)) {
467 LOG_ERROR("Target not examined yet");
468 return ERROR_FAIL;
469 }
470
471 retval = target->type->halt(target);
472 if (retval != ERROR_OK)
473 return retval;
474
475 target->halt_issued = true;
476 target->halt_issued_time = timeval_ms();
477
478 return ERROR_OK;
479 }
480
/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 *	of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 *	should be skipped.  (For example, maybe execution was stopped by
 *	such a breakpoint, in which case it would be counterproductive to
 *	let it re-trigger.)
 * @param debug_execution False if all working areas allocated by OpenOCD
 *	should be released and/or restored to their original contents.
 *	(This would for example be true to run some downloaded "helper"
 *	algorithm code, which resides in one such working buffer and uses
 *	another for data storage.)
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies.  For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled.  (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm.... and
 * enabling them could cause such helpers to malfunction (for example,
 * by overwriting data with a breakpoint instruction.  On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, uint32_t address, int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
535
536 static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
537 {
538 char buf[100];
539 int retval;
540 Jim_Nvp *n;
541 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
542 if (n->name == NULL) {
543 LOG_ERROR("invalid reset mode");
544 return ERROR_FAIL;
545 }
546
547 /* disable polling during reset to make reset event scripts
548 * more predictable, i.e. dr/irscan & pathmove in events will
549 * not have JTAG operations injected into the middle of a sequence.
550 */
551 bool save_poll = jtag_poll_get_enabled();
552
553 jtag_poll_set_enabled(false);
554
555 sprintf(buf, "ocd_process_reset %s", n->name);
556 retval = Jim_Eval(cmd_ctx->interp, buf);
557
558 jtag_poll_set_enabled(save_poll);
559
560 if (retval != JIM_OK) {
561 Jim_MakeErrorMessage(cmd_ctx->interp);
562 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
563 return ERROR_FAIL;
564 }
565
566 /* We want any events to be processed before the prompt */
567 retval = target_call_timer_callbacks_now();
568
569 struct target *target;
570 for (target = all_targets; target; target = target->next)
571 target->type->check_reset(target);
572
573 return retval;
574 }
575
/* Default virt2phys implementation for targets without an MMU:
 * physical address equals virtual address. */
static int identity_virt2phys(struct target *target,
		uint32_t virtual, uint32_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}

/* Default MMU query for targets without an MMU: always disabled. */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}

/* Default examine: nothing to probe, just mark the target examined. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}

/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
600
/* Examine a single target by invoking its driver's examine() method. */
int target_examine_one(struct target *target)
{
	return target->type->examine(target);
}

/* JTAG event callback used for deferred examination: when the target's
 * TAP becomes enabled, examine the target (bracketed by the usual
 * EXAMINE_START/END events) and unregister this one-shot callback. */
static int jtag_enable_callback(enum jtag_event event, void *priv)
{
	struct target *target = priv;

	/* ignore everything except an enable event on our (now enabled) TAP */
	if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
		return ERROR_OK;

	/* one-shot: don't run again on later enable events */
	jtag_unregister_event_callback(jtag_enable_callback, target);

	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

	int retval = target_examine_one(target);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);

	return retval;
}
625
/* Targets that correctly implement init + examine, i.e.
 * no communication with target during init:
 *
 * XScale
 */
/* Examine all configured targets.  A target whose TAP is not yet
 * enabled is not skipped: its examination is deferred until the TAP
 * enable event fires (see jtag_enable_callback()).  Stops at the
 * first examination failure and returns that error. */
int target_examine(void)
{
	int retval = ERROR_OK;
	struct target *target;

	for (target = all_targets; target; target = target->next) {
		/* defer examination, but don't skip it */
		if (!target->tap->enabled) {
			jtag_register_event_callback(jtag_enable_callback,
					target);
			continue;
		}

		target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

		retval = target_examine_one(target);
		if (retval != ERROR_OK)
			return retval;

		target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
	}
	return retval;
}
654
/* Return the name of the target's type driver (e.g. "cortex_m3"). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}

/* Guarded wrapper around the driver's raw write_memory implementation;
 * refuses to touch a target that has not been examined yet. */
static int target_write_memory_imp(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	return target->type->write_memory_imp(target, address, size, count, buffer);
}

/* Guarded wrapper around the driver's raw read_memory implementation;
 * refuses to touch a target that has not been examined yet. */
static int target_read_memory_imp(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	return target->type->read_memory_imp(target, address, size, count, buffer);
}

/* Guarded wrapper for soft_reset_halt; this driver hook is optional,
 * so its absence is reported as an error rather than dereferenced. */
static int target_soft_reset_halt_imp(struct target *target)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->soft_reset_halt_imp) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt_imp(target);
}
693
/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 *
 * Requires the target to be examined and its driver to implement
 * run_algorithm; returns ERROR_FAIL otherwise.  running_alg is set
 * for the duration of the run so other code can tell an algorithm
 * is in flight.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
733
/**
 * Downloads a target-specific native code algorithm to the target,
 * executes and leaves it running.  Pair with target_wait_algorithm()
 * to collect the result; only one algorithm may run at a time
 * (running_alg guards against nesting).
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t entry_point, uint32_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	/* mark busy before starting so a failure path still shows the attempt */
	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}
772
/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 *
 * On ERROR_TARGET_TIMEOUT the running_alg flag is deliberately left
 * set so the caller may retry the wait; any other outcome clears it.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}
807
808 /**
809 * Executes a target-specific native code algorithm in the target.
810 * It differs from target_run_algorithm in that the algorithm is asynchronous.
811 * Because of this it requires an compliant algorithm:
812 * see contrib/loaders/flash/stm32f1x.S for example.
813 *
814 * @param target used to run the algorithm
815 */
816
817 int target_run_flash_async_algorithm(struct target *target,
818 uint8_t *buffer, uint32_t count, int block_size,
819 int num_mem_params, struct mem_param *mem_params,
820 int num_reg_params, struct reg_param *reg_params,
821 uint32_t buffer_start, uint32_t buffer_size,
822 uint32_t entry_point, uint32_t exit_point, void *arch_info)
823 {
824 int retval;
825 int timeout = 0;
826
827 /* Set up working area. First word is write pointer, second word is read pointer,
828 * rest is fifo data area. */
829 uint32_t wp_addr = buffer_start;
830 uint32_t rp_addr = buffer_start + 4;
831 uint32_t fifo_start_addr = buffer_start + 8;
832 uint32_t fifo_end_addr = buffer_start + buffer_size;
833
834 uint32_t wp = fifo_start_addr;
835 uint32_t rp = fifo_start_addr;
836
837 /* validate block_size is 2^n */
838 assert(!block_size || !(block_size & (block_size - 1)));
839
840 retval = target_write_u32(target, wp_addr, wp);
841 if (retval != ERROR_OK)
842 return retval;
843 retval = target_write_u32(target, rp_addr, rp);
844 if (retval != ERROR_OK)
845 return retval;
846
847 /* Start up algorithm on target and let it idle while writing the first chunk */
848 retval = target_start_algorithm(target, num_mem_params, mem_params,
849 num_reg_params, reg_params,
850 entry_point,
851 exit_point,
852 arch_info);
853
854 if (retval != ERROR_OK) {
855 LOG_ERROR("error starting target flash write algorithm");
856 return retval;
857 }
858
859 while (count > 0) {
860
861 retval = target_read_u32(target, rp_addr, &rp);
862 if (retval != ERROR_OK) {
863 LOG_ERROR("failed to get read pointer");
864 break;
865 }
866
867 LOG_DEBUG("count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32, count, wp, rp);
868
869 if (rp == 0) {
870 LOG_ERROR("flash write algorithm aborted by target");
871 retval = ERROR_FLASH_OPERATION_FAILED;
872 break;
873 }
874
875 if ((rp & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
876 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
877 break;
878 }
879
880 /* Count the number of bytes available in the fifo without
881 * crossing the wrap around. Make sure to not fill it completely,
882 * because that would make wp == rp and that's the empty condition. */
883 uint32_t thisrun_bytes;
884 if (rp > wp)
885 thisrun_bytes = rp - wp - block_size;
886 else if (rp > fifo_start_addr)
887 thisrun_bytes = fifo_end_addr - wp;
888 else
889 thisrun_bytes = fifo_end_addr - wp - block_size;
890
891 if (thisrun_bytes == 0) {
892 /* Throttle polling a bit if transfer is (much) faster than flash
893 * programming. The exact delay shouldn't matter as long as it's
894 * less than buffer size / flash speed. This is very unlikely to
895 * run when using high latency connections such as USB. */
896 alive_sleep(10);
897
898 /* to stop an infinite loop on some targets check and increment a timeout
899 * this issue was observed on a stellaris using the new ICDI interface */
900 if (timeout++ >= 500) {
901 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
902 return ERROR_FLASH_OPERATION_FAILED;
903 }
904 continue;
905 }
906
907 /* reset our timeout */
908 timeout = 0;
909
910 /* Limit to the amount of data we actually want to write */
911 if (thisrun_bytes > count * block_size)
912 thisrun_bytes = count * block_size;
913
914 /* Write data to fifo */
915 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
916 if (retval != ERROR_OK)
917 break;
918
919 /* Update counters and wrap write pointer */
920 buffer += thisrun_bytes;
921 count -= thisrun_bytes / block_size;
922 wp += thisrun_bytes;
923 if (wp >= fifo_end_addr)
924 wp = fifo_start_addr;
925
926 /* Store updated write pointer to target */
927 retval = target_write_u32(target, wp_addr, wp);
928 if (retval != ERROR_OK)
929 break;
930 }
931
932 if (retval != ERROR_OK) {
933 /* abort flash write algorithm on target */
934 target_write_u32(target, wp_addr, 0);
935 }
936
937 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
938 num_reg_params, reg_params,
939 exit_point,
940 10000,
941 arch_info);
942
943 if (retval2 != ERROR_OK) {
944 LOG_ERROR("error waiting for target flash write algorithm");
945 retval = retval2;
946 }
947
948 return retval;
949 }
950
/* Read memory through the target's address space (virtual if the
 * target has an MMU); thin dispatch to the driver hook. */
int target_read_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	return target->type->read_memory(target, address, size, count, buffer);
}

/* Read physical memory, bypassing any address translation. */
static int target_read_phys_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	return target->type->read_phys_memory(target, address, size, count, buffer);
}

/* Write memory through the target's address space. */
int target_write_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	return target->type->write_memory(target, address, size, count, buffer);
}

/* Write physical memory, bypassing any address translation. */
static int target_write_phys_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	return target->type->write_phys_memory(target, address, size, count, buffer);
}

/* Bulk write: driver-optimized path for large sequential writes. */
int target_bulk_write_memory(struct target *target,
		uint32_t address, uint32_t count, const uint8_t *buffer)
{
	return target->type->bulk_write_memory(target, address, count, buffer);
}
980
981 int target_add_breakpoint(struct target *target,
982 struct breakpoint *breakpoint)
983 {
984 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
985 LOG_WARNING("target %s is not halted", target_name(target));
986 return ERROR_TARGET_NOT_HALTED;
987 }
988 return target->type->add_breakpoint(target, breakpoint);
989 }
990
991 int target_add_context_breakpoint(struct target *target,
992 struct breakpoint *breakpoint)
993 {
994 if (target->state != TARGET_HALTED) {
995 LOG_WARNING("target %s is not halted", target_name(target));
996 return ERROR_TARGET_NOT_HALTED;
997 }
998 return target->type->add_context_breakpoint(target, breakpoint);
999 }
1000
1001 int target_add_hybrid_breakpoint(struct target *target,
1002 struct breakpoint *breakpoint)
1003 {
1004 if (target->state != TARGET_HALTED) {
1005 LOG_WARNING("target %s is not halted", target_name(target));
1006 return ERROR_TARGET_NOT_HALTED;
1007 }
1008 return target->type->add_hybrid_breakpoint(target, breakpoint);
1009 }
1010
1011 int target_remove_breakpoint(struct target *target,
1012 struct breakpoint *breakpoint)
1013 {
1014 return target->type->remove_breakpoint(target, breakpoint);
1015 }
1016
1017 int target_add_watchpoint(struct target *target,
1018 struct watchpoint *watchpoint)
1019 {
1020 if (target->state != TARGET_HALTED) {
1021 LOG_WARNING("target %s is not halted", target_name(target));
1022 return ERROR_TARGET_NOT_HALTED;
1023 }
1024 return target->type->add_watchpoint(target, watchpoint);
1025 }
1026 int target_remove_watchpoint(struct target *target,
1027 struct watchpoint *watchpoint)
1028 {
1029 return target->type->remove_watchpoint(target, watchpoint);
1030 }
1031
1032 int target_get_gdb_reg_list(struct target *target,
1033 struct reg **reg_list[], int *reg_list_size)
1034 {
1035 return target->type->get_gdb_reg_list(target, reg_list, reg_list_size);
1036 }
1037 int target_step(struct target *target,
1038 int current, uint32_t address, int handle_breakpoints)
1039 {
1040 return target->type->step(target, current, address, handle_breakpoints);
1041 }
1042
1043 /**
1044 * Reset the @c examined flag for the given target.
1045 * Pure paranoia -- targets are zeroed on allocation.
1046 */
1047 static void target_reset_examined(struct target *target)
1048 {
1049 target->examined = false;
1050 }
1051
1052 static int err_read_phys_memory(struct target *target, uint32_t address,
1053 uint32_t size, uint32_t count, uint8_t *buffer)
1054 {
1055 LOG_ERROR("Not implemented: %s", __func__);
1056 return ERROR_FAIL;
1057 }
1058
1059 static int err_write_phys_memory(struct target *target, uint32_t address,
1060 uint32_t size, uint32_t count, const uint8_t *buffer)
1061 {
1062 LOG_ERROR("Not implemented: %s", __func__);
1063 return ERROR_FAIL;
1064 }
1065
1066 static int handle_target(void *priv);
1067
/* Prepare one target for use: install defaults for optional
 * target_type hooks, run the type's mandatory init_target() method,
 * interpose the *_imp memory wrappers, and sanity-check/stub the MMU
 * related handlers.  Returns ERROR_OK or the init_target() failure. */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;

	/* optional hooks fall back to safe defaults */
	if (type->examine == NULL)
		type->examine = default_examine;

	if (type->check_reset == NULL)
		type->check_reset = default_check_reset;

	/* init_target is mandatory for every target type */
	assert(type->init_target != NULL);

	int retval = type->init_target(cmd_ctx, target);
	if (ERROR_OK != retval) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/**
	 * @todo get rid of those *memory_imp() methods, now that all
	 * callers are using target_*_memory() accessors ... and make
	 * sure the "physical" paths handle the same issues.
	 */
	/* a non-invasive way(in terms of patches) to add some code that
	 * runs before the type->write/read_memory implementation
	 */
	type->write_memory_imp = target->type->write_memory;
	type->write_memory = target_write_memory_imp;

	type->read_memory_imp = target->type->read_memory;
	type->read_memory = target_read_memory_imp;

	type->soft_reset_halt_imp = target->type->soft_reset_halt;
	type->soft_reset_halt = target_soft_reset_halt_imp;

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		/* MMU present: physical accessors and virt2phys are expected;
		 * stub any that are missing so calls fail loudly, not by crash */
		if (type->write_phys_memory == NULL) {
			LOG_ERROR("type '%s' is missing write_phys_memory",
					type->name);
			type->write_phys_memory = err_write_phys_memory;
		}
		if (type->read_phys_memory == NULL) {
			LOG_ERROR("type '%s' is missing read_phys_memory",
					type->name);
			type->read_phys_memory = err_read_phys_memory;
		}
		if (type->virt2phys == NULL) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same:  make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* buffer accessors default to the word/byte based helpers below */
	if (target->type->read_buffer == NULL)
		target->type->read_buffer = target_read_buffer_default;

	if (target->type->write_buffer == NULL)
		target->type->write_buffer = target_write_buffer_default;

	return ERROR_OK;
}
1145
1146 static int target_init(struct command_context *cmd_ctx)
1147 {
1148 struct target *target;
1149 int retval;
1150
1151 for (target = all_targets; target; target = target->next) {
1152 retval = target_init_one(cmd_ctx, target);
1153 if (ERROR_OK != retval)
1154 return retval;
1155 }
1156
1157 if (!all_targets)
1158 return ERROR_OK;
1159
1160 retval = target_register_user_commands(cmd_ctx);
1161 if (ERROR_OK != retval)
1162 return retval;
1163
1164 retval = target_register_timer_callback(&handle_target,
1165 polling_interval, 1, cmd_ctx->interp);
1166 if (ERROR_OK != retval)
1167 return retval;
1168
1169 return ERROR_OK;
1170 }
1171
/* "target init" command handler: runs the user-overridable
 * "init_targets" and "init_board" procs once, then initializes all
 * configured targets.  Subsequent invocations are ignored. */
COMMAND_HANDLER(handle_target_init_command)
{
	int retval;

	if (CMD_ARGC != 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* guard against running initialization twice */
	static bool target_initialized;
	if (target_initialized) {
		LOG_INFO("'target init' has already been called");
		return ERROR_OK;
	}
	target_initialized = true;

	retval = command_run_line(CMD_CTX, "init_targets");
	if (ERROR_OK != retval)
		return retval;

	retval = command_run_line(CMD_CTX, "init_board");
	if (ERROR_OK != retval)
		return retval;

	LOG_DEBUG("Initializing targets...");
	return target_init(CMD_CTX);
}
1197
1198 int target_register_event_callback(int (*callback)(struct target *target,
1199 enum target_event event, void *priv), void *priv)
1200 {
1201 struct target_event_callback **callbacks_p = &target_event_callbacks;
1202
1203 if (callback == NULL)
1204 return ERROR_COMMAND_SYNTAX_ERROR;
1205
1206 if (*callbacks_p) {
1207 while ((*callbacks_p)->next)
1208 callbacks_p = &((*callbacks_p)->next);
1209 callbacks_p = &((*callbacks_p)->next);
1210 }
1211
1212 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1213 (*callbacks_p)->callback = callback;
1214 (*callbacks_p)->priv = priv;
1215 (*callbacks_p)->next = NULL;
1216
1217 return ERROR_OK;
1218 }
1219
1220 int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
1221 {
1222 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1223 struct timeval now;
1224
1225 if (callback == NULL)
1226 return ERROR_COMMAND_SYNTAX_ERROR;
1227
1228 if (*callbacks_p) {
1229 while ((*callbacks_p)->next)
1230 callbacks_p = &((*callbacks_p)->next);
1231 callbacks_p = &((*callbacks_p)->next);
1232 }
1233
1234 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1235 (*callbacks_p)->callback = callback;
1236 (*callbacks_p)->periodic = periodic;
1237 (*callbacks_p)->time_ms = time_ms;
1238
1239 gettimeofday(&now, NULL);
1240 (*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000;
1241 time_ms -= (time_ms % 1000);
1242 (*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000);
1243 if ((*callbacks_p)->when.tv_usec > 1000000) {
1244 (*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000;
1245 (*callbacks_p)->when.tv_sec += 1;
1246 }
1247
1248 (*callbacks_p)->priv = priv;
1249 (*callbacks_p)->next = NULL;
1250
1251 return ERROR_OK;
1252 }
1253
1254 int target_unregister_event_callback(int (*callback)(struct target *target,
1255 enum target_event event, void *priv), void *priv)
1256 {
1257 struct target_event_callback **p = &target_event_callbacks;
1258 struct target_event_callback *c = target_event_callbacks;
1259
1260 if (callback == NULL)
1261 return ERROR_COMMAND_SYNTAX_ERROR;
1262
1263 while (c) {
1264 struct target_event_callback *next = c->next;
1265 if ((c->callback == callback) && (c->priv == priv)) {
1266 *p = next;
1267 free(c);
1268 return ERROR_OK;
1269 } else
1270 p = &(c->next);
1271 c = next;
1272 }
1273
1274 return ERROR_OK;
1275 }
1276
1277 static int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1278 {
1279 struct target_timer_callback **p = &target_timer_callbacks;
1280 struct target_timer_callback *c = target_timer_callbacks;
1281
1282 if (callback == NULL)
1283 return ERROR_COMMAND_SYNTAX_ERROR;
1284
1285 while (c) {
1286 struct target_timer_callback *next = c->next;
1287 if ((c->callback == callback) && (c->priv == priv)) {
1288 *p = next;
1289 free(c);
1290 return ERROR_OK;
1291 } else
1292 p = &(c->next);
1293 c = next;
1294 }
1295
1296 return ERROR_OK;
1297 }
1298
/* Deliver @a event for @a target: run the target's configured event
 * handler, then every callback registered through
 * target_register_event_callback().  TARGET_EVENT_HALTED first
 * recursively delivers TARGET_EVENT_GDB_HALT. */
int target_call_event_callbacks(struct target *target, enum target_event event)
{
	struct target_event_callback *callback = target_event_callbacks;
	struct target_event_callback *next_callback;

	if (event == TARGET_EVENT_HALTED) {
		/* execute early halted first */
		target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
	}

	LOG_DEBUG("target event %i (%s)", event,
			Jim_Nvp_value2name_simple(nvp_target_event, event)->name);

	/* per-target configured event handler runs before the callbacks */
	target_handle_event(target, event);

	while (callback) {
		/* fetch ->next first so a callback may unregister itself */
		next_callback = callback->next;
		callback->callback(target, event, callback->priv);
		callback = next_callback;
	}

	return ERROR_OK;
}
1322
1323 static int target_timer_callback_periodic_restart(
1324 struct target_timer_callback *cb, struct timeval *now)
1325 {
1326 int time_ms = cb->time_ms;
1327 cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
1328 time_ms -= (time_ms % 1000);
1329 cb->when.tv_sec = now->tv_sec + time_ms / 1000;
1330 if (cb->when.tv_usec > 1000000) {
1331 cb->when.tv_usec = cb->when.tv_usec - 1000000;
1332 cb->when.tv_sec += 1;
1333 }
1334 return ERROR_OK;
1335 }
1336
1337 static int target_call_timer_callback(struct target_timer_callback *cb,
1338 struct timeval *now)
1339 {
1340 cb->callback(cb->priv);
1341
1342 if (cb->periodic)
1343 return target_timer_callback_periodic_restart(cb, now);
1344
1345 return target_unregister_timer_callback(cb->callback, cb->priv);
1346 }
1347
/* Service the timer callback list and the keep-alive.  With
 * @a checktime zero, every periodic callback fires unconditionally;
 * otherwise a callback runs only once 'now' has reached its scheduled
 * 'when' deadline. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	keep_alive();

	struct timeval now;
	gettimeofday(&now, NULL);

	struct target_timer_callback *callback = target_timer_callbacks;
	while (callback) {
		/* cleaning up may unregister and free this callback */
		struct target_timer_callback *next_callback = callback->next;

		/* due when the deadline has passed, or unconditionally for
		 * periodic callbacks when checktime is zero */
		bool call_it = callback->callback &&
			((!checktime && callback->periodic) ||
			 now.tv_sec > callback->when.tv_sec ||
			 (now.tv_sec == callback->when.tv_sec &&
			  now.tv_usec >= callback->when.tv_usec));

		if (call_it) {
			int retval = target_call_timer_callback(callback, &now);
			if (retval != ERROR_OK)
				return retval;
		}

		callback = next_callback;
	}

	return ERROR_OK;
}
1377
/* Run timer callbacks whose deadline has been reached. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1382
/* Run all periodic timer callbacks immediately, ignoring deadlines. */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1388
1389 /* Prints the working area layout for debug purposes */
1390 static void print_wa_layout(struct target *target)
1391 {
1392 struct working_area *c = target->working_areas;
1393
1394 while (c) {
1395 LOG_DEBUG("%c%c 0x%08"PRIx32"-0x%08"PRIx32" (%"PRIu32" bytes)",
1396 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1397 c->address, c->address + c->size - 1, c->size);
1398 c = c->next;
1399 }
1400 }
1401
1402 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1403 static void target_split_working_area(struct working_area *area, uint32_t size)
1404 {
1405 assert(area->free); /* Shouldn't split an allocated area */
1406 assert(size <= area->size); /* Caller should guarantee this */
1407
1408 /* Split only if not already the right size */
1409 if (size < area->size) {
1410 struct working_area *new_wa = malloc(sizeof(*new_wa));
1411
1412 if (new_wa == NULL)
1413 return;
1414
1415 new_wa->next = area->next;
1416 new_wa->size = area->size - size;
1417 new_wa->address = area->address + size;
1418 new_wa->backup = NULL;
1419 new_wa->user = NULL;
1420 new_wa->free = true;
1421
1422 area->next = new_wa;
1423 area->size = size;
1424
1425 /* If backup memory was allocated to this area, it has the wrong size
1426 * now so free it and it will be reallocated if/when needed */
1427 if (area->backup) {
1428 free(area->backup);
1429 area->backup = NULL;
1430 }
1431 }
1432 }
1433
/* Merge all adjacent free areas into one */
static void target_merge_working_areas(struct target *target)
{
	struct working_area *c = target->working_areas;

	while (c && c->next) {
		/* the list models one contiguous region with no gaps */
		assert(c->next->address == c->address + c->size); /* This is an invariant */

		/* Find two adjacent free areas */
		if (c->free && c->next->free) {
			/* Merge the last into the first */
			c->size += c->next->size;

			/* Remove the last */
			struct working_area *to_be_freed = c->next;
			c->next = c->next->next;
			if (to_be_freed->backup)
				free(to_be_freed->backup);
			free(to_be_freed);

			/* If backup memory was allocated to the remaining area, it's has
			 * the wrong size now */
			if (c->backup) {
				free(c->backup);
				c->backup = NULL;
			}
			/* stay on 'c': its new successor may be free as well */
		} else {
			c = c->next;
		}
	}
}
1465
/* Allocate @a size bytes (rounded up to a multiple of 4) of on-target
 * working memory, storing the handle through @a area.  On the first
 * call the working area base is chosen from the MMU state and the
 * whole region is entered into the free list.  If backup is enabled,
 * the current target memory contents are saved before handing the
 * area out.  Returns ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no
 * sufficiently large free area exists. */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (target->working_areas == NULL) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		/* pick the physical or virtual base, whichever matches the
		 * current MMU state and was actually configured */
		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory 0x%08"PRIx32,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory 0x%08"PRIx32,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* NOTE: on malloc failure this stays NULL and the search below
		 * reports ERROR_TARGET_RESOURCE_NOT_AVAILABLE */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	if (size % 4)
		size = (size + 3) & (~3UL);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (c == NULL)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %"PRIu32" bytes at address 0x%08"PRIx32, size, c->address);

	/* save the current memory contents so they can be restored on free */
	if (target->backup_working_area) {
		if (c->backup == NULL) {
			c->backup = malloc(c->size);
			if (c->backup == NULL)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
1559
1560 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1561 {
1562 int retval;
1563
1564 retval = target_alloc_working_area_try(target, size, area);
1565 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1566 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
1567 return retval;
1568
1569 }
1570
1571 static int target_restore_working_area(struct target *target, struct working_area *area)
1572 {
1573 int retval = ERROR_OK;
1574
1575 if (target->backup_working_area && area->backup != NULL) {
1576 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
1577 if (retval != ERROR_OK)
1578 LOG_ERROR("failed to restore %"PRIu32" bytes of working area at address 0x%08"PRIx32,
1579 area->size, area->address);
1580 }
1581
1582 return retval;
1583 }
1584
/* Restore the area's backup memory, if any, and return the area to the allocation pool */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	int retval = ERROR_OK;

	/* freeing an already-free area is a harmless no-op */
	if (area->free)
		return retval;

	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %"PRIu32" bytes of working area at address 0x%08"PRIx32,
		area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	/* coalesce with any adjacent free areas */
	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}
1618
/* Free a working area, restoring its backed-up contents first. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
1623
1624 /* free resources and restore memory, if restoring memory fails,
1625 * free up resources anyway
1626 */
1627 static void target_free_all_working_areas_restore(struct target *target, int restore)
1628 {
1629 struct working_area *c = target->working_areas;
1630
1631 LOG_DEBUG("freeing all working areas");
1632
1633 /* Loop through all areas, restoring the allocated ones and marking them as free */
1634 while (c) {
1635 if (!c->free) {
1636 if (restore)
1637 target_restore_working_area(target, c);
1638 c->free = true;
1639 *c->user = NULL; /* Same as above */
1640 c->user = NULL;
1641 }
1642 c = c->next;
1643 }
1644
1645 /* Run a merge pass to combine all areas into one */
1646 target_merge_working_areas(target);
1647
1648 print_wa_layout(target);
1649 }
1650
/* Free every working area, restoring backed-up contents. */
void target_free_all_working_areas(struct target *target)
{
	target_free_all_working_areas_restore(target, 1);
}
1655
1656 /* Find the largest number of bytes that can be allocated */
1657 uint32_t target_get_working_area_avail(struct target *target)
1658 {
1659 struct working_area *c = target->working_areas;
1660 uint32_t max_size = 0;
1661
1662 if (c == NULL)
1663 return target->working_area_size;
1664
1665 while (c) {
1666 if (c->free && max_size < c->size)
1667 max_size = c->size;
1668
1669 c = c->next;
1670 }
1671
1672 return max_size;
1673 }
1674
1675 int target_arch_state(struct target *target)
1676 {
1677 int retval;
1678 if (target == NULL) {
1679 LOG_USER("No target has been configured");
1680 return ERROR_OK;
1681 }
1682
1683 LOG_USER("target state: %s", target_state_name(target));
1684
1685 if (target->state != TARGET_HALTED)
1686 return ERROR_OK;
1687
1688 retval = target->type->arch_state(target);
1689 return retval;
1690 }
1691
1692 /* Single aligned words are guaranteed to use 16 or 32 bit access
1693 * mode respectively, otherwise data is handled as quickly as
1694 * possible
1695 */
1696 int target_write_buffer(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
1697 {
1698 LOG_DEBUG("writing buffer of %i byte at 0x%8.8x",
1699 (int)size, (unsigned)address);
1700
1701 if (!target_was_examined(target)) {
1702 LOG_ERROR("Target not examined yet");
1703 return ERROR_FAIL;
1704 }
1705
1706 if (size == 0)
1707 return ERROR_OK;
1708
1709 if ((address + size - 1) < address) {
1710 /* GDB can request this when e.g. PC is 0xfffffffc*/
1711 LOG_ERROR("address + size wrapped(0x%08x, 0x%08x)",
1712 (unsigned)address,
1713 (unsigned)size);
1714 return ERROR_FAIL;
1715 }
1716
1717 return target->type->write_buffer(target, address, size, buffer);
1718 }
1719
/* Default write_buffer implementation: a single aligned halfword goes
 * out as one 16-bit access; otherwise unaligned head/tail bytes are
 * written one at a time and the aligned middle as 32-bit words, using
 * the bulk path for larger transfers. */
static int target_write_buffer_default(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
{
	int retval = ERROR_OK;

	/* single aligned halfword: one 16-bit access */
	if (((address % 2) == 0) && (size == 2))
		return target_write_memory(target, address, 2, 1, buffer);

	/* handle unaligned head bytes */
	if (address % 4) {
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		retval = target_write_memory(target, address, 1, unaligned, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4) {
		int aligned = size - (size % 4);

		/* use bulk writes above a certain limit. This may have to be changed */
		if (aligned > 128) {
			retval = target->type->bulk_write_memory(target, address, aligned / 4, buffer);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target, address, 4, aligned / 4, buffer);
			if (retval != ERROR_OK)
				return retval;
		}

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}

	/* handle tail writes of less than 4 bytes */
	if (size > 0) {
		retval = target_write_memory(target, address, 1, size, buffer);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1772
1773 /* Single aligned words are guaranteed to use 16 or 32 bit access
1774 * mode respectively, otherwise data is handled as quickly as
1775 * possible
1776 */
1777 int target_read_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
1778 {
1779 LOG_DEBUG("reading buffer of %i byte at 0x%8.8x",
1780 (int)size, (unsigned)address);
1781
1782 if (!target_was_examined(target)) {
1783 LOG_ERROR("Target not examined yet");
1784 return ERROR_FAIL;
1785 }
1786
1787 if (size == 0)
1788 return ERROR_OK;
1789
1790 if ((address + size - 1) < address) {
1791 /* GDB can request this when e.g. PC is 0xfffffffc*/
1792 LOG_ERROR("address + size wrapped(0x%08" PRIx32 ", 0x%08" PRIx32 ")",
1793 address,
1794 size);
1795 return ERROR_FAIL;
1796 }
1797
1798 return target->type->read_buffer(target, address, size, buffer);
1799 }
1800
/* Default read_buffer implementation: a single aligned halfword comes
 * in as one 16-bit access; otherwise unaligned head bytes are read one
 * at a time, the aligned middle as 32-bit words, then a 16-bit pass
 * and byte reads for whatever remains. */
static int target_read_buffer_default(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
{
	int retval = ERROR_OK;

	/* single aligned halfword: one 16-bit access */
	if (((address % 2) == 0) && (size == 2))
		return target_read_memory(target, address, 2, 1, buffer);

	/* handle unaligned head bytes */
	if (address % 4) {
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		retval = target_read_memory(target, address, 1, unaligned, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4) {
		int aligned = size - (size % 4);

		retval = target_read_memory(target, address, 4, aligned / 4, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}

	/*prevent byte access when possible (avoid AHB access limitations in some cases)*/
	if (size >= 2) {
		int aligned = size - (size % 2);
		retval = target_read_memory(target, address, 2, aligned / 2, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}
	/* handle tail reads of less than 2 bytes */
	if (size > 0) {
		retval = target_read_memory(target, address, 1, size, buffer);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1857
/* Compute a CRC over @a size bytes of target memory at @a address,
 * storing it through @a crc.  The target type's own checksum_memory
 * handler is tried first; if it fails, the region is read back and
 * checksummed on the host instead. */
int target_checksum_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* host-side fallback */
		buffer = malloc(size);
		if (buffer == NULL) {
			LOG_ERROR("error allocating buffer for section (%d bytes)", (int)size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): get_u32 immediately followed by set_u32 of the
		 * same value looks like an identity transform -- confirm the
		 * intended byte-order handling of this fallback path */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
1897
1898 int target_blank_check_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* blank)
1899 {
1900 int retval;
1901 if (!target_was_examined(target)) {
1902 LOG_ERROR("Target not examined yet");
1903 return ERROR_FAIL;
1904 }
1905
1906 if (target->type->blank_check_memory == 0)
1907 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1908
1909 retval = target->type->blank_check_memory(target, address, size, blank);
1910
1911 return retval;
1912 }
1913
1914 int target_read_u32(struct target *target, uint32_t address, uint32_t *value)
1915 {
1916 uint8_t value_buf[4];
1917 if (!target_was_examined(target)) {
1918 LOG_ERROR("Target not examined yet");
1919 return ERROR_FAIL;
1920 }
1921
1922 int retval = target_read_memory(target, address, 4, 1, value_buf);
1923
1924 if (retval == ERROR_OK) {
1925 *value = target_buffer_get_u32(target, value_buf);
1926 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1927 address,
1928 *value);
1929 } else {
1930 *value = 0x0;
1931 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1932 address);
1933 }
1934
1935 return retval;
1936 }
1937
1938 int target_read_u16(struct target *target, uint32_t address, uint16_t *value)
1939 {
1940 uint8_t value_buf[2];
1941 if (!target_was_examined(target)) {
1942 LOG_ERROR("Target not examined yet");
1943 return ERROR_FAIL;
1944 }
1945
1946 int retval = target_read_memory(target, address, 2, 1, value_buf);
1947
1948 if (retval == ERROR_OK) {
1949 *value = target_buffer_get_u16(target, value_buf);
1950 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
1951 address,
1952 *value);
1953 } else {
1954 *value = 0x0;
1955 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1956 address);
1957 }
1958
1959 return retval;
1960 }
1961
1962 int target_read_u8(struct target *target, uint32_t address, uint8_t *value)
1963 {
1964 int retval = target_read_memory(target, address, 1, 1, value);
1965 if (!target_was_examined(target)) {
1966 LOG_ERROR("Target not examined yet");
1967 return ERROR_FAIL;
1968 }
1969
1970 if (retval == ERROR_OK) {
1971 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
1972 address,
1973 *value);
1974 } else {
1975 *value = 0x0;
1976 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1977 address);
1978 }
1979
1980 return retval;
1981 }
1982
1983 int target_write_u32(struct target *target, uint32_t address, uint32_t value)
1984 {
1985 int retval;
1986 uint8_t value_buf[4];
1987 if (!target_was_examined(target)) {
1988 LOG_ERROR("Target not examined yet");
1989 return ERROR_FAIL;
1990 }
1991
1992 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1993 address,
1994 value);
1995
1996 target_buffer_set_u32(target, value_buf, value);
1997 retval = target_write_memory(target, address, 4, 1, value_buf);
1998 if (retval != ERROR_OK)
1999 LOG_DEBUG("failed: %i", retval);
2000
2001 return retval;
2002 }
2003
2004 int target_write_u16(struct target *target, uint32_t address, uint16_t value)
2005 {
2006 int retval;
2007 uint8_t value_buf[2];
2008 if (!target_was_examined(target)) {
2009 LOG_ERROR("Target not examined yet");
2010 return ERROR_FAIL;
2011 }
2012
2013 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8x",
2014 address,
2015 value);
2016
2017 target_buffer_set_u16(target, value_buf, value);
2018 retval = target_write_memory(target, address, 2, 1, value_buf);
2019 if (retval != ERROR_OK)
2020 LOG_DEBUG("failed: %i", retval);
2021
2022 return retval;
2023 }
2024
2025 int target_write_u8(struct target *target, uint32_t address, uint8_t value)
2026 {
2027 int retval;
2028 if (!target_was_examined(target)) {
2029 LOG_ERROR("Target not examined yet");
2030 return ERROR_FAIL;
2031 }
2032
2033 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
2034 address, value);
2035
2036 retval = target_write_memory(target, address, 1, 1, &value);
2037 if (retval != ERROR_OK)
2038 LOG_DEBUG("failed: %i", retval);
2039
2040 return retval;
2041 }
2042
2043 static int find_target(struct command_context *cmd_ctx, const char *name)
2044 {
2045 struct target *target = get_target(name);
2046 if (target == NULL) {
2047 LOG_ERROR("Target: %s is unknown, try one of:\n", name);
2048 return ERROR_FAIL;
2049 }
2050 if (!target->tap->enabled) {
2051 LOG_USER("Target: TAP %s is disabled, "
2052 "can't be the current target\n",
2053 target->tap->dotted_name);
2054 return ERROR_FAIL;
2055 }
2056
2057 cmd_ctx->current_target = target->target_number;
2058 return ERROR_OK;
2059 }
2060
2061
/* "targets" command handler: with one argument, select that target as
 * current (and return on success); otherwise print a table of all
 * configured targets, marking the current one with '*'. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD_CTX, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	struct target *target = all_targets;
	command_print(CMD_CTX, " TargetName Type Endian TapName State ");
	command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		/* show "tap-disabled" rather than a stale run state */
		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		if (CMD_CTX->current_target == target->target_number)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD_CTX,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				Jim_Nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2104
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* current sensed levels, sampled by sense_handler() and read by
 * handle_target() to decide whether polling is safe */
static int powerDropout;
static int srstAsserted;

/* edge-triggered action flags: set by sense_handler(), consumed and
 * cleared by handle_target() after running the matching Tcl proc */
static int runPowerRestore;
static int runPowerDropout;
static int runSrstAsserted;
static int runSrstDeasserted;
2114
/* Sample the adapter's power-dropout and srst sense inputs, store the
 * levels in powerDropout/srstAsserted, and derive edge events into the
 * runXxx action flags.  Dropout and srst-deassert events are
 * rate-limited to one per 2 seconds each. */
static int sense_handler(void)
{
	/* previous-sample levels, kept across calls for edge detection */
	static int prevSrstAsserted;
	static int prevPowerdropout;

	int retval = jtag_power_dropout(&powerDropout);
	if (retval != ERROR_OK)
		return retval;

	/* falling edge of the dropout level = power came back */
	int powerRestored;
	powerRestored = prevPowerdropout && !powerDropout;
	if (powerRestored)
		runPowerRestore = 1;

	long long current = timeval_ms();
	static long long lastPower;
	/* holdoff: don't re-trigger the dropout event within 2s */
	int waitMore = lastPower + 2000 > current;
	if (powerDropout && !waitMore) {
		runPowerDropout = 1;
		lastPower = current;
	}

	retval = jtag_srst_asserted(&srstAsserted);
	if (retval != ERROR_OK)
		return retval;

	/* falling edge of srst = reset was released */
	int srstDeasserted;
	srstDeasserted = prevSrstAsserted && !srstAsserted;

	static long long lastSrst;
	/* holdoff: don't re-trigger the deassert event within 2s */
	waitMore = lastSrst + 2000 > current;
	if (srstDeasserted && !waitMore) {
		runSrstDeasserted = 1;
		lastSrst = current;
	}

	/* rising edge of srst (no holdoff) */
	if (!prevSrstAsserted && srstAsserted)
		runSrstAsserted = 1;

	prevSrstAsserted = srstAsserted;
	prevPowerdropout = powerDropout;

	if (srstDeasserted || powerRestored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2166
/* process target state changes */
/* Periodic timer callback: run srst/power event procs (guarded against
 * recursion, since those procs can themselves trigger new events) and
 * then poll every enabled target, backing off exponentially on targets
 * whose poll keeps failing. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (runSrstAsserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (runSrstDeasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (runPowerDropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (runPowerRestore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		runSrstAsserted = 0;
		runSrstDeasserted = 0;
		runPowerRestore = 0;
		runPowerDropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {
		if (!target->tap->enabled)
			continue;

		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!powerDropout && !srstAsserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}
				LOG_USER("Polling target %s failed, GDB will be halted. Polling again in %dms",
						target_name(target),
						target->backoff.times * polling_interval);

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
				return retval;
			}
			/* Since we succeeded, we reset backoff count */
			if (target->backoff.times > 0)
				LOG_USER("Polling target %s succeeded again", target_name(target));
			target->backoff.times = 0;
		}
	}

	return retval;
}
2267
/* 'reg' command:
 *   reg                      - list all registers of the current target
 *   reg <num|name>           - display one register (re-reading it if
 *                              its cached value is invalid)
 *   reg <num|name> force     - invalidate the cache, then display
 *   reg <num|name> <value>   - write a new value to the register
 * A numeric first argument is an ordinal index across all register
 * caches; anything else is treated as a register name. */
COMMAND_HANDLER(handle_reg_command)
{
	struct target *target;
	struct reg *reg = NULL;
	unsigned count = 0;
	char *value;

	LOG_DEBUG("-");

	target = get_current_target(CMD_CTX);

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD_CTX, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* only print cached values if they are valid */
				if (reg->valid) {
					value = buf_to_str(reg->value,
							reg->size, 16);
					command_print(CMD_CTX,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size) ;
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk all caches, counting registers until we reach num */
		struct reg_cache *cache = target->reg_cache;
		count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD_CTX, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);

		if (!reg) {
			command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
			return ERROR_OK;
		}
	}

	assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
		&& (CMD_ARGV[1][0] <= '9')))) {
		/* "force" discards the cached value and re-reads hardware */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0)
			reg->type->get(reg);
		value = buf_to_str(reg->value, reg->size, 16);
		command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (buf == NULL)
			return ERROR_FAIL;
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		reg->type->set(reg, buf);

		/* echo back the (possibly masked/truncated) stored value */
		value = buf_to_str(reg->value, reg->size, 16);
		command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);

		free(buf);

		return ERROR_OK;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;
}
2387
2388 COMMAND_HANDLER(handle_poll_command)
2389 {
2390 int retval = ERROR_OK;
2391 struct target *target = get_current_target(CMD_CTX);
2392
2393 if (CMD_ARGC == 0) {
2394 command_print(CMD_CTX, "background polling: %s",
2395 jtag_poll_get_enabled() ? "on" : "off");
2396 command_print(CMD_CTX, "TAP: %s (%s)",
2397 target->tap->dotted_name,
2398 target->tap->enabled ? "enabled" : "disabled");
2399 if (!target->tap->enabled)
2400 return ERROR_OK;
2401 retval = target_poll(target);
2402 if (retval != ERROR_OK)
2403 return retval;
2404 retval = target_arch_state(target);
2405 if (retval != ERROR_OK)
2406 return retval;
2407 } else if (CMD_ARGC == 1) {
2408 bool enable;
2409 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2410 jtag_poll_set_enabled(enable);
2411 } else
2412 return ERROR_COMMAND_SYNTAX_ERROR;
2413
2414 return retval;
2415 }
2416
2417 COMMAND_HANDLER(handle_wait_halt_command)
2418 {
2419 if (CMD_ARGC > 1)
2420 return ERROR_COMMAND_SYNTAX_ERROR;
2421
2422 unsigned ms = 5000;
2423 if (1 == CMD_ARGC) {
2424 int retval = parse_uint(CMD_ARGV[0], &ms);
2425 if (ERROR_OK != retval)
2426 return ERROR_COMMAND_SYNTAX_ERROR;
2427 /* convert seconds (given) to milliseconds (needed) */
2428 ms *= 1000;
2429 }
2430
2431 struct target *target = get_current_target(CMD_CTX);
2432 return target_wait_state(target, TARGET_HALTED, ms);
2433 }
2434
2435 /* wait for target state to change. The trick here is to have a low
2436 * latency for short waits and not to suck up all the CPU time
2437 * on longer waits.
2438 *
2439 * After 500ms, keep_alive() is invoked
2440 */
/* Poll @target until it reaches @state or @ms milliseconds elapse.
 * Once the wait exceeds 500ms, keep_alive() is sent on every further
 * iteration so GDB/telnet links don't time out.  Returns ERROR_OK on
 * success, ERROR_FAIL on timeout, or any error from target_poll(). */
int target_wait_state(struct target *target, enum target_state state, int ms)
{
	int retval;
	long long then = 0, cur;
	int once = 1;	/* so the "waiting..." message is logged only once */

	for (;;) {
		retval = target_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (target->state == state)
			break;
		cur = timeval_ms();
		if (once) {
			once = 0;
			/* start the timeout clock on the first miss */
			then = timeval_ms();
			LOG_DEBUG("waiting for target %s...",
				Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
		}

		if (cur-then > 500)
			keep_alive();

		if ((cur-then) > ms) {
			LOG_ERROR("timed out while waiting for target %s",
				Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
			return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}
2473
2474 COMMAND_HANDLER(handle_halt_command)
2475 {
2476 LOG_DEBUG("-");
2477
2478 struct target *target = get_current_target(CMD_CTX);
2479 int retval = target_halt(target);
2480 if (ERROR_OK != retval)
2481 return retval;
2482
2483 if (CMD_ARGC == 1) {
2484 unsigned wait_local;
2485 retval = parse_uint(CMD_ARGV[0], &wait_local);
2486 if (ERROR_OK != retval)
2487 return ERROR_COMMAND_SYNTAX_ERROR;
2488 if (!wait_local)
2489 return ERROR_OK;
2490 }
2491
2492 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2493 }
2494
2495 COMMAND_HANDLER(handle_soft_reset_halt_command)
2496 {
2497 struct target *target = get_current_target(CMD_CTX);
2498
2499 LOG_USER("requesting target halt and executing a soft reset");
2500
2501 target->type->soft_reset_halt(target);
2502
2503 return ERROR_OK;
2504 }
2505
2506 COMMAND_HANDLER(handle_reset_command)
2507 {
2508 if (CMD_ARGC > 1)
2509 return ERROR_COMMAND_SYNTAX_ERROR;
2510
2511 enum target_reset_mode reset_mode = RESET_RUN;
2512 if (CMD_ARGC == 1) {
2513 const Jim_Nvp *n;
2514 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
2515 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
2516 return ERROR_COMMAND_SYNTAX_ERROR;
2517 reset_mode = n->value;
2518 }
2519
2520 /* reset *all* targets */
2521 return target_process_reset(CMD_CTX, reset_mode);
2522 }
2523
2524
2525 COMMAND_HANDLER(handle_resume_command)
2526 {
2527 int current = 1;
2528 if (CMD_ARGC > 1)
2529 return ERROR_COMMAND_SYNTAX_ERROR;
2530
2531 struct target *target = get_current_target(CMD_CTX);
2532
2533 /* with no CMD_ARGV, resume from current pc, addr = 0,
2534 * with one arguments, addr = CMD_ARGV[0],
2535 * handle breakpoints, not debugging */
2536 uint32_t addr = 0;
2537 if (CMD_ARGC == 1) {
2538 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2539 current = 0;
2540 }
2541
2542 return target_resume(target, current, addr, 1, 0);
2543 }
2544
2545 COMMAND_HANDLER(handle_step_command)
2546 {
2547 if (CMD_ARGC > 1)
2548 return ERROR_COMMAND_SYNTAX_ERROR;
2549
2550 LOG_DEBUG("-");
2551
2552 /* with no CMD_ARGV, step from current pc, addr = 0,
2553 * with one argument addr = CMD_ARGV[0],
2554 * handle breakpoints, debugging */
2555 uint32_t addr = 0;
2556 int current_pc = 1;
2557 if (CMD_ARGC == 1) {
2558 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2559 current_pc = 0;
2560 }
2561
2562 struct target *target = get_current_target(CMD_CTX);
2563
2564 return target->type->step(target, current_pc, addr, 1);
2565 }
2566
/* Pretty-print @count items of @size bytes from @buffer as hex dump
 * rows of 32 bytes each, prefixed with the item's target address.
 * Values are decoded in the target's byte order. */
static void handle_md_output(struct command_context *cmd_ctx,
		struct target *target, uint32_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	const unsigned line_bytecnt = 32;
	unsigned per_line = line_bytecnt / size;

	char line[line_bytecnt * 4 + 1];
	unsigned pos = 0;

	const char *value_fmt;
	switch (size) {
	case 4:
		value_fmt = "%8.8x ";
		break;
	case 2:
		value_fmt = "%4.4x ";
		break;
	case 1:
		value_fmt = "%2.2x ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		/* each row starts with the address of its first item */
		if (i % per_line == 0)
			pos += snprintf(line + pos,
					sizeof(line) - pos,
					"0x%8.8x: ",
					(unsigned)(address + (i * size)));

		const uint8_t *item = buffer + i * size;
		uint32_t value = 0;
		switch (size) {
		case 4:
			value = target_buffer_get_u32(target, item);
			break;
		case 2:
			value = target_buffer_get_u16(target, item);
			break;
		case 1:
			value = *item;
		}
		pos += snprintf(line + pos, sizeof(line) - pos, value_fmt, value);

		/* flush at the end of each row and after the final item */
		if ((i % per_line == per_line - 1) || (i == count - 1)) {
			command_print(cmd_ctx, "%s", line);
			pos = 0;
		}
	}
}
2624
2625 COMMAND_HANDLER(handle_md_command)
2626 {
2627 if (CMD_ARGC < 1)
2628 return ERROR_COMMAND_SYNTAX_ERROR;
2629
2630 unsigned size = 0;
2631 switch (CMD_NAME[2]) {
2632 case 'w':
2633 size = 4;
2634 break;
2635 case 'h':
2636 size = 2;
2637 break;
2638 case 'b':
2639 size = 1;
2640 break;
2641 default:
2642 return ERROR_COMMAND_SYNTAX_ERROR;
2643 }
2644
2645 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2646 int (*fn)(struct target *target,
2647 uint32_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
2648 if (physical) {
2649 CMD_ARGC--;
2650 CMD_ARGV++;
2651 fn = target_read_phys_memory;
2652 } else
2653 fn = target_read_memory;
2654 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
2655 return ERROR_COMMAND_SYNTAX_ERROR;
2656
2657 uint32_t address;
2658 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2659
2660 unsigned count = 1;
2661 if (CMD_ARGC == 2)
2662 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
2663
2664 uint8_t *buffer = calloc(count, size);
2665
2666 struct target *target = get_current_target(CMD_CTX);
2667 int retval = fn(target, address, size, count, buffer);
2668 if (ERROR_OK == retval)
2669 handle_md_output(CMD_CTX, target, address, size, count, buffer);
2670
2671 free(buffer);
2672
2673 return retval;
2674 }
2675
/* Common signature of target_write_phys_memory() and
 * target_write_memory_fast(), so the mw/fill commands can select the
 * write path at runtime. */
typedef int (*target_write_fn)(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
2678
/* target_write_fn adapter: route a (size, count) write through
 * target_write_buffer(), which picks the fastest transfer method. */
static int target_write_memory_fast(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	uint32_t byte_count = size * count;
	return target_write_buffer(target, address, byte_count, buffer);
}
2684
2685 static int target_fill_mem(struct target *target,
2686 uint32_t address,
2687 target_write_fn fn,
2688 unsigned data_size,
2689 /* value */
2690 uint32_t b,
2691 /* count */
2692 unsigned c)
2693 {
2694 /* We have to write in reasonably large chunks to be able
2695 * to fill large memory areas with any sane speed */
2696 const unsigned chunk_size = 16384;
2697 uint8_t *target_buf = malloc(chunk_size * data_size);
2698 if (target_buf == NULL) {
2699 LOG_ERROR("Out of memory");
2700 return ERROR_FAIL;
2701 }
2702
2703 for (unsigned i = 0; i < chunk_size; i++) {
2704 switch (data_size) {
2705 case 4:
2706 target_buffer_set_u32(target, target_buf + i * data_size, b);
2707 break;
2708 case 2:
2709 target_buffer_set_u16(target, target_buf + i * data_size, b);
2710 break;
2711 case 1:
2712 target_buffer_set_u8(target, target_buf + i * data_size, b);
2713 break;
2714 default:
2715 exit(-1);
2716 }
2717 }
2718
2719 int retval = ERROR_OK;
2720
2721 for (unsigned x = 0; x < c; x += chunk_size) {
2722 unsigned current;
2723 current = c - x;
2724 if (current > chunk_size)
2725 current = chunk_size;
2726 retval = fn(target, address + x * data_size, data_size, current, target_buf);
2727 if (retval != ERROR_OK)
2728 break;
2729 /* avoid GDB timeouts */
2730 keep_alive();
2731 }
2732 free(target_buf);
2733
2734 return retval;
2735 }
2736
2737
2738 COMMAND_HANDLER(handle_mw_command)
2739 {
2740 if (CMD_ARGC < 2)
2741 return ERROR_COMMAND_SYNTAX_ERROR;
2742 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2743 target_write_fn fn;
2744 if (physical) {
2745 CMD_ARGC--;
2746 CMD_ARGV++;
2747 fn = target_write_phys_memory;
2748 } else
2749 fn = target_write_memory_fast;
2750 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
2751 return ERROR_COMMAND_SYNTAX_ERROR;
2752
2753 uint32_t address;
2754 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2755
2756 uint32_t value;
2757 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2758
2759 unsigned count = 1;
2760 if (CMD_ARGC == 3)
2761 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
2762
2763 struct target *target = get_current_target(CMD_CTX);
2764 unsigned wordsize;
2765 switch (CMD_NAME[2]) {
2766 case 'w':
2767 wordsize = 4;
2768 break;
2769 case 'h':
2770 wordsize = 2;
2771 break;
2772 case 'b':
2773 wordsize = 1;
2774 break;
2775 default:
2776 return ERROR_COMMAND_SYNTAX_ERROR;
2777 }
2778
2779 return target_fill_mem(target, address, fn, wordsize, value, count);
2780 }
2781
/* Parse the common load_image argument list:
 *   filename [base_address [type [min_address [size]]]]
 * Fills in @image's base-address fields and narrows the caller's
 * [*min_address, *max_address] load window; callers pre-set the
 * window defaults before calling. */
static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
		uint32_t *min_address, uint32_t *max_address)
{
	if (CMD_ARGC < 1 || CMD_ARGC > 5)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* a base address isn't always necessary,
	 * default to 0x0 (i.e. don't relocate) */
	if (CMD_ARGC >= 2) {
		uint32_t addr;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
		image->base_address = addr;
		image->base_address_set = 1;
	} else
		image->base_address_set = 0;

	image->start_address_set = 0;

	if (CMD_ARGC >= 4)
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], *min_address);
	if (CMD_ARGC == 5) {
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], *max_address);
		/* use size (given) to find max (required) */
		*max_address += *min_address;
	}

	if (*min_address > *max_address)
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
2813
/* 'load_image' command: open an image file and write its sections into
 * target memory, clipped to the optional [min_address, max_address)
 * window parsed by parse_load_image_command_CMD_ARGV. */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	uint32_t min_address = 0;
	uint32_t max_address = 0xffffffff;
	int i;
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
			&image, &min_address, &max_address);
	if (ERROR_OK != retval)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	/* NOTE(review): an image_open() failure is reported as ERROR_OK
	 * here, so scripts can't detect it -- looks intentional but
	 * worth confirming */
	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_OK;

	image_size = 0x0;
	retval = ERROR_OK;
	for (i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (buffer == NULL) {
			command_print(CMD_CTX,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparision here!!! */

		/* write only the part of the section that overlaps the
		 * [min_address, max_address) window */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD_CTX, "%u bytes written at address 0x%8.8" PRIx32 "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
2897
2898 COMMAND_HANDLER(handle_dump_image_command)
2899 {
2900 struct fileio fileio;
2901 uint8_t *buffer;
2902 int retval, retvaltemp;
2903 uint32_t address, size;
2904 struct duration bench;
2905 struct target *target = get_current_target(CMD_CTX);
2906
2907 if (CMD_ARGC != 3)
2908 return ERROR_COMMAND_SYNTAX_ERROR;
2909
2910 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], address);
2911 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], size);
2912
2913 uint32_t buf_size = (size > 4096) ? 4096 : size;
2914 buffer = malloc(buf_size);
2915 if (!buffer)
2916 return ERROR_FAIL;
2917
2918 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
2919 if (retval != ERROR_OK) {
2920 free(buffer);
2921 return retval;
2922 }
2923
2924 duration_start(&bench);
2925
2926 while (size > 0) {
2927 size_t size_written;
2928 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
2929 retval = target_read_buffer(target, address, this_run_size, buffer);
2930 if (retval != ERROR_OK)
2931 break;
2932
2933 retval = fileio_write(&fileio, this_run_size, buffer, &size_written);
2934 if (retval != ERROR_OK)
2935 break;
2936
2937 size -= this_run_size;
2938 address += this_run_size;
2939 }
2940
2941 free(buffer);
2942
2943 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
2944 int filesize;
2945 retval = fileio_size(&fileio, &filesize);
2946 if (retval != ERROR_OK)
2947 return retval;
2948 command_print(CMD_CTX,
2949 "dumped %ld bytes in %fs (%0.3f KiB/s)", (long)filesize,
2950 duration_elapsed(&bench), duration_kbps(&bench, filesize));
2951 }
2952
2953 retvaltemp = fileio_close(&fileio);
2954 if (retvaltemp != ERROR_OK)
2955 return retvaltemp;
2956
2957 return retval;
2958 }
2959
2960 static COMMAND_HELPER(handle_verify_image_command_internal, int verify)
2961 {
2962 uint8_t *buffer;
2963 size_t buf_cnt;
2964 uint32_t image_size;
2965 int i;
2966 int retval;
2967 uint32_t checksum = 0;
2968 uint32_t mem_checksum = 0;
2969
2970 struct image image;
2971
2972 struct target *target = get_current_target(CMD_CTX);
2973
2974 if (CMD_ARGC < 1)
2975 return ERROR_COMMAND_SYNTAX_ERROR;
2976
2977 if (!target) {
2978 LOG_ERROR("no target selected");
2979 return ERROR_FAIL;
2980 }
2981
2982 struct duration bench;
2983 duration_start(&bench);
2984
2985 if (CMD_ARGC >= 2) {
2986 uint32_t addr;
2987 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
2988 image.base_address = addr;
2989 image.base_address_set = 1;
2990 } else {
2991 image.base_address_set = 0;
2992 image.base_address = 0x0;
2993 }
2994
2995 image.start_address_set = 0;
2996
2997 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
2998 if (retval != ERROR_OK)
2999 return retval;
3000
3001 image_size = 0x0;
3002 int diffs = 0;
3003 retval = ERROR_OK;
3004 for (i = 0; i < image.num_sections; i++) {
3005 buffer = malloc(image.sections[i].size);
3006 if (buffer == NULL) {
3007 command_print(CMD_CTX,
3008 "error allocating buffer for section (%d bytes)",
3009 (int)(image.sections[i].size));
3010 break;
3011 }
3012 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3013 if (retval != ERROR_OK) {
3014 free(buffer);
3015 break;
3016 }
3017
3018 if (verify) {
3019 /* calculate checksum of image */
3020 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3021 if (retval != ERROR_OK) {
3022 free(buffer);
3023 break;
3024 }
3025
3026 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3027 if (retval != ERROR_OK) {
3028 free(buffer);
3029 break;
3030 }
3031
3032 if (checksum != mem_checksum) {
3033 /* failed crc checksum, fall back to a binary compare */
3034 uint8_t *data;
3035
3036 if (diffs == 0)
3037 LOG_ERROR("checksum mismatch - attempting binary compare");
3038
3039 data = (uint8_t *)malloc(buf_cnt);
3040
3041 /* Can we use 32bit word accesses? */
3042 int size = 1;
3043 int count = buf_cnt;
3044 if ((count % 4) == 0) {
3045 size *= 4;
3046 count /= 4;
3047 }
3048 retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
3049 if (retval == ERROR_OK) {
3050 uint32_t t;
3051 for (t = 0; t < buf_cnt; t++) {
3052 if (data[t] != buffer[t]) {
3053 command_print(CMD_CTX,
3054 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3055 diffs,
3056 (unsigned)(t + image.sections[i].base_address),
3057 data[t],
3058 buffer[t]);
3059 if (diffs++ >= 127) {
3060 command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
3061 free(data);
3062 free(buffer);
3063 goto done;
3064 }
3065 }
3066 keep_alive();
3067 }
3068 }
3069 free(data);
3070 }
3071 } else {
3072 command_print(CMD_CTX, "address 0x%08" PRIx32 " length 0x%08zx",
3073 image.sections[i].base_address,
3074 buf_cnt);
3075 }
3076
3077 free(buffer);
3078 image_size += buf_cnt;
3079 }
3080 if (diffs > 0)
3081 command_print(CMD_CTX, "No more differences found.");
3082 done:
3083 if (diffs > 0)
3084 retval = ERROR_FAIL;
3085 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3086 command_print(CMD_CTX, "verified %" PRIu32 " bytes "
3087 "in %fs (%0.3f KiB/s)", image_size,
3088 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3089 }
3090
3091 image_close(&image);
3092
3093 return retval;
3094 }
3095
/* 'verify_image' command: compare an image file against target memory
 * (checksum first, binary compare on mismatch). */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 1);
}
3100
/* 'test_image' command: read an image file and print its sections
 * without comparing against target memory. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 0);
}
3105
3106 static int handle_bp_command_list(struct command_context *cmd_ctx)
3107 {
3108 struct target *target = get_current_target(cmd_ctx);
3109 struct breakpoint *breakpoint = target->breakpoints;
3110 while (breakpoint) {
3111 if (breakpoint->type == BKPT_SOFT) {
3112 char *buf = buf_to_str(breakpoint->orig_instr,
3113 breakpoint->length, 16);
3114 command_print(cmd_ctx, "IVA breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i, 0x%s",
3115 breakpoint->address,
3116 breakpoint->length,
3117 breakpoint->set, buf);
3118 free(buf);
3119 } else {
3120 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3121 command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3122 breakpoint->asid,
3123 breakpoint->length, breakpoint->set);
3124 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3125 command_print(cmd_ctx, "Hybrid breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
3126 breakpoint->address,
3127 breakpoint->length, breakpoint->set);
3128 command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3129 breakpoint->asid);
3130 } else
3131 command_print(cmd_ctx, "Breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
3132 breakpoint->address,
3133 breakpoint->length, breakpoint->set);
3134 }
3135
3136 breakpoint = breakpoint->next;
3137 }
3138 return ERROR_OK;
3139 }
3140
3141 static int handle_bp_command_set(struct command_context *cmd_ctx,
3142 uint32_t addr, uint32_t asid, uint32_t length, int hw)
3143 {
3144 struct target *target = get_current_target(cmd_ctx);
3145
3146 if (asid == 0) {
3147 int retval = breakpoint_add(target, addr, length, hw);
3148 if (ERROR_OK == retval)
3149 command_print(cmd_ctx, "breakpoint set at 0x%8.8" PRIx32 "", addr);
3150 else {
3151 LOG_ERROR("Failure setting breakpoint, the same address(IVA) is already used");
3152 return retval;
3153 }
3154 } else if (addr == 0) {
3155 int retval = context_breakpoint_add(target, asid, length, hw);
3156 if (ERROR_OK == retval)
3157 command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3158 else {
3159 LOG_ERROR("Failure setting breakpoint, the same address(CONTEXTID) is already used");
3160 return retval;
3161 }
3162 } else {
3163 int retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3164 if (ERROR_OK == retval)
3165 command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3166 else {
3167 LOG_ERROR("Failure setting breakpoint, the same address is already used");
3168 return retval;
3169 }
3170 }
3171 return ERROR_OK;
3172 }
3173
3174 COMMAND_HANDLER(handle_bp_command)
3175 {
3176 uint32_t addr;
3177 uint32_t asid;
3178 uint32_t length;
3179 int hw = BKPT_SOFT;
3180
3181 switch (CMD_ARGC) {
3182 case 0:
3183 return handle_bp_command_list(CMD_CTX);
3184
3185 case 2:
3186 asid = 0;
3187 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3188 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3189 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3190
3191 case 3:
3192 if (strcmp(CMD_ARGV[2], "hw") == 0) {
3193 hw = BKPT_HARD;
3194 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3195
3196 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3197
3198 asid = 0;
3199 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3200 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
3201 hw = BKPT_HARD;
3202 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
3203 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3204 addr = 0;
3205 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3206 }
3207
3208 case 4:
3209 hw = BKPT_HARD;
3210 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3211 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
3212 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
3213 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3214
3215 default:
3216 return ERROR_COMMAND_SYNTAX_ERROR;
3217 }
3218 }
3219
3220 COMMAND_HANDLER(handle_rbp_command)
3221 {
3222 if (CMD_ARGC != 1)
3223 return ERROR_COMMAND_SYNTAX_ERROR;
3224
3225 uint32_t addr;
3226 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3227
3228 struct target *target = get_current_target(CMD_CTX);
3229 breakpoint_remove(target, addr);
3230
3231 return ERROR_OK;
3232 }
3233
3234 COMMAND_HANDLER(handle_wp_command)
3235 {
3236 struct target *target = get_current_target(CMD_CTX);
3237
3238 if (CMD_ARGC == 0) {
3239 struct watchpoint *watchpoint = target->watchpoints;
3240
3241 while (watchpoint) {
3242 command_print(CMD_CTX, "address: 0x%8.8" PRIx32
3243 ", len: 0x%8.8" PRIx32
3244 ", r/w/a: %i, value: 0x%8.8" PRIx32
3245 ", mask: 0x%8.8" PRIx32,
3246 watchpoint->address,
3247 watchpoint->length,
3248 (int)watchpoint->rw,
3249 watchpoint->value,
3250 watchpoint->mask);
3251 watchpoint = watchpoint->next;
3252 }
3253 return ERROR_OK;
3254 }
3255
3256 enum watchpoint_rw type = WPT_ACCESS;
3257 uint32_t addr = 0;
3258 uint32_t length = 0;
3259 uint32_t data_value = 0x0;
3260 uint32_t data_mask = 0xffffffff;
3261
3262 switch (CMD_ARGC) {
3263 case 5:
3264 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
3265 /* fall through */
3266 case 4:
3267 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
3268 /* fall through */
3269 case 3:
3270 switch (CMD_ARGV[2][0]) {
3271 case 'r':
3272 type = WPT_READ;
3273 break;
3274 case 'w':
3275 type = WPT_WRITE;
3276 break;
3277 case 'a':
3278 type = WPT_ACCESS;
3279 break;
3280 default:
3281 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
3282 return ERROR_COMMAND_SYNTAX_ERROR;
3283 }
3284 /* fall through */
3285 case 2:
3286 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3287 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3288 break;
3289
3290 default:
3291 return ERROR_COMMAND_SYNTAX_ERROR;
3292 }
3293
3294 int retval = watchpoint_add(target, addr, length, type,
3295 data_value, data_mask);
3296 if (ERROR_OK != retval)
3297 LOG_ERROR("Failure setting watchpoints");
3298
3299 return retval;
3300 }
3301
3302 COMMAND_HANDLER(handle_rwp_command)
3303 {
3304 if (CMD_ARGC != 1)
3305 return ERROR_COMMAND_SYNTAX_ERROR;
3306
3307 uint32_t addr;
3308 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3309
3310 struct target *target = get_current_target(CMD_CTX);
3311 watchpoint_remove(target, addr);
3312
3313 return ERROR_OK;
3314 }
3315
3316 /**
3317 * Translate a virtual address to a physical address.
3318 *
3319 * The low-level target implementation must have logged a detailed error
3320 * which is forwarded to telnet/GDB session.
3321 */
3322 COMMAND_HANDLER(handle_virt2phys_command)
3323 {
3324 if (CMD_ARGC != 1)
3325 return ERROR_COMMAND_SYNTAX_ERROR;
3326
3327 uint32_t va;
3328 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], va);
3329 uint32_t pa;
3330
3331 struct target *target = get_current_target(CMD_CTX);
3332 int retval = target->type->virt2phys(target, va, &pa);
3333 if (retval == ERROR_OK)
3334 command_print(CMD_CTX, "Physical address 0x%08" PRIx32 "", pa);
3335
3336 return retval;
3337 }
3338
/* Write len bytes to f; a short write is logged (best-effort, the caller
 * is not notified). */
static void writeData(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
3345
/* Write the low 32 bits of l to f, least-significant byte first. */
static void writeLong(FILE *f, int l)
{
	for (int shift = 0; shift < 32; shift += 8) {
		char byte = (l >> shift) & 0xff;
		writeData(f, &byte, 1);
	}
}
3355
/* Write the bytes of a NUL-terminated string to f, terminator excluded. */
static void writeString(FILE *f, char *str)
{
	writeData(f, str, strlen(str));
}
3360
/* Dump a gmon.out histogram file.
 *
 * Emits the gprof "gmon" magic/version header followed by a
 * GMON_TAG_TIME_HIST record whose buckets span [min, max] of the sampled
 * PCs.  All failures (no samples, unwritable file, out of memory) degrade
 * to producing no or partial output rather than aborting.
 */
static void writeGmon(uint32_t *samples, uint32_t sampleNum, const char *filename)
{
	uint32_t i;

	/* With no samples there is no histogram, and reading samples[0]
	 * below would be out of bounds. */
	if (sampleNum == 0)
		return;

	FILE *f = fopen(filename, "w");
	if (f == NULL)
		return;
	writeString(f, "gmon");
	writeLong(f, 0x00000001); /* Version */
	writeLong(f, 0); /* padding */
	writeLong(f, 0); /* padding */
	writeLong(f, 0); /* padding */

	uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
	writeData(f, &zero, 1);

	/* figure out bucket size */
	uint32_t min = samples[0];
	uint32_t max = samples[0];
	for (i = 0; i < sampleNum; i++) {
		if (min > samples[i])
			min = samples[i];
		if (max < samples[i])
			max = samples[i];
	}

	/* 64-bit so that a full 4 GiB address span cannot overflow; clamp to
	 * at least 2 so the (addressSpace - 1) divisor below is never zero
	 * when every sample hit the same PC (the old code asserted here). */
	uint64_t addressSpace = (uint64_t)max - min + 1;
	if (addressSpace < 2)
		addressSpace = 2;

	static const uint32_t maxBuckets = 16 * 1024; /* maximum buckets. */
	uint32_t length = (addressSpace > maxBuckets)
		? maxBuckets
		: (uint32_t)addressSpace;
	int *buckets = calloc(length, sizeof(int));
	if (buckets == NULL) {
		fclose(f);
		return;
	}
	for (i = 0; i < sampleNum; i++) {
		uint32_t address = samples[i];
		/* Map address into a bucket; 64-bit intermediates avoid the
		 * int32 overflow the old comment warned about. */
		long long a = address - min;
		long long b = length - 1;
		long long c = addressSpace - 1;
		int index_t = (a * b) / c;
		buckets[index_t]++;
	}

	/* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
	writeLong(f, min); /* low_pc */
	writeLong(f, max); /* high_pc */
	writeLong(f, length); /* # of samples */
	writeLong(f, 100); /* KLUDGE! We lie, ca. 100Hz best case. */
	writeString(f, "seconds");
	/* Pad the 15-byte dimension field after "seconds". */
	for (i = 0; i < (15 - strlen("seconds")); i++)
		writeData(f, &zero, 1);
	writeString(f, "s");

	/*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */

	/* Each bucket is emitted as a 16-bit little-endian count, saturated
	 * at 65535. */
	char *data = malloc(2 * length);
	if (data != NULL) {
		for (i = 0; i < length; i++) {
			int val = buckets[i];
			if (val > 65535)
				val = 65535;
			data[i * 2] = val & 0xff;
			data[i * 2 + 1] = (val >> 8) & 0xff;
		}
		writeData(f, data, length * 2);
	}
	free(data); /* free(NULL) is a no-op */
	free(buckets);

	fclose(f);
}
3439
3440 /* profiling samples the CPU PC as quickly as OpenOCD is able,
3441 * which will be used as a random sampling of PC */
3442 COMMAND_HANDLER(handle_profile_command)
3443 {
3444 struct target *target = get_current_target(CMD_CTX);
3445 struct timeval timeout, now;
3446
3447 gettimeofday(&timeout, NULL);
3448 if (CMD_ARGC != 2)
3449 return ERROR_COMMAND_SYNTAX_ERROR;
3450 unsigned offset;
3451 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], offset);
3452
3453 timeval_add_time(&timeout, offset, 0);
3454
3455 /**
3456 * @todo: Some cores let us sample the PC without the
3457 * annoying halt/resume step; for example, ARMv7 PCSR.
3458 * Provide a way to use that more efficient mechanism.
3459 */
3460
3461 command_print(CMD_CTX, "Starting profiling. Halting and resuming the target as often as we can...");
3462
3463 static const int maxSample = 10000;
3464 uint32_t *samples = malloc(sizeof(uint32_t)*maxSample);
3465 if (samples == NULL)
3466 return ERROR_OK;
3467
3468 int numSamples = 0;
3469 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
3470 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
3471
3472 int retval = ERROR_OK;
3473 for (;;) {
3474 target_poll(target);
3475 if (target->state == TARGET_HALTED) {
3476 uint32_t t = *((uint32_t *)reg->value);
3477 samples[numSamples++] = t;
3478 /* current pc, addr = 0, do not handle breakpoints, not debugging */
3479 retval = target_resume(target, 1, 0, 0, 0);
3480 target_poll(target);
3481 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
3482 } else if (target->state == TARGET_RUNNING) {
3483 /* We want to quickly sample the PC. */
3484 retval = target_halt(target);
3485 if (retval != ERROR_OK) {
3486 free(samples);
3487 return retval;
3488 }
3489 } else {
3490 command_print(CMD_CTX, "Target not halted or running");
3491 retval = ERROR_OK;
3492 break;
3493 }
3494 if (retval != ERROR_OK)
3495 break;
3496
3497 gettimeofday(&now, NULL);
3498 if ((numSamples >= maxSample) || ((now.tv_sec >= timeout.tv_sec)
3499 && (now.tv_usec >= timeout.tv_usec))) {
3500 command_print(CMD_CTX, "Profiling completed. %d samples.", numSamples);
3501 retval = target_poll(target);
3502 if (retval != ERROR_OK) {
3503 free(samples);
3504 return retval;
3505 }
3506 if (target->state == TARGET_HALTED) {
3507 /* current pc, addr = 0, do not handle
3508 * breakpoints, not debugging */
3509 target_resume(target, 1, 0, 0, 0);
3510 }
3511 retval = target_poll(target);
3512 if (retval != ERROR_OK) {
3513 free(samples);
3514 return retval;
3515 }
3516 writeGmon(samples, numSamples, CMD_ARGV[1]);
3517 command_print(CMD_CTX, "Wrote %s", CMD_ARGV[1]);
3518 break;
3519 }
3520 }
3521 free(samples);
3522
3523 return retval;
3524 }
3525
3526 static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val)
3527 {
3528 char *namebuf;
3529 Jim_Obj *nameObjPtr, *valObjPtr;
3530 int result;
3531
3532 namebuf = alloc_printf("%s(%d)", varname, idx);
3533 if (!namebuf)
3534 return JIM_ERR;
3535
3536 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
3537 valObjPtr = Jim_NewIntObj(interp, val);
3538 if (!nameObjPtr || !valObjPtr) {
3539 free(namebuf);
3540 return JIM_ERR;
3541 }
3542
3543 Jim_IncrRefCount(nameObjPtr);
3544 Jim_IncrRefCount(valObjPtr);
3545 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
3546 Jim_DecrRefCount(interp, nameObjPtr);
3547 Jim_DecrRefCount(interp, valObjPtr);
3548 free(namebuf);
3549 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
3550 return result;
3551 }
3552
3553 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
3554 {
3555 struct command_context *context;
3556 struct target *target;
3557
3558 context = current_command_context(interp);
3559 assert(context != NULL);
3560
3561 target = get_current_target(context);
3562 if (target == NULL) {
3563 LOG_ERROR("mem2array: no current target");
3564 return JIM_ERR;
3565 }
3566
3567 return target_mem2array(interp, target, argc - 1, argv + 1);
3568 }
3569
3570 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
3571 {
3572 long l;
3573 uint32_t width;
3574 int len;
3575 uint32_t addr;
3576 uint32_t count;
3577 uint32_t v;
3578 const char *varname;
3579 int n, e, retval;
3580 uint32_t i;
3581
3582 /* argv[1] = name of array to receive the data
3583 * argv[2] = desired width
3584 * argv[3] = memory address
3585 * argv[4] = count of times to read
3586 */
3587 if (argc != 4) {
3588 Jim_WrongNumArgs(interp, 1, argv, "varname width addr nelems");
3589 return JIM_ERR;
3590 }
3591 varname = Jim_GetString(argv[0], &len);
3592 /* given "foo" get space for worse case "foo(%d)" .. add 20 */
3593
3594 e = Jim_GetLong(interp, argv[1], &l);
3595 width = l;
3596 if (e != JIM_OK)
3597 return e;
3598
3599 e = Jim_GetLong(interp, argv[2], &l);
3600 addr = l;
3601 if (e != JIM_OK)
3602 return e;
3603 e = Jim_GetLong(interp, argv[3], &l);
3604 len = l;
3605 if (e != JIM_OK)
3606 return e;
3607 switch (width) {
3608 case 8:
3609 width = 1;
3610 break;
3611 case 16:
3612 width = 2;
3613 break;
3614 case 32:
3615 width = 4;
3616 break;
3617 default:
3618 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3619 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
3620 return JIM_ERR;
3621 }
3622 if (len == 0) {
3623 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3624 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
3625 return JIM_ERR;
3626 }
3627 if ((addr + (len * width)) < addr) {
3628 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3629 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
3630 return JIM_ERR;
3631 }
3632 /* absurd transfer size? */
3633 if (len > 65536) {
3634 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3635 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
3636 return JIM_ERR;
3637 }
3638
3639 if ((width == 1) ||
3640 ((width == 2) && ((addr & 1) == 0)) ||
3641 ((width == 4) && ((addr & 3) == 0))) {
3642 /* all is well */
3643 } else {
3644 char buf[100];
3645 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3646 sprintf(