[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * This program is free software; you can redistribute it and/or modify *
24 * it under the terms of the GNU General Public License as published by *
25 * the Free Software Foundation; either version 2 of the License, or *
26 * (at your option) any later version. *
27 * *
28 * This program is distributed in the hope that it will be useful, *
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
31 * GNU General Public License for more details. *
32 * *
33 * You should have received a copy of the GNU General Public License *
34 * along with this program; if not, write to the *
35 * Free Software Foundation, Inc., *
36 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
37 ***************************************************************************/
38 #ifdef HAVE_CONFIG_H
39 #include "config.h"
40 #endif
41
42 #include <helper/time_support.h>
43 #include <jtag/jtag.h>
44 #include <flash/nor/core.h>
45
46 #include "target.h"
47 #include "target_type.h"
48 #include "target_request.h"
49 #include "breakpoints.h"
50 #include "register.h"
51 #include "trace.h"
52 #include "image.h"
53 #include "rtos/rtos.h"
54
55
56 static int target_read_buffer_default(struct target *target, uint32_t address,
57 uint32_t size, uint8_t *buffer);
58 static int target_write_buffer_default(struct target *target, uint32_t address,
59 uint32_t size, const uint8_t *buffer);
60 static int target_array2mem(Jim_Interp *interp, struct target *target,
61 int argc, Jim_Obj *const *argv);
62 static int target_mem2array(Jim_Interp *interp, struct target *target,
63 int argc, Jim_Obj *const *argv);
64 static int target_register_user_commands(struct command_context *cmd_ctx);
65
66 /* targets */
67 extern struct target_type arm7tdmi_target;
68 extern struct target_type arm720t_target;
69 extern struct target_type arm9tdmi_target;
70 extern struct target_type arm920t_target;
71 extern struct target_type arm966e_target;
72 extern struct target_type arm946e_target;
73 extern struct target_type arm926ejs_target;
74 extern struct target_type fa526_target;
75 extern struct target_type feroceon_target;
76 extern struct target_type dragonite_target;
77 extern struct target_type xscale_target;
78 extern struct target_type cortexm3_target;
79 extern struct target_type cortexa8_target;
80 extern struct target_type arm11_target;
81 extern struct target_type mips_m4k_target;
82 extern struct target_type avr_target;
83 extern struct target_type dsp563xx_target;
84 extern struct target_type dsp5680xx_target;
85 extern struct target_type testee_target;
86 extern struct target_type avr32_ap7k_target;
87
88 static struct target_type *target_types[] =
89 {
90 &arm7tdmi_target,
91 &arm9tdmi_target,
92 &arm920t_target,
93 &arm720t_target,
94 &arm966e_target,
95 &arm946e_target,
96 &arm926ejs_target,
97 &fa526_target,
98 &feroceon_target,
99 &dragonite_target,
100 &xscale_target,
101 &cortexm3_target,
102 &cortexa8_target,
103 &arm11_target,
104 &mips_m4k_target,
105 &avr_target,
106 &dsp563xx_target,
107 &dsp5680xx_target,
108 &testee_target,
109 &avr32_ap7k_target,
110 NULL,
111 };
112
113 struct target *all_targets = NULL;
114 static struct target_event_callback *target_event_callbacks = NULL;
115 static struct target_timer_callback *target_timer_callbacks = NULL;
116 static const int polling_interval = 100;
117
118 static const Jim_Nvp nvp_assert[] = {
119 { .name = "assert", NVP_ASSERT },
120 { .name = "deassert", NVP_DEASSERT },
121 { .name = "T", NVP_ASSERT },
122 { .name = "F", NVP_DEASSERT },
123 { .name = "t", NVP_ASSERT },
124 { .name = "f", NVP_DEASSERT },
125 { .name = NULL, .value = -1 }
126 };
127
128 static const Jim_Nvp nvp_error_target[] = {
129 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
130 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
131 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
132 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
133 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
134 { .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
135 { .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
136 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
137 { .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
138 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
139 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
140 { .value = -1, .name = NULL }
141 };
142
143 static const char *target_strerror_safe(int err)
144 {
145 const Jim_Nvp *n;
146
147 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
148 if (n->name == NULL) {
149 return "unknown";
150 } else {
151 return n->name;
152 }
153 }
154
155 static const Jim_Nvp nvp_target_event[] = {
156 { .value = TARGET_EVENT_OLD_gdb_program_config , .name = "old-gdb_program_config" },
157 { .value = TARGET_EVENT_OLD_pre_resume , .name = "old-pre_resume" },
158
159 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
160 { .value = TARGET_EVENT_HALTED, .name = "halted" },
161 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
162 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
163 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
164
165 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
166 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
167
168 /* historical name */
169
170 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
171
172 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
173 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
174 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
175 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
176 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
177 { .value = TARGET_EVENT_RESET_HALT_PRE, .name = "reset-halt-pre" },
178 { .value = TARGET_EVENT_RESET_HALT_POST, .name = "reset-halt-post" },
179 { .value = TARGET_EVENT_RESET_WAIT_PRE, .name = "reset-wait-pre" },
180 { .value = TARGET_EVENT_RESET_WAIT_POST, .name = "reset-wait-post" },
181 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
182 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
183
184 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
185 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
186
187 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
188 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
189
190 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
191 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
192
193 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
194 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },
195
196 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
197 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },
198
199 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
200 { .value = TARGET_EVENT_RESUMED , .name = "resume-ok" },
201 { .value = TARGET_EVENT_RESUME_END , .name = "resume-end" },
202
203 { .name = NULL, .value = -1 }
204 };
205
206 static const Jim_Nvp nvp_target_state[] = {
207 { .name = "unknown", .value = TARGET_UNKNOWN },
208 { .name = "running", .value = TARGET_RUNNING },
209 { .name = "halted", .value = TARGET_HALTED },
210 { .name = "reset", .value = TARGET_RESET },
211 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
212 { .name = NULL, .value = -1 },
213 };
214
215 static const Jim_Nvp nvp_target_debug_reason [] = {
216 { .name = "debug-request" , .value = DBG_REASON_DBGRQ },
217 { .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT },
218 { .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT },
219 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
220 { .name = "single-step" , .value = DBG_REASON_SINGLESTEP },
221 { .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED },
222 { .name = "undefined" , .value = DBG_REASON_UNDEFINED },
223 { .name = NULL, .value = -1 },
224 };
225
226 static const Jim_Nvp nvp_target_endian[] = {
227 { .name = "big", .value = TARGET_BIG_ENDIAN },
228 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
229 { .name = "be", .value = TARGET_BIG_ENDIAN },
230 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
231 { .name = NULL, .value = -1 },
232 };
233
234 static const Jim_Nvp nvp_reset_modes[] = {
235 { .name = "unknown", .value = RESET_UNKNOWN },
236 { .name = "run" , .value = RESET_RUN },
237 { .name = "halt" , .value = RESET_HALT },
238 { .name = "init" , .value = RESET_INIT },
239 { .name = NULL , .value = -1 },
240 };
241
242 const char *debug_reason_name(struct target *t)
243 {
244 const char *cp;
245
246 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
247 t->debug_reason)->name;
248 if (!cp) {
249 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
250 cp = "(*BUG*unknown*BUG*)";
251 }
252 return cp;
253 }
254
255 const char *
256 target_state_name( struct target *t )
257 {
258 const char *cp;
259 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
260 if( !cp ){
261 LOG_ERROR("Invalid target state: %d", (int)(t->state));
262 cp = "(*BUG*unknown*BUG*)";
263 }
264 return cp;
265 }
266
267 /* determine the number of the new target */
268 static int new_target_number(void)
269 {
270 struct target *t;
271 int x;
272
273 /* number is 0 based */
274 x = -1;
275 t = all_targets;
276 while (t) {
277 if (x < t->target_number) {
278 x = t->target_number;
279 }
280 t = t->next;
281 }
282 return x + 1;
283 }
284
285 /* read a uint32_t from a buffer in target memory endianness */
286 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
287 {
288 if (target->endianness == TARGET_LITTLE_ENDIAN)
289 return le_to_h_u32(buffer);
290 else
291 return be_to_h_u32(buffer);
292 }
293
294 /* read a uint24_t from a buffer in target memory endianness */
295 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
296 {
297 if (target->endianness == TARGET_LITTLE_ENDIAN)
298 return le_to_h_u24(buffer);
299 else
300 return be_to_h_u24(buffer);
301 }
302
303 /* read a uint16_t from a buffer in target memory endianness */
304 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
305 {
306 if (target->endianness == TARGET_LITTLE_ENDIAN)
307 return le_to_h_u16(buffer);
308 else
309 return be_to_h_u16(buffer);
310 }
311
312 /* read a uint8_t from a buffer in target memory endianness */
313 static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
314 {
315 return *buffer & 0x0ff;
316 }
317
318 /* write a uint32_t to a buffer in target memory endianness */
319 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
320 {
321 if (target->endianness == TARGET_LITTLE_ENDIAN)
322 h_u32_to_le(buffer, value);
323 else
324 h_u32_to_be(buffer, value);
325 }
326
327 /* write a uint24_t to a buffer in target memory endianness */
328 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
329 {
330 if (target->endianness == TARGET_LITTLE_ENDIAN)
331 h_u24_to_le(buffer, value);
332 else
333 h_u24_to_be(buffer, value);
334 }
335
336 /* write a uint16_t to a buffer in target memory endianness */
337 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
338 {
339 if (target->endianness == TARGET_LITTLE_ENDIAN)
340 h_u16_to_le(buffer, value);
341 else
342 h_u16_to_be(buffer, value);
343 }
344
345 /* write a uint8_t to a buffer in target memory endianness */
346 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
347 {
348 *buffer = value;
349 }
350
351 /* read a uint32_t array from a buffer in target memory endianness */
352 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
353 {
354 uint32_t i;
355 for(i = 0; i < count; i ++)
356 dstbuf[i] = target_buffer_get_u32(target,&buffer[i*4]);
357 }
358
359 /* read a uint16_t array from a buffer in target memory endianness */
360 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
361 {
362 uint32_t i;
363 for(i = 0; i < count; i ++)
364 dstbuf[i] = target_buffer_get_u16(target,&buffer[i*2]);
365 }
366
367 /* write a uint32_t array to a buffer in target memory endianness */
368 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, uint32_t *srcbuf)
369 {
370 uint32_t i;
371 for(i = 0; i < count; i ++)
372 target_buffer_set_u32(target,&buffer[i*4],srcbuf[i]);
373 }
374
375 /* write a uint16_t array to a buffer in target memory endianness */
376 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, uint16_t *srcbuf)
377 {
378 uint32_t i;
379 for(i = 0; i < count; i ++)
380 target_buffer_set_u16(target,&buffer[i*2],srcbuf[i]);
381 }
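/* Illustrative sketch (hypothetical caller, not part of this file): the
 * helpers above let callers decode raw target memory without caring which
 * endianness the target uses. Reading and decoding one 32-bit word:
 *
 *	uint8_t raw[4];
 *	int retval = target_read_memory(target, address, 4, 1, raw);
 *	if (retval == ERROR_OK) {
 *		uint32_t word = target_buffer_get_u32(target, raw);
 *		LOG_DEBUG("0x%8.8" PRIx32 ": 0x%8.8" PRIx32, address, word);
 *	}
 */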
382
383 /* return a pointer to a configured target; id is name or number */
384 struct target *get_target(const char *id)
385 {
386 struct target *target;
387
388 /* try as Tcl target name */
389 for (target = all_targets; target; target = target->next) {
390 if (target->cmd_name == NULL)
391 continue;
392 if (strcmp(id, target->cmd_name) == 0)
393 return target;
394 }
395
396 /* It's OK to remove this fallback sometime after August 2010 or so */
397
398 /* no match, try as number */
399 unsigned num;
400 if (parse_uint(id, &num) != ERROR_OK)
401 return NULL;
402
403 for (target = all_targets; target; target = target->next) {
404 if (target->target_number == (int)num) {
405 LOG_WARNING("use '%s' as target identifier, not '%u'",
406 target->cmd_name, num);
407 return target;
408 }
409 }
410
411 return NULL;
412 }
413
414 /* returns a pointer to the n-th configured target */
415 static struct target *get_target_by_num(int num)
416 {
417 struct target *target = all_targets;
418
419 while (target) {
420 if (target->target_number == num) {
421 return target;
422 }
423 target = target->next;
424 }
425
426 return NULL;
427 }
428
429 struct target* get_current_target(struct command_context *cmd_ctx)
430 {
431 struct target *target = get_target_by_num(cmd_ctx->current_target);
432
433 if (target == NULL)
434 {
435 LOG_ERROR("BUG: current_target out of bounds");
436 exit(-1);
437 }
438
439 return target;
440 }
441
442 int target_poll(struct target *target)
443 {
444 int retval;
445
446 /* We can't poll until after examine */
447 if (!target_was_examined(target))
448 {
449 /* Fail silently lest we pollute the log */
450 return ERROR_FAIL;
451 }
452
453 retval = target->type->poll(target);
454 if (retval != ERROR_OK)
455 return retval;
456
457 if (target->halt_issued)
458 {
459 if (target->state == TARGET_HALTED)
460 {
461 target->halt_issued = false;
462 } else
463 {
464 long long t = timeval_ms() - target->halt_issued_time;
465 if (t>1000)
466 {
467 target->halt_issued = false;
468 LOG_INFO("Halt timed out, wake up GDB.");
469 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
470 }
471 }
472 }
473
474 return ERROR_OK;
475 }
476
477 int target_halt(struct target *target)
478 {
479 int retval;
480 /* We can't poll until after examine */
481 if (!target_was_examined(target))
482 {
483 LOG_ERROR("Target not examined yet");
484 return ERROR_FAIL;
485 }
486
487 retval = target->type->halt(target);
488 if (retval != ERROR_OK)
489 return retval;
490
491 target->halt_issued = true;
492 target->halt_issued_time = timeval_ms();
493
494 return ERROR_OK;
495 }
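/* Illustrative sketch (hypothetical caller, not part of this file): because
 * halt is asynchronous, callers typically issue target_halt() and then poll
 * until the state changes or a deadline expires:
 *
 *	if (target_halt(target) != ERROR_OK)
 *		return ERROR_FAIL;
 *	long long deadline = timeval_ms() + 1000;
 *	while (target->state != TARGET_HALTED) {
 *		int retval = target_poll(target);
 *		if (retval != ERROR_OK)
 *			return retval;
 *		if (timeval_ms() > deadline)
 *			return ERROR_TARGET_TIMEOUT;
 *	}
 */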
496
497 /**
498 * Make the target (re)start executing using its saved execution
499 * context (possibly with some modifications).
500 *
501 * @param target Which target should start executing.
502 * @param current True to use the target's saved program counter instead
503 * of the address parameter
504 * @param address Optionally used as the program counter.
505 * @param handle_breakpoints True iff breakpoints at the resumption PC
506 * should be skipped. (For example, maybe execution was stopped by
507 * such a breakpoint, in which case it would be counterproductive to
508 * let it re-trigger.)
509 * @param debug_execution False if all working areas allocated by OpenOCD
510 * should be released and/or restored to their original contents.
511 * (This would for example be true to run some downloaded "helper"
512 * algorithm code, which resides in one such working buffer and uses
513 * another for data storage.)
514 *
515 * @todo Resolve the ambiguity about what the "debug_execution" flag
516 * signifies. For example, Target implementations don't agree on how
517 * it relates to invalidation of the register cache, or to whether
518 * breakpoints and watchpoints should be enabled. (It would seem wrong
519 * to enable breakpoints when running downloaded "helper" algorithms
520 * (debug_execution true), since the breakpoints would be set to match
521 * target firmware being debugged, not the helper algorithm.... and
522 * enabling them could cause such helpers to malfunction (for example,
523 * by overwriting data with a breakpoint instruction). On the other
524 * hand the infrastructure for running such helpers might use this
525 * procedure but rely on hardware breakpoint to detect termination.)
526 */
527 int target_resume(struct target *target, int current, uint32_t address, int handle_breakpoints, int debug_execution)
528 {
529 int retval;
530
531 /* We can't poll until after examine */
532 if (!target_was_examined(target))
533 {
534 LOG_ERROR("Target not examined yet");
535 return ERROR_FAIL;
536 }
537
538 /* note that resume *must* be asynchronous. The CPU can halt before
539 * we poll. The CPU can even halt at the current PC as a result of
540 * a software breakpoint being inserted by (a bug?) the application.
541 */
542 if ((retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution)) != ERROR_OK)
543 return retval;
544
545 return retval;
546 }
547
548 static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
549 {
550 char buf[100];
551 int retval;
552 Jim_Nvp *n;
553 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
554 if (n->name == NULL) {
555 LOG_ERROR("invalid reset mode");
556 return ERROR_FAIL;
557 }
558
559 /* disable polling during reset to make reset event scripts
560 * more predictable, i.e. dr/irscan & pathmove in events will
561 * not have JTAG operations injected into the middle of a sequence.
562 */
563 bool save_poll = jtag_poll_get_enabled();
564
565 jtag_poll_set_enabled(false);
566
567 sprintf(buf, "ocd_process_reset %s", n->name);
568 retval = Jim_Eval(cmd_ctx->interp, buf);
569
570 jtag_poll_set_enabled(save_poll);
571
572 if (retval != JIM_OK) {
573 Jim_MakeErrorMessage(cmd_ctx->interp);
574 command_print(NULL,"%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
575 return ERROR_FAIL;
576 }
577
578 /* We want any events to be processed before the prompt */
579 retval = target_call_timer_callbacks_now();
580
581 struct target *target;
582 for (target = all_targets; target; target = target->next) {
583 target->type->check_reset(target);
584 }
585
586 return retval;
587 }
588
589 static int identity_virt2phys(struct target *target,
590 uint32_t virtual, uint32_t *physical)
591 {
592 *physical = virtual;
593 return ERROR_OK;
594 }
595
596 static int no_mmu(struct target *target, int *enabled)
597 {
598 *enabled = 0;
599 return ERROR_OK;
600 }
601
602 static int default_examine(struct target *target)
603 {
604 target_set_examined(target);
605 return ERROR_OK;
606 }
607
608 /* no check by default */
609 static int default_check_reset(struct target *target)
610 {
611 return ERROR_OK;
612 }
613
614 int target_examine_one(struct target *target)
615 {
616 return target->type->examine(target);
617 }
618
619 static int jtag_enable_callback(enum jtag_event event, void *priv)
620 {
621 struct target *target = priv;
622
623 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
624 return ERROR_OK;
625
626 jtag_unregister_event_callback(jtag_enable_callback, target);
627 return target_examine_one(target);
628 }
629
630
631 /* Targets that correctly implement init + examine, i.e.
632 * no communication with target during init:
633 *
634 * XScale
635 */
636 int target_examine(void)
637 {
638 int retval = ERROR_OK;
639 struct target *target;
640
641 for (target = all_targets; target; target = target->next)
642 {
643 /* defer examination, but don't skip it */
644 if (!target->tap->enabled) {
645 jtag_register_event_callback(jtag_enable_callback,
646 target);
647 continue;
648 }
649 if ((retval = target_examine_one(target)) != ERROR_OK)
650 return retval;
651 }
652 return retval;
653 }
654 const char *target_type_name(struct target *target)
655 {
656 return target->type->name;
657 }
658
659 static int target_write_memory_imp(struct target *target, uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
660 {
661 if (!target_was_examined(target))
662 {
663 LOG_ERROR("Target not examined yet");
664 return ERROR_FAIL;
665 }
666 return target->type->write_memory_imp(target, address, size, count, buffer);
667 }
668
669 static int target_read_memory_imp(struct target *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
670 {
671 if (!target_was_examined(target))
672 {
673 LOG_ERROR("Target not examined yet");
674 return ERROR_FAIL;
675 }
676 return target->type->read_memory_imp(target, address, size, count, buffer);
677 }
678
679 static int target_soft_reset_halt_imp(struct target *target)
680 {
681 if (!target_was_examined(target))
682 {
683 LOG_ERROR("Target not examined yet");
684 return ERROR_FAIL;
685 }
686 if (!target->type->soft_reset_halt_imp) {
687 LOG_ERROR("Target %s does not support soft_reset_halt",
688 target_name(target));
689 return ERROR_FAIL;
690 }
691 return target->type->soft_reset_halt_imp(target);
692 }
693
694 /**
695 * Downloads a target-specific native code algorithm to the target,
696 * and executes it. Note that some targets may need to set up, enable,
697 * and tear down a breakpoint (hard or soft) to detect algorithm
698 * termination, while others may support lower overhead schemes where
699 * soft breakpoints embedded in the algorithm automatically terminate the
700 * algorithm.
701 *
702 * @param target used to run the algorithm
703 * @param arch_info target-specific description of the algorithm.
704 */
705 int target_run_algorithm(struct target *target,
706 int num_mem_params, struct mem_param *mem_params,
707 int num_reg_params, struct reg_param *reg_param,
708 uint32_t entry_point, uint32_t exit_point,
709 int timeout_ms, void *arch_info)
710 {
711 int retval = ERROR_FAIL;
712
713 if (!target_was_examined(target))
714 {
715 LOG_ERROR("Target not examined yet");
716 goto done;
717 }
718 if (!target->type->run_algorithm) {
719 LOG_ERROR("Target type '%s' does not support %s",
720 target_type_name(target), __func__);
721 goto done;
722 }
723
724 target->running_alg = true;
725 retval = target->type->run_algorithm(target,
726 num_mem_params, mem_params,
727 num_reg_params, reg_param,
728 entry_point, exit_point, timeout_ms, arch_info);
729 target->running_alg = false;
730
731 done:
732 return retval;
733 }
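/* Illustrative sketch (hypothetical caller, not part of this file): a flash
 * driver that has already copied helper code into a working area and prepared
 * its register parameters (setup elided) would run it roughly as below, where
 * entry_offset/exit_offset are offsets into that helper code and arch_info is
 * the target-specific algorithm descriptor:
 *
 *	retval = target_run_algorithm(target, 0, NULL,
 *			num_reg_params, reg_params,
 *			algo_area->address + entry_offset,
 *			algo_area->address + exit_offset,
 *			1000, &arch_info);
 */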
734
735
736 int target_read_memory(struct target *target,
737 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
738 {
739 return target->type->read_memory(target, address, size, count, buffer);
740 }
741
742 static int target_read_phys_memory(struct target *target,
743 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
744 {
745 return target->type->read_phys_memory(target, address, size, count, buffer);
746 }
747
748 int target_write_memory(struct target *target,
749 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
750 {
751 return target->type->write_memory(target, address, size, count, buffer);
752 }
753
754 static int target_write_phys_memory(struct target *target,
755 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
756 {
757 return target->type->write_phys_memory(target, address, size, count, buffer);
758 }
759
760 int target_bulk_write_memory(struct target *target,
761 uint32_t address, uint32_t count, const uint8_t *buffer)
762 {
763 return target->type->bulk_write_memory(target, address, count, buffer);
764 }
765
766 int target_add_breakpoint(struct target *target,
767 struct breakpoint *breakpoint)
768 {
769 if ((target->state != TARGET_HALTED)&&(breakpoint->type!=BKPT_HARD)) {
770 LOG_WARNING("target %s is not halted", target->cmd_name);
771 return ERROR_TARGET_NOT_HALTED;
772 }
773 return target->type->add_breakpoint(target, breakpoint);
774 }
775
776 int target_add_context_breakpoint(struct target *target,
777 struct breakpoint *breakpoint)
778 {
779 if (target->state != TARGET_HALTED) {
780 LOG_WARNING("target %s is not halted", target->cmd_name);
781 return ERROR_TARGET_NOT_HALTED;
782 }
783 return target->type->add_context_breakpoint(target, breakpoint);
784 }
785
786 int target_add_hybrid_breakpoint(struct target *target,
787 struct breakpoint *breakpoint)
788 {
789 if (target->state != TARGET_HALTED) {
790 LOG_WARNING("target %s is not halted", target->cmd_name);
791 return ERROR_TARGET_NOT_HALTED;
792 }
793 return target->type->add_hybrid_breakpoint(target, breakpoint);
794 }
795
796 int target_remove_breakpoint(struct target *target,
797 struct breakpoint *breakpoint)
798 {
799 return target->type->remove_breakpoint(target, breakpoint);
800 }
801
802 int target_add_watchpoint(struct target *target,
803 struct watchpoint *watchpoint)
804 {
805 if (target->state != TARGET_HALTED) {
806 LOG_WARNING("target %s is not halted", target->cmd_name);
807 return ERROR_TARGET_NOT_HALTED;
808 }
809 return target->type->add_watchpoint(target, watchpoint);
810 }
811 int target_remove_watchpoint(struct target *target,
812 struct watchpoint *watchpoint)
813 {
814 return target->type->remove_watchpoint(target, watchpoint);
815 }
816
817 int target_get_gdb_reg_list(struct target *target,
818 struct reg **reg_list[], int *reg_list_size)
819 {
820 return target->type->get_gdb_reg_list(target, reg_list, reg_list_size);
821 }
822 int target_step(struct target *target,
823 int current, uint32_t address, int handle_breakpoints)
824 {
825 return target->type->step(target, current, address, handle_breakpoints);
826 }
827
828
829 /**
830 * Reset the @c examined flag for the given target.
831 * Pure paranoia -- targets are zeroed on allocation.
832 */
833 static void target_reset_examined(struct target *target)
834 {
835 target->examined = false;
836 }
837
838 static int
839 err_read_phys_memory(struct target *target, uint32_t address,
840 uint32_t size, uint32_t count, uint8_t *buffer)
841 {
842 LOG_ERROR("Not implemented: %s", __func__);
843 return ERROR_FAIL;
844 }
845
846 static int
847 err_write_phys_memory(struct target *target, uint32_t address,
848 uint32_t size, uint32_t count, const uint8_t *buffer)
849 {
850 LOG_ERROR("Not implemented: %s", __func__);
851 return ERROR_FAIL;
852 }
853
854 static int handle_target(void *priv);
855
856 static int target_init_one(struct command_context *cmd_ctx,
857 struct target *target)
858 {
859 target_reset_examined(target);
860
861 struct target_type *type = target->type;
862 if (type->examine == NULL)
863 type->examine = default_examine;
864
865 if (type->check_reset== NULL)
866 type->check_reset = default_check_reset;
867
868 int retval = type->init_target(cmd_ctx, target);
869 if (ERROR_OK != retval)
870 {
871 LOG_ERROR("target '%s' init failed", target_name(target));
872 return retval;
873 }
874
875 /**
876 * @todo get rid of those *memory_imp() methods, now that all
877 * callers are using target_*_memory() accessors ... and make
878 * sure the "physical" paths handle the same issues.
879 */
880 /* a non-invasive way(in terms of patches) to add some code that
881 * runs before the type->write/read_memory implementation
882 */
883 type->write_memory_imp = target->type->write_memory;
884 type->write_memory = target_write_memory_imp;
885
886 type->read_memory_imp = target->type->read_memory;
887 type->read_memory = target_read_memory_imp;
888
889 type->soft_reset_halt_imp = target->type->soft_reset_halt;
890 type->soft_reset_halt = target_soft_reset_halt_imp;
891
892 /* Sanity-check MMU support ... stub in what we must, to help
893 * implement it in stages, but warn if we need to do so.
894 */
895 if (type->mmu)
896 {
897 if (type->write_phys_memory == NULL)
898 {
899 LOG_ERROR("type '%s' is missing write_phys_memory",
900 type->name);
901 type->write_phys_memory = err_write_phys_memory;
902 }
903 if (type->read_phys_memory == NULL)
904 {
905 LOG_ERROR("type '%s' is missing read_phys_memory",
906 type->name);
907 type->read_phys_memory = err_read_phys_memory;
908 }
909 if (type->virt2phys == NULL)
910 {
911 LOG_ERROR("type '%s' is missing virt2phys", type->name);
912 type->virt2phys = identity_virt2phys;
913 }
914 }
915 else
916 {
917 /* Make sure no-MMU targets all behave the same: make no
918 * distinction between physical and virtual addresses, and
919 * ensure that virt2phys() is always an identity mapping.
920 */
921 if (type->write_phys_memory || type->read_phys_memory
922 || type->virt2phys)
923 {
924 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
925 }
926
927 type->mmu = no_mmu;
928 type->write_phys_memory = type->write_memory;
929 type->read_phys_memory = type->read_memory;
930 type->virt2phys = identity_virt2phys;
931 }
932
933 if (target->type->read_buffer == NULL)
934 target->type->read_buffer = target_read_buffer_default;
935
936 if (target->type->write_buffer == NULL)
937 target->type->write_buffer = target_write_buffer_default;
938
939 return ERROR_OK;
940 }
941
942 static int target_init(struct command_context *cmd_ctx)
943 {
944 struct target *target;
945 int retval;
946
947 for (target = all_targets; target; target = target->next)
948 {
949 retval = target_init_one(cmd_ctx, target);
950 if (ERROR_OK != retval)
951 return retval;
952 }
953
954 if (!all_targets)
955 return ERROR_OK;
956
957 retval = target_register_user_commands(cmd_ctx);
958 if (ERROR_OK != retval)
959 return retval;
960
961 retval = target_register_timer_callback(&handle_target,
962 polling_interval, 1, cmd_ctx->interp);
963 if (ERROR_OK != retval)
964 return retval;
965
966 return ERROR_OK;
967 }
968
969 COMMAND_HANDLER(handle_target_init_command)
970 {
971 if (CMD_ARGC != 0)
972 return ERROR_COMMAND_SYNTAX_ERROR;
973
974 static bool target_initialized = false;
975 if (target_initialized)
976 {
977 LOG_INFO("'target init' has already been called");
978 return ERROR_OK;
979 }
980 target_initialized = true;
981
982 LOG_DEBUG("Initializing targets...");
983 return target_init(CMD_CTX);
984 }
985
986 int target_register_event_callback(int (*callback)(struct target *target, enum target_event event, void *priv), void *priv)
987 {
988 struct target_event_callback **callbacks_p = &target_event_callbacks;
989
990 if (callback == NULL)
991 {
992 return ERROR_INVALID_ARGUMENTS;
993 }
994
995 if (*callbacks_p)
996 {
997 while ((*callbacks_p)->next)
998 callbacks_p = &((*callbacks_p)->next);
999 callbacks_p = &((*callbacks_p)->next);
1000 }
1001
1002 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1003 (*callbacks_p)->callback = callback;
1004 (*callbacks_p)->priv = priv;
1005 (*callbacks_p)->next = NULL;
1006
1007 return ERROR_OK;
1008 }
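/* Illustrative sketch (hypothetical client, not part of this file): a server
 * or RTOS layer can hook target events like this:
 *
 *	static int my_event_handler(struct target *target,
 *			enum target_event event, void *priv)
 *	{
 *		if (event == TARGET_EVENT_HALTED)
 *			LOG_INFO("%s halted", target_name(target));
 *		return ERROR_OK;
 *	}
 *
 *	target_register_event_callback(my_event_handler, NULL);
 */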
1009
1010 int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
1011 {
1012 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1013 struct timeval now;
1014
1015 if (callback == NULL)
1016 {
1017 return ERROR_INVALID_ARGUMENTS;
1018 }
1019
1020 if (*callbacks_p)
1021 {
1022 while ((*callbacks_p)->next)
1023 callbacks_p = &((*callbacks_p)->next);
1024 callbacks_p = &((*callbacks_p)->next);
1025 }
1026
1027 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1028 (*callbacks_p)->callback = callback;
1029 (*callbacks_p)->periodic = periodic;
1030 (*callbacks_p)->time_ms = time_ms;
1031
1032 gettimeofday(&now, NULL);
1033 (*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000;
1034 time_ms -= (time_ms % 1000);
1035 (*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000);
1036 if ((*callbacks_p)->when.tv_usec > 1000000)
1037 {
1038 (*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000;
1039 (*callbacks_p)->when.tv_sec += 1;
1040 }
1041
1042 (*callbacks_p)->priv = priv;
1043 (*callbacks_p)->next = NULL;
1044
1045 return ERROR_OK;
1046 }
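/* Illustrative sketch (hypothetical caller, not part of this file): this is
 * the same mechanism target_init() uses to register handle_target(); a
 * periodic 500 ms callback would be registered as:
 *
 *	static int my_tick(void *priv)
 *	{
 *		LOG_DEBUG("tick");
 *		return ERROR_OK;
 *	}
 *
 *	target_register_timer_callback(my_tick, 500, 1, NULL);
 */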
1047
1048 int target_unregister_event_callback(int (*callback)(struct target *target, enum target_event event, void *priv), void *priv)
1049 {
1050 struct target_event_callback **p = &target_event_callbacks;
1051 struct target_event_callback *c = target_event_callbacks;
1052
1053 if (callback == NULL)
1054 {
1055 return ERROR_INVALID_ARGUMENTS;
1056 }
1057
1058 while (c)
1059 {
1060 struct target_event_callback *next = c->next;
1061 if ((c->callback == callback) && (c->priv == priv))
1062 {
1063 *p = next;
1064 free(c);
1065 return ERROR_OK;
1066 }
1067 else
1068 p = &(c->next);
1069 c = next;
1070 }
1071
1072 return ERROR_OK;
1073 }
1074
1075 static int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1076 {
1077 struct target_timer_callback **p = &target_timer_callbacks;
1078 struct target_timer_callback *c = target_timer_callbacks;
1079
1080 if (callback == NULL)
1081 {
1082 return ERROR_INVALID_ARGUMENTS;
1083 }
1084
1085 while (c)
1086 {
1087 struct target_timer_callback *next = c->next;
1088 if ((c->callback == callback) && (c->priv == priv))
1089 {
1090 *p = next;
1091 free(c);
1092 return ERROR_OK;
1093 }
1094 else
1095 p = &(c->next);
1096 c = next;
1097 }
1098
1099 return ERROR_OK;
1100 }
1101
1102 int target_call_event_callbacks(struct target *target, enum target_event event)
1103 {
1104 struct target_event_callback *callback = target_event_callbacks;
1105 struct target_event_callback *next_callback;
1106
1107 if (event == TARGET_EVENT_HALTED)
1108 {
1109 /* execute early halted first */
1110 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1111 }
1112
1113 LOG_DEBUG("target event %i (%s)",
1114 event,
1115 Jim_Nvp_value2name_simple(nvp_target_event, event)->name);
1116
1117 target_handle_event(target, event);
1118
1119 while (callback)
1120 {
1121 next_callback = callback->next;
1122 callback->callback(target, event, callback->priv);
1123 callback = next_callback;
1124 }
1125
1126 return ERROR_OK;
1127 }
1128
1129 static int target_timer_callback_periodic_restart(
1130 struct target_timer_callback *cb, struct timeval *now)
1131 {
1132 int time_ms = cb->time_ms;
1133 cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
1134 time_ms -= (time_ms % 1000);
1135 cb->when.tv_sec = now->tv_sec + time_ms / 1000;
1136 if (cb->when.tv_usec > 1000000)
1137 {
1138 cb->when.tv_usec = cb->when.tv_usec - 1000000;
1139 cb->when.tv_sec += 1;
1140 }
1141 return ERROR_OK;
1142 }
1143
1144 static int target_call_timer_callback(struct target_timer_callback *cb,
1145 struct timeval *now)
1146 {
1147 cb->callback(cb->priv);
1148
1149 if (cb->periodic)
1150 return target_timer_callback_periodic_restart(cb, now);
1151
1152 return target_unregister_timer_callback(cb->callback, cb->priv);
1153 }
1154
1155 static int target_call_timer_callbacks_check_time(int checktime)
1156 {
1157 keep_alive();
1158
1159 struct timeval now;
1160 gettimeofday(&now, NULL);
1161
1162 struct target_timer_callback *callback = target_timer_callbacks;
1163 while (callback)
1164 {
1165 /* cleaning up may unregister and free this callback */
1166 struct target_timer_callback *next_callback = callback->next;
1167
1168 bool call_it = callback->callback &&
1169 ((!checktime && callback->periodic) ||
1170 now.tv_sec > callback->when.tv_sec ||
1171 (now.tv_sec == callback->when.tv_sec &&
1172 now.tv_usec >= callback->when.tv_usec));
1173
1174 if (call_it)
1175 {
1176 int retval = target_call_timer_callback(callback, &now);
1177 if (retval != ERROR_OK)
1178 return retval;
1179 }
1180
1181 callback = next_callback;
1182 }
1183
1184 return ERROR_OK;
1185 }
1186
1187 int target_call_timer_callbacks(void)
1188 {
1189 return target_call_timer_callbacks_check_time(1);
1190 }
1191
1192 /* invoke periodic callbacks immediately */
1193 int target_call_timer_callbacks_now(void)
1194 {
1195 return target_call_timer_callbacks_check_time(0);
1196 }
1197
1198 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1199 {
1200 struct working_area *c = target->working_areas;
1201 struct working_area *new_wa = NULL;
1202
1203 /* Reevaluate working area address based on MMU state*/
1204 if (target->working_areas == NULL)
1205 {
1206 int retval;
1207 int enabled;
1208
1209 retval = target->type->mmu(target, &enabled);
1210 if (retval != ERROR_OK)
1211 {
1212 return retval;
1213 }
1214
1215 if (!enabled) {
1216 if (target->working_area_phys_spec) {
1217 LOG_DEBUG("MMU disabled, using physical "
1218 "address for working memory 0x%08x",
1219 (unsigned)target->working_area_phys);
1220 target->working_area = target->working_area_phys;
1221 } else {
1222 LOG_ERROR("No working memory available. "
1223 "Specify -work-area-phys to target.");
1224 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1225 }
1226 } else {
1227 if (target->working_area_virt_spec) {
1228 LOG_DEBUG("MMU enabled, using virtual "
1229 "address for working memory 0x%08x",
1230 (unsigned)target->working_area_virt);
1231 target->working_area = target->working_area_virt;
1232 } else {
1233 LOG_ERROR("No working memory available. "
1234 "Specify -work-area-virt to target.");
1235 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1236 }
1237 }
1238 }
1239
1240 /* only allocate multiples of 4 byte */
1241 if (size % 4)
1242 {
1243 LOG_ERROR("BUG: code tried to allocate unaligned number of bytes (0x%08x), padding", ((unsigned)(size)));
1244 size = (size + 3) & (~3);
1245 }
1246
1247 /* see if there's already a matching working area */
1248 while (c)
1249 {
1250 if ((c->free) && (c->size == size))
1251 {
1252 new_wa = c;
1253 break;
1254 }
1255 c = c->next;
1256 }
1257
1258 /* if not, allocate a new one */
1259 if (!new_wa)
1260 {
1261 struct working_area **p = &target->working_areas;
1262 uint32_t first_free = target->working_area;
1263 uint32_t free_size = target->working_area_size;
1264
1265 c = target->working_areas;
1266 while (c)
1267 {
1268 first_free += c->size;
1269 free_size -= c->size;
1270 p = &c->next;
1271 c = c->next;
1272 }
1273
1274 if (free_size < size)
1275 {
1276 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1277 }
1278
1279 LOG_DEBUG("allocated new working area at address 0x%08x", (unsigned)first_free);
1280
1281 new_wa = malloc(sizeof(struct working_area));
1282 new_wa->next = NULL;
1283 new_wa->size = size;
1284 new_wa->address = first_free;
1285
1286 if (target->backup_working_area)
1287 {
1288 int retval;
1289 new_wa->backup = malloc(new_wa->size);
1290 if ((retval = target_read_memory(target, new_wa->address, 4, new_wa->size / 4, new_wa->backup)) != ERROR_OK)
1291 {
1292 free(new_wa->backup);
1293 free(new_wa);
1294 return retval;
1295 }
1296 }
1297 else
1298 {
1299 new_wa->backup = NULL;
1300 }
1301
1302 /* put new entry in list */
1303 *p = new_wa;
1304 }
1305
1306 /* mark as used, and return the new (reused) area */
1307 new_wa->free = false;
1308 *area = new_wa;
1309
1310 /* user pointer */
1311 new_wa->user = area;
1312
1313 return ERROR_OK;
1314 }
1315
1316 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1317 {
1318 int retval;
1319
1320 retval = target_alloc_working_area_try(target, size, area);
1321 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1322 {
1323 LOG_WARNING("not enough working area available(requested %u)", (unsigned)(size));
1324 }
1325 return retval;
1326
1327 }
1328
1329 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1330 {
1331 if (area->free)
1332 return ERROR_OK;
1333
1334 if (restore && target->backup_working_area)
1335 {
1336 int retval;
1337 if ((retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup)) != ERROR_OK)
1338 return retval;
1339 }
1340
1341 area->free = true;
1342
1343 /* mark user pointer invalid */
1344 *area->user = NULL;
1345 area->user = NULL;
1346
1347 return ERROR_OK;
1348 }
1349
1350 int target_free_working_area(struct target *target, struct working_area *area)
1351 {
1352 return target_free_working_area_restore(target, area, 1);
1353 }
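/* Illustrative sketch (hypothetical caller, not part of this file): flash and
 * algorithm code typically brackets its use of target RAM like this, with
 * "code" being a 256-byte host-side buffer:
 *
 *	struct working_area *wa;
 *	if (target_alloc_working_area(target, 256, &wa) != ERROR_OK)
 *		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *	retval = target_write_buffer(target, wa->address, 256, code);
 *	...
 *	target_free_working_area(target, wa);
 */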
1354
1355 /* free resources and restore memory; if restoring memory fails,
1356 * free up resources anyway
1357 */
1358 static void target_free_all_working_areas_restore(struct target *target, int restore)
1359 {
1360 struct working_area *c = target->working_areas;
1361
1362 while (c)
1363 {
1364 struct working_area *next = c->next;
1365 target_free_working_area_restore(target, c, restore);
1366
1367 if (c->backup)
1368 free(c->backup);
1369
1370 free(c);
1371
1372 c = next;
1373 }
1374
1375 target->working_areas = NULL;
1376 }
1377
1378 void target_free_all_working_areas(struct target *target)
1379 {
1380 target_free_all_working_areas_restore(target, 1);
1381 }
1382
1383 int target_arch_state(struct target *target)
1384 {
1385 int retval;
1386 if (target == NULL)
1387 {
1388 LOG_USER("No target has been configured");
1389 return ERROR_OK;
1390 }
1391
1392 LOG_USER("target state: %s", target_state_name( target ));
1393
1394 if (target->state != TARGET_HALTED)
1395 return ERROR_OK;
1396
1397 retval = target->type->arch_state(target);
1398 return retval;
1399 }
1400
1401 /* Single aligned words are guaranteed to use 16 or 32 bit access
1402 * mode respectively, otherwise data is handled as quickly as
1403 * possible
1404 */
1405 int target_write_buffer(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
1406 {
1407 LOG_DEBUG("writing buffer of %i byte at 0x%8.8x",
1408 (int)size, (unsigned)address);
1409
1410 if (!target_was_examined(target))
1411 {
1412 LOG_ERROR("Target not examined yet");
1413 return ERROR_FAIL;
1414 }
1415
1416 if (size == 0) {
1417 return ERROR_OK;
1418 }
1419
1420 if ((address + size - 1) < address)
1421 {
1422 /* GDB can request this when e.g. PC is 0xfffffffc*/
1423 LOG_ERROR("address + size wrapped(0x%08x, 0x%08x)",
1424 (unsigned)address,
1425 (unsigned)size);
1426 return ERROR_FAIL;
1427 }
1428
1429 return target->type->write_buffer(target, address, size, buffer);
1430 }
1431
1432 static int target_write_buffer_default(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
1433 {
1434 int retval = ERROR_OK;
1435
1436 if (((address % 2) == 0) && (size == 2))
1437 {
1438 return target_write_memory(target, address, 2, 1, buffer);
1439 }
1440
1441 /* handle unaligned head bytes */
1442 if (address % 4)
1443 {
1444 uint32_t unaligned = 4 - (address % 4);
1445
1446 if (unaligned > size)
1447 unaligned = size;
1448
1449 if ((retval = target_write_memory(target, address, 1, unaligned, buffer)) != ERROR_OK)
1450 return retval;
1451
1452 buffer += unaligned;
1453 address += unaligned;
1454 size -= unaligned;
1455 }
1456
1457 /* handle aligned words */
1458 if (size >= 4)
1459 {
1460 int aligned = size - (size % 4);
1461
1462 /* use bulk writes above a certain limit. This may have to be changed */
1463 if (aligned > 128)
1464 {
1465 if ((retval = target->type->bulk_write_memory(target, address, aligned / 4, buffer)) != ERROR_OK)
1466 return retval;
1467 }
1468 else
1469 {
1470 if ((retval = target_write_memory(target, address, 4, aligned / 4, buffer)) != ERROR_OK)
1471 return retval;
1472 }
1473
1474 buffer += aligned;
1475 address += aligned;
1476 size -= aligned;
1477 }
1478
1479 /* handle tail writes of less than 4 bytes */
1480 if (size > 0)
1481 {
1482 if ((retval = target_write_memory(target, address, 1, size, buffer)) != ERROR_OK)
1483 return retval;
1484 }
1485
1486 return retval;
1487 }
1488
1489 /* Single aligned words are guaranteed to use 16 or 32 bit access
1490 * mode respectively, otherwise data is handled as quickly as
1491 * possible
1492 */
1493 int target_read_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
1494 {
1495 LOG_DEBUG("reading buffer of %i byte at 0x%8.8x",
1496 (int)size, (unsigned)address);
1497
1498 if (!target_was_examined(target))
1499 {
1500 LOG_ERROR("Target not examined yet");
1501 return ERROR_FAIL;
1502 }
1503
1504 if (size == 0) {
1505 return ERROR_OK;
1506 }
1507
1508 if ((address + size - 1) < address)
1509 {
1510 /* GDB can request this when e.g. PC is 0xfffffffc*/
1511 LOG_ERROR("address + size wrapped(0x%08" PRIx32 ", 0x%08" PRIx32 ")",
1512 address,
1513 size);
1514 return ERROR_FAIL;
1515 }
1516
1517 return target->type->read_buffer(target, address, size, buffer);
1518 }
1519
1520 static int target_read_buffer_default(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
1521 {
1522 int retval = ERROR_OK;
1523
1524 if (((address % 2) == 0) && (size == 2))
1525 {
1526 return target_read_memory(target, address, 2, 1, buffer);
1527 }
1528
1529 /* handle unaligned head bytes */
1530 if (address % 4)
1531 {
1532 uint32_t unaligned = 4 - (address % 4);
1533
1534 if (unaligned > size)
1535 unaligned = size;
1536
1537 if ((retval = target_read_memory(target, address, 1, unaligned, buffer)) != ERROR_OK)
1538 return retval;
1539
1540 buffer += unaligned;
1541 address += unaligned;
1542 size -= unaligned;
1543 }
1544
1545 /* handle aligned words */
1546 if (size >= 4)
1547 {
1548 int aligned = size - (size % 4);
1549
1550 if ((retval = target_read_memory(target, address, 4, aligned / 4, buffer)) != ERROR_OK)
1551 return retval;
1552
1553 buffer += aligned;
1554 address += aligned;
1555 size -= aligned;
1556 }
1557
1558 /*prevent byte access when possible (avoid AHB access limitations in some cases)*/
1559 if(size >=2)
1560 {
1561 int aligned = size - (size%2);
1562 retval = target_read_memory(target, address, 2, aligned / 2, buffer);
1563 if (retval != ERROR_OK)
1564 return retval;
1565
1566 buffer += aligned;
1567 address += aligned;
1568 size -= aligned;
1569 }
1570 /* handle tail reads of less than 4 bytes */
1571 if (size > 0)
1572 {
1573 if ((retval = target_read_memory(target, address, 1, size, buffer)) != ERROR_OK)
1574 return retval;
1575 }
1576
1577 return ERROR_OK;
1578 }
1579
1580 int target_checksum_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* crc)
1581 {
1582 uint8_t *buffer;
1583 int retval;
1584 uint32_t i;
1585 uint32_t checksum = 0;
1586 if (!target_was_examined(target))
1587 {
1588 LOG_ERROR("Target not examined yet");
1589 return ERROR_FAIL;
1590 }
1591
1592 if ((retval = target->type->checksum_memory(target, address,
1593 size, &checksum)) != ERROR_OK)
1594 {
1595 buffer = malloc(size);
1596 if (buffer == NULL)
1597 {
1598 LOG_ERROR("error allocating buffer for section (%d bytes)", (int)size);
1599 return ERROR_INVALID_ARGUMENTS;
1600 }
1601 retval = target_read_buffer(target, address, size, buffer);
1602 if (retval != ERROR_OK)
1603 {
1604 free(buffer);
1605 return retval;
1606 }
1607
1608 /* convert to target endianness */
1609 for (i = 0; i < (size/sizeof(uint32_t)); i++)
1610 {
1611 uint32_t target_data;
1612 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
1613 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
1614 }
1615
1616 retval = image_calculate_checksum(buffer, size, &checksum);
1617 free(buffer);
1618 }
1619
1620 *crc = checksum;
1621
1622 return retval;
1623 }
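/* Illustrative sketch (hypothetical caller, not part of this file): image
 * verification compares a host-side CRC over host_buffer/length with the
 * CRC computed over the same range of target memory:
 *
 *	uint32_t target_crc, host_crc;
 *	retval = target_checksum_memory(target, address, length, &target_crc);
 *	if (retval == ERROR_OK)
 *		retval = image_calculate_checksum(host_buffer, length, &host_crc);
 *	if (retval == ERROR_OK && target_crc != host_crc)
 *		LOG_ERROR("checksum mismatch");
 */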
1624
1625 int target_blank_check_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* blank)
1626 {
1627 int retval;
1628 if (!target_was_examined(target))
1629 {
1630 LOG_ERROR("Target not examined yet");
1631 return ERROR_FAIL;
1632 }
1633
1634 if (target->type->blank_check_memory == 0)
1635 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1636
1637 retval = target->type->blank_check_memory(target, address, size, blank);
1638
1639 return retval;
1640 }
1641
1642 int target_read_u32(struct target *target, uint32_t address, uint32_t *value)
1643 {
1644 uint8_t value_buf[4];
1645 if (!target_was_examined(target))
1646 {
1647 LOG_ERROR("Target not examined yet");
1648 return ERROR_FAIL;
1649 }
1650
1651 int retval = target_read_memory(target, address, 4, 1, value_buf);
1652
1653 if (retval == ERROR_OK)
1654 {
1655 *value = target_buffer_get_u32(target, value_buf);
1656 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1657 address,
1658 *value);
1659 }
1660 else
1661 {
1662 *value = 0x0;
1663 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1664 address);
1665 }
1666
1667 return retval;
1668 }
1669
1670 int target_read_u16(struct target *target, uint32_t address, uint16_t *value)
1671 {
1672 uint8_t value_buf[2];
1673 if (!target_was_examined(target))
1674 {
1675 LOG_ERROR("Target not examined yet");
1676 return ERROR_FAIL;
1677 }
1678
1679 int retval = target_read_memory(target, address, 2, 1, value_buf);
1680
1681 if (retval == ERROR_OK)
1682 {
1683 *value = target_buffer_get_u16(target, value_buf);
1684 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
1685 address,
1686 *value);
1687 }
1688 else
1689 {
1690 *value = 0x0;
1691 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1692 address);
1693 }
1694
1695 return retval;
1696 }
1697
1698 int target_read_u8(struct target *target, uint32_t address, uint8_t *value)
1699 {
1700 if (!target_was_examined(target))
1701 {
1702 LOG_ERROR("Target not examined yet");
1703 return ERROR_FAIL;
1704 }
1705 
1706 int retval = target_read_memory(target, address, 1, 1, value);
1707 if (retval == ERROR_OK)
1708 {
1709 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
1710 address,
1711 *value);
1712 }
1713 else
1714 {
1715 *value = 0x0;
1716 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1717 address);
1718 }
1719
1720 return retval;
1721 }
1722
1723 int target_write_u32(struct target *target, uint32_t address, uint32_t value)
1724 {
1725 int retval;
1726 uint8_t value_buf[4];
1727 if (!target_was_examined(target))
1728 {
1729 LOG_ERROR("Target not examined yet");
1730 return ERROR_FAIL;
1731 }
1732
1733 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1734 address,
1735 value);
1736
1737 target_buffer_set_u32(target, value_buf, value);
1738 if ((retval = target_write_memory(target, address, 4, 1, value_buf)) != ERROR_OK)
1739 {
1740 LOG_DEBUG("failed: %i", retval);
1741 }
1742
1743 return retval;
1744 }
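/* Illustrative sketch (hypothetical caller, not part of this file): the
 * convenience accessors above make read/modify/write cycles on a
 * memory-mapped peripheral register at ctrl_addr straightforward:
 *
 *	uint32_t ctrl;
 *	retval = target_read_u32(target, ctrl_addr, &ctrl);
 *	if (retval == ERROR_OK)
 *		retval = target_write_u32(target, ctrl_addr, ctrl | 0x1);
 */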
1745
1746 int target_write_u16(struct target *target, uint32_t address, uint16_t value)
1747 {
1748 int retval;
1749 uint8_t value_buf[2];
1750 if (!target_was_examined(target))
1751 {
1752 LOG_ERROR("Target not examined yet");
1753 return ERROR_FAIL;
1754 }
1755
1756 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8x",
1757 address,
1758 value);
1759
1760 target_buffer_set_u16(target, value_buf, value);
1761 if ((retval = target_write_memory(target, address, 2, 1, value_buf)) != ERROR_OK)
1762 {
1763 LOG_DEBUG("failed: %i", retval);
1764 }
1765
1766 return retval;
1767 }
1768
1769 int target_write_u8(struct target *target, uint32_t address, uint8_t value)
1770 {
1771 int retval;
1772 if (!target_was_examined(target))
1773 {
1774 LOG_ERROR("Target not examined yet");
1775 return ERROR_FAIL;
1776 }
1777
1778 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
1779 address, value);
1780
1781 if ((retval = target_write_memory(target, address, 1, 1, &value)) != ERROR_OK)
1782 {
1783 LOG_DEBUG("failed: %i", retval);
1784 }
1785
1786 return retval;
1787 }
1788
1789 COMMAND_HANDLER(handle_targets_command)
1790 {
1791 struct target *target = all_targets;
1792
1793 if (CMD_ARGC == 1)
1794 {
1795 target = get_target(CMD_ARGV[0]);
1796 if (target == NULL) {
1797 command_print(CMD_CTX,"Target: %s is unknown, try one of:\n", CMD_ARGV[0]);
1798 goto DumpTargets;
1799 }
1800 if (!target->tap->enabled) {
1801 command_print(CMD_CTX,"Target: TAP %s is disabled, "
1802 "can't be the current target\n",
1803 target->tap->dotted_name);
1804 return ERROR_FAIL;
1805 }
1806
1807 CMD_CTX->current_target = target->target_number;
1808 return ERROR_OK;
1809 }
1810 DumpTargets:
1811
1812 target = all_targets;
1813 command_print(CMD_CTX, " TargetName Type Endian TapName State ");
1814 command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
1815 while (target)
1816 {
1817 const char *state;
1818 char marker = ' ';
1819
1820 if (target->tap->enabled)
1821 state = target_state_name( target );
1822 else
1823 state = "tap-disabled";
1824
1825 if (CMD_CTX->current_target == target->target_number)
1826 marker = '*';
1827
1828 /* keep columns lined up to match the headers above */
1829 command_print(CMD_CTX, "%2d%c %-18s %-10s %-6s %-18s %s",
1830 target->target_number,
1831 marker,
1832 target_name(target),
1833 target_type_name(target),
1834 Jim_Nvp_value2name_simple(nvp_target_endian,
1835 target->endianness)->name,
1836 target->tap->dotted_name,
1837 state);
1838 target = target->next;
1839 }
1840
1841 return ERROR_OK;
1842 }
1843
1844 /* on every polling cycle we check for reset & powerdropout and issue a "reset halt" if so. */
1845
1846 static int powerDropout;
1847 static int srstAsserted;
1848
1849 static int runPowerRestore;
1850 static int runPowerDropout;
1851 static int runSrstAsserted;
1852 static int runSrstDeasserted;
1853
1854 static int sense_handler(void)
1855 {
1856 static int prevSrstAsserted = 0;
1857 static int prevPowerdropout = 0;
1858
1859 int retval;
1860 if ((retval = jtag_power_dropout(&powerDropout)) != ERROR_OK)
1861 return retval;
1862
1863 int powerRestored;
1864 powerRestored = prevPowerdropout && !powerDropout;
1865 if (powerRestored)
1866 {
1867 runPowerRestore = 1;
1868 }
1869
1870 long long current = timeval_ms();
1871 static long long lastPower = 0;
1872 int waitMore = lastPower + 2000 > current;
1873 if (powerDropout && !waitMore)
1874 {
1875 runPowerDropout = 1;
1876 lastPower = current;
1877 }
1878
1879 if ((retval = jtag_srst_asserted(&srstAsserted)) != ERROR_OK)
1880 return retval;
1881
1882 int srstDeasserted;
1883 srstDeasserted = prevSrstAsserted && !srstAsserted;
1884
1885 static long long lastSrst = 0;
1886 waitMore = lastSrst + 2000 > current;
1887 if (srstDeasserted && !waitMore)
1888 {
1889 runSrstDeasserted = 1;
1890 lastSrst = current;
1891 }
1892
1893 if (!prevSrstAsserted && srstAsserted)
1894 {
1895 runSrstAsserted = 1;
1896 }
1897
1898 prevSrstAsserted = srstAsserted;
1899 prevPowerdropout = powerDropout;
1900
1901 if (srstDeasserted || powerRestored)
1902 {
1903 /* Other than logging the event we can't do anything here.
1904 * Issuing a reset is a particularly bad idea as we might
1905 * be inside a reset already.
1906 */
1907 }
1908
1909 return ERROR_OK;
1910 }
1911
1912 static int backoff_times = 0;
1913 static int backoff_count = 0;
1914
1915 /* process target state changes */
1916 static int handle_target(void *priv)
1917 {
1918 Jim_Interp *interp = (Jim_Interp *)priv;
1919 int retval = ERROR_OK;
1920
1921 if (!is_jtag_poll_safe())
1922 {
1923 /* polling is disabled currently */
1924 return ERROR_OK;
1925 }
1926
1927 /* we do not want to recurse here... */
1928 static int recursive = 0;
1929 if (! recursive)
1930 {
1931 recursive = 1;
1932 sense_handler();
1933 /* danger! running these procedures can trigger srst assertions and power dropouts.
1934 * We need to avoid an infinite loop/recursion here and we do that by
1935 * clearing the flags after running these events.
1936 */
1937 int did_something = 0;
1938 if (runSrstAsserted)
1939 {
1940 LOG_INFO("srst asserted detected, running srst_asserted proc.");
1941 Jim_Eval(interp, "srst_asserted");
1942 did_something = 1;
1943 }
1944 if (runSrstDeasserted)
1945 {
1946 Jim_Eval(interp, "srst_deasserted");
1947 did_something = 1;
1948 }
1949 if (runPowerDropout)
1950 {
1951 LOG_INFO("Power dropout detected, running power_dropout proc.");
1952 Jim_Eval(interp, "power_dropout");
1953 did_something = 1;
1954 }
1955 if (runPowerRestore)
1956 {
1957 Jim_Eval(interp, "power_restore");
1958 did_something = 1;
1959 }
1960
1961 if (did_something)
1962 {
1963 /* clear detect flags */
1964 sense_handler();
1965 }
1966
1967 /* clear action flags */
1968
1969 runSrstAsserted = 0;
1970 runSrstDeasserted = 0;
1971 runPowerRestore = 0;
1972 runPowerDropout = 0;
1973
1974 recursive = 0;
1975 }
1976
1977 if (backoff_times > backoff_count)
1978 {
1979 /* do not poll this time as we failed previously */
1980 backoff_count++;
1981 return ERROR_OK;
1982 }
1983 backoff_count = 0;
1984
1985 /* Poll targets for state changes unless that's globally disabled.
1986 * Skip targets that are currently disabled.
1987 */
1988 for (struct target *target = all_targets;
1989 is_jtag_poll_safe() && target;
1990 target = target->next)
1991 {
1992 if (!target->tap->enabled)
1993 continue;
1994
1995 /* only poll target if we've got power and srst isn't asserted */
1996 if (!powerDropout && !srstAsserted)
1997 {
1998 /* polling may fail silently until the target has been examined */
1999 if ((retval = target_poll(target)) != ERROR_OK)
2000 {
2001 /* 100ms polling interval. Increase the interval between polls up to 5000ms */
2002 if (backoff_times * polling_interval < 5000)
2003 {
2004 backoff_times *= 2;
2005 backoff_times++;
2006 }
2007 LOG_USER("Polling target failed, GDB will be halted. Polling again in %dms", backoff_times * polling_interval);
2008
2009 /* Tell GDB to halt the debugger. This allows the user to
2010 * run monitor commands to handle the situation.
2011 */
2012 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2013 return retval;
2014 }
2015 /* Since polling succeeded, reset the backoff */
2016 if (backoff_times > 0)
2017 {
2018 LOG_USER("Polling succeeded again");
2019 }
2020 backoff_times = 0;
2021 }
2022 }
2023
2024 return retval;
2025 }
2026
2027 COMMAND_HANDLER(handle_reg_command)
2028 {
2029 struct target *target;
2030 struct reg *reg = NULL;
2031 unsigned count = 0;
2032 char *value;
2033
2034 LOG_DEBUG("-");
2035
2036 target = get_current_target(CMD_CTX);
2037
2038 /* list all available registers for the current target */
2039 if (CMD_ARGC == 0)
2040 {
2041 struct reg_cache *cache = target->reg_cache;
2042
2043 count = 0;
2044 while (cache)
2045 {
2046 unsigned i;
2047
2048 command_print(CMD_CTX, "===== %s", cache->name);
2049
2050 for (i = 0, reg = cache->reg_list;
2051 i < cache->num_regs;
2052 i++, reg++, count++)
2053 {
2054 /* only print cached values if they are valid */
2055 if (reg->valid) {
2056 value = buf_to_str(reg->value,
2057 reg->size, 16);
2058 command_print(CMD_CTX,
2059 "(%i) %s (/%" PRIu32 "): 0x%s%s",
2060 count, reg->name,
2061 reg->size, value,
2062 reg->dirty
2063 ? " (dirty)"
2064 : "");
2065 free(value);
2066 } else {
2067 command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
2068 count, reg->name,
2069 reg->size) ;
2070 }
2071 }
2072 cache = cache->next;
2073 }
2074
2075 return ERROR_OK;
2076 }
2077
2078 /* access a single register by its ordinal number */
2079 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9'))
2080 {
2081 unsigned num;
2082 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
2083
2084 struct reg_cache *cache = target->reg_cache;
2085 count = 0;
2086 while (cache)
2087 {
2088 unsigned i;
2089 for (i = 0; i < cache->num_regs; i++)
2090 {
2091 if (count++ == num)
2092 {
2093 reg = &cache->reg_list[i];
2094 break;
2095 }
2096 }
2097 if (reg)
2098 break;
2099 cache = cache->next;
2100 }
2101
2102 if (!reg)
2103 {
2104 command_print(CMD_CTX, "%i is out of bounds, the current target has only %i registers (0 - %i)", num, count, count - 1);
2105 return ERROR_OK;
2106 }
2107 } else /* access a single register by its name */
2108 {
2109 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
2110
2111 if (!reg)
2112 {
2113 command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
2114 return ERROR_OK;
2115 }
2116 }
2117
2118 /* display a register */
2119 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0') && (CMD_ARGV[1][0] <= '9'))))
2120 {
2121 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
2122 reg->valid = 0;
2123
2124 if (reg->valid == 0)
2125 {
2126 reg->type->get(reg);
2127 }
2128 value = buf_to_str(reg->value, reg->size, 16);
2129 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2130 free(value);
2131 return ERROR_OK;
2132 }
2133
2134 /* set register value */
2135 if (CMD_ARGC == 2)
2136 {
2137 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
2138 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
2139
2140 reg->type->set(reg, buf);
2141
2142 value = buf_to_str(reg->value, reg->size, 16);
2143 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2144 free(value);
2145
2146 free(buf);
2147
2148 return ERROR_OK;
2149 }
2150
2151 command_print(CMD_CTX, "usage: reg <#|name> [value]");
2152
2153 return ERROR_OK;
2154 }
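/* Usage sketch for the handler above (illustrative only; register names
 * and values depend on the connected target):
 *
 *   > reg                  (list every register in every cache)
 *   > reg pc               (print the cached program counter)
 *   > reg 0 force          (re-read register #0 from the hardware)
 *   > reg pc 0x20000000    (write a hypothetical new value)
 */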
2155
2156 COMMAND_HANDLER(handle_poll_command)
2157 {
2158 int retval = ERROR_OK;
2159 struct target *target = get_current_target(CMD_CTX);
2160
2161 if (CMD_ARGC == 0)
2162 {
2163 command_print(CMD_CTX, "background polling: %s",
2164 jtag_poll_get_enabled() ? "on" : "off");
2165 command_print(CMD_CTX, "TAP: %s (%s)",
2166 target->tap->dotted_name,
2167 target->tap->enabled ? "enabled" : "disabled");
2168 if (!target->tap->enabled)
2169 return ERROR_OK;
2170 if ((retval = target_poll(target)) != ERROR_OK)
2171 return retval;
2172 if ((retval = target_arch_state(target)) != ERROR_OK)
2173 return retval;
2174 }
2175 else if (CMD_ARGC == 1)
2176 {
2177 bool enable;
2178 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2179 jtag_poll_set_enabled(enable);
2180 }
2181 else
2182 {
2183 return ERROR_COMMAND_SYNTAX_ERROR;
2184 }
2185
2186 return retval;
2187 }
2188
2189 COMMAND_HANDLER(handle_wait_halt_command)
2190 {
2191 if (CMD_ARGC > 1)
2192 return ERROR_COMMAND_SYNTAX_ERROR;
2193
2194 unsigned ms = 5000;
2195 if (1 == CMD_ARGC)
2196 {
2197 int retval = parse_uint(CMD_ARGV[0], &ms);
2198 if (ERROR_OK != retval)
2199 {
2200 command_print(CMD_CTX, "usage: %s [seconds]", CMD_NAME);
2201 return ERROR_COMMAND_SYNTAX_ERROR;
2202 }
2203 // convert seconds (given) to milliseconds (needed)
2204 ms *= 1000;
2205 }
2206
2207 struct target *target = get_current_target(CMD_CTX);
2208 return target_wait_state(target, TARGET_HALTED, ms);
2209 }
2210
2211 /* wait for target state to change. The trick here is to have a low
2212 * latency for short waits and not to suck up all the CPU time
2213 * on longer waits.
2214 *
2215 * After 500ms, keep_alive() is invoked
2216 */
2217 int target_wait_state(struct target *target, enum target_state state, int ms)
2218 {
2219 int retval;
2220 long long then = 0, cur;
2221 int once = 1;
2222
2223 for (;;)
2224 {
2225 if ((retval = target_poll(target)) != ERROR_OK)
2226 return retval;
2227 if (target->state == state)
2228 {
2229 break;
2230 }
2231 cur = timeval_ms();
2232 if (once)
2233 {
2234 once = 0;
2235 then = timeval_ms();
2236 LOG_DEBUG("waiting for target %s...",
2237 Jim_Nvp_value2name_simple(nvp_target_state,state)->name);
2238 }
2239
2240 if (cur-then > 500)
2241 {
2242 keep_alive();
2243 }
2244
2245 if ((cur-then) > ms)
2246 {
2247 LOG_ERROR("timed out while waiting for target %s",
2248 Jim_Nvp_value2name_simple(nvp_target_state,state)->name);
2249 return ERROR_FAIL;
2250 }
2251 }
2252
2253 return ERROR_OK;
2254 }
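/* Minimal caller sketch for target_wait_state(), with a hypothetical
 * 2 second timeout; this mirrors what the halt command handler below does:
 *
 *   int retval = target_halt(target);
 *   if (retval == ERROR_OK)
 *       retval = target_wait_state(target, TARGET_HALTED, 2000);
 */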
2255
2256 COMMAND_HANDLER(handle_halt_command)
2257 {
2258 LOG_DEBUG("-");
2259
2260 struct target *target = get_current_target(CMD_CTX);
2261 int retval = target_halt(target);
2262 if (ERROR_OK != retval)
2263 return retval;
2264
2265 if (CMD_ARGC == 1)
2266 {
2267 unsigned wait_local;
2268 retval = parse_uint(CMD_ARGV[0], &wait_local);
2269 if (ERROR_OK != retval)
2270 return ERROR_COMMAND_SYNTAX_ERROR;
2271 if (!wait_local)
2272 return ERROR_OK;
2273 }
2274
2275 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2276 }
2277
2278 COMMAND_HANDLER(handle_soft_reset_halt_command)
2279 {
2280 struct target *target = get_current_target(CMD_CTX);
2281
2282 LOG_USER("requesting target halt and executing a soft reset");
2283
2284 target->type->soft_reset_halt(target);
2285
2286 return ERROR_OK;
2287 }
2288
2289 COMMAND_HANDLER(handle_reset_command)
2290 {
2291 if (CMD_ARGC > 1)
2292 return ERROR_COMMAND_SYNTAX_ERROR;
2293
2294 enum target_reset_mode reset_mode = RESET_RUN;
2295 if (CMD_ARGC == 1)
2296 {
2297 const Jim_Nvp *n;
2298 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
2299 if ((n->name == NULL) || (n->value == RESET_UNKNOWN)) {
2300 return ERROR_COMMAND_SYNTAX_ERROR;
2301 }
2302 reset_mode = n->value;
2303 }
2304
2305 /* reset *all* targets */
2306 return target_process_reset(CMD_CTX, reset_mode);
2307 }
2308
2309
2310 COMMAND_HANDLER(handle_resume_command)
2311 {
2312 int current = 1;
2313 if (CMD_ARGC > 1)
2314 return ERROR_COMMAND_SYNTAX_ERROR;
2315
2316 struct target *target = get_current_target(CMD_CTX);
2317 target_handle_event(target, TARGET_EVENT_OLD_pre_resume);
2318
2319 /* with no CMD_ARGV, resume from current pc, addr = 0,
2320 * with one argument, addr = CMD_ARGV[0],
2321 * handle breakpoints, not debugging */
2322 uint32_t addr = 0;
2323 if (CMD_ARGC == 1)
2324 {
2325 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2326 current = 0;
2327 }
2328
2329 return target_resume(target, current, addr, 1, 0);
2330 }
2331
2332 COMMAND_HANDLER(handle_step_command)
2333 {
2334 if (CMD_ARGC > 1)
2335 return ERROR_COMMAND_SYNTAX_ERROR;
2336
2337 LOG_DEBUG("-");
2338
2339 /* with no CMD_ARGV, step from current pc, addr = 0,
2340 * with one argument addr = CMD_ARGV[0],
2341 * handle breakpoints, debugging */
2342 uint32_t addr = 0;
2343 int current_pc = 1;
2344 if (CMD_ARGC == 1)
2345 {
2346 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2347 current_pc = 0;
2348 }
2349
2350 struct target *target = get_current_target(CMD_CTX);
2351
2352 return target->type->step(target, current_pc, addr, 1);
2353 }
2354
2355 static void handle_md_output(struct command_context *cmd_ctx,
2356 struct target *target, uint32_t address, unsigned size,
2357 unsigned count, const uint8_t *buffer)
2358 {
2359 const unsigned line_bytecnt = 32;
2360 unsigned line_modulo = line_bytecnt / size;
2361
2362 char output[line_bytecnt * 4 + 1];
2363 unsigned output_len = 0;
2364
2365 const char *value_fmt;
2366 switch (size) {
2367 case 4: value_fmt = "%8.8x "; break;
2368 case 2: value_fmt = "%4.4x "; break;
2369 case 1: value_fmt = "%2.2x "; break;
2370 default:
2371 /* "can't happen", caller checked */
2372 LOG_ERROR("invalid memory read size: %u", size);
2373 return;
2374 }
2375
2376 for (unsigned i = 0; i < count; i++)
2377 {
2378 if (i % line_modulo == 0)
2379 {
2380 output_len += snprintf(output + output_len,
2381 sizeof(output) - output_len,
2382 "0x%8.8x: ",
2383 (unsigned)(address + (i*size)));
2384 }
2385
2386 uint32_t value = 0;
2387 const uint8_t *value_ptr = buffer + i * size;
2388 switch (size) {
2389 case 4: value = target_buffer_get_u32(target, value_ptr); break;
2390 case 2: value = target_buffer_get_u16(target, value_ptr); break;
2391 case 1: value = *value_ptr;
2392 }
2393 output_len += snprintf(output + output_len,
2394 sizeof(output) - output_len,
2395 value_fmt, value);
2396
2397 if ((i % line_modulo == line_modulo - 1) || (i == count - 1))
2398 {
2399 command_print(cmd_ctx, "%s", output);
2400 output_len = 0;
2401 }
2402 }
2403 }
2404
2405 COMMAND_HANDLER(handle_md_command)
2406 {
2407 if (CMD_ARGC < 1)
2408 return ERROR_COMMAND_SYNTAX_ERROR;
2409
2410 unsigned size = 0;
2411 switch (CMD_NAME[2]) {
2412 case 'w': size = 4; break;
2413 case 'h': size = 2; break;
2414 case 'b': size = 1; break;
2415 default: return ERROR_COMMAND_SYNTAX_ERROR;
2416 }
2417
2418 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2419 int (*fn)(struct target *target,
2420 uint32_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
2421 if (physical)
2422 {
2423 CMD_ARGC--;
2424 CMD_ARGV++;
2425 fn = target_read_phys_memory;
2426 } else
2427 {
2428 fn = target_read_memory;
2429 }
2430 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
2431 {
2432 return ERROR_COMMAND_SYNTAX_ERROR;
2433 }
2434
2435 uint32_t address;
2436 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2437
2438 unsigned count = 1;
2439 if (CMD_ARGC == 2)
2440 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
2441
2442 uint8_t *buffer = calloc(count, size);
2443
2444 struct target *target = get_current_target(CMD_CTX);
2445 int retval = fn(target, address, size, count, buffer);
2446 if (ERROR_OK == retval)
2447 handle_md_output(CMD_CTX, target, address, size, count, buffer);
2448
2449 free(buffer);
2450
2451 return retval;
2452 }
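/* Usage sketch for the memory display commands handled above (addresses
 * are hypothetical; the command name selects the access width):
 *
 *   > mdw 0x20000000 8         (dump eight 32-bit words)
 *   > mdb phys 0x00000000 16   (byte dump via the physical address space,
 *                               where the target supports it)
 */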
2453
2454 typedef int (*target_write_fn)(struct target *target,
2455 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
2456
2457 static int target_write_memory_fast(struct target *target,
2458 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
2459 {
2460 return target_write_buffer(target, address, size * count, buffer);
2461 }
2462
2463 static int target_fill_mem(struct target *target,
2464 uint32_t address,
2465 target_write_fn fn,
2466 unsigned data_size,
2467 /* value */
2468 uint32_t b,
2469 /* count */
2470 unsigned c)
2471 {
2472 /* We have to write in reasonably large chunks to be able
2473 * to fill large memory areas with any sane speed */
2474 const unsigned chunk_size = 16384;
2475 uint8_t *target_buf = malloc(chunk_size * data_size);
2476 if (target_buf == NULL)
2477 {
2478 LOG_ERROR("Out of memory");
2479 return ERROR_FAIL;
2480 }
2481
2482 for (unsigned i = 0; i < chunk_size; i ++)
2483 {
2484 switch (data_size)
2485 {
2486 case 4:
2487 target_buffer_set_u32(target, target_buf + i*data_size, b);
2488 break;
2489 case 2:
2490 target_buffer_set_u16(target, target_buf + i*data_size, b);
2491 break;
2492 case 1:
2493 target_buffer_set_u8(target, target_buf + i*data_size, b);
2494 break;
2495 default:
2496 exit(-1);
2497 }
2498 }
2499
2500 int retval = ERROR_OK;
2501
2502 for (unsigned x = 0; x < c; x += chunk_size)
2503 {
2504 unsigned current;
2505 current = c - x;
2506 if (current > chunk_size)
2507 {
2508 current = chunk_size;
2509 }
2510 retval = fn(target, address + x * data_size, data_size, current, target_buf);
2511 if (retval != ERROR_OK)
2512 {
2513 break;
2514 }
2515 /* avoid GDB timeouts */
2516 keep_alive();
2517 }
2518 free(target_buf);
2519
2520 return retval;
2521 }
2522
2523
2524 COMMAND_HANDLER(handle_mw_command)
2525 {
2526 if (CMD_ARGC < 2)
2527 {
2528 return ERROR_COMMAND_SYNTAX_ERROR;
2529 }
2530 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2531 target_write_fn fn;
2532 if (physical)
2533 {
2534 CMD_ARGC--;
2535 CMD_ARGV++;
2536 fn = target_write_phys_memory;
2537 } else
2538 {
2539 fn = target_write_memory_fast;
2540 }
2541 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
2542 return ERROR_COMMAND_SYNTAX_ERROR;
2543
2544 uint32_t address;
2545 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2546
2547 uint32_t value;
2548 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2549
2550 unsigned count = 1;
2551 if (CMD_ARGC == 3)
2552 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
2553
2554 struct target *target = get_current_target(CMD_CTX);
2555 unsigned wordsize;
2556 switch (CMD_NAME[2])
2557 {
2558 case 'w':
2559 wordsize = 4;
2560 break;
2561 case 'h':
2562 wordsize = 2;
2563 break;
2564 case 'b':
2565 wordsize = 1;
2566 break;
2567 default:
2568 return ERROR_COMMAND_SYNTAX_ERROR;
2569 }
2570
2571 return target_fill_mem(target, address, fn, wordsize, value, count);
2572 }
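/* Usage sketch for the memory write commands handled above (hypothetical
 * address and values):
 *
 *   > mww 0x20000000 0xdeadbeef      (write one 32-bit word)
 *   > mww 0x20000000 0x0 64          (fill 64 words with zero)
 *   > mwb phys 0x00000000 0xff 16    (byte fill through physical addressing)
 */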
2573
2574 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
2575 uint32_t *min_address, uint32_t *max_address)
2576 {
2577 if (CMD_ARGC < 1 || CMD_ARGC > 5)
2578 return ERROR_COMMAND_SYNTAX_ERROR;
2579
2580 /* a base address isn't always necessary,
2581 * default to 0x0 (i.e. don't relocate) */
2582 if (CMD_ARGC >= 2)
2583 {
2584 uint32_t addr;
2585 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
2586 image->base_address = addr;
2587 image->base_address_set = 1;
2588 }
2589 else
2590 image->base_address_set = 0;
2591
2592 image->start_address_set = 0;
2593
2594 if (CMD_ARGC >= 4)
2595 {
2596 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], *min_address);
2597 }
2598 if (CMD_ARGC == 5)
2599 {
2600 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], *max_address);
2601 // use size (given) to find max (required)
2602 *max_address += *min_address;
2603 }
2604
2605 if (*min_address > *max_address)
2606 return ERROR_COMMAND_SYNTAX_ERROR;
2607
2608 return ERROR_OK;
2609 }
2610
2611 COMMAND_HANDLER(handle_load_image_command)
2612 {
2613 uint8_t *buffer;
2614 size_t buf_cnt;
2615 uint32_t image_size;
2616 uint32_t min_address = 0;
2617 uint32_t max_address = 0xffffffff;
2618 int i;
2619 struct image image;
2620
2621 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
2622 &image, &min_address, &max_address);
2623 if (ERROR_OK != retval)
2624 return retval;
2625
2626 struct target *target = get_current_target(CMD_CTX);
2627
2628 struct duration bench;
2629 duration_start(&bench);
2630
2631 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
2632 {
2633 return ERROR_OK;
2634 }
2635
2636 image_size = 0x0;
2637 retval = ERROR_OK;
2638 for (i = 0; i < image.num_sections; i++)
2639 {
2640 buffer = malloc(image.sections[i].size);
2641 if (buffer == NULL)
2642 {
2643 command_print(CMD_CTX,
2644 "error allocating buffer for section (%d bytes)",
2645 (int)(image.sections[i].size));
2646 break;
2647 }
2648
2649 if ((retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt)) != ERROR_OK)
2650 {
2651 free(buffer);
2652 break;
2653 }
2654
2655 uint32_t offset = 0;
2656 uint32_t length = buf_cnt;
2657
2658 /* DANGER!!! beware of unsigned comparison here!!! */
2659
2660 if ((image.sections[i].base_address + buf_cnt >= min_address)&&
2661 (image.sections[i].base_address < max_address))
2662 {
2663 if (image.sections[i].base_address < min_address)
2664 {
2665 /* clip addresses below */
2666 offset += min_address-image.sections[i].base_address;
2667 length -= offset;
2668 }
2669
2670 if (image.sections[i].base_address + buf_cnt > max_address)
2671 {
2672 length -= (image.sections[i].base_address + buf_cnt)-max_address;
2673 }
2674
2675 if ((retval = target_write_buffer(target, image.sections[i].base_address + offset, length, buffer + offset)) != ERROR_OK)
2676 {
2677 free(buffer);
2678 break;
2679 }
2680 image_size += length;
2681 command_print(CMD_CTX, "%u bytes written at address 0x%8.8" PRIx32 "",
2682 (unsigned int)length,
2683 image.sections[i].base_address + offset);
2684 }
2685
2686 free(buffer);
2687 }
2688
2689 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK))
2690 {
2691 command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
2692 "in %fs (%0.3f KiB/s)", image_size,
2693 duration_elapsed(&bench), duration_kbps(&bench, image_size));
2694 }
2695
2696 image_close(&image);
2697
2698 return retval;
2699
2700 }
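/* Usage sketch for load_image (file names, base address and format are
 * hypothetical; the trailing min_address/size trimming arguments are
 * optional, see the parser above):
 *
 *   > load_image build/app.bin 0x20000000 bin
 *   > load_image build/app.elf 0x0 elf 0x20000000 0x8000
 */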
2701
2702 COMMAND_HANDLER(handle_dump_image_command)
2703 {
2704 struct fileio fileio;
2705 uint8_t buffer[560];
2706 int retval, retvaltemp;
2707 uint32_t address, size;
2708 struct duration bench;
2709 struct target *target = get_current_target(CMD_CTX);
2710
2711 if (CMD_ARGC != 3)
2712 return ERROR_COMMAND_SYNTAX_ERROR;
2713
2714 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], address);
2715 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], size);
2716
2717 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
2718 if (retval != ERROR_OK)
2719 return retval;
2720
2721 duration_start(&bench);
2722
2723 retval = ERROR_OK;
2724 while (size > 0)
2725 {
2726 size_t size_written;
2727 uint32_t this_run_size = (size > 560) ? 560 : size;
2728 retval = target_read_buffer(target, address, this_run_size, buffer);
2729 if (retval != ERROR_OK)
2730 {
2731 break;
2732 }
2733
2734 retval = fileio_write(&fileio, this_run_size, buffer, &size_written);
2735 if (retval != ERROR_OK)
2736 {
2737 break;
2738 }
2739
2740 size -= this_run_size;
2741 address += this_run_size;
2742 }
2743
2744 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK))
2745 {
2746 int filesize;
2747 retval = fileio_size(&fileio, &filesize);
2748 if (retval != ERROR_OK)
2749 return retval;
2750 command_print(CMD_CTX,
2751 "dumped %ld bytes in %fs (%0.3f KiB/s)", (long)filesize,
2752 duration_elapsed(&bench), duration_kbps(&bench, filesize));
2753 }
2754
2755 if ((retvaltemp = fileio_close(&fileio)) != ERROR_OK)
2756 return retvaltemp;
2757
2758 return retval;
2759 }
2760
2761 static COMMAND_HELPER(handle_verify_image_command_internal, int verify)
2762 {
2763 uint8_t *buffer;
2764 size_t buf_cnt;
2765 uint32_t image_size;
2766 int i;
2767 int retval;
2768 uint32_t checksum = 0;
2769 uint32_t mem_checksum = 0;
2770
2771 struct image image;
2772
2773 struct target *target = get_current_target(CMD_CTX);
2774
2775 if (CMD_ARGC < 1)
2776 {
2777 return ERROR_COMMAND_SYNTAX_ERROR;
2778 }
2779
2780 if (!target)
2781 {
2782 LOG_ERROR("no target selected");
2783 return ERROR_FAIL;
2784 }
2785
2786 struct duration bench;
2787 duration_start(&bench);
2788
2789 if (CMD_ARGC >= 2)
2790 {
2791 uint32_t addr;
2792 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
2793 image.base_address = addr;
2794 image.base_address_set = 1;
2795 }
2796 else
2797 {
2798 image.base_address_set = 0;
2799 image.base_address = 0x0;
2800 }
2801
2802 image.start_address_set = 0;
2803
2804 if ((retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL)) != ERROR_OK)
2805 {
2806 return retval;
2807 }
2808
2809 image_size = 0x0;
2810 int diffs = 0;
2811 retval = ERROR_OK;
2812 for (i = 0; i < image.num_sections; i++)
2813 {
2814 buffer = malloc(image.sections[i].size);
2815 if (buffer == NULL)
2816 {
2817 command_print(CMD_CTX,
2818 "error allocating buffer for section (%d bytes)",
2819 (int)(image.sections[i].size));
2820 break;
2821 }
2822 if ((retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt)) != ERROR_OK)
2823 {
2824 free(buffer);
2825 break;
2826 }
2827
2828 if (verify)
2829 {
2830 /* calculate checksum of image */
2831 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
2832 if (retval != ERROR_OK)
2833 {
2834 free(buffer);
2835 break;
2836 }
2837
2838 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
2839 if (retval != ERROR_OK)
2840 {
2841 free(buffer);
2842 break;
2843 }
2844
2845 if (checksum != mem_checksum)
2846 {
2847 /* failed crc checksum, fall back to a binary compare */
2848 uint8_t *data;
2849
2850 if (diffs == 0)
2851 {
2852 LOG_ERROR("checksum mismatch - attempting binary compare");
2853 }
2854
2855 data = (uint8_t*)malloc(buf_cnt);
if (data == NULL)
{
LOG_ERROR("Out of memory");
free(buffer);
retval = ERROR_FAIL;
break;
}
2856
2857 /* Can we use 32bit word accesses? */
2858 int size = 1;
2859 int count = buf_cnt;
2860 if ((count % 4) == 0)
2861 {
2862 size *= 4;
2863 count /= 4;
2864 }
2865 retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
2866 if (retval == ERROR_OK)
2867 {
2868 uint32_t t;
2869 for (t = 0; t < buf_cnt; t++)
2870 {
2871 if (data[t] != buffer[t])
2872 {
2873 command_print(CMD_CTX,
2874 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
2875 diffs,
2876 (unsigned)(t + image.sections[i].base_address),
2877 data[t],
2878 buffer[t]);
2879 if (diffs++ >= 127)
2880 {
2881 command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
2882 free(data);
2883 free(buffer);
2884 goto done;
2885 }
2886 }
2887 keep_alive();
2888 }
2889 }
2890 free(data);
2891 }
2892 } else
2893 {
2894 command_print(CMD_CTX, "address 0x%08" PRIx32 " length 0x%08zx",
2895 image.sections[i].base_address,
2896 buf_cnt);
2897 }
2898
2899 free(buffer);
2900 image_size += buf_cnt;
2901 }
2902 if (diffs > 0)
2903 {
2904 command_print(CMD_CTX, "No more differences found.");
2905 }
2906 done:
2907 if (diffs > 0)
2908 {
2909 retval = ERROR_FAIL;
2910 }
2911 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK))
2912 {
2913 command_print(CMD_CTX, "verified %" PRIu32 " bytes "
2914 "in %fs (%0.3f KiB/s)", image_size,
2915 duration_elapsed(&bench), duration_kbps(&bench, image_size));
2916 }
2917
2918 image_close(&image);
2919
2920 return retval;
2921 }
2922
2923 COMMAND_HANDLER(handle_verify_image_command)
2924 {
2925 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 1);
2926 }
2927
2928 COMMAND_HANDLER(handle_test_image_command)
2929 {
2930 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 0);
2931 }
2932
2933 static int handle_bp_command_list(struct command_context *cmd_ctx)
2934 {
2935 struct target *target = get_current_target(cmd_ctx);
2936 struct breakpoint *breakpoint = target->breakpoints;
2937 while (breakpoint)
2938 {
2939 if (breakpoint->type == BKPT_SOFT)
2940 {
2941 char* buf = buf_to_str(breakpoint->orig_instr,
2942 breakpoint->length, 16);
2943 command_print(cmd_ctx, "IVA breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i, 0x%s",
2944 breakpoint->address,
2945 breakpoint->length,
2946 breakpoint->set, buf);
2947 free(buf);
2948 }
2949 else
2950 {
2951 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
2952 command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
2953 breakpoint->asid,
2954 breakpoint->length, breakpoint->set);
2955 else if ((breakpoint->address != 0) && (breakpoint->asid != 0))
2956 {
2957 command_print(cmd_ctx, "Hybrid breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
2958 breakpoint->address,
2959 breakpoint->length, breakpoint->set);
2960 command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
2961 breakpoint->asid);
2962 }
2963 else
2964 command_print(cmd_ctx, "Breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
2965 breakpoint->address,
2966 breakpoint->length, breakpoint->set);
2967 }
2968
2969 breakpoint = breakpoint->next;
2970 }
2971 return ERROR_OK;
2972 }
2973
2974 static int handle_bp_command_set(struct command_context *cmd_ctx,
2975 uint32_t addr, uint32_t asid, uint32_t length, int hw)
2976 {
2977 struct target *target = get_current_target(cmd_ctx);
2978
2979 if (asid == 0)
2980 {
int retval = breakpoint_add(target, addr, length, hw);
2981 if (ERROR_OK == retval)
2982 command_print(cmd_ctx, "breakpoint set at 0x%8.8" PRIx32 "", addr);
2983 else
2984 {
2985 LOG_ERROR("Failure setting breakpoint, the same address(IVA) is already used");
2986 return retval;
2987 }
2988 }
2989 else if (addr == 0)
2990 {
2991 int retval = context_breakpoint_add(target, asid, length, hw);
2992 if (ERROR_OK == retval)
2993 command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
2994 else
2995 {
2996 LOG_ERROR("Failure setting breakpoint, the same address(CONTEXTID) is already used");
2997 return retval;
2998 }
2999 }
3000 else
3001 {
3002 int retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3003 if (ERROR_OK == retval)
3004 command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3005 else
3006 {
3007 LOG_ERROR("Failure setting breakpoint, the same address is already used");
3008 return retval;
3009 }
3010 }
3011 return ERROR_OK;
3012
3013
3014 }
3015
3016 COMMAND_HANDLER(handle_bp_command)
3017 {
3018 uint32_t addr;
3019 uint32_t asid;
3020 uint32_t length;
3021 int hw = BKPT_SOFT;
3022 switch (CMD_ARGC)
3023 {
3024 case 0:
3025 return handle_bp_command_list(CMD_CTX);
3026 case 3:
3027
3028 if (strcmp(CMD_ARGV[2], "hw") == 0)
3029 {
3030 hw = BKPT_HARD;
3031 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3032
3033 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3034
3035 asid = 0;
3036 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3037 }
3038 else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0)
3039 {
3040 hw = BKPT_HARD;
3041 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
3042 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3043 addr = 0;
3044 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3045 }
3046
3047 case 4:
3048 hw = BKPT_HARD;
3049 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3050 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
3051 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
3052 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3053 default:
3054 command_print(CMD_CTX, "usage: bp <address> [<asid>]<length> ['hw'|'hw_ctx']");
3055 return ERROR_COMMAND_SYNTAX_ERROR;
3056 }
3057
3058
3059 }
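/* Usage sketch for the breakpoint commands (hypothetical addresses; the
 * length is in bytes and must match what the core supports):
 *
 *   > bp 0x08000100 2            (software breakpoint)
 *   > bp 0x08000100 2 hw         (hardware breakpoint)
 *   > rbp 0x08000100             (remove it again, handled just below)
 */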
3060
3061 COMMAND_HANDLER(handle_rbp_command)
3062 {
3063 if (CMD_ARGC != 1)
3064 return ERROR_COMMAND_SYNTAX_ERROR;
3065
3066 uint32_t addr;
3067 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3068
3069 struct target *target = get_current_target(CMD_CTX);
3070 breakpoint_remove(target, addr);
3071
3072 return ERROR_OK;
3073 }
3074
3075 COMMAND_HANDLER(handle_wp_command)
3076 {
3077 struct target *target = get_current_target(CMD_CTX);
3078
3079 if (CMD_ARGC == 0)
3080 {
3081 struct watchpoint *watchpoint = target->watchpoints;
3082
3083 while (watchpoint)
3084 {
3085 command_print(CMD_CTX, "address: 0x%8.8" PRIx32
3086 ", len: 0x%8.8" PRIx32
3087 ", r/w/a: %i, value: 0x%8.8" PRIx32
3088 ", mask: 0x%8.8" PRIx32,
3089 watchpoint->address,
3090 watchpoint->length,
3091 (int)watchpoint->rw,
3092 watchpoint->value,
3093 watchpoint->mask);
3094 watchpoint = watchpoint->next;
3095 }
3096 return ERROR_OK;
3097 }
3098
3099 enum watchpoint_rw type = WPT_ACCESS;
3100 uint32_t addr = 0;
3101 uint32_t length = 0;
3102 uint32_t data_value = 0x0;
3103 uint32_t data_mask = 0xffffffff;
3104
3105 switch (CMD_ARGC)
3106 {
3107 case 5:
3108 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
3109 // fall through
3110 case 4:
3111 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
3112 // fall through
3113 case 3:
3114 switch (CMD_ARGV[2][0])
3115 {
3116 case 'r':
3117 type = WPT_READ;
3118 break;
3119 case 'w':
3120 type = WPT_WRITE;
3121 break;
3122 case 'a':
3123 type = WPT_ACCESS;
3124 break;
3125 default:
3126 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
3127 return ERROR_COMMAND_SYNTAX_ERROR;
3128 }
3129 // fall through
3130 case 2:
3131 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3132 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3133 break;
3134
3135 default:
3136 command_print(CMD_CTX, "usage: wp [address length "
3137 "[(r|w|a) [value [mask]]]]");
3138 return ERROR_COMMAND_SYNTAX_ERROR;
3139 }
3140
3141 int retval = watchpoint_add(target, addr, length, type,
3142 data_value, data_mask);
3143 if (ERROR_OK != retval)
3144 LOG_ERROR("Failure setting watchpoints");
3145
3146 return retval;
3147 }
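/* Usage sketch for watchpoints (hypothetical address; the optional third
 * argument selects read/write/access, see the parser above):
 *
 *   > wp 0x20000400 4 w          (watch 4 bytes for writes)
 *   > rwp 0x20000400             (remove the watchpoint again)
 */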
3148
3149 COMMAND_HANDLER(handle_rwp_command)
3150 {
3151 if (CMD_ARGC != 1)
3152 return ERROR_COMMAND_SYNTAX_ERROR;
3153
3154 uint32_t addr;
3155 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3156
3157 struct target *target = get_current_target(CMD_CTX);
3158 watchpoint_remove(target, addr);
3159
3160 return ERROR_OK;
3161 }
3162
3163
3164 /**
3165 * Translate a virtual address to a physical address.
3166 *
3167 * The low-level target implementation must have logged a detailed error
3168 * which is forwarded to telnet/GDB session.
3169 */
3170 COMMAND_HANDLER(handle_virt2phys_command)
3171 {
3172 if (CMD_ARGC != 1)
3173 return ERROR_COMMAND_SYNTAX_ERROR;
3174
3175 uint32_t va;
3176 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], va);
3177 uint32_t pa;
3178
3179 struct target *target = get_current_target(CMD_CTX);
3180 int retval = target->type->virt2phys(target, va, &pa);
3181 if (retval == ERROR_OK)
3182 command_print(CMD_CTX, "Physical address 0x%08" PRIx32 "", pa);
3183
3184 return retval;
3185 }
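/* Usage sketch for the handler above (hypothetical addresses; the actual
 * translation is done by the target-specific virt2phys implementation):
 *
 *   > virt2phys 0xc0008000
 *   Physical address 0x80008000
 */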
3186
3187 static void writeData(FILE *f, const void *data, size_t len)
3188 {
3189 size_t written = fwrite(data, 1, len, f);
3190 if (written != len)
3191 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
3192 }
3193
3194 static void writeLong(FILE *f, int l)
3195 {
3196 int i;
3197 for (i = 0; i < 4; i++)
3198 {
3199 char c = (l >> (i*8))&0xff;
3200 writeData(f, &c, 1);
3201 }
3202
3203 }
3204
3205 static void writeString(FILE *f, char *s)
3206 {
3207 writeData(f, s, strlen(s));
3208 }
3209
3210 /* Dump a gmon.out histogram file. */
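/* Record layout produced below, as written by the calls that follow: the
 * "gmon" magic and a version word, three zero padding words, a single
 * GMON_TAG_TIME_HIST tag byte, a histogram header (low_pc, high_pc, bucket
 * count, a nominal 100Hz rate plus the "seconds"/"s" dimension strings),
 * and finally one 16-bit saturating counter per bucket. */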
3211 static void writeGmon(uint32_t *samples, uint32_t sampleNum, const char *filename)
3212 {
3213 uint32_t i;
3214 FILE *f = fopen(filename, "w");
3215 if (f == NULL)
3216 return;
3217 writeString(f, "gmon");
3218 writeLong(f, 0x00000001); /* Version */
3219 writeLong(f, 0); /* padding */
3220 writeLong(f, 0); /* padding */
3221 writeLong(f, 0); /* padding */
3222
3223 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
3224 writeData(f, &zero, 1);
3225
3226 /* figure out bucket size */
3227 uint32_t min = samples[0];
3228 uint32_t max = samples[0];
3229 for (i = 0; i < sampleNum; i++)
3230 {
3231 if (min > samples[i])
3232 {
3233 min = samples[i];
3234 }
3235 if (max < samples[i])
3236 {
3237 max = samples[i];
3238 }
3239 }
3240
3241 int addressSpace = (max-min + 1);
3242
3243 static const uint32_t maxBuckets = 16 * 1024; /* maximum buckets. */
3244 uint32_t length = addressSpace;
3245 if (length > maxBuckets)
3246 {
3247 length = maxBuckets;
3248 }
3249 int *buckets = malloc(sizeof(int)*length);
3250 if (buckets == NULL)
3251 {
3252 fclose(f);
3253 return;
3254 }
3255 memset(buckets, 0, sizeof(int)*length);
3256 for (i = 0; i < sampleNum;i++)
3257 {
3258 uint32_t address = samples[i];
3259 long long a = address-min;
3260 long long b = length-1;
3261 long long c = addressSpace-1;
3262 int index_t = (a*b)/c; /* danger!!!! int32 overflows */
3263 buckets[index_t]++;
3264 }
3265
3266 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
3267 writeLong(f, min); /* low_pc */
3268 writeLong(f, max); /* high_pc */
3269 writeLong(f, length); /* # of samples */
3270 writeLong(f, 100); /* KLUDGE! We lie, ca. 100Hz best case. */
3271 writeString(f, "seconds");
3272 for (i = 0; i < (15-strlen("seconds")); i++)
3273 writeData(f, &zero, 1);
3274 writeString(f, "s");
3275
3276 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
3277
3278 char *data = malloc(2*length);
3279 if (data != NULL)
3280 {
3281 for (i = 0; i < length;i++)
3282 {
3283 int val;
3284 val = buckets[i];
3285 if (val > 65535)
3286 {
3287 val = 65535;
3288 }
3289 data[i*2]=val&0xff;
3290 data[i*2 + 1]=(val >> 8)&0xff;
3291 }
3292 free(buckets);
3293 writeData(f, data, length * 2);
3294 free(data);
3295 } else
3296 {
3297 free(buckets);
3298 }
3299
3300 fclose(f);
3301 }
3302
3303 /* profiling samples the CPU PC as quickly as OpenOCD is able,
3304 * which will be used as a random sampling of PC */
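/* Usage sketch (duration in seconds and output path are hypothetical; the
 * resulting file can then be fed to gprof together with the ELF image):
 *
 *   > profile 30 /tmp/gmon.out
 *   $ gprof firmware.elf /tmp/gmon.out
 */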
3305 COMMAND_HANDLER(handle_profile_command)
3306 {
3307 struct target *target = get_current_target(CMD_CTX);
3308 struct timeval timeout, now;
3309
3310 gettimeofday(&timeout, NULL);
3311 if (CMD_ARGC != 2)
3312 {
3313 return ERROR_COMMAND_SYNTAX_ERROR;
3314 }
3315 unsigned offset;
3316 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], offset);
3317
3318 timeval_add_time(&timeout, offset, 0);
3319
3320 /**
3321 * @todo: Some cores let us sample the PC without the
3322 * annoying halt/resume step; for example, ARMv7 PCSR.
3323 * Provide a way to use that more efficient mechanism.
3324 */
3325
3326 command_print(CMD_CTX, "Starting profiling. Halting and resuming the target as often as we can...");
3327
3328 static const int maxSample = 10000;
3329 uint32_t *samples = malloc(sizeof(uint32_t)*maxSample);
3330 if (samples == NULL)
3331 return ERROR_OK;
3332
3333 int numSamples = 0;
3334 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
3335 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
3336
3337 for (;;)
3338 {
3339 int retval;
3340 target_poll(target);
3341 if (target->state == TARGET_HALTED)
3342 {
3343 uint32_t t = *((uint32_t *)reg->value);
3344 samples[numSamples++] = t;
3345 retval = target_resume(target, 1, 0, 0, 0); /* current pc, addr = 0, do not handle breakpoints, not debugging */
3346 target_poll(target);
3347 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
3348 } else if (target->state == TARGET_RUNNING)
3349 {
3350 /* We want to quickly sample the PC. */
3351 if ((retval = target_halt(target)) != ERROR_OK)
3352 {
3353 free(samples);
3354 return retval;
3355 }
3356 } else
3357 {
3358 command_print(CMD_CTX, "Target not halted or running");
3359 retval = ERROR_OK;
3360 break;
3361 }
3362 if (retval != ERROR_OK)
3363 {
3364 break;
3365 }
3366
3367 gettimeofday(&now, NULL);
3368 if ((numSamples >= maxSample) || ((now.tv_sec >= timeout.tv_sec) && (now.tv_usec >= timeout.tv_usec)))
3369 {
3370 command_print(CMD_CTX, "Profiling completed. %d samples.", numSamples);
3371 if ((retval = target_poll(target)) != ERROR_OK)
3372 {
3373 free(samples);
3374 return retval;
3375 }
3376 if (target->state == TARGET_HALTED)
3377 {
3378 target_resume(target, 1, 0, 0, 0); /* current pc, addr = 0, do not handle breakpoints, not debugging */
3379 }
3380 if ((retval = target_poll(target)) != ERROR_OK)
3381 {
3382 free(samples);
3383 return retval;
3384 }
3385 writeGmon(samples, numSamples, CMD_ARGV[1]);
3386 command_print(CMD_CTX, "Wrote %s", CMD_ARGV[1]);
3387 break;
3388 }
3389 }
3390 free(samples);
3391
3392 return ERROR_OK;
3393 }
3394
3395 static int new_int_array_element(Jim_Interp * interp, const char *varname, int idx, uint32_t val)
3396 {
3397 char *namebuf;
3398 Jim_Obj *nameObjPtr, *valObjPtr;
3399 int result;
3400
3401 namebuf = alloc_printf("%s(%d)", varname, idx);
3402 if (!namebuf)
3403 return JIM_ERR;
3404
3405 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
3406 valObjPtr = Jim_NewIntObj(interp, val);
3407 if (!nameObjPtr || !valObjPtr)
3408 {
3409 free(namebuf);
3410 return JIM_ERR;
3411 }
3412
3413 Jim_IncrRefCount(nameObjPtr);
3414 Jim_IncrRefCount(valObjPtr);
3415 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
3416 Jim_DecrRefCount(interp, nameObjPtr);
3417 Jim_DecrRefCount(interp, valObjPtr);
3418 free(namebuf);
3419 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
3420 return result;
3421 }
3422
3423 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
3424 {
3425 struct command_context *context;
3426 struct target *target;
3427
3428 context = current_command_context(interp);
3429 assert (context != NULL);
3430
3431 target = get_current_target(context);
3432 if (target == NULL)
3433 {
3434 LOG_ERROR("mem2array: no current target");
3435 return JIM_ERR;
3436 }
3437
3438 return target_mem2array(interp, target, argc-1, argv + 1);
3439 }
3440
3441 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
3442 {
3443 long l;
3444 uint32_t width;
3445 int len;
3446 uint32_t addr;
3447 uint32_t count;
3448 uint32_t v;
3449 const char *varname;
3450 int n, e, retval;
3451 uint32_t i;
3452
3453 /* argv[1] = name of array to receive the data
3454 * argv[2] = desired width
3455 * argv[3] = memory address
3456 * argv[4] = count of times to read
3457 */
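/* Example invocation from Tcl (hypothetical address), assuming this helper
 * is reached through the usual "mem2array" command:
 *
 *   mem2array readings 32 0x20000000 8
 *   puts $readings(0)
 */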
3458 if (argc != 4) {
3459 Jim_WrongNumArgs(interp, 1, argv, "varname width addr nelems");
3460 return JIM_ERR;
3461 }
3462 varname = Jim_GetString(argv[0], &len);
3463 /* given "foo" get space for worst case "foo(%d)" .. add 20 */
3464
3465 e = Jim_GetLong(interp, argv[1], &l);
3466 width = l;
3467 if (e != JIM_OK) {
3468 return e;
3469 }
3470
3471 e = Jim_GetLong(interp, argv[2], &l);
3472 addr = l;
3473 if (e != JIM_OK) {
3474 return e;
3475 }
3476 e = Jim_GetLong(interp, argv[3], &l);
3477 len = l;
3478 if (e != JIM_OK) {
3479 return e;
3480 }
3481 switch (width) {
3482 case 8:
3483 width = 1;
3484 break;
3485 case 16:
3486 width = 2;
3487 break;
3488 case 32:
3489 width = 4;
3490 break;
3491 default:
3492 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3493 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
3494 return JIM_ERR;
3495 }
3496 if (len == 0) {
3497 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3498 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero length read?", NULL);
3499 return JIM_ERR;
3500 }
3501 if ((addr + (len * width)) < addr) {
3502 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3503 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
3504 return JIM_ERR;
3505 }
3506 /* absurd transfer size? */
3507 if (len > 65536) {
3508 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3509 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
3510 return JIM_ERR;
3511 }
3512
3513 if ((width == 1) ||
3514 ((width == 2) && ((addr & 1) == 0)) ||
3515 ((width == 4) && ((addr & 3) == 0))) {
3516 /* all is well */
3517 } else {
3518 char buf[100];
3519 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3520 sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRIu32 " byte reads",
3521 addr,
3522 width);
3523 Jim_AppendStrings(interp, Jim_GetResult(interp), buf , NULL);
3524 return JIM_ERR;
3525 }
3526
3527 /* Transfer loop */
3528
3529 /* index counter */
3530 n = 0;
3531
3532 size_t buffersize = 4096;
3533 uint8_t *buffer = malloc(buffersize);
3534 if (buffer == NULL)
3535 return JIM_ERR;
3536
3537 /* assume ok */
3538 e = JIM_OK;
3539 while (len) {
3540 /* Slurp... in buffer size chunks */
3541
3542 count = len; /* in objects.. */
3543 if (count > (buffersize/width)) {
3544 count = (buffersize/width);
3545 }
3546
3547 retval = target_read_memory(target, addr, width, count, buffer);
3548 if (retval != ERROR_OK) {
3549 /* BOO !*/
3550 LOG_ERROR("mem2array: Read @ 0x%08x, w=%d, cnt=%d, failed",
3551 (unsigned int)addr,
3552 (int)width,
3553 (int)count);
3554 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3555 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
3556 e = JIM_ERR;
3557 len = 0;
3558 } else {
3559 v = 0; /* shut up gcc */
3560 for (i = 0 ;i < count ;i++, n++) {
3561 switch (width) {
3562 case 4:
3563 v = target_buffer_get_u32(target, &buffer[i*width]);
3564 break;
3565 case 2:
3566 v = target_buffer_get_u16(target, &buffer[i*width]);
3567 break;
3568 case 1:
3569 v = buffer[i] & 0x0ff;
3570 break;
3571 }
3572 new_int_array_element(interp, varname, n, v);
3573 }
3574 len -= count;
3575 }
3576 }
3577
3578 free(buffer);
3579
3580 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3581
3582 return JIM_OK;
3583 }
3584
3585 static int get_int_array_element(Jim_Interp * interp, const char *varname, int idx, uint32_t *val)
3586 {
3587 char *namebuf;
3588 Jim_Obj *nameObjPtr, *valObjPtr;
3589 int result;
3590 long l;
3591
3592 namebuf = alloc_printf("%s(%d)", varname, idx);
3593 if (!namebuf)
3594 return JIM_ERR;
3595
3596 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
3597 if (!nameObjPtr)
3598 {
3599 free(namebuf);
3600 return JIM_ERR;
3601 }
3602
3603 Jim_IncrRefCount(nameObjPtr);
3604 valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG);
3605 Jim_DecrRefCount(interp, nameObjPtr);
3606 free(namebuf);
3607 if (valObjPtr == NULL)
3608 return JIM_ERR;
3609
3610 result = Jim_GetLong(interp, valObjPtr, &l);
3611 /* printf("%s(%d) => 0%08x\n", varname, idx, val); */
3612 *val = l;
3613 return result;
3614 }
3615
3616 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
3617 {
3618 struct command_context *context;
3619 struct target *target;
3620
3621 context = current_command_context(interp);
3622 assert (context != NULL);
3623
3624 target = get_current_target(context);
3625 if (target == NULL) {
3626 LOG_ERROR("array2mem: no current target");
3627 return JIM_ERR;
3628 }
3629
3630 return target_array2mem(interp,target, argc-1, argv + 1);
3631 }
3632
3633 static int target_array2mem(Jim_Interp *interp, struct target *target,
3634 int argc, Jim_Obj *const *argv)
3635 {
3636 long l;
3637 uint32_t width;
3638 int len;
3639 uint32_t addr;
3640 uint32_t count;
3641 uint32_t v;
3642 const char *varname;
3643 int n, e, retval;
3644 uint32_t i;
3645
3646 /* argv[1] = name of array to get the data from
3647 * argv[2] = desired width
3648 * argv[3] = memory address
3649 * argv[4] = count to write
3650 */
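/* Example invocation from Tcl, mirroring mem2array (hypothetical address),
 * assuming this helper is reached through the usual "array2mem" command:
 *
 *   set buf(0) 0xdeadbeef
 *   array2mem buf 32 0x20000000 1
 */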
3651 if (argc != 4) {
3652 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems");
3653 return JIM_ERR;
3654 }
3655 varname = Jim_GetString(argv[0], &len);
3656 /* given "foo" get space for worst case "foo(%d)" .. add 20 */
3657
3658 e = Jim_GetLong(interp, argv[1], &l);
3659 width = l;
3660 if (e != JIM_OK) {
3661 return e;
3662 }
3663
3664 e = Jim_GetLong(interp, argv[2], &l);
3665 addr = l;
3666 if (e != JIM_OK) {
3667 return e;
3668 }
3669 e = Jim_GetLong(interp, argv[3], &l);
3670 len = l;
3671 if (e != JIM_OK) {
3672 return e;
3673 }
3674 switch (width) {
3675 case 8:
3676 width = 1;
3677 break;
3678 case 16:
3679 width = 2;
3680 break;
3681 case 32:
3682 width = 4;
3683 break;
3684 default:
3685 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3686 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
3687 return JIM_ERR;
3688 }
3689 if (len == 0) {
3690 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3691 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: zero length write?", NULL);
3692 return JIM_ERR;
3693 }
3694 if ((addr + (len * width)) < addr) {
3695 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3696 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: addr + len - wraps to zero?", NULL);
3697 return JIM_ERR;
3698 }
3699 /* absurd transfer size? */
3700 if (len > 65536) {
3701 Jim_SetResult