/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2007-2010 Øyvind Harboe                                 *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   Copyright (C) 2008, Duane Ellis                                       *
 *   openocd@duaneellis.com                                                *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   Copyright (C) 2008 by Rick Altherr                                    *
 *   kc8apf@kc8apf.net                                                     *
 *                                                                         *
 *   Copyright (C) 2011 by Broadcom Corporation                            *
 *   Evan Hunter - ehunter@broadcom.com                                    *
 *                                                                         *
 *   Copyright (C) ST-Ericsson SA 2011                                     *
 *   michel.jaouen@stericsson.com : smp minimum support                    *
 *                                                                         *
 *   Copyright (C) 2011 Andreas Fritiofson                                 *
 *   andreas.fritiofson@gmail.com                                          *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,      *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of       *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the         *
 *   GNU General Public License for more details.                         *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License    *
 *   along with this program; if not, write to the                        *
 *   Free Software Foundation, Inc.,                                      *
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.             *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <helper/time_support.h>
#include <jtag/jtag.h>
#include <flash/nor/core.h>

#include "target.h"
#include "target_type.h"
#include "target_request.h"
#include "breakpoints.h"
#include "register.h"
#include "trace.h"
#include "image.h"
#include "rtos/rtos.h"

static int target_read_buffer_default(struct target *target, uint32_t address,
		uint32_t size, uint8_t *buffer);
static int target_write_buffer_default(struct target *target, uint32_t address,
		uint32_t size, const uint8_t *buffer);
static int target_array2mem(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_mem2array(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_register_user_commands(struct command_context *cmd_ctx);

/* targets */
extern struct target_type arm7tdmi_target;
extern struct target_type arm720t_target;
extern struct target_type arm9tdmi_target;
extern struct target_type arm920t_target;
extern struct target_type arm966e_target;
extern struct target_type arm946e_target;
extern struct target_type arm926ejs_target;
extern struct target_type fa526_target;
extern struct target_type feroceon_target;
extern struct target_type dragonite_target;
extern struct target_type xscale_target;
extern struct target_type cortexm3_target;
extern struct target_type cortexa8_target;
extern struct target_type arm11_target;
extern struct target_type mips_m4k_target;
extern struct target_type avr_target;
extern struct target_type dsp563xx_target;
extern struct target_type dsp5680xx_target;
extern struct target_type testee_target;
extern struct target_type avr32_ap7k_target;
extern struct target_type stm32_stlink_target;

static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm3_target,
	&cortexa8_target,
	&arm11_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&stm32_stlink_target,
	NULL,
};

struct target *all_targets;
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
static const int polling_interval = 100;

static const Jim_Nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};

static const Jim_Nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};

static const char *target_strerror_safe(int err)
{
	const Jim_Nvp *n;

	n = Jim_Nvp_value2name_simple(nvp_error_target, err);
	if (n->name == NULL)
		return "unknown";
	else
		return n->name;
}

static const Jim_Nvp nvp_target_event[] = {
	{ .value = TARGET_EVENT_OLD_gdb_program_config, .name = "old-gdb_program_config" },
	{ .value = TARGET_EVENT_OLD_pre_resume, .name = "old-pre_resume" },

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	/* historical name */

	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },

	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_HALT_PRE, .name = "reset-halt-pre" },
	{ .value = TARGET_EVENT_RESET_HALT_POST, .name = "reset-halt-post" },
	{ .value = TARGET_EVENT_RESET_WAIT_PRE, .name = "reset-wait-pre" },
	{ .value = TARGET_EVENT_RESET_WAIT_POST, .name = "reset-wait-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resume-ok" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },

	{ .name = NULL, .value = -1 }
};

static const Jim_Nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};

static const Jim_Nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};

static const Jim_Nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};

static const Jim_Nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};

const char *debug_reason_name(struct target *t)
{
	const char *cp;

	cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
			t->debug_reason)->name;
	if (!cp) {
		LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

const char *target_state_name(struct target *t)
{
	const char *cp;
	cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
	if (!cp) {
		LOG_ERROR("Invalid target state: %d", (int)(t->state));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

/* determine the number of the new target */
static int new_target_number(void)
{
	struct target *t;
	int x;

	/* number is 0 based */
	x = -1;
	t = all_targets;
	while (t) {
		if (x < t->target_number)
			x = t->target_number;
		t = t->next;
	}
	return x + 1;
}

/* read a uint32_t from a buffer in target memory endianness */
uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u32(buffer);
	else
		return be_to_h_u32(buffer);
}

/* read a uint24_t from a buffer in target memory endianness */
uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u24(buffer);
	else
		return be_to_h_u24(buffer);
}

/* read a uint16_t from a buffer in target memory endianness */
uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u16(buffer);
	else
		return be_to_h_u16(buffer);
}

/* read a uint8_t from a buffer in target memory endianness */
static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
{
	return *buffer & 0x0ff;
}

/* write a uint32_t to a buffer in target memory endianness */
void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u32_to_le(buffer, value);
	else
		h_u32_to_be(buffer, value);
}

/* write a uint24_t to a buffer in target memory endianness */
void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u24_to_le(buffer, value);
	else
		h_u24_to_be(buffer, value);
}

/* write a uint16_t to a buffer in target memory endianness */
void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u16_to_le(buffer, value);
	else
		h_u16_to_be(buffer, value);
}

/* write a uint8_t to a buffer in target memory endianness */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	*buffer = value;
}

/* read a uint32_t array from a buffer in target memory endianness */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
}

/* read a uint16_t array from a buffer in target memory endianness */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
}

/* write a uint32_t array to a buffer in target memory endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, uint32_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
}

/* write a uint16_t array to a buffer in target memory endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, uint16_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
}

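/*
 * Example (editor's sketch, not part of the original file): pulling two
 * words out of a raw memory dump in the target's endianness.  The
 * address and buffer used here are hypothetical.
 *
 *	uint8_t buf[8];
 *	if (target_read_memory(target, 0x20000000, 4, 2, buf) == ERROR_OK) {
 *		uint32_t first = target_buffer_get_u32(target, buf);
 *		uint32_t second = target_buffer_get_u32(target, buf + 4);
 *		...
 *	}
 */
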
/* return a pointer to a configured target; id is name or number */
struct target *get_target(const char *id)
{
	struct target *target;

	/* try as Tcl target name */
	for (target = all_targets; target; target = target->next) {
		if (target->cmd_name == NULL)
			continue;
		if (strcmp(id, target->cmd_name) == 0)
			return target;
	}

	/* It's OK to remove this fallback sometime after August 2010 or so */

	/* no match, try as number */
	unsigned num;
	if (parse_uint(id, &num) != ERROR_OK)
		return NULL;

	for (target = all_targets; target; target = target->next) {
		if (target->target_number == (int)num) {
			LOG_WARNING("use '%s' as target identifier, not '%u'",
					target->cmd_name, num);
			return target;
		}
	}

	return NULL;
}

/* returns a pointer to the n-th configured target */
static struct target *get_target_by_num(int num)
{
	struct target *target = all_targets;

	while (target) {
		if (target->target_number == num)
			return target;
		target = target->next;
	}

	return NULL;
}

struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_target_by_num(cmd_ctx->current_target);

	if (target == NULL) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return target;
}

int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->halt_issued) {
		if (target->state == TARGET_HALTED)
			target->halt_issued = false;
		else {
			long long t = timeval_ms() - target->halt_issued_time;
			if (t > 1000) {
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}

int target_halt(struct target *target)
{
	int retval;
	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->halt(target);
	if (retval != ERROR_OK)
		return retval;

	target->halt_issued = true;
	target->halt_issued_time = timeval_ms();

	return ERROR_OK;
}

/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 *	of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 *	should be skipped.  (For example, maybe execution was stopped by
 *	such a breakpoint, in which case it would be counterproductive to
 *	let it re-trigger.)
 * @param debug_execution False if all working areas allocated by OpenOCD
 *	should be released and/or restored to their original contents.
 *	(This would for example be true to run some downloaded "helper"
 *	algorithm code, which resides in one such working buffer and uses
 *	another for data storage.)
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies.  For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled.  (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm.... and
 * enabling them could cause such helpers to malfunction, for example
 * by overwriting data with a breakpoint instruction.  On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, uint32_t address, int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	if (retval != ERROR_OK)
		return retval;

	return retval;
}
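
/*
 * Example (editor's sketch, not part of the original file): a typical
 * halt/inspect/resume sequence as a caller might write it.  Error
 * handling is abbreviated and the address 0x08000000 is hypothetical.
 *
 *	target_halt(target);
 *	... poll via target_poll() until target->state == TARGET_HALTED ...
 *
 *	resume at a fixed address, skipping any breakpoint placed there:
 *	target_resume(target, 0, 0x08000000, 1, 0);
 *
 *	or resume at the saved PC (current == 1, address is ignored):
 *	target_resume(target, 1, 0, 1, 0);
 */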

static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
{
	char buf[100];
	int retval;
	Jim_Nvp *n;
	n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
	if (n->name == NULL) {
		LOG_ERROR("invalid reset mode");
		return ERROR_FAIL;
	}

	/* disable polling during reset to make reset event scripts
	 * more predictable, i.e. dr/irscan & pathmove in events will
	 * not have JTAG operations injected into the middle of a sequence.
	 */
	bool save_poll = jtag_poll_get_enabled();

	jtag_poll_set_enabled(false);

	sprintf(buf, "ocd_process_reset %s", n->name);
	retval = Jim_Eval(cmd_ctx->interp, buf);

	jtag_poll_set_enabled(save_poll);

	if (retval != JIM_OK) {
		Jim_MakeErrorMessage(cmd_ctx->interp);
		command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
		return ERROR_FAIL;
	}

	/* We want any events to be processed before the prompt */
	retval = target_call_timer_callbacks_now();

	struct target *target;
	for (target = all_targets; target; target = target->next)
		target->type->check_reset(target);

	return retval;
}

static int identity_virt2phys(struct target *target,
		uint32_t virtual, uint32_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}

static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}

static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}

/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}

int target_examine_one(struct target *target)
{
	return target->type->examine(target);
}

static int jtag_enable_callback(enum jtag_event event, void *priv)
{
	struct target *target = priv;

	if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
		return ERROR_OK;

	jtag_unregister_event_callback(jtag_enable_callback, target);
	return target_examine_one(target);
}


/* Targets that correctly implement init + examine, i.e.
 * no communication with target during init:
 *
 * XScale
 */
int target_examine(void)
{
	int retval = ERROR_OK;
	struct target *target;

	for (target = all_targets; target; target = target->next) {
		/* defer examination, but don't skip it */
		if (!target->tap->enabled) {
			jtag_register_event_callback(jtag_enable_callback,
					target);
			continue;
		}
		retval = target_examine_one(target);
		if (retval != ERROR_OK)
			return retval;
	}
	return retval;
}

const char *target_type_name(struct target *target)
{
	return target->type->name;
}

static int target_write_memory_imp(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	return target->type->write_memory_imp(target, address, size, count, buffer);
}

static int target_read_memory_imp(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	return target->type->read_memory_imp(target, address, size, count, buffer);
}

static int target_soft_reset_halt_imp(struct target *target)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->soft_reset_halt_imp) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt_imp(target);
}

/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
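
/*
 * Example (editor's sketch, not in the original file): invoking a
 * downloaded algorithm synchronously.  `algo`, `exit_offset`,
 * `argument` and `arch_info` are hypothetical; a real caller first
 * loads the code into a working area with target_write_buffer().
 *
 *	struct reg_param reg_params[1];
 *	init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
 *	buf_set_u32(reg_params[0].value, 0, 32, argument);
 *	retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
 *			algo->address, algo->address + exit_offset,
 *			1000, &arch_info);
 *	destroy_reg_param(&reg_params[0]);
 */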

/**
 * Downloads a target-specific native code algorithm to the target,
 * executes and leaves it running.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t entry_point, uint32_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}

/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}
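
/*
 * Example (editor's sketch): the split start/wait pair lets the host do
 * useful work, such as refilling a data buffer, while the algorithm
 * runs on the target:
 *
 *	retval = target_start_algorithm(target, 0, NULL, n, reg_params,
 *			entry_point, exit_point, &arch_info);
 *	if (retval == ERROR_OK) {
 *		... stream more data to the target ...
 *		retval = target_wait_algorithm(target, 0, NULL, n, reg_params,
 *				exit_point, 500, &arch_info);
 *	}
 */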

/**
 * Executes a target-specific native code algorithm in the target.
 * It differs from target_run_algorithm in that the algorithm is asynchronous.
 * Because of this it requires a compliant algorithm:
 * see contrib/loaders/flash/stm32f1x.S for example.
 *
 * @param target used to run the algorithm
 */

int target_run_flash_async_algorithm(struct target *target,
		uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(!block_size || !(block_size & (block_size - 1)));

	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target and let it idle while writing the first chunk */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash write algorithm");
		return retval;
	}

	while (count > 0) {

		retval = target_read_u32(target, rp_addr, &rp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get read pointer");
			break;
		}

		LOG_DEBUG("count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32, count, wp, rp);

		if (rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		if ((rp & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. Make sure to not fill it completely,
		 * because that would make wp == rp and that's the empty condition. */
		uint32_t thisrun_bytes;
		if (rp > wp)
			thisrun_bytes = rp - wp - block_size;
		else if (rp > fifo_start_addr)
			thisrun_bytes = fifo_end_addr - wp;
		else
			thisrun_bytes = fifo_end_addr - wp - block_size;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * programming. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(10);
			continue;
		}

		/* Limit to the amount of data we actually want to write */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Write data to fifo */
		retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap write pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		wp += thisrun_bytes;
		if (wp >= fifo_end_addr)
			wp = fifo_start_addr;

		/* Store updated write pointer to target */
		retval = target_write_u32(target, wp_addr, wp);
		if (retval != ERROR_OK)
			break;
	}

	if (retval != ERROR_OK) {
		/* abort flash write algorithm on target */
		target_write_u32(target, wp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash write algorithm");
		retval = retval2;
	}

	return retval;
}
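
/*
 * Editor's sketch of the FIFO contract used above, seen from the
 * target-side algorithm (compare contrib/loaders/flash/stm32f1x.S).
 * The names below are descriptive, not symbols from this file:
 *
 *	buffer_start + 0 : wp, written by the host only
 *	buffer_start + 4 : rp, written by the target only; writing 0
 *	                   reports an error and aborts the host loop
 *	buffer_start + 8 ... buffer_start + buffer_size - 1 : data
 *
 * and the target-side loop, in pseudo-C:
 *
 *	while (blocks_left) {
 *		do { wp = load(buffer_start); } while (wp == rp);
 *		program_one_block(rp);
 *		rp += block_size;
 *		if (rp >= fifo_end_addr)
 *			rp = fifo_start_addr;
 *		store(buffer_start + 4, rp);
 *		blocks_left--;
 *	}
 */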

int target_read_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	return target->type->read_memory(target, address, size, count, buffer);
}

static int target_read_phys_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	return target->type->read_phys_memory(target, address, size, count, buffer);
}

int target_write_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	return target->type->write_memory(target, address, size, count, buffer);
}

static int target_write_phys_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	return target->type->write_phys_memory(target, address, size, count, buffer);
}

int target_bulk_write_memory(struct target *target,
		uint32_t address, uint32_t count, const uint8_t *buffer)
{
	return target->type->bulk_write_memory(target, address, count, buffer);
}

int target_add_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
		LOG_WARNING("target %s is not halted", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_breakpoint(target, breakpoint);
}

int target_add_context_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_context_breakpoint(target, breakpoint);
}

int target_add_hybrid_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_hybrid_breakpoint(target, breakpoint);
}

int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}

int target_add_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_watchpoint(target, watchpoint);
}

int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}

int target_get_gdb_reg_list(struct target *target,
		struct reg **reg_list[], int *reg_list_size)
{
	return target->type->get_gdb_reg_list(target, reg_list, reg_list_size);
}

int target_step(struct target *target,
		int current, uint32_t address, int handle_breakpoints)
{
	return target->type->step(target, current, address, handle_breakpoints);
}

/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static void target_reset_examined(struct target *target)
{
	target->examined = false;
}

static int err_read_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	LOG_ERROR("Not implemented: %s", __func__);
	return ERROR_FAIL;
}

static int err_write_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	LOG_ERROR("Not implemented: %s", __func__);
	return ERROR_FAIL;
}

static int handle_target(void *priv);

static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (type->examine == NULL)
		type->examine = default_examine;

	if (type->check_reset == NULL)
		type->check_reset = default_check_reset;

	assert(type->init_target != NULL);

	int retval = type->init_target(cmd_ctx, target);
	if (ERROR_OK != retval) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/**
	 * @todo get rid of those *memory_imp() methods, now that all
	 * callers are using target_*_memory() accessors ... and make
	 * sure the "physical" paths handle the same issues.
	 */
	/* a non-invasive way (in terms of patches) to add some code that
	 * runs before the type->write/read_memory implementation
	 */
	type->write_memory_imp = target->type->write_memory;
	type->write_memory = target_write_memory_imp;

	type->read_memory_imp = target->type->read_memory;
	type->read_memory = target_read_memory_imp;

	type->soft_reset_halt_imp = target->type->soft_reset_halt;
	type->soft_reset_halt = target_soft_reset_halt_imp;

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (type->write_phys_memory == NULL) {
			LOG_ERROR("type '%s' is missing write_phys_memory",
					type->name);
			type->write_phys_memory = err_write_phys_memory;
		}
		if (type->read_phys_memory == NULL) {
			LOG_ERROR("type '%s' is missing read_phys_memory",
					type->name);
			type->read_phys_memory = err_read_phys_memory;
		}
		if (type->virt2phys == NULL) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	if (target->type->read_buffer == NULL)
		target->type->read_buffer = target_read_buffer_default;

	if (target->type->write_buffer == NULL)
		target->type->write_buffer = target_write_buffer_default;

	return ERROR_OK;
}

static int target_init(struct command_context *cmd_ctx)
{
	struct target *target;
	int retval;

	for (target = all_targets; target; target = target->next) {
		retval = target_init_one(cmd_ctx, target);
		if (ERROR_OK != retval)
			return retval;
	}

	if (!all_targets)
		return ERROR_OK;

	retval = target_register_user_commands(cmd_ctx);
	if (ERROR_OK != retval)
		return retval;

	retval = target_register_timer_callback(&handle_target,
			polling_interval, 1, cmd_ctx->interp);
	if (ERROR_OK != retval)
		return retval;

	return ERROR_OK;
}

COMMAND_HANDLER(handle_target_init_command)
{
	int retval;

	if (CMD_ARGC != 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	static bool target_initialized;
	if (target_initialized) {
		LOG_INFO("'target init' has already been called");
		return ERROR_OK;
	}
	target_initialized = true;

	retval = command_run_line(CMD_CTX, "init_targets");
	if (ERROR_OK != retval)
		return retval;

	retval = command_run_line(CMD_CTX, "init_board");
	if (ERROR_OK != retval)
		return retval;

	LOG_DEBUG("Initializing targets...");
	return target_init(CMD_CTX);
}

int target_register_event_callback(int (*callback)(struct target *target,
		enum target_event event, void *priv), void *priv)
{
	struct target_event_callback **callbacks_p = &target_event_callbacks;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (*callbacks_p) {
		while ((*callbacks_p)->next)
			callbacks_p = &((*callbacks_p)->next);
		callbacks_p = &((*callbacks_p)->next);
	}

	(*callbacks_p) = malloc(sizeof(struct target_event_callback));
	(*callbacks_p)->callback = callback;
	(*callbacks_p)->priv = priv;
	(*callbacks_p)->next = NULL;

	return ERROR_OK;
}

int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
{
	struct target_timer_callback **callbacks_p = &target_timer_callbacks;
	struct timeval now;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (*callbacks_p) {
		while ((*callbacks_p)->next)
			callbacks_p = &((*callbacks_p)->next);
		callbacks_p = &((*callbacks_p)->next);
	}

	(*callbacks_p) = malloc(sizeof(struct target_timer_callback));
	(*callbacks_p)->callback = callback;
	(*callbacks_p)->periodic = periodic;
	(*callbacks_p)->time_ms = time_ms;

	gettimeofday(&now, NULL);
	(*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000;
	time_ms -= (time_ms % 1000);
	(*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000);
	if ((*callbacks_p)->when.tv_usec >= 1000000) {
		(*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000;
		(*callbacks_p)->when.tv_sec += 1;
	}

	(*callbacks_p)->priv = priv;
	(*callbacks_p)->next = NULL;

	return ERROR_OK;
}
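
/*
 * Example (editor's sketch): this is how target polling itself is wired
 * up in target_init() above.  A periodic callback fires roughly every
 * time_ms milliseconds until target_unregister_timer_callback() removes
 * it; my_poll is a hypothetical name.
 *
 *	static int my_poll(void *priv)
 *	{
 *		... poll hardware ...
 *		return ERROR_OK;
 *	}
 *
 *	target_register_timer_callback(my_poll, polling_interval, 1, NULL);
 */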

int target_unregister_event_callback(int (*callback)(struct target *target,
		enum target_event event, void *priv), void *priv)
{
	struct target_event_callback **p = &target_event_callbacks;
	struct target_event_callback *c = target_event_callbacks;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	while (c) {
		struct target_event_callback *next = c->next;
		if ((c->callback == callback) && (c->priv == priv)) {
			*p = next;
			free(c);
			return ERROR_OK;
		} else
			p = &(c->next);
		c = next;
	}

	return ERROR_OK;
}

static int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
{
	struct target_timer_callback **p = &target_timer_callbacks;
	struct target_timer_callback *c = target_timer_callbacks;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	while (c) {
		struct target_timer_callback *next = c->next;
		if ((c->callback == callback) && (c->priv == priv)) {
			*p = next;
			free(c);
			return ERROR_OK;
		} else
			p = &(c->next);
		c = next;
	}

	return ERROR_OK;
}

int target_call_event_callbacks(struct target *target, enum target_event event)
{
	struct target_event_callback *callback = target_event_callbacks;
	struct target_event_callback *next_callback;

	if (event == TARGET_EVENT_HALTED) {
		/* execute early halted first */
		target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
	}

	LOG_DEBUG("target event %i (%s)", event,
			Jim_Nvp_value2name_simple(nvp_target_event, event)->name);

	target_handle_event(target, event);

	while (callback) {
		next_callback = callback->next;
		callback->callback(target, event, callback->priv);
		callback = next_callback;
	}

	return ERROR_OK;
}

static int target_timer_callback_periodic_restart(
		struct target_timer_callback *cb, struct timeval *now)
{
	int time_ms = cb->time_ms;
	cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
	time_ms -= (time_ms % 1000);
	cb->when.tv_sec = now->tv_sec + time_ms / 1000;
	if (cb->when.tv_usec >= 1000000) {
		cb->when.tv_usec = cb->when.tv_usec - 1000000;
		cb->when.tv_sec += 1;
	}
	return ERROR_OK;
}

static int target_call_timer_callback(struct target_timer_callback *cb,
		struct timeval *now)
{
	cb->callback(cb->priv);

	if (cb->periodic)
		return target_timer_callback_periodic_restart(cb, now);

	return target_unregister_timer_callback(cb->callback, cb->priv);
}

static int target_call_timer_callbacks_check_time(int checktime)
{
	keep_alive();

	struct timeval now;
	gettimeofday(&now, NULL);

	struct target_timer_callback *callback = target_timer_callbacks;
	while (callback) {
		/* cleaning up may unregister and free this callback */
		struct target_timer_callback *next_callback = callback->next;

		bool call_it = callback->callback &&
			((!checktime && callback->periodic) ||
			 now.tv_sec > callback->when.tv_sec ||
			 (now.tv_sec == callback->when.tv_sec &&
			  now.tv_usec >= callback->when.tv_usec));

		if (call_it) {
			int retval = target_call_timer_callback(callback, &now);
			if (retval != ERROR_OK)
				return retval;
		}

		callback = next_callback;
	}

	return ERROR_OK;
}

int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}

/* invoke periodic callbacks immediately */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}

/* Prints the working area layout for debug purposes */
static void print_wa_layout(struct target *target)
{
	struct working_area *c = target->working_areas;

	while (c) {
		LOG_DEBUG("%c%c 0x%08"PRIx32"-0x%08"PRIx32" (%"PRIu32" bytes)",
			c->backup ? 'b' : ' ', c->free ? ' ' : '*',
			c->address, c->address + c->size - 1, c->size);
		c = c->next;
	}
}

/* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
static void target_split_working_area(struct working_area *area, uint32_t size)
{
	assert(area->free); /* Shouldn't split an allocated area */
	assert(size <= area->size); /* Caller should guarantee this */

	/* Split only if not already the right size */
	if (size < area->size) {
		struct working_area *new_wa = malloc(sizeof(*new_wa));

		if (new_wa == NULL)
			return;

		new_wa->next = area->next;
		new_wa->size = area->size - size;
		new_wa->address = area->address + size;
		new_wa->backup = NULL;
		new_wa->user = NULL;
		new_wa->free = true;

		area->next = new_wa;
		area->size = size;

		/* If backup memory was allocated to this area, it has the wrong size
		 * now so free it and it will be reallocated if/when needed */
		if (area->backup) {
			free(area->backup);
			area->backup = NULL;
		}
	}
}

/* Merge all adjacent free areas into one */
static void target_merge_working_areas(struct target *target)
{
	struct working_area *c = target->working_areas;

	while (c && c->next) {
		assert(c->next->address == c->address + c->size); /* This is an invariant */

		/* Find two adjacent free areas */
		if (c->free && c->next->free) {
			/* Merge the last into the first */
			c->size += c->next->size;

			/* Remove the last */
			struct working_area *to_be_freed = c->next;
			c->next = c->next->next;
			if (to_be_freed->backup)
				free(to_be_freed->backup);
			free(to_be_freed);

			/* If backup memory was allocated to the remaining area, it has
			 * the wrong size now */
			if (c->backup) {
				free(c->backup);
				c->backup = NULL;
			}
		} else {
			c = c->next;
		}
	}
}

int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state */
	if (target->working_areas == NULL) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory 0x%08"PRIx32,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory 0x%08"PRIx32,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 bytes */
	if (size % 4)
		size = (size + 3) & (~3UL);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (c == NULL)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %"PRIu32" bytes at address 0x%08"PRIx32, size, c->address);

	if (target->backup_working_area) {
		if (c->backup == NULL) {
			c->backup = malloc(c->size);
			if (c->backup == NULL)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}

int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
{
	int retval;

	retval = target_alloc_working_area_try(target, size, area);
	if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		LOG_WARNING("not enough working area available (requested %"PRIu32")", size);
	return retval;

}

static int target_restore_working_area(struct target *target, struct working_area *area)
{
	int retval = ERROR_OK;

	if (target->backup_working_area && area->backup != NULL) {
		retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
		if (retval != ERROR_OK)
			LOG_ERROR("failed to restore %"PRIu32" bytes of working area at address 0x%08"PRIx32,
				area->size, area->address);
	}

	return retval;
}

/* Restore the area's backup memory, if any, and return the area to the allocation pool */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	int retval = ERROR_OK;

	if (area->free)
		return retval;

	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %"PRIu32" bytes of working area at address 0x%08"PRIx32,
		area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}

int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}

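/*
 * Example (editor's sketch): the usual allocate/use/free cycle.  Note
 * that target_alloc_working_area() stores the address of the caller's
 * handle, so target_free_working_area() NULLs `wa` out again through
 * the saved user pointer.
 *
 *	struct working_area *wa = NULL;
 *	uint8_t data[256];
 *	...
 *	if (target_alloc_working_area(target, sizeof(data), &wa) == ERROR_OK) {
 *		target_write_buffer(target, wa->address, sizeof(data), data);
 *		...
 *		target_free_working_area(target, wa);
 *	}
 */
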
/* free resources and restore memory, if restoring memory fails,
 * free up resources anyway
 */
static void target_free_all_working_areas_restore(struct target *target, int restore)
{
	struct working_area *c = target->working_areas;

	LOG_DEBUG("freeing all working areas");

	/* Loop through all areas, restoring the allocated ones and marking them as free */
	while (c) {
		if (!c->free) {
			if (restore)
				target_restore_working_area(target, c);
			c->free = true;
			*c->user = NULL; /* Same as above */
			c->user = NULL;
		}
		c = c->next;
	}

	/* Run a merge pass to combine all areas into one */
	target_merge_working_areas(target);

	print_wa_layout(target);
}

void target_free_all_working_areas(struct target *target)
{
	target_free_all_working_areas_restore(target, 1);
}

/* Find the largest number of bytes that can be allocated */
uint32_t target_get_working_area_avail(struct target *target)
{
	struct working_area *c = target->working_areas;
	uint32_t max_size = 0;

	if (c == NULL)
		return target->working_area_size;

	while (c) {
		if (c->free && max_size < c->size)
			max_size = c->size;

		c = c->next;
	}

	return max_size;
}

int target_arch_state(struct target *target)
{
	int retval;
	if (target == NULL) {
		LOG_USER("No target has been configured");
		return ERROR_OK;
	}

	LOG_USER("target state: %s", target_state_name(target));

	if (target->state != TARGET_HALTED)
		return ERROR_OK;

	retval = target->type->arch_state(target);
	return retval;
}

/* Single aligned words are guaranteed to use 16 or 32 bit access
 * mode respectively, otherwise data is handled as quickly as
 * possible
 */
int target_write_buffer(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
{
	LOG_DEBUG("writing buffer of %i byte at 0x%8.8x",
			(int)size, (unsigned)address);

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	if (size == 0)
		return ERROR_OK;

	if ((address + size - 1) < address) {
		/* GDB can request this when e.g. PC is 0xfffffffc */
		LOG_ERROR("address + size wrapped (0x%08x, 0x%08x)",
				(unsigned)address,
				(unsigned)size);
		return ERROR_FAIL;
	}

	return target->type->write_buffer(target, address, size, buffer);
}

static int target_write_buffer_default(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
{
	int retval = ERROR_OK;

	if (((address % 2) == 0) && (size == 2))
		return target_write_memory(target, address, 2, 1, buffer);

	/* handle unaligned head bytes */
	if (address % 4) {
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		retval = target_write_memory(target, address, 1, unaligned, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4) {
		int aligned = size - (size % 4);

		/* use bulk writes above a certain limit. This may have to be changed */
		if (aligned > 128) {
			retval = target->type->bulk_write_memory(target, address, aligned / 4, buffer);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target, address, 4, aligned / 4, buffer);
			if (retval != ERROR_OK)
				return retval;
		}

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}

	/* handle tail writes of less than 4 bytes */
	if (size > 0) {
		retval = target_write_memory(target, address, 1, size, buffer);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
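
/*
 * Editor's note: a worked example of the split above.  A 10-byte write
 * to address 0x1003 becomes three accesses:
 *
 *	0x1003: 1 byte  x 1  (unaligned head, up to the next word boundary)
 *	0x1004: 4 bytes x 2  (aligned words; the bulk path needs > 128 bytes)
 *	0x100c: 1 byte  x 1  (tail)
 */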
1752
1753 /* Single aligned words are guaranteed to use 16 or 32 bit access
1754 * mode respectively, otherwise data is handled as quickly as
1755 * possible
1756 */
1757 int target_read_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
1758 {
1759 LOG_DEBUG("reading buffer of %i byte at 0x%8.8x",
1760 (int)size, (unsigned)address);
1761
1762 if (!target_was_examined(target)) {
1763 LOG_ERROR("Target not examined yet");
1764 return ERROR_FAIL;
1765 }
1766
1767 if (size == 0)
1768 return ERROR_OK;
1769
1770 if ((address + size - 1) < address) {
1771 /* GDB can request this when e.g. PC is 0xfffffffc*/
1772 LOG_ERROR("address + size wrapped(0x%08" PRIx32 ", 0x%08" PRIx32 ")",
1773 address,
1774 size);
1775 return ERROR_FAIL;
1776 }
1777
1778 return target->type->read_buffer(target, address, size, buffer);
1779 }
1780
1781 static int target_read_buffer_default(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
1782 {
1783 int retval = ERROR_OK;
1784
1785 if (((address % 2) == 0) && (size == 2))
1786 return target_read_memory(target, address, 2, 1, buffer);
1787
1788 /* handle unaligned head bytes */
1789 if (address % 4) {
1790 uint32_t unaligned = 4 - (address % 4);
1791
1792 if (unaligned > size)
1793 unaligned = size;
1794
1795 retval = target_read_memory(target, address, 1, unaligned, buffer);
1796 if (retval != ERROR_OK)
1797 return retval;
1798
1799 buffer += unaligned;
1800 address += unaligned;
1801 size -= unaligned;
1802 }
1803
1804 /* handle aligned words */
1805 if (size >= 4) {
1806 int aligned = size - (size % 4);
1807
1808 retval = target_read_memory(target, address, 4, aligned / 4, buffer);
1809 if (retval != ERROR_OK)
1810 return retval;
1811
1812 buffer += aligned;
1813 address += aligned;
1814 size -= aligned;
1815 }
1816
1817 	/* prevent byte access when possible (avoid AHB access limitations in some cases) */
1818 if (size >= 2) {
1819 int aligned = size - (size % 2);
1820 retval = target_read_memory(target, address, 2, aligned / 2, buffer);
1821 if (retval != ERROR_OK)
1822 return retval;
1823
1824 buffer += aligned;
1825 address += aligned;
1826 size -= aligned;
1827 }
1828 	/* handle tail reads of less than 2 bytes */
1829 if (size > 0) {
1830 retval = target_read_memory(target, address, 1, size, buffer);
1831 if (retval != ERROR_OK)
1832 return retval;
1833 }
1834
1835 return ERROR_OK;
1836 }
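
/* Worked example for the read path (addresses hypothetical): reading
 * 11 bytes from 0x2002 is issued as --
 *
 *   head:     2 bytes at 0x2002   (to reach 32-bit alignment)
 *   words:    8 bytes at 0x2004   (two 32-bit accesses)
 *   halfword: none                (only 1 byte left)
 *   tail:     1 byte  at 0x200c
 *
 * The extra halfword pass is what distinguishes reads from writes: it
 * keeps 2-byte leftovers away from byte accesses on AHB-limited buses.
 */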
1837
1838 int target_checksum_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* crc)
1839 {
1840 uint8_t *buffer;
1841 int retval;
1842 uint32_t i;
1843 uint32_t checksum = 0;
1844 if (!target_was_examined(target)) {
1845 LOG_ERROR("Target not examined yet");
1846 return ERROR_FAIL;
1847 }
1848
1849 retval = target->type->checksum_memory(target, address, size, &checksum);
1850 if (retval != ERROR_OK) {
1851 buffer = malloc(size);
1852 if (buffer == NULL) {
1853 LOG_ERROR("error allocating buffer for section (%d bytes)", (int)size);
1854 return ERROR_COMMAND_SYNTAX_ERROR;
1855 }
1856 retval = target_read_buffer(target, address, size, buffer);
1857 if (retval != ERROR_OK) {
1858 free(buffer);
1859 return retval;
1860 }
1861
1862 /* convert to target endianness */
1863 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
1864 uint32_t target_data;
1865 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
1866 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
1867 }
1868
1869 retval = image_calculate_checksum(buffer, size, &checksum);
1870 free(buffer);
1871 }
1872
1873 *crc = checksum;
1874
1875 return retval;
1876 }
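
/* Usage sketch (address and size hypothetical):
 *
 *   uint32_t crc;
 *   int retval = target_checksum_memory(target, 0x08000000, 4096, &crc);
 *
 * The target_type's own checksum_memory routine (typically a small
 * downloaded algorithm) is tried first; the read-back-and-checksum
 * path above is only the fallback when that fails.
 */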
1877
1878 int target_blank_check_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* blank)
1879 {
1880 int retval;
1881 if (!target_was_examined(target)) {
1882 LOG_ERROR("Target not examined yet");
1883 return ERROR_FAIL;
1884 }
1885
1886 if (target->type->blank_check_memory == 0)
1887 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1888
1889 retval = target->type->blank_check_memory(target, address, size, blank);
1890
1891 return retval;
1892 }
1893
1894 int target_read_u32(struct target *target, uint32_t address, uint32_t *value)
1895 {
1896 uint8_t value_buf[4];
1897 if (!target_was_examined(target)) {
1898 LOG_ERROR("Target not examined yet");
1899 return ERROR_FAIL;
1900 }
1901
1902 int retval = target_read_memory(target, address, 4, 1, value_buf);
1903
1904 if (retval == ERROR_OK) {
1905 *value = target_buffer_get_u32(target, value_buf);
1906 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1907 address,
1908 *value);
1909 } else {
1910 *value = 0x0;
1911 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1912 address);
1913 }
1914
1915 return retval;
1916 }
1917
1918 int target_read_u16(struct target *target, uint32_t address, uint16_t *value)
1919 {
1920 uint8_t value_buf[2];
1921 if (!target_was_examined(target)) {
1922 LOG_ERROR("Target not examined yet");
1923 return ERROR_FAIL;
1924 }
1925
1926 int retval = target_read_memory(target, address, 2, 1, value_buf);
1927
1928 if (retval == ERROR_OK) {
1929 *value = target_buffer_get_u16(target, value_buf);
1930 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
1931 address,
1932 *value);
1933 } else {
1934 *value = 0x0;
1935 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1936 address);
1937 }
1938
1939 return retval;
1940 }
1941
1942 int target_read_u8(struct target *target, uint32_t address, uint8_t *value)
1943 {
1944 	if (!target_was_examined(target)) {
1945 		LOG_ERROR("Target not examined yet");
1946 		return ERROR_FAIL;
1947 	}
1948 	int retval = target_read_memory(target, address, 1, 1, value);
1949
1950 if (retval == ERROR_OK) {
1951 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
1952 address,
1953 *value);
1954 } else {
1955 *value = 0x0;
1956 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1957 address);
1958 }
1959
1960 return retval;
1961 }
1962
1963 int target_write_u32(struct target *target, uint32_t address, uint32_t value)
1964 {
1965 int retval;
1966 uint8_t value_buf[4];
1967 if (!target_was_examined(target)) {
1968 LOG_ERROR("Target not examined yet");
1969 return ERROR_FAIL;
1970 }
1971
1972 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1973 address,
1974 value);
1975
1976 target_buffer_set_u32(target, value_buf, value);
1977 retval = target_write_memory(target, address, 4, 1, value_buf);
1978 if (retval != ERROR_OK)
1979 LOG_DEBUG("failed: %i", retval);
1980
1981 return retval;
1982 }
1983
1984 int target_write_u16(struct target *target, uint32_t address, uint16_t value)
1985 {
1986 int retval;
1987 uint8_t value_buf[2];
1988 if (!target_was_examined(target)) {
1989 LOG_ERROR("Target not examined yet");
1990 return ERROR_FAIL;
1991 }
1992
1993 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8x",
1994 address,
1995 value);
1996
1997 target_buffer_set_u16(target, value_buf, value);
1998 retval = target_write_memory(target, address, 2, 1, value_buf);
1999 if (retval != ERROR_OK)
2000 LOG_DEBUG("failed: %i", retval);
2001
2002 return retval;
2003 }
2004
2005 int target_write_u8(struct target *target, uint32_t address, uint8_t value)
2006 {
2007 int retval;
2008 if (!target_was_examined(target)) {
2009 LOG_ERROR("Target not examined yet");
2010 return ERROR_FAIL;
2011 }
2012
2013 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
2014 address, value);
2015
2016 retval = target_write_memory(target, address, 1, 1, &value);
2017 if (retval != ERROR_OK)
2018 LOG_DEBUG("failed: %i", retval);
2019
2020 return retval;
2021 }
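
/* Usage sketch for the fixed-width accessors (register address
 * hypothetical): a read-modify-write of a memory-mapped register stays
 * endian-correct because the target_buffer_* helpers do the swapping --
 *
 *   uint32_t v;
 *   if (target_read_u32(target, 0x40021000, &v) == ERROR_OK)
 *   	target_write_u32(target, 0x40021000, v | 0x1);
 */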
2022
2023 static int find_target(struct command_context *cmd_ctx, const char *name)
2024 {
2025 struct target *target = get_target(name);
2026 if (target == NULL) {
2027 LOG_ERROR("Target: %s is unknown, try one of:\n", name);
2028 return ERROR_FAIL;
2029 }
2030 if (!target->tap->enabled) {
2031 LOG_USER("Target: TAP %s is disabled, "
2032 "can't be the current target\n",
2033 target->tap->dotted_name);
2034 return ERROR_FAIL;
2035 }
2036
2037 cmd_ctx->current_target = target->target_number;
2038 return ERROR_OK;
2039 }
2040
2041
2042 COMMAND_HANDLER(handle_targets_command)
2043 {
2044 int retval = ERROR_OK;
2045 if (CMD_ARGC == 1) {
2046 retval = find_target(CMD_CTX, CMD_ARGV[0]);
2047 if (retval == ERROR_OK) {
2048 /* we're done! */
2049 return retval;
2050 }
2051 }
2052
2053 struct target *target = all_targets;
2054 command_print(CMD_CTX, " TargetName Type Endian TapName State ");
2055 command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
2056 while (target) {
2057 const char *state;
2058 char marker = ' ';
2059
2060 if (target->tap->enabled)
2061 state = target_state_name(target);
2062 else
2063 state = "tap-disabled";
2064
2065 if (CMD_CTX->current_target == target->target_number)
2066 marker = '*';
2067
2068 /* keep columns lined up to match the headers above */
2069 command_print(CMD_CTX,
2070 "%2d%c %-18s %-10s %-6s %-18s %s",
2071 target->target_number,
2072 marker,
2073 target_name(target),
2074 target_type_name(target),
2075 Jim_Nvp_value2name_simple(nvp_target_endian,
2076 target->endianness)->name,
2077 target->tap->dotted_name,
2078 state);
2079 target = target->next;
2080 }
2081
2082 return retval;
2083 }
2084
2085 /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
2086
2087 static int powerDropout;
2088 static int srstAsserted;
2089
2090 static int runPowerRestore;
2091 static int runPowerDropout;
2092 static int runSrstAsserted;
2093 static int runSrstDeasserted;
2094
2095 static int sense_handler(void)
2096 {
2097 static int prevSrstAsserted;
2098 static int prevPowerdropout;
2099
2100 int retval = jtag_power_dropout(&powerDropout);
2101 if (retval != ERROR_OK)
2102 return retval;
2103
2104 int powerRestored;
2105 powerRestored = prevPowerdropout && !powerDropout;
2106 if (powerRestored)
2107 runPowerRestore = 1;
2108
2109 long long current = timeval_ms();
2110 static long long lastPower;
2111 int waitMore = lastPower + 2000 > current;
2112 if (powerDropout && !waitMore) {
2113 runPowerDropout = 1;
2114 lastPower = current;
2115 }
2116
2117 retval = jtag_srst_asserted(&srstAsserted);
2118 if (retval != ERROR_OK)
2119 return retval;
2120
2121 int srstDeasserted;
2122 srstDeasserted = prevSrstAsserted && !srstAsserted;
2123
2124 static long long lastSrst;
2125 waitMore = lastSrst + 2000 > current;
2126 if (srstDeasserted && !waitMore) {
2127 runSrstDeasserted = 1;
2128 lastSrst = current;
2129 }
2130
2131 if (!prevSrstAsserted && srstAsserted)
2132 runSrstAsserted = 1;
2133
2134 prevSrstAsserted = srstAsserted;
2135 prevPowerdropout = powerDropout;
2136
2137 if (srstDeasserted || powerRestored) {
2138 /* Other than logging the event we can't do anything here.
2139 * Issuing a reset is a particularly bad idea as we might
2140 * be inside a reset already.
2141 */
2142 }
2143
2144 return ERROR_OK;
2145 }
2146
2147 static int backoff_times;
2148 static int backoff_count;
2149
2150 /* process target state changes */
2151 static int handle_target(void *priv)
2152 {
2153 Jim_Interp *interp = (Jim_Interp *)priv;
2154 int retval = ERROR_OK;
2155
2156 if (!is_jtag_poll_safe()) {
2157 /* polling is disabled currently */
2158 return ERROR_OK;
2159 }
2160
2161 /* we do not want to recurse here... */
2162 static int recursive;
2163 if (!recursive) {
2164 recursive = 1;
2165 sense_handler();
2166 /* danger! running these procedures can trigger srst assertions and power dropouts.
2167 * We need to avoid an infinite loop/recursion here and we do that by
2168 * clearing the flags after running these events.
2169 */
2170 int did_something = 0;
2171 if (runSrstAsserted) {
2172 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2173 Jim_Eval(interp, "srst_asserted");
2174 did_something = 1;
2175 }
2176 if (runSrstDeasserted) {
2177 Jim_Eval(interp, "srst_deasserted");
2178 did_something = 1;
2179 }
2180 if (runPowerDropout) {
2181 LOG_INFO("Power dropout detected, running power_dropout proc.");
2182 Jim_Eval(interp, "power_dropout");
2183 did_something = 1;
2184 }
2185 if (runPowerRestore) {
2186 Jim_Eval(interp, "power_restore");
2187 did_something = 1;
2188 }
2189
2190 if (did_something) {
2191 /* clear detect flags */
2192 sense_handler();
2193 }
2194
2195 /* clear action flags */
2196
2197 runSrstAsserted = 0;
2198 runSrstDeasserted = 0;
2199 runPowerRestore = 0;
2200 runPowerDropout = 0;
2201
2202 recursive = 0;
2203 }
2204
2205 if (backoff_times > backoff_count) {
2206 /* do not poll this time as we failed previously */
2207 backoff_count++;
2208 return ERROR_OK;
2209 }
2210 backoff_count = 0;
2211
2212 /* Poll targets for state changes unless that's globally disabled.
2213 * Skip targets that are currently disabled.
2214 */
2215 for (struct target *target = all_targets;
2216 is_jtag_poll_safe() && target;
2217 target = target->next) {
2218 if (!target->tap->enabled)
2219 continue;
2220
2221 /* only poll target if we've got power and srst isn't asserted */
2222 if (!powerDropout && !srstAsserted) {
2223 /* polling may fail silently until the target has been examined */
2224 retval = target_poll(target);
2225 if (retval != ERROR_OK) {
2226 				/* Back off exponentially: grow the interval between polls up to a 5000ms cap (polling_interval is nominally 100ms) */
2227 if (backoff_times * polling_interval < 5000) {
2228 backoff_times *= 2;
2229 backoff_times++;
2230 }
2231 LOG_USER("Polling target failed, GDB will be halted. Polling again in %dms",
2232 backoff_times * polling_interval);
2233
2234 /* Tell GDB to halt the debugger. This allows the user to
2235 * run monitor commands to handle the situation.
2236 */
2237 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2238 return retval;
2239 }
2240 /* Since we succeeded, we reset backoff count */
2241 if (backoff_times > 0)
2242 LOG_USER("Polling succeeded again");
2243 backoff_times = 0;
2244 }
2245 }
2246
2247 return retval;
2248 }
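
/* Back-off example, assuming the default 100ms polling_interval:
 * consecutive poll failures grow backoff_times as 1, 3, 7, 15, 31, 63
 * (2n+1 per failure), so the reported retry delay becomes 100, 300,
 * 700, 1500, 3100, 6300 ms and then stays there, since growth stops
 * once backoff_times * polling_interval reaches 5000ms. A single
 * successful poll resets the back-off to zero.
 */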
2249
2250 COMMAND_HANDLER(handle_reg_command)
2251 {
2252 struct target *target;
2253 struct reg *reg = NULL;
2254 unsigned count = 0;
2255 char *value;
2256
2257 LOG_DEBUG("-");
2258
2259 target = get_current_target(CMD_CTX);
2260
2261 /* list all available registers for the current target */
2262 if (CMD_ARGC == 0) {
2263 struct reg_cache *cache = target->reg_cache;
2264
2265 count = 0;
2266 while (cache) {
2267 unsigned i;
2268
2269 command_print(CMD_CTX, "===== %s", cache->name);
2270
2271 for (i = 0, reg = cache->reg_list;
2272 i < cache->num_regs;
2273 i++, reg++, count++) {
2274 /* only print cached values if they are valid */
2275 if (reg->valid) {
2276 value = buf_to_str(reg->value,
2277 reg->size, 16);
2278 command_print(CMD_CTX,
2279 "(%i) %s (/%" PRIu32 "): 0x%s%s",
2280 count, reg->name,
2281 reg->size, value,
2282 reg->dirty
2283 ? " (dirty)"
2284 : "");
2285 free(value);
2286 } else {
2287 command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
2288 count, reg->name,
2289 reg->size) ;
2290 }
2291 }
2292 cache = cache->next;
2293 }
2294
2295 return ERROR_OK;
2296 }
2297
2298 /* access a single register by its ordinal number */
2299 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
2300 unsigned num;
2301 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
2302
2303 struct reg_cache *cache = target->reg_cache;
2304 count = 0;
2305 while (cache) {
2306 unsigned i;
2307 for (i = 0; i < cache->num_regs; i++) {
2308 if (count++ == num) {
2309 reg = &cache->reg_list[i];
2310 break;
2311 }
2312 }
2313 if (reg)
2314 break;
2315 cache = cache->next;
2316 }
2317
2318 if (!reg) {
2319 command_print(CMD_CTX, "%i is out of bounds, the current target "
2320 "has only %i registers (0 - %i)", num, count, count - 1);
2321 return ERROR_OK;
2322 }
2323 } else {
2324 /* access a single register by its name */
2325 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
2326
2327 if (!reg) {
2328 command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
2329 return ERROR_OK;
2330 }
2331 }
2332
2333 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
2334
2335 /* display a register */
2336 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
2337 && (CMD_ARGV[1][0] <= '9')))) {
2338 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
2339 reg->valid = 0;
2340
2341 if (reg->valid == 0)
2342 reg->type->get(reg);
2343 value = buf_to_str(reg->value, reg->size, 16);
2344 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2345 free(value);
2346 return ERROR_OK;
2347 }
2348
2349 /* set register value */
2350 if (CMD_ARGC == 2) {
2351 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
2352 if (buf == NULL)
2353 return ERROR_FAIL;
2354 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
2355
2356 reg->type->set(reg, buf);
2357
2358 value = buf_to_str(reg->value, reg->size, 16);
2359 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2360 free(value);
2361
2362 free(buf);
2363
2364 return ERROR_OK;
2365 }
2366
2367 return ERROR_COMMAND_SYNTAX_ERROR;
2368 }
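
/* Example "reg" sessions (register values hypothetical):
 *
 *   > reg                ;# list every cached register
 *   > reg pc             ;# show pc, reading it if the cache is invalid
 *   > reg pc force       ;# invalidate the cache entry and re-read
 *   > reg pc 0x08000130  ;# write a new value through reg->type->set()
 *   > reg 0              ;# registers can also be addressed by ordinal
 */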
2369
2370 COMMAND_HANDLER(handle_poll_command)
2371 {
2372 int retval = ERROR_OK;
2373 struct target *target = get_current_target(CMD_CTX);
2374
2375 if (CMD_ARGC == 0) {
2376 command_print(CMD_CTX, "background polling: %s",
2377 jtag_poll_get_enabled() ? "on" : "off");
2378 command_print(CMD_CTX, "TAP: %s (%s)",
2379 target->tap->dotted_name,
2380 target->tap->enabled ? "enabled" : "disabled");
2381 if (!target->tap->enabled)
2382 return ERROR_OK;
2383 retval = target_poll(target);
2384 if (retval != ERROR_OK)
2385 return retval;
2386 retval = target_arch_state(target);
2387 if (retval != ERROR_OK)
2388 return retval;
2389 } else if (CMD_ARGC == 1) {
2390 bool enable;
2391 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2392 jtag_poll_set_enabled(enable);
2393 } else
2394 return ERROR_COMMAND_SYNTAX_ERROR;
2395
2396 return retval;
2397 }
2398
2399 COMMAND_HANDLER(handle_wait_halt_command)
2400 {
2401 if (CMD_ARGC > 1)
2402 return ERROR_COMMAND_SYNTAX_ERROR;
2403
2404 unsigned ms = 5000;
2405 if (1 == CMD_ARGC) {
2406 int retval = parse_uint(CMD_ARGV[0], &ms);
2407 if (ERROR_OK != retval)
2408 return ERROR_COMMAND_SYNTAX_ERROR;
2409 /* convert seconds (given) to milliseconds (needed) */
2410 ms *= 1000;
2411 }
2412
2413 struct target *target = get_current_target(CMD_CTX);
2414 return target_wait_state(target, TARGET_HALTED, ms);
2415 }
2416
2417 /* wait for target state to change. The trick here is to have a low
2418 * latency for short waits and not to suck up all the CPU time
2419 * on longer waits.
2420 *
2421 * After 500ms, keep_alive() is invoked
2422 */
2423 int target_wait_state(struct target *target, enum target_state state, int ms)
2424 {
2425 int retval;
2426 long long then = 0, cur;
2427 int once = 1;
2428
2429 for (;;) {
2430 retval = target_poll(target);
2431 if (retval != ERROR_OK)
2432 return retval;
2433 if (target->state == state)
2434 break;
2435 cur = timeval_ms();
2436 if (once) {
2437 once = 0;
2438 then = timeval_ms();
2439 LOG_DEBUG("waiting for target %s...",
2440 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2441 }
2442
2443 if (cur-then > 500)
2444 keep_alive();
2445
2446 if ((cur-then) > ms) {
2447 LOG_ERROR("timed out while waiting for target %s",
2448 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2449 return ERROR_FAIL;
2450 }
2451 }
2452
2453 return ERROR_OK;
2454 }
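
/* Usage sketch: the "wait_halt" handler above boils down to --
 *
 *   target_wait_state(target, TARGET_HALTED, 5000);
 *
 * i.e. poll until the state matches, keeping GDB alive once the wait
 * exceeds 500ms, and giving up after the timeout.
 */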
2455
2456 COMMAND_HANDLER(handle_halt_command)
2457 {
2458 LOG_DEBUG("-");
2459
2460 struct target *target = get_current_target(CMD_CTX);
2461 int retval = target_halt(target);
2462 if (ERROR_OK != retval)
2463 return retval;
2464
2465 if (CMD_ARGC == 1) {
2466 unsigned wait_local;
2467 retval = parse_uint(CMD_ARGV[0], &wait_local);
2468 if (ERROR_OK != retval)
2469 return ERROR_COMMAND_SYNTAX_ERROR;
2470 if (!wait_local)
2471 return ERROR_OK;
2472 }
2473
2474 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2475 }
2476
2477 COMMAND_HANDLER(handle_soft_reset_halt_command)
2478 {
2479 struct target *target = get_current_target(CMD_CTX);
2480
2481 LOG_USER("requesting target halt and executing a soft reset");
2482
2483 target->type->soft_reset_halt(target);
2484
2485 return ERROR_OK;
2486 }
2487
2488 COMMAND_HANDLER(handle_reset_command)
2489 {
2490 if (CMD_ARGC > 1)
2491 return ERROR_COMMAND_SYNTAX_ERROR;
2492
2493 enum target_reset_mode reset_mode = RESET_RUN;
2494 if (CMD_ARGC == 1) {
2495 const Jim_Nvp *n;
2496 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
2497 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
2498 return ERROR_COMMAND_SYNTAX_ERROR;
2499 reset_mode = n->value;
2500 }
2501
2502 /* reset *all* targets */
2503 return target_process_reset(CMD_CTX, reset_mode);
2504 }
2505
2506
2507 COMMAND_HANDLER(handle_resume_command)
2508 {
2509 int current = 1;
2510 if (CMD_ARGC > 1)
2511 return ERROR_COMMAND_SYNTAX_ERROR;
2512
2513 struct target *target = get_current_target(CMD_CTX);
2514 target_handle_event(target, TARGET_EVENT_OLD_pre_resume);
2515
2516 /* with no CMD_ARGV, resume from current pc, addr = 0,
2517 	 * with one argument, addr = CMD_ARGV[0],
2518 * handle breakpoints, not debugging */
2519 uint32_t addr = 0;
2520 if (CMD_ARGC == 1) {
2521 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2522 current = 0;
2523 }
2524
2525 return target_resume(target, current, addr, 1, 0);
2526 }
2527
2528 COMMAND_HANDLER(handle_step_command)
2529 {
2530 if (CMD_ARGC > 1)
2531 return ERROR_COMMAND_SYNTAX_ERROR;
2532
2533 LOG_DEBUG("-");
2534
2535 /* with no CMD_ARGV, step from current pc, addr = 0,
2536 * with one argument addr = CMD_ARGV[0],
2537 * handle breakpoints, debugging */
2538 uint32_t addr = 0;
2539 int current_pc = 1;
2540 if (CMD_ARGC == 1) {
2541 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2542 current_pc = 0;
2543 }
2544
2545 struct target *target = get_current_target(CMD_CTX);
2546
2547 return target->type->step(target, current_pc, addr, 1);
2548 }
2549
2550 static void handle_md_output(struct command_context *cmd_ctx,
2551 struct target *target, uint32_t address, unsigned size,
2552 unsigned count, const uint8_t *buffer)
2553 {
2554 const unsigned line_bytecnt = 32;
2555 unsigned line_modulo = line_bytecnt / size;
2556
2557 char output[line_bytecnt * 4 + 1];
2558 unsigned output_len = 0;
2559
2560 const char *value_fmt;
2561 switch (size) {
2562 case 4:
2563 value_fmt = "%8.8x ";
2564 break;
2565 case 2:
2566 value_fmt = "%4.4x ";
2567 break;
2568 case 1:
2569 value_fmt = "%2.2x ";
2570 break;
2571 default:
2572 /* "can't happen", caller checked */
2573 LOG_ERROR("invalid memory read size: %u", size);
2574 return;
2575 }
2576
2577 for (unsigned i = 0; i < count; i++) {
2578 if (i % line_modulo == 0) {
2579 output_len += snprintf(output + output_len,
2580 sizeof(output) - output_len,
2581 "0x%8.8x: ",
2582 (unsigned)(address + (i*size)));
2583 }
2584
2585 uint32_t value = 0;
2586 const uint8_t *value_ptr = buffer + i * size;
2587 switch (size) {
2588 case 4:
2589 value = target_buffer_get_u32(target, value_ptr);
2590 break;
2591 case 2:
2592 value = target_buffer_get_u16(target, value_ptr);
2593 break;
2594 case 1:
2595 value = *value_ptr;
2596 }
2597 output_len += snprintf(output + output_len,
2598 sizeof(output) - output_len,
2599 value_fmt, value);
2600
2601 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
2602 command_print(cmd_ctx, "%s", output);
2603 output_len = 0;
2604 }
2605 }
2606 }
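
/* Example output of "mdw 0x20000000 8" (values hypothetical); with
 * size == 4 the line_modulo above packs 8 words per 32-byte line:
 *
 *   0x20000000: 20001000 080001a1 080001b5 080001b9 080001bd 080001c1 080001c5 080001c9
 */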
2607
2608 COMMAND_HANDLER(handle_md_command)
2609 {
2610 if (CMD_ARGC < 1)
2611 return ERROR_COMMAND_SYNTAX_ERROR;
2612
2613 unsigned size = 0;
2614 switch (CMD_NAME[2]) {
2615 case 'w':
2616 size = 4;
2617 break;
2618 case 'h':
2619 size = 2;
2620 break;
2621 case 'b':
2622 size = 1;
2623 break;
2624 default:
2625 return ERROR_COMMAND_SYNTAX_ERROR;
2626 }
2627
2628 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2629 int (*fn)(struct target *target,
2630 uint32_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
2631 if (physical) {
2632 CMD_ARGC--;
2633 CMD_ARGV++;
2634 fn = target_read_phys_memory;
2635 } else
2636 fn = target_read_memory;
2637 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
2638 return ERROR_COMMAND_SYNTAX_ERROR;
2639
2640 uint32_t address;
2641 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2642
2643 unsigned count = 1;
2644 if (CMD_ARGC == 2)
2645 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
2646
2647 	uint8_t *buffer = calloc(count, size);
	if (buffer == NULL)
		return ERROR_FAIL;
2648
2649 struct target *target = get_current_target(CMD_CTX);
2650 int retval = fn(target, address, size, count, buffer);
2651 if (ERROR_OK == retval)
2652 handle_md_output(CMD_CTX, target, address, size, count, buffer);
2653
2654 free(buffer);
2655
2656 return retval;
2657 }
2658
2659 typedef int (*target_write_fn)(struct target *target,
2660 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
2661
2662 static int target_write_memory_fast(struct target *target,
2663 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
2664 {
2665 return target_write_buffer(target, address, size * count, buffer);
2666 }
2667
2668 static int target_fill_mem(struct target *target,
2669 uint32_t address,
2670 target_write_fn fn,
2671 unsigned data_size,
2672 /* value */
2673 uint32_t b,
2674 /* count */
2675 unsigned c)
2676 {
2677 /* We have to write in reasonably large chunks to be able
2678 * to fill large memory areas with any sane speed */
2679 const unsigned chunk_size = 16384;
2680 uint8_t *target_buf = malloc(chunk_size * data_size);
2681 if (target_buf == NULL) {
2682 LOG_ERROR("Out of memory");
2683 return ERROR_FAIL;
2684 }
2685
2686 for (unsigned i = 0; i < chunk_size; i++) {
2687 switch (data_size) {
2688 case 4:
2689 target_buffer_set_u32(target, target_buf + i * data_size, b);
2690 break;
2691 case 2:
2692 target_buffer_set_u16(target, target_buf + i * data_size, b);
2693 break;
2694 case 1:
2695 target_buffer_set_u8(target, target_buf + i * data_size, b);
2696 break;
2697 default:
2698 exit(-1);
2699 }
2700 }
2701
2702 int retval = ERROR_OK;
2703
2704 for (unsigned x = 0; x < c; x += chunk_size) {
2705 unsigned current;
2706 current = c - x;
2707 if (current > chunk_size)
2708 current = chunk_size;
2709 retval = fn(target, address + x * data_size, data_size, current, target_buf);
2710 if (retval != ERROR_OK)
2711 break;
2712 /* avoid GDB timeouts */
2713 keep_alive();
2714 }
2715 free(target_buf);
2716
2717 return retval;
2718 }
2719
2720
2721 COMMAND_HANDLER(handle_mw_command)
2722 {
2723 if (CMD_ARGC < 2)
2724 return ERROR_COMMAND_SYNTAX_ERROR;
2725 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2726 target_write_fn fn;
2727 if (physical) {
2728 CMD_ARGC--;
2729 CMD_ARGV++;
2730 fn = target_write_phys_memory;
2731 } else
2732 fn = target_write_memory_fast;
2733 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
2734 return ERROR_COMMAND_SYNTAX_ERROR;
2735
2736 uint32_t address;
2737 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2738
2739 uint32_t value;
2740 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2741
2742 unsigned count = 1;
2743 if (CMD_ARGC == 3)
2744 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
2745
2746 struct target *target = get_current_target(CMD_CTX);
2747 unsigned wordsize;
2748 switch (CMD_NAME[2]) {
2749 case 'w':
2750 wordsize = 4;
2751 break;
2752 case 'h':
2753 wordsize = 2;
2754 break;
2755 case 'b':
2756 wordsize = 1;
2757 break;
2758 default:
2759 return ERROR_COMMAND_SYNTAX_ERROR;
2760 }
2761
2762 return target_fill_mem(target, address, fn, wordsize, value, count);
2763 }
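
/* Example "mw" invocations (addresses hypothetical):
 *
 *   > mww 0x20000000 0xdeadbeef 16   ;# fill 16 words
 *   > mwh 0x20000000 0xbeef          ;# one halfword
 *   > mwb phys 0x00000000 0xff 4096  ;# bypass the MMU via "phys"
 *
 * Fills larger than the 16384-item chunk are simply split into
 * successive writes by target_fill_mem() above.
 */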
2764
2765 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
2766 uint32_t *min_address, uint32_t *max_address)
2767 {
2768 if (CMD_ARGC < 1 || CMD_ARGC > 5)
2769 return ERROR_COMMAND_SYNTAX_ERROR;
2770
2771 /* a base address isn't always necessary,
2772 * default to 0x0 (i.e. don't relocate) */
2773 if (CMD_ARGC >= 2) {
2774 uint32_t addr;
2775 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
2776 image->base_address = addr;
2777 image->base_address_set = 1;
2778 } else
2779 image->base_address_set = 0;
2780
2781 image->start_address_set = 0;
2782
2783 if (CMD_ARGC >= 4)
2784 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], *min_address);
2785 if (CMD_ARGC == 5) {
2786 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], *max_address);
2787 /* use size (given) to find max (required) */
2788 *max_address += *min_address;
2789 }
2790
2791 if (*min_address > *max_address)
2792 return ERROR_COMMAND_SYNTAX_ERROR;
2793
2794 return ERROR_OK;
2795 }
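
/* Example argument sets accepted by the parser above (file name and
 * addresses hypothetical):
 *
 *   > load_image app.elf
 *   > load_image app.bin 0x20000000 bin
 *   > load_image app.bin 0x20000000 bin 0x20000000 0x1000
 *
 * The last form clips the download to [min_address, min_address + size).
 */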
2796
2797 COMMAND_HANDLER(handle_load_image_command)
2798 {
2799 uint8_t *buffer;
2800 size_t buf_cnt;
2801 uint32_t image_size;
2802 uint32_t min_address = 0;
2803 uint32_t max_address = 0xffffffff;
2804 int i;
2805 struct image image;
2806
2807 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
2808 &image, &min_address, &max_address);
2809 if (ERROR_OK != retval)
2810 return retval;
2811
2812 struct target *target = get_current_target(CMD_CTX);
2813
2814 struct duration bench;
2815 duration_start(&bench);
2816
2817 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
2818 		return ERROR_FAIL;
2819
2820 image_size = 0x0;
2821 retval = ERROR_OK;
2822 for (i = 0; i < image.num_sections; i++) {
2823 buffer = malloc(image.sections[i].size);
2824 if (buffer == NULL) {
2825 command_print(CMD_CTX,
2826 "error allocating buffer for section (%d bytes)",
2827 (int)(image.sections[i].size));
2828 break;
2829 }
2830
2831 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
2832 if (retval != ERROR_OK) {
2833 free(buffer);
2834 break;
2835 }
2836
2837 uint32_t offset = 0;
2838 uint32_t length = buf_cnt;
2839
2840 		/* DANGER!!! beware of unsigned comparison here!!! */
2841
2842 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
2843 (image.sections[i].base_address < max_address)) {
2844
2845 if (image.sections[i].base_address < min_address) {
2846 /* clip addresses below */
2847 offset += min_address-image.sections[i].base_address;
2848 length -= offset;
2849 }
2850
2851 if (image.sections[i].base_address + buf_cnt > max_address)
2852 length -= (image.sections[i].base_address + buf_cnt)-max_address;
2853
2854 retval = target_write_buffer(target,
2855 image.sections[i].base_address + offset, length, buffer + offset);
2856 if (retval != ERROR_OK) {
2857 free(buffer);
2858 break;
2859 }
2860 image_size += length;
2861 command_print(CMD_CTX, "%u bytes written at address 0x%8.8" PRIx32 "",
2862 (unsigned int)length,
2863 image.sections[i].base_address + offset);
2864 }
2865
2866 free(buffer);
2867 }
2868
2869 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
2870 command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
2871 "in %fs (%0.3f KiB/s)", image_size,
2872 duration_elapsed(&bench), duration_kbps(&bench, image_size));
2873 }
2874
2875 image_close(&image);
2876
2877 return retval;
2878 }
2880
2881 COMMAND_HANDLER(handle_dump_image_command)
2882 {
2883 struct fileio fileio;
2884 uint8_t *buffer;
2885 int retval, retvaltemp;
2886 uint32_t address, size;
2887 struct duration bench;
2888 struct target *target = get_current_target(CMD_CTX);
2889
2890 if (CMD_ARGC != 3)
2891 return ERROR_COMMAND_SYNTAX_ERROR;
2892
2893 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], address);
2894 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], size);
2895
2896 uint32_t buf_size = (size > 4096) ? 4096 : size;
2897 buffer = malloc(buf_size);
2898 if (!buffer)
2899 return ERROR_FAIL;
2900
2901 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
2902 if (retval != ERROR_OK) {
2903 free(buffer);
2904 return retval;
2905 }
2906
2907 duration_start(&bench);
2908
2909 while (size > 0) {
2910 size_t size_written;
2911 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
2912 retval = target_read_buffer(target, address, this_run_size, buffer);
2913 if (retval != ERROR_OK)
2914 break;
2915
2916 retval = fileio_write(&fileio, this_run_size, buffer, &size_written);
2917 if (retval != ERROR_OK)
2918 break;
2919
2920 size -= this_run_size;
2921 address += this_run_size;
2922 }
2923
2924 free(buffer);
2925
2926 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
2927 int filesize;
2928 retval = fileio_size(&fileio, &filesize);
2929 if (retval != ERROR_OK)
2930 return retval;
2931 command_print(CMD_CTX,
2932 "dumped %ld bytes in %fs (%0.3f KiB/s)", (long)filesize,
2933 duration_elapsed(&bench), duration_kbps(&bench, filesize));
2934 }
2935
2936 retvaltemp = fileio_close(&fileio);
2937 if (retvaltemp != ERROR_OK)
2938 return retvaltemp;
2939
2940 return retval;
2941 }
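
/* Example: "dump_image sram.bin 0x20000000 0x4000" (file name and
 * addresses hypothetical) streams 16 KiB of target memory to disk in
 * the 4 KiB chunks sized above.
 */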
2942
2943 static COMMAND_HELPER(handle_verify_image_command_internal, int verify)
2944 {
2945 uint8_t *buffer;
2946 size_t buf_cnt;
2947 uint32_t image_size;
2948 int i;
2949 int retval;
2950 uint32_t checksum = 0;
2951 uint32_t mem_checksum = 0;
2952
2953 struct image image;
2954
2955 struct target *target = get_current_target(CMD_CTX);
2956
2957 if (CMD_ARGC < 1)
2958 return ERROR_COMMAND_SYNTAX_ERROR;
2959
2960 if (!target) {
2961 LOG_ERROR("no target selected");
2962 return ERROR_FAIL;
2963 }
2964
2965 struct duration bench;
2966 duration_start(&bench);
2967
2968 if (CMD_ARGC >= 2) {
2969 uint32_t addr;
2970 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
2971 image.base_address = addr;
2972 image.base_address_set = 1;
2973 } else {
2974 image.base_address_set = 0;
2975 image.base_address = 0x0;
2976 }
2977
2978 image.start_address_set = 0;
2979
2980 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
2981 if (retval != ERROR_OK)
2982 return retval;
2983
2984 image_size = 0x0;
2985 int diffs = 0;
2986 retval = ERROR_OK;
2987 for (i = 0; i < image.num_sections; i++) {
2988 buffer = malloc(image.sections[i].size);
2989 if (buffer == NULL) {
2990 command_print(CMD_CTX,
2991 "error allocating buffer for section (%d bytes)",
2992 (int)(image.sections[i].size));
2993 break;
2994 }
2995 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
2996 if (retval != ERROR_OK) {
2997 free(buffer);
2998 break;
2999 }
3000
3001 if (verify) {
3002 /* calculate checksum of image */
3003 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3004 if (retval != ERROR_OK) {
3005 free(buffer);
3006 break;
3007 }
3008
3009 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3010 if (retval != ERROR_OK) {
3011 free(buffer);
3012 break;
3013 }
3014
3015 if (checksum != mem_checksum) {
3016 /* failed crc checksum, fall back to a binary compare */
3017 uint8_t *data;
3018
3019 if (diffs == 0)
3020 LOG_ERROR("checksum mismatch - attempting binary compare");
3021
3022 				data = (uint8_t *)malloc(buf_cnt);
				if (data == NULL) {
					free(buffer);
					break;
				}
3023
3024 /* Can we use 32bit word accesses? */
3025 int size = 1;
3026 int count = buf_cnt;
3027 if ((count % 4) == 0) {
3028 size *= 4;
3029 count /= 4;
3030 }
3031 retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
3032 if (retval == ERROR_OK) {
3033 uint32_t t;
3034 for (t = 0; t < buf_cnt; t++) {
3035 if (data[t] != buffer[t]) {
3036 command_print(CMD_CTX,
3037 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3038 diffs,
3039 (unsigned)(t + image.sections[i].base_address),
3040 data[t],
3041 buffer[t]);
3042 if (diffs++ >= 127) {
3043 command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
3044 free(data);
3045 free(buffer);
3046 goto done;
3047 }
3048 }
3049 keep_alive();
3050 }
3051 }
3052 free(data);
3053 }
3054 } else {
3055 command_print(CMD_CTX, "address 0x%08" PRIx32 " length 0x%08zx",
3056 image.sections[i].base_address,
3057 buf_cnt);
3058 }
3059
3060 free(buffer);
3061 image_size += buf_cnt;
3062 }
3063 if (diffs > 0)
3064 command_print(CMD_CTX, "No more differences found.");
3065 done:
3066 if (diffs > 0)
3067 retval = ERROR_FAIL;
3068 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3069 command_print(CMD_CTX, "verified %" PRIu32 " bytes "
3070 "in %fs (%0.3f KiB/s)", image_size,
3071 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3072 }
3073
3074 image_close(&image);
3075
3076 return retval;
3077 }
3078
3079 COMMAND_HANDLER(handle_verify_image_command)
3080 {
3081 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 1);
3082 }
3083
3084 COMMAND_HANDLER(handle_test_image_command)
3085 {
3086 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 0);
3087 }
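
/* Example (file name and address hypothetical):
 *
 *   > verify_image app.bin 0x08000000 bin
 *
 * verifies via CRC first and only falls back to a binary compare on a
 * mismatch, while "test_image" (verify == 0) merely walks the sections
 * and prints their address ranges.
 */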
3088
3089 static int handle_bp_command_list(struct command_context *cmd_ctx)
3090 {
3091 struct target *target = get_current_target(cmd_ctx);
3092 struct breakpoint *breakpoint = target->breakpoints;
3093 while (breakpoint) {
3094 if (breakpoint->type == BKPT_SOFT) {
3095 char *buf = buf_to_str(breakpoint->orig_instr,
3096 breakpoint->length, 16);
3097 command_print(cmd_ctx, "IVA breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i, 0x%s",
3098 breakpoint->address,
3099 breakpoint->length,
3100 breakpoint->set, buf);
3101 free(buf);
3102 } else {
3103 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3104 command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3105 breakpoint->asid,
3106 breakpoint->length, breakpoint->set);
3107 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3108 command_print(cmd_ctx, "Hybrid breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
3109 breakpoint->address,
3110 breakpoint->length, breakpoint->set);
3111 command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3112 breakpoint->asid);
3113 } else
3114 command_print(cmd_ctx, "Breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
3115 breakpoint->address,
3116 breakpoint->length, breakpoint->set);
3117 }
3118
3119 breakpoint = breakpoint->next;
3120 }
3121 return ERROR_OK;
3122 }
3123
3124 static int handle_bp_command_set(struct command_context *cmd_ctx,
3125 uint32_t addr, uint32_t asid, uint32_t length, int hw)
3126 {
3127 struct target *target = get_current_target(cmd_ctx);
3128
3129 if (asid == 0) {
3130 int retval = breakpoint_add(target, addr, length, hw);
3131 if (ERROR_OK == retval)
3132 command_print(cmd_ctx, "breakpoint set at 0x%8.8" PRIx32 "", addr);
3133 else {
3134 LOG_ERROR("Failure setting breakpoint, the same address(IVA) is already used");
3135 return retval;
3136 }
3137 } else if (addr == 0) {
3138 int retval = context_breakpoint_add(target, asid, length, hw);
3139 if (ERROR_OK == retval)
3140 command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3141 else {
3142 LOG_ERROR("Failure setting breakpoint, the same address(CONTEXTID) is already used");
3143 return retval;
3144 }
3145 } else {
3146 int retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3147 if (ERROR_OK == retval)
3148 command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3149 else {
3150 LOG_ERROR("Failure setting breakpoint, the same address is already used");
3151 return retval;
3152 }
3153 }
3154 return ERROR_OK;
3155 }
3156
3157 COMMAND_HANDLER(handle_bp_command)
3158 {
3159 uint32_t addr;
3160 uint32_t asid;
3161 uint32_t length;
3162 int hw = BKPT_SOFT;
3163
3164 switch (CMD_ARGC) {
3165 case 0:
3166 return handle_bp_command_list(CMD_CTX);
3167
3168 case 2:
3169 asid = 0;
3170 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3171 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3172 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3173
3174 case 3:
3175 if (strcmp(CMD_ARGV[2], "hw") == 0) {
3176 hw = BKPT_HARD;
3177 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3178
3179 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3180
3181 asid = 0;
3182 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3183 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
3184 hw = BKPT_HARD;
3185 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
3186 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3187 addr = 0;
3188 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3189 		}
		/* third argument was neither "hw" nor "hw_ctx": don't fall through to case 4 */
		return ERROR_COMMAND_SYNTAX_ERROR;
3190 
3191 case 4:
3192 hw = BKPT_HARD;
3193 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3194 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
3195 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
3196 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3197
3198 default:
3199 return ERROR_COMMAND_SYNTAX_ERROR;
3200 }
3201 }
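
/* Example "bp" invocations covering the breakpoint flavours handled
 * above (addresses, ASID and lengths hypothetical):
 *
 *   > bp                      ;# list breakpoints
 *   > bp 0x08000100 2         ;# software breakpoint
 *   > bp 0x08000100 4 hw      ;# hardware breakpoint
 *   > bp 0x22 4 hw_ctx        ;# context breakpoint on ASID 0x22
 *   > bp 0x08000100 0x22 4    ;# hybrid: address linked to ASID
 */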
3202
3203 COMMAND_HANDLER(handle_rbp_command)
3204 {
3205 if (CMD_ARGC != 1)
3206 return ERROR_COMMAND_SYNTAX_ERROR;
3207
3208 uint32_t addr;
3209 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3210
3211 struct target *target = get_current_target(CMD_CTX);
3212 breakpoint_remove(target, addr);
3213
3214 return ERROR_OK;
3215 }
3216
3217 COMMAND_HANDLER(handle_wp_command)
3218 {
3219 struct target *target = get_current_target(CMD_CTX);
3220
3221 if (CMD_ARGC == 0) {
3222 struct watchpoint *watchpoint = target->watchpoints;
3223
3224 while (watchpoint) {
3225 command_print(CMD_CTX, "address: 0x%8.8" PRIx32
3226 ", len: 0x%8.8" PRIx32
3227 ", r/w/a: %i, value: 0x%8.8" PRIx32
3228 ", mask: 0x%8.8" PRIx32,
3229 watchpoint->address,
3230 watchpoint->length,
3231 (int)watchpoint->rw,
3232 watchpoint->value,
3233 watchpoint->mask);
3234 watchpoint = watchpoint->next;
3235 }
3236 return ERROR_OK;
3237 }
3238
3239 enum watchpoint_rw type = WPT_ACCESS;
3240 uint32_t addr = 0;
3241 uint32_t length = 0;
3242 uint32_t data_value = 0x0;
3243 uint32_t data_mask = 0xffffffff;
3244
3245 switch (CMD_ARGC) {
3246 case 5:
3247 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
3248 /* fall through */
3249 case 4:
3250 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
3251 /* fall through */
3252 case 3:
3253 switch (CMD_ARGV[2][0]) {
3254 case 'r':
3255 type = WPT_READ;
3256 break;
3257 case 'w':
3258 type = WPT_WRITE;
3259 break;
3260 case 'a':
3261 type = WPT_ACCESS;
3262 break;
3263 default:
3264 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
3265 return ERROR_COMMAND_SYNTAX_ERROR;
3266 }
3267 /* fall through */
3268 case 2:
3269 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3270 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3271 break;
3272
3273 default:
3274 return ERROR_COMMAND_SYNTAX_ERROR;
3275 }
3276
3277 int retval = watchpoint_add(target, addr, length, type,
3278 data_value, data_mask);
3279 if (ERROR_OK != retval)
3280 LOG_ERROR("Failure setting watchpoints");
3281
3282 return retval;
3283 }
3284
3285 COMMAND_HANDLER(handle_rwp_command)
3286 {
3287 if (CMD_ARGC != 1)
3288 return ERROR_COMMAND_SYNTAX_ERROR;
3289
3290 uint32_t addr;
3291 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3292
3293 struct target *target = get_current_target(CMD_CTX);
3294 watchpoint_remove(target, addr);
3295
3296 return ERROR_OK;
3297 }
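
/* Example "wp"/"rwp" invocations (addresses and masks hypothetical):
 *
 *   > wp 0x20000400 4                      ;# access watchpoint
 *   > wp 0x20000400 4 w                    ;# write-only
 *   > wp 0x20000400 4 a 0x55aa 0x0000ffff  ;# match a data value
 *   > rwp 0x20000400                       ;# remove it again
 */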
3298
3299 /**
3300 * Translate a virtual address to a physical address.
3301 *
3302 * The low-level target implementation must have logged a detailed error
3303  * which is then forwarded to the telnet/GDB session.
3304 */
3305 COMMAND_HANDLER(handle_virt2phys_command)
3306 {
3307 if (CMD_ARGC != 1)
3308 return ERROR_COMMAND_SYNTAX_ERROR;
3309
3310 uint32_t va;
3311 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], va);
3312 uint32_t pa;
3313
3314 struct target *target = get_current_target(CMD_CTX);
3315 int retval = target->type->virt2phys(target, va, &pa);
3316 if (retval == ERROR_OK)
3317 command_print(CMD_CTX, "Physical address 0x%08" PRIx32 "", pa);
3318
3319 return retval;
3320 }
3321
3322 static void writeData(FILE *f, const void *data, size_t len)
3323 {
3324 size_t written = fwrite(data, 1, len, f);
3325 if (written != len)
3326 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
3327 }
3328
3329 static void writeLong(FILE *f, int l)
3330 {
3331 int i;
3332 for (i = 0; i < 4; i++) {
3333 char c = (l >> (i*8))&0xff;
3334 writeData(f, &c, 1);
3335 }
3336
3337 }
3338
3339 static void writeString(FILE *f, char *s)
3340 {
3341 writeData(f, s, strlen(s));
3342 }
3343
3344 /* Dump a gmon.out histogram file. */
3345 static void writeGmon(uint32_t *samples, uint32_t sampleNum, const char *filename)
3346 {
3347 uint32_t i;
3348 FILE *f = fopen(filename, "w");
3349 if (f == NULL)
3350 return;
3351 writeString(f, "gmon");
3352 writeLong(f, 0x00000001); /* Version */
3353 writeLong(f, 0); /* padding */
3354 writeLong(f, 0); /* padding */
3355 writeLong(f, 0); /* padding */
3356
3357 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
3358 writeData(f, &zero, 1);
3359
3360 /* figure out bucket size */
3361 uint32_t min = samples[0];
3362 uint32_t max = samples[0];
3363 for (i = 0; i < sampleNum; i++) {
3364 if (min > samples[i])
3365 min = samples[i];
3366 if (max < samples[i])
3367 max = samples[i];
3368 }
3369
3370 int addressSpace = (max - min + 1);
3371 assert(addressSpace >= 2);
3372
3373 static const uint32_t maxBuckets = 16 * 1024; /* maximum buckets. */
3374 uint32_t length = addressSpace;
3375 if (length > maxBuckets)
3376 length = maxBuckets;
3377 int *buckets = malloc(sizeof(int)*length);
3378 if (buckets == NULL) {
3379 fclose(f);
3380 return;
3381 }
3382 memset(buckets, 0, sizeof(int) * length);
3383 for (i = 0; i < sampleNum; i++) {
3384 uint32_t address = samples[i];
3385 long long a = address - min;
3386 long long b = length - 1;
3387 long long c = addressSpace - 1;
3388 		int index_t = (a * b) / c; /* the long long intermediates avoid int32 overflow in a * b */
3389 buckets[index_t]++;
3390 }
3391
3392 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
3393 writeLong(f, min); /* low_pc */
3394 writeLong(f, max); /* high_pc */
3395 writeLong(f, length); /* # of samples */
3396 writeLong(f, 100); /* KLUDGE! We lie, ca. 100Hz best case. */
3397 writeString(f, "seconds");
3398 for (i = 0; i < (15-strlen("seconds")); i++)
3399 writeData(f, &zero, 1);
3400 writeString(f, "s");
3401
3402 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
3403
3404 char *data = malloc(2 * length);
3405 if (data != NULL) {
3406 for (i = 0; i < length; i++) {
3407 int val;
3408 val = buckets[i];
3409 if (val > 65535)
3410 val = 65535;
3411 data[i * 2] = val&0xff;
3412 data[i * 2 + 1] = (val >> 8) & 0xff;
3413 }
3414 free(buckets);
3415 writeData(f, data, length * 2);
3416 free(data);
3417 } else
3418 free(buckets);
3419
3420 fclose(f);
3421 }
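
/* The file written above follows the BSD gmon.out histogram layout
 * ("gmon" magic, version 1, one GMON_TAG_TIME_HIST record), so it can
 * be inspected with the usual toolchain, e.g. (paths hypothetical):
 *
 *   arm-none-eabi-gprof app.elf gmon.out
 */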
3422
3423 /* Profiling samples the CPU PC as quickly as OpenOCD is able, which
3424  * serves as an approximately random statistical sampling of the PC */
3425 COMMAND_HANDLER(handle_profile_command)
3426 {
3427 struct target *target = get_current_target(CMD_CTX);
3428 struct timeval timeout, now;
3429
3430 gettimeofday(&timeout, NULL);
3431 if (CMD_ARGC != 2)
3432 return ERROR_COMMAND_SYNTAX_ERROR;
3433 unsigned offset;
3434 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], offset);
3435
3436 timeval_add_time(&timeout, offset, 0);
3437
3438 /**
3439 * @todo: Some cores let us sample the PC without the
3440 * annoying halt/resume step; for example, ARMv7 PCSR.
3441 * Provide a way to use that more efficient mechanism.
3442 */
3443
3444 command_print(CMD_CTX, "Starting profiling. Halting and resuming the target as often as we can...");
3445
3446 static const int maxSample = 10000;
3447 uint32_t *samples = malloc(sizeof(uint32_t)*maxSample);
3448 if (samples == NULL)
3449 		return ERROR_FAIL;
3450
3451 int numSamples = 0;
3452 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
3453 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
3454
3455 int retval = ERROR_OK;
3456 for (;;) {
3457 target_poll(target);
3458 if (target->state == TARGET_HALTED) {
3459 uint32_t t = *((uint32_t *)reg->value);
3460 samples[numSamples++] = t;
3461 /* current pc, addr = 0, do not handle breakpoints, not debugging */
3462 retval = target_resume(target, 1, 0, 0, 0);
3463 target_poll(target);
3464 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
3465 } else if (target->state == TARGET_RUNNING) {
3466 /* We want to quickly sample the PC. */
3467 retval = target_halt(target);
3468 if (retval != ERROR_OK) {
3469 free(samples);
3470 return retval;
3471 }
3472 } else {
3473 command_print(CMD_CTX, "Target not halted or running");
3474 retval = ERROR_OK;
3475 break;
3476 }
3477 if (retval != ERROR_OK)
3478 break;
3479
3480 gettimeofday(&now, NULL);
3481 if ((numSamples >= maxSample) || ((now.tv_sec >= timeout.tv_sec)
3482 && (now.tv_usec >= timeout.tv_usec))) {
3483 command_print(CMD_CTX, "Profiling completed. %d samples.", numSamples);
3484 retval = target_poll(target);
3485 if (retval != ERROR_OK) {
3486 free(samples);
3487 return retval;
3488 }
3489 if (target->state == TARGET_HALTED) {
3490 /* current pc, addr = 0, do not handle
3491 * breakpoints, not debugging */
3492 target_resume(target, 1, 0, 0, 0);
3493 }
3494 retval = target_poll(target);
3495 if (retval != ERROR_OK) {
3496 free(samples);
3497 return retval;
3498 }
3499 writeGmon(samples, numSamples, CMD_ARGV[1]);
3500 command_print(CMD_CTX, "Wrote %s", CMD_ARGV[1]);
3501 break;
3502 }
3503 }
3504 free(samples);
3505
3506 return retval;
3507 }
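
/* Example: "profile 30 gmon.out" (arguments hypothetical) samples the
 * PC via halt/resume for 30 seconds or 10000 samples, whichever comes
 * first, then writes the histogram for gprof.
 */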
3508
3509 static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val)
3510 {
3511 char *namebuf;
3512 Jim_Obj *nameObjPtr, *valObjPtr;
3513 int result;
3514
3515 namebuf = alloc_printf("%s(%d)", varname, idx);
3516 if (!namebuf)
3517 return JIM_ERR;
3518
3519 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
3520 valObjPtr = Jim_NewIntObj(interp, val);
3521 if (!nameObjPtr || !valObjPtr) {
3522 free(namebuf);
3523 return JIM_ERR;
3524 }
3525
3526 Jim_IncrRefCount(nameObjPtr);
3527 Jim_IncrRefCount(valObjPtr);
3528 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
3529 Jim_DecrRefCount(interp, nameObjPtr);
3530 Jim_DecrRefCount(interp, valObjPtr);
3531 free(namebuf);
3532 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
3533 return result;
3534 }
3535
3536 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
3537 {
3538 struct command_context *context;
3539 struct target *target;
3540
3541 context = current_command_context(interp);
3542 assert(context != NULL);
3543
3544 target = get_current_target(context);
3545 if (target == NULL) {
3546 LOG_ERROR("mem2array: no current target");
3547 return JIM_ERR;
3548 }
3549
3550 return target_mem2array(interp, target, argc - 1, argv + 1);
3551 }
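
/* Tcl usage sketch (address hypothetical):
 *
 *   mem2array readings 32 0x20000000 16
 *   puts $readings(0)
 *
 * fills readings(0) .. readings(15) with 16 words read from the
 * current target.
 */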
3552
3553 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
3554 {
3555 long l;
3556 uint32_t width;
3557 int len;
3558 uint32_t addr;
3559 uint32_t count;
3560 uint32_t v;
3561 const char *varname;
3562 int n, e, retval;
3563 uint32_t i;
3564
3565 /* argv[1] = name of array to receive the data
3566 * argv[2] = desired width
3567 * argv[3] = memory address
3568 * argv[4] = count of times to read
3569 */
3570 if (argc != 4) {
3571 Jim_WrongNumArgs(interp, 1, argv, "varname width addr nelems");
3572 return JIM_ERR;
3573 }
3574 varname = Jim_GetString(argv[0], &len);
3575 /* given "foo" get space for worse case "foo(%d)" .. add 20 */
3576
3577 e = Jim_GetLong(interp, argv[1], &l);
3578 width = l;
3579 if (e != JIM_OK)
3580 return e;
3581
3582 e = Jim_GetLong(interp, argv[2], &l);
3583 addr = l;
3584 if (e != JIM_OK)
3585 return e;
3586 e = Jim_GetLong(interp, argv[3], &l);
3587 len = l;
3588 if (e != JIM_OK)
3589 return e;
3590 switch (width) {
3591 case 8:
3592 width = 1;
3593 break;
3594 case 16:
3595 width = 2;
3596 break;
3597 case 32:
3598 width = 4;
3599 break;
3600 default:
3601 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3602 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
3603 return JIM_ERR;
3604 }
3605 if (len == 0) {
3606 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3607 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
3608 return JIM_ERR;
3609 }
3610 if ((addr + (len * width)) < addr) {
3611 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3612 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
3613 return JIM_ERR;
3614 }
3615 /* absurd transfer size? */
3616 if (len > 65536) {
3617 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3618 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
3619 return JIM_ERR;
3620 }
3621
3622 if ((width == 1) ||
3623 ((width == 2) && ((addr & 1) == 0)) ||
3624 ((width == 4) && ((addr & 3) == 0))) {
3625 /* all is well */
3626 } else {
3627 char buf[100];
3628 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3629 sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRId32 " byte reads",
3630 addr,
3631 width);
3632 Jim_AppendStrings(interp, Jim_GetResult(interp), buf , NULL);
3633 return JIM_ERR;
3634 }
3635
3636 /* Transfer loop */
3637
3638 /* index counter */
3639 n = 0;
3640
3641 size_t buffersize = 4096;
3642 uint8_t *buffer = malloc(buffersize);
3643 if (buffer == NULL)
3644 return JIM_ERR;
3645
3646 /* assume ok */
3647 e = JIM_OK;
3648 while