target: add async algorithm timeout
[openocd.git] / src / target / target.c
/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2007-2010 Øyvind Harboe                                 *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   Copyright (C) 2008, Duane Ellis                                       *
 *   openocd@duaneellis.com                                                *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   Copyright (C) 2008 by Rick Altherr                                    *
 *   kc8apf@kc8apf.net                                                     *
 *                                                                         *
 *   Copyright (C) 2011 by Broadcom Corporation                            *
 *   Evan Hunter - ehunter@broadcom.com                                    *
 *                                                                         *
 *   Copyright (C) ST-Ericsson SA 2011                                     *
 *   michel.jaouen@stericsson.com : smp minimum support                    *
 *                                                                         *
 *   Copyright (C) 2011 Andreas Fritiofson                                 *
 *   andreas.fritiofson@gmail.com                                          *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,      *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of       *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        *
 *   GNU General Public License for more details.                         *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License    *
 *   along with this program; if not, write to the                        *
 *   Free Software Foundation, Inc.,                                      *
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.             *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <helper/time_support.h>
#include <jtag/jtag.h>
#include <flash/nor/core.h>

#include "target.h"
#include "target_type.h"
#include "target_request.h"
#include "breakpoints.h"
#include "register.h"
#include "trace.h"
#include "image.h"
#include "rtos/rtos.h"

static int target_read_buffer_default(struct target *target, uint32_t address,
		uint32_t size, uint8_t *buffer);
static int target_write_buffer_default(struct target *target, uint32_t address,
		uint32_t size, const uint8_t *buffer);
static int target_array2mem(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_mem2array(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_register_user_commands(struct command_context *cmd_ctx);

/* targets */
extern struct target_type arm7tdmi_target;
extern struct target_type arm720t_target;
extern struct target_type arm9tdmi_target;
extern struct target_type arm920t_target;
extern struct target_type arm966e_target;
extern struct target_type arm946e_target;
extern struct target_type arm926ejs_target;
extern struct target_type fa526_target;
extern struct target_type feroceon_target;
extern struct target_type dragonite_target;
extern struct target_type xscale_target;
extern struct target_type cortexm3_target;
extern struct target_type cortexa8_target;
extern struct target_type arm11_target;
extern struct target_type mips_m4k_target;
extern struct target_type avr_target;
extern struct target_type dsp563xx_target;
extern struct target_type dsp5680xx_target;
extern struct target_type testee_target;
extern struct target_type avr32_ap7k_target;
extern struct target_type stm32_stlink_target;

static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm3_target,
	&cortexa8_target,
	&arm11_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&stm32_stlink_target,
	NULL,
};

struct target *all_targets;
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
static const int polling_interval = 100;

static const Jim_Nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};

static const Jim_Nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};

static const char *target_strerror_safe(int err)
{
	const Jim_Nvp *n;

	n = Jim_Nvp_value2name_simple(nvp_error_target, err);
	if (n->name == NULL)
		return "unknown";
	else
		return n->name;
}

static const Jim_Nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_HALT_PRE, .name = "reset-halt-pre" },
	{ .value = TARGET_EVENT_RESET_HALT_POST, .name = "reset-halt-post" },
	{ .value = TARGET_EVENT_RESET_WAIT_PRE, .name = "reset-wait-pre" },
	{ .value = TARGET_EVENT_RESET_WAIT_POST, .name = "reset-wait-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .name = NULL, .value = -1 }
};

static const Jim_Nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};

static const Jim_Nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};

static const Jim_Nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};

static const Jim_Nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};

const char *debug_reason_name(struct target *t)
{
	const char *cp;

	cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
			t->debug_reason)->name;
	if (!cp) {
		LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

const char *target_state_name(struct target *t)
{
	const char *cp;
	cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
	if (!cp) {
		LOG_ERROR("Invalid target state: %d", (int)(t->state));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

/* determine the number of the new target */
static int new_target_number(void)
{
	struct target *t;
	int x;

	/* number is 0 based */
	x = -1;
	t = all_targets;
	while (t) {
		if (x < t->target_number)
			x = t->target_number;
		t = t->next;
	}
	return x + 1;
}

/* read a uint32_t from a buffer in target memory endianness */
uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u32(buffer);
	else
		return be_to_h_u32(buffer);
}

/* read a uint24_t from a buffer in target memory endianness */
uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u24(buffer);
	else
		return be_to_h_u24(buffer);
}

/* read a uint16_t from a buffer in target memory endianness */
uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u16(buffer);
	else
		return be_to_h_u16(buffer);
}

/* read a uint8_t from a buffer in target memory endianness */
static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
{
	return *buffer & 0x0ff;
}

/* write a uint32_t to a buffer in target memory endianness */
void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u32_to_le(buffer, value);
	else
		h_u32_to_be(buffer, value);
}

/* write a uint24_t to a buffer in target memory endianness */
void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u24_to_le(buffer, value);
	else
		h_u24_to_be(buffer, value);
}

/* write a uint16_t to a buffer in target memory endianness */
void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u16_to_le(buffer, value);
	else
		h_u16_to_be(buffer, value);
}

/* write a uint8_t to a buffer in target memory endianness */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	*buffer = value;
}

/* read a uint32_t array from a buffer in target memory endianness */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
}

/* read a uint16_t array from a buffer in target memory endianness */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
}

/* write a uint32_t array to a buffer in target memory endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, uint32_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
}

/* write a uint16_t array to a buffer in target memory endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, uint16_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
}

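/* Usage sketch for the helpers above (illustrative only; the address
 * is arbitrary): pack a word for the target, write it out, read it
 * back and unpack it, honoring the target's configured endianness.
 *
 *	uint8_t buf[4];
 *	target_buffer_set_u32(target, buf, 0xdeadbeef);
 *	retval = target_write_memory(target, 0x20000000, 4, 1, buf);
 *	...
 *	retval = target_read_memory(target, 0x20000000, 4, 1, buf);
 *	uint32_t value = target_buffer_get_u32(target, buf);
 */
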
/* return a pointer to a configured target; id is name or number */
struct target *get_target(const char *id)
{
	struct target *target;

	/* try as tcl target name */
	for (target = all_targets; target; target = target->next) {
		if (target->cmd_name == NULL)
			continue;
		if (strcmp(id, target->cmd_name) == 0)
			return target;
	}

	/* It's OK to remove this fallback sometime after August 2010 or so */

	/* no match, try as number */
	unsigned num;
	if (parse_uint(id, &num) != ERROR_OK)
		return NULL;

	for (target = all_targets; target; target = target->next) {
		if (target->target_number == (int)num) {
			LOG_WARNING("use '%s' as target identifier, not '%u'",
					target->cmd_name, num);
			return target;
		}
	}

	return NULL;
}

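/* Example: looking up a target by the name given in the configuration
 * script.  "cortex_m3.cpu" is a hypothetical identifier; any configured
 * target name or (deprecated) target number works:
 *
 *	struct target *t = get_target("cortex_m3.cpu");
 *	if (t == NULL)
 *		return ERROR_FAIL;
 */
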
/* returns a pointer to the n-th configured target */
static struct target *get_target_by_num(int num)
{
	struct target *target = all_targets;

	while (target) {
		if (target->target_number == num)
			return target;
		target = target->next;
	}

	return NULL;
}

struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_target_by_num(cmd_ctx->current_target);

	if (target == NULL) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return target;
}

int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->halt_issued) {
		if (target->state == TARGET_HALTED)
			target->halt_issued = false;
		else {
			long long t = timeval_ms() - target->halt_issued_time;
			if (t > 1000) {
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}

int target_halt(struct target *target)
{
	int retval;
	/* We can't halt until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->halt(target);
	if (retval != ERROR_OK)
		return retval;

	target->halt_issued = true;
	target->halt_issued_time = timeval_ms();

	return ERROR_OK;
}

/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 *	of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 *	should be skipped.  (For example, maybe execution was stopped by
 *	such a breakpoint, in which case it would be counterproductive to
 *	let it re-trigger.)
 * @param debug_execution False if all working areas allocated by OpenOCD
 *	should be released and/or restored to their original contents.
 *	(This would for example be true to run some downloaded "helper"
 *	algorithm code, which resides in one such working buffer and uses
 *	another for data storage.)
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies.  For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled.  (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm...  and
 * enabling them could cause such helpers to malfunction (for example,
 * by overwriting data with a breakpoint instruction).  On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, uint32_t address, int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}

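/* Example: resume at the saved PC, skipping any breakpoint there.
 * A minimal sketch of a typical caller; error handling is elided and a
 * halted, examined target is assumed:
 *
 *	retval = target_resume(target, 1, 0, 1, 0);
 *	if (retval != ERROR_OK)
 *		return retval;
 */
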
static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
{
	char buf[100];
	int retval;
	Jim_Nvp *n;
	n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
	if (n->name == NULL) {
		LOG_ERROR("invalid reset mode");
		return ERROR_FAIL;
	}

	/* disable polling during reset to make reset event scripts
	 * more predictable, i.e. dr/irscan & pathmove in events will
	 * not have JTAG operations injected into the middle of a sequence.
	 */
	bool save_poll = jtag_poll_get_enabled();

	jtag_poll_set_enabled(false);

	sprintf(buf, "ocd_process_reset %s", n->name);
	retval = Jim_Eval(cmd_ctx->interp, buf);

	jtag_poll_set_enabled(save_poll);

	if (retval != JIM_OK) {
		Jim_MakeErrorMessage(cmd_ctx->interp);
		command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
		return ERROR_FAIL;
	}

	/* We want any events to be processed before the prompt */
	retval = target_call_timer_callbacks_now();

	struct target *target;
	for (target = all_targets; target; target = target->next)
		target->type->check_reset(target);

	return retval;
}

static int identity_virt2phys(struct target *target,
		uint32_t virtual, uint32_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}

static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}

static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}

/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}

int target_examine_one(struct target *target)
{
	return target->type->examine(target);
}

static int jtag_enable_callback(enum jtag_event event, void *priv)
{
	struct target *target = priv;

	if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
		return ERROR_OK;

	jtag_unregister_event_callback(jtag_enable_callback, target);

	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

	int retval = target_examine_one(target);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);

	return retval;
}

/* Targets that correctly implement init + examine, i.e.
 * no communication with target during init:
 *
 * XScale
 */
int target_examine(void)
{
	int retval = ERROR_OK;
	struct target *target;

	for (target = all_targets; target; target = target->next) {
		/* defer examination, but don't skip it */
		if (!target->tap->enabled) {
			jtag_register_event_callback(jtag_enable_callback,
					target);
			continue;
		}

		target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

		retval = target_examine_one(target);
		if (retval != ERROR_OK)
			return retval;

		target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
	}
	return retval;
}

const char *target_type_name(struct target *target)
{
	return target->type->name;
}

static int target_write_memory_imp(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	return target->type->write_memory_imp(target, address, size, count, buffer);
}

static int target_read_memory_imp(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	return target->type->read_memory_imp(target, address, size, count, buffer);
}

static int target_soft_reset_halt_imp(struct target *target)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->soft_reset_halt_imp) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt_imp(target);
}

/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}

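/* Example: running a small helper with one register parameter.  A
 * minimal sketch, assuming the helper code has already been written to
 * a working area "wa" and halts at "exit_point"; the register name,
 * the address value, and the armv7m_info arch descriptor are all
 * illustrative:
 *
 *	struct reg_param reg_params[1];
 *	init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
 *	buf_set_u32(reg_params[0].value, 0, 32, 0x20000000);
 *	retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
 *			wa->address, exit_point, 1000, &armv7m_info);
 *	destroy_reg_param(&reg_params[0]);
 */
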
/**
 * Downloads a target-specific native code algorithm to the target,
 * executes and leaves it running.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t entry_point, uint32_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}

/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
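	/* on timeout the algorithm is still running on the target; keep
	 * running_alg set so the caller may wait again or abort it */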
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}

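/* Example: asynchronous use of the start/wait pair.  A sketch of a
 * caller that starts a helper, interacts with it while it runs, then
 * collects the result; a timed-out wait may simply be retried because
 * running_alg stays set.  The armv7m_info descriptor is illustrative:
 *
 *	retval = target_start_algorithm(target, 0, NULL, 1, reg_params,
 *			entry_point, exit_point, &armv7m_info);
 *	...	(feed data to the running helper, e.g. fill a FIFO)
 *	retval = target_wait_algorithm(target, 0, NULL, 1, reg_params,
 *			exit_point, 10000, &armv7m_info);
 */
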
/**
 * Executes a target-specific native code algorithm in the target.
 * It differs from target_run_algorithm in that the algorithm is asynchronous.
 * Because of this it requires a compliant algorithm:
 * see contrib/loaders/flash/stm32f1x.S for example.
 *
 * @param target used to run the algorithm
 */

int target_run_flash_async_algorithm(struct target *target,
		uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(!block_size || !(block_size & (block_size - 1)));

	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target and let it idle while writing the first chunk */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash write algorithm");
		return retval;
	}

	while (count > 0) {

		retval = target_read_u32(target, rp_addr, &rp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get read pointer");
			break;
		}

		LOG_DEBUG("count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32, count, wp, rp);

		if (rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		if ((rp & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. Make sure to not fill it completely,
		 * because that would make wp == rp and that's the empty condition. */
		uint32_t thisrun_bytes;
		if (rp > wp)
			thisrun_bytes = rp - wp - block_size;
		else if (rp > fifo_start_addr)
			thisrun_bytes = fifo_end_addr - wp;
		else
			thisrun_bytes = fifo_end_addr - wp - block_size;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * programming. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(10);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a stellaris using the new ICDI interface */
			if (timeout++ >= 500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to write */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Write data to fifo */
		retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap write pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		wp += thisrun_bytes;
		if (wp >= fifo_end_addr)
			wp = fifo_start_addr;

		/* Store updated write pointer to target */
		retval = target_write_u32(target, wp_addr, wp);
		if (retval != ERROR_OK)
			break;
	}

	if (retval != ERROR_OK) {
		/* abort flash write algorithm on target */
		target_write_u32(target, wp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash write algorithm");
		retval = retval2;
	}

	return retval;
}

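/* Sketch of the FIFO contract the target-side algorithm is expected to
 * follow, derived from the host-side loop above (see
 * contrib/loaders/flash/stm32f1x.S for a real implementation):
 *
 *	buffer_start + 0:  wp, written by the host, read by the target
 *	buffer_start + 4:  rp, written by the target, read by the host
 *	buffer_start + 8 ... buffer_start + buffer_size:  fifo data
 *
 * The target consumes block_size bytes at rp whenever rp != wp,
 * advances rp, wraps it back to fifo_start_addr at the end of the
 * fifo, and writes rp = 0 to report an error.  The host aborts the
 * algorithm by writing wp = 0.
 */
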
int target_read_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	return target->type->read_memory(target, address, size, count, buffer);
}

static int target_read_phys_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	return target->type->read_phys_memory(target, address, size, count, buffer);
}

int target_write_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	return target->type->write_memory(target, address, size, count, buffer);
}

static int target_write_phys_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	return target->type->write_phys_memory(target, address, size, count, buffer);
}

int target_bulk_write_memory(struct target *target,
		uint32_t address, uint32_t count, const uint8_t *buffer)
{
	return target->type->bulk_write_memory(target, address, count, buffer);
}

int target_add_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
		LOG_WARNING("target %s is not halted", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_breakpoint(target, breakpoint);
}

int target_add_context_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_context_breakpoint(target, breakpoint);
}

int target_add_hybrid_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_hybrid_breakpoint(target, breakpoint);
}

int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}

int target_add_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_watchpoint(target, watchpoint);
}
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}

int target_get_gdb_reg_list(struct target *target,
		struct reg **reg_list[], int *reg_list_size)
{
	return target->type->get_gdb_reg_list(target, reg_list, reg_list_size);
}
int target_step(struct target *target,
		int current, uint32_t address, int handle_breakpoints)
{
	return target->type->step(target, current, address, handle_breakpoints);
}

/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static void target_reset_examined(struct target *target)
{
	target->examined = false;
}

static int err_read_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	LOG_ERROR("Not implemented: %s", __func__);
	return ERROR_FAIL;
}

static int err_write_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	LOG_ERROR("Not implemented: %s", __func__);
	return ERROR_FAIL;
}

static int handle_target(void *priv);

static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (type->examine == NULL)
		type->examine = default_examine;

	if (type->check_reset == NULL)
		type->check_reset = default_check_reset;

	assert(type->init_target != NULL);

	int retval = type->init_target(cmd_ctx, target);
	if (ERROR_OK != retval) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/**
	 * @todo get rid of those *memory_imp() methods, now that all
	 * callers are using target_*_memory() accessors ... and make
	 * sure the "physical" paths handle the same issues.
	 */
	/* a non-invasive way (in terms of patches) to add some code that
	 * runs before the type->write/read_memory implementation
	 */
	type->write_memory_imp = target->type->write_memory;
	type->write_memory = target_write_memory_imp;

	type->read_memory_imp = target->type->read_memory;
	type->read_memory = target_read_memory_imp;

	type->soft_reset_halt_imp = target->type->soft_reset_halt;
	type->soft_reset_halt = target_soft_reset_halt_imp;

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (type->write_phys_memory == NULL) {
			LOG_ERROR("type '%s' is missing write_phys_memory",
					type->name);
			type->write_phys_memory = err_write_phys_memory;
		}
		if (type->read_phys_memory == NULL) {
			LOG_ERROR("type '%s' is missing read_phys_memory",
					type->name);
			type->read_phys_memory = err_read_phys_memory;
		}
		if (type->virt2phys == NULL) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	if (target->type->read_buffer == NULL)
		target->type->read_buffer = target_read_buffer_default;

	if (target->type->write_buffer == NULL)
		target->type->write_buffer = target_write_buffer_default;

	return ERROR_OK;
}

static int target_init(struct command_context *cmd_ctx)
{
	struct target *target;
	int retval;

	for (target = all_targets; target; target = target->next) {
		retval = target_init_one(cmd_ctx, target);
		if (ERROR_OK != retval)
			return retval;
	}

	if (!all_targets)
		return ERROR_OK;

	retval = target_register_user_commands(cmd_ctx);
	if (ERROR_OK != retval)
		return retval;

	retval = target_register_timer_callback(&handle_target,
			polling_interval, 1, cmd_ctx->interp);
	if (ERROR_OK != retval)
		return retval;

	return ERROR_OK;
}

COMMAND_HANDLER(handle_target_init_command)
{
	int retval;

	if (CMD_ARGC != 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	static bool target_initialized;
	if (target_initialized) {
		LOG_INFO("'target init' has already been called");
		return ERROR_OK;
	}
	target_initialized = true;

	retval = command_run_line(CMD_CTX, "init_targets");
	if (ERROR_OK != retval)
		return retval;

	retval = command_run_line(CMD_CTX, "init_board");
	if (ERROR_OK != retval)
		return retval;

	LOG_DEBUG("Initializing targets...");
	return target_init(CMD_CTX);
}

int target_register_event_callback(int (*callback)(struct target *target,
		enum target_event event, void *priv), void *priv)
{
	struct target_event_callback **callbacks_p = &target_event_callbacks;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (*callbacks_p) {
		while ((*callbacks_p)->next)
			callbacks_p = &((*callbacks_p)->next);
		callbacks_p = &((*callbacks_p)->next);
	}

	(*callbacks_p) = malloc(sizeof(struct target_event_callback));
	(*callbacks_p)->callback = callback;
	(*callbacks_p)->priv = priv;
	(*callbacks_p)->next = NULL;

	return ERROR_OK;
}

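/* Example: registering an event callback.  A minimal sketch; the
 * handler name is made up for illustration:
 *
 *	static int my_event_handler(struct target *target,
 *			enum target_event event, void *priv)
 *	{
 *		if (event == TARGET_EVENT_HALTED)
 *			LOG_INFO("%s halted", target_name(target));
 *		return ERROR_OK;
 *	}
 *	...
 *	target_register_event_callback(my_event_handler, NULL);
 */
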
int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
{
	struct target_timer_callback **callbacks_p = &target_timer_callbacks;
	struct timeval now;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (*callbacks_p) {
		while ((*callbacks_p)->next)
			callbacks_p = &((*callbacks_p)->next);
		callbacks_p = &((*callbacks_p)->next);
	}

	(*callbacks_p) = malloc(sizeof(struct target_timer_callback));
	(*callbacks_p)->callback = callback;
	(*callbacks_p)->periodic = periodic;
	(*callbacks_p)->time_ms = time_ms;

	gettimeofday(&now, NULL);
	(*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000;
	time_ms -= (time_ms % 1000);
	(*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000);
	if ((*callbacks_p)->when.tv_usec > 1000000) {
		(*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000;
		(*callbacks_p)->when.tv_sec += 1;
	}

	(*callbacks_p)->priv = priv;
	(*callbacks_p)->next = NULL;

	return ERROR_OK;
}

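/* Example: a periodic 100 ms poll, mirroring how target_init()
 * registers handle_target() above.  The callback name is illustrative:
 *
 *	static int my_tick(void *priv)
 *	{
 *		return ERROR_OK;
 *	}
 *	...
 *	target_register_timer_callback(my_tick, 100, 1, NULL);
 */
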
int target_unregister_event_callback(int (*callback)(struct target *target,
		enum target_event event, void *priv), void *priv)
{
	struct target_event_callback **p = &target_event_callbacks;
	struct target_event_callback *c = target_event_callbacks;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	while (c) {
		struct target_event_callback *next = c->next;
		if ((c->callback == callback) && (c->priv == priv)) {
			*p = next;
			free(c);
			return ERROR_OK;
		} else
			p = &(c->next);
		c = next;
	}

	return ERROR_OK;
}

static int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
{
	struct target_timer_callback **p = &target_timer_callbacks;
	struct target_timer_callback *c = target_timer_callbacks;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	while (c) {
		struct target_timer_callback *next = c->next;
		if ((c->callback == callback) && (c->priv == priv)) {
			*p = next;
			free(c);
			return ERROR_OK;
		} else
			p = &(c->next);
		c = next;
	}

	return ERROR_OK;
}

int target_call_event_callbacks(struct target *target, enum target_event event)
{
	struct target_event_callback *callback = target_event_callbacks;
	struct target_event_callback *next_callback;

	if (event == TARGET_EVENT_HALTED) {
		/* execute early halted first */
		target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
	}

	LOG_DEBUG("target event %i (%s)", event,
			Jim_Nvp_value2name_simple(nvp_target_event, event)->name);

	target_handle_event(target, event);

	while (callback) {
		next_callback = callback->next;
		callback->callback(target, event, callback->priv);
		callback = next_callback;
	}

	return ERROR_OK;
}

static int target_timer_callback_periodic_restart(
		struct target_timer_callback *cb, struct timeval *now)
{
	int time_ms = cb->time_ms;
	cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
	time_ms -= (time_ms % 1000);
	cb->when.tv_sec = now->tv_sec + time_ms / 1000;
	if (cb->when.tv_usec > 1000000) {
		cb->when.tv_usec = cb->when.tv_usec - 1000000;
		cb->when.tv_sec += 1;
	}
	return ERROR_OK;
}

static int target_call_timer_callback(struct target_timer_callback *cb,
		struct timeval *now)
{
	cb->callback(cb->priv);

	if (cb->periodic)
		return target_timer_callback_periodic_restart(cb, now);

	return target_unregister_timer_callback(cb->callback, cb->priv);
}

static int target_call_timer_callbacks_check_time(int checktime)
{
	keep_alive();

	struct timeval now;
	gettimeofday(&now, NULL);

	struct target_timer_callback *callback = target_timer_callbacks;
	while (callback) {
		/* cleaning up may unregister and free this callback */
		struct target_timer_callback *next_callback = callback->next;

		bool call_it = callback->callback &&
			((!checktime && callback->periodic) ||
			 now.tv_sec > callback->when.tv_sec ||
			 (now.tv_sec == callback->when.tv_sec &&
			  now.tv_usec >= callback->when.tv_usec));

		if (call_it) {
			int retval = target_call_timer_callback(callback, &now);
			if (retval != ERROR_OK)
				return retval;
		}

		callback = next_callback;
	}

	return ERROR_OK;
}

int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}

/* invoke periodic callbacks immediately */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}

/* Prints the working area layout for debug purposes */
static void print_wa_layout(struct target *target)
{
	struct working_area *c = target->working_areas;

	while (c) {
		LOG_DEBUG("%c%c 0x%08"PRIx32"-0x%08"PRIx32" (%"PRIu32" bytes)",
			c->backup ? 'b' : ' ', c->free ? ' ' : '*',
			c->address, c->address + c->size - 1, c->size);
		c = c->next;
	}
}

/* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
static void target_split_working_area(struct working_area *area, uint32_t size)
{
	assert(area->free);		/* Shouldn't split an allocated area */
	assert(size <= area->size);	/* Caller should guarantee this */

	/* Split only if not already the right size */
	if (size < area->size) {
		struct working_area *new_wa = malloc(sizeof(*new_wa));

		if (new_wa == NULL)
			return;

		new_wa->next = area->next;
		new_wa->size = area->size - size;
		new_wa->address = area->address + size;
		new_wa->backup = NULL;
		new_wa->user = NULL;
		new_wa->free = true;

		area->next = new_wa;
		area->size = size;

		/* If backup memory was allocated to this area, it has the wrong size
		 * now so free it and it will be reallocated if/when needed */
		if (area->backup) {
			free(area->backup);
			area->backup = NULL;
		}
	}
}

/* Merge all adjacent free areas into one */
static void target_merge_working_areas(struct target *target)
{
	struct working_area *c = target->working_areas;

	while (c && c->next) {
		assert(c->next->address == c->address + c->size);	/* This is an invariant */

		/* Find two adjacent free areas */
		if (c->free && c->next->free) {
			/* Merge the last into the first */
			c->size += c->next->size;

			/* Remove the last */
			struct working_area *to_be_freed = c->next;
			c->next = c->next->next;
			if (to_be_freed->backup)
				free(to_be_freed->backup);
			free(to_be_freed);

			/* If backup memory was allocated to the remaining area, it has
			 * the wrong size now */
			if (c->backup) {
				free(c->backup);
				c->backup = NULL;
			}
		} else {
			c = c->next;
		}
	}
}

int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state */
	if (target->working_areas == NULL) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory 0x%08"PRIx32,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory 0x%08"PRIx32,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = target->working_area_size & ~3UL;	/* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 bytes */
	if (size % 4)
		size = (size + 3) & (~3UL);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (c == NULL)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %"PRIu32" bytes at address 0x%08"PRIx32, size, c->address);

	if (target->backup_working_area) {
		if (c->backup == NULL) {
			c->backup = malloc(c->size);
			if (c->backup == NULL)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}

int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
{
	int retval;

	retval = target_alloc_working_area_try(target, size, area);
	if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		LOG_WARNING("not enough working area available (requested %"PRIu32")", size);
	return retval;
}

static int target_restore_working_area(struct target *target, struct working_area *area)
{
	int retval = ERROR_OK;

	if (target->backup_working_area && area->backup != NULL) {
		retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
		if (retval != ERROR_OK)
			LOG_ERROR("failed to restore %"PRIu32" bytes of working area at address 0x%08"PRIx32,
				area->size, area->address);
	}

	return retval;
}

/* Restore the area's backup memory, if any, and return the area to the allocation pool */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	int retval = ERROR_OK;

	if (area->free)
		return retval;

	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %"PRIu32" bytes of working area at address 0x%08"PRIx32,
		area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}

int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}

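/* Example: typical working area lifecycle.  A sketch of a flash driver
 * allocating scratch space for a helper; the size and the "code"
 * buffer are illustrative:
 *
 *	struct working_area *wa;
 *	if (target_alloc_working_area(target, 256, &wa) != ERROR_OK)
 *		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *	retval = target_write_buffer(target, wa->address, 256, code);
 *	...
 *	target_free_working_area(target, wa);
 */
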
/* free resources and restore memory, if restoring memory fails,
 * free up resources anyway
 */
static void target_free_all_working_areas_restore(struct target *target, int restore)
{
	struct working_area *c = target->working_areas;

	LOG_DEBUG("freeing all working areas");

	/* Loop through all areas, restoring the allocated ones and marking them as free */
	while (c) {
		if (!c->free) {
			if (restore)
				target_restore_working_area(target, c);
			c->free = true;
			*c->user = NULL;	/* Same as above */
			c->user = NULL;
		}
		c = c->next;
	}

	/* Run a merge pass to combine all areas into one */
	target_merge_working_areas(target);

	print_wa_layout(target);
}

void target_free_all_working_areas(struct target *target)
{
	target_free_all_working_areas_restore(target, 1);
}

/* Find the largest number of bytes that can be allocated */
uint32_t target_get_working_area_avail(struct target *target)
{
	struct working_area *c = target->working_areas;
	uint32_t max_size = 0;

	if (c == NULL)
		return target->working_area_size;

	while (c) {
		if (c->free && max_size < c->size)
			max_size = c->size;

		c = c->next;
	}

	return max_size;
}

int target_arch_state(struct target *target)
{
	int retval;
	if (target == NULL) {
		LOG_USER("No target has been configured");
		return ERROR_OK;
	}

	LOG_USER("target state: %s", target_state_name(target));

	if (target->state != TARGET_HALTED)
		return ERROR_OK;

	retval = target->type->arch_state(target);
	return retval;
}

/* Single aligned words are guaranteed to use 16 or 32 bit access
 * mode respectively, otherwise data is handled as quickly as
 * possible
 */
int target_write_buffer(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
{
	LOG_DEBUG("writing buffer of %i byte at 0x%8.8x",
			(int)size, (unsigned)address);

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	if (size == 0)
		return ERROR_OK;

	if ((address + size - 1) < address) {
		/* GDB can request this when e.g. PC is 0xfffffffc */
		LOG_ERROR("address + size wrapped (0x%08x, 0x%08x)",
				(unsigned)address,
				(unsigned)size);
		return ERROR_FAIL;
	}

	return target->type->write_buffer(target, address, size, buffer);
}

static int target_write_buffer_default(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
{
	int retval = ERROR_OK;

	if (((address % 2) == 0) && (size == 2))
		return target_write_memory(target, address, 2, 1, buffer);

	/* handle unaligned head bytes */
	if (address % 4) {
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		retval = target_write_memory(target, address, 1, unaligned, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4) {
		int aligned = size - (size % 4);

		/* use bulk writes above a certain limit. This may have to be changed */
		if (aligned > 128) {
			retval = target->type->bulk_write_memory(target, address, aligned / 4, buffer);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target, address, 4, aligned / 4, buffer);
			if (retval != ERROR_OK)
				return retval;
		}

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}

	/* handle tail writes of less than 4 bytes */
	if (size > 0) {
		retval = target_write_memory(target, address, 1, size, buffer);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}

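/* Worked example for the splitting above: a 10-byte write to address
 * 0x1002 becomes two accesses -- 2 head bytes at 0x1002-0x1003, then
 * two aligned words covering 0x1004-0x100b, with no tail bytes left,
 * so every access lands on its natural boundary.  (Addresses are
 * illustrative.) */
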
/* Single aligned words are guaranteed to use 16 or 32 bit access
 * mode respectively, otherwise data is handled as quickly as
 * possible
 */
int target_read_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
{
	LOG_DEBUG("reading buffer of %i byte at 0x%8.8x",
			(int)size, (unsigned)address);

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	if (size == 0)
		return ERROR_OK;

	if ((address + size - 1) < address) {
		/* GDB can request this when e.g. PC is 0xfffffffc */
		LOG_ERROR("address + size wrapped (0x%08" PRIx32 ", 0x%08" PRIx32 ")",
				address,
				size);
		return ERROR_FAIL;
	}

	return target->type->read_buffer(target, address, size, buffer);
}

static int target_read_buffer_default(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
{
	int retval = ERROR_OK;

	if (((address % 2) == 0) && (size == 2))
		return target_read_memory(target, address, 2, 1, buffer);

	/* handle unaligned head bytes */
	if (address % 4) {
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		retval = target_read_memory(target, address, 1, unaligned, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4) {
		int aligned = size - (size % 4);

		retval = target_read_memory(target, address, 4, aligned / 4, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}

	/* prefer 16-bit over byte accesses where possible (avoids AHB access limitations in some cases) */
	if (size >= 2) {
		int aligned = size - (size % 2);
		retval = target_read_memory(target, address, 2, aligned / 2, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}
	/* handle a tail read of a single byte */
	if (size > 0) {
		retval = target_read_memory(target, address, 1, size, buffer);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}

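/* Note the asymmetry with target_write_buffer_default(): the read
 * path inserts a 16-bit pass before the final byte pass, so e.g. a
 * 3-byte tail is fetched as one halfword access plus one byte access
 * instead of three byte accesses.
 */
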
int target_checksum_memory(struct target *target, uint32_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		buffer = malloc(size);
		if (buffer == NULL) {
			LOG_ERROR("error allocating buffer for section (%d bytes)", (int)size);
			return ERROR_FAIL;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}

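/* Usage sketch (the address and size are hypothetical): checksum a
 * region and report it. When the target type implements
 * checksum_memory() the CRC runs on the target; otherwise the code
 * above falls back to reading the memory and checksumming host-side.
 *
 *	uint32_t crc;
 *	if (target_checksum_memory(target, 0x08000000, 1024, &crc) == ERROR_OK)
 *		LOG_USER("crc32: 0x%8.8" PRIx32, crc);
 */
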
int target_blank_check_memory(struct target *target, uint32_t address, uint32_t size, uint32_t *blank)
{
	int retval;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	if (target->type->blank_check_memory == NULL)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	retval = target->type->blank_check_memory(target, address, size, blank);

	return retval;
}

int target_read_u32(struct target *target, uint32_t address, uint32_t *value)
{
	uint8_t value_buf[4];
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	int retval = target_read_memory(target, address, 4, 1, value_buf);

	if (retval == ERROR_OK) {
		*value = target_buffer_get_u32(target, value_buf);
		LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
				address,
				*value);
	} else {
		*value = 0x0;
		LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
				address);
	}

	return retval;
}

int target_read_u16(struct target *target, uint32_t address, uint16_t *value)
{
	uint8_t value_buf[2];
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	int retval = target_read_memory(target, address, 2, 1, value_buf);

	if (retval == ERROR_OK) {
		*value = target_buffer_get_u16(target, value_buf);
		LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
				address,
				*value);
	} else {
		*value = 0x0;
		LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
				address);
	}

	return retval;
}

int target_read_u8(struct target *target, uint32_t address, uint8_t *value)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	int retval = target_read_memory(target, address, 1, 1, value);

	if (retval == ERROR_OK) {
		LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
				address,
				*value);
	} else {
		*value = 0x0;
		LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
				address);
	}

	return retval;
}

int target_write_u32(struct target *target, uint32_t address, uint32_t value)
{
	int retval;
	uint8_t value_buf[4];
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
			address,
			value);

	target_buffer_set_u32(target, value_buf, value);
	retval = target_write_memory(target, address, 4, 1, value_buf);
	if (retval != ERROR_OK)
		LOG_DEBUG("failed: %i", retval);

	return retval;
}

int target_write_u16(struct target *target, uint32_t address, uint16_t value)
{
	int retval;
	uint8_t value_buf[2];
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
			address,
			value);

	target_buffer_set_u16(target, value_buf, value);
	retval = target_write_memory(target, address, 2, 1, value_buf);
	if (retval != ERROR_OK)
		LOG_DEBUG("failed: %i", retval);

	return retval;
}

int target_write_u8(struct target *target, uint32_t address, uint8_t value)
{
	int retval;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
			address, value);

	retval = target_write_memory(target, address, 1, 1, &value);
	if (retval != ERROR_OK)
		LOG_DEBUG("failed: %i", retval);

	return retval;
}

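/* Usage sketch for the u8/u16/u32 accessors (0x40021000 is a
 * hypothetical peripheral register, not one defined by this file):
 * values are converted with target_buffer_get/set_*(), so callers
 * always work in host byte order regardless of target endianness.
 *
 *	uint32_t val;
 *	if (target_read_u32(target, 0x40021000, &val) == ERROR_OK)
 *		target_write_u32(target, 0x40021000, val | 0x1);
 */
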
static int find_target(struct command_context *cmd_ctx, const char *name)
{
	struct target *target = get_target(name);
	if (target == NULL) {
		LOG_ERROR("Target: %s is unknown; use the 'targets' command to list all targets", name);
		return ERROR_FAIL;
	}
	if (!target->tap->enabled) {
		LOG_USER("Target: TAP %s is disabled, "
				"can't be the current target",
				target->tap->dotted_name);
		return ERROR_FAIL;
	}

	cmd_ctx->current_target = target->target_number;
	return ERROR_OK;
}

COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD_CTX, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	struct target *target = all_targets;
	command_print(CMD_CTX, "    TargetName         Type       Endian TapName            State       ");
	command_print(CMD_CTX, "--  ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		if (CMD_CTX->current_target == target->target_number)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD_CTX,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				Jim_Nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}

/* From the polling loop we periodically check for srst assertion/deassertion
 * and power dropout/restore, and run the corresponding Tcl event procs
 * (which typically issue a "reset halt"). Events are rate-limited to one
 * per 2 seconds. */

static int powerDropout;
static int srstAsserted;

static int runPowerRestore;
static int runPowerDropout;
static int runSrstAsserted;
static int runSrstDeasserted;

static int sense_handler(void)
{
	static int prevSrstAsserted;
	static int prevPowerdropout;

	int retval = jtag_power_dropout(&powerDropout);
	if (retval != ERROR_OK)
		return retval;

	int powerRestored;
	powerRestored = prevPowerdropout && !powerDropout;
	if (powerRestored)
		runPowerRestore = 1;

	long long current = timeval_ms();
	static long long lastPower;
	int waitMore = lastPower + 2000 > current;
	if (powerDropout && !waitMore) {
		runPowerDropout = 1;
		lastPower = current;
	}

	retval = jtag_srst_asserted(&srstAsserted);
	if (retval != ERROR_OK)
		return retval;

	int srstDeasserted;
	srstDeasserted = prevSrstAsserted && !srstAsserted;

	static long long lastSrst;
	waitMore = lastSrst + 2000 > current;
	if (srstDeasserted && !waitMore) {
		runSrstDeasserted = 1;
		lastSrst = current;
	}

	if (!prevSrstAsserted && srstAsserted)
		runSrstAsserted = 1;

	prevSrstAsserted = srstAsserted;
	prevPowerdropout = powerDropout;

	if (srstDeasserted || powerRestored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}

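/* Debounce timeline (illustrative): if srst deasserts at t = 0 ms and
 * again at t = 1500 ms, only the first event sets runSrstDeasserted;
 * the second falls inside the 2000 ms holdoff window
 * (lastSrst + 2000 > current) and is dropped.
 */
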
static int backoff_times;
static int backoff_count;

/* process target state changes */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (runSrstAsserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (runSrstDeasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (runPowerDropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (runPowerRestore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		runSrstAsserted = 0;
		runSrstDeasserted = 0;
		runPowerRestore = 0;
		runPowerDropout = 0;

		recursive = 0;
	}

	if (backoff_times > backoff_count) {
		/* do not poll this time as we failed previously */
		backoff_count++;
		return ERROR_OK;
	}
	backoff_count = 0;

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {
		if (!target->tap->enabled)
			continue;

		/* only poll target if we've got power and srst isn't asserted */
		if (!powerDropout && !srstAsserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* polling failed, so back off exponentially, up to
				 * 5000ms between attempts */
				if (backoff_times * polling_interval < 5000) {
					backoff_times *= 2;
					backoff_times++;
				}
				LOG_USER("Polling target failed, GDB will be halted. Polling again in %dms",
						backoff_times * polling_interval);

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
				return retval;
			}
			/* Since we succeeded, we reset backoff count */
			if (backoff_times > 0)
				LOG_USER("Polling succeeded again");
			backoff_times = 0;
		}
	}

	return retval;
}

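/* Backoff worked example, assuming a 100 ms base polling interval:
 * consecutive poll failures report retries after 100, 300, 700, 1500,
 * 3100 and 6300 ms; backoff_times then stays at 63, because
 * 63 * 100 is no longer below the 5000 ms threshold.
 */
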
COMMAND_HANDLER(handle_reg_command)
{
	struct target *target;
	struct reg *reg = NULL;
	unsigned count = 0;
	char *value;

	LOG_DEBUG("-");

	target = get_current_target(CMD_CTX);

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD_CTX, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* only print cached values if they are valid */
				if (reg->valid) {
					value = buf_to_str(reg->value,
							reg->size, 16);
					command_print(CMD_CTX,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
								? " (dirty)"
								: "");
					free(value);
				} else {
					command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		struct reg_cache *cache = target->reg_cache;
		count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD_CTX, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);

		if (!reg) {
			command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
			return ERROR_OK;
		}
	}

	assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0)
			reg->type->get(reg);
		value = buf_to_str(reg->value, reg->size, 16);
		command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (buf == NULL)
			return ERROR_FAIL;
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		reg->type->set(reg, buf);

		value = buf_to_str(reg->value, reg->size, 16);
		command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);

		free(buf);

		return ERROR_OK;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;
}

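/* Usage sketch for the reg command, as parsed above (register names
 * and values are illustrative):
 *
 *	reg                   # list all registers, cached values only
 *	reg 0                 # display register number 0
 *	reg pc                # display the register named "pc"
 *	reg pc force          # re-read "pc" from the target
 *	reg pc 0x08000000     # write 0x08000000 to "pc"
 */
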
COMMAND_HANDLER(handle_poll_command)
{
	int retval = ERROR_OK;
	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC == 0) {
		command_print(CMD_CTX, "background polling: %s",
				jtag_poll_get_enabled() ? "on" : "off");
		command_print(CMD_CTX, "TAP: %s (%s)",
				target->tap->dotted_name,
				target->tap->enabled ? "enabled" : "disabled");
		if (!target->tap->enabled)
			return ERROR_OK;
		retval = target_poll(target);
		if (retval != ERROR_OK)
			return retval;
		retval = target_arch_state(target);
		if (retval != ERROR_OK)
			return retval;
	} else if (CMD_ARGC == 1) {
		bool enable;
		COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
		jtag_poll_set_enabled(enable);
	} else
		return ERROR_COMMAND_SYNTAX_ERROR;

	return retval;
}

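/* Usage sketch, matching the argument handling above:
 *
 *	poll            # show polling state and poll the current target
 *	poll off        # disable background polling
 *	poll on         # re-enable background polling
 */
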
COMMAND_HANDLER(handle_wait_halt_command)
{
	if (CMD_ARGC > 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	unsigned ms = 5000;
	if (1 == CMD_ARGC) {
		int retval = parse_uint(CMD_ARGV[0], &ms);
		if (ERROR_OK != retval)
			return ERROR_COMMAND_SYNTAX_ERROR;
		/* convert seconds (given) to milliseconds (needed) */
		ms *= 1000;
	}

	struct target *target = get_current_target(CMD_CTX);
	return target_wait_state(target, TARGET_HALTED, ms);
}

/* wait for target state to change. The trick here is to have a low
 * latency for short waits and not to suck up all the CPU time
 * on longer waits.
 *
 * After 500ms, keep_alive() is invoked
 */
int target_wait_state(struct target *target, enum target_state state, int ms)
{
	int retval;
	long long then = 0, cur;
	int once = 1;

	for (;;) {
		retval = target_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (target->state == state)
			break;
		cur = timeval_ms();
		if (once) {
			once = 0;
			then = timeval_ms();
			LOG_DEBUG("waiting for target %s...",
					Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
		}

		if (cur - then > 500)
			keep_alive();

		if ((cur - then) > ms) {
			LOG_ERROR("timed out while waiting for target %s",
					Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
			return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}

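/* Usage sketch: wait up to half a second for the current target to
 * halt, with keep_alive() holding off GDB timeouts on longer waits.
 *
 *	retval = target_wait_state(target, TARGET_HALTED, 500);
 *	if (retval != ERROR_OK)
 *		return retval;
 */
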
COMMAND_HANDLER(handle_halt_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	int retval = target_halt(target);
	if (ERROR_OK != retval)
		return retval;

	if (CMD_ARGC == 1) {
		unsigned wait_local;
		retval = parse_uint(CMD_ARGV[0], &wait_local);
		if (ERROR_OK != retval)
			return ERROR_COMMAND_SYNTAX_ERROR;
		if (!wait_local)
			return ERROR_OK;
	}

	return CALL_COMMAND_HANDLER(handle_wait_halt_command);
}

COMMAND_HANDLER(handle_soft_reset_halt_command)
{
	struct target *target = get_current_target(CMD_CTX);

	LOG_USER("requesting target halt and executing a soft reset");

	return target->type->soft_reset_halt(target);
}

COMMAND_HANDLER(handle_reset_command)
{
	if (CMD_ARGC > 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	enum target_reset_mode reset_mode = RESET_RUN;
	if (CMD_ARGC == 1) {
		const Jim_Nvp *n;
		n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
		if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
			return ERROR_COMMAND_SYNTAX_ERROR;
		reset_mode = n->value;
	}

	/* reset *all* targets */
	return target_process_reset(CMD_CTX, reset_mode);
}

COMMAND_HANDLER(handle_resume_command)
{
	int current = 1;
	if (CMD_ARGC > 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	struct target *target = get_current_target(CMD_CTX);

	/* with no CMD_ARGV, resume from current pc, addr = 0,
	 * with one argument, addr = CMD_ARGV[0],
	 * handle breakpoints, not debugging */
	uint32_t addr = 0;
	if (CMD_ARGC == 1) {
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
		current = 0;
	}

	return target_resume(target, current, addr, 1, 0);
}

COMMAND_HANDLER(handle_step_command)
{
	if (CMD_ARGC > 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	LOG_DEBUG("-");

	/* with no CMD_ARGV, step from current pc, addr = 0,
	 * with one argument addr = CMD_ARGV[0],
	 * handle breakpoints, debugging */
	uint32_t addr = 0;
	int current_pc = 1;
	if (CMD_ARGC == 1) {
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
		current_pc = 0;
	}

	struct target *target = get_current_target(CMD_CTX);

	return target->type->step(target, current_pc, addr, 1);
}

static void handle_md_output(struct command_context *cmd_ctx,
		struct target *target, uint32_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	const unsigned line_bytecnt = 32;
	unsigned line_modulo = line_bytecnt / size;

	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	const char *value_fmt;
	switch (size) {
	case 4:
		value_fmt = "%8.8x ";
		break;
	case 2:
		value_fmt = "%4.4x ";
		break;
	case 1:
		value_fmt = "%2.2x ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		if (i % line_modulo == 0) {
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					"0x%8.8x: ",
					(unsigned)(address + (i * size)));
		}

		uint32_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		switch (size) {
		case 4:
			value = target_buffer_get_u32(target, value_ptr);
			break;
		case 2:
			value = target_buffer_get_u16(target, value_ptr);
			break;
		case 1:
			value = *value_ptr;
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			command_print(cmd_ctx, "%s", output);
			output_len = 0;
		}
	}
}

COMMAND_HANDLER(handle_md_command)
{
	if (CMD_ARGC < 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	unsigned size = 0;
	switch (CMD_NAME[2]) {
	case 'w':
		size = 4;
		break;
	case 'h':
		size = 2;
		break;
	case 'b':
		size = 1;
		break;
	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
	int (*fn)(struct target *target,
			uint32_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
	if (physical) {
		CMD_ARGC--;
		CMD_ARGV++;
		fn = target_read_phys_memory;
	} else
		fn = target_read_memory;
	if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
		return ERROR_COMMAND_SYNTAX_ERROR;

	uint32_t address;
	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);

	unsigned count = 1;
	if (CMD_ARGC == 2)
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);

	uint8_t *buffer = calloc(count, size);
	if (buffer == NULL) {
		LOG_ERROR("Out of memory");
		return ERROR_FAIL;
	}

	struct target *target = get_current_target(CMD_CTX);
	int retval = fn(target, address, size, count, buffer);
	if (ERROR_OK == retval)
		handle_md_output(CMD_CTX, target, address, size, count, buffer);

	free(buffer);

	return retval;
}

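/* Usage sketch (address and data are hypothetical): dump eight words,
 * optionally bypassing the MMU with "phys". With size 4 and the
 * 32-byte line width above, handle_md_output() prints eight values
 * per line, e.g.:
 *
 *	mdw 0x20000000 8
 *	mdw phys 0x20000000 8
 *
 *	0x20000000: deadbeef 00000001 00000002 00000003 00000004 00000005 00000006 00000007
 */
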
typedef int (*target_write_fn)(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer);

static int target_write_memory_fast(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	return target_write_buffer(target, address, size * count, buffer);
}

static int target_fill_mem(struct target *target,
		uint32_t address,
		target_write_fn fn,
		unsigned data_size,
		/* value */
		uint32_t b,
		/* count */
		unsigned c)
{
	/* We have to write in reasonably large chunks to be able
	 * to fill large memory areas with any sane speed */
	const unsigned chunk_size = 16384;
	uint8_t *target_buf = malloc(chunk_size * data_size);
	if (target_buf == NULL) {
		LOG_ERROR("Out of memory");
		return ERROR_FAIL;
	}

	for (unsigned i = 0; i < chunk_size; i++) {
		switch (data_size) {
		case 4:
			target_buffer_set_u32(target, target_buf + i * data_size, b);
			break;
		case 2:
			target_buffer_set_u16(target, target_buf + i * data_size, b);
			break;
		case 1:
			target_buffer_set_u8(target, target_buf + i * data_size, b);
			break;
		default:
			/* "can't happen": every caller passes a validated word size */
			exit(-1);
		}
	}

	int retval = ERROR_OK;

	for (unsigned x = 0; x < c; x += chunk_size) {
		unsigned current;
		current = c - x;
		if (current > chunk_size)
			current = chunk_size;
		retval = fn(target, address + x * data_size, data_size, current, target_buf);
		if (retval != ERROR_OK)
			break;
		/* avoid GDB timeouts */
		keep_alive();
	}
	free(target_buf);

	return retval;
}

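/* Usage sketch: fill 256 words (1 KiB) at a hypothetical SRAM address
 * with a pattern, writing in 16384-element chunks and calling
 * keep_alive() between chunks.
 *
 *	retval = target_fill_mem(target, 0x20000000,
 *			target_write_memory_fast, 4, 0xdeadbeef, 256);
 */
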
COMMAND_HANDLER(handle_mw_command)
{
	if (CMD_ARGC < 2)
		return ERROR_COMMAND_SYNTAX_ERROR;
	bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
	target_write_fn fn;
	if (physical) {
		CMD_ARGC--;
		CMD_ARGV++;
		fn = target_write_phys_memory;
	} else
		fn = target_write_memory_fast;
	if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
		return ERROR_COMMAND_SYNTAX_ERROR;

	uint32_t address;
	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);

	uint32_t value;
	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);

	unsigned count = 1;
	if (CMD_ARGC == 3)
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);

	struct target *target = get_current_target(CMD_CTX);
	unsigned wordsize;
	switch (CMD_NAME[2]) {
	case 'w':
		wordsize = 4;
		break;
	case 'h':
		wordsize = 2;
		break;
	case 'b':
		wordsize = 1;
		break;
	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	return target_fill_mem(target, address, fn, wordsize, value, count);
}

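/* Usage sketch, matching the parsing above (addresses and values are
 * illustrative):
 *
 *	mww 0x20000000 0xdeadbeef        # write one word
 *	mww 0x20000000 0x0 256           # zero 256 words
 *	mwb phys 0x20000000 0xff         # byte write, bypassing the MMU
 */
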
static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
		uint32_t *min_address, uint32_t *max_address)
{
	if (CMD_ARGC < 1 || CMD_ARGC > 5)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* a base address isn't always necessary,
	 * default to 0x0 (i.e. don't relocate) */
	if (CMD_ARGC >= 2) {
		uint32_t addr;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
		image->base_address = addr;
		image->base_address_set = 1;
	} else
		image->base_address_set = 0;

	image->start_address_set = 0;

	if (CMD_ARGC >= 4)
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], *min_address);
	if (CMD_ARGC == 5) {
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], *max_address);
		/* use size (given) to find max (required) */
		*max_address += *min_address;
	}

	if (*min_address > *max_address)
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}

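/* Argument layout accepted by this helper (values illustrative):
 *
 *	load_image filename [address [type [min_address [size]]]]
 *	load_image firmware.bin 0x08000000 bin 0x08000000 0x1000
 *
 * The last two arguments clip the download window; the given size is
 * converted above into max_address = min_address + size.
 */
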
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	uint32_t min_address = 0;
	uint32_t max_address = 0xffffffff;
	int i;
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
			&image, &min_address, &max_address);
	if (ERROR_OK != retval)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_OK;

	image_size = 0x0;
	retval = ERROR_OK;
	for (i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (buffer == NULL) {
			command_print(CMD_CTX,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address - image.sections[i].base_address;
				length -= offset;
			}

			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt) - max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD_CTX, "%u bytes written at address 0x%8.8" PRIx32 "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;
}

COMMAND_HANDLER(handle_dump_image_command)
{
	struct fileio fileio;
	uint8_t *buffer;
	int retval, retvaltemp;
	uint32_t address, size;
	struct duration bench;
	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC != 3)
		return ERROR_COMMAND_SYNTAX_ERROR;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], address);
	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], size);

	uint32_t buf_size = (size > 4096) ? 4096 : size;
	buffer = malloc(buf_size);
	if (!buffer)
		return ERROR_FAIL;

	retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
	if (retval != ERROR_OK) {
		free(buffer);
		return retval;
	}

	duration_start(&bench);

	while (size > 0) {
		size_t size_written;
		uint32_t this_run_size = (size > buf_size) ? buf_size : size;
		retval = target_read_buffer(target, address, this_run_size, buffer);
		if (retval != ERROR_OK)
			break;

		retval = fileio_write(&fileio, this_run_size, buffer, &size_written);
		if (retval != ERROR_OK)
			break;

		size -= this_run_size;
		address += this_run_size;
	}

	free(buffer);

	if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
		int filesize;
		retval = fileio_size(&fileio, &filesize);
		/* report the transfer rate, but don't return before the file is closed */
		if (retval == ERROR_OK)
			command_print(CMD_CTX,
					"dumped %ld bytes in %fs (%0.3f KiB/s)", (long)filesize,
					duration_elapsed(&bench), duration_kbps(&bench, filesize));
	}

	retvaltemp = fileio_close(&fileio);
	if (retvaltemp != ERROR_OK)
		return retvaltemp;

	return retval;
}

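/* Usage sketch (file name, address and size are illustrative): dump
 * 16 KiB of RAM to a file, read in 4 KiB chunks by the loop above.
 *
 *	dump_image sram.bin 0x20000000 0x4000
 */
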
static COMMAND_HELPER(handle_verify_image_command_internal, int verify)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	int i;
	int retval;
	uint32_t checksum = 0;
	uint32_t mem_checksum = 0;

	struct image image;

	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC < 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (!target) {
		LOG_ERROR("no target selected");
		return ERROR_FAIL;
	}

	struct duration bench;
	duration_start(&bench);

	if (CMD_ARGC >= 2) {
		uint32_t addr;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
		image.base_address = addr;
		image.base_address_set = 1;
	} else {
		image.base_address_set = 0;
		image.base_address = 0x0;
	}

	image.start_address_set = 0;

	retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
	if (retval != ERROR_OK)
		return retval;

	image_size = 0x0;
	int diffs = 0;
	retval = ERROR_OK;
	for (i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (buffer == NULL) {
			command_print(CMD_CTX,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			break;
		}
		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		if (verify) {
			/* calculate checksum of image */
			retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}

			retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}

			if (checksum != mem_checksum) {
				/* failed crc checksum, fall back to a binary compare */
				uint8_t *data;

				if (diffs == 0)
					LOG_ERROR("checksum mismatch - attempting binary compare");

				data = malloc(buf_cnt);
				if (data == NULL) {
					LOG_ERROR("error allocating buffer for binary compare (%zu bytes)", buf_cnt);
					free(buffer);
					break;
				}

				/* Can we use 32bit word accesses? */
				int size = 1;
				int count = buf_cnt;
				if ((count % 4) == 0) {
					size *= 4;
					count /= 4;
				}
				retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
				if (retval == ERROR_OK) {
					uint32_t t;
					for (t = 0; t < buf_cnt; t++) {
						if (data[t] != buffer[t]) {
							command_print(CMD_CTX,
									"diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
									diffs,
									(unsigned)(t + image.sections[i].base_address),
									data[t],
									buffer[t]);
							if (diffs++ >= 127) {
								command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
								free(data);
								free(buffer);
								goto done;
							}
						}
						keep_alive();
					}
				}
				free(data);
			}
		} else {
			command_print(CMD_CTX, "address 0x%08" PRIx32 " length 0x%08zx",
					image.sections[i].base_address,
					buf_cnt);
		}

		free(buffer);
		image_size += buf_cnt;
	}
	if (diffs > 0)
		command_print(CMD_CTX, "No more differences found.");
done:
	if (diffs > 0)
		retval = ERROR_FAIL;
	if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD_CTX, "verified %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;
}

COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 1);
}

COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 0);
}

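/* Usage sketch: both commands take the same arguments as load_image's
 * first three. verify_image checksums each section against target
 * memory, falling back to a binary compare on mismatch; test_image
 * only walks the image file and prints each section's address and
 * length.
 *
 *	verify_image firmware.bin 0x08000000 bin
 *	test_image firmware.bin 0x08000000
 */
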
static int handle_bp_command_list(struct command_context *cmd_ctx)
{
	struct target *target = get_current_target(cmd_ctx);
	struct breakpoint *breakpoint = target->breakpoints;
	while (breakpoint) {
		if (breakpoint->type == BKPT_SOFT) {
			char *buf = buf_to_str(breakpoint->orig_instr,
					breakpoint->length, 16);
			command_print(cmd_ctx, "IVA breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i, 0x%s",
					breakpoint->address,
					breakpoint->length,
					breakpoint->set, buf);
			free(buf);
		} else {
			if ((breakpoint->address == 0) && (breakpoint->asid != 0))
				command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
						breakpoint->asid,
						breakpoint->length, breakpoint->set);
			else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
				command_print(cmd_ctx, "Hybrid breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
						breakpoint->address,
						breakpoint->length, breakpoint->set);
				command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
						breakpoint->asid);
			} else
				command_print(cmd_ctx, "Breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
						breakpoint->address,
						breakpoint->length, breakpoint->set);
		}

		breakpoint = breakpoint->next;
	}
	return ERROR_OK;
}

static int handle_bp_command_set(struct command_context *cmd_ctx,
		uint32_t addr, uint32_t asid, uint32_t length, int hw)
{
	struct target *target = get_current_target(cmd_ctx);

	if (asid == 0) {
		int retval = breakpoint_add(target, addr, length, hw);
		if (ERROR_OK == retval)
			command_print(cmd_ctx, "breakpoint set at 0x%8.8" PRIx32 "", addr);
		else {
			LOG_ERROR("Failure setting breakpoint, the same address (IVA) is already used");
			return retval;
		}
	} else if (addr == 0) {
		int retval = context_breakpoint_add(target, asid, length, hw);
		if (ERROR_OK == retval)
			command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
		else {
			LOG_ERROR("Failure setting breakpoint, the same address (CONTEXTID) is already used");
			return retval;
		}
	} else {
		int retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
		if (ERROR_OK == retval)
			command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", addr);
		else {
			LOG_ERROR("Failure setting breakpoint, the same address is already used");
			return retval;
		}
	}
	return ERROR_OK;
}

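/* The three breakpoint forms handled above (address, ASID and length
 * values are illustrative):
 *
 *	handle_bp_command_set(cmd_ctx, 0x00001234, 0, 4, 1);      IVA breakpoint
 *	handle_bp_command_set(cmd_ctx, 0, 0x22, 4, 1);            context breakpoint
 *	handle_bp_command_set(cmd_ctx, 0x00001234, 0x22, 4, 1);   hybrid breakpoint
 */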