Change return value on error.
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 √ėyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net> *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * This program is free software; you can redistribute it and/or modify *
24 * it under the terms of the GNU General Public License as published by *
25 * the Free Software Foundation; either version 2 of the License, or *
26 * (at your option) any later version. *
27 * *
28 * This program is distributed in the hope that it will be useful, *
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
31 * GNU General Public License for more details. *
32 * *
33 * You should have received a copy of the GNU General Public License *
34 * along with this program; if not, write to the *
35 * Free Software Foundation, Inc., *
36 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
37 ***************************************************************************/
38 #ifdef HAVE_CONFIG_H
39 #include "config.h"
40 #endif
41
42 #include <helper/time_support.h>
43 #include <jtag/jtag.h>
44 #include <flash/nor/core.h>
45
46 #include "target.h"
47 #include "target_type.h"
48 #include "target_request.h"
49 #include "breakpoints.h"
50 #include "register.h"
51 #include "trace.h"
52 #include "image.h"
53 #include "rtos/rtos.h"
54
55
56 static int target_read_buffer_default(struct target *target, uint32_t address,
57 uint32_t size, uint8_t *buffer);
58 static int target_write_buffer_default(struct target *target, uint32_t address,
59 uint32_t size, const uint8_t *buffer);
60 static int target_array2mem(Jim_Interp *interp, struct target *target,
61 int argc, Jim_Obj *const *argv);
62 static int target_mem2array(Jim_Interp *interp, struct target *target,
63 int argc, Jim_Obj *const *argv);
64 static int target_register_user_commands(struct command_context *cmd_ctx);
65
66 /* targets */
67 extern struct target_type arm7tdmi_target;
68 extern struct target_type arm720t_target;
69 extern struct target_type arm9tdmi_target;
70 extern struct target_type arm920t_target;
71 extern struct target_type arm966e_target;
72 extern struct target_type arm946e_target;
73 extern struct target_type arm926ejs_target;
74 extern struct target_type fa526_target;
75 extern struct target_type feroceon_target;
76 extern struct target_type dragonite_target;
77 extern struct target_type xscale_target;
78 extern struct target_type cortexm3_target;
79 extern struct target_type cortexa8_target;
80 extern struct target_type arm11_target;
81 extern struct target_type mips_m4k_target;
82 extern struct target_type avr_target;
83 extern struct target_type dsp563xx_target;
84 extern struct target_type dsp5680xx_target;
85 extern struct target_type testee_target;
86 extern struct target_type avr32_ap7k_target;
87 extern struct target_type stm32_stlink_target;
88
/* NULL-terminated table of every target type linked into this build;
 * consulted when a configuration command names a target type. */
static struct target_type *target_types[] =
{
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm3_target,
	&cortexa8_target,
	&arm11_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&stm32_stlink_target,
	NULL,
};

/* head of the singly-linked list of all configured targets */
struct target *all_targets = NULL;
/* registered event and timer callback lists */
static struct target_event_callback *target_event_callbacks = NULL;
static struct target_timer_callback *target_timer_callbacks = NULL;
/* period, in ms, passed to target_register_timer_callback() for polling */
static const int polling_interval = 100;
119
/* spellings accepted for assert/deassert arguments in Tcl commands */
static const Jim_Nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};

/* printable names for ERROR_TARGET_* codes; see target_strerror_safe() */
static const Jim_Nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
144
145 static const char *target_strerror_safe(int err)
146 {
147 const Jim_Nvp *n;
148
149 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
150 if (n->name == NULL) {
151 return "unknown";
152 } else {
153 return n->name;
154 }
155 }
156
/* Tcl spellings for target events (used by "-event <name> {...}").
 * NOTE(review): the duplicate resume-* entries at the end appear to
 * exist only as name aliases (e.g. "resume-ok"); confirm lookup-order
 * behavior of Jim_Nvp before removing them. */
static const Jim_Nvp nvp_target_event[] = {
	/* historical names kept for backward compatibility */
	{ .value = TARGET_EVENT_OLD_gdb_program_config , .name = "old-gdb_program_config" },
	{ .value = TARGET_EVENT_OLD_pre_resume , .name = "old-pre_resume" },

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	/* historical name */

	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },

	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_HALT_PRE, .name = "reset-halt-pre" },
	{ .value = TARGET_EVENT_RESET_HALT_POST, .name = "reset-halt-post" },
	{ .value = TARGET_EVENT_RESET_WAIT_PRE, .name = "reset-wait-pre" },
	{ .value = TARGET_EVENT_RESET_WAIT_POST, .name = "reset-wait-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },

	/* duplicates of entries above; provide extra name spellings */
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUMED , .name = "resume-ok" },
	{ .value = TARGET_EVENT_RESUME_END , .name = "resume-end" },

	{ .name = NULL, .value = -1 }
};
207
/* printable names for enum target_state values */
static const Jim_Nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};

/* printable names for enum target_debug_reason values */
static const Jim_Nvp nvp_target_debug_reason [] = {
	{ .name = "debug-request" , .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step" , .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED },
	{ .name = "undefined" , .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};

/* endianness spellings accepted from Tcl; "be"/"le" are shorthands */
static const Jim_Nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};

/* reset mode names; see target_process_reset() */
static const Jim_Nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run" , .value = RESET_RUN },
	{ .name = "halt" , .value = RESET_HALT },
	{ .name = "init" , .value = RESET_INIT },
	{ .name = NULL , .value = -1 },
};
243
244 const char *debug_reason_name(struct target *t)
245 {
246 const char *cp;
247
248 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
249 t->debug_reason)->name;
250 if (!cp) {
251 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
252 cp = "(*BUG*unknown*BUG*)";
253 }
254 return cp;
255 }
256
257 const char *
258 target_state_name( struct target *t )
259 {
260 const char *cp;
261 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
262 if( !cp ){
263 LOG_ERROR("Invalid target state: %d", (int)(t->state));
264 cp = "(*BUG*unknown*BUG*)";
265 }
266 return cp;
267 }
268
269 /* determine the number of the new target */
270 static int new_target_number(void)
271 {
272 struct target *t;
273 int x;
274
275 /* number is 0 based */
276 x = -1;
277 t = all_targets;
278 while (t) {
279 if (x < t->target_number) {
280 x = t->target_number;
281 }
282 t = t->next;
283 }
284 return x + 1;
285 }
286
287 /* read a uint32_t from a buffer in target memory endianness */
288 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
289 {
290 if (target->endianness == TARGET_LITTLE_ENDIAN)
291 return le_to_h_u32(buffer);
292 else
293 return be_to_h_u32(buffer);
294 }
295
296 /* read a uint24_t from a buffer in target memory endianness */
297 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
298 {
299 if (target->endianness == TARGET_LITTLE_ENDIAN)
300 return le_to_h_u24(buffer);
301 else
302 return be_to_h_u24(buffer);
303 }
304
305 /* read a uint16_t from a buffer in target memory endianness */
306 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
307 {
308 if (target->endianness == TARGET_LITTLE_ENDIAN)
309 return le_to_h_u16(buffer);
310 else
311 return be_to_h_u16(buffer);
312 }
313
314 /* read a uint8_t from a buffer in target memory endianness */
315 static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
316 {
317 return *buffer & 0x0ff;
318 }
319
/* write a uint32_t to a buffer in target memory endianness */
void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
{
	/* non-little (including unknown) endianness falls through to big */
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u32_to_le(buffer, value);
	else
		h_u32_to_be(buffer, value);
}

/* write a 24-bit value to a buffer in target memory endianness */
void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u24_to_le(buffer, value);
	else
		h_u24_to_be(buffer, value);
}

/* write a uint16_t to a buffer in target memory endianness */
void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u16_to_le(buffer, value);
	else
		h_u16_to_be(buffer, value);
}
346
347 /* write a uint8_t to a buffer in target memory endianness */
348 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
349 {
350 *buffer = value;
351 }
352
/* read an array of uint32_t from a target-endian buffer into host-order
 * dstbuf (the original comment said "write"; this function reads) */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	uint32_t i;
	for(i = 0; i < count; i ++)
		dstbuf[i] = target_buffer_get_u32(target,&buffer[i*4]);
}
360
/* read an array of uint16_t from a target-endian buffer into host-order
 * dstbuf (the original comment said "write"; this function reads) */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	uint32_t i;
	for(i = 0; i < count; i ++)
		dstbuf[i] = target_buffer_get_u16(target,&buffer[i*2]);
}
368
369 /* write a uint32_t array to a buffer in target memory endianness */
370 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, uint32_t *srcbuf)
371 {
372 uint32_t i;
373 for(i = 0; i < count; i ++)
374 target_buffer_set_u32(target,&buffer[i*4],srcbuf[i]);
375 }
376
377 /* write a uint16_t array to a buffer in target memory endianness */
378 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, uint16_t *srcbuf)
379 {
380 uint32_t i;
381 for(i = 0; i < count; i ++)
382 target_buffer_set_u16(target,&buffer[i*2],srcbuf[i]);
383 }
384
385 /* return a pointer to a configured target; id is name or number */
386 struct target *get_target(const char *id)
387 {
388 struct target *target;
389
390 /* try as tcltarget name */
391 for (target = all_targets; target; target = target->next) {
392 if (target->cmd_name == NULL)
393 continue;
394 if (strcmp(id, target->cmd_name) == 0)
395 return target;
396 }
397
398 /* It's OK to remove this fallback sometime after August 2010 or so */
399
400 /* no match, try as number */
401 unsigned num;
402 if (parse_uint(id, &num) != ERROR_OK)
403 return NULL;
404
405 for (target = all_targets; target; target = target->next) {
406 if (target->target_number == (int)num) {
407 LOG_WARNING("use '%s' as target identifier, not '%u'",
408 target->cmd_name, num);
409 return target;
410 }
411 }
412
413 return NULL;
414 }
415
416 /* returns a pointer to the n-th configured target */
417 static struct target *get_target_by_num(int num)
418 {
419 struct target *target = all_targets;
420
421 while (target) {
422 if (target->target_number == num) {
423 return target;
424 }
425 target = target->next;
426 }
427
428 return NULL;
429 }
430
/* Return the target selected by the command context.  The index is
 * maintained internally, so a miss is a programming error: abort. */
struct target* get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_target_by_num(cmd_ctx->current_target);

	if (target == NULL)
	{
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return target;
}
443
/* Poll the target driver for state changes, and time out a pending
 * halt request (see target_halt()) that has not completed within 1s. */
int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target))
	{
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->halt_issued)
	{
		if (target->state == TARGET_HALTED)
		{
			/* requested halt completed */
			target->halt_issued = false;
		} else
		{
			/* elapsed ms since target_halt() set the timestamp */
			long long t = timeval_ms() - target->halt_issued_time;
			if (t>1000)
			{
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}
478
479 int target_halt(struct target *target)
480 {
481 int retval;
482 /* We can't poll until after examine */
483 if (!target_was_examined(target))
484 {
485 LOG_ERROR("Target not examined yet");
486 return ERROR_FAIL;
487 }
488
489 retval = target->type->halt(target);
490 if (retval != ERROR_OK)
491 return retval;
492
493 target->halt_issued = true;
494 target->halt_issued_time = timeval_ms();
495
496 return ERROR_OK;
497 }
498
499 /**
500 * Make the target (re)start executing using its saved execution
501 * context (possibly with some modifications).
502 *
503 * @param target Which target should start executing.
504 * @param current True to use the target's saved program counter instead
505 * of the address parameter
506 * @param address Optionally used as the program counter.
507 * @param handle_breakpoints True iff breakpoints at the resumption PC
508 * should be skipped. (For example, maybe execution was stopped by
509 * such a breakpoint, in which case it would be counterproductive to
510 * let it re-trigger.
511 * @param debug_execution False if all working areas allocated by OpenOCD
512 * should be released and/or restored to their original contents.
513 * (This would for example be true to run some downloaded "helper"
514 * algorithm code, which resides in one such working buffer and uses
515 * another for data storage.)
516 *
517 * @todo Resolve the ambiguity about what the "debug_execution" flag
518 * signifies. For example, Target implementations don't agree on how
519 * it relates to invalidation of the register cache, or to whether
520 * breakpoints and watchpoints should be enabled. (It would seem wrong
521 * to enable breakpoints when running downloaded "helper" algorithms
522 * (debug_execution true), since the breakpoints would be set to match
523 * target firmware being debugged, not the helper algorithm.... and
524 * enabling them could cause such helpers to malfunction (for example,
525 * by overwriting data with a breakpoint instruction. On the other
526 * hand the infrastructure for running such helpers might use this
527 * procedure but rely on hardware breakpoint to detect termination.)
528 */
529 int target_resume(struct target *target, int current, uint32_t address, int handle_breakpoints, int debug_execution)
530 {
531 int retval;
532
533 /* We can't poll until after examine */
534 if (!target_was_examined(target))
535 {
536 LOG_ERROR("Target not examined yet");
537 return ERROR_FAIL;
538 }
539
540 /* note that resume *must* be asynchronous. The CPU can halt before
541 * we poll. The CPU can even halt at the current PC as a result of
542 * a software breakpoint being inserted by (a bug?) the application.
543 */
544 if ((retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution)) != ERROR_OK)
545 return retval;
546
547 return retval;
548 }
549
550 static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
551 {
552 char buf[100];
553 int retval;
554 Jim_Nvp *n;
555 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
556 if (n->name == NULL) {
557 LOG_ERROR("invalid reset mode");
558 return ERROR_FAIL;
559 }
560
561 /* disable polling during reset to make reset event scripts
562 * more predictable, i.e. dr/irscan & pathmove in events will
563 * not have JTAG operations injected into the middle of a sequence.
564 */
565 bool save_poll = jtag_poll_get_enabled();
566
567 jtag_poll_set_enabled(false);
568
569 sprintf(buf, "ocd_process_reset %s", n->name);
570 retval = Jim_Eval(cmd_ctx->interp, buf);
571
572 jtag_poll_set_enabled(save_poll);
573
574 if (retval != JIM_OK) {
575 Jim_MakeErrorMessage(cmd_ctx->interp);
576 command_print(NULL,"%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
577 return ERROR_FAIL;
578 }
579
580 /* We want any events to be processed before the prompt */
581 retval = target_call_timer_callbacks_now();
582
583 struct target *target;
584 for (target = all_targets; target; target = target->next) {
585 target->type->check_reset(target);
586 }
587
588 return retval;
589 }
590
/* virt2phys stub for MMU-less targets: physical == virtual */
static int identity_virt2phys(struct target *target,
		uint32_t virtual, uint32_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}

/* MMU query stub for MMU-less targets: always reports "disabled" */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}

/* examine stub: nothing to probe, just mark the target examined */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}

/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}

/* invoke the type-specific examine hook for a single target */
int target_examine_one(struct target *target)
{
	return target->type->examine(target);
}
620
/* JTAG event hook registered by target_examine() for targets whose TAP
 * was disabled at startup: once the TAP becomes enabled, unregister
 * this one-shot callback and run the deferred examination. */
static int jtag_enable_callback(enum jtag_event event, void *priv)
{
	struct target *target = priv;

	/* ignore everything except "this target's TAP is now enabled" */
	if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
		return ERROR_OK;

	jtag_unregister_event_callback(jtag_enable_callback, target);
	return target_examine_one(target);
}
631
632
633 /* Targets that correctly implement init + examine, i.e.
634 * no communication with target during init:
635 *
636 * XScale
637 */
638 int target_examine(void)
639 {
640 int retval = ERROR_OK;
641 struct target *target;
642
643 for (target = all_targets; target; target = target->next)
644 {
645 /* defer examination, but don't skip it */
646 if (!target->tap->enabled) {
647 jtag_register_event_callback(jtag_enable_callback,
648 target);
649 continue;
650 }
651 if ((retval = target_examine_one(target)) != ERROR_OK)
652 return retval;
653 }
654 return retval;
655 }
/* return the name string of the target's type (e.g. "cortex_m3") */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
660
/* Interposer installed by target_init_one() in front of the type's
 * real write_memory hook: rejects access before examine. */
static int target_write_memory_imp(struct target *target, uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	return target->type->write_memory_imp(target, address, size, count, buffer);
}

/* Interposer installed by target_init_one() in front of the type's
 * real read_memory hook: rejects access before examine. */
static int target_read_memory_imp(struct target *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	return target->type->read_memory_imp(target, address, size, count, buffer);
}
680
/* Interposer for the type's soft_reset_halt hook: rejects the call
 * before examine, or when the type provides no implementation. */
static int target_soft_reset_halt_imp(struct target *target)
{
	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->soft_reset_halt_imp) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt_imp(target);
}
695
/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 * @return ERROR_OK on success; ERROR_FAIL if the target is unexamined
 * or lacks run_algorithm support; otherwise the driver's own error.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	/* flag the target busy for the duration of the algorithm run */
	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
736
/**
 * Downloads a target-specific native code algorithm to the target,
 * executes and leaves it running; pair with target_wait_algorithm().
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 * @return ERROR_OK on success; ERROR_FAIL if the target is unexamined,
 * lacks start_algorithm support, or is already running an algorithm.
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t entry_point, uint32_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	/* only one algorithm may run at a time per target */
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	/* stays set until target_wait_algorithm() observes completion */
	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}
776
/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 * @return driver result; on ERROR_TARGET_TIMEOUT the running_alg flag
 * stays set so the caller may wait again.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	/* on timeout, keep running_alg set so the wait can be retried */
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}
811
812
/* Read from target memory through the (possibly interposed) driver
 * hook; size is the access width in bytes, count the access count. */
int target_read_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	return target->type->read_memory(target, address, size, count, buffer);
}

/* Read from target physical memory (no MMU translation). */
static int target_read_phys_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	return target->type->read_phys_memory(target, address, size, count, buffer);
}

/* Write to target memory through the (possibly interposed) driver hook. */
int target_write_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	return target->type->write_memory(target, address, size, count, buffer);
}

/* Write to target physical memory (no MMU translation). */
static int target_write_phys_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	return target->type->write_phys_memory(target, address, size, count, buffer);
}

/* Optimized bulk write; unit of "count" is driver-defined — confirm
 * against the specific target type before relying on it. */
int target_bulk_write_memory(struct target *target,
		uint32_t address, uint32_t count, const uint8_t *buffer)
{
	return target->type->bulk_write_memory(target, address, count, buffer);
}
842
843 int target_add_breakpoint(struct target *target,
844 struct breakpoint *breakpoint)
845 {
846 if ((target->state != TARGET_HALTED)&&(breakpoint->type!=BKPT_HARD)) {
847 LOG_WARNING("target %s is not halted", target->cmd_name);
848 return ERROR_TARGET_NOT_HALTED;
849 }
850 return target->type->add_breakpoint(target, breakpoint);
851 }
852
853 int target_add_context_breakpoint(struct target *target,
854 struct breakpoint *breakpoint)
855 {
856 if (target->state != TARGET_HALTED) {
857 LOG_WARNING("target %s is not halted", target->cmd_name);
858 return ERROR_TARGET_NOT_HALTED;
859 }
860 return target->type->add_context_breakpoint(target, breakpoint);
861 }
862
863 int target_add_hybrid_breakpoint(struct target *target,
864 struct breakpoint *breakpoint)
865 {
866 if (target->state != TARGET_HALTED) {
867 LOG_WARNING("target %s is not halted", target->cmd_name);
868 return ERROR_TARGET_NOT_HALTED;
869 }
870 return target->type->add_hybrid_breakpoint(target, breakpoint);
871 }
872
/* Remove a previously installed breakpoint (no halted-state check). */
int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}
878
879 int target_add_watchpoint(struct target *target,
880 struct watchpoint *watchpoint)
881 {
882 if (target->state != TARGET_HALTED) {
883 LOG_WARNING("target %s is not halted", target->cmd_name);
884 return ERROR_TARGET_NOT_HALTED;
885 }
886 return target->type->add_watchpoint(target, watchpoint);
887 }
/* Remove a previously installed watchpoint (no halted-state check). */
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}

/* Fill reg_list/reg_list_size with the register set exposed to GDB. */
int target_get_gdb_reg_list(struct target *target,
		struct reg **reg_list[], int *reg_list_size)
{
	return target->type->get_gdb_reg_list(target, reg_list, reg_list_size);
}

/* Single-step the target; "current"/"address" presumably follow the
 * same convention as target_resume() — confirm per target type. */
int target_step(struct target *target,
		int current, uint32_t address, int handle_breakpoints)
{
	return target->type->step(target, current, address, handle_breakpoints);
}
904
905
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 * Called from target_init_one() before init_target() runs.
 */
static void target_reset_examined(struct target *target)
{
	target->examined = false;
}
914
/* Stubs installed by target_init_one() when a target type declares an
 * MMU but omits the physical-memory accessors: log and fail. */
static int
err_read_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	LOG_ERROR("Not implemented: %s", __func__);
	return ERROR_FAIL;
}

static int
err_write_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	LOG_ERROR("Not implemented: %s", __func__);
	return ERROR_FAIL;
}
930
931 static int handle_target(void *priv);
932
/* Per-target initialization: install default/stub hooks, call the
 * type's init_target(), wire the *_imp interposer layer in front of
 * the memory/reset hooks, and sanity-check the MMU hook set. */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (type->examine == NULL)
		type->examine = default_examine;

	if (type->check_reset== NULL)
		type->check_reset = default_check_reset;

	/* every target type must provide init_target */
	assert(type->init_target != NULL);

	int retval = type->init_target(cmd_ctx, target);
	if (ERROR_OK != retval)
	{
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/**
	 * @todo get rid of those *memory_imp() methods, now that all
	 * callers are using target_*_memory() accessors ... and make
	 * sure the "physical" paths handle the same issues.
	 */
	/* a non-invasive way(in terms of patches) to add some code that
	 * runs before the type->write/read_memory implementation
	 */
	type->write_memory_imp = target->type->write_memory;
	type->write_memory = target_write_memory_imp;

	type->read_memory_imp = target->type->read_memory;
	type->read_memory = target_read_memory_imp;

	type->soft_reset_halt_imp = target->type->soft_reset_halt;
	type->soft_reset_halt = target_soft_reset_halt_imp;

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu)
	{
		if (type->write_phys_memory == NULL)
		{
			LOG_ERROR("type '%s' is missing write_phys_memory",
					type->name);
			type->write_phys_memory = err_write_phys_memory;
		}
		if (type->read_phys_memory == NULL)
		{
			LOG_ERROR("type '%s' is missing read_phys_memory",
					type->name);
			type->read_phys_memory = err_read_phys_memory;
		}
		if (type->virt2phys == NULL)
		{
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	}
	else
	{
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory
				|| type->virt2phys)
		{
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);
		}

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* fall back to the generic buffer accessors when the type
	 * provides no optimized implementation */
	if (target->type->read_buffer == NULL)
		target->type->read_buffer = target_read_buffer_default;

	if (target->type->write_buffer == NULL)
		target->type->write_buffer = target_write_buffer_default;

	return ERROR_OK;
}
1020
1021 static int target_init(struct command_context *cmd_ctx)
1022 {
1023 struct target *target;
1024 int retval;
1025
1026 for (target = all_targets; target; target = target->next)
1027 {
1028 retval = target_init_one(cmd_ctx, target);
1029 if (ERROR_OK != retval)
1030 return retval;
1031 }
1032
1033 if (!all_targets)
1034 return ERROR_OK;
1035
1036 retval = target_register_user_commands(cmd_ctx);
1037 if (ERROR_OK != retval)
1038 return retval;
1039
1040 retval = target_register_timer_callback(&handle_target,
1041 polling_interval, 1, cmd_ctx->interp);
1042 if (ERROR_OK != retval)
1043 return retval;
1044
1045 return ERROR_OK;
1046 }
1047
1048 COMMAND_HANDLER(handle_target_init_command)
1049 {
1050 int retval;
1051
1052 if (CMD_ARGC != 0)
1053 return ERROR_COMMAND_SYNTAX_ERROR;
1054
1055 static bool target_initialized = false;
1056 if (target_initialized)
1057 {
1058 LOG_INFO("'target init' has already been called");
1059 return ERROR_OK;
1060 }
1061 target_initialized = true;
1062
1063 retval = command_run_line(CMD_CTX, "init_targets");
1064 if (ERROR_OK != retval)
1065 return retval;
1066
1067 LOG_DEBUG("Initializing targets...");
1068 return target_init(CMD_CTX);
1069 }
1070
1071 int target_register_event_callback(int (*callback)(struct target *target, enum target_event event, void *priv), void *priv)
1072 {
1073 struct target_event_callback **callbacks_p = &target_event_callbacks;
1074
1075 if (callback == NULL)
1076 {
1077 return ERROR_COMMAND_SYNTAX_ERROR;
1078 }
1079
1080 if (*callbacks_p)
1081 {
1082 while ((*callbacks_p)->next)
1083 callbacks_p = &((*callbacks_p)->next);
1084 callbacks_p = &((*callbacks_p)->next);
1085 }
1086
1087 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1088 (*callbacks_p)->callback = callback;
1089 (*callbacks_p)->priv = priv;
1090 (*callbacks_p)->next = NULL;
1091
1092 return ERROR_OK;
1093 }
1094
1095 int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
1096 {
1097 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1098 struct timeval now;
1099
1100 if (callback == NULL)
1101 {
1102 return ERROR_COMMAND_SYNTAX_ERROR;
1103 }
1104
1105 if (*callbacks_p)
1106 {
1107 while ((*callbacks_p)->next)
1108 callbacks_p = &((*callbacks_p)->next);
1109 callbacks_p = &((*callbacks_p)->next);
1110 }
1111
1112 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1113 (*callbacks_p)->callback = callback;
1114 (*callbacks_p)->periodic = periodic;
1115 (*callbacks_p)->time_ms = time_ms;
1116
1117 gettimeofday(&now, NULL);
1118 (*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000;
1119 time_ms -= (time_ms % 1000);
1120 (*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000);
1121 if ((*callbacks_p)->when.tv_usec > 1000000)
1122 {
1123 (*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000;
1124 (*callbacks_p)->when.tv_sec += 1;
1125 }
1126
1127 (*callbacks_p)->priv = priv;
1128 (*callbacks_p)->next = NULL;
1129
1130 return ERROR_OK;
1131 }
1132
1133 int target_unregister_event_callback(int (*callback)(struct target *target, enum target_event event, void *priv), void *priv)
1134 {
1135 struct target_event_callback **p = &target_event_callbacks;
1136 struct target_event_callback *c = target_event_callbacks;
1137
1138 if (callback == NULL)
1139 {
1140 return ERROR_COMMAND_SYNTAX_ERROR;
1141 }
1142
1143 while (c)
1144 {
1145 struct target_event_callback *next = c->next;
1146 if ((c->callback == callback) && (c->priv == priv))
1147 {
1148 *p = next;
1149 free(c);
1150 return ERROR_OK;
1151 }
1152 else
1153 p = &(c->next);
1154 c = next;
1155 }
1156
1157 return ERROR_OK;
1158 }
1159
1160 static int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1161 {
1162 struct target_timer_callback **p = &target_timer_callbacks;
1163 struct target_timer_callback *c = target_timer_callbacks;
1164
1165 if (callback == NULL)
1166 {
1167 return ERROR_COMMAND_SYNTAX_ERROR;
1168 }
1169
1170 while (c)
1171 {
1172 struct target_timer_callback *next = c->next;
1173 if ((c->callback == callback) && (c->priv == priv))
1174 {
1175 *p = next;
1176 free(c);
1177 return ERROR_OK;
1178 }
1179 else
1180 p = &(c->next);
1181 c = next;
1182 }
1183
1184 return ERROR_OK;
1185 }
1186
/* Deliver a target event: first to the Tcl event handlers (via
 * target_handle_event), then to every registered C callback.
 * A HALTED event first recursively delivers GDB_HALT. */
int target_call_event_callbacks(struct target *target, enum target_event event)
{
	struct target_event_callback *callback = target_event_callbacks;
	struct target_event_callback *next_callback;

	if (event == TARGET_EVENT_HALTED)
	{
		/* execute early halted first */
		target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
	}

	LOG_DEBUG("target event %i (%s)",
			event,
			Jim_Nvp_value2name_simple(nvp_target_event, event)->name);

	target_handle_event(target, event);

	while (callback)
	{
		/* save the next pointer first: a callback may unregister
		 * (and free) its own list node */
		next_callback = callback->next;
		callback->callback(target, event, callback->priv);
		callback = next_callback;
	}

	return ERROR_OK;
}
1213
1214 static int target_timer_callback_periodic_restart(
1215 struct target_timer_callback *cb, struct timeval *now)
1216 {
1217 int time_ms = cb->time_ms;
1218 cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
1219 time_ms -= (time_ms % 1000);
1220 cb->when.tv_sec = now->tv_sec + time_ms / 1000;
1221 if (cb->when.tv_usec > 1000000)
1222 {
1223 cb->when.tv_usec = cb->when.tv_usec - 1000000;
1224 cb->when.tv_sec += 1;
1225 }
1226 return ERROR_OK;
1227 }
1228
1229 static int target_call_timer_callback(struct target_timer_callback *cb,
1230 struct timeval *now)
1231 {
1232 cb->callback(cb->priv);
1233
1234 if (cb->periodic)
1235 return target_timer_callback_periodic_restart(cb, now);
1236
1237 return target_unregister_timer_callback(cb->callback, cb->priv);
1238 }
1239
/* Run due timer callbacks.  checktime != 0 honors each callback's
 * deadline; checktime == 0 forces all periodic callbacks to run now.
 * Stops and returns the first callback error. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	/* prevent adapter keep-alive timeouts while we are busy */
	keep_alive();

	struct timeval now;
	gettimeofday(&now, NULL);

	struct target_timer_callback *callback = target_timer_callbacks;
	while (callback)
	{
		// cleaning up may unregister and free this callback
		struct target_timer_callback *next_callback = callback->next;

		/* run when forced (!checktime && periodic) or when the
		 * absolute deadline has passed */
		bool call_it = callback->callback &&
			((!checktime && callback->periodic) ||
			 now.tv_sec > callback->when.tv_sec ||
			 (now.tv_sec == callback->when.tv_sec &&
			  now.tv_usec >= callback->when.tv_usec));

		if (call_it)
		{
			int retval = target_call_timer_callback(callback, &now);
			if (retval != ERROR_OK)
				return retval;
		}

		callback = next_callback;
	}

	return ERROR_OK;
}
1271
/* Run only those timer callbacks whose deadlines have expired. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1276
/* invoke periodic callbacks immediately, ignoring their deadlines */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1282
1283 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1284 {
1285 struct working_area *c = target->working_areas;
1286 struct working_area *new_wa = NULL;
1287
1288 /* Reevaluate working area address based on MMU state*/
1289 if (target->working_areas == NULL)
1290 {
1291 int retval;
1292 int enabled;
1293
1294 retval = target->type->mmu(target, &enabled);
1295 if (retval != ERROR_OK)
1296 {
1297 return retval;
1298 }
1299
1300 if (!enabled) {
1301 if (target->working_area_phys_spec) {
1302 LOG_DEBUG("MMU disabled, using physical "
1303 "address for working memory 0x%08x",
1304 (unsigned)target->working_area_phys);
1305 target->working_area = target->working_area_phys;
1306 } else {
1307 LOG_ERROR("No working memory available. "
1308 "Specify -work-area-phys to target.");
1309 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1310 }
1311 } else {
1312 if (target->working_area_virt_spec) {
1313 LOG_DEBUG("MMU enabled, using virtual "
1314 "address for working memory 0x%08x",
1315 (unsigned)target->working_area_virt);
1316 target->working_area = target->working_area_virt;
1317 } else {
1318 LOG_ERROR("No working memory available. "
1319 "Specify -work-area-virt to target.");
1320 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1321 }
1322 }
1323 }
1324
1325 /* only allocate multiples of 4 byte */
1326 if (size % 4)
1327 {
1328 LOG_ERROR("BUG: code tried to allocate unaligned number of bytes (0x%08x), padding", ((unsigned)(size)));
1329 size = (size + 3) & (~3);
1330 }
1331
1332 /* see if there's already a matching working area */
1333 while (c)
1334 {
1335 if ((c->free) && (c->size == size))
1336 {
1337 new_wa = c;
1338 break;
1339 }
1340 c = c->next;
1341 }
1342
1343 /* if not, allocate a new one */
1344 if (!new_wa)
1345 {
1346 struct working_area **p = &target->working_areas;
1347 uint32_t first_free = target->working_area;
1348 uint32_t free_size = target->working_area_size;
1349
1350 c = target->working_areas;
1351 while (c)
1352 {
1353 first_free += c->size;
1354 free_size -= c->size;
1355 p = &c->next;
1356 c = c->next;
1357 }
1358
1359 if (free_size < size)
1360 {
1361 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1362 }
1363
1364 LOG_DEBUG("allocated new working area at address 0x%08x", (unsigned)first_free);
1365
1366 new_wa = malloc(sizeof(struct working_area));
1367 new_wa->next = NULL;
1368 new_wa->size = size;
1369 new_wa->address = first_free;
1370
1371 if (target->backup_working_area)
1372 {
1373 int retval;
1374 new_wa->backup = malloc(new_wa->size);
1375 if ((retval = target_read_memory(target, new_wa->address, 4, new_wa->size / 4, new_wa->backup)) != ERROR_OK)
1376 {
1377 free(new_wa->backup);
1378 free(new_wa);
1379 return retval;
1380 }
1381 }
1382 else
1383 {
1384 new_wa->backup = NULL;
1385 }
1386
1387 /* put new entry in list */
1388 *p = new_wa;
1389 }
1390
1391 /* mark as used, and return the new (reused) area */
1392 new_wa->free = false;
1393 *area = new_wa;
1394
1395 /* user pointer */
1396 new_wa->user = area;
1397
1398 return ERROR_OK;
1399 }
1400
1401 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1402 {
1403 int retval;
1404
1405 retval = target_alloc_working_area_try(target, size, area);
1406 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1407 {
1408 LOG_WARNING("not enough working area available(requested %u)", (unsigned)(size));
1409 }
1410 return retval;
1411
1412 }
1413
1414 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1415 {
1416 if (area->free)
1417 return ERROR_OK;
1418
1419 if (restore && target->backup_working_area)
1420 {
1421 int retval;
1422 if ((retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup)) != ERROR_OK)
1423 return retval;
1424 }
1425
1426 area->free = true;
1427
1428 /* mark user pointer invalid */
1429 *area->user = NULL;
1430 area->user = NULL;
1431
1432 return ERROR_OK;
1433 }
1434
/* Free one working area, restoring its original memory contents. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
1439
1440 /* free resources and restore memory, if restoring memory fails,
1441 * free up resources anyway
1442 */
1443 static void target_free_all_working_areas_restore(struct target *target, int restore)
1444 {
1445 struct working_area *c = target->working_areas;
1446
1447 while (c)
1448 {
1449 struct working_area *next = c->next;
1450 target_free_working_area_restore(target, c, restore);
1451
1452 if (c->backup)
1453 free(c->backup);
1454
1455 free(c);
1456
1457 c = next;
1458 }
1459
1460 target->working_areas = NULL;
1461 }
1462
/* Release every working area of the target, restoring memory. */
void target_free_all_working_areas(struct target *target)
{
	target_free_all_working_areas_restore(target, 1);
}
1467
1468 int target_arch_state(struct target *target)
1469 {
1470 int retval;
1471 if (target == NULL)
1472 {
1473 LOG_USER("No target has been configured");
1474 return ERROR_OK;
1475 }
1476
1477 LOG_USER("target state: %s", target_state_name( target ));
1478
1479 if (target->state != TARGET_HALTED)
1480 return ERROR_OK;
1481
1482 retval = target->type->arch_state(target);
1483 return retval;
1484 }
1485
1486 /* Single aligned words are guaranteed to use 16 or 32 bit access
1487 * mode respectively, otherwise data is handled as quickly as
1488 * possible
1489 */
1490 int target_write_buffer(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
1491 {
1492 LOG_DEBUG("writing buffer of %i byte at 0x%8.8x",
1493 (int)size, (unsigned)address);
1494
1495 if (!target_was_examined(target))
1496 {
1497 LOG_ERROR("Target not examined yet");
1498 return ERROR_FAIL;
1499 }
1500
1501 if (size == 0) {
1502 return ERROR_OK;
1503 }
1504
1505 if ((address + size - 1) < address)
1506 {
1507 /* GDB can request this when e.g. PC is 0xfffffffc*/
1508 LOG_ERROR("address + size wrapped(0x%08x, 0x%08x)",
1509 (unsigned)address,
1510 (unsigned)size);
1511 return ERROR_FAIL;
1512 }
1513
1514 return target->type->write_buffer(target, address, size, buffer);
1515 }
1516
/* Default write_buffer implementation: unaligned head bytes first,
 * then aligned 32-bit words (bulk transfer above 128 bytes), then any
 * unaligned tail bytes. */
static int target_write_buffer_default(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
{
	int retval = ERROR_OK;

	/* single aligned halfword: guarantee a 16-bit access */
	if (((address % 2) == 0) && (size == 2))
	{
		return target_write_memory(target, address, 2, 1, buffer);
	}

	/* handle unaligned head bytes */
	if (address % 4)
	{
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		if ((retval = target_write_memory(target, address, 1, unaligned, buffer)) != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4)
	{
		int aligned = size - (size % 4);

		/* use bulk writes above a certain limit. This may have to be changed */
		if (aligned > 128)
		{
			if ((retval = target->type->bulk_write_memory(target, address, aligned / 4, buffer)) != ERROR_OK)
				return retval;
		}
		else
		{
			if ((retval = target_write_memory(target, address, 4, aligned / 4, buffer)) != ERROR_OK)
				return retval;
		}

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}

	/* handle tail writes of less than 4 bytes */
	if (size > 0)
	{
		if ((retval = target_write_memory(target, address, 1, size, buffer)) != ERROR_OK)
			return retval;
	}

	return retval;
}
1573
1574 /* Single aligned words are guaranteed to use 16 or 32 bit access
1575 * mode respectively, otherwise data is handled as quickly as
1576 * possible
1577 */
1578 int target_read_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
1579 {
1580 LOG_DEBUG("reading buffer of %i byte at 0x%8.8x",
1581 (int)size, (unsigned)address);
1582
1583 if (!target_was_examined(target))
1584 {
1585 LOG_ERROR("Target not examined yet");
1586 return ERROR_FAIL;
1587 }
1588
1589 if (size == 0) {
1590 return ERROR_OK;
1591 }
1592
1593 if ((address + size - 1) < address)
1594 {
1595 /* GDB can request this when e.g. PC is 0xfffffffc*/
1596 LOG_ERROR("address + size wrapped(0x%08" PRIx32 ", 0x%08" PRIx32 ")",
1597 address,
1598 size);
1599 return ERROR_FAIL;
1600 }
1601
1602 return target->type->read_buffer(target, address, size, buffer);
1603 }
1604
/* Default read_buffer implementation: unaligned head bytes first,
 * then aligned 32-bit words, then 16-bit halfwords where possible,
 * then any remaining single byte. */
static int target_read_buffer_default(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
{
	int retval = ERROR_OK;

	/* single aligned halfword: guarantee a 16-bit access */
	if (((address % 2) == 0) && (size == 2))
	{
		return target_read_memory(target, address, 2, 1, buffer);
	}

	/* handle unaligned head bytes */
	if (address % 4)
	{
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		if ((retval = target_read_memory(target, address, 1, unaligned, buffer)) != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4)
	{
		int aligned = size - (size % 4);

		if ((retval = target_read_memory(target, address, 4, aligned / 4, buffer)) != ERROR_OK)
			return retval;

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}

	/*prevent byte access when possible (avoid AHB access limitations in some cases)*/
	if(size >=2)
	{
		int aligned = size - (size%2);
		retval = target_read_memory(target, address, 2, aligned / 2, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}
	/* handle tail writes of less than 4 bytes */
	if (size > 0)
	{
		if ((retval = target_read_memory(target, address, 1, size, buffer)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1664
1665 int target_checksum_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* crc)
1666 {
1667 uint8_t *buffer;
1668 int retval;
1669 uint32_t i;
1670 uint32_t checksum = 0;
1671 if (!target_was_examined(target))
1672 {
1673 LOG_ERROR("Target not examined yet");
1674 return ERROR_FAIL;
1675 }
1676
1677 if ((retval = target->type->checksum_memory(target, address,
1678 size, &checksum)) != ERROR_OK)
1679 {
1680 buffer = malloc(size);
1681 if (buffer == NULL)
1682 {
1683 LOG_ERROR("error allocating buffer for section (%d bytes)", (int)size);
1684 return ERROR_COMMAND_SYNTAX_ERROR;
1685 }
1686 retval = target_read_buffer(target, address, size, buffer);
1687 if (retval != ERROR_OK)
1688 {
1689 free(buffer);
1690 return retval;
1691 }
1692
1693 /* convert to target endianness */
1694 for (i = 0; i < (size/sizeof(uint32_t)); i++)
1695 {
1696 uint32_t target_data;
1697 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
1698 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
1699 }
1700
1701 retval = image_calculate_checksum(buffer, size, &checksum);
1702 free(buffer);
1703 }
1704
1705 *crc = checksum;
1706
1707 return retval;
1708 }
1709
1710 int target_blank_check_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* blank)
1711 {
1712 int retval;
1713 if (!target_was_examined(target))
1714 {
1715 LOG_ERROR("Target not examined yet");
1716 return ERROR_FAIL;
1717 }
1718
1719 if (target->type->blank_check_memory == 0)
1720 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1721
1722 retval = target->type->blank_check_memory(target, address, size, blank);
1723
1724 return retval;
1725 }
1726
1727 int target_read_u32(struct target *target, uint32_t address, uint32_t *value)
1728 {
1729 uint8_t value_buf[4];
1730 if (!target_was_examined(target))
1731 {
1732 LOG_ERROR("Target not examined yet");
1733 return ERROR_FAIL;
1734 }
1735
1736 int retval = target_read_memory(target, address, 4, 1, value_buf);
1737
1738 if (retval == ERROR_OK)
1739 {
1740 *value = target_buffer_get_u32(target, value_buf);
1741 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1742 address,
1743 *value);
1744 }
1745 else
1746 {
1747 *value = 0x0;
1748 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1749 address);
1750 }
1751
1752 return retval;
1753 }
1754
1755 int target_read_u16(struct target *target, uint32_t address, uint16_t *value)
1756 {
1757 uint8_t value_buf[2];
1758 if (!target_was_examined(target))
1759 {
1760 LOG_ERROR("Target not examined yet");
1761 return ERROR_FAIL;
1762 }
1763
1764 int retval = target_read_memory(target, address, 2, 1, value_buf);
1765
1766 if (retval == ERROR_OK)
1767 {
1768 *value = target_buffer_get_u16(target, value_buf);
1769 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
1770 address,
1771 *value);
1772 }
1773 else
1774 {
1775 *value = 0x0;
1776 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1777 address);
1778 }
1779
1780 return retval;
1781 }
1782
1783 int target_read_u8(struct target *target, uint32_t address, uint8_t *value)
1784 {
1785 int retval = target_read_memory(target, address, 1, 1, value);
1786 if (!target_was_examined(target))
1787 {
1788 LOG_ERROR("Target not examined yet");
1789 return ERROR_FAIL;
1790 }
1791
1792 if (retval == ERROR_OK)
1793 {
1794 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
1795 address,
1796 *value);
1797 }
1798 else
1799 {
1800 *value = 0x0;
1801 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1802 address);
1803 }
1804
1805 return retval;
1806 }
1807
1808 int target_write_u32(struct target *target, uint32_t address, uint32_t value)
1809 {
1810 int retval;
1811 uint8_t value_buf[4];
1812 if (!target_was_examined(target))
1813 {
1814 LOG_ERROR("Target not examined yet");
1815 return ERROR_FAIL;
1816 }
1817
1818 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1819 address,
1820 value);
1821
1822 target_buffer_set_u32(target, value_buf, value);
1823 if ((retval = target_write_memory(target, address, 4, 1, value_buf)) != ERROR_OK)
1824 {
1825 LOG_DEBUG("failed: %i", retval);
1826 }
1827
1828 return retval;
1829 }
1830
1831 int target_write_u16(struct target *target, uint32_t address, uint16_t value)
1832 {
1833 int retval;
1834 uint8_t value_buf[2];
1835 if (!target_was_examined(target))
1836 {
1837 LOG_ERROR("Target not examined yet");
1838 return ERROR_FAIL;
1839 }
1840
1841 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8x",
1842 address,
1843 value);
1844
1845 target_buffer_set_u16(target, value_buf, value);
1846 if ((retval = target_write_memory(target, address, 2, 1, value_buf)) != ERROR_OK)
1847 {
1848 LOG_DEBUG("failed: %i", retval);
1849 }
1850
1851 return retval;
1852 }
1853
1854 int target_write_u8(struct target *target, uint32_t address, uint8_t value)
1855 {
1856 int retval;
1857 if (!target_was_examined(target))
1858 {
1859 LOG_ERROR("Target not examined yet");
1860 return ERROR_FAIL;
1861 }
1862
1863 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
1864 address, value);
1865
1866 if ((retval = target_write_memory(target, address, 1, 1, &value)) != ERROR_OK)
1867 {
1868 LOG_DEBUG("failed: %i", retval);
1869 }
1870
1871 return retval;
1872 }
1873
1874 static int find_target(struct command_context *cmd_ctx, const char *name)
1875 {
1876 struct target *target = get_target(name);
1877 if (target == NULL) {
1878 LOG_ERROR("Target: %s is unknown, try one of:\n", name);
1879 return ERROR_FAIL;
1880 }
1881 if (!target->tap->enabled) {
1882 LOG_USER("Target: TAP %s is disabled, "
1883 "can't be the current target\n",
1884 target->tap->dotted_name);
1885 return ERROR_FAIL;
1886 }
1887
1888 cmd_ctx->current_target = target->target_number;
1889 return ERROR_OK;
1890 }
1891
1892
/* "targets" command: with one argument, select that target as the
 * current one; with none (or after a failed lookup), print the table
 * of all configured targets, marking the current one with '*'. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1)
	{
		retval = find_target(CMD_CTX, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}
	/* on lookup failure we still list the targets below, and return
	 * the lookup error afterwards */

	struct target *target = all_targets;
	command_print(CMD_CTX, "    TargetName         Type       Endian TapName            State       ");
	command_print(CMD_CTX, "--  ------------------ ---------- ------ ------------------ ------------");
	while (target)
	{
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name( target );
		else
			state = "tap-disabled";

		if (CMD_CTX->current_target == target->target_number)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD_CTX,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				Jim_Nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
1937
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* latest sampled adapter sense-line state, updated by sense_handler() */
static int powerDropout;
static int srstAsserted;

/* edge-event flags set by sense_handler(), consumed and cleared by
 * handle_target() */
static int runPowerRestore;
static int runPowerDropout;
static int runSrstAsserted;
static int runSrstDeasserted;
1947
/* Sample the adapter's power/srst sense lines and turn level changes
 * into edge events (power restored/dropped, srst asserted/deasserted)
 * by setting the run* flags consumed by handle_target().  Dropout and
 * srst-deassert events are rate-limited to one per 2000ms. */
static int sense_handler(void)
{
	/* previous samples, for edge detection */
	static int prevSrstAsserted = 0;
	static int prevPowerdropout = 0;

	int retval;
	if ((retval = jtag_power_dropout(&powerDropout)) != ERROR_OK)
		return retval;

	int powerRestored;
	powerRestored = prevPowerdropout && !powerDropout;
	if (powerRestored)
	{
		runPowerRestore = 1;
	}

	long long current = timeval_ms();
	static long long lastPower = 0;
	/* rate-limit dropout handling to once per 2 seconds */
	int waitMore = lastPower + 2000 > current;
	if (powerDropout && !waitMore)
	{
		runPowerDropout = 1;
		lastPower = current;
	}

	if ((retval = jtag_srst_asserted(&srstAsserted)) != ERROR_OK)
		return retval;

	int srstDeasserted;
	srstDeasserted = prevSrstAsserted && !srstAsserted;

	static long long lastSrst = 0;
	/* rate-limit srst-deassert handling to once per 2 seconds */
	waitMore = lastSrst + 2000 > current;
	if (srstDeasserted && !waitMore)
	{
		runSrstDeasserted = 1;
		lastSrst = current;
	}

	if (!prevSrstAsserted && srstAsserted)
	{
		runSrstAsserted = 1;
	}

	prevSrstAsserted = srstAsserted;
	prevPowerdropout = powerDropout;

	if (srstDeasserted || powerRestored)
	{
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2005
/* exponential poll-backoff state: after a target_poll() failure,
 * handle_target() skips backoff_times poll rounds (counted in
 * backoff_count) before trying again */
static int backoff_times = 0;
static int backoff_count = 0;
2008
/* process target state changes */
/* Periodic timer callback: run sense-line event procs (with a
 * recursion guard, since those procs can re-trigger events), then
 * poll every enabled target, backing off exponentially after a poll
 * failure.  priv is the Jim interpreter registered at init time. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe())
	{
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive = 0;
	if (! recursive)
	{
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (runSrstAsserted)
		{
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (runSrstDeasserted)
		{
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (runPowerDropout)
		{
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (runPowerRestore)
		{
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something)
		{
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		runSrstAsserted = 0;
		runSrstDeasserted = 0;
		runPowerRestore = 0;
		runPowerDropout = 0;

		recursive = 0;
	}

	/* backoff: skip this poll round if a previous poll failed */
	if (backoff_times > backoff_count)
	{
		/* do not poll this time as we failed previously */
		backoff_count++;
		return ERROR_OK;
	}
	backoff_count = 0;

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next)
	{
		if (!target->tap->enabled)
			continue;

		/* only poll target if we've got power and srst isn't asserted */
		if (!powerDropout && !srstAsserted)
		{
			/* polling may fail silently until the target has been examined */
			if ((retval = target_poll(target)) != ERROR_OK)
			{
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (backoff_times * polling_interval < 5000)
				{
					backoff_times *= 2;
					backoff_times++;
				}
				LOG_USER("Polling target failed, GDB will be halted. Polling again in %dms", backoff_times * polling_interval);

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
				return retval;
			}
			/* Since we succeeded, we reset backoff count */
			if (backoff_times > 0)
			{
				LOG_USER("Polling succeeded again");
			}
			backoff_times = 0;
		}
	}

	return retval;
}
2120
/* "reg" command: with no arguments, list all registers of the current
 * target; with a register name or ordinal number, display it
 * (optionally forcing a re-read with "force"); with a second numeric
 * argument, set its value. */
COMMAND_HANDLER(handle_reg_command)
{
	struct target *target;
	struct reg *reg = NULL;
	unsigned count = 0;
	char *value;

	LOG_DEBUG("-");

	target = get_current_target(CMD_CTX);

	/* list all available registers for the current target */
	if (CMD_ARGC == 0)
	{
		struct reg_cache *cache = target->reg_cache;

		count = 0;
		while (cache)
		{
			unsigned i;

			command_print(CMD_CTX, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++)
			{
				/* only print cached values if they are valid */
				if (reg->valid) {
					value = buf_to_str(reg->value,
							reg->size, 16);
					command_print(CMD_CTX,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
								? " (dirty)"
								: "");
					free(value);
				} else {
					command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size) ;
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9'))
	{
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk the caches counting registers until we reach #num */
		struct reg_cache *cache = target->reg_cache;
		count = 0;
		while (cache)
		{
			unsigned i;
			for (i = 0; i < cache->num_regs; i++)
			{
				if (count++ == num)
				{
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg)
		{
			command_print(CMD_CTX, "%i is out of bounds, the current target has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else /* access a single register by its name */
	{
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);

		if (!reg)
		{
			command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
			return ERROR_OK;
		}
	}

	assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0') && (CMD_ARGV[1][0] <= '9'))))
	{
		/* "force" invalidates the cached value, so it gets re-read */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		/* refresh an invalid (or just-invalidated) cache entry */
		if (reg->valid == 0)
		{
			reg->type->get(reg);
		}
		value = buf_to_str(reg->value, reg->size, 16);
		command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2)
	{
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (buf == NULL)
			return ERROR_FAIL;
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		reg->type->set(reg, buf);

		value = buf_to_str(reg->value, reg->size, 16);
		command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);

		free(buf);

		return ERROR_OK;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;
}
2251
2252 COMMAND_HANDLER(handle_poll_command)
2253 {
2254 int retval = ERROR_OK;
2255 struct target *target = get_current_target(CMD_CTX);
2256
2257 if (CMD_ARGC == 0)
2258 {
2259 command_print(CMD_CTX, "background polling: %s",
2260 jtag_poll_get_enabled() ? "on" : "off");
2261 command_print(CMD_CTX, "TAP: %s (%s)",
2262 target->tap->dotted_name,
2263 target->tap->enabled ? "enabled" : "disabled");
2264 if (!target->tap->enabled)
2265 return ERROR_OK;
2266 if ((retval = target_poll(target)) != ERROR_OK)
2267 return retval;
2268 if ((retval = target_arch_state(target)) != ERROR_OK)
2269 return retval;
2270 }
2271 else if (CMD_ARGC == 1)
2272 {
2273 bool enable;
2274 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2275 jtag_poll_set_enabled(enable);
2276 }
2277 else
2278 {
2279 return ERROR_COMMAND_SYNTAX_ERROR;
2280 }
2281
2282 return retval;
2283 }
2284
2285 COMMAND_HANDLER(handle_wait_halt_command)
2286 {
2287 if (CMD_ARGC > 1)
2288 return ERROR_COMMAND_SYNTAX_ERROR;
2289
2290 unsigned ms = 5000;
2291 if (1 == CMD_ARGC)
2292 {
2293 int retval = parse_uint(CMD_ARGV[0], &ms);
2294 if (ERROR_OK != retval)
2295 {
2296 return ERROR_COMMAND_SYNTAX_ERROR;
2297 }
2298 // convert seconds (given) to milliseconds (needed)
2299 ms *= 1000;
2300 }
2301
2302 struct target *target = get_current_target(CMD_CTX);
2303 return target_wait_state(target, TARGET_HALTED, ms);
2304 }
2305
2306 /* wait for target state to change. The trick here is to have a low
2307 * latency for short waits and not to suck up all the CPU time
2308 * on longer waits.
2309 *
2310 * After 500ms, keep_alive() is invoked
2311 */
2312 int target_wait_state(struct target *target, enum target_state state, int ms)
2313 {
2314 int retval;
2315 long long then = 0, cur;
2316 int once = 1;
2317
2318 for (;;)
2319 {
2320 if ((retval = target_poll(target)) != ERROR_OK)
2321 return retval;
2322 if (target->state == state)
2323 {
2324 break;
2325 }
2326 cur = timeval_ms();
2327 if (once)
2328 {
2329 once = 0;
2330 then = timeval_ms();
2331 LOG_DEBUG("waiting for target %s...",
2332 Jim_Nvp_value2name_simple(nvp_target_state,state)->name);
2333 }
2334
2335 if (cur-then > 500)
2336 {
2337 keep_alive();
2338 }
2339
2340 if ((cur-then) > ms)
2341 {
2342 LOG_ERROR("timed out while waiting for target %s",
2343 Jim_Nvp_value2name_simple(nvp_target_state,state)->name);
2344 return ERROR_FAIL;
2345 }
2346 }
2347
2348 return ERROR_OK;
2349 }
2350
2351 COMMAND_HANDLER(handle_halt_command)
2352 {
2353 LOG_DEBUG("-");
2354
2355 struct target *target = get_current_target(CMD_CTX);
2356 int retval = target_halt(target);
2357 if (ERROR_OK != retval)
2358 return retval;
2359
2360 if (CMD_ARGC == 1)
2361 {
2362 unsigned wait_local;
2363 retval = parse_uint(CMD_ARGV[0], &wait_local);
2364 if (ERROR_OK != retval)
2365 return ERROR_COMMAND_SYNTAX_ERROR;
2366 if (!wait_local)
2367 return ERROR_OK;
2368 }
2369
2370 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2371 }
2372
2373 COMMAND_HANDLER(handle_soft_reset_halt_command)
2374 {
2375 struct target *target = get_current_target(CMD_CTX);
2376
2377 LOG_USER("requesting target halt and executing a soft reset");
2378
2379 target->type->soft_reset_halt(target);
2380
2381 return ERROR_OK;
2382 }
2383
2384 COMMAND_HANDLER(handle_reset_command)
2385 {
2386 if (CMD_ARGC > 1)
2387 return ERROR_COMMAND_SYNTAX_ERROR;
2388
2389 enum target_reset_mode reset_mode = RESET_RUN;
2390 if (CMD_ARGC == 1)
2391 {
2392 const Jim_Nvp *n;
2393 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
2394 if ((n->name == NULL) || (n->value == RESET_UNKNOWN)) {
2395 return ERROR_COMMAND_SYNTAX_ERROR;
2396 }
2397 reset_mode = n->value;
2398 }
2399
2400 /* reset *all* targets */
2401 return target_process_reset(CMD_CTX, reset_mode);
2402 }
2403
2404
2405 COMMAND_HANDLER(handle_resume_command)
2406 {
2407 int current = 1;
2408 if (CMD_ARGC > 1)
2409 return ERROR_COMMAND_SYNTAX_ERROR;
2410
2411 struct target *target = get_current_target(CMD_CTX);
2412 target_handle_event(target, TARGET_EVENT_OLD_pre_resume);
2413
2414 /* with no CMD_ARGV, resume from current pc, addr = 0,
2415 * with one arguments, addr = CMD_ARGV[0],
2416 * handle breakpoints, not debugging */
2417 uint32_t addr = 0;
2418 if (CMD_ARGC == 1)
2419 {
2420 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2421 current = 0;
2422 }
2423
2424 return target_resume(target, current, addr, 1, 0);
2425 }
2426
2427 COMMAND_HANDLER(handle_step_command)
2428 {
2429 if (CMD_ARGC > 1)
2430 return ERROR_COMMAND_SYNTAX_ERROR;
2431
2432 LOG_DEBUG("-");
2433
2434 /* with no CMD_ARGV, step from current pc, addr = 0,
2435 * with one argument addr = CMD_ARGV[0],
2436 * handle breakpoints, debugging */
2437 uint32_t addr = 0;
2438 int current_pc = 1;
2439 if (CMD_ARGC == 1)
2440 {
2441 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2442 current_pc = 0;
2443 }
2444
2445 struct target *target = get_current_target(CMD_CTX);
2446
2447 return target->type->step(target, current_pc, addr, 1);
2448 }
2449
/* Pretty-print a memory buffer for the md[wbh] commands: lines of 32 bytes,
 * values decoded with the target's endianness. */
static void handle_md_output(struct command_context *cmd_ctx,
		struct target *target, uint32_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	const unsigned line_bytecnt = 32;
	unsigned per_line = line_bytecnt / size;

	char line[line_bytecnt * 4 + 1];
	unsigned pos = 0;

	const char *fmt;
	switch (size) {
	case 4: fmt = "%8.8x "; break;
	case 2: fmt = "%4.4x "; break;
	case 1: fmt = "%2.2x "; break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++)
	{
		/* each output line starts with the address of its first value */
		if (i % per_line == 0)
			pos += snprintf(line + pos, sizeof(line) - pos,
					"0x%8.8x: ", (unsigned)(address + (i * size)));

		const uint8_t *value_ptr = buffer + i * size;
		uint32_t value = 0;
		switch (size) {
		case 4: value = target_buffer_get_u32(target, value_ptr); break;
		case 2: value = target_buffer_get_u16(target, value_ptr); break;
		case 1: value = *value_ptr;
		}
		pos += snprintf(line + pos, sizeof(line) - pos, fmt, value);

		/* flush at end of line or end of buffer */
		if ((i % per_line == per_line - 1) || (i == count - 1))
		{
			command_print(cmd_ctx, "%s", line);
			pos = 0;
		}
	}
}
2499
2500 COMMAND_HANDLER(handle_md_command)
2501 {
2502 if (CMD_ARGC < 1)
2503 return ERROR_COMMAND_SYNTAX_ERROR;
2504
2505 unsigned size = 0;
2506 switch (CMD_NAME[2]) {
2507 case 'w': size = 4; break;
2508 case 'h': size = 2; break;
2509 case 'b': size = 1; break;
2510 default: return ERROR_COMMAND_SYNTAX_ERROR;
2511 }
2512
2513 bool physical=strcmp(CMD_ARGV[0], "phys")==0;
2514 int (*fn)(struct target *target,
2515 uint32_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
2516 if (physical)
2517 {
2518 CMD_ARGC--;
2519 CMD_ARGV++;
2520 fn=target_read_phys_memory;
2521 } else
2522 {
2523 fn=target_read_memory;
2524 }
2525 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
2526 {
2527 return ERROR_COMMAND_SYNTAX_ERROR;
2528 }
2529
2530 uint32_t address;
2531 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2532
2533 unsigned count = 1;
2534 if (CMD_ARGC == 2)
2535 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
2536
2537 uint8_t *buffer = calloc(count, size);
2538
2539 struct target *target = get_current_target(CMD_CTX);
2540 int retval = fn(target, address, size, count, buffer);
2541 if (ERROR_OK == retval)
2542 handle_md_output(CMD_CTX, target, address, size, count, buffer);
2543
2544 free(buffer);
2545
2546 return retval;
2547 }
2548
2549 typedef int (*target_write_fn)(struct target *target,
2550 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
2551
/* Adapter with the target_write_fn signature that routes writes through the
 * (potentially bulk-optimized) target_write_buffer() path. */
static int target_write_memory_fast(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	uint32_t byte_count = size * count;
	return target_write_buffer(target, address, byte_count, buffer);
}
2557
2558 static int target_fill_mem(struct target *target,
2559 uint32_t address,
2560 target_write_fn fn,
2561 unsigned data_size,
2562 /* value */
2563 uint32_t b,
2564 /* count */
2565 unsigned c)
2566 {
2567 /* We have to write in reasonably large chunks to be able
2568 * to fill large memory areas with any sane speed */
2569 const unsigned chunk_size = 16384;
2570 uint8_t *target_buf = malloc(chunk_size * data_size);
2571 if (target_buf == NULL)
2572 {
2573 LOG_ERROR("Out of memory");
2574 return ERROR_FAIL;
2575 }
2576
2577 for (unsigned i = 0; i < chunk_size; i ++)
2578 {
2579 switch (data_size)
2580 {
2581 case 4:
2582 target_buffer_set_u32(target, target_buf + i*data_size, b);
2583 break;
2584 case 2:
2585 target_buffer_set_u16(target, target_buf + i*data_size, b);
2586 break;
2587 case 1:
2588 target_buffer_set_u8(target, target_buf + i*data_size, b);
2589 break;
2590 default:
2591 exit(-1);
2592 }
2593 }
2594
2595 int retval = ERROR_OK;
2596
2597 for (unsigned x = 0; x < c; x += chunk_size)
2598 {
2599 unsigned current;
2600 current = c - x;
2601 if (current > chunk_size)
2602 {
2603 current = chunk_size;
2604 }
2605 retval = fn(target, address + x * data_size, data_size, current, target_buf);
2606 if (retval != ERROR_OK)
2607 {
2608 break;
2609 }
2610 /* avoid GDB timeouts */
2611 keep_alive();
2612 }
2613 free(target_buf);
2614
2615 return retval;
2616 }
2617
2618
2619 COMMAND_HANDLER(handle_mw_command)
2620 {
2621 if (CMD_ARGC < 2)
2622 {
2623 return ERROR_COMMAND_SYNTAX_ERROR;
2624 }
2625 bool physical=strcmp(CMD_ARGV[0], "phys")==0;
2626 target_write_fn fn;
2627 if (physical)
2628 {
2629 CMD_ARGC--;
2630 CMD_ARGV++;
2631 fn=target_write_phys_memory;
2632 } else
2633 {
2634 fn = target_write_memory_fast;
2635 }
2636 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
2637 return ERROR_COMMAND_SYNTAX_ERROR;
2638
2639 uint32_t address;
2640 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2641
2642 uint32_t value;
2643 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2644
2645 unsigned count = 1;
2646 if (CMD_ARGC == 3)
2647 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
2648
2649 struct target *target = get_current_target(CMD_CTX);
2650 unsigned wordsize;
2651 switch (CMD_NAME[2])
2652 {
2653 case 'w':
2654 wordsize = 4;
2655 break;
2656 case 'h':
2657 wordsize = 2;
2658 break;
2659 case 'b':
2660 wordsize = 1;
2661 break;
2662 default:
2663 return ERROR_COMMAND_SYNTAX_ERROR;
2664 }
2665
2666 return target_fill_mem(target, address, fn, wordsize, value, count);
2667 }
2668
2669 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
2670 uint32_t *min_address, uint32_t *max_address)
2671 {
2672 if (CMD_ARGC < 1 || CMD_ARGC > 5)
2673 return ERROR_COMMAND_SYNTAX_ERROR;
2674
2675 /* a base address isn't always necessary,
2676 * default to 0x0 (i.e. don't relocate) */
2677 if (CMD_ARGC >= 2)
2678 {
2679 uint32_t addr;
2680 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
2681 image->base_address = addr;
2682 image->base_address_set = 1;
2683 }
2684 else
2685 image->base_address_set = 0;
2686
2687 image->start_address_set = 0;
2688
2689 if (CMD_ARGC >= 4)
2690 {
2691 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], *min_address);
2692 }
2693 if (CMD_ARGC == 5)
2694 {
2695 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], *max_address);
2696 // use size (given) to find max (required)
2697 *max_address += *min_address;
2698 }
2699
2700 if (*min_address > *max_address)
2701 return ERROR_COMMAND_SYNTAX_ERROR;
2702
2703 return ERROR_OK;
2704 }
2705
2706 COMMAND_HANDLER(handle_load_image_command)
2707 {
2708 uint8_t *buffer;
2709 size_t buf_cnt;
2710 uint32_t image_size;
2711 uint32_t min_address = 0;
2712 uint32_t max_address = 0xffffffff;
2713 int i;
2714 struct image image;
2715
2716 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
2717 &image, &min_address, &max_address);
2718 if (ERROR_OK != retval)
2719 return retval;
2720
2721 struct target *target = get_current_target(CMD_CTX);
2722
2723 struct duration bench;
2724 duration_start(&bench);
2725
2726 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
2727 {
2728 return ERROR_OK;
2729 }
2730
2731 image_size = 0x0;
2732 retval = ERROR_OK;
2733 for (i = 0; i < image.num_sections; i++)
2734 {
2735 buffer = malloc(image.sections[i].size);
2736 if (buffer == NULL)
2737 {
2738 command_print(CMD_CTX,
2739 "error allocating buffer for section (%d bytes)",
2740 (int)(image.sections[i].size));
2741 break;
2742 }
2743
2744 if ((retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt)) != ERROR_OK)
2745 {
2746 free(buffer);
2747 break;
2748 }
2749
2750 uint32_t offset = 0;
2751 uint32_t length = buf_cnt;
2752
2753 /* DANGER!!! beware of unsigned comparision here!!! */
2754
2755 if ((image.sections[i].base_address + buf_cnt >= min_address)&&
2756 (image.sections[i].base_address < max_address))
2757 {
2758 if (image.sections[i].base_address < min_address)
2759 {
2760 /* clip addresses below */
2761 offset += min_address-image.sections[i].base_address;
2762 length -= offset;
2763 }
2764
2765 if (image.sections[i].base_address + buf_cnt > max_address)
2766 {
2767 length -= (image.sections[i].base_address + buf_cnt)-max_address;
2768 }
2769
2770 if ((retval = target_write_buffer(target, image.sections[i].base_address + offset, length, buffer + offset)) != ERROR_OK)
2771 {
2772 free(buffer);
2773 break;
2774 }
2775 image_size += length;
2776 command_print(CMD_CTX, "%u bytes written at address 0x%8.8" PRIx32 "",
2777 (unsigned int)length,
2778 image.sections[i].base_address + offset);
2779 }
2780
2781 free(buffer);
2782 }
2783
2784 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK))
2785 {
2786 command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
2787 "in %fs (%0.3f KiB/s)", image_size,
2788 duration_elapsed(&bench), duration_kbps(&bench, image_size));
2789 }
2790
2791 image_close(&image);
2792
2793 return retval;
2794
2795 }
2796
2797 COMMAND_HANDLER(handle_dump_image_command)
2798 {
2799 struct fileio fileio;
2800 uint8_t buffer[560];
2801 int retval, retvaltemp;
2802 uint32_t address, size;
2803 struct duration bench;
2804 struct target *target = get_current_target(CMD_CTX);
2805
2806 if (CMD_ARGC != 3)
2807 return ERROR_COMMAND_SYNTAX_ERROR;
2808
2809 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], address);
2810 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], size);
2811
2812 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
2813 if (retval != ERROR_OK)
2814 return retval;
2815
2816 duration_start(&bench);
2817
2818 retval = ERROR_OK;
2819 while (size > 0)
2820 {
2821 size_t size_written;
2822 uint32_t this_run_size = (size > 560) ? 560 : size;
2823 retval = target_read_buffer(target, address, this_run_size, buffer);
2824 if (retval != ERROR_OK)
2825 {
2826 break;
2827 }
2828
2829 retval = fileio_write(&fileio, this_run_size, buffer, &size_written);
2830 if (retval != ERROR_OK)
2831 {
2832 break;
2833 }
2834
2835 size -= this_run_size;
2836 address += this_run_size;
2837 }
2838
2839 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK))
2840 {
2841 int filesize;
2842 retval = fileio_size(&fileio, &filesize);
2843 if (retval != ERROR_OK)
2844 return retval;
2845 command_print(CMD_CTX,
2846 "dumped %ld bytes in %fs (%0.3f KiB/s)", (long)filesize,
2847 duration_elapsed(&bench), duration_kbps(&bench, filesize));
2848 }
2849
2850 if ((retvaltemp = fileio_close(&fileio)) != ERROR_OK)
2851 return retvaltemp;
2852
2853 return retval;
2854 }
2855
/**
 * Shared implementation of 'verify_image' and 'test_image'.
 *
 * Usage: <file> [base_address] [type]
 *
 * With @a verify non-zero, each image section is CRC-checksummed both from
 * the file and on the target; on mismatch it falls back to a binary compare
 * and prints up to 128 differing bytes.  With @a verify zero it only prints
 * each section's address and length.
 *
 * Returns ERROR_OK when everything matched, ERROR_FAIL when any difference
 * was found, or another OpenOCD error code on I/O failure.
 *
 * NOTE(review): a malloc() failure breaks out of the loop with retval still
 * ERROR_OK, so the "verified" banner may be printed; the inner data malloc
 * is also unchecked — candidates for a follow-up fix.
 */
static COMMAND_HELPER(handle_verify_image_command_internal, int verify)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	int i;
	int retval;
	uint32_t checksum = 0;
	uint32_t mem_checksum = 0;

	struct image image;

	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC < 1)
	{
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	if (!target)
	{
		LOG_ERROR("no target selected");
		return ERROR_FAIL;
	}

	struct duration bench;
	duration_start(&bench);

	/* optional second argument relocates the image */
	if (CMD_ARGC >= 2)
	{
		uint32_t addr;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
		image.base_address = addr;
		image.base_address_set = 1;
	}
	else
	{
		image.base_address_set = 0;
		image.base_address = 0x0;
	}

	image.start_address_set = 0;

	if ((retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL)) != ERROR_OK)
	{
		return retval;
	}

	image_size = 0x0;
	int diffs = 0;
	retval = ERROR_OK;
	for (i = 0; i < image.num_sections; i++)
	{
		buffer = malloc(image.sections[i].size);
		if (buffer == NULL)
		{
			command_print(CMD_CTX,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			break;
		}
		if ((retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt)) != ERROR_OK)
		{
			free(buffer);
			break;
		}

		if (verify)
		{
			/* calculate checksum of image */
			retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
			if (retval != ERROR_OK)
			{
				free(buffer);
				break;
			}

			/* ... and have the target checksum the same address range */
			retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
			if (retval != ERROR_OK)
			{
				free(buffer);
				break;
			}

			if (checksum != mem_checksum)
			{
				/* failed crc checksum, fall back to a binary compare */
				uint8_t *data;

				if (diffs == 0)
				{
					LOG_ERROR("checksum mismatch - attempting binary compare");
				}

				data = (uint8_t*)malloc(buf_cnt);

				/* Can we use 32bit word accesses? */
				int size = 1;
				int count = buf_cnt;
				if ((count % 4) == 0)
				{
					size *= 4;
					count /= 4;
				}
				retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
				if (retval == ERROR_OK)
				{
					uint32_t t;
					for (t = 0; t < buf_cnt; t++)
					{
						if (data[t] != buffer[t])
						{
							command_print(CMD_CTX,
									"diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
									diffs,
									(unsigned)(t + image.sections[i].base_address),
									data[t],
									buffer[t]);
							/* cap the report at 128 differences */
							if (diffs++ >= 127)
							{
								command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
								free(data);
								free(buffer);
								goto done;
							}
						}
						/* avoid GDB timeouts during long compares */
						keep_alive();
					}
				}
				free(data);
			}
		} else
		{
			command_print(CMD_CTX, "address 0x%08" PRIx32 " length 0x%08zx",
					image.sections[i].base_address,
					buf_cnt);
		}

		free(buffer);
		image_size += buf_cnt;
	}
	if (diffs > 0)
	{
		command_print(CMD_CTX, "No more differences found.");
	}
done:
	/* any difference makes the whole command fail */
	if (diffs > 0)
	{
		retval = ERROR_FAIL;
	}
	if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK))
	{
		command_print(CMD_CTX, "verified %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;
}
3017
/* 'verify_image': checksum and (on mismatch) binary-compare an image file
 * against target memory. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 1);
}
3022
/* 'test_image': walk an image file's sections and print their address and
 * length without comparing against target memory (verify = 0). */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 0);
}
3027
3028 static int handle_bp_command_list(struct command_context *cmd_ctx)
3029 {
3030 struct target *target = get_current_target(cmd_ctx);
3031 struct breakpoint *breakpoint = target->breakpoints;
3032 while (breakpoint)
3033 {
3034 if (breakpoint->type == BKPT_SOFT)
3035 {
3036 char* buf = buf_to_str(breakpoint->orig_instr,
3037 breakpoint->length, 16);
3038 command_print(cmd_ctx, "IVA breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i, 0x%s",
3039 breakpoint->address,
3040 breakpoint->length,
3041 breakpoint->set, buf);
3042 free(buf);
3043 }
3044 else
3045 {
3046 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3047 command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3048 breakpoint->asid,
3049 breakpoint->length, breakpoint->set);
3050 else if ((breakpoint->address != 0) && (breakpoint->asid != 0))
3051 {
3052 command_print(cmd_ctx, "Hybrid breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
3053 breakpoint->address,
3054 breakpoint->length, breakpoint->set);
3055 command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3056 breakpoint->asid);
3057 }
3058 else
3059 command_print(cmd_ctx, "Breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
3060 breakpoint->address,
3061 breakpoint->length, breakpoint->set);
3062 }
3063
3064 breakpoint = breakpoint->next;
3065 }
3066 return ERROR_OK;
3067 }
3068
3069 static int handle_bp_command_set(struct command_context *cmd_ctx,
3070 uint32_t addr, uint32_t asid, uint32_t length, int hw)
3071 {
3072 struct target *target = get_current_target(cmd_ctx);
3073
3074 if (asid == 0)
3075 {
3076 int retval = breakpoint_add(target, addr, length, hw);
3077 if (ERROR_OK == retval)
3078 command_print(cmd_ctx, "breakpoint set at 0x%8.8" PRIx32 "", addr);
3079 else
3080 {
3081 LOG_ERROR("Failure setting breakpoint, the same address(IVA) is already used");
3082 return retval;
3083 }
3084 }
3085 else if (addr == 0)
3086 {
3087 int retval = context_breakpoint_add(target, asid, length, hw);
3088 if (ERROR_OK == retval)
3089 command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3090 else
3091 {
3092 LOG_ERROR("Failure setting breakpoint, the same address(CONTEXTID) is already used");
3093 return retval;
3094 }
3095 }
3096 else
3097 {
3098 int retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3099 if(ERROR_OK == retval)
3100 command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3101 else
3102 {
3103 LOG_ERROR("Failure setting breakpoint, the same address is already used");
3104 return retval;
3105 }
3106 }
3107 return ERROR_OK;
3108 }
3109
3110 COMMAND_HANDLER(handle_bp_command)
3111 {
3112 uint32_t addr;
3113 uint32_t asid;
3114 uint32_t length;
3115 int hw = BKPT_SOFT;
3116 switch(CMD_ARGC)
3117 {
3118 case 0:
3119 return handle_bp_command_list(CMD_CTX);
3120
3121 case 2:
3122 asid = 0;
3123 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3124 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3125 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3126
3127 case 3:
3128 if(strcmp(CMD_ARGV[2], "hw") == 0)
3129 {
3130 hw = BKPT_HARD;
3131 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3132
3133 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3134
3135 asid = 0;
3136 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3137 }
3138 else if(strcmp(CMD_ARGV[2], "hw_ctx") == 0)
3139 {
3140 hw = BKPT_HARD;
3141 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
3142 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3143 addr = 0;
3144 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3145 }
3146
3147 case 4:
3148 hw = BKPT_HARD;
3149 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3150 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
3151 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
3152 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3153
3154 default:
3155 return ERROR_COMMAND_SYNTAX_ERROR;
3156 }
3157 }
3158
3159 COMMAND_HANDLER(handle_rbp_command)
3160 {
3161 if (CMD_ARGC != 1)
3162 return ERROR_COMMAND_SYNTAX_ERROR;
3163
3164 uint32_t addr;
3165 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3166
3167 struct target *target = get_current_target(CMD_CTX);
3168 breakpoint_remove(target, addr);
3169
3170 return ERROR_OK;
3171 }
3172
3173 COMMAND_HANDLER(handle_wp_command)
3174 {
3175 struct target *target = get_current_target(CMD_CTX);
3176
3177 if (CMD_ARGC == 0)
3178 {
3179 struct watchpoint *watchpoint = target->watchpoints;
3180
3181 while (watchpoint)
3182 {
3183 command_print(CMD_CTX, "address: 0x%8.8" PRIx32
3184 ", len: 0x%8.8" PRIx32
3185 ", r/w/a: %i, value: 0x%8.8" PRIx32
3186 ", mask: 0x%8.8" PRIx32,
3187 watchpoint->address,
3188 watchpoint->length,
3189 (int)watchpoint->rw,
3190 watchpoint->value,
3191 watchpoint->mask);
3192 watchpoint = watchpoint->next;
3193 }
3194 return ERROR_OK;
3195 }
3196
3197 enum watchpoint_rw type = WPT_ACCESS;
3198 uint32_t addr = 0;
3199 uint32_t length = 0;
3200 uint32_t data_value = 0x0;
3201 uint32_t data_mask = 0xffffffff;
3202
3203 switch (CMD_ARGC)
3204 {
3205 case 5:
3206 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
3207 // fall through
3208 case 4:
3209 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
3210 // fall through
3211 case 3:
3212 switch (CMD_ARGV[2][0])
3213 {
3214 case 'r':
3215 type = WPT_READ;
3216