1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
40 ***************************************************************************/
46 #include <helper/time_support.h>
47 #include <jtag/jtag.h>
48 #include <flash/nor/core.h>
51 #include "target_type.h"
52 #include "target_request.h"
53 #include "breakpoints.h"
57 #include "rtos/rtos.h"
59 /* default halt wait timeout (ms) */
60 #define DEFAULT_HALT_TIMEOUT 5000
62 static int target_read_buffer_default(struct target
*target
, uint32_t address
,
63 uint32_t size
, uint8_t *buffer
);
64 static int target_write_buffer_default(struct target
*target
, uint32_t address
,
65 uint32_t size
, const uint8_t *buffer
);
66 static int target_array2mem(Jim_Interp
*interp
, struct target
*target
,
67 int argc
, Jim_Obj
* const *argv
);
68 static int target_mem2array(Jim_Interp
*interp
, struct target
*target
,
69 int argc
, Jim_Obj
* const *argv
);
70 static int target_register_user_commands(struct command_context
*cmd_ctx
);
71 static int target_get_gdb_fileio_info_default(struct target
*target
,
72 struct gdb_fileio_info
*fileio_info
);
73 static int target_gdb_fileio_end_default(struct target
*target
, int retcode
,
74 int fileio_errno
, bool ctrl_c
);
77 extern struct target_type arm7tdmi_target
;
78 extern struct target_type arm720t_target
;
79 extern struct target_type arm9tdmi_target
;
80 extern struct target_type arm920t_target
;
81 extern struct target_type arm966e_target
;
82 extern struct target_type arm946e_target
;
83 extern struct target_type arm926ejs_target
;
84 extern struct target_type fa526_target
;
85 extern struct target_type feroceon_target
;
86 extern struct target_type dragonite_target
;
87 extern struct target_type xscale_target
;
88 extern struct target_type cortexm3_target
;
89 extern struct target_type cortexa8_target
;
90 extern struct target_type cortexr4_target
;
91 extern struct target_type arm11_target
;
92 extern struct target_type mips_m4k_target
;
93 extern struct target_type avr_target
;
94 extern struct target_type dsp563xx_target
;
95 extern struct target_type dsp5680xx_target
;
96 extern struct target_type testee_target
;
97 extern struct target_type avr32_ap7k_target
;
98 extern struct target_type hla_target
;
99 extern struct target_type nds32_v2_target
;
100 extern struct target_type nds32_v3_target
;
101 extern struct target_type nds32_v3m_target
;
103 static struct target_type
*target_types
[] = {
/* Head of the singly-linked list of all configured targets. */
struct target *all_targets;
/* Registered event/timer callback chains (see target_call_*_callbacks). */
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
/* Poll period in ms — presumably used by the background poll handler;
 * confirm at the use site (not visible in this chunk). */
static const int polling_interval = 100;
137 static const Jim_Nvp nvp_assert
[] = {
138 { .name
= "assert", NVP_ASSERT
},
139 { .name
= "deassert", NVP_DEASSERT
},
140 { .name
= "T", NVP_ASSERT
},
141 { .name
= "F", NVP_DEASSERT
},
142 { .name
= "t", NVP_ASSERT
},
143 { .name
= "f", NVP_DEASSERT
},
144 { .name
= NULL
, .value
= -1 }
147 static const Jim_Nvp nvp_error_target
[] = {
148 { .value
= ERROR_TARGET_INVALID
, .name
= "err-invalid" },
149 { .value
= ERROR_TARGET_INIT_FAILED
, .name
= "err-init-failed" },
150 { .value
= ERROR_TARGET_TIMEOUT
, .name
= "err-timeout" },
151 { .value
= ERROR_TARGET_NOT_HALTED
, .name
= "err-not-halted" },
152 { .value
= ERROR_TARGET_FAILURE
, .name
= "err-failure" },
153 { .value
= ERROR_TARGET_UNALIGNED_ACCESS
, .name
= "err-unaligned-access" },
154 { .value
= ERROR_TARGET_DATA_ABORT
, .name
= "err-data-abort" },
155 { .value
= ERROR_TARGET_RESOURCE_NOT_AVAILABLE
, .name
= "err-resource-not-available" },
156 { .value
= ERROR_TARGET_TRANSLATION_FAULT
, .name
= "err-translation-fault" },
157 { .value
= ERROR_TARGET_NOT_RUNNING
, .name
= "err-not-running" },
158 { .value
= ERROR_TARGET_NOT_EXAMINED
, .name
= "err-not-examined" },
159 { .value
= -1, .name
= NULL
}
162 static const char *target_strerror_safe(int err
)
166 n
= Jim_Nvp_value2name_simple(nvp_error_target
, err
);
173 static const Jim_Nvp nvp_target_event
[] = {
175 { .value
= TARGET_EVENT_GDB_HALT
, .name
= "gdb-halt" },
176 { .value
= TARGET_EVENT_HALTED
, .name
= "halted" },
177 { .value
= TARGET_EVENT_RESUMED
, .name
= "resumed" },
178 { .value
= TARGET_EVENT_RESUME_START
, .name
= "resume-start" },
179 { .value
= TARGET_EVENT_RESUME_END
, .name
= "resume-end" },
181 { .name
= "gdb-start", .value
= TARGET_EVENT_GDB_START
},
182 { .name
= "gdb-end", .value
= TARGET_EVENT_GDB_END
},
184 { .value
= TARGET_EVENT_RESET_START
, .name
= "reset-start" },
185 { .value
= TARGET_EVENT_RESET_ASSERT_PRE
, .name
= "reset-assert-pre" },
186 { .value
= TARGET_EVENT_RESET_ASSERT
, .name
= "reset-assert" },
187 { .value
= TARGET_EVENT_RESET_ASSERT_POST
, .name
= "reset-assert-post" },
188 { .value
= TARGET_EVENT_RESET_DEASSERT_PRE
, .name
= "reset-deassert-pre" },
189 { .value
= TARGET_EVENT_RESET_DEASSERT_POST
, .name
= "reset-deassert-post" },
190 { .value
= TARGET_EVENT_RESET_HALT_PRE
, .name
= "reset-halt-pre" },
191 { .value
= TARGET_EVENT_RESET_HALT_POST
, .name
= "reset-halt-post" },
192 { .value
= TARGET_EVENT_RESET_WAIT_PRE
, .name
= "reset-wait-pre" },
193 { .value
= TARGET_EVENT_RESET_WAIT_POST
, .name
= "reset-wait-post" },
194 { .value
= TARGET_EVENT_RESET_INIT
, .name
= "reset-init" },
195 { .value
= TARGET_EVENT_RESET_END
, .name
= "reset-end" },
197 { .value
= TARGET_EVENT_EXAMINE_START
, .name
= "examine-start" },
198 { .value
= TARGET_EVENT_EXAMINE_END
, .name
= "examine-end" },
200 { .value
= TARGET_EVENT_DEBUG_HALTED
, .name
= "debug-halted" },
201 { .value
= TARGET_EVENT_DEBUG_RESUMED
, .name
= "debug-resumed" },
203 { .value
= TARGET_EVENT_GDB_ATTACH
, .name
= "gdb-attach" },
204 { .value
= TARGET_EVENT_GDB_DETACH
, .name
= "gdb-detach" },
206 { .value
= TARGET_EVENT_GDB_FLASH_WRITE_START
, .name
= "gdb-flash-write-start" },
207 { .value
= TARGET_EVENT_GDB_FLASH_WRITE_END
, .name
= "gdb-flash-write-end" },
209 { .value
= TARGET_EVENT_GDB_FLASH_ERASE_START
, .name
= "gdb-flash-erase-start" },
210 { .value
= TARGET_EVENT_GDB_FLASH_ERASE_END
, .name
= "gdb-flash-erase-end" },
212 { .name
= NULL
, .value
= -1 }
215 static const Jim_Nvp nvp_target_state
[] = {
216 { .name
= "unknown", .value
= TARGET_UNKNOWN
},
217 { .name
= "running", .value
= TARGET_RUNNING
},
218 { .name
= "halted", .value
= TARGET_HALTED
},
219 { .name
= "reset", .value
= TARGET_RESET
},
220 { .name
= "debug-running", .value
= TARGET_DEBUG_RUNNING
},
221 { .name
= NULL
, .value
= -1 },
224 static const Jim_Nvp nvp_target_debug_reason
[] = {
225 { .name
= "debug-request" , .value
= DBG_REASON_DBGRQ
},
226 { .name
= "breakpoint" , .value
= DBG_REASON_BREAKPOINT
},
227 { .name
= "watchpoint" , .value
= DBG_REASON_WATCHPOINT
},
228 { .name
= "watchpoint-and-breakpoint", .value
= DBG_REASON_WPTANDBKPT
},
229 { .name
= "single-step" , .value
= DBG_REASON_SINGLESTEP
},
230 { .name
= "target-not-halted" , .value
= DBG_REASON_NOTHALTED
},
231 { .name
= "program-exit" , .value
= DBG_REASON_EXIT
},
232 { .name
= "undefined" , .value
= DBG_REASON_UNDEFINED
},
233 { .name
= NULL
, .value
= -1 },
236 static const Jim_Nvp nvp_target_endian
[] = {
237 { .name
= "big", .value
= TARGET_BIG_ENDIAN
},
238 { .name
= "little", .value
= TARGET_LITTLE_ENDIAN
},
239 { .name
= "be", .value
= TARGET_BIG_ENDIAN
},
240 { .name
= "le", .value
= TARGET_LITTLE_ENDIAN
},
241 { .name
= NULL
, .value
= -1 },
244 static const Jim_Nvp nvp_reset_modes
[] = {
245 { .name
= "unknown", .value
= RESET_UNKNOWN
},
246 { .name
= "run" , .value
= RESET_RUN
},
247 { .name
= "halt" , .value
= RESET_HALT
},
248 { .name
= "init" , .value
= RESET_INIT
},
249 { .name
= NULL
, .value
= -1 },
252 const char *debug_reason_name(struct target
*t
)
256 cp
= Jim_Nvp_value2name_simple(nvp_target_debug_reason
,
257 t
->debug_reason
)->name
;
259 LOG_ERROR("Invalid debug reason: %d", (int)(t
->debug_reason
));
260 cp
= "(*BUG*unknown*BUG*)";
265 const char *target_state_name(struct target
*t
)
268 cp
= Jim_Nvp_value2name_simple(nvp_target_state
, t
->state
)->name
;
270 LOG_ERROR("Invalid target state: %d", (int)(t
->state
));
271 cp
= "(*BUG*unknown*BUG*)";
276 /* determine the number of the new target */
277 static int new_target_number(void)
282 /* number is 0 based */
286 if (x
< t
->target_number
)
287 x
= t
->target_number
;
293 /* read a uint32_t from a buffer in target memory endianness */
294 uint32_t target_buffer_get_u32(struct target
*target
, const uint8_t *buffer
)
296 if (target
->endianness
== TARGET_LITTLE_ENDIAN
)
297 return le_to_h_u32(buffer
);
299 return be_to_h_u32(buffer
);
302 /* read a uint24_t from a buffer in target memory endianness */
303 uint32_t target_buffer_get_u24(struct target
*target
, const uint8_t *buffer
)
305 if (target
->endianness
== TARGET_LITTLE_ENDIAN
)
306 return le_to_h_u24(buffer
);
308 return be_to_h_u24(buffer
);
311 /* read a uint16_t from a buffer in target memory endianness */
312 uint16_t target_buffer_get_u16(struct target
*target
, const uint8_t *buffer
)
314 if (target
->endianness
== TARGET_LITTLE_ENDIAN
)
315 return le_to_h_u16(buffer
);
317 return be_to_h_u16(buffer
);
320 /* read a uint8_t from a buffer in target memory endianness */
321 static uint8_t target_buffer_get_u8(struct target
*target
, const uint8_t *buffer
)
323 return *buffer
& 0x0ff;
326 /* write a uint32_t to a buffer in target memory endianness */
327 void target_buffer_set_u32(struct target
*target
, uint8_t *buffer
, uint32_t value
)
329 if (target
->endianness
== TARGET_LITTLE_ENDIAN
)
330 h_u32_to_le(buffer
, value
);
332 h_u32_to_be(buffer
, value
);
335 /* write a uint24_t to a buffer in target memory endianness */
336 void target_buffer_set_u24(struct target
*target
, uint8_t *buffer
, uint32_t value
)
338 if (target
->endianness
== TARGET_LITTLE_ENDIAN
)
339 h_u24_to_le(buffer
, value
);
341 h_u24_to_be(buffer
, value
);
344 /* write a uint16_t to a buffer in target memory endianness */
345 void target_buffer_set_u16(struct target
*target
, uint8_t *buffer
, uint16_t value
)
347 if (target
->endianness
== TARGET_LITTLE_ENDIAN
)
348 h_u16_to_le(buffer
, value
);
350 h_u16_to_be(buffer
, value
);
353 /* write a uint8_t to a buffer in target memory endianness */
354 static void target_buffer_set_u8(struct target
*target
, uint8_t *buffer
, uint8_t value
)
359 /* write a uint32_t array to a buffer in target memory endianness */
360 void target_buffer_get_u32_array(struct target
*target
, const uint8_t *buffer
, uint32_t count
, uint32_t *dstbuf
)
363 for (i
= 0; i
< count
; i
++)
364 dstbuf
[i
] = target_buffer_get_u32(target
, &buffer
[i
* 4]);
367 /* write a uint16_t array to a buffer in target memory endianness */
368 void target_buffer_get_u16_array(struct target
*target
, const uint8_t *buffer
, uint32_t count
, uint16_t *dstbuf
)
371 for (i
= 0; i
< count
; i
++)
372 dstbuf
[i
] = target_buffer_get_u16(target
, &buffer
[i
* 2]);
375 /* write a uint32_t array to a buffer in target memory endianness */
376 void target_buffer_set_u32_array(struct target
*target
, uint8_t *buffer
, uint32_t count
, uint32_t *srcbuf
)
379 for (i
= 0; i
< count
; i
++)
380 target_buffer_set_u32(target
, &buffer
[i
* 4], srcbuf
[i
]);
383 /* write a uint16_t array to a buffer in target memory endianness */
384 void target_buffer_set_u16_array(struct target
*target
, uint8_t *buffer
, uint32_t count
, uint16_t *srcbuf
)
387 for (i
= 0; i
< count
; i
++)
388 target_buffer_set_u16(target
, &buffer
[i
* 2], srcbuf
[i
]);
391 /* return a pointer to a configured target; id is name or number */
392 struct target
*get_target(const char *id
)
394 struct target
*target
;
396 /* try as tcltarget name */
397 for (target
= all_targets
; target
; target
= target
->next
) {
398 if (target_name(target
) == NULL
)
400 if (strcmp(id
, target_name(target
)) == 0)
404 /* It's OK to remove this fallback sometime after August 2010 or so */
406 /* no match, try as number */
408 if (parse_uint(id
, &num
) != ERROR_OK
)
411 for (target
= all_targets
; target
; target
= target
->next
) {
412 if (target
->target_number
== (int)num
) {
413 LOG_WARNING("use '%s' as target identifier, not '%u'",
414 target_name(target
), num
);
422 /* returns a pointer to the n-th configured target */
423 static struct target
*get_target_by_num(int num
)
425 struct target
*target
= all_targets
;
428 if (target
->target_number
== num
)
430 target
= target
->next
;
436 struct target
*get_current_target(struct command_context
*cmd_ctx
)
438 struct target
*target
= get_target_by_num(cmd_ctx
->current_target
);
440 if (target
== NULL
) {
441 LOG_ERROR("BUG: current_target out of bounds");
448 int target_poll(struct target
*target
)
452 /* We can't poll until after examine */
453 if (!target_was_examined(target
)) {
454 /* Fail silently lest we pollute the log */
458 retval
= target
->type
->poll(target
);
459 if (retval
!= ERROR_OK
)
462 if (target
->halt_issued
) {
463 if (target
->state
== TARGET_HALTED
)
464 target
->halt_issued
= false;
466 long long t
= timeval_ms() - target
->halt_issued_time
;
467 if (t
> DEFAULT_HALT_TIMEOUT
) {
468 target
->halt_issued
= false;
469 LOG_INFO("Halt timed out, wake up GDB.");
470 target_call_event_callbacks(target
, TARGET_EVENT_GDB_HALT
);
478 int target_halt(struct target
*target
)
481 /* We can't poll until after examine */
482 if (!target_was_examined(target
)) {
483 LOG_ERROR("Target not examined yet");
487 retval
= target
->type
->halt(target
);
488 if (retval
!= ERROR_OK
)
491 target
->halt_issued
= true;
492 target
->halt_issued_time
= timeval_ms();
498 * Make the target (re)start executing using its saved execution
499 * context (possibly with some modifications).
501 * @param target Which target should start executing.
502 * @param current True to use the target's saved program counter instead
503 * of the address parameter
504 * @param address Optionally used as the program counter.
505 * @param handle_breakpoints True iff breakpoints at the resumption PC
506 * should be skipped. (For example, maybe execution was stopped by
507 * such a breakpoint, in which case it would be counterprodutive to
509 * @param debug_execution False if all working areas allocated by OpenOCD
510 * should be released and/or restored to their original contents.
511 * (This would for example be true to run some downloaded "helper"
512 * algorithm code, which resides in one such working buffer and uses
513 * another for data storage.)
515 * @todo Resolve the ambiguity about what the "debug_execution" flag
516 * signifies. For example, Target implementations don't agree on how
517 * it relates to invalidation of the register cache, or to whether
518 * breakpoints and watchpoints should be enabled. (It would seem wrong
519 * to enable breakpoints when running downloaded "helper" algorithms
520 * (debug_execution true), since the breakpoints would be set to match
521 * target firmware being debugged, not the helper algorithm.... and
522 * enabling them could cause such helpers to malfunction (for example,
523 * by overwriting data with a breakpoint instruction. On the other
524 * hand the infrastructure for running such helpers might use this
525 * procedure but rely on hardware breakpoint to detect termination.)
527 int target_resume(struct target
*target
, int current
, uint32_t address
, int handle_breakpoints
, int debug_execution
)
531 /* We can't poll until after examine */
532 if (!target_was_examined(target
)) {
533 LOG_ERROR("Target not examined yet");
537 target_call_event_callbacks(target
, TARGET_EVENT_RESUME_START
);
539 /* note that resume *must* be asynchronous. The CPU can halt before
540 * we poll. The CPU can even halt at the current PC as a result of
541 * a software breakpoint being inserted by (a bug?) the application.
543 retval
= target
->type
->resume(target
, current
, address
, handle_breakpoints
, debug_execution
);
544 if (retval
!= ERROR_OK
)
547 target_call_event_callbacks(target
, TARGET_EVENT_RESUME_END
);
552 static int target_process_reset(struct command_context
*cmd_ctx
, enum target_reset_mode reset_mode
)
557 n
= Jim_Nvp_value2name_simple(nvp_reset_modes
, reset_mode
);
558 if (n
->name
== NULL
) {
559 LOG_ERROR("invalid reset mode");
563 /* disable polling during reset to make reset event scripts
564 * more predictable, i.e. dr/irscan & pathmove in events will
565 * not have JTAG operations injected into the middle of a sequence.
567 bool save_poll
= jtag_poll_get_enabled();
569 jtag_poll_set_enabled(false);
571 sprintf(buf
, "ocd_process_reset %s", n
->name
);
572 retval
= Jim_Eval(cmd_ctx
->interp
, buf
);
574 jtag_poll_set_enabled(save_poll
);
576 if (retval
!= JIM_OK
) {
577 Jim_MakeErrorMessage(cmd_ctx
->interp
);
578 command_print(NULL
, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx
->interp
), NULL
));
582 /* We want any events to be processed before the prompt */
583 retval
= target_call_timer_callbacks_now();
585 struct target
*target
;
586 for (target
= all_targets
; target
; target
= target
->next
) {
587 target
->type
->check_reset(target
);
588 target
->running_alg
= false;
594 static int identity_virt2phys(struct target
*target
,
595 uint32_t virtual, uint32_t *physical
)
601 static int no_mmu(struct target
*target
, int *enabled
)
607 static int default_examine(struct target
*target
)
609 target_set_examined(target
);
613 /* no check by default */
614 static int default_check_reset(struct target
*target
)
619 int target_examine_one(struct target
*target
)
621 return target
->type
->examine(target
);
624 static int jtag_enable_callback(enum jtag_event event
, void *priv
)
626 struct target
*target
= priv
;
628 if (event
!= JTAG_TAP_EVENT_ENABLE
|| !target
->tap
->enabled
)
631 jtag_unregister_event_callback(jtag_enable_callback
, target
);
633 target_call_event_callbacks(target
, TARGET_EVENT_EXAMINE_START
);
635 int retval
= target_examine_one(target
);
636 if (retval
!= ERROR_OK
)
639 target_call_event_callbacks(target
, TARGET_EVENT_EXAMINE_END
);
644 /* Targets that correctly implement init + examine, i.e.
645 * no communication with target during init:
649 int target_examine(void)
651 int retval
= ERROR_OK
;
652 struct target
*target
;
654 for (target
= all_targets
; target
; target
= target
->next
) {
655 /* defer examination, but don't skip it */
656 if (!target
->tap
->enabled
) {
657 jtag_register_event_callback(jtag_enable_callback
,
662 target_call_event_callbacks(target
, TARGET_EVENT_EXAMINE_START
);
664 retval
= target_examine_one(target
);
665 if (retval
!= ERROR_OK
)
668 target_call_event_callbacks(target
, TARGET_EVENT_EXAMINE_END
);
673 const char *target_type_name(struct target
*target
)
675 return target
->type
->name
;
678 static int target_soft_reset_halt(struct target
*target
)
680 if (!target_was_examined(target
)) {
681 LOG_ERROR("Target not examined yet");
684 if (!target
->type
->soft_reset_halt
) {
685 LOG_ERROR("Target %s does not support soft_reset_halt",
686 target_name(target
));
689 return target
->type
->soft_reset_halt(target
);
693 * Downloads a target-specific native code algorithm to the target,
694 * and executes it. * Note that some targets may need to set up, enable,
695 * and tear down a breakpoint (hard or * soft) to detect algorithm
696 * termination, while others may support lower overhead schemes where
697 * soft breakpoints embedded in the algorithm automatically terminate the
700 * @param target used to run the algorithm
701 * @param arch_info target-specific description of the algorithm.
703 int target_run_algorithm(struct target
*target
,
704 int num_mem_params
, struct mem_param
*mem_params
,
705 int num_reg_params
, struct reg_param
*reg_param
,
706 uint32_t entry_point
, uint32_t exit_point
,
707 int timeout_ms
, void *arch_info
)
709 int retval
= ERROR_FAIL
;
711 if (!target_was_examined(target
)) {
712 LOG_ERROR("Target not examined yet");
715 if (!target
->type
->run_algorithm
) {
716 LOG_ERROR("Target type '%s' does not support %s",
717 target_type_name(target
), __func__
);
721 target
->running_alg
= true;
722 retval
= target
->type
->run_algorithm(target
,
723 num_mem_params
, mem_params
,
724 num_reg_params
, reg_param
,
725 entry_point
, exit_point
, timeout_ms
, arch_info
);
726 target
->running_alg
= false;
733 * Downloads a target-specific native code algorithm to the target,
734 * executes and leaves it running.
736 * @param target used to run the algorithm
737 * @param arch_info target-specific description of the algorithm.
739 int target_start_algorithm(struct target
*target
,
740 int num_mem_params
, struct mem_param
*mem_params
,
741 int num_reg_params
, struct reg_param
*reg_params
,
742 uint32_t entry_point
, uint32_t exit_point
,
745 int retval
= ERROR_FAIL
;
747 if (!target_was_examined(target
)) {
748 LOG_ERROR("Target not examined yet");
751 if (!target
->type
->start_algorithm
) {
752 LOG_ERROR("Target type '%s' does not support %s",
753 target_type_name(target
), __func__
);
756 if (target
->running_alg
) {
757 LOG_ERROR("Target is already running an algorithm");
761 target
->running_alg
= true;
762 retval
= target
->type
->start_algorithm(target
,
763 num_mem_params
, mem_params
,
764 num_reg_params
, reg_params
,
765 entry_point
, exit_point
, arch_info
);
772 * Waits for an algorithm started with target_start_algorithm() to complete.
774 * @param target used to run the algorithm
775 * @param arch_info target-specific description of the algorithm.
777 int target_wait_algorithm(struct target
*target
,
778 int num_mem_params
, struct mem_param
*mem_params
,
779 int num_reg_params
, struct reg_param
*reg_params
,
780 uint32_t exit_point
, int timeout_ms
,
783 int retval
= ERROR_FAIL
;
785 if (!target
->type
->wait_algorithm
) {
786 LOG_ERROR("Target type '%s' does not support %s",
787 target_type_name(target
), __func__
);
790 if (!target
->running_alg
) {
791 LOG_ERROR("Target is not running an algorithm");
795 retval
= target
->type
->wait_algorithm(target
,
796 num_mem_params
, mem_params
,
797 num_reg_params
, reg_params
,
798 exit_point
, timeout_ms
, arch_info
);
799 if (retval
!= ERROR_TARGET_TIMEOUT
)
800 target
->running_alg
= false;
807 * Executes a target-specific native code algorithm in the target.
808 * It differs from target_run_algorithm in that the algorithm is asynchronous.
809 * Because of this it requires an compliant algorithm:
810 * see contrib/loaders/flash/stm32f1x.S for example.
812 * @param target used to run the algorithm
815 int target_run_flash_async_algorithm(struct target
*target
,
816 uint8_t *buffer
, uint32_t count
, int block_size
,
817 int num_mem_params
, struct mem_param
*mem_params
,
818 int num_reg_params
, struct reg_param
*reg_params
,
819 uint32_t buffer_start
, uint32_t buffer_size
,
820 uint32_t entry_point
, uint32_t exit_point
, void *arch_info
)
825 /* Set up working area. First word is write pointer, second word is read pointer,
826 * rest is fifo data area. */
827 uint32_t wp_addr
= buffer_start
;
828 uint32_t rp_addr
= buffer_start
+ 4;
829 uint32_t fifo_start_addr
= buffer_start
+ 8;
830 uint32_t fifo_end_addr
= buffer_start
+ buffer_size
;
832 uint32_t wp
= fifo_start_addr
;
833 uint32_t rp
= fifo_start_addr
;
835 /* validate block_size is 2^n */
836 assert(!block_size
|| !(block_size
& (block_size
- 1)));
838 retval
= target_write_u32(target
, wp_addr
, wp
);
839 if (retval
!= ERROR_OK
)
841 retval
= target_write_u32(target
, rp_addr
, rp
);
842 if (retval
!= ERROR_OK
)
845 /* Start up algorithm on target and let it idle while writing the first chunk */
846 retval
= target_start_algorithm(target
, num_mem_params
, mem_params
,
847 num_reg_params
, reg_params
,
852 if (retval
!= ERROR_OK
) {
853 LOG_ERROR("error starting target flash write algorithm");
859 retval
= target_read_u32(target
, rp_addr
, &rp
);
860 if (retval
!= ERROR_OK
) {
861 LOG_ERROR("failed to get read pointer");
865 LOG_DEBUG("count 0x%" PRIx32
" wp 0x%" PRIx32
" rp 0x%" PRIx32
, count
, wp
, rp
);
868 LOG_ERROR("flash write algorithm aborted by target");
869 retval
= ERROR_FLASH_OPERATION_FAILED
;
873 if ((rp
& (block_size
- 1)) || rp
< fifo_start_addr
|| rp
>= fifo_end_addr
) {
874 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32
, rp
);
878 /* Count the number of bytes available in the fifo without
879 * crossing the wrap around. Make sure to not fill it completely,
880 * because that would make wp == rp and that's the empty condition. */
881 uint32_t thisrun_bytes
;
883 thisrun_bytes
= rp
- wp
- block_size
;
884 else if (rp
> fifo_start_addr
)
885 thisrun_bytes
= fifo_end_addr
- wp
;
887 thisrun_bytes
= fifo_end_addr
- wp
- block_size
;
889 if (thisrun_bytes
== 0) {
890 /* Throttle polling a bit if transfer is (much) faster than flash
891 * programming. The exact delay shouldn't matter as long as it's
892 * less than buffer size / flash speed. This is very unlikely to
893 * run when using high latency connections such as USB. */
896 /* to stop an infinite loop on some targets check and increment a timeout
897 * this issue was observed on a stellaris using the new ICDI interface */
898 if (timeout
++ >= 500) {
899 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
900 return ERROR_FLASH_OPERATION_FAILED
;
905 /* reset our timeout */
908 /* Limit to the amount of data we actually want to write */
909 if (thisrun_bytes
> count
* block_size
)
910 thisrun_bytes
= count
* block_size
;
912 /* Write data to fifo */
913 retval
= target_write_buffer(target
, wp
, thisrun_bytes
, buffer
);
914 if (retval
!= ERROR_OK
)
917 /* Update counters and wrap write pointer */
918 buffer
+= thisrun_bytes
;
919 count
-= thisrun_bytes
/ block_size
;
921 if (wp
>= fifo_end_addr
)
922 wp
= fifo_start_addr
;
924 /* Store updated write pointer to target */
925 retval
= target_write_u32(target
, wp_addr
, wp
);
926 if (retval
!= ERROR_OK
)
930 if (retval
!= ERROR_OK
) {
931 /* abort flash write algorithm on target */
932 target_write_u32(target
, wp_addr
, 0);
935 int retval2
= target_wait_algorithm(target
, num_mem_params
, mem_params
,
936 num_reg_params
, reg_params
,
941 if (retval2
!= ERROR_OK
) {
942 LOG_ERROR("error waiting for target flash write algorithm");
949 int target_read_memory(struct target
*target
,
950 uint32_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
)
952 if (!target_was_examined(target
)) {
953 LOG_ERROR("Target not examined yet");
956 return target
->type
->read_memory(target
, address
, size
, count
, buffer
);
959 int target_read_phys_memory(struct target
*target
,
960 uint32_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
)
962 if (!target_was_examined(target
)) {
963 LOG_ERROR("Target not examined yet");
966 return target
->type
->read_phys_memory(target
, address
, size
, count
, buffer
);
969 int target_write_memory(struct target
*target
,
970 uint32_t address
, uint32_t size
, uint32_t count
, const uint8_t *buffer
)
972 if (!target_was_examined(target
)) {
973 LOG_ERROR("Target not examined yet");
976 return target
->type
->write_memory(target
, address
, size
, count
, buffer
);
979 int target_write_phys_memory(struct target
*target
,
980 uint32_t address
, uint32_t size
, uint32_t count
, const uint8_t *buffer
)
982 if (!target_was_examined(target
)) {
983 LOG_ERROR("Target not examined yet");
986 return target
->type
->write_phys_memory(target
, address
, size
, count
, buffer
);
989 static int target_bulk_write_memory_default(struct target
*target
,
990 uint32_t address
, uint32_t count
, const uint8_t *buffer
)
992 return target_write_memory(target
, address
, 4, count
, buffer
);
995 int target_add_breakpoint(struct target
*target
,
996 struct breakpoint
*breakpoint
)
998 if ((target
->state
!= TARGET_HALTED
) && (breakpoint
->type
!= BKPT_HARD
)) {
999 LOG_WARNING("target %s is not halted", target_name(target
));
1000 return ERROR_TARGET_NOT_HALTED
;
1002 return target
->type
->add_breakpoint(target
, breakpoint
);
1005 int target_add_context_breakpoint(struct target
*target
,
1006 struct breakpoint
*breakpoint
)
1008 if (target
->state
!= TARGET_HALTED
) {
1009 LOG_WARNING("target %s is not halted", target_name(target
));
1010 return ERROR_TARGET_NOT_HALTED
;
1012 return target
->type
->add_context_breakpoint(target
, breakpoint
);
1015 int target_add_hybrid_breakpoint(struct target
*target
,
1016 struct breakpoint
*breakpoint
)
1018 if (target
->state
!= TARGET_HALTED
) {
1019 LOG_WARNING("target %s is not halted", target_name(target
));
1020 return ERROR_TARGET_NOT_HALTED
;
1022 return target
->type
->add_hybrid_breakpoint(target
, breakpoint
);
1025 int target_remove_breakpoint(struct target
*target
,
1026 struct breakpoint
*breakpoint
)
1028 return target
->type
->remove_breakpoint(target
, breakpoint
);
1031 int target_add_watchpoint(struct target
*target
,
1032 struct watchpoint
*watchpoint
)
1034 if (target
->state
!= TARGET_HALTED
) {
1035 LOG_WARNING("target %s is not halted", target_name(target
));
1036 return ERROR_TARGET_NOT_HALTED
;
1038 return target
->type
->add_watchpoint(target
, watchpoint
);
1040 int target_remove_watchpoint(struct target
*target
,
1041 struct watchpoint
*watchpoint
)
1043 return target
->type
->remove_watchpoint(target
, watchpoint
);
1045 int target_hit_watchpoint(struct target
*target
,
1046 struct watchpoint
**hit_watchpoint
)
1048 if (target
->state
!= TARGET_HALTED
) {
1049 LOG_WARNING("target %s is not halted", target
->cmd_name
);
1050 return ERROR_TARGET_NOT_HALTED
;
1053 if (target
->type
->hit_watchpoint
== NULL
) {
1054 /* For backward compatible, if hit_watchpoint is not implemented,
1055 * return ERROR_FAIL such that gdb_server will not take the nonsense
1060 return target
->type
->hit_watchpoint(target
, hit_watchpoint
);
1063 int target_get_gdb_reg_list(struct target
*target
,
1064 struct reg
**reg_list
[], int *reg_list_size
,
1065 enum target_register_class reg_class
)
1067 return target
->type
->get_gdb_reg_list(target
, reg_list
, reg_list_size
, reg_class
);
1069 int target_step(struct target
*target
,
1070 int current
, uint32_t address
, int handle_breakpoints
)
1072 return target
->type
->step(target
, current
, address
, handle_breakpoints
);
1075 int target_get_gdb_fileio_info(struct target
*target
, struct gdb_fileio_info
*fileio_info
)
1077 if (target
->state
!= TARGET_HALTED
) {
1078 LOG_WARNING("target %s is not halted", target
->cmd_name
);
1079 return ERROR_TARGET_NOT_HALTED
;
1081 return target
->type
->get_gdb_fileio_info(target
, fileio_info
);
1084 int target_gdb_fileio_end(struct target
*target
, int retcode
, int fileio_errno
, bool ctrl_c
)
1086 if (target
->state
!= TARGET_HALTED
) {
1087 LOG_WARNING("target %s is not halted", target
->cmd_name
);
1088 return ERROR_TARGET_NOT_HALTED
;
1090 return target
->type
->gdb_fileio_end(target
, retcode
, fileio_errno
, ctrl_c
);
1094 * Reset the @c examined flag for the given target.
1095 * Pure paranoia -- targets are zeroed on allocation.
1097 static void target_reset_examined(struct target
*target
)
1099 target
->examined
= false;
1102 static int err_read_phys_memory(struct target
*target
, uint32_t address
,
1103 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1105 LOG_ERROR("Not implemented: %s", __func__
);
1109 static int err_write_phys_memory(struct target
*target
, uint32_t address
,
1110 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
1112 LOG_ERROR("Not implemented: %s", __func__
);
1116 static int handle_target(void *priv
);
1118 static int target_init_one(struct command_context
*cmd_ctx
,
1119 struct target
*target
)
1121 target_reset_examined(target
);
1123 struct target_type
*type
= target
->type
;
1124 if (type
->examine
== NULL
)
1125 type
->examine
= default_examine
;
1127 if (type
->check_reset
== NULL
)
1128 type
->check_reset
= default_check_reset
;
1130 assert(type
->init_target
!= NULL
);
1132 int retval
= type
->init_target(cmd_ctx
, target
);
1133 if (ERROR_OK
!= retval
) {
1134 LOG_ERROR("target '%s' init failed", target_name(target
));
1138 /* Sanity-check MMU support ... stub in what we must, to help
1139 * implement it in stages, but warn if we need to do so.
1142 if (type
->write_phys_memory
== NULL
) {
1143 LOG_ERROR("type '%s' is missing write_phys_memory",
1145 type
->write_phys_memory
= err_write_phys_memory
;
1147 if (type
->read_phys_memory
== NULL
) {
1148 LOG_ERROR("type '%s' is missing read_phys_memory",
1150 type
->read_phys_memory
= err_read_phys_memory
;
1152 if (type
->virt2phys
== NULL
) {
1153 LOG_ERROR("type '%s' is missing virt2phys", type
->name
);
1154 type
->virt2phys
= identity_virt2phys
;
1157 /* Make sure no-MMU targets all behave the same: make no
1158 * distinction between physical and virtual addresses, and
1159 * ensure that virt2phys() is always an identity mapping.
1161 if (type
->write_phys_memory
|| type
->read_phys_memory
|| type
->virt2phys
)
1162 LOG_WARNING("type '%s' has bad MMU hooks", type
->name
);
1165 type
->write_phys_memory
= type
->write_memory
;
1166 type
->read_phys_memory
= type
->read_memory
;
1167 type
->virt2phys
= identity_virt2phys
;
1170 if (target
->type
->read_buffer
== NULL
)
1171 target
->type
->read_buffer
= target_read_buffer_default
;
1173 if (target
->type
->write_buffer
== NULL
)
1174 target
->type
->write_buffer
= target_write_buffer_default
;
1176 if (target
->type
->bulk_write_memory
== NULL
)
1177 target
->type
->bulk_write_memory
= target_bulk_write_memory_default
;
1179 if (target
->type
->get_gdb_fileio_info
== NULL
)
1180 target
->type
->get_gdb_fileio_info
= target_get_gdb_fileio_info_default
;
1182 if (target
->type
->gdb_fileio_end
== NULL
)
1183 target
->type
->gdb_fileio_end
= target_gdb_fileio_end_default
;
1188 static int target_init(struct command_context
*cmd_ctx
)
1190 struct target
*target
;
1193 for (target
= all_targets
; target
; target
= target
->next
) {
1194 retval
= target_init_one(cmd_ctx
, target
);
1195 if (ERROR_OK
!= retval
)
1202 retval
= target_register_user_commands(cmd_ctx
);
1203 if (ERROR_OK
!= retval
)
1206 retval
= target_register_timer_callback(&handle_target
,
1207 polling_interval
, 1, cmd_ctx
->interp
);
1208 if (ERROR_OK
!= retval
)
1214 COMMAND_HANDLER(handle_target_init_command
)
1219 return ERROR_COMMAND_SYNTAX_ERROR
;
1221 static bool target_initialized
;
1222 if (target_initialized
) {
1223 LOG_INFO("'target init' has already been called");
1226 target_initialized
= true;
1228 retval
= command_run_line(CMD_CTX
, "init_targets");
1229 if (ERROR_OK
!= retval
)
1232 retval
= command_run_line(CMD_CTX
, "init_board");
1233 if (ERROR_OK
!= retval
)
1236 LOG_DEBUG("Initializing targets...");
1237 return target_init(CMD_CTX
);
1240 int target_register_event_callback(int (*callback
)(struct target
*target
,
1241 enum target_event event
, void *priv
), void *priv
)
1243 struct target_event_callback
**callbacks_p
= &target_event_callbacks
;
1245 if (callback
== NULL
)
1246 return ERROR_COMMAND_SYNTAX_ERROR
;
1249 while ((*callbacks_p
)->next
)
1250 callbacks_p
= &((*callbacks_p
)->next
);
1251 callbacks_p
= &((*callbacks_p
)->next
);
1254 (*callbacks_p
) = malloc(sizeof(struct target_event_callback
));
1255 (*callbacks_p
)->callback
= callback
;
1256 (*callbacks_p
)->priv
= priv
;
1257 (*callbacks_p
)->next
= NULL
;
1262 int target_register_timer_callback(int (*callback
)(void *priv
), int time_ms
, int periodic
, void *priv
)
1264 struct target_timer_callback
**callbacks_p
= &target_timer_callbacks
;
1267 if (callback
== NULL
)
1268 return ERROR_COMMAND_SYNTAX_ERROR
;
1271 while ((*callbacks_p
)->next
)
1272 callbacks_p
= &((*callbacks_p
)->next
);
1273 callbacks_p
= &((*callbacks_p
)->next
);
1276 (*callbacks_p
) = malloc(sizeof(struct target_timer_callback
));
1277 (*callbacks_p
)->callback
= callback
;
1278 (*callbacks_p
)->periodic
= periodic
;
1279 (*callbacks_p
)->time_ms
= time_ms
;
1281 gettimeofday(&now
, NULL
);
1282 (*callbacks_p
)->when
.tv_usec
= now
.tv_usec
+ (time_ms
% 1000) * 1000;
1283 time_ms
-= (time_ms
% 1000);
1284 (*callbacks_p
)->when
.tv_sec
= now
.tv_sec
+ (time_ms
/ 1000);
1285 if ((*callbacks_p
)->when
.tv_usec
> 1000000) {
1286 (*callbacks_p
)->when
.tv_usec
= (*callbacks_p
)->when
.tv_usec
- 1000000;
1287 (*callbacks_p
)->when
.tv_sec
+= 1;
1290 (*callbacks_p
)->priv
= priv
;
1291 (*callbacks_p
)->next
= NULL
;
1296 int target_unregister_event_callback(int (*callback
)(struct target
*target
,
1297 enum target_event event
, void *priv
), void *priv
)
1299 struct target_event_callback
**p
= &target_event_callbacks
;
1300 struct target_event_callback
*c
= target_event_callbacks
;
1302 if (callback
== NULL
)
1303 return ERROR_COMMAND_SYNTAX_ERROR
;
1306 struct target_event_callback
*next
= c
->next
;
1307 if ((c
->callback
== callback
) && (c
->priv
== priv
)) {
1319 static int target_unregister_timer_callback(int (*callback
)(void *priv
), void *priv
)
1321 struct target_timer_callback
**p
= &target_timer_callbacks
;
1322 struct target_timer_callback
*c
= target_timer_callbacks
;
1324 if (callback
== NULL
)
1325 return ERROR_COMMAND_SYNTAX_ERROR
;
1328 struct target_timer_callback
*next
= c
->next
;
1329 if ((c
->callback
== callback
) && (c
->priv
== priv
)) {
1341 int target_call_event_callbacks(struct target
*target
, enum target_event event
)
1343 struct target_event_callback
*callback
= target_event_callbacks
;
1344 struct target_event_callback
*next_callback
;
1346 if (event
== TARGET_EVENT_HALTED
) {
1347 /* execute early halted first */
1348 target_call_event_callbacks(target
, TARGET_EVENT_GDB_HALT
);
1351 LOG_DEBUG("target event %i (%s)", event
,
1352 Jim_Nvp_value2name_simple(nvp_target_event
, event
)->name
);
1354 target_handle_event(target
, event
);
1357 next_callback
= callback
->next
;
1358 callback
->callback(target
, event
, callback
->priv
);
1359 callback
= next_callback
;
1365 static int target_timer_callback_periodic_restart(
1366 struct target_timer_callback
*cb
, struct timeval
*now
)
1368 int time_ms
= cb
->time_ms
;
1369 cb
->when
.tv_usec
= now
->tv_usec
+ (time_ms
% 1000) * 1000;
1370 time_ms
-= (time_ms
% 1000);
1371 cb
->when
.tv_sec
= now
->tv_sec
+ time_ms
/ 1000;
1372 if (cb
->when
.tv_usec
> 1000000) {
1373 cb
->when
.tv_usec
= cb
->when
.tv_usec
- 1000000;
1374 cb
->when
.tv_sec
+= 1;
1379 static int target_call_timer_callback(struct target_timer_callback
*cb
,
1380 struct timeval
*now
)
1382 cb
->callback(cb
->priv
);
1385 return target_timer_callback_periodic_restart(cb
, now
);
1387 return target_unregister_timer_callback(cb
->callback
, cb
->priv
);
1390 static int target_call_timer_callbacks_check_time(int checktime
)
1395 gettimeofday(&now
, NULL
);
1397 struct target_timer_callback
*callback
= target_timer_callbacks
;
1399 /* cleaning up may unregister and free this callback */
1400 struct target_timer_callback
*next_callback
= callback
->next
;
1402 bool call_it
= callback
->callback
&&
1403 ((!checktime
&& callback
->periodic
) ||
1404 now
.tv_sec
> callback
->when
.tv_sec
||
1405 (now
.tv_sec
== callback
->when
.tv_sec
&&
1406 now
.tv_usec
>= callback
->when
.tv_usec
));
1409 int retval
= target_call_timer_callback(callback
, &now
);
1410 if (retval
!= ERROR_OK
)
1414 callback
= next_callback
;
/* Run the timer callbacks that are actually due. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
/* invoke periodic callbacks immediately */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1431 /* Prints the working area layout for debug purposes */
1432 static void print_wa_layout(struct target
*target
)
1434 struct working_area
*c
= target
->working_areas
;
1437 LOG_DEBUG("%c%c 0x%08"PRIx32
"-0x%08"PRIx32
" (%"PRIu32
" bytes)",
1438 c
->backup ?
'b' : ' ', c
->free ?
' ' : '*',
1439 c
->address
, c
->address
+ c
->size
- 1, c
->size
);
1444 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1445 static void target_split_working_area(struct working_area
*area
, uint32_t size
)
1447 assert(area
->free
); /* Shouldn't split an allocated area */
1448 assert(size
<= area
->size
); /* Caller should guarantee this */
1450 /* Split only if not already the right size */
1451 if (size
< area
->size
) {
1452 struct working_area
*new_wa
= malloc(sizeof(*new_wa
));
1457 new_wa
->next
= area
->next
;
1458 new_wa
->size
= area
->size
- size
;
1459 new_wa
->address
= area
->address
+ size
;
1460 new_wa
->backup
= NULL
;
1461 new_wa
->user
= NULL
;
1462 new_wa
->free
= true;
1464 area
->next
= new_wa
;
1467 /* If backup memory was allocated to this area, it has the wrong size
1468 * now so free it and it will be reallocated if/when needed */
1471 area
->backup
= NULL
;
1476 /* Merge all adjacent free areas into one */
1477 static void target_merge_working_areas(struct target
*target
)
1479 struct working_area
*c
= target
->working_areas
;
1481 while (c
&& c
->next
) {
1482 assert(c
->next
->address
== c
->address
+ c
->size
); /* This is an invariant */
1484 /* Find two adjacent free areas */
1485 if (c
->free
&& c
->next
->free
) {
1486 /* Merge the last into the first */
1487 c
->size
+= c
->next
->size
;
1489 /* Remove the last */
1490 struct working_area
*to_be_freed
= c
->next
;
1491 c
->next
= c
->next
->next
;
1492 if (to_be_freed
->backup
)
1493 free(to_be_freed
->backup
);
1496 /* If backup memory was allocated to the remaining area, it's has
1497 * the wrong size now */
1508 int target_alloc_working_area_try(struct target
*target
, uint32_t size
, struct working_area
**area
)
1510 /* Reevaluate working area address based on MMU state*/
1511 if (target
->working_areas
== NULL
) {
1515 retval
= target
->type
->mmu(target
, &enabled
);
1516 if (retval
!= ERROR_OK
)
1520 if (target
->working_area_phys_spec
) {
1521 LOG_DEBUG("MMU disabled, using physical "
1522 "address for working memory 0x%08"PRIx32
,
1523 target
->working_area_phys
);
1524 target
->working_area
= target
->working_area_phys
;
1526 LOG_ERROR("No working memory available. "
1527 "Specify -work-area-phys to target.");
1528 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1531 if (target
->working_area_virt_spec
) {
1532 LOG_DEBUG("MMU enabled, using virtual "
1533 "address for working memory 0x%08"PRIx32
,
1534 target
->working_area_virt
);
1535 target
->working_area
= target
->working_area_virt
;
1537 LOG_ERROR("No working memory available. "
1538 "Specify -work-area-virt to target.");
1539 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1543 /* Set up initial working area on first call */
1544 struct working_area
*new_wa
= malloc(sizeof(*new_wa
));
1546 new_wa
->next
= NULL
;
1547 new_wa
->size
= target
->working_area_size
& ~3UL; /* 4-byte align */
1548 new_wa
->address
= target
->working_area
;
1549 new_wa
->backup
= NULL
;
1550 new_wa
->user
= NULL
;
1551 new_wa
->free
= true;
1554 target
->working_areas
= new_wa
;
1557 /* only allocate multiples of 4 byte */
1559 size
= (size
+ 3) & (~3UL);
1561 struct working_area
*c
= target
->working_areas
;
1563 /* Find the first large enough working area */
1565 if (c
->free
&& c
->size
>= size
)
1571 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1573 /* Split the working area into the requested size */
1574 target_split_working_area(c
, size
);
1576 LOG_DEBUG("allocated new working area of %"PRIu32
" bytes at address 0x%08"PRIx32
, size
, c
->address
);
1578 if (target
->backup_working_area
) {
1579 if (c
->backup
== NULL
) {
1580 c
->backup
= malloc(c
->size
);
1581 if (c
->backup
== NULL
)
1585 int retval
= target_read_memory(target
, c
->address
, 4, c
->size
/ 4, c
->backup
);
1586 if (retval
!= ERROR_OK
)
1590 /* mark as used, and return the new (reused) area */
1597 print_wa_layout(target
);
1602 int target_alloc_working_area(struct target
*target
, uint32_t size
, struct working_area
**area
)
1606 retval
= target_alloc_working_area_try(target
, size
, area
);
1607 if (retval
== ERROR_TARGET_RESOURCE_NOT_AVAILABLE
)
1608 LOG_WARNING("not enough working area available(requested %"PRIu32
")", size
);
1613 static int target_restore_working_area(struct target
*target
, struct working_area
*area
)
1615 int retval
= ERROR_OK
;
1617 if (target
->backup_working_area
&& area
->backup
!= NULL
) {
1618 retval
= target_write_memory(target
, area
->address
, 4, area
->size
/ 4, area
->backup
);
1619 if (retval
!= ERROR_OK
)
1620 LOG_ERROR("failed to restore %"PRIu32
" bytes of working area at address 0x%08"PRIx32
,
1621 area
->size
, area
->address
);
1627 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
1628 static int target_free_working_area_restore(struct target
*target
, struct working_area
*area
, int restore
)
1630 int retval
= ERROR_OK
;
1636 retval
= target_restore_working_area(target
, area
);
1637 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
1638 if (retval
!= ERROR_OK
)
1644 LOG_DEBUG("freed %"PRIu32
" bytes of working area at address 0x%08"PRIx32
,
1645 area
->size
, area
->address
);
1647 /* mark user pointer invalid */
1648 /* TODO: Is this really safe? It points to some previous caller's memory.
1649 * How could we know that the area pointer is still in that place and not
1650 * some other vital data? What's the purpose of this, anyway? */
1654 target_merge_working_areas(target
);
1656 print_wa_layout(target
);
/* Free @a area, restoring its backed-up contents first. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
1666 /* free resources and restore memory, if restoring memory fails,
1667 * free up resources anyway
1669 static void target_free_all_working_areas_restore(struct target
*target
, int restore
)
1671 struct working_area
*c
= target
->working_areas
;
1673 LOG_DEBUG("freeing all working areas");
1675 /* Loop through all areas, restoring the allocated ones and marking them as free */
1679 target_restore_working_area(target
, c
);
1681 *c
->user
= NULL
; /* Same as above */
1687 /* Run a merge pass to combine all areas into one */
1688 target_merge_working_areas(target
);
1690 print_wa_layout(target
);
/* Release every working area on @a target, restoring backups. */
void target_free_all_working_areas(struct target *target)
{
	target_free_all_working_areas_restore(target, 1);
}
1698 /* Find the largest number of bytes that can be allocated */
1699 uint32_t target_get_working_area_avail(struct target
*target
)
1701 struct working_area
*c
= target
->working_areas
;
1702 uint32_t max_size
= 0;
1705 return target
->working_area_size
;
1708 if (c
->free
&& max_size
< c
->size
)
1717 int target_arch_state(struct target
*target
)
1720 if (target
== NULL
) {
1721 LOG_USER("No target has been configured");
1725 LOG_USER("target state: %s", target_state_name(target
));
1727 if (target
->state
!= TARGET_HALTED
)
1730 retval
= target
->type
->arch_state(target
);
1734 static int target_get_gdb_fileio_info_default(struct target
*target
,
1735 struct gdb_fileio_info
*fileio_info
)
1737 /* If target does not support semi-hosting function, target
1738 has no need to provide .get_gdb_fileio_info callback.
1739 It just return ERROR_FAIL and gdb_server will return "Txx"
1740 as target halted every time. */
1744 static int target_gdb_fileio_end_default(struct target
*target
,
1745 int retcode
, int fileio_errno
, bool ctrl_c
)
1750 /* Single aligned words are guaranteed to use 16 or 32 bit access
1751 * mode respectively, otherwise data is handled as quickly as
1754 int target_write_buffer(struct target
*target
, uint32_t address
, uint32_t size
, const uint8_t *buffer
)
1756 LOG_DEBUG("writing buffer of %i byte at 0x%8.8x",
1757 (int)size
, (unsigned)address
);
1759 if (!target_was_examined(target
)) {
1760 LOG_ERROR("Target not examined yet");
1767 if ((address
+ size
- 1) < address
) {
1768 /* GDB can request this when e.g. PC is 0xfffffffc*/
1769 LOG_ERROR("address + size wrapped(0x%08x, 0x%08x)",
1775 return target
->type
->write_buffer(target
, address
, size
, buffer
);
1778 static int target_write_buffer_default(struct target
*target
, uint32_t address
, uint32_t size
, const uint8_t *buffer
)
1780 int retval
= ERROR_OK
;
1782 if (((address
% 2) == 0) && (size
== 2))
1783 return target_write_memory(target
, address
, 2, 1, buffer
);
1785 /* handle unaligned head bytes */
1787 uint32_t unaligned
= 4 - (address
% 4);
1789 if (unaligned
> size
)
1792 retval
= target_write_memory(target
, address
, 1, unaligned
, buffer
);
1793 if (retval
!= ERROR_OK
)
1796 buffer
+= unaligned
;
1797 address
+= unaligned
;
1801 /* handle aligned words */
1803 int aligned
= size
- (size
% 4);
1805 /* use bulk writes above a certain limit. This may have to be changed */
1806 if (aligned
> 128) {
1807 retval
= target
->type
->bulk_write_memory(target
, address
, aligned
/ 4, buffer
);
1808 if (retval
!= ERROR_OK
)
1811 retval
= target_write_memory(target
, address
, 4, aligned
/ 4, buffer
);
1812 if (retval
!= ERROR_OK
)
1821 /* handle tail writes of less than 4 bytes */
1823 retval
= target_write_memory(target
, address
, 1, size
, buffer
);
1824 if (retval
!= ERROR_OK
)
1831 /* Single aligned words are guaranteed to use 16 or 32 bit access
1832 * mode respectively, otherwise data is handled as quickly as
1835 int target_read_buffer(struct target
*target
, uint32_t address
, uint32_t size
, uint8_t *buffer
)
1837 LOG_DEBUG("reading buffer of %i byte at 0x%8.8x",
1838 (int)size
, (unsigned)address
);
1840 if (!target_was_examined(target
)) {
1841 LOG_ERROR("Target not examined yet");
1848 if ((address
+ size
- 1) < address
) {
1849 /* GDB can request this when e.g. PC is 0xfffffffc*/
1850 LOG_ERROR("address + size wrapped(0x%08" PRIx32
", 0x%08" PRIx32
")",
1856 return target
->type
->read_buffer(target
, address
, size
, buffer
);
1859 static int target_read_buffer_default(struct target
*target
, uint32_t address
, uint32_t size
, uint8_t *buffer
)
1861 int retval
= ERROR_OK
;
1863 if (((address
% 2) == 0) && (size
== 2))
1864 return target_read_memory(target
, address
, 2, 1, buffer
);
1866 /* handle unaligned head bytes */
1868 uint32_t unaligned
= 4 - (address
% 4);
1870 if (unaligned
> size
)
1873 retval
= target_read_memory(target
, address
, 1, unaligned
, buffer
);
1874 if (retval
!= ERROR_OK
)
1877 buffer
+= unaligned
;
1878 address
+= unaligned
;
1882 /* handle aligned words */
1884 int aligned
= size
- (size
% 4);
1886 retval
= target_read_memory(target
, address
, 4, aligned
/ 4, buffer
);
1887 if (retval
!= ERROR_OK
)
1895 /*prevent byte access when possible (avoid AHB access limitations in some cases)*/
1897 int aligned
= size
- (size
% 2);
1898 retval
= target_read_memory(target
, address
, 2, aligned
/ 2, buffer
);
1899 if (retval
!= ERROR_OK
)
1906 /* handle tail writes of less than 4 bytes */
1908 retval
= target_read_memory(target
, address
, 1, size
, buffer
);
1909 if (retval
!= ERROR_OK
)
1916 int target_checksum_memory(struct target
*target
, uint32_t address
, uint32_t size
, uint32_t* crc
)
1921 uint32_t checksum
= 0;
1922 if (!target_was_examined(target
)) {
1923 LOG_ERROR("Target not examined yet");
1927 retval
= target
->type
->checksum_memory(target
, address
, size
, &checksum
);
1928 if (retval
!= ERROR_OK
) {
1929 buffer
= malloc(size
);
1930 if (buffer
== NULL
) {
1931 LOG_ERROR("error allocating buffer for section (%d bytes)", (int)size
);
1932 return ERROR_COMMAND_SYNTAX_ERROR
;
1934 retval
= target_read_buffer(target
, address
, size
, buffer
);
1935 if (retval
!= ERROR_OK
) {
1940 /* convert to target endianness */
1941 for (i
= 0; i
< (size
/sizeof(uint32_t)); i
++) {
1942 uint32_t target_data
;
1943 target_data
= target_buffer_get_u32(target
, &buffer
[i
*sizeof(uint32_t)]);
1944 target_buffer_set_u32(target
, &buffer
[i
*sizeof(uint32_t)], target_data
);
1947 retval
= image_calculate_checksum(buffer
, size
, &checksum
);
1956 int target_blank_check_memory(struct target
*target
, uint32_t address
, uint32_t size
, uint32_t* blank
)
1959 if (!target_was_examined(target
)) {
1960 LOG_ERROR("Target not examined yet");
1964 if (target
->type
->blank_check_memory
== 0)
1965 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1967 retval
= target
->type
->blank_check_memory(target
, address
, size
, blank
);
1972 int target_read_u32(struct target
*target
, uint32_t address
, uint32_t *value
)
1974 uint8_t value_buf
[4];
1975 if (!target_was_examined(target
)) {
1976 LOG_ERROR("Target not examined yet");
1980 int retval
= target_read_memory(target
, address
, 4, 1, value_buf
);
1982 if (retval
== ERROR_OK
) {
1983 *value
= target_buffer_get_u32(target
, value_buf
);
1984 LOG_DEBUG("address: 0x%8.8" PRIx32
", value: 0x%8.8" PRIx32
"",
1989 LOG_DEBUG("address: 0x%8.8" PRIx32
" failed",
1996 int target_read_u16(struct target
*target
, uint32_t address
, uint16_t *value
)
1998 uint8_t value_buf
[2];
1999 if (!target_was_examined(target
)) {
2000 LOG_ERROR("Target not examined yet");
2004 int retval
= target_read_memory(target
, address
, 2, 1, value_buf
);
2006 if (retval
== ERROR_OK
) {
2007 *value
= target_buffer_get_u16(target
, value_buf
);
2008 LOG_DEBUG("address: 0x%8.8" PRIx32
", value: 0x%4.4x",
2013 LOG_DEBUG("address: 0x%8.8" PRIx32
" failed",
2020 int target_read_u8(struct target
*target
, uint32_t address
, uint8_t *value
)
2022 int retval
= target_read_memory(target
, address
, 1, 1, value
);
2023 if (!target_was_examined(target
)) {
2024 LOG_ERROR("Target not examined yet");
2028 if (retval
== ERROR_OK
) {
2029 LOG_DEBUG("address: 0x%8.8" PRIx32
", value: 0x%2.2x",
2034 LOG_DEBUG("address: 0x%8.8" PRIx32
" failed",
2041 int target_write_u32(struct target
*target
, uint32_t address
, uint32_t value
)
2044 uint8_t value_buf
[4];
2045 if (!target_was_examined(target
)) {
2046 LOG_ERROR("Target not examined yet");
2050 LOG_DEBUG("address: 0x%8.8" PRIx32
", value: 0x%8.8" PRIx32
"",
2054 target_buffer_set_u32(target
, value_buf
, value
);
2055 retval
= target_write_memory(target
, address
, 4, 1, value_buf
);
2056 if (retval
!= ERROR_OK
)
2057 LOG_DEBUG("failed: %i", retval
);
2062 int target_write_u16(struct target
*target
, uint32_t address
, uint16_t value
)
2065 uint8_t value_buf
[2];
2066 if (!target_was_examined(target
)) {
2067 LOG_ERROR("Target not examined yet");
2071 LOG_DEBUG("address: 0x%8.8" PRIx32
", value: 0x%8.8x",
2075 target_buffer_set_u16(target
, value_buf
, value
);
2076 retval
= target_write_memory(target
, address
, 2, 1, value_buf
);
2077 if (retval
!= ERROR_OK
)
2078 LOG_DEBUG("failed: %i", retval
);
2083 int target_write_u8(struct target
*target
, uint32_t address
, uint8_t value
)
2086 if (!target_was_examined(target
)) {
2087 LOG_ERROR("Target not examined yet");
2091 LOG_DEBUG("address: 0x%8.8" PRIx32
", value: 0x%2.2x",
2094 retval
= target_write_memory(target
, address
, 1, 1, &value
);
2095 if (retval
!= ERROR_OK
)
2096 LOG_DEBUG("failed: %i", retval
);
2101 static int find_target(struct command_context
*cmd_ctx
, const char *name
)
2103 struct target
*target
= get_target(name
);
2104 if (target
== NULL
) {
2105 LOG_ERROR("Target: %s is unknown, try one of:\n", name
);
2108 if (!target
->tap
->enabled
) {
2109 LOG_USER("Target: TAP %s is disabled, "
2110 "can't be the current target\n",
2111 target
->tap
->dotted_name
);
2115 cmd_ctx
->current_target
= target
->target_number
;
2120 COMMAND_HANDLER(handle_targets_command
)
2122 int retval
= ERROR_OK
;
2123 if (CMD_ARGC
== 1) {
2124 retval
= find_target(CMD_CTX
, CMD_ARGV
[0]);
2125 if (retval
== ERROR_OK
) {
2131 struct target
*target
= all_targets
;
2132 command_print(CMD_CTX
, " TargetName Type Endian TapName State ");
2133 command_print(CMD_CTX
, "-- ------------------ ---------- ------ ------------------ ------------");
2138 if (target
->tap
->enabled
)
2139 state
= target_state_name(target
);
2141 state
= "tap-disabled";
2143 if (CMD_CTX
->current_target
== target
->target_number
)
2146 /* keep columns lined up to match the headers above */
2147 command_print(CMD_CTX
,
2148 "%2d%c %-18s %-10s %-6s %-18s %s",
2149 target
->target_number
,
2151 target_name(target
),
2152 target_type_name(target
),
2153 Jim_Nvp_value2name_simple(nvp_target_endian
,
2154 target
->endianness
)->name
,
2155 target
->tap
->dotted_name
,
2157 target
= target
->next
;
2163 /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
2165 static int powerDropout
;
2166 static int srstAsserted
;
2168 static int runPowerRestore
;
2169 static int runPowerDropout
;
2170 static int runSrstAsserted
;
2171 static int runSrstDeasserted
;
2173 static int sense_handler(void)
2175 static int prevSrstAsserted
;
2176 static int prevPowerdropout
;
2178 int retval
= jtag_power_dropout(&powerDropout
);
2179 if (retval
!= ERROR_OK
)
2183 powerRestored
= prevPowerdropout
&& !powerDropout
;
2185 runPowerRestore
= 1;
2187 long long current
= timeval_ms();
2188 static long long lastPower
;
2189 int waitMore
= lastPower
+ 2000 > current
;
2190 if (powerDropout
&& !waitMore
) {
2191 runPowerDropout
= 1;
2192 lastPower
= current
;
2195 retval
= jtag_srst_asserted(&srstAsserted
);
2196 if (retval
!= ERROR_OK
)
2200 srstDeasserted
= prevSrstAsserted
&& !srstAsserted
;
2202 static long long lastSrst
;
2203 waitMore
= lastSrst
+ 2000 > current
;
2204 if (srstDeasserted
&& !waitMore
) {
2205 runSrstDeasserted
= 1;
2209 if (!prevSrstAsserted
&& srstAsserted
)
2210 runSrstAsserted
= 1;
2212 prevSrstAsserted
= srstAsserted
;
2213 prevPowerdropout
= powerDropout
;
2215 if (srstDeasserted
|| powerRestored
) {
2216 /* Other than logging the event we can't do anything here.
2217 * Issuing a reset is a particularly bad idea as we might
2218 * be inside a reset already.
2225 /* process target state changes */
2226 static int handle_target(void *priv
)
2228 Jim_Interp
*interp
= (Jim_Interp
*)priv
;
2229 int retval
= ERROR_OK
;
2231 if (!is_jtag_poll_safe()) {
2232 /* polling is disabled currently */
2236 /* we do not want to recurse here... */
2237 static int recursive
;
2241 /* danger! running these procedures can trigger srst assertions and power dropouts.
2242 * We need to avoid an infinite loop/recursion here and we do that by
2243 * clearing the flags after running these events.
2245 int did_something
= 0;
2246 if (runSrstAsserted
) {
2247 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2248 Jim_Eval(interp
, "srst_asserted");
2251 if (runSrstDeasserted
) {
2252 Jim_Eval(interp
, "srst_deasserted");
2255 if (runPowerDropout
) {
2256 LOG_INFO("Power dropout detected, running power_dropout proc.");
2257 Jim_Eval(interp
, "power_dropout");
2260 if (runPowerRestore
) {
2261 Jim_Eval(interp
, "power_restore");
2265 if (did_something
) {
2266 /* clear detect flags */
2270 /* clear action flags */
2272 runSrstAsserted
= 0;
2273 runSrstDeasserted
= 0;
2274 runPowerRestore
= 0;
2275 runPowerDropout
= 0;
2280 /* Poll targets for state changes unless that's globally disabled.
2281 * Skip targets that are currently disabled.
2283 for (struct target
*target
= all_targets
;
2284 is_jtag_poll_safe() && target
;
2285 target
= target
->next
) {
2286 if (!target
->tap
->enabled
)
2289 if (target
->backoff
.times
> target
->backoff
.count
) {
2290 /* do not poll this time as we failed previously */
2291 target
->backoff
.count
++;
2294 target
->backoff
.count
= 0;
2296 /* only poll target if we've got power and srst isn't asserted */
2297 if (!powerDropout
&& !srstAsserted
) {
2298 /* polling may fail silently until the target has been examined */
2299 retval
= target_poll(target
);
2300 if (retval
!= ERROR_OK
) {
2301 /* 100ms polling interval. Increase interval between polling up to 5000ms */
2302 if (target
->backoff
.times
* polling_interval
< 5000) {
2303 target
->backoff
.times
*= 2;
2304 target
->backoff
.times
++;
2306 LOG_USER("Polling target %s failed, GDB will be halted. Polling again in %dms",
2307 target_name(target
),
2308 target
->backoff
.times
* polling_interval
);
2310 /* Tell GDB to halt the debugger. This allows the user to
2311 * run monitor commands to handle the situation.
2313 target_call_event_callbacks(target
, TARGET_EVENT_GDB_HALT
);
2316 /* Since we succeeded, we reset backoff count */
2317 if (target
->backoff
.times
> 0)
2318 LOG_USER("Polling target %s succeeded again", target_name(target
));
2319 target
->backoff
.times
= 0;
2326 COMMAND_HANDLER(handle_reg_command
)
2328 struct target
*target
;
2329 struct reg
*reg
= NULL
;
2335 target
= get_current_target(CMD_CTX
);
2337 /* list all available registers for the current target */
2338 if (CMD_ARGC
== 0) {
2339 struct reg_cache
*cache
= target
->reg_cache
;
2345 command_print(CMD_CTX
, "===== %s", cache
->name
);
2347 for (i
= 0, reg
= cache
->reg_list
;
2348 i
< cache
->num_regs
;
2349 i
++, reg
++, count
++) {
2350 /* only print cached values if they are valid */
2352 value
= buf_to_str(reg
->value
,
2354 command_print(CMD_CTX
,
2355 "(%i) %s (/%" PRIu32
"): 0x%s%s",
2363 command_print(CMD_CTX
, "(%i) %s (/%" PRIu32
")",
2368 cache
= cache
->next
;
2374 /* access a single register by its ordinal number */
2375 if ((CMD_ARGV
[0][0] >= '0') && (CMD_ARGV
[0][0] <= '9')) {
2377 COMMAND_PARSE_NUMBER(uint
, CMD_ARGV
[0], num
);
2379 struct reg_cache
*cache
= target
->reg_cache
;
2383 for (i
= 0; i
< cache
->num_regs
; i
++) {
2384 if (count
++ == num
) {
2385 reg
= &cache
->reg_list
[i
];
2391 cache
= cache
->next
;
2395 command_print(CMD_CTX
, "%i is out of bounds, the current target "
2396 "has only %i registers (0 - %i)", num
, count
, count
- 1);
2400 /* access a single register by its name */
2401 reg
= register_get_by_name(target
->reg_cache
, CMD_ARGV
[0], 1);
2404 command_print(CMD_CTX
, "register %s not found in current target", CMD_ARGV
[0]);
2409 assert(reg
!= NULL
); /* give clang a hint that we *know* reg is != NULL here */
2411 /* display a register */
2412 if ((CMD_ARGC
== 1) || ((CMD_ARGC
== 2) && !((CMD_ARGV
[1][0] >= '0')
2413 && (CMD_ARGV
[1][0] <= '9')))) {
2414 if ((CMD_ARGC
== 2) && (strcmp(CMD_ARGV
[1], "force") == 0))
2417 if (reg
->valid
== 0)
2418 reg
->type
->get(reg
);
2419 value
= buf_to_str(reg
->value
, reg
->size
, 16);
2420 command_print(CMD_CTX
, "%s (/%i): 0x%s", reg
->name
, (int)(reg
->size
), value
);
2425 /* set register value */
2426 if (CMD_ARGC
== 2) {
2427 uint8_t *buf
= malloc(DIV_ROUND_UP(reg
->size
, 8));
2430 str_to_buf(CMD_ARGV
[1], strlen(CMD_ARGV
[1]), buf
, reg
->size
, 0);
2432 reg
->type
->set(reg
, buf
);
2434 value
= buf_to_str(reg
->value
, reg
->size
, 16);
2435 command_print(CMD_CTX
, "%s (/%i): 0x%s", reg
->name
, (int)(reg
->size
), value
);
2443 return ERROR_COMMAND_SYNTAX_ERROR
;
2446 COMMAND_HANDLER(handle_poll_command
)
2448 int retval
= ERROR_OK
;
2449 struct target
*target
= get_current_target(CMD_CTX
);
2451 if (CMD_ARGC
== 0) {
2452 command_print(CMD_CTX
, "background polling: %s",
2453 jtag_poll_get_enabled() ?
"on" : "off");
2454 command_print(CMD_CTX
, "TAP: %s (%s)",
2455 target
->tap
->dotted_name
,
2456 target
->tap
->enabled ?
"enabled" : "disabled");
2457 if (!target
->tap
->enabled
)
2459 retval
= target_poll(target
);
2460 if (retval
!= ERROR_OK
)
2462 retval
= target_arch_state(target
);
2463 if (retval
!= ERROR_OK
)
2465 } else if (CMD_ARGC
== 1) {
2467 COMMAND_PARSE_ON_OFF(CMD_ARGV
[0], enable
);
2468 jtag_poll_set_enabled(enable
);
2470 return ERROR_COMMAND_SYNTAX_ERROR
;
2475 COMMAND_HANDLER(handle_wait_halt_command
)
2478 return ERROR_COMMAND_SYNTAX_ERROR
;
2480 unsigned ms
= DEFAULT_HALT_TIMEOUT
;
2481 if (1 == CMD_ARGC
) {
2482 int retval
= parse_uint(CMD_ARGV
[0], &ms
);
2483 if (ERROR_OK
!= retval
)
2484 return ERROR_COMMAND_SYNTAX_ERROR
;
2487 struct target
*target
= get_current_target(CMD_CTX
);
2488 return target_wait_state(target
, TARGET_HALTED
, ms
);
2491 /* wait for target state to change. The trick here is to have a low
2492 * latency for short waits and not to suck up all the CPU time
2495 * After 500ms, keep_alive() is invoked
2497 int target_wait_state(struct target
*target
, enum target_state state
, int ms
)
2500 long long then
= 0, cur
;
2504 retval
= target_poll(target
);
2505 if (retval
!= ERROR_OK
)
2507 if (target
->state
== state
)
2512 then
= timeval_ms();
2513 LOG_DEBUG("waiting for target %s...",
2514 Jim_Nvp_value2name_simple(nvp_target_state
, state
)->name
);
2520 if ((cur
-then
) > ms
) {
2521 LOG_ERROR("timed out while waiting for target %s",
2522 Jim_Nvp_value2name_simple(nvp_target_state
, state
)->name
);
2530 COMMAND_HANDLER(handle_halt_command
)
2534 struct target
*target
= get_current_target(CMD_CTX
);
2535 int retval
= target_halt(target
);
2536 if (ERROR_OK
!= retval
)
2539 if (CMD_ARGC
== 1) {
2540 unsigned wait_local
;
2541 retval
= parse_uint(CMD_ARGV
[0], &wait_local
);
2542 if (ERROR_OK
!= retval
)
2543 return ERROR_COMMAND_SYNTAX_ERROR
;
2548 return CALL_COMMAND_HANDLER(handle_wait_halt_command
);
2551 COMMAND_HANDLER(handle_soft_reset_halt_command
)
2553 struct target
*target
= get_current_target(CMD_CTX
);
2555 LOG_USER("requesting target halt and executing a soft reset");
2557 target_soft_reset_halt(target
);
2562 COMMAND_HANDLER(handle_reset_command
)
2565 return ERROR_COMMAND_SYNTAX_ERROR
;
2567 enum target_reset_mode reset_mode
= RESET_RUN
;
2568 if (CMD_ARGC
== 1) {
2570 n
= Jim_Nvp_name2value_simple(nvp_reset_modes
, CMD_ARGV
[0]);
2571 if ((n
->name
== NULL
) || (n
->value
== RESET_UNKNOWN
))
2572 return ERROR_COMMAND_SYNTAX_ERROR
;
2573 reset_mode
= n
->value
;
2576 /* reset *all* targets */
2577 return target_process_reset(CMD_CTX
, reset_mode
);
2581 COMMAND_HANDLER(handle_resume_command
)
2585 return ERROR_COMMAND_SYNTAX_ERROR
;
2587 struct target
*target
= get_current_target(CMD_CTX
);
2589 /* with no CMD_ARGV, resume from current pc, addr = 0,
2590 * with one arguments, addr = CMD_ARGV[0],
2591 * handle breakpoints, not debugging */
2593 if (CMD_ARGC
== 1) {
2594 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[0], addr
);
2598 return target_resume(target
, current
, addr
, 1, 0);
2601 COMMAND_HANDLER(handle_step_command
)
2604 return ERROR_COMMAND_SYNTAX_ERROR
;
2608 /* with no CMD_ARGV, step from current pc, addr = 0,
2609 * with one argument addr = CMD_ARGV[0],
2610 * handle breakpoints, debugging */
2613 if (CMD_ARGC
== 1) {
2614 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[0], addr
);
2618 struct target
*target
= get_current_target(CMD_CTX
);
2620 return target
->type
->step(target
, current_pc
, addr
, 1);
2623 static void handle_md_output(struct command_context
*cmd_ctx
,
2624 struct target
*target
, uint32_t address
, unsigned size
,
2625 unsigned count
, const uint8_t *buffer
)
2627 const unsigned line_bytecnt
= 32;
2628 unsigned line_modulo
= line_bytecnt
/ size
;
2630 char output
[line_bytecnt
* 4 + 1];
2631 unsigned output_len
= 0;
2633 const char *value_fmt
;
2636 value_fmt
= "%8.8x ";
2639 value_fmt
= "%4.4x ";
2642 value_fmt
= "%2.2x ";
2645 /* "can't happen", caller checked */
2646 LOG_ERROR("invalid memory read size: %u", size
);
2650 for (unsigned i
= 0; i
< count
; i
++) {
2651 if (i
% line_modulo
== 0) {
2652 output_len
+= snprintf(output
+ output_len
,
2653 sizeof(output
) - output_len
,
2655 (unsigned)(address
+ (i
*size
)));
2659 const uint8_t *value_ptr
= buffer
+ i
* size
;
2662 value
= target_buffer_get_u32(target
, value_ptr
);
2665 value
= target_buffer_get_u16(target
, value_ptr
);
2670 output_len
+= snprintf(output
+ output_len
,
2671 sizeof(output
) - output_len
,
2674 if ((i
% line_modulo
== line_modulo
- 1) || (i
== count
- 1)) {
2675 command_print(cmd_ctx
, "%s", output
);
2681 COMMAND_HANDLER(handle_md_command
)
2684 return ERROR_COMMAND_SYNTAX_ERROR
;
2687 switch (CMD_NAME
[2]) {
2698 return ERROR_COMMAND_SYNTAX_ERROR
;
2701 bool physical
= strcmp(CMD_ARGV
[0], "phys") == 0;
2702 int (*fn
)(struct target
*target
,
2703 uint32_t address
, uint32_t size_value
, uint32_t count
, uint8_t *buffer
);
2707 fn
= target_read_phys_memory
;
2709 fn
= target_read_memory
;
2710 if ((CMD_ARGC
< 1) || (CMD_ARGC
> 2))
2711 return ERROR_COMMAND_SYNTAX_ERROR
;
2714 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[0], address
);
2718 COMMAND_PARSE_NUMBER(uint
, CMD_ARGV
[1], count
);
2720 uint8_t *buffer
= calloc(count
, size
);
2722 struct target
*target
= get_current_target(CMD_CTX
);
2723 int retval
= fn(target
, address
, size
, count
, buffer
);
2724 if (ERROR_OK
== retval
)
2725 handle_md_output(CMD_CTX
, target
, address
, size
, count
, buffer
);
2732 typedef int (*target_write_fn
)(struct target
*target
,
2733 uint32_t address
, uint32_t size
, uint32_t count
, const uint8_t *buffer
);
2735 static int target_write_memory_fast(struct target
*target
,
2736 uint32_t address
, uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2738 return target_write_buffer(target
, address
, size
* count
, buffer
);
2741 static int target_fill_mem(struct target
*target
,
2750 /* We have to write in reasonably large chunks to be able
2751 * to fill large memory areas with any sane speed */
2752 const unsigned chunk_size
= 16384;
2753 uint8_t *target_buf
= malloc(chunk_size
* data_size
);
2754 if (target_buf
== NULL
) {
2755 LOG_ERROR("Out of memory");
2759 for (unsigned i
= 0; i
< chunk_size
; i
++) {
2760 switch (data_size
) {
2762 target_buffer_set_u32(target
, target_buf
+ i
* data_size
, b
);
2765 target_buffer_set_u16(target
, target_buf
+ i
* data_size
, b
);
2768 target_buffer_set_u8(target
, target_buf
+ i
* data_size
, b
);
2775 int retval
= ERROR_OK
;
2777 for (unsigned x
= 0; x
< c
; x
+= chunk_size
) {
2780 if (current
> chunk_size
)
2781 current
= chunk_size
;
2782 retval
= fn(target
, address
+ x
* data_size
, data_size
, current
, target_buf
);
2783 if (retval
!= ERROR_OK
)
2785 /* avoid GDB timeouts */
2794 COMMAND_HANDLER(handle_mw_command
)
2797 return ERROR_COMMAND_SYNTAX_ERROR
;
2798 bool physical
= strcmp(CMD_ARGV
[0], "phys") == 0;
2803 fn
= target_write_phys_memory
;
2805 fn
= target_write_memory_fast
;
2806 if ((CMD_ARGC
< 2) || (CMD_ARGC
> 3))
2807 return ERROR_COMMAND_SYNTAX_ERROR
;
2810 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[0], address
);
2813 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[1], value
);
2817 COMMAND_PARSE_NUMBER(uint
, CMD_ARGV
[2], count
);
2819 struct target
*target
= get_current_target(CMD_CTX
);
2821 switch (CMD_NAME
[2]) {
2832 return ERROR_COMMAND_SYNTAX_ERROR
;
2835 return target_fill_mem(target
, address
, fn
, wordsize
, value
, count
);
2838 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV
, struct image
*image
,
2839 uint32_t *min_address
, uint32_t *max_address
)
2841 if (CMD_ARGC
< 1 || CMD_ARGC
> 5)
2842 return ERROR_COMMAND_SYNTAX_ERROR
;
2844 /* a base address isn't always necessary,
2845 * default to 0x0 (i.e. don't relocate) */
2846 if (CMD_ARGC
>= 2) {
2848 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[1], addr
);
2849 image
->base_address
= addr
;
2850 image
->base_address_set
= 1;
2852 image
->base_address_set
= 0;
2854 image
->start_address_set
= 0;
2857 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[3], *min_address
);
2858 if (CMD_ARGC
== 5) {
2859 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[4], *max_address
);
2860 /* use size (given) to find max (required) */
2861 *max_address
+= *min_address
;
2864 if (*min_address
> *max_address
)
2865 return ERROR_COMMAND_SYNTAX_ERROR
;
2870 COMMAND_HANDLER(handle_load_image_command
)
2874 uint32_t image_size
;
2875 uint32_t min_address
= 0;
2876 uint32_t max_address
= 0xffffffff;
2880 int retval
= CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV
,
2881 &image
, &min_address
, &max_address
);
2882 if (ERROR_OK
!= retval
)
2885 struct target
*target
= get_current_target(CMD_CTX
);
2887 struct duration bench
;
2888 duration_start(&bench
);
2890 if (image_open(&image
, CMD_ARGV
[0], (CMD_ARGC
>= 3) ? CMD_ARGV
[2] : NULL
) != ERROR_OK
)
2895 for (i
= 0; i
< image
.num_sections
; i
++) {
2896 buffer
= malloc(image
.sections
[i
].size
);
2897 if (buffer
== NULL
) {
2898 command_print(CMD_CTX
,
2899 "error allocating buffer for section (%d bytes)",
2900 (int)(image
.sections
[i
].size
));
2904 retval
= image_read_section(&image
, i
, 0x0, image
.sections
[i
].size
, buffer
, &buf_cnt
);
2905 if (retval
!= ERROR_OK
) {
2910 uint32_t offset
= 0;
2911 uint32_t length
= buf_cnt
;
2913 /* DANGER!!! beware of unsigned comparision here!!! */
2915 if ((image
.sections
[i
].base_address
+ buf_cnt
>= min_address
) &&
2916 (image
.sections
[i
].base_address
< max_address
)) {
2918 if (image
.sections
[i
].base_address
< min_address
) {
2919 /* clip addresses below */
2920 offset
+= min_address
-image
.sections
[i
].base_address
;
2924 if (image
.sections
[i
].base_address
+ buf_cnt
> max_address
)
2925 length
-= (image
.sections
[i
].base_address
+ buf_cnt
)-max_address
;
2927 retval
= target_write_buffer(target
,
2928 image
.sections
[i
].base_address
+ offset
, length
, buffer
+ offset
);
2929 if (retval
!= ERROR_OK
) {
2933 image_size
+= length
;
2934 command_print(CMD_CTX
, "%u bytes written at address 0x%8.8" PRIx32
"",
2935 (unsigned int)length
,
2936 image
.sections
[i
].base_address
+ offset
);
2942 if ((ERROR_OK
== retval
) && (duration_measure(&bench
) == ERROR_OK
)) {
2943 command_print(CMD_CTX
, "downloaded %" PRIu32
" bytes "
2944 "in %fs (%0.3f KiB/s)", image_size
,
2945 duration_elapsed(&bench
), duration_kbps(&bench
, image_size
));
2948 image_close(&image
);
2954 COMMAND_HANDLER(handle_dump_image_command
)
2956 struct fileio fileio
;
2958 int retval
, retvaltemp
;
2959 uint32_t address
, size
;
2960 struct duration bench
;
2961 struct target
*target
= get_current_target(CMD_CTX
);
2964 return ERROR_COMMAND_SYNTAX_ERROR
;
2966 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[1], address
);
2967 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[2], size
);
2969 uint32_t buf_size
= (size
> 4096) ?
4096 : size
;
2970 buffer
= malloc(buf_size
);
2974 retval
= fileio_open(&fileio
, CMD_ARGV
[0], FILEIO_WRITE
, FILEIO_BINARY
);
2975 if (retval
!= ERROR_OK
) {
2980 duration_start(&bench
);
2983 size_t size_written
;
2984 uint32_t this_run_size
= (size
> buf_size
) ? buf_size
: size
;
2985 retval
= target_read_buffer(target
, address
, this_run_size
, buffer
);
2986 if (retval
!= ERROR_OK
)
2989 retval
= fileio_write(&fileio
, this_run_size
, buffer
, &size_written
);
2990 if (retval
!= ERROR_OK
)
2993 size
-= this_run_size
;
2994 address
+= this_run_size
;
2999 if ((ERROR_OK
== retval
) && (duration_measure(&bench
) == ERROR_OK
)) {
3001 retval
= fileio_size(&fileio
, &filesize
);
3002 if (retval
!= ERROR_OK
)
3004 command_print(CMD_CTX
,
3005 "dumped %ld bytes in %fs (%0.3f KiB/s)", (long)filesize
,
3006 duration_elapsed(&bench
), duration_kbps(&bench
, filesize
));
3009 retvaltemp
= fileio_close(&fileio
);
3010 if (retvaltemp
!= ERROR_OK
)
3016 static COMMAND_HELPER(handle_verify_image_command_internal
, int verify
)
3020 uint32_t image_size
;
3023 uint32_t checksum
= 0;
3024 uint32_t mem_checksum
= 0;
3028 struct target
*target
= get_current_target(CMD_CTX
);
3031 return ERROR_COMMAND_SYNTAX_ERROR
;
3034 LOG_ERROR("no target selected");
3038 struct duration bench
;
3039 duration_start(&bench
);
3041 if (CMD_ARGC
>= 2) {
3043 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[1], addr
);
3044 image
.base_address
= addr
;
3045 image
.base_address_set
= 1;
3047 image
.base_address_set
= 0;
3048 image
.base_address
= 0x0;
3051 image
.start_address_set
= 0;
3053 retval
= image_open(&image
, CMD_ARGV
[0], (CMD_ARGC
== 3) ? CMD_ARGV
[2] : NULL
);
3054 if (retval
!= ERROR_OK
)
3060 for (i
= 0; i
< image
.num_sections
; i
++) {
3061 buffer
= malloc(image
.sections
[i
].size
);
3062 if (buffer
== NULL
) {
3063 command_print(CMD_CTX
,
3064 "error allocating buffer for section (%d bytes)",
3065 (int)(image
.sections
[i
].size
));
3068 retval
= image_read_section(&image
, i
, 0x0, image
.sections
[i
].size
, buffer
, &buf_cnt
);
3069 if (retval
!= ERROR_OK
) {
3075 /* calculate checksum of image */
3076 retval
= image_calculate_checksum(buffer
, buf_cnt
, &checksum
);
3077 if (retval
!= ERROR_OK
) {
3082 retval
= target_checksum_memory(target
, image
.sections
[i
].base_address
, buf_cnt
, &mem_checksum
);
3083 if (retval
!= ERROR_OK
) {
3088 if (checksum
!= mem_checksum
) {
3089 /* failed crc checksum, fall back to a binary compare */
3093 LOG_ERROR("checksum mismatch - attempting binary compare");
3095 data
= (uint8_t *)malloc(buf_cnt
);
3097 /* Can we use 32bit word accesses? */
3099 int count
= buf_cnt
;
3100 if ((count
% 4) == 0) {
3104 retval
= target_read_memory(target
, image
.sections
[i
].base_address
, size
, count
, data
);
3105 if (retval
== ERROR_OK
) {
3107 for (t
= 0; t
< buf_cnt
; t
++) {
3108 if (data
[t
] != buffer
[t
]) {
3109 command_print(CMD_CTX
,
3110 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3112 (unsigned)(t
+ image
.sections
[i
].base_address
),
3115 if (diffs
++ >= 127) {
3116 command_print(CMD_CTX
, "More than 128 errors, the rest are not printed.");
3128 command_print(CMD_CTX
, "address 0x%08" PRIx32
" length 0x%08zx",
3129 image
.sections
[i
].base_address
,
3134 image_size
+= buf_cnt
;
3137 command_print(CMD_CTX
, "No more differences found.");
3140 retval
= ERROR_FAIL
;
3141 if ((ERROR_OK
== retval
) && (duration_measure(&bench
) == ERROR_OK
)) {
3142 command_print(CMD_CTX
, "verified %" PRIu32
" bytes "
3143 "in %fs (%0.3f KiB/s)", image_size
,
3144 duration_elapsed(&bench
), duration_kbps(&bench
, image_size
));
3147 image_close(&image
);
3152 COMMAND_HANDLER(handle_verify_image_command
)
3154 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal
, 1);
3157 COMMAND_HANDLER(handle_test_image_command
)
3159 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal
, 0);
3162 static int handle_bp_command_list(struct command_context
*cmd_ctx
)
3164 struct target
*target
= get_current_target(cmd_ctx
);
3165 struct breakpoint
*breakpoint
= target
->breakpoints
;
3166 while (breakpoint
) {
3167 if (breakpoint
->type
== BKPT_SOFT
) {
3168 char *buf
= buf_to_str(breakpoint
->orig_instr
,
3169 breakpoint
->length
, 16);
3170 command_print(cmd_ctx
, "IVA breakpoint: 0x%8.8" PRIx32
", 0x%x, %i, 0x%s",
3171 breakpoint
->address
,
3173 breakpoint
->set
, buf
);
3176 if ((breakpoint
->address
== 0) && (breakpoint
->asid
!= 0))
3177 command_print(cmd_ctx
, "Context breakpoint: 0x%8.8" PRIx32
", 0x%x, %i",
3179 breakpoint
->length
, breakpoint
->set
);
3180 else if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
3181 command_print(cmd_ctx
, "Hybrid breakpoint(IVA): 0x%8.8" PRIx32
", 0x%x, %i",
3182 breakpoint
->address
,
3183 breakpoint
->length
, breakpoint
->set
);
3184 command_print(cmd_ctx
, "\t|--->linked with ContextID: 0x%8.8" PRIx32
,
3187 command_print(cmd_ctx
, "Breakpoint(IVA): 0x%8.8" PRIx32
", 0x%x, %i",
3188 breakpoint
->address
,
3189 breakpoint
->length
, breakpoint
->set
);
3192 breakpoint
= breakpoint
->next
;
3197 static int handle_bp_command_set(struct command_context
*cmd_ctx
,
3198 uint32_t addr
, uint32_t asid
, uint32_t length
, int hw
)
3200 struct target
*target
= get_current_target(cmd_ctx
);
3203 int retval
= breakpoint_add(target
, addr
, length
, hw
);
3204 if (ERROR_OK
== retval
)
3205 command_print(cmd_ctx
, "breakpoint set at 0x%8.8" PRIx32
"", addr
);
3207 LOG_ERROR("Failure setting breakpoint, the same address(IVA) is already used");
3210 } else if (addr
== 0) {
3211 int retval
= context_breakpoint_add(target
, asid
, length
, hw
);
3212 if (ERROR_OK
== retval
)
3213 command_print(cmd_ctx
, "Context breakpoint set at 0x%8.8" PRIx32
"", asid
);
3215 LOG_ERROR("Failure setting breakpoint, the same address(CONTEXTID) is already used");
3219 int retval
= hybrid_breakpoint_add(target
, addr
, asid
, length
, hw
);
3220 if (ERROR_OK
== retval
)
3221 command_print(cmd_ctx
, "Hybrid breakpoint set at 0x%8.8" PRIx32
"", asid
);
3223 LOG_ERROR("Failure setting breakpoint, the same address is already used");
3230 COMMAND_HANDLER(handle_bp_command
)
3239 return handle_bp_command_list(CMD_CTX
);
3243 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[0], addr
);
3244 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[1], length
);
3245 return handle_bp_command_set(CMD_CTX
, addr
, asid
, length
, hw
);
3248 if (strcmp(CMD_ARGV
[2], "hw") == 0) {
3250 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[0], addr
);
3252 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[1], length
);
3255 return handle_bp_command_set(CMD_CTX
, addr
, asid
, length
, hw
);
3256 } else if (strcmp(CMD_ARGV
[2], "hw_ctx") == 0) {
3258 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[0], asid
);
3259 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[1], length
);
3261 return handle_bp_command_set(CMD_CTX
, addr
, asid
, length
, hw
);
3266 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[0], addr
);
3267 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[1], asid
);
3268 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[2], length
);
3269 return handle_bp_command_set(CMD_CTX
, addr
, asid
, length
, hw
);
3272 return ERROR_COMMAND_SYNTAX_ERROR
;
3276 COMMAND_HANDLER(handle_rbp_command
)
3279 return ERROR_COMMAND_SYNTAX_ERROR
;
3282 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[0], addr
);
3284 struct target
*target
= get_current_target(CMD_CTX
);
3285 breakpoint_remove(target
, addr
);
3290 COMMAND_HANDLER(handle_wp_command
)
3292 struct target
*target
= get_current_target(CMD_CTX
);
3294 if (CMD_ARGC
== 0) {
3295 struct watchpoint
*watchpoint
= target
->watchpoints
;
3297 while (watchpoint
) {
3298 command_print(CMD_CTX
, "address: 0x%8.8" PRIx32
3299 ", len: 0x%8.8" PRIx32
3300 ", r/w/a: %i, value: 0x%8.8" PRIx32
3301 ", mask: 0x%8.8" PRIx32
,
3302 watchpoint
->address
,
3304 (int)watchpoint
->rw
,
3307 watchpoint
= watchpoint
->next
;
3312 enum watchpoint_rw type
= WPT_ACCESS
;
3314 uint32_t length
= 0;
3315 uint32_t data_value
= 0x0;
3316 uint32_t data_mask
= 0xffffffff;
3320 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[4], data_mask
);
3323 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[3], data_value
);
3326 switch (CMD_ARGV
[2][0]) {
3337 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV
[2][0]);
3338 return ERROR_COMMAND_SYNTAX_ERROR
;
3342 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[1], length
);
3343 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[0], addr
);
3347 return ERROR_COMMAND_SYNTAX_ERROR
;
3350 int retval
= watchpoint_add(target
, addr
, length
, type
,
3351 data_value
, data_mask
);
3352 if (ERROR_OK
!= retval
)
3353 LOG_ERROR("Failure setting watchpoints");
3358 COMMAND_HANDLER(handle_rwp_command
)
3361 return ERROR_COMMAND_SYNTAX_ERROR
;
3364 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[0], addr
);
3366 struct target
*target
= get_current_target(CMD_CTX
);
3367 watchpoint_remove(target
, addr
);
3373 * Translate a virtual address to a physical address.
3375 * The low-level target implementation must have logged a detailed error
3376 * which is forwarded to telnet/GDB session.
3378 COMMAND_HANDLER(handle_virt2phys_command
)
3381 return ERROR_COMMAND_SYNTAX_ERROR
;
3384 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[0], va
);
3387 struct target
*target
= get_current_target(CMD_CTX
);
3388 int retval
= target
->type
->virt2phys(target
, va
, &pa
);
3389 if (retval
== ERROR_OK
)
3390 command_print(CMD_CTX
, "Physical address 0x%08" PRIx32
"", pa
);
3395 static void writeData(FILE *f
, const void *data
, size_t len
)
3397 size_t written
= fwrite(data
, 1, len
, f
);
3399 LOG_ERROR("failed to write %zu bytes: %s", len
, strerror(errno
));
3402 static void writeLong(FILE *f
, int l
)
3405 for (i
= 0; i
< 4; i
++) {
3406 char c
= (l
>> (i
*8))&0xff;
3407 writeData(f
, &c
, 1);
3412 static void writeString(FILE *f
, char *s
)
3414 writeData(f
, s
, strlen(s
));
3417 /* Dump a gmon.out histogram file. */
3418 static void writeGmon(uint32_t *samples
, uint32_t sampleNum
, const char *filename
)
3421 FILE *f
= fopen(filename
, "w");
3424 writeString(f
, "gmon");
3425 writeLong(f
, 0x00000001); /* Version */
3426 writeLong(f
, 0); /* padding */
3427 writeLong(f
, 0); /* padding */
3428 writeLong(f
, 0); /* padding */
3430 uint8_t zero
= 0; /* GMON_TAG_TIME_HIST */
3431 writeData(f
, &zero
, 1);
3433 /* figure out bucket size */
3434 uint32_t min
= samples
[0];
3435 uint32_t max
= samples
[0];
3436 for (i
= 0; i
< sampleNum
; i
++) {
3437 if (min
> samples
[i
])
3439 if (max
< samples
[i
])
3443 int addressSpace
= (max
- min
+ 1);
3444 assert(addressSpace
>= 2);
3446 static const uint32_t maxBuckets
= 16 * 1024; /* maximum buckets. */
3447 uint32_t length
= addressSpace
;
3448 if (length
> maxBuckets
)
3449 length
= maxBuckets
;
3450 int *buckets
= malloc(sizeof(int)*length
);
3451 if (buckets
== NULL
) {
3455 memset(buckets
, 0, sizeof(int) * length
);
3456 for (i
= 0; i
< sampleNum
; i
++) {
3457 uint32_t address
= samples
[i
];
3458 long long a
= address
- min
;
3459 long long b
= length
- 1;
3460 long long c
= addressSpace
- 1;
3461 int index_t
= (a
* b
) / c
; /* danger!!!! int32 overflows */
3465 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
3466 writeLong(f
, min
); /* low_pc */
3467 writeLong(f
, max
); /* high_pc */
3468 writeLong(f
, length
); /* # of samples */
3469 writeLong(f
, 100); /* KLUDGE! We lie, ca. 100Hz best case. */
3470 writeString(f
, "seconds");
3471 for (i
= 0; i
< (15-strlen("seconds")); i
++)
3472 writeData(f
, &zero
, 1);
3473 writeString(f
, "s");
3475 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
3477 char *data
= malloc(2 * length
);
3479 for (i
= 0; i
< length
; i
++) {
3484 data
[i
* 2] = val
&0xff;
3485 data
[i
* 2 + 1] = (val
>> 8) & 0xff;
3488 writeData(f
, data
, length
* 2);
3496 /* profiling samples the CPU PC as quickly as OpenOCD is able,
3497 * which will be used as a random sampling of PC */
3498 COMMAND_HANDLER(handle_profile_command
)
3500 struct target
*target
= get_current_target(CMD_CTX
);
3501 struct timeval timeout
, now
;
3503 gettimeofday(&timeout
, NULL
);
3505 return ERROR_COMMAND_SYNTAX_ERROR
;
3507 COMMAND_PARSE_NUMBER(uint
, CMD_ARGV
[0], offset
);
3509 timeval_add_time(&timeout
, offset
, 0);
3512 * @todo: Some cores let us sample the PC without the
3513 * annoying halt/resume step; for example, ARMv7 PCSR.
3514 * Provide a way to use that more efficient mechanism.
3517 command_print(CMD_CTX
, "Starting profiling. Halting and resuming the target as often as we can...");
3519 static const int maxSample
= 10000;
3520 uint32_t *samples
= malloc(sizeof(uint32_t)*maxSample
);
3521 if (samples
== NULL
)
3525 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
3526 struct reg
*reg
= register_get_by_name(target
->reg_cache
, "pc", 1);
3528 int retval
= ERROR_OK
;
3530 target_poll(target
);
3531 if (target
->state
== TARGET_HALTED
) {
3532 uint32_t t
= *((uint32_t *)reg
->value
);
3533 samples
[numSamples
++] = t
;
3534 /* current pc, addr = 0, do not handle breakpoints, not debugging */
3535 retval
= target_resume(target
, 1, 0, 0, 0);
3536 target_poll(target
);
3537 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
3538 } else if (target
->state
== TARGET_RUNNING
) {
3539 /* We want to quickly sample the PC. */
3540 retval
= target_halt(target
);
3541 if (retval
!= ERROR_OK
) {
3546 command_print(CMD_CTX
, "Target not halted or running");
3550 if (retval
!= ERROR_OK
)
3553 gettimeofday(&now
, NULL
);
3554 if ((numSamples
>= maxSample
) || ((now
.tv_sec
>= timeout
.tv_sec
)
3555 && (now
.tv_usec
>= timeout
.tv_usec
))) {
3556 command_print(CMD_CTX
, "Profiling completed. %d samples.", numSamples
);
3557 retval
= target_poll(target
);
3558 if (retval
!= ERROR_OK
) {
3562 if (target
->state
== TARGET_HALTED
) {
3563 /* current pc, addr = 0, do not handle
3564 * breakpoints, not debugging */
3565 target_resume(target
, 1, 0, 0, 0);
3567 retval
= target_poll(target
);
3568 if (retval
!= ERROR_OK
) {
3572 writeGmon(samples
, numSamples
, CMD_ARGV
[1]);
3573 command_print(CMD_CTX
, "Wrote %s", CMD_ARGV
[1]);
3582 static int new_int_array_element(Jim_Interp
*interp
, const char *varname
, int idx
, uint32_t val
)
3585 Jim_Obj
*nameObjPtr
, *valObjPtr
;
3588 namebuf
= alloc_printf("%s(%d)", varname
, idx
);
3592 nameObjPtr
= Jim_NewStringObj(interp
, namebuf
, -1);
3593 valObjPtr
= Jim_NewIntObj(interp
, val
);
3594 if (!nameObjPtr
|| !valObjPtr
) {
3599 Jim_IncrRefCount(nameObjPtr
);
3600 Jim_IncrRefCount(valObjPtr
);
3601 result
= Jim_SetVariable(interp
, nameObjPtr
, valObjPtr
);
3602 Jim_DecrRefCount(interp
, nameObjPtr
);
3603 Jim_DecrRefCount(interp
, valObjPtr
);
3605 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
3609 static int jim_mem2array(Jim_Interp
*interp
, int argc
, Jim_Obj
*const *argv
)
3611 struct command_context
*context
;
3612 struct target
*target
;
3614 context
= current_command_context(interp
);
3615 assert(context
!= NULL
);
3617 target
= get_current_target(context
);
3618 if (target
== NULL
) {
3619 LOG_ERROR("mem2array: no current target");
3623 return target_mem2array(interp
, target
, argc
- 1, argv
+ 1);
3626 static int target_mem2array(Jim_Interp
*interp
, struct target
*target
, int argc
, Jim_Obj
*const *argv
)
3634 const char *varname
;
3638 /* argv[1] = name of array to receive the data
3639 * argv[2] = desired width
3640 * argv[3] = memory address
3641 * argv[4] = count of times to read
3644 Jim_WrongNumArgs(interp
, 1, argv
, "varname width addr nelems");
3647 varname
= Jim_GetString(argv
[0], &len
);
3648 /* given "foo" get space for worse case "foo(%d)" .. add 20 */
3650 e
= Jim_GetLong(interp
, argv
[1], &l
);