break from long loops on shutdown request
[openocd.git] / src / target / target.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2007-2010 Øyvind Harboe *
8 * oyvind.harboe@zylin.com *
9 * *
10 * Copyright (C) 2008, Duane Ellis *
11 * openocd@duaneeellis.com *
12 * *
13 * Copyright (C) 2008 by Spencer Oliver *
14 * spen@spen-soft.co.uk *
15 * *
16 * Copyright (C) 2008 by Rick Altherr *
17 * kc8apf@kc8apf.net> *
18 * *
19 * Copyright (C) 2011 by Broadcom Corporation *
20 * Evan Hunter - ehunter@broadcom.com *
21 * *
22 * Copyright (C) ST-Ericsson SA 2011 *
23 * michel.jaouen@stericsson.com : smp minimum support *
24 * *
25 * Copyright (C) 2011 Andreas Fritiofson *
26 * andreas.fritiofson@gmail.com *
27 ***************************************************************************/
28
29 #ifdef HAVE_CONFIG_H
30 #include "config.h"
31 #endif
32
33 #include <helper/align.h>
34 #include <helper/nvp.h>
35 #include <helper/time_support.h>
36 #include <jtag/jtag.h>
37 #include <flash/nor/core.h>
38
39 #include "target.h"
40 #include "target_type.h"
41 #include "target_request.h"
42 #include "breakpoints.h"
43 #include "register.h"
44 #include "trace.h"
45 #include "image.h"
46 #include "rtos/rtos.h"
47 #include "transport/transport.h"
48 #include "arm_cti.h"
49 #include "smp.h"
50 #include "semihosting_common.h"
51
52 /* default halt wait timeout (ms) */
53 #define DEFAULT_HALT_TIMEOUT 5000
54
55 static int target_read_buffer_default(struct target *target, target_addr_t address,
56 uint32_t count, uint8_t *buffer);
57 static int target_write_buffer_default(struct target *target, target_addr_t address,
58 uint32_t count, const uint8_t *buffer);
59 static int target_array2mem(Jim_Interp *interp, struct target *target,
60 int argc, Jim_Obj * const *argv);
61 static int target_mem2array(Jim_Interp *interp, struct target *target,
62 int argc, Jim_Obj * const *argv);
63 static int target_register_user_commands(struct command_context *cmd_ctx);
64 static int target_get_gdb_fileio_info_default(struct target *target,
65 struct gdb_fileio_info *fileio_info);
66 static int target_gdb_fileio_end_default(struct target *target, int retcode,
67 int fileio_errno, bool ctrl_c);
68
/* Registry of every target driver compiled into this build; iterated when a
 * "target create" command looks up its "-type" argument. NULL-terminated. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&xtensa_chip_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&esp32_target,
	&esp32s2_target,
	&esp32s3_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&armv8r_target,
	&mips_mips64_target,
	NULL,
};
110
/* Head of the singly linked list of all configured targets (see
 * append_to_list_all_targets() / get_target()). */
struct target *all_targets;
/* Registered event / timer callback chains, serviced from the main loop. */
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
/* Absolute time (ms) of the next pending timer callback. */
static int64_t target_timer_next_event_value;
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
/* Background polling period in ms. */
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
/* Shared empty SMP group used by targets that are not part of an SMP cluster. */
static LIST_HEAD(empty_smp_targets);
119
/* Polarity argument for reset-line style commands: deassert (0) / assert (1). */
enum nvp_assert {
	NVP_DEASSERT,
	NVP_ASSERT,
};
124
125 static const struct nvp nvp_assert[] = {
126 { .name = "assert", NVP_ASSERT },
127 { .name = "deassert", NVP_DEASSERT },
128 { .name = "T", NVP_ASSERT },
129 { .name = "F", NVP_DEASSERT },
130 { .name = "t", NVP_ASSERT },
131 { .name = "f", NVP_DEASSERT },
132 { .name = NULL, .value = -1 }
133 };
134
/* Maps ERROR_TARGET_* codes to short symbolic names for Tcl/log output;
 * consumed by target_strerror_safe(). Terminated by a NULL name. */
static const struct nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
149
150 static const char *target_strerror_safe(int err)
151 {
152 const struct nvp *n;
153
154 n = nvp_value2name(nvp_error_target, err);
155 if (!n->name)
156 return "unknown";
157 else
158 return n->name;
159 }
160
/* Maps TARGET_EVENT_* codes to the event names users attach Tcl handlers to
 * with "$target configure -event <name> ...". Do not reorder: name<->value
 * lookups return the first match. Terminated by a NULL name. */
static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START,         .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE,    .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT,        .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST,   .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE,  .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT,          .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END,           .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X100, .name = "semihosting-user-cmd-0x100" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X101, .name = "semihosting-user-cmd-0x101" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X102, .name = "semihosting-user-cmd-0x102" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X103, .name = "semihosting-user-cmd-0x103" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X104, .name = "semihosting-user-cmd-0x104" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X105, .name = "semihosting-user-cmd-0x105" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X106, .name = "semihosting-user-cmd-0x106" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X107, .name = "semihosting-user-cmd-0x107" },

	{ .name = NULL, .value = -1 }
};
212
/* Printable names for enum target_state; used by target_state_name(). */
static const struct nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted",  .value = TARGET_HALTED },
	{ .name = "reset",   .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};
221
/* Printable names for enum target_debug_reason; used by debug_reason_name(). */
static const struct nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request",             .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint",                .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint",                .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step",               .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted",         .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit",              .value = DBG_REASON_EXIT },
	{ .name = "exception-catch",           .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined",                 .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
234
/* Accepted spellings for the "-endian" configure option; long forms first so
 * value->name lookups report "big"/"little" rather than the abbreviations. */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big",    .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be",     .value = TARGET_BIG_ENDIAN },
	{ .name = "le",     .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL,     .value = -1 },
};
242
/* Printable names for enum target_reset_mode ("reset run|halt|init"). */
static const struct nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run",     .value = RESET_RUN },
	{ .name = "halt",    .value = RESET_HALT },
	{ .name = "init",    .value = RESET_INIT },
	{ .name = NULL,      .value = -1 },
};
250
251 const char *debug_reason_name(struct target *t)
252 {
253 const char *cp;
254
255 cp = nvp_value2name(nvp_target_debug_reason,
256 t->debug_reason)->name;
257 if (!cp) {
258 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
259 cp = "(*BUG*unknown*BUG*)";
260 }
261 return cp;
262 }
263
264 const char *target_state_name(struct target *t)
265 {
266 const char *cp;
267 cp = nvp_value2name(nvp_target_state, t->state)->name;
268 if (!cp) {
269 LOG_ERROR("Invalid target state: %d", (int)(t->state));
270 cp = "(*BUG*unknown*BUG*)";
271 }
272
273 if (!target_was_examined(t) && t->defer_examine)
274 cp = "examine deferred";
275
276 return cp;
277 }
278
279 const char *target_event_name(enum target_event event)
280 {
281 const char *cp;
282 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
283 if (!cp) {
284 LOG_ERROR("Invalid target event: %d", (int)(event));
285 cp = "(*BUG*unknown*BUG*)";
286 }
287 return cp;
288 }
289
290 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
291 {
292 const char *cp;
293 cp = nvp_value2name(nvp_reset_modes, reset_mode)->name;
294 if (!cp) {
295 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
296 cp = "(*BUG*unknown*BUG*)";
297 }
298 return cp;
299 }
300
301 static void append_to_list_all_targets(struct target *target)
302 {
303 struct target **t = &all_targets;
304
305 while (*t)
306 t = &((*t)->next);
307 *t = target;
308 }
309
310 /* read a uint64_t from a buffer in target memory endianness */
311 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
312 {
313 if (target->endianness == TARGET_LITTLE_ENDIAN)
314 return le_to_h_u64(buffer);
315 else
316 return be_to_h_u64(buffer);
317 }
318
319 /* read a uint32_t from a buffer in target memory endianness */
320 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
321 {
322 if (target->endianness == TARGET_LITTLE_ENDIAN)
323 return le_to_h_u32(buffer);
324 else
325 return be_to_h_u32(buffer);
326 }
327
328 /* read a uint24_t from a buffer in target memory endianness */
329 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
330 {
331 if (target->endianness == TARGET_LITTLE_ENDIAN)
332 return le_to_h_u24(buffer);
333 else
334 return be_to_h_u24(buffer);
335 }
336
337 /* read a uint16_t from a buffer in target memory endianness */
338 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
339 {
340 if (target->endianness == TARGET_LITTLE_ENDIAN)
341 return le_to_h_u16(buffer);
342 else
343 return be_to_h_u16(buffer);
344 }
345
346 /* write a uint64_t to a buffer in target memory endianness */
347 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
348 {
349 if (target->endianness == TARGET_LITTLE_ENDIAN)
350 h_u64_to_le(buffer, value);
351 else
352 h_u64_to_be(buffer, value);
353 }
354
355 /* write a uint32_t to a buffer in target memory endianness */
356 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
357 {
358 if (target->endianness == TARGET_LITTLE_ENDIAN)
359 h_u32_to_le(buffer, value);
360 else
361 h_u32_to_be(buffer, value);
362 }
363
364 /* write a uint24_t to a buffer in target memory endianness */
365 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
366 {
367 if (target->endianness == TARGET_LITTLE_ENDIAN)
368 h_u24_to_le(buffer, value);
369 else
370 h_u24_to_be(buffer, value);
371 }
372
373 /* write a uint16_t to a buffer in target memory endianness */
374 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
375 {
376 if (target->endianness == TARGET_LITTLE_ENDIAN)
377 h_u16_to_le(buffer, value);
378 else
379 h_u16_to_be(buffer, value);
380 }
381
/* Write a single byte; endianness is irrelevant for one byte, but this
 * keeps the set_u8/u16/u24/u32/u64 family uniform. */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	buffer[0] = value;
}
387
/* Read an array of uint64_t values from a target-endian buffer into dstbuf. */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		dstbuf[idx] = target_buffer_get_u64(target, buffer + idx * 8);
}
395
/* Read an array of uint32_t values from a target-endian buffer into dstbuf. */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		dstbuf[idx] = target_buffer_get_u32(target, buffer + idx * 4);
}
403
/* Read an array of uint16_t values from a target-endian buffer into dstbuf. */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		dstbuf[idx] = target_buffer_get_u16(target, buffer + idx * 2);
}
411
/* Write an array of uint64_t values into a buffer in target endianness. */
void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u64(target, buffer + idx * 8, srcbuf[idx]);
}
419
/* Write an array of uint32_t values into a buffer in target endianness. */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u32(target, buffer + idx * 4, srcbuf[idx]);
}
427
/* Write an array of uint16_t values into a buffer in target endianness. */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u16(target, buffer + idx * 2, srcbuf[idx]);
}
435
436 /* return a pointer to a configured target; id is name or index in all_targets */
437 struct target *get_target(const char *id)
438 {
439 struct target *target;
440
441 /* try as tcltarget name */
442 for (target = all_targets; target; target = target->next) {
443 if (!target_name(target))
444 continue;
445 if (strcmp(id, target_name(target)) == 0)
446 return target;
447 }
448
449 /* try as index */
450 unsigned int index, counter;
451 if (parse_uint(id, &index) != ERROR_OK)
452 return NULL;
453
454 for (target = all_targets, counter = index;
455 target && counter;
456 target = target->next, --counter)
457 ;
458
459 return target;
460 }
461
/* Return the current target for a command context; a missing current
 * target is an internal invariant violation and aborts the process. */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *current = get_current_target_or_null(cmd_ctx);

	if (current)
		return current;

	LOG_ERROR("BUG: current_target out of bounds");
	exit(-1);
}
473
474 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
475 {
476 return cmd_ctx->current_target_override
477 ? cmd_ctx->current_target_override
478 : cmd_ctx->current_target;
479 }
480
481 int target_poll(struct target *target)
482 {
483 int retval;
484
485 /* We can't poll until after examine */
486 if (!target_was_examined(target)) {
487 /* Fail silently lest we pollute the log */
488 return ERROR_FAIL;
489 }
490
491 retval = target->type->poll(target);
492 if (retval != ERROR_OK)
493 return retval;
494
495 if (target->halt_issued) {
496 if (target->state == TARGET_HALTED)
497 target->halt_issued = false;
498 else {
499 int64_t t = timeval_ms() - target->halt_issued_time;
500 if (t > DEFAULT_HALT_TIMEOUT) {
501 target->halt_issued = false;
502 LOG_INFO("Halt timed out, wake up GDB.");
503 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
504 }
505 }
506 }
507
508 return ERROR_OK;
509 }
510
511 int target_halt(struct target *target)
512 {
513 int retval;
514 /* We can't poll until after examine */
515 if (!target_was_examined(target)) {
516 LOG_ERROR("Target not examined yet");
517 return ERROR_FAIL;
518 }
519
520 retval = target->type->halt(target);
521 if (retval != ERROR_OK)
522 return retval;
523
524 target->halt_issued = true;
525 target->halt_issued_time = timeval_ms();
526
527 return ERROR_OK;
528 }
529
530 /**
531 * Make the target (re)start executing using its saved execution
532 * context (possibly with some modifications).
533 *
534 * @param target Which target should start executing.
535 * @param current True to use the target's saved program counter instead
536 * of the address parameter
537 * @param address Optionally used as the program counter.
538 * @param handle_breakpoints True iff breakpoints at the resumption PC
539 * should be skipped. (For example, maybe execution was stopped by
540 * such a breakpoint, in which case it would be counterproductive to
541 * let it re-trigger.
542 * @param debug_execution False if all working areas allocated by OpenOCD
543 * should be released and/or restored to their original contents.
544 * (This would for example be true to run some downloaded "helper"
545 * algorithm code, which resides in one such working buffer and uses
546 * another for data storage.)
547 *
548 * @todo Resolve the ambiguity about what the "debug_execution" flag
549 * signifies. For example, Target implementations don't agree on how
550 * it relates to invalidation of the register cache, or to whether
551 * breakpoints and watchpoints should be enabled. (It would seem wrong
552 * to enable breakpoints when running downloaded "helper" algorithms
553 * (debug_execution true), since the breakpoints would be set to match
554 * target firmware being debugged, not the helper algorithm.... and
555 * enabling them could cause such helpers to malfunction (for example,
556 * by overwriting data with a breakpoint instruction. On the other
557 * hand the infrastructure for running such helpers might use this
558 * procedure but rely on hardware breakpoint to detect termination.)
559 */
560 int target_resume(struct target *target, int current, target_addr_t address,
561 int handle_breakpoints, int debug_execution)
562 {
563 int retval;
564
565 /* We can't poll until after examine */
566 if (!target_was_examined(target)) {
567 LOG_ERROR("Target not examined yet");
568 return ERROR_FAIL;
569 }
570
571 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
572
573 /* note that resume *must* be asynchronous. The CPU can halt before
574 * we poll. The CPU can even halt at the current PC as a result of
575 * a software breakpoint being inserted by (a bug?) the application.
576 */
577 /*
578 * resume() triggers the event 'resumed'. The execution of TCL commands
579 * in the event handler causes the polling of targets. If the target has
580 * already halted for a breakpoint, polling will run the 'halted' event
581 * handler before the pending 'resumed' handler.
582 * Disable polling during resume() to guarantee the execution of handlers
583 * in the correct order.
584 */
585 bool save_poll_mask = jtag_poll_mask();
586 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
587 jtag_poll_unmask(save_poll_mask);
588
589 if (retval != ERROR_OK)
590 return retval;
591
592 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
593
594 return retval;
595 }
596
597 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
598 {
599 char buf[100];
600 int retval;
601 const struct nvp *n;
602 n = nvp_value2name(nvp_reset_modes, reset_mode);
603 if (!n->name) {
604 LOG_ERROR("invalid reset mode");
605 return ERROR_FAIL;
606 }
607
608 struct target *target;
609 for (target = all_targets; target; target = target->next)
610 target_call_reset_callbacks(target, reset_mode);
611
612 /* disable polling during reset to make reset event scripts
613 * more predictable, i.e. dr/irscan & pathmove in events will
614 * not have JTAG operations injected into the middle of a sequence.
615 */
616 bool save_poll_mask = jtag_poll_mask();
617
618 sprintf(buf, "ocd_process_reset %s", n->name);
619 retval = Jim_Eval(cmd->ctx->interp, buf);
620
621 jtag_poll_unmask(save_poll_mask);
622
623 if (retval != JIM_OK) {
624 Jim_MakeErrorMessage(cmd->ctx->interp);
625 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
626 return ERROR_FAIL;
627 }
628
629 /* We want any events to be processed before the prompt */
630 retval = target_call_timer_callbacks_now();
631
632 for (target = all_targets; target; target = target->next) {
633 target->type->check_reset(target);
634 target->running_alg = false;
635 }
636
637 return retval;
638 }
639
/* Default virt2phys hook for targets without an MMU: the physical address
 * equals the virtual address. */
static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
646
/* Default mmu hook for targets without an MMU: always reports disabled. */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
652
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static inline void target_reset_examined(struct target *target)
{
	target->examined = false;
}
661
/* Default examine hook: targets needing no probing are simply marked
 * examined. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
667
/* Default check_reset hook: no post-reset sanity check is performed. */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
673
674 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
675 * Keep in sync */
676 int target_examine_one(struct target *target)
677 {
678 LOG_TARGET_DEBUG(target, "Examination started");
679
680 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
681
682 int retval = target->type->examine(target);
683 if (retval != ERROR_OK) {
684 LOG_TARGET_ERROR(target, "Examination failed");
685 LOG_TARGET_DEBUG(target, "examine() returned error code %d", retval);
686 target_reset_examined(target);
687 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
688 return retval;
689 }
690
691 target_set_examined(target);
692 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
693
694 LOG_TARGET_INFO(target, "Examination succeed");
695 return ERROR_OK;
696 }
697
698 static int jtag_enable_callback(enum jtag_event event, void *priv)
699 {
700 struct target *target = priv;
701
702 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
703 return ERROR_OK;
704
705 jtag_unregister_event_callback(jtag_enable_callback, target);
706
707 return target_examine_one(target);
708 }
709
710 /* Targets that correctly implement init + examine, i.e.
711 * no communication with target during init:
712 *
713 * XScale
714 */
715 int target_examine(void)
716 {
717 int retval = ERROR_OK;
718 struct target *target;
719
720 for (target = all_targets; target; target = target->next) {
721 /* defer examination, but don't skip it */
722 if (!target->tap->enabled) {
723 jtag_register_event_callback(jtag_enable_callback,
724 target);
725 continue;
726 }
727
728 if (target->defer_examine)
729 continue;
730
731 int retval2 = target_examine_one(target);
732 if (retval2 != ERROR_OK) {
733 LOG_WARNING("target %s examination failed", target_name(target));
734 retval = retval2;
735 }
736 }
737 return retval;
738 }
739
/* Return the name of the target's driver type (e.g. "cortex_m"). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
744
745 static int target_soft_reset_halt(struct target *target)
746 {
747 if (!target_was_examined(target)) {
748 LOG_ERROR("Target not examined yet");
749 return ERROR_FAIL;
750 }
751 if (!target->type->soft_reset_halt) {
752 LOG_ERROR("Target %s does not support soft_reset_halt",
753 target_name(target));
754 return ERROR_FAIL;
755 }
756 return target->type->soft_reset_halt(target);
757 }
758
759 /**
760 * Downloads a target-specific native code algorithm to the target,
761 * and executes it. * Note that some targets may need to set up, enable,
762 * and tear down a breakpoint (hard or * soft) to detect algorithm
763 * termination, while others may support lower overhead schemes where
764 * soft breakpoints embedded in the algorithm automatically terminate the
765 * algorithm.
766 *
767 * @param target used to run the algorithm
768 * @param num_mem_params
769 * @param mem_params
770 * @param num_reg_params
771 * @param reg_param
772 * @param entry_point
773 * @param exit_point
774 * @param timeout_ms
775 * @param arch_info target-specific description of the algorithm.
776 */
777 int target_run_algorithm(struct target *target,
778 int num_mem_params, struct mem_param *mem_params,
779 int num_reg_params, struct reg_param *reg_param,
780 target_addr_t entry_point, target_addr_t exit_point,
781 unsigned int timeout_ms, void *arch_info)
782 {
783 int retval = ERROR_FAIL;
784
785 if (!target_was_examined(target)) {
786 LOG_ERROR("Target not examined yet");
787 goto done;
788 }
789 if (!target->type->run_algorithm) {
790 LOG_ERROR("Target type '%s' does not support %s",
791 target_type_name(target), __func__);
792 goto done;
793 }
794
795 target->running_alg = true;
796 retval = target->type->run_algorithm(target,
797 num_mem_params, mem_params,
798 num_reg_params, reg_param,
799 entry_point, exit_point, timeout_ms, arch_info);
800 target->running_alg = false;
801
802 done:
803 return retval;
804 }
805
806 /**
807 * Executes a target-specific native code algorithm and leaves it running.
808 *
809 * @param target used to run the algorithm
810 * @param num_mem_params
811 * @param mem_params
812 * @param num_reg_params
813 * @param reg_params
814 * @param entry_point
815 * @param exit_point
816 * @param arch_info target-specific description of the algorithm.
817 */
818 int target_start_algorithm(struct target *target,
819 int num_mem_params, struct mem_param *mem_params,
820 int num_reg_params, struct reg_param *reg_params,
821 target_addr_t entry_point, target_addr_t exit_point,
822 void *arch_info)
823 {
824 int retval = ERROR_FAIL;
825
826 if (!target_was_examined(target)) {
827 LOG_ERROR("Target not examined yet");
828 goto done;
829 }
830 if (!target->type->start_algorithm) {
831 LOG_ERROR("Target type '%s' does not support %s",
832 target_type_name(target), __func__);
833 goto done;
834 }
835 if (target->running_alg) {
836 LOG_ERROR("Target is already running an algorithm");
837 goto done;
838 }
839
840 target->running_alg = true;
841 retval = target->type->start_algorithm(target,
842 num_mem_params, mem_params,
843 num_reg_params, reg_params,
844 entry_point, exit_point, arch_info);
845
846 done:
847 return retval;
848 }
849
850 /**
851 * Waits for an algorithm started with target_start_algorithm() to complete.
852 *
853 * @param target used to run the algorithm
854 * @param num_mem_params
855 * @param mem_params
856 * @param num_reg_params
857 * @param reg_params
858 * @param exit_point
859 * @param timeout_ms
860 * @param arch_info target-specific description of the algorithm.
861 */
862 int target_wait_algorithm(struct target *target,
863 int num_mem_params, struct mem_param *mem_params,
864 int num_reg_params, struct reg_param *reg_params,
865 target_addr_t exit_point, unsigned int timeout_ms,
866 void *arch_info)
867 {
868 int retval = ERROR_FAIL;
869
870 if (!target->type->wait_algorithm) {
871 LOG_ERROR("Target type '%s' does not support %s",
872 target_type_name(target), __func__);
873 goto done;
874 }
875 if (!target->running_alg) {
876 LOG_ERROR("Target is not running an algorithm");
877 goto done;
878 }
879
880 retval = target->type->wait_algorithm(target,
881 num_mem_params, mem_params,
882 num_reg_params, reg_params,
883 exit_point, timeout_ms, arch_info);
884 if (retval != ERROR_TARGET_TIMEOUT)
885 target->running_alg = false;
886
887 done:
888 return retval;
889 }
890
891 /**
892 * Streams data to a circular buffer on target intended for consumption by code
893 * running asynchronously on target.
894 *
895 * This is intended for applications where target-specific native code runs
896 * on the target, receives data from the circular buffer, does something with
897 * it (most likely writing it to a flash memory), and advances the circular
898 * buffer pointer.
899 *
900 * This assumes that the helper algorithm has already been loaded to the target,
901 * but has not been started yet. Given memory and register parameters are passed
902 * to the algorithm.
903 *
904 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
905 * following format:
906 *
907 * [buffer_start + 0, buffer_start + 4):
908 * Write Pointer address (aka head). Written and updated by this
909 * routine when new data is written to the circular buffer.
910 * [buffer_start + 4, buffer_start + 8):
911 * Read Pointer address (aka tail). Updated by code running on the
912 * target after it consumes data.
913 * [buffer_start + 8, buffer_start + buffer_size):
914 * Circular buffer contents.
915 *
916 * See contrib/loaders/flash/stm32f1x.S for an example.
917 *
918 * @param target used to run the algorithm
919 * @param buffer address on the host where data to be sent is located
920 * @param count number of blocks to send
921 * @param block_size size in bytes of each block
922 * @param num_mem_params count of memory-based params to pass to algorithm
923 * @param mem_params memory-based params to pass to algorithm
924 * @param num_reg_params count of register-based params to pass to algorithm
925 * @param reg_params memory-based params to pass to algorithm
926 * @param buffer_start address on the target of the circular buffer structure
927 * @param buffer_size size of the circular buffer structure
928 * @param entry_point address on the target to execute to start the algorithm
929 * @param exit_point address at which to set a breakpoint to catch the
930 * end of the algorithm; can be 0 if target triggers a breakpoint itself
931 * @param arch_info
932 */
933
934 int target_run_flash_async_algorithm(struct target *target,
935 const uint8_t *buffer, uint32_t count, int block_size,
936 int num_mem_params, struct mem_param *mem_params,
937 int num_reg_params, struct reg_param *reg_params,
938 uint32_t buffer_start, uint32_t buffer_size,
939 uint32_t entry_point, uint32_t exit_point, void *arch_info)
940 {
941 int retval;
942 int timeout = 0;
943
944 const uint8_t *buffer_orig = buffer;
945
946 /* Set up working area. First word is write pointer, second word is read pointer,
947 * rest is fifo data area. */
948 uint32_t wp_addr = buffer_start;
949 uint32_t rp_addr = buffer_start + 4;
950 uint32_t fifo_start_addr = buffer_start + 8;
951 uint32_t fifo_end_addr = buffer_start + buffer_size;
952
953 uint32_t wp = fifo_start_addr;
954 uint32_t rp = fifo_start_addr;
955
956 /* validate block_size is 2^n */
957 assert(IS_PWR_OF_2(block_size));
958
959 retval = target_write_u32(target, wp_addr, wp);
960 if (retval != ERROR_OK)
961 return retval;
962 retval = target_write_u32(target, rp_addr, rp);
963 if (retval != ERROR_OK)
964 return retval;
965
966 /* Start up algorithm on target and let it idle while writing the first chunk */
967 retval = target_start_algorithm(target, num_mem_params, mem_params,
968 num_reg_params, reg_params,
969 entry_point,
970 exit_point,
971 arch_info);
972
973 if (retval != ERROR_OK) {
974 LOG_ERROR("error starting target flash write algorithm");
975 return retval;
976 }
977
978 while (count > 0) {
979
980 retval = target_read_u32(target, rp_addr, &rp);
981 if (retval != ERROR_OK) {
982 LOG_ERROR("failed to get read pointer");
983 break;
984 }
985
986 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
987 (size_t) (buffer - buffer_orig), count, wp, rp);
988
989 if (rp == 0) {
990 LOG_ERROR("flash write algorithm aborted by target");
991 retval = ERROR_FLASH_OPERATION_FAILED;
992 break;
993 }
994
995 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
996 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
997 break;
998 }
999
1000 /* Count the number of bytes available in the fifo without
1001 * crossing the wrap around. Make sure to not fill it completely,
1002 * because that would make wp == rp and that's the empty condition. */
1003 uint32_t thisrun_bytes;
1004 if (rp > wp)
1005 thisrun_bytes = rp - wp - block_size;
1006 else if (rp > fifo_start_addr)
1007 thisrun_bytes = fifo_end_addr - wp;
1008 else
1009 thisrun_bytes = fifo_end_addr - wp - block_size;
1010
1011 if (thisrun_bytes == 0) {
1012 /* Throttle polling a bit if transfer is (much) faster than flash
1013 * programming. The exact delay shouldn't matter as long as it's
1014 * less than buffer size / flash speed. This is very unlikely to
1015 * run when using high latency connections such as USB. */
1016 alive_sleep(2);
1017
1018 /* to stop an infinite loop on some targets check and increment a timeout
1019 * this issue was observed on a stellaris using the new ICDI interface */
1020 if (timeout++ >= 2500) {
1021 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1022 return ERROR_FLASH_OPERATION_FAILED;
1023 }
1024 continue;
1025 }
1026
1027 /* reset our timeout */
1028 timeout = 0;
1029
1030 /* Limit to the amount of data we actually want to write */
1031 if (thisrun_bytes > count * block_size)
1032 thisrun_bytes = count * block_size;
1033
1034 /* Force end of large blocks to be word aligned */
1035 if (thisrun_bytes >= 16)
1036 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1037
1038 /* Write data to fifo */
1039 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1040 if (retval != ERROR_OK)
1041 break;
1042
1043 /* Update counters and wrap write pointer */
1044 buffer += thisrun_bytes;
1045 count -= thisrun_bytes / block_size;
1046 wp += thisrun_bytes;
1047 if (wp >= fifo_end_addr)
1048 wp = fifo_start_addr;
1049
1050 /* Store updated write pointer to target */
1051 retval = target_write_u32(target, wp_addr, wp);
1052 if (retval != ERROR_OK)
1053 break;
1054
1055 /* Avoid GDB timeouts */
1056 keep_alive();
1057 }
1058
1059 if (retval != ERROR_OK) {
1060 /* abort flash write algorithm on target */
1061 target_write_u32(target, wp_addr, 0);
1062 }
1063
1064 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1065 num_reg_params, reg_params,
1066 exit_point,
1067 10000,
1068 arch_info);
1069
1070 if (retval2 != ERROR_OK) {
1071 LOG_ERROR("error waiting for target flash write algorithm");
1072 retval = retval2;
1073 }
1074
1075 if (retval == ERROR_OK) {
1076 /* check if algorithm set rp = 0 after fifo writer loop finished */
1077 retval = target_read_u32(target, rp_addr, &rp);
1078 if (retval == ERROR_OK && rp == 0) {
1079 LOG_ERROR("flash write algorithm aborted by target");
1080 retval = ERROR_FLASH_OPERATION_FAILED;
1081 }
1082 }
1083
1084 return retval;
1085 }
1086
1087 int target_run_read_async_algorithm(struct target *target,
1088 uint8_t *buffer, uint32_t count, int block_size,
1089 int num_mem_params, struct mem_param *mem_params,
1090 int num_reg_params, struct reg_param *reg_params,
1091 uint32_t buffer_start, uint32_t buffer_size,
1092 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1093 {
1094 int retval;
1095 int timeout = 0;
1096
1097 const uint8_t *buffer_orig = buffer;
1098
1099 /* Set up working area. First word is write pointer, second word is read pointer,
1100 * rest is fifo data area. */
1101 uint32_t wp_addr = buffer_start;
1102 uint32_t rp_addr = buffer_start + 4;
1103 uint32_t fifo_start_addr = buffer_start + 8;
1104 uint32_t fifo_end_addr = buffer_start + buffer_size;
1105
1106 uint32_t wp = fifo_start_addr;
1107 uint32_t rp = fifo_start_addr;
1108
1109 /* validate block_size is 2^n */
1110 assert(IS_PWR_OF_2(block_size));
1111
1112 retval = target_write_u32(target, wp_addr, wp);
1113 if (retval != ERROR_OK)
1114 return retval;
1115 retval = target_write_u32(target, rp_addr, rp);
1116 if (retval != ERROR_OK)
1117 return retval;
1118
1119 /* Start up algorithm on target */
1120 retval = target_start_algorithm(target, num_mem_params, mem_params,
1121 num_reg_params, reg_params,
1122 entry_point,
1123 exit_point,
1124 arch_info);
1125
1126 if (retval != ERROR_OK) {
1127 LOG_ERROR("error starting target flash read algorithm");
1128 return retval;
1129 }
1130
1131 while (count > 0) {
1132 retval = target_read_u32(target, wp_addr, &wp);
1133 if (retval != ERROR_OK) {
1134 LOG_ERROR("failed to get write pointer");
1135 break;
1136 }
1137
1138 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1139 (size_t)(buffer - buffer_orig), count, wp, rp);
1140
1141 if (wp == 0) {
1142 LOG_ERROR("flash read algorithm aborted by target");
1143 retval = ERROR_FLASH_OPERATION_FAILED;
1144 break;
1145 }
1146
1147 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1148 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1149 break;
1150 }
1151
1152 /* Count the number of bytes available in the fifo without
1153 * crossing the wrap around. */
1154 uint32_t thisrun_bytes;
1155 if (wp >= rp)
1156 thisrun_bytes = wp - rp;
1157 else
1158 thisrun_bytes = fifo_end_addr - rp;
1159
1160 if (thisrun_bytes == 0) {
1161 /* Throttle polling a bit if transfer is (much) faster than flash
1162 * reading. The exact delay shouldn't matter as long as it's
1163 * less than buffer size / flash speed. This is very unlikely to
1164 * run when using high latency connections such as USB. */
1165 alive_sleep(2);
1166
1167 /* to stop an infinite loop on some targets check and increment a timeout
1168 * this issue was observed on a stellaris using the new ICDI interface */
1169 if (timeout++ >= 2500) {
1170 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1171 return ERROR_FLASH_OPERATION_FAILED;
1172 }
1173 continue;
1174 }
1175
1176 /* Reset our timeout */
1177 timeout = 0;
1178
1179 /* Limit to the amount of data we actually want to read */
1180 if (thisrun_bytes > count * block_size)
1181 thisrun_bytes = count * block_size;
1182
1183 /* Force end of large blocks to be word aligned */
1184 if (thisrun_bytes >= 16)
1185 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1186
1187 /* Read data from fifo */
1188 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1189 if (retval != ERROR_OK)
1190 break;
1191
1192 /* Update counters and wrap write pointer */
1193 buffer += thisrun_bytes;
1194 count -= thisrun_bytes / block_size;
1195 rp += thisrun_bytes;
1196 if (rp >= fifo_end_addr)
1197 rp = fifo_start_addr;
1198
1199 /* Store updated write pointer to target */
1200 retval = target_write_u32(target, rp_addr, rp);
1201 if (retval != ERROR_OK)
1202 break;
1203
1204 /* Avoid GDB timeouts */
1205 keep_alive();
1206
1207 if (openocd_is_shutdown_pending()) {
1208 retval = ERROR_SERVER_INTERRUPTED;
1209 break;
1210 }
1211 }
1212
1213 if (retval != ERROR_OK) {
1214 /* abort flash write algorithm on target */
1215 target_write_u32(target, rp_addr, 0);
1216 }
1217
1218 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1219 num_reg_params, reg_params,
1220 exit_point,
1221 10000,
1222 arch_info);
1223
1224 if (retval2 != ERROR_OK) {
1225 LOG_ERROR("error waiting for target flash write algorithm");
1226 retval = retval2;
1227 }
1228
1229 if (retval == ERROR_OK) {
1230 /* check if algorithm set wp = 0 after fifo writer loop finished */
1231 retval = target_read_u32(target, wp_addr, &wp);
1232 if (retval == ERROR_OK && wp == 0) {
1233 LOG_ERROR("flash read algorithm aborted by target");
1234 retval = ERROR_FLASH_OPERATION_FAILED;
1235 }
1236 }
1237
1238 return retval;
1239 }
1240
1241 int target_read_memory(struct target *target,
1242 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1243 {
1244 if (!target_was_examined(target)) {
1245 LOG_ERROR("Target not examined yet");
1246 return ERROR_FAIL;
1247 }
1248 if (!target->type->read_memory) {
1249 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1250 return ERROR_FAIL;
1251 }
1252 return target->type->read_memory(target, address, size, count, buffer);
1253 }
1254
1255 int target_read_phys_memory(struct target *target,
1256 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1257 {
1258 if (!target_was_examined(target)) {
1259 LOG_ERROR("Target not examined yet");
1260 return ERROR_FAIL;
1261 }
1262 if (!target->type->read_phys_memory) {
1263 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1264 return ERROR_FAIL;
1265 }
1266 return target->type->read_phys_memory(target, address, size, count, buffer);
1267 }
1268
1269 int target_write_memory(struct target *target,
1270 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1271 {
1272 if (!target_was_examined(target)) {
1273 LOG_ERROR("Target not examined yet");
1274 return ERROR_FAIL;
1275 }
1276 if (!target->type->write_memory) {
1277 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1278 return ERROR_FAIL;
1279 }
1280 return target->type->write_memory(target, address, size, count, buffer);
1281 }
1282
1283 int target_write_phys_memory(struct target *target,
1284 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1285 {
1286 if (!target_was_examined(target)) {
1287 LOG_ERROR("Target not examined yet");
1288 return ERROR_FAIL;
1289 }
1290 if (!target->type->write_phys_memory) {
1291 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1292 return ERROR_FAIL;
1293 }
1294 return target->type->write_phys_memory(target, address, size, count, buffer);
1295 }
1296
1297 int target_add_breakpoint(struct target *target,
1298 struct breakpoint *breakpoint)
1299 {
1300 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1301 LOG_TARGET_ERROR(target, "not halted (add breakpoint)");
1302 return ERROR_TARGET_NOT_HALTED;
1303 }
1304 return target->type->add_breakpoint(target, breakpoint);
1305 }
1306
1307 int target_add_context_breakpoint(struct target *target,
1308 struct breakpoint *breakpoint)
1309 {
1310 if (target->state != TARGET_HALTED) {
1311 LOG_TARGET_ERROR(target, "not halted (add context breakpoint)");
1312 return ERROR_TARGET_NOT_HALTED;
1313 }
1314 return target->type->add_context_breakpoint(target, breakpoint);
1315 }
1316
1317 int target_add_hybrid_breakpoint(struct target *target,
1318 struct breakpoint *breakpoint)
1319 {
1320 if (target->state != TARGET_HALTED) {
1321 LOG_TARGET_ERROR(target, "not halted (add hybrid breakpoint)");
1322 return ERROR_TARGET_NOT_HALTED;
1323 }
1324 return target->type->add_hybrid_breakpoint(target, breakpoint);
1325 }
1326
1327 int target_remove_breakpoint(struct target *target,
1328 struct breakpoint *breakpoint)
1329 {
1330 return target->type->remove_breakpoint(target, breakpoint);
1331 }
1332
1333 int target_add_watchpoint(struct target *target,
1334 struct watchpoint *watchpoint)
1335 {
1336 if (target->state != TARGET_HALTED) {
1337 LOG_TARGET_ERROR(target, "not halted (add watchpoint)");
1338 return ERROR_TARGET_NOT_HALTED;
1339 }
1340 return target->type->add_watchpoint(target, watchpoint);
1341 }
1342 int target_remove_watchpoint(struct target *target,
1343 struct watchpoint *watchpoint)
1344 {
1345 return target->type->remove_watchpoint(target, watchpoint);
1346 }
1347 int target_hit_watchpoint(struct target *target,
1348 struct watchpoint **hit_watchpoint)
1349 {
1350 if (target->state != TARGET_HALTED) {
1351 LOG_TARGET_ERROR(target, "not halted (hit watchpoint)");
1352 return ERROR_TARGET_NOT_HALTED;
1353 }
1354
1355 if (!target->type->hit_watchpoint) {
1356 /* For backward compatible, if hit_watchpoint is not implemented,
1357 * return ERROR_FAIL such that gdb_server will not take the nonsense
1358 * information. */
1359 return ERROR_FAIL;
1360 }
1361
1362 return target->type->hit_watchpoint(target, hit_watchpoint);
1363 }
1364
1365 const char *target_get_gdb_arch(struct target *target)
1366 {
1367 if (!target->type->get_gdb_arch)
1368 return NULL;
1369 return target->type->get_gdb_arch(target);
1370 }
1371
1372 int target_get_gdb_reg_list(struct target *target,
1373 struct reg **reg_list[], int *reg_list_size,
1374 enum target_register_class reg_class)
1375 {
1376 int result = ERROR_FAIL;
1377
1378 if (!target_was_examined(target)) {
1379 LOG_ERROR("Target not examined yet");
1380 goto done;
1381 }
1382
1383 result = target->type->get_gdb_reg_list(target, reg_list,
1384 reg_list_size, reg_class);
1385
1386 done:
1387 if (result != ERROR_OK) {
1388 *reg_list = NULL;
1389 *reg_list_size = 0;
1390 }
1391 return result;
1392 }
1393
1394 int target_get_gdb_reg_list_noread(struct target *target,
1395 struct reg **reg_list[], int *reg_list_size,
1396 enum target_register_class reg_class)
1397 {
1398 if (target->type->get_gdb_reg_list_noread &&
1399 target->type->get_gdb_reg_list_noread(target, reg_list,
1400 reg_list_size, reg_class) == ERROR_OK)
1401 return ERROR_OK;
1402 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1403 }
1404
1405 bool target_supports_gdb_connection(struct target *target)
1406 {
1407 /*
1408 * exclude all the targets that don't provide get_gdb_reg_list
1409 * or that have explicit gdb_max_connection == 0
1410 */
1411 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1412 }
1413
1414 int target_step(struct target *target,
1415 int current, target_addr_t address, int handle_breakpoints)
1416 {
1417 int retval;
1418
1419 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1420
1421 retval = target->type->step(target, current, address, handle_breakpoints);
1422 if (retval != ERROR_OK)
1423 return retval;
1424
1425 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1426
1427 return retval;
1428 }
1429
1430 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1431 {
1432 if (target->state != TARGET_HALTED) {
1433 LOG_TARGET_ERROR(target, "not halted (gdb fileio)");
1434 return ERROR_TARGET_NOT_HALTED;
1435 }
1436 return target->type->get_gdb_fileio_info(target, fileio_info);
1437 }
1438
1439 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1440 {
1441 if (target->state != TARGET_HALTED) {
1442 LOG_TARGET_ERROR(target, "not halted (gdb fileio end)");
1443 return ERROR_TARGET_NOT_HALTED;
1444 }
1445 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1446 }
1447
1448 target_addr_t target_address_max(struct target *target)
1449 {
1450 unsigned bits = target_address_bits(target);
1451 if (sizeof(target_addr_t) * 8 == bits)
1452 return (target_addr_t) -1;
1453 else
1454 return (((target_addr_t) 1) << bits) - 1;
1455 }
1456
1457 unsigned target_address_bits(struct target *target)
1458 {
1459 if (target->type->address_bits)
1460 return target->type->address_bits(target);
1461 return 32;
1462 }
1463
1464 unsigned int target_data_bits(struct target *target)
1465 {
1466 if (target->type->data_bits)
1467 return target->type->data_bits(target);
1468 return 32;
1469 }
1470
1471 static int target_profiling(struct target *target, uint32_t *samples,
1472 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1473 {
1474 return target->type->profiling(target, samples, max_num_samples,
1475 num_samples, seconds);
1476 }
1477
1478 static int handle_target(void *priv);
1479
/* Initialize a single target: run the driver's init_target() hook and fill
 * in default implementations for any optional target_type hooks the driver
 * left NULL. Note this mutates the (possibly shared) target_type table. */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	/* Optional hooks get safe defaults so callers never need NULL checks. */
	if (!type->examine)
		type->examine = default_examine;

	if (!type->check_reset)
		type->check_reset = default_check_reset;

	/* init_target() is mandatory for every driver. */
	assert(type->init_target);

	int retval = type->init_target(cmd_ctx, target);
	if (retval != ERROR_OK) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (!type->virt2phys) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* Remaining optional hooks: install the generic defaults. */
	if (!target->type->read_buffer)
		target->type->read_buffer = target_read_buffer_default;

	if (!target->type->write_buffer)
		target->type->write_buffer = target_write_buffer_default;

	if (!target->type->get_gdb_fileio_info)
		target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;

	if (!target->type->gdb_fileio_end)
		target->type->gdb_fileio_end = target_gdb_fileio_end_default;

	if (!target->type->profiling)
		target->type->profiling = target_profiling_default;

	return ERROR_OK;
}
1539
1540 static int target_init(struct command_context *cmd_ctx)
1541 {
1542 struct target *target;
1543 int retval;
1544
1545 for (target = all_targets; target; target = target->next) {
1546 retval = target_init_one(cmd_ctx, target);
1547 if (retval != ERROR_OK)
1548 return retval;
1549 }
1550
1551 if (!all_targets)
1552 return ERROR_OK;
1553
1554 retval = target_register_user_commands(cmd_ctx);
1555 if (retval != ERROR_OK)
1556 return retval;
1557
1558 retval = target_register_timer_callback(&handle_target,
1559 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1560 if (retval != ERROR_OK)
1561 return retval;
1562
1563 return ERROR_OK;
1564 }
1565
1566 COMMAND_HANDLER(handle_target_init_command)
1567 {
1568 int retval;
1569
1570 if (CMD_ARGC != 0)
1571 return ERROR_COMMAND_SYNTAX_ERROR;
1572
1573 static bool target_initialized;
1574 if (target_initialized) {
1575 LOG_INFO("'target init' has already been called");
1576 return ERROR_OK;
1577 }
1578 target_initialized = true;
1579
1580 retval = command_run_line(CMD_CTX, "init_targets");
1581 if (retval != ERROR_OK)
1582 return retval;
1583
1584 retval = command_run_line(CMD_CTX, "init_target_events");
1585 if (retval != ERROR_OK)
1586 return retval;
1587
1588 retval = command_run_line(CMD_CTX, "init_board");
1589 if (retval != ERROR_OK)
1590 return retval;
1591
1592 LOG_DEBUG("Initializing targets...");
1593 return target_init(CMD_CTX);
1594 }
1595
1596 int target_register_event_callback(int (*callback)(struct target *target,
1597 enum target_event event, void *priv), void *priv)
1598 {
1599 struct target_event_callback **callbacks_p = &target_event_callbacks;
1600
1601 if (!callback)
1602 return ERROR_COMMAND_SYNTAX_ERROR;
1603
1604 if (*callbacks_p) {
1605 while ((*callbacks_p)->next)
1606 callbacks_p = &((*callbacks_p)->next);
1607 callbacks_p = &((*callbacks_p)->next);
1608 }
1609
1610 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1611 (*callbacks_p)->callback = callback;
1612 (*callbacks_p)->priv = priv;
1613 (*callbacks_p)->next = NULL;
1614
1615 return ERROR_OK;
1616 }
1617
1618 int target_register_reset_callback(int (*callback)(struct target *target,
1619 enum target_reset_mode reset_mode, void *priv), void *priv)
1620 {
1621 struct target_reset_callback *entry;
1622
1623 if (!callback)
1624 return ERROR_COMMAND_SYNTAX_ERROR;
1625
1626 entry = malloc(sizeof(struct target_reset_callback));
1627 if (!entry) {
1628 LOG_ERROR("error allocating buffer for reset callback entry");
1629 return ERROR_COMMAND_SYNTAX_ERROR;
1630 }
1631
1632 entry->callback = callback;
1633 entry->priv = priv;
1634 list_add(&entry->list, &target_reset_callback_list);
1635
1636
1637 return ERROR_OK;
1638 }
1639
1640 int target_register_trace_callback(int (*callback)(struct target *target,
1641 size_t len, uint8_t *data, void *priv), void *priv)
1642 {
1643 struct target_trace_callback *entry;
1644
1645 if (!callback)
1646 return ERROR_COMMAND_SYNTAX_ERROR;
1647
1648 entry = malloc(sizeof(struct target_trace_callback));
1649 if (!entry) {
1650 LOG_ERROR("error allocating buffer for trace callback entry");
1651 return ERROR_COMMAND_SYNTAX_ERROR;
1652 }
1653
1654 entry->callback = callback;
1655 entry->priv = priv;
1656 list_add(&entry->list, &target_trace_callback_list);
1657
1658
1659 return ERROR_OK;
1660 }
1661
1662 int target_register_timer_callback(int (*callback)(void *priv),
1663 unsigned int time_ms, enum target_timer_type type, void *priv)
1664 {
1665 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1666
1667 if (!callback)
1668 return ERROR_COMMAND_SYNTAX_ERROR;
1669
1670 if (*callbacks_p) {
1671 while ((*callbacks_p)->next)
1672 callbacks_p = &((*callbacks_p)->next);
1673 callbacks_p = &((*callbacks_p)->next);
1674 }
1675
1676 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1677 (*callbacks_p)->callback = callback;
1678 (*callbacks_p)->type = type;
1679 (*callbacks_p)->time_ms = time_ms;
1680 (*callbacks_p)->removed = false;
1681
1682 (*callbacks_p)->when = timeval_ms() + time_ms;
1683 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1684
1685 (*callbacks_p)->priv = priv;
1686 (*callbacks_p)->next = NULL;
1687
1688 return ERROR_OK;
1689 }
1690
1691 int target_unregister_event_callback(int (*callback)(struct target *target,
1692 enum target_event event, void *priv), void *priv)
1693 {
1694 struct target_event_callback **p = &target_event_callbacks;
1695 struct target_event_callback *c = target_event_callbacks;
1696
1697 if (!callback)
1698 return ERROR_COMMAND_SYNTAX_ERROR;
1699
1700 while (c) {
1701 struct target_event_callback *next = c->next;
1702 if ((c->callback == callback) && (c->priv == priv)) {
1703 *p = next;
1704 free(c);
1705 return ERROR_OK;
1706 } else
1707 p = &(c->next);
1708 c = next;
1709 }
1710
1711 return ERROR_OK;
1712 }
1713
1714 int target_unregister_reset_callback(int (*callback)(struct target *target,
1715 enum target_reset_mode reset_mode, void *priv), void *priv)
1716 {
1717 struct target_reset_callback *entry;
1718
1719 if (!callback)
1720 return ERROR_COMMAND_SYNTAX_ERROR;
1721
1722 list_for_each_entry(entry, &target_reset_callback_list, list) {
1723 if (entry->callback == callback && entry->priv == priv) {
1724 list_del(&entry->list);
1725 free(entry);
1726 break;
1727 }
1728 }
1729
1730 return ERROR_OK;
1731 }
1732
1733 int target_unregister_trace_callback(int (*callback)(struct target *target,
1734 size_t len, uint8_t *data, void *priv), void *priv)
1735 {
1736 struct target_trace_callback *entry;
1737
1738 if (!callback)
1739 return ERROR_COMMAND_SYNTAX_ERROR;
1740
1741 list_for_each_entry(entry, &target_trace_callback_list, list) {
1742 if (entry->callback == callback && entry->priv == priv) {
1743 list_del(&entry->list);
1744 free(entry);
1745 break;
1746 }
1747 }
1748
1749 return ERROR_OK;
1750 }
1751
1752 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1753 {
1754 if (!callback)
1755 return ERROR_COMMAND_SYNTAX_ERROR;
1756
1757 for (struct target_timer_callback *c = target_timer_callbacks;
1758 c; c = c->next) {
1759 if ((c->callback == callback) && (c->priv == priv)) {
1760 c->removed = true;
1761 return ERROR_OK;
1762 }
1763 }
1764
1765 return ERROR_FAIL;
1766 }
1767
1768 int target_call_event_callbacks(struct target *target, enum target_event event)
1769 {
1770 struct target_event_callback *callback = target_event_callbacks;
1771 struct target_event_callback *next_callback;
1772
1773 if (event == TARGET_EVENT_HALTED) {
1774 /* execute early halted first */
1775 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1776 }
1777
1778 LOG_DEBUG("target event %i (%s) for core %s", event,
1779 target_event_name(event),
1780 target_name(target));
1781
1782 target_handle_event(target, event);
1783
1784 while (callback) {
1785 next_callback = callback->next;
1786 callback->callback(target, event, callback->priv);
1787 callback = next_callback;
1788 }
1789
1790 return ERROR_OK;
1791 }
1792
1793 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1794 {
1795 struct target_reset_callback *callback;
1796
1797 LOG_DEBUG("target reset %i (%s)", reset_mode,
1798 nvp_value2name(nvp_reset_modes, reset_mode)->name);
1799
1800 list_for_each_entry(callback, &target_reset_callback_list, list)
1801 callback->callback(target, reset_mode, callback->priv);
1802
1803 return ERROR_OK;
1804 }
1805
1806 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1807 {
1808 struct target_trace_callback *callback;
1809
1810 list_for_each_entry(callback, &target_trace_callback_list, list)
1811 callback->callback(target, len, data, callback->priv);
1812
1813 return ERROR_OK;
1814 }
1815
1816 static int target_timer_callback_periodic_restart(
1817 struct target_timer_callback *cb, int64_t *now)
1818 {
1819 cb->when = *now + cb->time_ms;
1820 return ERROR_OK;
1821 }
1822
1823 static int target_call_timer_callback(struct target_timer_callback *cb,
1824 int64_t *now)
1825 {
1826 cb->callback(cb->priv);
1827
1828 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1829 return target_timer_callback_periodic_restart(cb, now);
1830
1831 return target_unregister_timer_callback(cb->callback, cb->priv);
1832 }
1833
/* Walk the timer callback list: reap entries flagged as removed, invoke the
 * callbacks that are due, and recompute target_timer_next_event_value.
 * With checktime == 0, every periodic callback fires regardless of its
 * deadline (see target_call_timer_callbacks_now()). */
static int target_call_timer_callbacks_check_time(int checktime)
{
	/* A timer callback may end up polling and re-entering this function;
	 * nested processing is simply skipped. */
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		/* Entries flagged by target_unregister_timer_callback() are
		 * unlinked and freed here, where traversal makes it safe. */
		if ((*callback)->removed) {
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		/* Fire when the deadline has passed, or unconditionally for
		 * periodic entries when checktime == 0. */
		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* Track the earliest upcoming deadline for target_timer_next_event(). */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1881
/* Invoke only the timer callbacks whose deadline has passed. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1886
/* invoke periodic callbacks immediately, regardless of their deadline */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1892
/* Timestamp (timeval_ms() time base) of the next scheduled timer event,
 * maintained by target_call_timer_callbacks_check_time(). */
int64_t target_timer_next_event(void)
{
	return target_timer_next_event_value;
}
1897
1898 /* Prints the working area layout for debug purposes */
1899 static void print_wa_layout(struct target *target)
1900 {
1901 struct working_area *c = target->working_areas;
1902
1903 while (c) {
1904 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1905 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1906 c->address, c->address + c->size - 1, c->size);
1907 c = c->next;
1908 }
1909 }
1910
1911 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1912 static void target_split_working_area(struct working_area *area, uint32_t size)
1913 {
1914 assert(area->free); /* Shouldn't split an allocated area */
1915 assert(size <= area->size); /* Caller should guarantee this */
1916
1917 /* Split only if not already the right size */
1918 if (size < area->size) {
1919 struct working_area *new_wa = malloc(sizeof(*new_wa));
1920
1921 if (!new_wa)
1922 return;
1923
1924 new_wa->next = area->next;
1925 new_wa->size = area->size - size;
1926 new_wa->address = area->address + size;
1927 new_wa->backup = NULL;
1928 new_wa->user = NULL;
1929 new_wa->free = true;
1930
1931 area->next = new_wa;
1932 area->size = size;
1933
1934 /* If backup memory was allocated to this area, it has the wrong size
1935 * now so free it and it will be reallocated if/when needed */
1936 free(area->backup);
1937 area->backup = NULL;
1938 }
1939 }
1940
1941 /* Merge all adjacent free areas into one */
1942 static void target_merge_working_areas(struct target *target)
1943 {
1944 struct working_area *c = target->working_areas;
1945
1946 while (c && c->next) {
1947 assert(c->next->address == c->address + c->size); /* This is an invariant */
1948
1949 /* Find two adjacent free areas */
1950 if (c->free && c->next->free) {
1951 /* Merge the last into the first */
1952 c->size += c->next->size;
1953
1954 /* Remove the last */
1955 struct working_area *to_be_freed = c->next;
1956 c->next = c->next->next;
1957 free(to_be_freed->backup);
1958 free(to_be_freed);
1959
1960 /* If backup memory was allocated to the remaining area, it's has
1961 * the wrong size now */
1962 free(c->backup);
1963 c->backup = NULL;
1964 } else {
1965 c = c->next;
1966 }
1967 }
1968 }
1969
/* Try to reserve 'size' bytes from the target's working area pool, storing
 * the reserved area in '*area'. Returns ERROR_TARGET_RESOURCE_NOT_AVAILABLE
 * when no sufficiently large free area exists - presumably so callers can
 * retry with a smaller request (hence the "_try" suffix; verify at call
 * sites). On the first call the pool is created from the configured
 * work-area address and size. */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		/* type->mmu is always non-NULL here: target_init_one() installs
		 * the no_mmu stub for drivers without MMU support. */
		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		/* Pick the physical or virtual work-area base to match the
		 * current MMU state; fail if the matching one was not given. */
		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = ALIGN_DOWN(target->working_area_size, 4); /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* If malloc failed, working_areas stays NULL and the search
		 * below reports ERROR_TARGET_RESOURCE_NOT_AVAILABLE. */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	size = ALIGN_UP(size, 4);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
		size, c->address);

	/* Preserve the original target memory contents so they can be
	 * restored when the area is freed. */
	if (target->backup_working_area) {
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2063
2064 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2065 {
2066 int retval;
2067
2068 retval = target_alloc_working_area_try(target, size, area);
2069 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2070 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2071 return retval;
2072
2073 }
2074
2075 static int target_restore_working_area(struct target *target, struct working_area *area)
2076 {
2077 int retval = ERROR_OK;
2078
2079 if (target->backup_working_area && area->backup) {
2080 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2081 if (retval != ERROR_OK)
2082 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2083 area->size, area->address);
2084 }
2085
2086 return retval;
2087 }
2088
/* Restore the area's backup memory, if any, and return the area to the allocation pool */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	/* Freeing a NULL or already-free area is a harmless no-op */
	if (!area || area->free)
		return ERROR_OK;

	int retval = ERROR_OK;
	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
		area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	/* Coalesce with neighbouring free areas so larger requests can succeed later */
	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}
2121
/* Public entry point: free a working area, restoring its backed-up contents. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	const int restore = 1;

	return target_free_working_area_restore(target, area, restore);
}
2126
/* free resources and restore memory, if restoring memory fails,
 * free up resources anyway
 */
static void target_free_all_working_areas_restore(struct target *target, int restore)
{
	struct working_area *c = target->working_areas;

	LOG_DEBUG("freeing all working areas");

	/* Loop through all areas, restoring the allocated ones and marking them as free */
	while (c) {
		if (!c->free) {
			/* Unlike target_free_working_area_restore(), a restore
			 * failure is deliberately ignored and the area freed anyway */
			if (restore)
				target_restore_working_area(target, c);
			c->free = true;
			*c->user = NULL; /* Same as above */
			c->user = NULL;
		}
		c = c->next;
	}

	/* Run a merge pass to combine all areas into one */
	target_merge_working_areas(target);

	print_wa_layout(target);
}
2153
2154 void target_free_all_working_areas(struct target *target)
2155 {
2156 target_free_all_working_areas_restore(target, 1);
2157
2158 /* Now we have none or only one working area marked as free */
2159 if (target->working_areas) {
2160 /* Free the last one to allow on-the-fly moving and resizing */
2161 free(target->working_areas->backup);
2162 free(target->working_areas);
2163 target->working_areas = NULL;
2164 }
2165 }
2166
2167 /* Find the largest number of bytes that can be allocated */
2168 uint32_t target_get_working_area_avail(struct target *target)
2169 {
2170 struct working_area *c = target->working_areas;
2171 uint32_t max_size = 0;
2172
2173 if (!c)
2174 return ALIGN_DOWN(target->working_area_size, 4);
2175
2176 while (c) {
2177 if (c->free && max_size < c->size)
2178 max_size = c->size;
2179
2180 c = c->next;
2181 }
2182
2183 return max_size;
2184 }
2185
/* Release everything owned by a single target: breakpoints/watchpoints,
 * driver-private state, semihosting data, event actions, working areas,
 * SMP bookkeeping and RTOS support, then the target struct itself.
 * The teardown order roughly reverses setup; keep it intact. */
static void target_destroy(struct target *target)
{
	breakpoint_remove_all(target);
	watchpoint_remove_all(target);

	/* Let the target driver release its own resources first */
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	if (target->semihosting)
		free(target->semihosting->basedir);
	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* Drop every configured event handler along with its interpreter reference */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head, *tmp;

		list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
			list_del(&head->lh);
			head->target->smp = 0;
			free(head);
		}
		/* the shared empty list is static storage; never free it */
		if (target->smp_targets != &empty_smp_targets)
			free(target->smp_targets);
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2233
2234 void target_quit(void)
2235 {
2236 struct target_event_callback *pe = target_event_callbacks;
2237 while (pe) {
2238 struct target_event_callback *t = pe->next;
2239 free(pe);
2240 pe = t;
2241 }
2242 target_event_callbacks = NULL;
2243
2244 struct target_timer_callback *pt = target_timer_callbacks;
2245 while (pt) {
2246 struct target_timer_callback *t = pt->next;
2247 free(pt);
2248 pt = t;
2249 }
2250 target_timer_callbacks = NULL;
2251
2252 for (struct target *target = all_targets; target;) {
2253 struct target *tmp;
2254
2255 tmp = target->next;
2256 target_destroy(target);
2257 target = tmp;
2258 }
2259
2260 all_targets = NULL;
2261 }
2262
2263 int target_arch_state(struct target *target)
2264 {
2265 int retval;
2266 if (!target) {
2267 LOG_WARNING("No target has been configured");
2268 return ERROR_OK;
2269 }
2270
2271 if (target->state != TARGET_HALTED)
2272 return ERROR_OK;
2273
2274 retval = target->type->arch_state(target);
2275 return retval;
2276 }
2277
2278 static int target_get_gdb_fileio_info_default(struct target *target,
2279 struct gdb_fileio_info *fileio_info)
2280 {
2281 /* If target does not support semi-hosting function, target
2282 has no need to provide .get_gdb_fileio_info callback.
2283 It just return ERROR_FAIL and gdb_server will return "Txx"
2284 as target halted every time. */
2285 return ERROR_FAIL;
2286 }
2287
2288 static int target_gdb_fileio_end_default(struct target *target,
2289 int retcode, int fileio_errno, bool ctrl_c)
2290 {
2291 return ERROR_OK;
2292 }
2293
/* Default PC-sampling profiler: repeatedly halt the target, record the
 * cached PC register value, and resume, until max_num_samples have been
 * collected or `seconds` have elapsed.  Used when a target provides no
 * faster, dedicated profiling mechanism.  The caller supplies `samples`
 * with room for max_num_samples entries; the count taken is returned
 * through *num_samples. */
int target_profiling_default(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	struct timeval timeout, now;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, seconds, 0);

	LOG_INFO("Starting profiling. Halting and resuming the"
			" target as often as we can...");

	uint32_t sample_count = 0;
	/* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
	struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);

	int retval = ERROR_OK;
	for (;;) {
		target_poll(target);
		if (target->state == TARGET_HALTED) {
			/* record the cached PC as one sample */
			uint32_t t = buf_get_u32(reg->value, 0, 32);
			samples[sample_count++] = t;
			/* current pc, addr = 0, do not handle breakpoints, not debugging */
			retval = target_resume(target, 1, 0, 0, 0);
			target_poll(target);
			alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
		} else if (target->state == TARGET_RUNNING) {
			/* We want to quickly sample the PC. */
			retval = target_halt(target);
		} else {
			/* any other state (e.g. reset) cannot be profiled */
			LOG_INFO("Target not halted or running");
			retval = ERROR_OK;
			break;
		}

		if (retval != ERROR_OK)
			break;

		gettimeofday(&now, NULL);
		if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
			LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
			break;
		}
	}

	*num_samples = sample_count;
	return retval;
}
2341
2342 /* Single aligned words are guaranteed to use 16 or 32 bit access
2343 * mode respectively, otherwise data is handled as quickly as
2344 * possible
2345 */
2346 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2347 {
2348 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2349 size, address);
2350
2351 if (!target_was_examined(target)) {
2352 LOG_ERROR("Target not examined yet");
2353 return ERROR_FAIL;
2354 }
2355
2356 if (size == 0)
2357 return ERROR_OK;
2358
2359 if ((address + size - 1) < address) {
2360 /* GDB can request this when e.g. PC is 0xfffffffc */
2361 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2362 address,
2363 size);
2364 return ERROR_FAIL;
2365 }
2366
2367 return target->type->write_buffer(target, address, size, buffer);
2368 }
2369
2370 static int target_write_buffer_default(struct target *target,
2371 target_addr_t address, uint32_t count, const uint8_t *buffer)
2372 {
2373 uint32_t size;
2374 unsigned int data_bytes = target_data_bits(target) / 8;
2375
2376 /* Align up to maximum bytes. The loop condition makes sure the next pass
2377 * will have something to do with the size we leave to it. */
2378 for (size = 1;
2379 size < data_bytes && count >= size * 2 + (address & size);
2380 size *= 2) {
2381 if (address & size) {
2382 int retval = target_write_memory(target, address, size, 1, buffer);
2383 if (retval != ERROR_OK)
2384 return retval;
2385 address += size;
2386 count -= size;
2387 buffer += size;
2388 }
2389 }
2390
2391 /* Write the data with as large access size as possible. */
2392 for (; size > 0; size /= 2) {
2393 uint32_t aligned = count - count % size;
2394 if (aligned > 0) {
2395 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2396 if (retval != ERROR_OK)
2397 return retval;
2398 address += aligned;
2399 count -= aligned;
2400 buffer += aligned;
2401 }
2402 }
2403
2404 return ERROR_OK;
2405 }
2406
2407 /* Single aligned words are guaranteed to use 16 or 32 bit access
2408 * mode respectively, otherwise data is handled as quickly as
2409 * possible
2410 */
2411 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2412 {
2413 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2414 size, address);
2415
2416 if (!target_was_examined(target)) {
2417 LOG_ERROR("Target not examined yet");
2418 return ERROR_FAIL;
2419 }
2420
2421 if (size == 0)
2422 return ERROR_OK;
2423
2424 if ((address + size - 1) < address) {
2425 /* GDB can request this when e.g. PC is 0xfffffffc */
2426 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2427 address,
2428 size);
2429 return ERROR_FAIL;
2430 }
2431
2432 return target->type->read_buffer(target, address, size, buffer);
2433 }
2434
/* Generic read_buffer fallback: mirror image of target_write_buffer_default().
 * Issue single-unit reads of doubling size until `address` is aligned to
 * the widest supported access, then read the bulk with the largest
 * possible accesses, finishing the tail with narrower reads. */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	/* widest single memory access supported by this target, in bytes */
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* a set (address & size) bit means address is misaligned at this width */
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* bytes expressible as whole units of this access width */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2470
/* Compute a CRC over `size` bytes of target memory at `address`.
 * Prefers the target's own checksum_memory routine (typically executed
 * on-target and much faster); falls back to reading the region back to
 * the host and checksumming it with image_calculate_checksum(). */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->checksum_memory) {
		LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* Fallback: fetch the whole region and checksum it on the host */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): target_buffer_get_u32 followed by target_buffer_set_u32
		 * on the same target looks like a round-trip no-op — confirm the
		 * intended conversion against the on-target checksum routine */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2514
2515 int target_blank_check_memory(struct target *target,
2516 struct target_memory_check_block *blocks, int num_blocks,
2517 uint8_t erased_value)
2518 {
2519 if (!target_was_examined(target)) {
2520 LOG_ERROR("Target not examined yet");
2521 return ERROR_FAIL;
2522 }
2523
2524 if (!target->type->blank_check_memory)
2525 return ERROR_NOT_IMPLEMENTED;
2526
2527 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2528 }
2529
2530 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2531 {
2532 uint8_t value_buf[8];
2533 if (!target_was_examined(target)) {
2534 LOG_ERROR("Target not examined yet");
2535 return ERROR_FAIL;
2536 }
2537
2538 int retval = target_read_memory(target, address, 8, 1, value_buf);
2539
2540 if (retval == ERROR_OK) {
2541 *value = target_buffer_get_u64(target, value_buf);
2542 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2543 address,
2544 *value);
2545 } else {
2546 *value = 0x0;
2547 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2548 address);
2549 }
2550
2551 return retval;
2552 }
2553
2554 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2555 {
2556 uint8_t value_buf[4];
2557 if (!target_was_examined(target)) {
2558 LOG_ERROR("Target not examined yet");
2559 return ERROR_FAIL;
2560 }
2561
2562 int retval = target_read_memory(target, address, 4, 1, value_buf);
2563
2564 if (retval == ERROR_OK) {
2565 *value = target_buffer_get_u32(target, value_buf);
2566 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2567 address,
2568 *value);
2569 } else {
2570 *value = 0x0;
2571 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2572 address);
2573 }
2574
2575 return retval;
2576 }
2577
2578 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2579 {
2580 uint8_t value_buf[2];
2581 if (!target_was_examined(target)) {
2582 LOG_ERROR("Target not examined yet");
2583 return ERROR_FAIL;
2584 }
2585
2586 int retval = target_read_memory(target, address, 2, 1, value_buf);
2587
2588 if (retval == ERROR_OK) {
2589 *value = target_buffer_get_u16(target, value_buf);
2590 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2591 address,
2592 *value);
2593 } else {
2594 *value = 0x0;
2595 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2596 address);
2597 }
2598
2599 return retval;
2600 }
2601
2602 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2603 {
2604 if (!target_was_examined(target)) {
2605 LOG_ERROR("Target not examined yet");
2606 return ERROR_FAIL;
2607 }
2608
2609 int retval = target_read_memory(target, address, 1, 1, value);
2610
2611 if (retval == ERROR_OK) {
2612 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2613 address,
2614 *value);
2615 } else {
2616 *value = 0x0;
2617 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2618 address);
2619 }
2620
2621 return retval;
2622 }
2623
2624 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2625 {
2626 int retval;
2627 uint8_t value_buf[8];
2628 if (!target_was_examined(target)) {
2629 LOG_ERROR("Target not examined yet");
2630 return ERROR_FAIL;
2631 }
2632
2633 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2634 address,
2635 value);
2636
2637 target_buffer_set_u64(target, value_buf, value);
2638 retval = target_write_memory(target, address, 8, 1, value_buf);
2639 if (retval != ERROR_OK)
2640 LOG_DEBUG("failed: %i", retval);
2641
2642 return retval;
2643 }
2644
2645 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2646 {
2647 int retval;
2648 uint8_t value_buf[4];
2649 if (!target_was_examined(target)) {
2650 LOG_ERROR("Target not examined yet");
2651 return ERROR_FAIL;
2652 }
2653
2654 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2655 address,
2656 value);
2657
2658 target_buffer_set_u32(target, value_buf, value);
2659 retval = target_write_memory(target, address, 4, 1, value_buf);
2660 if (retval != ERROR_OK)
2661 LOG_DEBUG("failed: %i", retval);
2662
2663 return retval;
2664 }
2665
2666 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2667 {
2668 int retval;
2669 uint8_t value_buf[2];
2670 if (!target_was_examined(target)) {
2671 LOG_ERROR("Target not examined yet");
2672 return ERROR_FAIL;
2673 }
2674
2675 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2676 address,
2677 value);
2678
2679 target_buffer_set_u16(target, value_buf, value);
2680 retval = target_write_memory(target, address, 2, 1, value_buf);
2681 if (retval != ERROR_OK)
2682 LOG_DEBUG("failed: %i", retval);
2683
2684 return retval;
2685 }
2686
2687 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2688 {
2689 int retval;
2690 if (!target_was_examined(target)) {
2691 LOG_ERROR("Target not examined yet");
2692 return ERROR_FAIL;
2693 }
2694
2695 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2696 address, value);
2697
2698 retval = target_write_memory(target, address, 1, 1, &value);
2699 if (retval != ERROR_OK)
2700 LOG_DEBUG("failed: %i", retval);
2701
2702 return retval;
2703 }
2704
2705 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2706 {
2707 int retval;
2708 uint8_t value_buf[8];
2709 if (!target_was_examined(target)) {
2710 LOG_ERROR("Target not examined yet");
2711 return ERROR_FAIL;
2712 }
2713
2714 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2715 address,
2716 value);
2717
2718 target_buffer_set_u64(target, value_buf, value);
2719 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2720 if (retval != ERROR_OK)
2721 LOG_DEBUG("failed: %i", retval);
2722
2723 return retval;
2724 }
2725
2726 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2727 {
2728 int retval;
2729 uint8_t value_buf[4];
2730 if (!target_was_examined(target)) {
2731 LOG_ERROR("Target not examined yet");
2732 return ERROR_FAIL;
2733 }
2734
2735 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2736 address,
2737 value);
2738
2739 target_buffer_set_u32(target, value_buf, value);
2740 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2741 if (retval != ERROR_OK)
2742 LOG_DEBUG("failed: %i", retval);
2743
2744 return retval;
2745 }
2746
2747 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2748 {
2749 int retval;
2750 uint8_t value_buf[2];
2751 if (!target_was_examined(target)) {
2752 LOG_ERROR("Target not examined yet");
2753 return ERROR_FAIL;
2754 }
2755
2756 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2757 address,
2758 value);
2759
2760 target_buffer_set_u16(target, value_buf, value);
2761 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2762 if (retval != ERROR_OK)
2763 LOG_DEBUG("failed: %i", retval);
2764
2765 return retval;
2766 }
2767
2768 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2769 {
2770 int retval;
2771 if (!target_was_examined(target)) {
2772 LOG_ERROR("Target not examined yet");
2773 return ERROR_FAIL;
2774 }
2775
2776 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2777 address, value);
2778
2779 retval = target_write_phys_memory(target, address, 1, 1, &value);
2780 if (retval != ERROR_OK)
2781 LOG_DEBUG("failed: %i", retval);
2782
2783 return retval;
2784 }
2785
2786 static int find_target(struct command_invocation *cmd, const char *name)
2787 {
2788 struct target *target = get_target(name);
2789 if (!target) {
2790 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2791 return ERROR_FAIL;
2792 }
2793 if (!target->tap->enabled) {
2794 command_print(cmd, "Target: TAP %s is disabled, "
2795 "can't be the current target\n",
2796 target->tap->dotted_name);
2797 return ERROR_FAIL;
2798 }
2799
2800 cmd->ctx->current_target = target;
2801 if (cmd->ctx->current_target_override)
2802 cmd->ctx->current_target_override = target;
2803
2804 return ERROR_OK;
2805 }
2806
2807
/* 'targets' command: with one argument select that target as current;
 * with no arguments (or when selection fails) print a table of all
 * configured targets, marking the current one with '*'. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
		/* selection failed: fall through and list targets to help the user */
	}

	unsigned int index = 0;
	command_print(CMD, " TargetName Type Endian TapName State ");
	command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
	for (struct target *target = all_targets; target; target = target->next, ++index) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				index,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
	}

	return retval;
}
2849
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* latest sampled adapter state, written by sense_handler() */
static int power_dropout;
static int srst_asserted;

/* edge-detect flags: latched by sense_handler(), consumed by handle_target() */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
2859
/* Sample SRST and power-dropout state from the adapter and latch the
 * run_* edge-detect flags for handle_target() to act on.  Dropout and
 * SRST-deassert events are rate-limited to one report per 2000 ms. */
static int sense_handler(void)
{
	/* previous samples, kept across calls for edge detection */
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	/* power came back: falling edge of the dropout signal */
	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	static int64_t last_power;
	/* debounce: ignore dropouts within 2000 ms of the last reported one */
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	/* falling edge of SRST */
	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	static int64_t last_srst;
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	/* rising edge of SRST */
	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2911
/* process target state changes */
/* Periodic callback: run the Tcl procs for SRST/power events latched by
 * sense_handler(), then poll every enabled, examined target for state
 * changes, with exponential backoff (capped near 5000 ms) on failures. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			if (target->backoff.times > 0) {
				/* a poll just failed (or had failed before): try to recover
				 * by re-running examination */
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target_set_examined(target);
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
						target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3024
/* 'reg' command.  Modes:
 *   reg                  - list all registers of the current target
 *   reg NUM|NAME         - display one register (reading it if not cached)
 *   reg NUM|NAME force   - invalidate the cache and re-read before display
 *   reg NUM|NAME VALUE   - write VALUE to the register */
COMMAND_HANDLER(handle_reg_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_TARGET_NOT_EXAMINED;
	}
	struct reg *reg = NULL;

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		/* `count` numbers registers across all cache banks */
		unsigned int count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				if (reg->exist == false || reg->hidden)
					continue;
				/* only print cached values if they are valid */
				if (reg->valid) {
					char *value = buf_to_hex_str(reg->value,
							reg->size);
					command_print(CMD,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk the cache banks until the num'th register is reached */
		struct reg_cache *cache = target->reg_cache;
		unsigned int count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_FAIL;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);

		if (!reg)
			goto not_found;
	}

	assert(reg); /* give clang a hint that we *know* reg is != NULL here */

	if (!reg->exist)
		goto not_found;

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		/* "force" drops the cached value so it is re-read from the target */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = false;

		if (!reg->valid) {
			int retval = reg->type->get(reg);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not read register '%s'", reg->name);
				return retval;
			}
		}
		char *value = buf_to_hex_str(reg->value, reg->size);
		command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (!buf)
			return ERROR_FAIL;
		/* NOTE(review): str_to_buf() return value is ignored — malformed
		 * input appears to be written through as-is; confirm intended */
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		int retval = reg->type->set(reg, buf);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not write to register '%s'", reg->name);
		} else {
			char *value = buf_to_hex_str(reg->value, reg->size);
			command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
			free(value);
		}

		free(buf);

		return retval;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;

not_found:
	command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
	return ERROR_FAIL;
}
3159
3160 COMMAND_HANDLER(handle_poll_command)
3161 {
3162 int retval = ERROR_OK;
3163 struct target *target = get_current_target(CMD_CTX);
3164
3165 if (CMD_ARGC == 0) {
3166 command_print(CMD, "background polling: %s",
3167 jtag_poll_get_enabled() ? "on" : "off");
3168 command_print(CMD, "TAP: %s (%s)",
3169 target->tap->dotted_name,
3170 target->tap->enabled ? "enabled" : "disabled");
3171 if (!target->tap->enabled)
3172 return ERROR_OK;
3173 retval = target_poll(target);
3174 if (retval != ERROR_OK)
3175 return retval;
3176 retval = target_arch_state(target);
3177 if (retval != ERROR_OK)
3178 return retval;
3179 } else if (CMD_ARGC == 1) {
3180 bool enable;
3181 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3182 jtag_poll_set_enabled(enable);
3183 } else
3184 return ERROR_COMMAND_SYNTAX_ERROR;
3185
3186 return retval;
3187 }
3188
3189 COMMAND_HANDLER(handle_wait_halt_command)
3190 {
3191 if (CMD_ARGC > 1)
3192 return ERROR_COMMAND_SYNTAX_ERROR;
3193
3194 unsigned ms = DEFAULT_HALT_TIMEOUT;
3195 if (1 == CMD_ARGC) {
3196 int retval = parse_uint(CMD_ARGV[0], &ms);
3197 if (retval != ERROR_OK)
3198 return ERROR_COMMAND_SYNTAX_ERROR;
3199 }
3200
3201 struct target *target = get_current_target(CMD_CTX);
3202 return target_wait_state(target, TARGET_HALTED, ms);
3203 }
3204
3205 /* wait for target state to change. The trick here is to have a low
3206 * latency for short waits and not to suck up all the CPU time
3207 * on longer waits.
3208 *
3209 * After 500ms, keep_alive() is invoked
3210 */
3211 int target_wait_state(struct target *target, enum target_state state, unsigned int ms)
3212 {
3213 int retval;
3214 int64_t then = 0, cur;
3215 bool once = true;
3216
3217 for (;;) {
3218 retval = target_poll(target);
3219 if (retval != ERROR_OK)
3220 return retval;
3221 if (target->state == state)
3222 break;
3223 cur = timeval_ms();
3224 if (once) {
3225 once = false;
3226 then = timeval_ms();
3227 LOG_DEBUG("waiting for target %s...",
3228 nvp_value2name(nvp_target_state, state)->name);
3229 }
3230
3231 if (cur - then > 500) {
3232 keep_alive();
3233 if (openocd_is_shutdown_pending())
3234 return ERROR_SERVER_INTERRUPTED;
3235 }
3236
3237 if ((cur-then) > ms) {
3238 LOG_ERROR("timed out while waiting for target %s",
3239 nvp_value2name(nvp_target_state, state)->name);
3240 return ERROR_FAIL;
3241 }
3242 }
3243
3244 return ERROR_OK;
3245 }
3246
3247 COMMAND_HANDLER(handle_halt_command)
3248 {
3249 LOG_DEBUG("-");
3250
3251 struct target *target = get_current_target(CMD_CTX);
3252
3253 target->verbose_halt_msg = true;
3254
3255 int retval = target_halt(target);
3256 if (retval != ERROR_OK)
3257 return retval;
3258
3259 if (CMD_ARGC == 1) {
3260 unsigned wait_local;
3261 retval = parse_uint(CMD_ARGV[0], &wait_local);
3262 if (retval != ERROR_OK)
3263 return ERROR_COMMAND_SYNTAX_ERROR;
3264 if (!wait_local)
3265 return ERROR_OK;
3266 }
3267
3268 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3269 }
3270
3271 COMMAND_HANDLER(handle_soft_reset_halt_command)
3272 {
3273 struct target *target = get_current_target(CMD_CTX);
3274
3275 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3276
3277 target_soft_reset_halt(target);
3278
3279 return ERROR_OK;
3280 }
3281
3282 COMMAND_HANDLER(handle_reset_command)
3283 {
3284 if (CMD_ARGC > 1)
3285 return ERROR_COMMAND_SYNTAX_ERROR;
3286
3287 enum target_reset_mode reset_mode = RESET_RUN;
3288 if (CMD_ARGC == 1) {
3289 const struct nvp *n;
3290 n = nvp_name2value(nvp_reset_modes, CMD_ARGV[0]);
3291 if ((!n->name) || (n->value == RESET_UNKNOWN))
3292 return ERROR_COMMAND_SYNTAX_ERROR;
3293 reset_mode = n->value;
3294 }
3295
3296 /* reset *all* targets */
3297 return target_process_reset(CMD, reset_mode);
3298 }
3299
3300
3301 COMMAND_HANDLER(handle_resume_command)
3302 {
3303 int current = 1;
3304 if (CMD_ARGC > 1)
3305 return ERROR_COMMAND_SYNTAX_ERROR;
3306
3307 struct target *target = get_current_target(CMD_CTX);
3308
3309 /* with no CMD_ARGV, resume from current pc, addr = 0,
3310 * with one arguments, addr = CMD_ARGV[0],
3311 * handle breakpoints, not debugging */
3312 target_addr_t addr = 0;
3313 if (CMD_ARGC == 1) {
3314 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3315 current = 0;
3316 }
3317
3318 return target_resume(target, current, addr, 1, 0);
3319 }
3320
3321 COMMAND_HANDLER(handle_step_command)
3322 {
3323 if (CMD_ARGC > 1)
3324 return ERROR_COMMAND_SYNTAX_ERROR;
3325
3326 LOG_DEBUG("-");
3327
3328 /* with no CMD_ARGV, step from current pc, addr = 0,
3329 * with one argument addr = CMD_ARGV[0],
3330 * handle breakpoints, debugging */
3331 target_addr_t addr = 0;
3332 int current_pc = 1;
3333 if (CMD_ARGC == 1) {
3334 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3335 current_pc = 0;
3336 }
3337
3338 struct target *target = get_current_target(CMD_CTX);
3339
3340 return target_step(target, current_pc, addr, 1);
3341 }
3342
/* Pretty-print a memory dump for the md* commands.
 *
 * Formats count elements of the given size (1/2/4/8 bytes) from buffer,
 * 32 bytes per output line, each line prefixed with its target address.
 * Values are decoded in the target's byte order. Invalid sizes are
 * logged and ignored ("can't happen": the md command validates size).
 */
void target_handle_md_output(struct command_invocation *cmd,
		struct target *target, target_addr_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	const unsigned line_bytecnt = 32;
	/* number of elements printed per output line */
	unsigned line_modulo = line_bytecnt / size;

	/* worst case: each byte prints as 2 hex digits plus separators/address;
	 * 4x the line byte count is a safe upper bound */
	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	const char *value_fmt;
	switch (size) {
	case 8:
		value_fmt = "%16.16"PRIx64" ";
		break;
	case 4:
		value_fmt = "%8.8"PRIx64" ";
		break;
	case 2:
		value_fmt = "%4.4"PRIx64" ";
		break;
	case 1:
		value_fmt = "%2.2"PRIx64" ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		/* start of a line: emit the address prefix */
		if (i % line_modulo == 0) {
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					TARGET_ADDR_FMT ": ",
					(address + (i * size)));
		}

		/* decode one element in the target's byte order */
		uint64_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		switch (size) {
		case 8:
			value = target_buffer_get_u64(target, value_ptr);
			break;
		case 4:
			value = target_buffer_get_u32(target, value_ptr);
			break;
		case 2:
			value = target_buffer_get_u16(target, value_ptr);
			break;
		case 1:
			value = *value_ptr;
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		/* flush at end of line or after the final element */
		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			command_print(cmd, "%s", output);
			output_len = 0;
		}
	}
}
3406
3407 COMMAND_HANDLER(handle_md_command)
3408 {
3409 if (CMD_ARGC < 1)
3410 return ERROR_COMMAND_SYNTAX_ERROR;
3411
3412 unsigned size = 0;
3413 switch (CMD_NAME[2]) {
3414 case 'd':
3415 size = 8;
3416 break;
3417 case 'w':
3418 size = 4;
3419 break;
3420 case 'h':
3421 size = 2;
3422 break;
3423 case 'b':
3424 size = 1;
3425 break;
3426 default:
3427 return ERROR_COMMAND_SYNTAX_ERROR;
3428 }
3429
3430 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3431 int (*fn)(struct target *target,
3432 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3433 if (physical) {
3434 CMD_ARGC--;
3435 CMD_ARGV++;
3436 fn = target_read_phys_memory;
3437 } else
3438 fn = target_read_memory;
3439 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3440 return ERROR_COMMAND_SYNTAX_ERROR;
3441
3442 target_addr_t address;
3443 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3444
3445 unsigned count = 1;
3446 if (CMD_ARGC == 2)
3447 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3448
3449 uint8_t *buffer = calloc(count, size);
3450 if (!buffer) {
3451 LOG_ERROR("Failed to allocate md read buffer");
3452 return ERROR_FAIL;
3453 }
3454
3455 struct target *target = get_current_target(CMD_CTX);
3456 int retval = fn(target, address, size, count, buffer);
3457 if (retval == ERROR_OK)
3458 target_handle_md_output(CMD, target, address, size, count, buffer);
3459
3460 free(buffer);
3461
3462 return retval;
3463 }
3464
3465 typedef int (*target_write_fn)(struct target *target,
3466 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3467
3468 static int target_fill_mem(struct target *target,
3469 target_addr_t address,
3470 target_write_fn fn,
3471 unsigned data_size,
3472 /* value */
3473 uint64_t b,
3474 /* count */
3475 unsigned c)
3476 {
3477 /* We have to write in reasonably large chunks to be able
3478 * to fill large memory areas with any sane speed */
3479 const unsigned chunk_size = 16384;
3480 uint8_t *target_buf = malloc(chunk_size * data_size);
3481 if (!target_buf) {
3482 LOG_ERROR("Out of memory");
3483 return ERROR_FAIL;
3484 }
3485
3486 for (unsigned i = 0; i < chunk_size; i++) {
3487 switch (data_size) {
3488 case 8:
3489 target_buffer_set_u64(target, target_buf + i * data_size, b);
3490 break;
3491 case 4:
3492 target_buffer_set_u32(target, target_buf + i * data_size, b);
3493 break;
3494 case 2:
3495 target_buffer_set_u16(target, target_buf + i * data_size, b);
3496 break;
3497 case 1:
3498 target_buffer_set_u8(target, target_buf + i * data_size, b);
3499 break;
3500 default:
3501 exit(-1);
3502 }
3503 }
3504
3505 int retval = ERROR_OK;
3506
3507 for (unsigned x = 0; x < c; x += chunk_size) {
3508 unsigned current;
3509 current = c - x;
3510 if (current > chunk_size)
3511 current = chunk_size;
3512 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3513 if (retval != ERROR_OK)
3514 break;
3515 /* avoid GDB timeouts */
3516 keep_alive();
3517
3518 if (openocd_is_shutdown_pending()) {
3519 retval = ERROR_SERVER_INTERRUPTED;
3520 break;
3521 }
3522 }
3523 free(target_buf);
3524
3525 return retval;
3526 }
3527
3528
3529 COMMAND_HANDLER(handle_mw_command)
3530 {
3531 if (CMD_ARGC < 2)
3532 return ERROR_COMMAND_SYNTAX_ERROR;
3533 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3534 target_write_fn fn;
3535 if (physical) {
3536 CMD_ARGC--;
3537 CMD_ARGV++;
3538 fn = target_write_phys_memory;
3539 } else
3540 fn = target_write_memory;
3541 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3542 return ERROR_COMMAND_SYNTAX_ERROR;
3543
3544 target_addr_t address;
3545 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3546
3547 uint64_t value;
3548 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3549
3550 unsigned count = 1;
3551 if (CMD_ARGC == 3)
3552 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3553
3554 struct target *target = get_current_target(CMD_CTX);
3555 unsigned wordsize;
3556 switch (CMD_NAME[2]) {
3557 case 'd':
3558 wordsize = 8;
3559 break;
3560 case 'w':
3561 wordsize = 4;
3562 break;
3563 case 'h':
3564 wordsize = 2;
3565 break;
3566 case 'b':
3567 wordsize = 1;
3568 break;
3569 default:
3570 return ERROR_COMMAND_SYNTAX_ERROR;
3571 }
3572
3573 return target_fill_mem(target, address, fn, wordsize, value, count);
3574 }
3575
3576 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3577 target_addr_t *min_address, target_addr_t *max_address)
3578 {
3579 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3580 return ERROR_COMMAND_SYNTAX_ERROR;
3581
3582 /* a base address isn't always necessary,
3583 * default to 0x0 (i.e. don't relocate) */
3584 if (CMD_ARGC >= 2) {
3585 target_addr_t addr;
3586 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3587 image->base_address = addr;
3588 image->base_address_set = true;
3589 } else
3590 image->base_address_set = false;
3591
3592 image->start_address_set = false;
3593
3594 if (CMD_ARGC >= 4)
3595 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3596 if (CMD_ARGC == 5) {
3597 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3598 /* use size (given) to find max (required) */
3599 *max_address += *min_address;
3600 }
3601
3602 if (*min_address > *max_address)
3603 return ERROR_COMMAND_SYNTAX_ERROR;
3604
3605 return ERROR_OK;
3606 }
3607
/* "load_image" command: write an image file into target memory, clipped
 * to the optional [min_address, max_address] window parsed by
 * parse_load_image_command, and report the download throughput. */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* i.e. no upper bound by default */
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* write only the part of the section that overlaps the
		 * [min_address, max_address] window */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3691
3692 COMMAND_HANDLER(handle_dump_image_command)
3693 {
3694 struct fileio *fileio;
3695 uint8_t *buffer;
3696 int retval, retvaltemp;
3697 target_addr_t address, size;
3698 struct duration bench;
3699 struct target *target = get_current_target(CMD_CTX);
3700
3701 if (CMD_ARGC != 3)
3702 return ERROR_COMMAND_SYNTAX_ERROR;
3703
3704 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3705 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3706
3707 uint32_t buf_size = (size > 4096) ? 4096 : size;
3708 buffer = malloc(buf_size);
3709 if (!buffer)
3710 return ERROR_FAIL;
3711
3712 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3713 if (retval != ERROR_OK) {
3714 free(buffer);
3715 return retval;
3716 }
3717
3718 duration_start(&bench);
3719
3720 while (size > 0) {
3721 size_t size_written;
3722 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3723 retval = target_read_buffer(target, address, this_run_size, buffer);
3724 if (retval != ERROR_OK)
3725 break;
3726
3727 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3728 if (retval != ERROR_OK)
3729 break;
3730
3731 size -= this_run_size;
3732 address += this_run_size;
3733 }
3734
3735 free(buffer);
3736
3737 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3738 size_t filesize;
3739 retval = fileio_size(fileio, &filesize);
3740 if (retval != ERROR_OK)
3741 return retval;
3742 command_print(CMD,
3743 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3744 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3745 }
3746
3747 retvaltemp = fileio_close(fileio);
3748 if (retvaltemp != ERROR_OK)
3749 return retvaltemp;
3750
3751 return retval;
3752 }
3753
/* How thoroughly handle_verify_image_command_internal() compares an
 * image against target memory. */
enum verify_mode {
	IMAGE_TEST = 0,		/* print section addresses/lengths only, no compare */
	IMAGE_VERIFY = 1,	/* CRC per section; on mismatch fall back to byte compare */
	IMAGE_CHECKSUM_ONLY = 2	/* CRC per section; any mismatch fails immediately */
};
3759
3760 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3761 {
3762 uint8_t *buffer;
3763 size_t buf_cnt;
3764 uint32_t image_size;
3765 int retval;
3766 uint32_t checksum = 0;
3767 uint32_t mem_checksum = 0;
3768
3769 struct image image;
3770
3771 struct target *target = get_current_target(CMD_CTX);
3772
3773 if (CMD_ARGC < 1)
3774 return ERROR_COMMAND_SYNTAX_ERROR;
3775
3776 if (!target) {
3777 LOG_ERROR("no target selected");
3778 return ERROR_FAIL;
3779 }
3780
3781 struct duration bench;
3782 duration_start(&bench);
3783
3784 if (CMD_ARGC >= 2) {
3785 target_addr_t addr;
3786 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3787 image.base_address = addr;
3788 image.base_address_set = true;
3789 } else {
3790 image.base_address_set = false;
3791 image.base_address = 0x0;
3792 }
3793
3794 image.start_address_set = false;
3795
3796 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3797 if (retval != ERROR_OK)
3798 return retval;
3799
3800 image_size = 0x0;
3801 int diffs = 0;
3802 retval = ERROR_OK;
3803 for (unsigned int i = 0; i < image.num_sections; i++) {
3804 buffer = malloc(image.sections[i].size);
3805 if (!buffer) {
3806 command_print(CMD,
3807 "error allocating buffer for section (%" PRIu32 " bytes)",
3808 image.sections[i].size);
3809 break;
3810 }
3811 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3812 if (retval != ERROR_OK) {
3813 free(buffer);
3814 break;
3815 }
3816
3817 if (verify >= IMAGE_VERIFY) {
3818 /* calculate checksum of image */
3819 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3820 if (retval != ERROR_OK) {
3821 free(buffer);
3822 break;
3823 }
3824
3825 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3826 if (retval != ERROR_OK) {
3827 free(buffer);
3828 break;
3829 }
3830 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3831 LOG_ERROR("checksum mismatch");
3832 free(buffer);
3833 retval = ERROR_FAIL;
3834 goto done;
3835 }
3836 if (checksum != mem_checksum) {
3837 /* failed crc checksum, fall back to a binary compare */
3838 uint8_t *data;
3839
3840 if (diffs == 0)
3841 LOG_ERROR("checksum mismatch - attempting binary compare");
3842
3843 data = malloc(buf_cnt);
3844
3845 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3846 if (retval == ERROR_OK) {
3847 uint32_t t;
3848 for (t = 0; t < buf_cnt; t++) {
3849 if (data[t] != buffer[t]) {
3850 command_print(CMD,
3851 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3852 diffs,
3853 (unsigned)(t + image.sections[i].base_address),
3854 data[t],
3855 buffer[t]);
3856 if (diffs++ >= 127) {
3857 command_print(CMD, "More than 128 errors, the rest are not printed.");
3858 free(data);
3859 free(buffer);
3860 goto done;
3861 }
3862 }
3863 keep_alive();
3864 if (openocd_is_shutdown_pending()) {
3865 retval = ERROR_SERVER_INTERRUPTED;
3866 free(data);
3867 free(buffer);
3868 goto done;
3869 }
3870 }
3871 }
3872 free(data);
3873 }
3874 } else {
3875 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3876 image.sections[i].base_address,
3877 buf_cnt);
3878 }
3879
3880 free(buffer);
3881 image_size += buf_cnt;
3882 }
3883 if (diffs > 0)
3884 command_print(CMD, "No more differences found.");
3885 done:
3886 if (diffs > 0)
3887 retval = ERROR_FAIL;
3888 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3889 command_print(CMD, "verified %" PRIu32 " bytes "
3890 "in %fs (%0.3f KiB/s)", image_size,
3891 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3892 }
3893
3894 image_close(&image);
3895
3896 return retval;
3897 }
3898
/* "verify_image_checksum": CRC-compare each section only; any mismatch
 * fails without a byte-by-byte fallback. */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3903
/* "verify_image": CRC-compare each section, falling back to a
 * byte-by-byte compare (with diff printout) on mismatch. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3908
/* "test_image": parse the image and print its section layout without
 * comparing anything against target memory. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3913
3914 static int handle_bp_command_list(struct command_invocation *cmd)
3915 {
3916 struct target *target = get_current_target(cmd->ctx);
3917 struct breakpoint *breakpoint = target->breakpoints;
3918 while (breakpoint) {
3919 if (breakpoint->type == BKPT_SOFT) {
3920 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3921 breakpoint->length);
3922 command_print(cmd, "Software breakpoint(IVA): addr=" TARGET_ADDR_FMT ", len=0x%x, orig_instr=0x%s",
3923 breakpoint->address,
3924 breakpoint->length,
3925 buf);
3926 free(buf);
3927 } else {
3928 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3929 command_print(cmd, "Context breakpoint: asid=0x%8.8" PRIx32 ", len=0x%x, num=%u",
3930 breakpoint->asid,
3931 breakpoint->length, breakpoint->number);
3932 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3933 command_print(cmd, "Hybrid breakpoint(IVA): addr=" TARGET_ADDR_FMT ", len=0x%x, num=%u",
3934 breakpoint->address,
3935 breakpoint->length, breakpoint->number);
3936 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3937 breakpoint->asid);
3938 } else
3939 command_print(cmd, "Hardware breakpoint(IVA): addr=" TARGET_ADDR_FMT ", len=0x%x, num=%u",
3940 breakpoint->address,
3941 breakpoint->length, breakpoint->number);
3942 }
3943
3944 breakpoint = breakpoint->next;
3945 }
3946 return ERROR_OK;
3947 }
3948
3949 static int handle_bp_command_set(struct command_invocation *cmd,
3950 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3951 {
3952 struct target *target = get_current_target(cmd->ctx);
3953 int retval;
3954
3955 if (asid == 0) {
3956 retval = breakpoint_add(target, addr, length, hw);
3957 /* error is always logged in breakpoint_add(), do not print it again */
3958 if (retval == ERROR_OK)
3959 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3960
3961 } else if (addr == 0) {
3962 if (!target->type->add_context_breakpoint) {
3963 LOG_TARGET_ERROR(target, "Context breakpoint not available");
3964 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3965 }
3966 retval = context_breakpoint_add(target, asid, length, hw);
3967 /* error is always logged in context_breakpoint_add(), do not print it again */
3968 if (retval == ERROR_OK)
3969 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3970
3971 } else {
3972 if (!target->type->add_hybrid_breakpoint) {
3973 LOG_TARGET_ERROR(target, "Hybrid breakpoint not available");
3974 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3975 }
3976 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3977 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
3978 if (retval == ERROR_OK)
3979 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3980 }
3981 return retval;
3982 }
3983
3984 COMMAND_HANDLER(handle_bp_command)
3985 {
3986 target_addr_t addr;
3987 uint32_t asid;
3988 uint32_t length;
3989 int hw = BKPT_SOFT;
3990
3991 switch (CMD_ARGC) {
3992 case 0:
3993 return handle_bp_command_list(CMD);
3994
3995 case 2:
3996 asid = 0;
3997 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3998 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3999 return handle_bp_command_set(CMD, addr, asid, length, hw);
4000
4001 case 3:
4002 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4003 hw = BKPT_HARD;
4004 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4005 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4006 asid = 0;
4007 return handle_bp_command_set(CMD, addr, asid, length, hw);
4008 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4009 hw = BKPT_HARD;
4010 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4011 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4012 addr = 0;
4013 return handle_bp_command_set(CMD, addr, asid, length, hw);
4014 }
4015 /* fallthrough */
4016 case 4:
4017 hw = BKPT_HARD;
4018 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4019 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4020 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4021 return handle_bp_command_set(CMD, addr, asid, length, hw);
4022
4023 default:
4024 return ERROR_COMMAND_SYNTAX_ERROR;
4025 }
4026 }
4027
4028 COMMAND_HANDLER(handle_rbp_command)
4029 {
4030 int retval;
4031
4032 if (CMD_ARGC != 1)
4033 return ERROR_COMMAND_SYNTAX_ERROR;
4034
4035 struct target *target = get_current_target(CMD_CTX);
4036
4037 if (!strcmp(CMD_ARGV[0], "all")) {
4038 retval = breakpoint_remove_all(target);
4039
4040 if (retval != ERROR_OK) {
4041 command_print(CMD, "Error encountered during removal of all breakpoints.");
4042 command_print(CMD, "Some breakpoints may have remained set.");
4043 }
4044 } else {
4045 target_addr_t addr;
4046 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4047
4048 retval = breakpoint_remove(target, addr);
4049
4050 if (retval != ERROR_OK)
4051 command_print(CMD, "Error during removal of breakpoint at address " TARGET_ADDR_FMT, addr);
4052 }
4053
4054 return retval;
4055 }
4056
/* "wp" command: with no arguments list all watchpoints of the current
 * target; otherwise add a watchpoint:
 *   wp <addr> <len> [r|w|a [value [mask]]] */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC == 0) {
		/* list mode: one line per watchpoint */
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			char wp_type = (watchpoint->rw == WPT_READ ? 'r' : (watchpoint->rw == WPT_WRITE ? 'w' : 'a'));
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %c, value: 0x%8.8" PRIx64
					", mask: 0x%8.8" PRIx64,
					watchpoint->address,
					watchpoint->length,
					wp_type,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint64_t data_value = 0x0;
	uint64_t data_mask = WATCHPOINT_IGNORE_DATA_VALUE_MASK;
	bool mask_specified = false;

	/* each case parses one optional trailing argument, then falls
	 * through to handle the shorter forms */
	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u64, CMD_ARGV[4], data_mask);
		mask_specified = true;
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u64, CMD_ARGV[3], data_value);
		// if user specified only data value without mask - the mask should be 0
		if (!mask_specified)
			data_mask = 0;
		/* fall through */
	case 3:
		/* access mode: read, write, or any access */
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_TARGET_ERROR(target, "invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_TARGET_ERROR(target, "Failure setting watchpoints");

	return retval;
}
4130
4131 COMMAND_HANDLER(handle_rwp_command)
4132 {
4133 int retval;
4134
4135 if (CMD_ARGC != 1)
4136 return ERROR_COMMAND_SYNTAX_ERROR;
4137
4138 struct target *target = get_current_target(CMD_CTX);
4139 if (!strcmp(CMD_ARGV[0], "all")) {
4140 retval = watchpoint_remove_all(target);
4141
4142 if (retval != ERROR_OK) {
4143 command_print(CMD, "Error encountered during removal of all watchpoints.");
4144 command_print(CMD, "Some watchpoints may have remained set.");
4145 }
4146 } else {
4147 target_addr_t addr;
4148 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4149
4150 retval = watchpoint_remove(target, addr);
4151
4152 if (retval != ERROR_OK)
4153 command_print(CMD, "Error during removal of watchpoint at address " TARGET_ADDR_FMT, addr);
4154 }
4155
4156 return retval;
4157 }
4158
4159 /**
4160 * Translate a virtual address to a physical address.
4161 *
4162 * The low-level target implementation must have logged a detailed error
4163 * which is forwarded to telnet/GDB session.
4164 */
4165 COMMAND_HANDLER(handle_virt2phys_command)
4166 {
4167 if (CMD_ARGC != 1)
4168 return ERROR_COMMAND_SYNTAX_ERROR;
4169
4170 target_addr_t va;
4171 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4172 target_addr_t pa;
4173
4174 struct target *target = get_current_target(CMD_CTX);
4175 int retval = target->type->virt2phys(target, va, &pa);
4176 if (retval == ERROR_OK)
4177 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4178
4179 return retval;
4180 }
4181
/* Write len bytes to f, logging an error on a short write. */
static void write_data(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4188
/* Write a 32-bit value to f in the target's byte order. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t raw[4];

	target_buffer_set_u32(target, raw, l);
	write_data(f, raw, sizeof(raw));
}
4196
/* Write a string to f without its NUL terminator. */
static void write_string(FILE *f, char *s)
{
	size_t n = strlen(s);
	write_data(f, s, n);
}
4201
4202 typedef unsigned char UNIT[2]; /* unit of profiling */
4203
4204 /* Dump a gmon.out histogram file. */
4205 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4206 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4207 {
4208 uint32_t i;
4209 FILE *f = fopen(filename, "w");
4210 if (!f)
4211 return;
4212 write_string(f, "gmon");
4213 write_long(f, 0x00000001, target); /* Version */
4214 write_long(f, 0, target); /* padding */
4215 write_long(f, 0, target); /* padding */
4216 write_long(f, 0, target); /* padding */
4217
4218 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4219 write_data(f, &zero, 1);
4220
4221 /* figure out bucket size */
4222 uint32_t min;
4223 uint32_t max;
4224 if (with_range) {
4225 min = start_address;
4226 max = end_address;
4227 } else {
4228 min = samples[0];
4229 max = samples[0];
4230 for (i = 0; i < sample_num; i++) {
4231 if (min > samples[i])
4232 min = samples[i];
4233 if (max < samples[i])
4234 max = samples[i];
4235 }
4236
4237 /* max should be (largest sample + 1)
4238 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4239 if (max < UINT32_MAX)
4240 max++;
4241
4242 /* gprof requires (max - min) >= 2 */
4243 while ((max - min) < 2) {
4244 if (max < UINT32_MAX)
4245 max++;
4246 else
4247 min--;
4248 }
4249 }
4250
4251 uint32_t address_space = max - min;
4252
4253 /* FIXME: What is the reasonable number of buckets?
4254 * The profiling result will be more accurate if there are enough buckets. */
4255 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4256 uint32_t num_buckets = address_space / sizeof(UNIT);
4257 if (num_buckets > max_buckets)
4258 num_buckets = max_buckets;
4259 int *buckets = malloc(sizeof(int) * num_buckets);
4260 if (!buckets) {
4261 fclose(f);
4262 return;
4263 }
4264 memset(buckets, 0, sizeof(int) * num_buckets);
4265 for (i = 0; i < sample_num; i++) {
4266 uint32_t address = samples[i];
4267
4268 if ((address < min) || (max <= address))
4269 continue;
4270
4271 long long a = address - min;
4272 long long b = num_buckets;
4273 long long c = address_space;
4274 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4275 buckets[index_t]++;
4276 }
4277
4278 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4279 write_long(f, min, target); /* low_pc */
4280 write_long(f, max, target); /* high_pc */
4281 write_long(f, num_buckets, target); /* # of buckets */
4282 float sample_rate = sample_num / (duration_ms / 1000.0);
4283 write_long(f, sample_rate, target);
4284 write_string(f, "seconds");
4285 for (i = 0; i < (15-strlen("seconds")); i++)
4286 write_data(f, &zero, 1);
4287 write_string(f, "s");
4288
4289 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4290
4291 char *data = malloc(2 * num_buckets);
4292 if (data) {
4293 for (i = 0; i < num_buckets; i++) {
4294 int val;
4295 val = buckets[i];
4296 if (val > 65535)
4297 val = 65535;
4298 data[i * 2] = val&0xff;
4299 data[i * 2 + 1] = (val >> 8) & 0xff;
4300 }
4301 free(buckets);
4302 write_data(f, data, num_buckets * 2);
4303 free(data);
4304 } else
4305 free(buckets);
4306
4307 fclose(f);
4308 }
4309
4310 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4311 * which will be used as a random sampling of PC */
4312 COMMAND_HANDLER(handle_profile_command)
4313 {
4314 struct target *target = get_current_target(CMD_CTX);
4315
4316 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4317 return ERROR_COMMAND_SYNTAX_ERROR;
4318
4319 const uint32_t MAX_PROFILE_SAMPLE_NUM = 1000000;
4320 uint32_t offset;
4321 uint32_t num_of_samples;
4322 int retval = ERROR_OK;
4323 bool halted_before_profiling = target->state == TARGET_HALTED;
4324
4325 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4326
4327 uint32_t start_address = 0;
4328 uint32_t end_address = 0;
4329 bool with_range = false;
4330 if (CMD_ARGC == 4) {
4331 with_range = true;
4332 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4333 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4334 if (start_address > end_address || (end_address - start_address) < 2) {
4335 command_print(CMD, "Error: end - start < 2");
4336 return ERROR_COMMAND_ARGUMENT_INVALID;
4337 }
4338 }
4339
4340 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4341 if (!samples) {
4342 LOG_ERROR("No memory to store samples.");
4343 return ERROR_FAIL;
4344 }
4345
4346 uint64_t timestart_ms = timeval_ms();
4347 /**
4348 * Some cores let us sample the PC without the
4349 * annoying halt/resume step; for example, ARMv7 PCSR.
4350 * Provide a way to use that more efficient mechanism.
4351 */
4352 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4353 &num_of_samples, offset);
4354 if (retval != ERROR_OK) {
4355 free(samples);
4356 return retval;
4357 }
4358 uint32_t duration_ms = timeval_ms() - timestart_ms;
4359
4360 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4361
4362 retval = target_poll(target);
4363 if (retval != ERROR_OK) {
4364 free(samples);
4365 return retval;
4366 }
4367
4368 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4369 /* The target was halted before we started and is running now. Halt it,
4370 * for consistency. */
4371 retval = target_halt(target);
4372 if (retval != ERROR_OK) {
4373 free(samples);
4374 return retval;
4375 }
4376 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4377 /* The target was running before we started and is halted now. Resume
4378 * it, for consistency. */
4379 retval = target_resume(target, 1, 0, 0, 0);
4380 if (retval != ERROR_OK) {
4381 free(samples);
4382 return retval;
4383 }
4384 }
4385
4386 retval = target_poll(target);
4387 if (retval != ERROR_OK) {
4388 free(samples);
4389 return retval;
4390 }
4391
4392 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4393 with_range, start_address, end_address, target, duration_ms);
4394 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4395
4396 free(samples);
4397 return retval;
4398 }
4399
4400 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4401 {
4402 char *namebuf;
4403 Jim_Obj *obj_name, *obj_val;
4404 int result;
4405
4406 namebuf = alloc_printf("%s(%d)", varname, idx);
4407 if (!namebuf)
4408 return JIM_ERR;
4409
4410 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4411 jim_wide wide_val = val;
4412 obj_val = Jim_NewWideObj(interp, wide_val);
4413 if (!obj_name || !obj_val) {
4414 free(namebuf);
4415 return JIM_ERR;
4416 }
4417
4418 Jim_IncrRefCount(obj_name);
4419 Jim_IncrRefCount(obj_val);
4420 result = Jim_SetVariable(interp, obj_name, obj_val);
4421 Jim_DecrRefCount(interp, obj_name);
4422 Jim_DecrRefCount(interp, obj_val);
4423 free(namebuf);
4424 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4425 return result;
4426 }
4427
/* Implements the deprecated 'mem2array' Tcl command: read target memory
 * and store one Tcl array element per memory element.
 *
 * Returns JIM_OK on success; JIM_ERR (with a message in the interpreter
 * result) on bad arguments or read failure. Elements read before a
 * failure are still stored into the Tcl array.
 */
static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
{
	int e;

	LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");

	/* argv[0] = name of array to receive the data
	 * argv[1] = desired element width in bits
	 * argv[2] = memory address
	 * argv[3] = count of times to read
	 * argv[4] = optional "phys"
	 */
	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
		return JIM_ERR;
	}

	/* Arg 0: Name of the array variable */
	const char *varname = Jim_GetString(argv[0], NULL);

	/* Arg 1: Bit width of one element */
	long l;
	e = Jim_GetLong(interp, argv[1], &l);
	if (e != JIM_OK)
		return e;
	const unsigned int width_bits = l;

	if (width_bits != 8 &&
		width_bits != 16 &&
		width_bits != 32 &&
		width_bits != 64) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
		return JIM_ERR;
	}
	/* element width in bytes */
	const unsigned int width = width_bits / 8;

	/* Arg 2: Memory address */
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[2], &wide_addr);
	if (e != JIM_OK)
		return e;
	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 3: Number of elements to read */
	e = Jim_GetLong(interp, argv[3], &l);
	if (e != JIM_OK)
		return e;
	size_t len = l;

	/* Arg 4: phys */
	bool is_phys = false;
	if (argc > 4) {
		int str_len = 0;
		const char *phys = Jim_GetString(argv[4], &str_len);
		if (!strncmp(phys, "phys", str_len))
			is_phys = true;
		else
			return JIM_ERR;
	}

	/* Argument checks */
	if (len == 0) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
		return JIM_ERR;
	}
	/* reject requests whose end address wraps around */
	if ((addr + (len * width)) < addr) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
		return JIM_ERR;
	}
	if (len > 65536) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"mem2array: too large read request, exceeds 64K items", NULL);
		return JIM_ERR;
	}

	/* the memory read helpers require naturally aligned accesses */
	if ((width == 1) ||
		((width == 2) && ((addr & 1) == 0)) ||
		((width == 4) && ((addr & 3) == 0)) ||
		((width == 8) && ((addr & 7) == 0))) {
		/* alignment correct */
	} else {
		char buf[100];
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
				addr,
				width);
		Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
		return JIM_ERR;
	}

	/* Transfer loop */

	/* index counter */
	size_t idx = 0;

	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);
	if (!buffer)
		return JIM_ERR;

	/* assume ok */
	e = JIM_OK;
	while (len) {
		/* Slurp... in buffer size chunks */
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */

		int retval;
		if (is_phys)
			retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_read_memory(target, addr, width, chunk_len, buffer);
		if (retval != ERROR_OK) {
			/* BOO !*/
			LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
					  addr,
					  width,
					  chunk_len);
			Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
			Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
			e = JIM_ERR;
			break;
		} else {
			/* decode each element from target byte order and store it */
			for (size_t i = 0; i < chunk_len ; i++, idx++) {
				uint64_t v = 0;
				switch (width) {
					case 8:
						v = target_buffer_get_u64(target, &buffer[i*width]);
						break;
					case 4:
						v = target_buffer_get_u32(target, &buffer[i*width]);
						break;
					case 2:
						v = target_buffer_get_u16(target, &buffer[i*width]);
						break;
					case 1:
						v = buffer[i] & 0x0ff;
						break;
				}
				new_u64_array_element(interp, varname, idx, v);
			}
			len -= chunk_len;
			addr += chunk_len * width;
		}
	}

	free(buffer);

	Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));

	return e;
}
4585
4586 COMMAND_HANDLER(handle_target_read_memory)
4587 {
4588 /*
4589 * CMD_ARGV[0] = memory address
4590 * CMD_ARGV[1] = desired element width in bits
4591 * CMD_ARGV[2] = number of elements to read
4592 * CMD_ARGV[3] = optional "phys"
4593 */
4594
4595 if (CMD_ARGC < 3 || CMD_ARGC > 4)
4596 return ERROR_COMMAND_SYNTAX_ERROR;
4597
4598 /* Arg 1: Memory address. */
4599 target_addr_t addr;
4600 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], addr);
4601
4602 /* Arg 2: Bit width of one element. */
4603 unsigned int width_bits;
4604 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], width_bits);
4605
4606 /* Arg 3: Number of elements to read. */
4607 unsigned int count;
4608 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
4609
4610 /* Arg 4: Optional 'phys'. */
4611 bool is_phys = false;
4612 if (CMD_ARGC == 4) {
4613 if (strcmp(CMD_ARGV[3], "phys")) {
4614 command_print(CMD, "invalid argument '%s', must be 'phys'", CMD_ARGV[3]);
4615 return ERROR_COMMAND_ARGUMENT_INVALID;
4616 }
4617
4618 is_phys = true;
4619 }
4620
4621 switch (width_bits) {
4622 case 8:
4623 case 16:
4624 case 32:
4625 case 64:
4626 break;
4627 default:
4628 command_print(CMD, "invalid width, must be 8, 16, 32 or 64");
4629 return ERROR_COMMAND_ARGUMENT_INVALID;
4630 }
4631
4632 const unsigned int width = width_bits / 8;
4633
4634 if ((addr + (count * width)) < addr) {
4635 command_print(CMD, "read_memory: addr + count wraps to zero");
4636 return ERROR_COMMAND_ARGUMENT_INVALID;
4637 }
4638
4639 if (count > 65536) {
4640 command_print(CMD, "read_memory: too large read request, exceeds 64K elements");
4641 return ERROR_COMMAND_ARGUMENT_INVALID;
4642 }
4643
4644 struct target *target = get_current_target(CMD_CTX);
4645
4646 const size_t buffersize = 4096;
4647 uint8_t *buffer = malloc(buffersize);
4648
4649 if (!buffer) {
4650 LOG_ERROR("Failed to allocate memory");
4651 return ERROR_FAIL;
4652 }
4653
4654 char *separator = "";
4655 while (count > 0) {
4656 const unsigned int max_chunk_len = buffersize / width;
4657 const size_t chunk_len = MIN(count, max_chunk_len);
4658
4659 int retval;
4660
4661 if (is_phys)
4662 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4663 else
4664 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4665
4666 if (retval != ERROR_OK) {
4667 LOG_DEBUG("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4668 addr, width_bits, chunk_len);
4669 /*
4670 * FIXME: we append the errmsg to the list of value already read.
4671 * Add a way to flush and replace old output, but LOG_DEBUG() it
4672 */
4673 command_print(CMD, "read_memory: failed to read memory");
4674 free(buffer);
4675 return retval;
4676 }
4677
4678 for (size_t i = 0; i < chunk_len ; i++) {
4679 uint64_t v = 0;
4680
4681 switch (width) {
4682 case 8:
4683 v = target_buffer_get_u64(target, &buffer[i * width]);
4684 break;
4685 case 4:
4686 v = target_buffer_get_u32(target, &buffer[i * width]);
4687 break;
4688 case 2:
4689 v = target_buffer_get_u16(target, &buffer[i * width]);
4690 break;
4691 case 1:
4692 v = buffer[i];
4693 break;
4694 }
4695
4696 command_print_sameline(CMD, "%s0x%" PRIx64, separator, v);
4697 separator = " ";
4698 }
4699
4700 count -= chunk_len;
4701 addr += chunk_len * width;
4702 }
4703
4704 free(buffer);
4705
4706 return ERROR_OK;
4707 }
4708
4709 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4710 {
4711 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4712 if (!namebuf)
4713 return JIM_ERR;
4714
4715 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4716 if (!obj_name) {
4717 free(namebuf);
4718 return JIM_ERR;
4719 }
4720
4721 Jim_IncrRefCount(obj_name);
4722 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4723 Jim_DecrRefCount(interp, obj_name);
4724 free(namebuf);
4725 if (!obj_val)
4726 return JIM_ERR;
4727
4728 jim_wide wide_val;
4729 int result = Jim_GetWide(interp, obj_val, &wide_val);
4730 *val = wide_val;
4731 return result;
4732 }
4733
4734 static int target_array2mem(Jim_Interp *interp, struct target *target,
4735 int argc, Jim_Obj *const *argv)
4736 {
4737 int e;
4738
4739 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4740
4741 /* argv[0] = name of array from which to read the data
4742 * argv[1] = desired element width in bits
4743 * argv[2] = memory address
4744 * argv[3] = number of elements to write
4745 * argv[4] = optional "phys"
4746 */
4747 if (argc < 4 || argc > 5) {
4748 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4749 return JIM_ERR;
4750 }
4751
4752 /* Arg 0: Name of the array variable */
4753 const char *varname = Jim_GetString(argv[0], NULL);
4754
4755 /* Arg 1: Bit width of one element */
4756 long l;
4757 e = Jim_GetLong(interp, argv[1], &l);
4758 if (e != JIM_OK)
4759 return e;
4760 const unsigned int width_bits = l;
4761
4762 if (width_bits != 8 &&
4763 width_bits != 16 &&
4764 width_bits != 32 &&
4765 width_bits != 64) {
4766 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4767 Jim_AppendStrings(interp, Jim_GetResult(interp),
4768 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4769 return JIM_ERR;
4770 }
4771 const unsigned int width = width_bits / 8;
4772
4773 /* Arg 2: Memory address */
4774 jim_wide wide_addr;
4775 e = Jim_GetWide(interp, argv[2], &wide_addr);
4776 if (e != JIM_OK)
4777 return e;
4778 target_addr_t addr = (target_addr_t)wide_addr;
4779
4780 /* Arg 3: Number of elements to write */
4781 e = Jim_GetLong(interp, argv[3], &l);
4782 if (e != JIM_OK)
4783 return e;
4784 size_t len = l;
4785
4786 /* Arg 4: Phys */
4787 bool is_phys = false;
4788 if (argc > 4) {
4789 int str_len = 0;
4790 const char *phys = Jim_GetString(argv[4], &str_len);
4791 if (!strncmp(phys, "phys", str_len))
4792 is_phys = true;
4793 else
4794 return JIM_ERR;
4795 }
4796
4797 /* Argument checks */
4798 if (len == 0) {
4799 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4800 Jim_AppendStrings(interp, Jim_GetResult(interp),
4801 "array2mem: zero width read?", NULL);
4802 return JIM_ERR;
4803 }
4804
4805 if ((addr + (len * width)) < addr) {
4806 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4807 Jim_AppendStrings(interp, Jim_GetResult(interp),
4808 "array2mem: addr + len - wraps to zero?", NULL);
4809 return JIM_ERR;
4810 }
4811
4812 if (len > 65536) {
4813 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4814 Jim_AppendStrings(interp, Jim_GetResult(interp),
4815 "array2mem: too large memory write request, exceeds 64K items", NULL);
4816 return JIM_ERR;
4817 }
4818
4819 if ((width == 1) ||
4820 ((width == 2) && ((addr & 1) == 0)) ||
4821 ((width == 4) && ((addr & 3) == 0)) ||
4822 ((width == 8) && ((addr & 7) == 0))) {
4823 /* alignment correct */
4824 } else {
4825 char buf[100];
4826 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4827 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4828 addr,
4829 width);
4830 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4831 return JIM_ERR;
4832 }
4833
4834 /* Transfer loop */
4835
4836 /* assume ok */
4837 e = JIM_OK;
4838
4839 const size_t buffersize = 4096;
4840 uint8_t *buffer = malloc(buffersize);
4841 if (!buffer)
4842 return JIM_ERR;
4843
4844 /* index counter */
4845 size_t idx = 0;
4846
4847 while (len) {
4848 /* Slurp... in buffer size chunks */
4849 const unsigned int max_chunk_len = buffersize / width;
4850
4851 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4852
4853 /* Fill the buffer */
4854 for (size_t i = 0; i < chunk_len; i++, idx++) {
4855 uint64_t v = 0;
4856 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4857 free(buffer);
4858 return JIM_ERR;
4859 }
4860 switch (width) {
4861 case 8:
4862 target_buffer_set_u64(target, &buffer[i * width], v);
4863 break;
4864 case 4:
4865 target_buffer_set_u32(target, &buffer[i * width], v);
4866 break;
4867 case 2:
4868 target_buffer_set_u16(target, &buffer[i * width], v);
4869 break;
4870 case 1:
4871 buffer[i] = v & 0x0ff;
4872 break;
4873 }
4874 }
4875 len -= chunk_len;
4876
4877 /* Write the buffer to memory */
4878 int retval;
4879 if (is_phys)
4880 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4881 else
4882 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4883 if (retval != ERROR_OK) {
4884 /* BOO !*/
4885 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4886 addr,
4887 width,
4888 chunk_len);
4889 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4890 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4891 e = JIM_ERR;
4892 break;
4893 }
4894 addr += chunk_len * width;
4895 }
4896
4897 free(buffer);
4898
4899 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4900
4901 return e;
4902 }
4903
4904 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4905 Jim_Obj * const *argv)
4906 {
4907 /*
4908 * argv[1] = memory address
4909 * argv[2] = desired element width in bits
4910 * argv[3] = list of data to write
4911 * argv[4] = optional "phys"
4912 */
4913
4914 if (argc < 4 || argc > 5) {
4915 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4916 return JIM_ERR;
4917 }
4918
4919 /* Arg 1: Memory address. */
4920 int e;
4921 jim_wide wide_addr;
4922 e = Jim_GetWide(interp, argv[1], &wide_addr);
4923
4924 if (e != JIM_OK)
4925 return e;
4926
4927 target_addr_t addr = (target_addr_t)wide_addr;
4928
4929 /* Arg 2: Bit width of one element. */
4930 long l;
4931 e = Jim_GetLong(interp, argv[2], &l);
4932
4933 if (e != JIM_OK)
4934 return e;
4935
4936 const unsigned int width_bits = l;
4937 size_t count = Jim_ListLength(interp, argv[3]);
4938
4939 /* Arg 4: Optional 'phys'. */
4940 bool is_phys = false;
4941
4942 if (argc > 4) {
4943 const char *phys = Jim_GetString(argv[4], NULL);
4944
4945 if (strcmp(phys, "phys")) {
4946 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4947 return JIM_ERR;
4948 }
4949
4950 is_phys = true;
4951 }
4952
4953 switch (width_bits) {
4954 case 8:
4955 case 16:
4956 case 32:
4957 case 64:
4958 break;
4959 default:
4960 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4961 return JIM_ERR;
4962 }
4963
4964 const unsigned int width = width_bits / 8;
4965
4966 if ((addr + (count * width)) < addr) {
4967 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
4968 return JIM_ERR;
4969 }
4970
4971 if (count > 65536) {
4972 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
4973 return JIM_ERR;
4974 }
4975
4976 struct command_context *cmd_ctx = current_command_context(interp);
4977 assert(cmd_ctx != NULL);
4978 struct target *target = get_current_target(cmd_ctx);
4979
4980 const size_t buffersize = 4096;
4981 uint8_t *buffer = malloc(buffersize);
4982
4983 if (!buffer) {
4984 LOG_ERROR("Failed to allocate memory");
4985 return JIM_ERR;
4986 }
4987
4988 size_t j = 0;
4989
4990 while (count > 0) {
4991 const unsigned int max_chunk_len = buffersize / width;
4992 const size_t chunk_len = MIN(count, max_chunk_len);
4993
4994 for (size_t i = 0; i < chunk_len; i++, j++) {
4995 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
4996 jim_wide element_wide;
4997 Jim_GetWide(interp, tmp, &element_wide);
4998
4999 const uint64_t v = element_wide;
5000
5001 switch (width) {
5002 case 8:
5003 target_buffer_set_u64(target, &buffer[i * width], v);
5004 break;
5005 case 4:
5006 target_buffer_set_u32(target, &buffer[i * width], v);
5007 break;
5008 case 2:
5009 target_buffer_set_u16(target, &buffer[i * width], v);
5010 break;
5011 case 1:
5012 buffer[i] = v & 0x0ff;
5013 break;
5014 }
5015 }
5016
5017 count -= chunk_len;
5018
5019 int retval;
5020
5021 if (is_phys)
5022 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5023 else
5024 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5025
5026 if (retval != ERROR_OK) {
5027 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5028 addr, width_bits, chunk_len);
5029 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5030 e = JIM_ERR;
5031 break;
5032 }
5033
5034 addr += chunk_len * width;
5035 }
5036
5037 free(buffer);
5038
5039 return e;
5040 }
5041
/* FIX? should we propagate errors here rather than printing them
 * and continuing?
 */
/* Run every registered Tcl handler for event @e on @target.
 * Handler errors are reported to the user but do not stop iteration,
 * except for ERROR_COMMAND_CLOSE_CONNECTION which aborts immediately. */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	/* multiple handlers may be registered for the same event */
	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target: %s (%s) event: %d (%s) action: %s",
					   target_name(target),
					   target_type_name(target),
					   e,
					   target_event_name(e),
					   Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			cmd_ctx->current_target_override = saved_target_override;

			/* handler asked to drop the connection: stop processing events */
			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* unwrap 'return' from inside the handler body */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
						  target_event_name(e),
						  target_name(target),
						  Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
5089
/* 'get_reg' Tcl command: read the registers named in a Tcl list and
 * return a dict mapping each name to its value as a hex string.
 *
 * With '-force', cached register values are ignored and each register is
 * re-read from the target; otherwise only invalid cache entries are read.
 *
 * NOTE(review): result_dict and the per-register string objects are not
 * explicitly freed on the early JIM_ERR paths below — presumably they are
 * reclaimed with the interpreter; verify against Jim object lifetime
 * rules for zero-refcount objects. */
static int target_jim_get_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	bool force = false;

	/* optional leading '-force' flag */
	if (argc == 3) {
		const char *option = Jim_GetString(argv[1], NULL);

		if (!strcmp(option, "-force")) {
			argc--;
			argv++;
			force = true;
		} else {
			Jim_SetResultFormatted(interp, "invalid option '%s'", option);
			return JIM_ERR;
		}
	}

	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
		return JIM_ERR;
	}

	const int length = Jim_ListLength(interp, argv[1]);

	Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);

	if (!result_dict)
		return JIM_ERR;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx != NULL);
	const struct target *target = get_current_target(cmd_ctx);

	for (int i = 0; i < length; i++) {
		Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);

		if (!elem)
			return JIM_ERR;

		const char *reg_name = Jim_String(elem);

		/* 'false' — do not search by register number, name only */
		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		/* refresh from the target unless the cached value is valid */
		if (force || !reg->valid) {
			int retval = reg->type->get(reg);

			if (retval != ERROR_OK) {
				Jim_SetResultFormatted(interp, "failed to read register '%s'",
					reg_name);
				return JIM_ERR;
			}
		}

		char *reg_value = buf_to_hex_str(reg->value, reg->size);

		if (!reg_value) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		char *tmp = alloc_printf("0x%s", reg_value);

		free(reg_value);

		if (!tmp) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		Jim_DictAddElement(interp, result_dict, elem,
			Jim_NewStringObj(interp, tmp, -1));

		free(tmp);
	}

	Jim_SetResult(interp, result_dict);

	return JIM_OK;
}
5176
/* 'set_reg' Tcl command: write registers from a dict of
 * {reg_name reg_value} pairs on the current target.
 *
 * Returns JIM_OK when every register was written; JIM_ERR (with a
 * message in the interpreter result) on the first failure.
 *
 * NOTE(review): the str_to_buf() return value is ignored below, so a
 * malformed value string may write a partially-converted buffer —
 * confirm str_to_buf's error contract in helper/binarybuffer. */
static int target_jim_set_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "dict");
		return JIM_ERR;
	}

	/* flatten the dict into an array of alternating name/value objects;
	 * the Jim_DictPairs signature changed in Jim Tcl 0.80 */
	int tmp;
#if JIM_VERSION >= 80
	Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);

	if (!dict)
		return JIM_ERR;
#else
	Jim_Obj **dict;
	int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);

	if (ret != JIM_OK)
		return ret;
#endif

	const unsigned int length = tmp;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	const struct target *target = get_current_target(cmd_ctx);

	/* pairs: dict[i] is the register name, dict[i + 1] its new value */
	for (unsigned int i = 0; i < length; i += 2) {
		const char *reg_name = Jim_String(dict[i]);
		const char *reg_value = Jim_String(dict[i + 1]);
		/* 'false' — do not search by register number, name only */
		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));

		if (!buf) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
		int retval = reg->type->set(reg, buf);
		free(buf);

		if (retval != ERROR_OK) {
			Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
				reg_value, reg_name);
			return JIM_ERR;
		}
	}

	return JIM_OK;
}
5235
5236 /**
5237 * Returns true only if the target has a handler for the specified event.
5238 */
5239 bool target_has_event_action(struct target *target, enum target_event event)
5240 {
5241 struct target_event_action *teap;
5242
5243 for (teap = target->event_action; teap; teap = teap->next) {
5244 if (teap->event == event)
5245 return true;
5246 }
5247 return false;
5248 }
5249
/* Option identifiers for 'target configure' / 'cget'; option strings are
 * mapped to these values via nvp_config_opts[]. */
enum target_cfg_param {
	TCFG_TYPE,
	TCFG_EVENT,
	TCFG_WORK_AREA_VIRT,
	TCFG_WORK_AREA_PHYS,
	TCFG_WORK_AREA_SIZE,
	TCFG_WORK_AREA_BACKUP,
	TCFG_ENDIAN,
	TCFG_COREID,
	TCFG_CHAIN_POSITION,
	TCFG_DBGBASE,
	TCFG_RTOS,
	TCFG_DEFER_EXAMINE,
	TCFG_GDB_PORT,
	TCFG_GDB_MAX_CONNECTIONS,
};
5266
/* Name/value table mapping 'target configure' option strings to
 * enum target_cfg_param; terminated by a NULL-name sentinel. */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type",             .value = TCFG_TYPE },
	{ .name = "-event",            .value = TCFG_EVENT },
	{ .name = "-work-area-virt",   .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys",   .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size",   .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian",           .value = TCFG_ENDIAN },
	{ .name = "-coreid",           .value = TCFG_COREID },
	{ .name = "-chain-position",   .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase",          .value = TCFG_DBGBASE },
	{ .name = "-rtos",             .value = TCFG_RTOS },
	{ .name = "-defer-examine",    .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port",         .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections",   .value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
5284
5285 static int target_configure(struct jim_getopt_info *goi, struct target *target)
5286 {
5287 struct jim_nvp *n;
5288 Jim_Obj *o;
5289 jim_wide w;
5290 int e;
5291
5292 /* parse config or cget options ... */
5293 while (goi->argc > 0) {
5294 Jim_SetEmptyResult(goi->interp);
5295 /* jim_getopt_debug(goi); */
5296
5297 if (target->type->target_jim_configure) {
5298 /* target defines a configure function */
5299 /* target gets first dibs on parameters */
5300 e = (*(target->type->target_jim_configure))(target, goi);
5301 if (e == JIM_OK) {
5302 /* more? */
5303 continue;
5304 }
5305 if (e == JIM_ERR) {
5306 /* An error */
5307 return e;
5308 }
5309 /* otherwise we 'continue' below */
5310 }
5311 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
5312 if (e != JIM_OK) {
5313 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
5314 return e;
5315 }
5316 switch (n->value) {
5317 case TCFG_TYPE:
5318 /* not settable */
5319 if (goi->isconfigure) {
5320 Jim_SetResultFormatted(goi->interp,
5321 "not settable: %s", n->name);
5322 return JIM_ERR;
5323 } else {
5324 no_params:
5325 if (goi->argc != 0) {
5326 Jim_WrongNumArgs(goi->interp,
5327 goi->argc, goi->argv,
5328 "NO PARAMS");
5329 return JIM_ERR;
5330 }
5331 }
5332 Jim_SetResultString(goi->interp,
5333 target_type_name(target), -1);
5334 /* loop for more */
5335 break;
5336 case TCFG_EVENT:
5337 if (goi->argc == 0) {
5338 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
5339 return JIM_ERR;
5340 }
5341
5342 e = jim_getopt_nvp(goi, nvp_target_event, &n);
5343 if (e != JIM_OK) {
5344 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
5345 return e;
5346 }
5347
5348 if (goi->isconfigure) {
5349 if (goi->argc != 1) {
5350 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
5351 return JIM_ERR;
5352 }
5353 } else {
5354 if (goi->argc != 0) {
5355 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
5356 return JIM_ERR;
5357 }
5358 }
5359
5360 {
5361 struct target_event_action *teap;
5362
5363 teap = target->event_action;
5364 /* replace existing? */
5365 while (teap) {
5366 if (teap->event == (enum target_event)n->value)
5367 break;
5368 teap = teap->next;
5369 }
5370
5371 if (goi->isconfigure) {
5372 /* START_DEPRECATED_TPIU */
5373 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5374 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5375 /* END_DEPRECATED_TPIU */
5376
5377 bool replace = true;
5378 if (!teap) {
5379 /* create new */
5380 teap = calloc(1, sizeof(*teap));
5381 replace = false;
5382 }
5383 teap->event = n->value;
5384 teap->interp = goi->interp;
5385 jim_getopt_obj(goi, &o);
5386 if (teap->body)
5387 Jim_DecrRefCount(teap->interp, teap->body);
5388 teap->body = Jim_DuplicateObj(goi->interp, o);
5389 /*
5390 * FIXME:
5391 * Tcl/TK - "tk events" have a nice feature.
5392 * See the "BIND" command.
5393 * We should support that here.
5394 * You can specify %X and %Y in the event code.
5395 * The idea is: %T - target name.
5396 * The idea is: %N - target number
5397 * The idea is: %E - event name.
5398 */
5399 Jim_IncrRefCount(teap->body);
5400
5401 if (!replace) {
5402 /* add to head of event list */
5403 teap->next = target->event_action;
5404 target->event_action = teap;
5405 }
5406 Jim_SetEmptyResult(goi->interp);
5407 } else {
5408 /* get */
5409 if (!teap)
5410 Jim_SetEmptyResult(goi->interp);
5411 else
5412 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5413 }
5414 }
5415 /* loop for more */
5416 break;
5417
5418 case TCFG_WORK_AREA_VIRT:
5419 if (goi->isconfigure) {
5420 target_free_all_working_areas(target);
5421 e = jim_getopt_wide(goi, &w);
5422 if (e != JIM_OK)
5423 return e;
5424 target->working_area_virt = w;
5425 target->working_area_virt_spec = true;
5426 } else {
5427 if (goi->argc != 0)
5428 goto no_params;
5429 }
5430 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5431 /* loop for more */
5432 break;
5433
5434 case TCFG_WORK_AREA_PHYS:
5435 if (goi->isconfigure) {
5436 target_free_all_working_areas(target);
5437 e = jim_getopt_wide(goi, &w);
5438 if (e != JIM_OK)
5439 return e;
5440 target->working_area_phys = w;
5441 target->working_area_phys_spec = true;
5442 } else {
5443 if (goi->argc != 0)
5444 goto no_params;
5445 }
5446 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5447 /* loop for more */
5448 break;
5449
5450 case TCFG_WORK_AREA_SIZE:
5451 if (goi->isconfigure) {
5452 target_free_all_working_areas(target);
5453 e = jim_getopt_wide(goi, &w);
5454 if (e != JIM_OK)
5455 return e;
5456 target->working_area_size = w;
5457 } else {
5458 if (goi->argc != 0)
5459 goto no_params;
5460 }
5461 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5462 /* loop for more */
5463 break;
5464
5465 case TCFG_WORK_AREA_BACKUP:
5466 if (goi->isconfigure) {
5467 target_free_all_working_areas(target);
5468 e = jim_getopt_wide(goi, &w);
5469 if (e != JIM_OK)
5470 return e;
5471 /* make this boolean */
5472 target->backup_working_area = (w != 0);
5473 } else {
5474 if (goi->argc != 0)
5475 goto no_params;
5476 }
5477 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area ? 1 : 0));
5478 /* loop for more e*/
5479 break;
5480
5481
5482 case TCFG_ENDIAN:
5483 if (goi->isconfigure) {
5484 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5485 if (e != JIM_OK) {
5486 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5487 return e;
5488 }
5489 target->endianness = n->value;
5490 } else {
5491 if (goi->argc != 0)
5492 goto no_params;
5493 }
5494 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5495 if (!n->name) {
5496 target->endianness = TARGET_LITTLE_ENDIAN;
5497 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5498 }
5499 Jim_SetResultString(goi->interp, n->name, -1);
5500 /* loop for more */
5501 break;
5502
5503 case TCFG_COREID:
5504 if (goi->isconfigure) {
5505 e = jim_getopt_wide(goi, &w);
5506 if (e != JIM_OK)
5507 return e;
5508 target->coreid = (int32_t)w;
5509 } else {
5510 if (goi->argc != 0)
5511 goto no_params;
5512 }
5513 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5514 /* loop for more */
5515 break;
5516
5517 case TCFG_CHAIN_POSITION:
5518 if (goi->isconfigure) {
5519 Jim_Obj *o_t;
5520 struct jtag_tap *tap;
5521
5522 if (target->has_dap) {
5523 Jim_SetResultString(goi->interp,
5524 "target requires -dap parameter instead of -chain-position!", -1);
5525 return JIM_ERR;
5526 }
5527
5528 target_free_all_working_areas(target);
5529 e = jim_getopt_obj(goi, &o_t);
5530 if (e != JIM_OK)
5531 return e;
5532 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5533 if (!tap)
5534 return JIM_ERR;
5535 target->tap = tap;
5536 target->tap_configured = true;
5537 } else {
5538 if (goi->argc != 0)
5539 goto no_params;
5540 }
5541 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5542 /* loop for more e*/
5543 break;
5544 case TCFG_DBGBASE:
5545 if (goi->isconfigure) {
5546 e = jim_getopt_wide(goi, &w);
5547 if (e != JIM_OK)
5548 return e;
5549 target->dbgbase = (uint32_t)w;
5550 target->dbgbase_set = true;
5551 } else {
5552 if (goi->argc != 0)
5553 goto no_params;
5554 }
5555 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5556 /* loop for more */
5557 break;
5558 case TCFG_RTOS:
5559 /* RTOS */
5560 {
5561 int result = rtos_create(goi, target);
5562 if (result != JIM_OK)
5563 return result;
5564 }
5565 /* loop for more */
5566 break;
5567
5568 case TCFG_DEFER_EXAMINE:
5569 /* DEFER_EXAMINE */
5570 target->defer_examine = true;
5571 /* loop for more */
5572 break;
5573
5574 case TCFG_GDB_PORT:
5575 if (goi->isconfigure) {
5576 struct command_context *cmd_ctx = current_command_context(goi->interp);
5577 if (cmd_ctx->mode != COMMAND_CONFIG) {
5578 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5579 return JIM_ERR;
5580 }
5581
5582 const char *s;
5583 e = jim_getopt_string(goi, &s, NULL);
5584 if (e != JIM_OK)
5585 return e;
5586 free(target->gdb_port_override);
5587 target->gdb_port_override = strdup(s);
5588 } else {
5589 if (goi->argc != 0)
5590 goto no_params;
5591 }
5592 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5593 /* loop for more */
5594 break;
5595
5596 case TCFG_GDB_MAX_CONNECTIONS:
5597 if (goi->isconfigure) {
5598 struct command_context *cmd_ctx = current_command_context(goi->interp);
5599 if (cmd_ctx->mode != COMMAND_CONFIG) {
5600 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5601 return JIM_ERR;
5602 }
5603
5604 e = jim_getopt_wide(goi, &w);
5605 if (e != JIM_OK)
5606 return e;
5607 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5608 } else {
5609 if (goi->argc != 0)
5610 goto no_params;
5611 }
5612 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5613 break;
5614 }
5615 } /* while (goi->argc) */
5616
5617
5618 /* done - we return */
5619 return JIM_OK;
5620 }
5621
5622 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5623 {
5624 struct command *c = jim_to_command(interp);
5625 struct jim_getopt_info goi;
5626
5627 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5628 goi.isconfigure = !strcmp(c->name, "configure");
5629 if (goi.argc < 1) {
5630 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5631 "missing: -option ...");
5632 return JIM_ERR;
5633 }
5634 struct command_context *cmd_ctx = current_command_context(interp);
5635 assert(cmd_ctx);
5636 struct target *target = get_current_target(cmd_ctx);
5637 return target_configure(&goi, target);
5638 }
5639
5640 static int jim_target_mem2array(Jim_Interp *interp,
5641 int argc, Jim_Obj *const *argv)
5642 {
5643 struct command_context *cmd_ctx = current_command_context(interp);
5644 assert(cmd_ctx);
5645 struct target *target = get_current_target(cmd_ctx);
5646 return target_mem2array(interp, target, argc - 1, argv + 1);
5647 }
5648
5649 static int jim_target_array2mem(Jim_Interp *interp,
5650 int argc, Jim_Obj *const *argv)
5651 {
5652 struct command_context *cmd_ctx = current_command_context(interp);
5653 assert(cmd_ctx);
5654 struct target *target = get_current_target(cmd_ctx);
5655 return target_array2mem(interp, target, argc - 1, argv + 1);
5656 }
5657
5658 COMMAND_HANDLER(handle_target_examine)
5659 {
5660 bool allow_defer = false;
5661
5662 if (CMD_ARGC > 1)
5663 return ERROR_COMMAND_SYNTAX_ERROR;
5664
5665 if (CMD_ARGC == 1) {
5666 if (strcmp(CMD_ARGV[0], "allow-defer"))
5667 return ERROR_COMMAND_ARGUMENT_INVALID;
5668 allow_defer = true;
5669 }
5670
5671 struct target *target = get_current_target(CMD_CTX);
5672 if (!target->tap->enabled) {
5673 command_print(CMD, "[TAP is disabled]");
5674 return ERROR_FAIL;
5675 }
5676
5677 if (allow_defer && target->defer_examine) {
5678 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5679 LOG_INFO("Use arp_examine command to examine it manually!");
5680 return ERROR_OK;
5681 }
5682
5683 int retval = target->type->examine(target);
5684 if (retval != ERROR_OK) {
5685 target_reset_examined(target);
5686 return retval;
5687 }
5688
5689 target_set_examined(target);
5690
5691 return ERROR_OK;
5692 }
5693
5694 COMMAND_HANDLER(handle_target_was_examined)
5695 {
5696 if (CMD_ARGC != 0)
5697 return ERROR_COMMAND_SYNTAX_ERROR;
5698
5699 struct target *target = get_current_target(CMD_CTX);
5700
5701 command_print(CMD, "%d", target_was_examined(target) ? 1 : 0);
5702
5703 return ERROR_OK;
5704 }
5705
5706 COMMAND_HANDLER(handle_target_examine_deferred)
5707 {
5708 if (CMD_ARGC != 0)
5709 return ERROR_COMMAND_SYNTAX_ERROR;
5710
5711 struct target *target = get_current_target(CMD_CTX);
5712
5713 command_print(CMD, "%d", target->defer_examine ? 1 : 0);
5714
5715 return ERROR_OK;
5716 }
5717
5718 COMMAND_HANDLER(handle_target_halt_gdb)
5719 {
5720 if (CMD_ARGC != 0)
5721 return ERROR_COMMAND_SYNTAX_ERROR;
5722
5723 struct target *target = get_current_target(CMD_CTX);
5724
5725 return target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
5726 }
5727
5728 COMMAND_HANDLER(handle_target_poll)
5729 {
5730 if (CMD_ARGC != 0)
5731 return ERROR_COMMAND_SYNTAX_ERROR;
5732
5733 struct target *target = get_current_target(CMD_CTX);
5734 if (!target->tap->enabled) {
5735 command_print(CMD, "[TAP is disabled]");
5736 return ERROR_FAIL;
5737 }
5738
5739 if (!(target_was_examined(target)))
5740 return ERROR_TARGET_NOT_EXAMINED;
5741
5742 return target->type->poll(target);
5743 }
5744
/* "arp_reset 'assert'|'deassert' halt" -- used internally by the reset
 * framework.  Asserts or deasserts reset on the current target; the second
 * argument selects whether the target should halt out of reset. */
COMMAND_HANDLER(handle_target_reset)
{
	if (CMD_ARGC != 2)
		return ERROR_COMMAND_SYNTAX_ERROR;

	const struct nvp *n = nvp_name2value(nvp_assert, CMD_ARGV[0]);
	if (!n->name) {
		nvp_unknown_command_print(CMD, nvp_assert, NULL, CMD_ARGV[0]);
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	/* the halt or not param */
	int a;
	COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], a);

	struct target *target = get_current_target(CMD_CTX);
	if (!target->tap->enabled) {
		command_print(CMD, "[TAP is disabled]");
		return ERROR_FAIL;
	}

	/* targets without their own reset implementation cannot be reset here */
	if (!target->type->assert_reset || !target->type->deassert_reset) {
		command_print(CMD, "No target-specific reset for %s", target_name(target));
		return ERROR_FAIL;
	}

	/* a -defer-examine target must be re-examined after the reset */
	if (target->defer_examine)
		target_reset_examined(target);

	/* determine if we should halt or not. */
	target->reset_halt = (a != 0);
	/* When this happens - all workareas are invalid. */
	target_free_all_working_areas_restore(target, 0);

	/* do the assert */
	if (n->value == NVP_ASSERT)
		return target->type->assert_reset(target);
	return target->type->deassert_reset(target);
}
5784
5785 COMMAND_HANDLER(handle_target_halt)
5786 {
5787 if (CMD_ARGC != 0)
5788 return ERROR_COMMAND_SYNTAX_ERROR;
5789
5790 struct target *target = get_current_target(CMD_CTX);
5791 if (!target->tap->enabled) {
5792 command_print(CMD, "[TAP is disabled]");
5793 return ERROR_FAIL;
5794 }
5795
5796 return target->type->halt(target);
5797 }
5798
5799 COMMAND_HANDLER(handle_target_wait_state)
5800 {
5801 if (CMD_ARGC != 2)
5802 return ERROR_COMMAND_SYNTAX_ERROR;
5803
5804 const struct nvp *n = nvp_name2value(nvp_target_state, CMD_ARGV[0]);
5805 if (!n->name) {
5806 nvp_unknown_command_print(CMD, nvp_target_state, NULL, CMD_ARGV[0]);
5807 return ERROR_COMMAND_ARGUMENT_INVALID;
5808 }
5809
5810 unsigned int a;
5811 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], a);
5812
5813 struct target *target = get_current_target(CMD_CTX);
5814 if (!target->tap->enabled) {
5815 command_print(CMD, "[TAP is disabled]");
5816 return ERROR_FAIL;
5817 }
5818
5819 int retval = target_wait_state(target, n->value, a);
5820 if (retval != ERROR_OK) {
5821 command_print(CMD,
5822 "target: %s wait %s fails (%d) %s",
5823 target_name(target), n->name,
5824 retval, target_strerror_safe(retval));
5825 return retval;
5826 }
5827 return ERROR_OK;
5828 }
/* Human-readable list of the events defined for this target.
 * Scripts/programs should use 'name cget -event NAME' instead.
 */
5832 COMMAND_HANDLER(handle_target_event_list)
5833 {
5834 struct target *target = get_current_target(CMD_CTX);
5835 struct target_event_action *teap = target->event_action;
5836
5837 command_print(CMD, "Event actions for target %s\n",
5838 target_name(target));
5839 command_print(CMD, "%-25s | Body", "Event");
5840 command_print(CMD, "------------------------- | "
5841 "----------------------------------------");
5842 while (teap) {
5843 command_print(CMD, "%-25s | %s",
5844 target_event_name(teap->event),
5845 Jim_GetString(teap->body, NULL));
5846 teap = teap->next;
5847 }
5848 command_print(CMD, "***END***");
5849 return ERROR_OK;
5850 }
5851
5852 COMMAND_HANDLER(handle_target_current_state)
5853 {
5854 if (CMD_ARGC != 0)
5855 return ERROR_COMMAND_SYNTAX_ERROR;
5856
5857 struct target *target = get_current_target(CMD_CTX);
5858
5859 command_print(CMD, "%s", target_state_name(target));
5860
5861 return ERROR_OK;
5862 }
5863
5864 COMMAND_HANDLER(handle_target_debug_reason)
5865 {
5866 if (CMD_ARGC != 0)
5867 return ERROR_COMMAND_SYNTAX_ERROR;
5868
5869 struct target *target = get_current_target(CMD_CTX);
5870
5871
5872 const char *debug_reason = nvp_value2name(nvp_target_debug_reason,
5873 target->debug_reason)->name;
5874
5875 if (!debug_reason) {
5876 command_print(CMD, "bug: invalid debug reason (%d)",
5877 target->debug_reason);
5878 return ERROR_FAIL;
5879 }
5880
5881 command_print(CMD, "%s", debug_reason);
5882
5883 return ERROR_OK;
5884 }
5885
5886 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5887 {
5888 struct jim_getopt_info goi;
5889 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5890 if (goi.argc != 1) {
5891 const char *cmd_name = Jim_GetString(argv[0], NULL);
5892 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5893 return JIM_ERR;
5894 }
5895 struct jim_nvp *n;
5896 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5897 if (e != JIM_OK) {
5898 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5899 return e;
5900 }
5901 struct command_context *cmd_ctx = current_command_context(interp);
5902 assert(cmd_ctx);
5903 struct target *target = get_current_target(cmd_ctx);
5904 target_handle_event(target, n->value);
5905 return JIM_OK;
5906 }
5907
/*
 * Per-instance subcommands: every target created with "target create
 * <name> ..." gets these registered under its own name, e.g.
 * "<name> configure", "<name> mww", "<name> arp_examine", ...
 * The arp_* entries are internal hooks used by the reset framework.
 */
static const struct command_registration target_instance_command_handlers[] = {
	{
		.name = "configure",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_configure,
		.help = "configure a new target for use",
		.usage = "[target_attribute ...]",
	},
	{
		.name = "cget",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_configure,
		.help = "returns the specified target attribute",
		.usage = "target_attribute",
	},
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 64-bit word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 32-bit word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 16-bit half-word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write byte(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 64-bit words",
		.usage = "address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 32-bit words",
		.usage = "address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 16-bit half-words",
		.usage = "address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 8-bit bytes",
		.usage = "address [count]",
	},
	{
		.name = "array2mem",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_array2mem,
		.help = "Writes Tcl array of 8/16/32 bit numbers "
			"to target memory",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "mem2array",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_mem2array,
		.help = "Loads Tcl array of 8/16/32 bit numbers "
			"from target memory",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.handler = handle_target_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	{
		.name = "eventlist",
		.handler = handle_target_event_list,
		.mode = COMMAND_EXEC,
		.help = "displays a table of events defined for this target",
		.usage = "",
	},
	{
		.name = "curstate",
		.mode = COMMAND_EXEC,
		.handler = handle_target_current_state,
		.help = "displays the current state of this target",
		.usage = "",
	},
	{
		.name = "debug_reason",
		.mode = COMMAND_EXEC,
		.handler = handle_target_debug_reason,
		.help = "displays the debug reason of this target",
		.usage = "",
	},
	{
		.name = "arp_examine",
		.mode = COMMAND_EXEC,
		.handler = handle_target_examine,
		.help = "used internally for reset processing",
		.usage = "['allow-defer']",
	},
	{
		.name = "was_examined",
		.mode = COMMAND_EXEC,
		.handler = handle_target_was_examined,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "examine_deferred",
		.mode = COMMAND_EXEC,
		.handler = handle_target_examine_deferred,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "arp_halt_gdb",
		.mode = COMMAND_EXEC,
		.handler = handle_target_halt_gdb,
		.help = "used internally for reset processing to halt GDB",
		.usage = "",
	},
	{
		.name = "arp_poll",
		.mode = COMMAND_EXEC,
		.handler = handle_target_poll,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "arp_reset",
		.mode = COMMAND_EXEC,
		.handler = handle_target_reset,
		.help = "used internally for reset processing",
		.usage = "'assert'|'deassert' halt",
	},
	{
		.name = "arp_halt",
		.mode = COMMAND_EXEC,
		.handler = handle_target_halt,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "arp_waitstate",
		.mode = COMMAND_EXEC,
		.handler = handle_target_wait_state,
		.help = "used internally for reset processing",
		.usage = "statename timeoutmsecs",
	},
	{
		.name = "invoke-event",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_invoke_event,
		.help = "invoke handler for specified event",
		.usage = "event_name",
	},
	COMMAND_REGISTRATION_DONE
};
6109
6110 static int target_create(struct jim_getopt_info *goi)
6111 {
6112 Jim_Obj *new_cmd;
6113 Jim_Cmd *cmd;
6114 const char *cp;
6115 int e;
6116 int x;
6117 struct target *target;
6118 struct command_context *cmd_ctx;
6119
6120 cmd_ctx = current_command_context(goi->interp);
6121 assert(cmd_ctx);
6122
6123 if (goi->argc < 3) {
6124 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
6125 return JIM_ERR;
6126 }
6127
6128 /* COMMAND */
6129 jim_getopt_obj(goi, &new_cmd);
6130 /* does this command exist? */
6131 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
6132 if (cmd) {
6133 cp = Jim_GetString(new_cmd, NULL);
6134 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
6135 return JIM_ERR;
6136 }
6137
6138 /* TYPE */
6139 e = jim_getopt_string(goi, &cp, NULL);
6140 if (e != JIM_OK)
6141 return e;
6142 struct transport *tr = get_current_transport();
6143 if (tr && tr->override_target) {
6144 e = tr->override_target(&cp);
6145 if (e != ERROR_OK) {
6146 LOG_ERROR("The selected transport doesn't support this target");
6147 return JIM_ERR;
6148 }
6149 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
6150 }
6151 /* now does target type exist */
6152 for (x = 0 ; target_types[x] ; x++) {
6153 if (strcmp(cp, target_types[x]->name) == 0) {
6154 /* found */
6155 break;
6156 }
6157 }
6158 if (!target_types[x]) {
6159 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
6160 for (x = 0 ; target_types[x] ; x++) {
6161 if (target_types[x + 1]) {
6162 Jim_AppendStrings(goi->interp,
6163 Jim_GetResult(goi->interp),
6164 target_types[x]->name,
6165 ", ", NULL);
6166 } else {
6167 Jim_AppendStrings(goi->interp,
6168 Jim_GetResult(goi->interp),
6169 " or ",
6170 target_types[x]->name, NULL);
6171 }
6172 }
6173 return JIM_ERR;
6174 }
6175
6176 /* Create it */
6177 target = calloc(1, sizeof(struct target));
6178 if (!target) {
6179 LOG_ERROR("Out of memory");
6180 return JIM_ERR;
6181 }
6182
6183 /* set empty smp cluster */
6184 target->smp_targets = &empty_smp_targets;
6185
6186 /* allocate memory for each unique target type */
6187 target->type = malloc(sizeof(struct target_type));
6188 if (!target->type) {
6189 LOG_ERROR("Out of memory");
6190 free(target);
6191 return JIM_ERR;
6192 }
6193
6194 memcpy(target->type, target_types[x], sizeof(struct target_type));
6195
6196 /* default to first core, override with -coreid */
6197 target->coreid = 0;
6198
6199 target->working_area = 0x0;
6200 target->working_area_size = 0x0;
6201 target->working_areas = NULL;
6202 target->backup_working_area = false;
6203
6204 target->state = TARGET_UNKNOWN;
6205 target->debug_reason = DBG_REASON_UNDEFINED;
6206 target->reg_cache = NULL;
6207 target->breakpoints = NULL;
6208 target->watchpoints = NULL;
6209 target->next = NULL;
6210 target->arch_info = NULL;
6211
6212 target->verbose_halt_msg = true;
6213
6214 target->halt_issued = false;
6215
6216 /* initialize trace information */
6217 target->trace_info = calloc(1, sizeof(struct trace));
6218 if (!target->trace_info) {
6219 LOG_ERROR("Out of memory");
6220 free(target->type);
6221 free(target);
6222 return JIM_ERR;
6223 }
6224
6225 target->dbgmsg = NULL;
6226 target->dbg_msg_enabled = 0;
6227
6228 target->endianness = TARGET_ENDIAN_UNKNOWN;
6229
6230 target->rtos = NULL;
6231 target->rtos_auto_detect = false;
6232
6233 target->gdb_port_override = NULL;
6234 target->gdb_max_connections = 1;
6235
6236 /* Do the rest as "configure" options */
6237 goi->isconfigure = 1;
6238 e = target_configure(goi, target);
6239
6240 if (e == JIM_OK) {
6241 if (target->has_dap) {
6242 if (!target->dap_configured) {
6243 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
6244 e = JIM_ERR;
6245 }
6246 } else {
6247 if (!target->tap_configured) {
6248 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
6249 e = JIM_ERR;
6250 }
6251 }
6252 /* tap must be set after target was configured */
6253 if (!target->tap)
6254 e = JIM_ERR;
6255 }
6256
6257 if (e != JIM_OK) {
6258 rtos_destroy(target);
6259 free(target->gdb_port_override);
6260 free(target->trace_info);
6261 free(target->type);
6262 free(target);
6263 return e;
6264 }
6265
6266 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
6267 /* default endian to little if not specified */
6268 target->endianness = TARGET_LITTLE_ENDIAN;
6269 }
6270
6271 cp = Jim_GetString(new_cmd, NULL);
6272 target->cmd_name = strdup(cp);
6273 if (!target->cmd_name) {
6274 LOG_ERROR("Out of memory");
6275 rtos_destroy(target);
6276 free(target->gdb_port_override);
6277 free(target->trace_info);
6278 free(target->type);
6279 free(target);
6280 return JIM_ERR;
6281 }
6282
6283 if (target->type->target_create) {
6284 e = (*(target->type->target_create))(target, goi->interp);
6285 if (e != ERROR_OK) {
6286 LOG_DEBUG("target_create failed");
6287 free(target->cmd_name);
6288 rtos_destroy(target);
6289 free(target->gdb_port_override);
6290 free(target->trace_info);
6291 free(target->type);
6292 free(target);
6293 return JIM_ERR;
6294 }
6295 }
6296
6297 /* create the target specific commands */
6298 if (target->type->commands) {
6299 e = register_commands(cmd_ctx, NULL, target->type->commands);
6300 if (e != ERROR_OK)
6301 LOG_ERROR("unable to register '%s' commands", cp);
6302 }
6303
6304 /* now - create the new target name command */
6305 const struct command_registration target_subcommands[] = {
6306 {
6307 .chain = target_instance_command_handlers,
6308 },
6309 {
6310 .chain = target->type->commands,
6311 },
6312 COMMAND_REGISTRATION_DONE
6313 };
6314 const struct command_registration target_commands[] = {
6315 {
6316 .name = cp,
6317 .mode = COMMAND_ANY,
6318 .help = "target command group",
6319 .usage = "",
6320 .chain = target_subcommands,
6321 },
6322 COMMAND_REGISTRATION_DONE
6323 };
6324 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
6325 if (e != ERROR_OK) {
6326 if (target->type->deinit_target)
6327 target->type->deinit_target(target);
6328 free(target->cmd_name);
6329 rtos_destroy(target);
6330 free(target->gdb_port_override);
6331 free(target->trace_info);
6332 free(target->type);
6333 free(target);
6334 return JIM_ERR;
6335 }
6336
6337 /* append to end of list */
6338 append_to_list_all_targets(target);
6339
6340 cmd_ctx->current_target = target;
6341 return JIM_OK;
6342 }
6343
6344 COMMAND_HANDLER(handle_target_current)
6345 {
6346 if (CMD_ARGC != 0)
6347 return ERROR_COMMAND_SYNTAX_ERROR;
6348
6349 struct target *target = get_current_target_or_null(CMD_CTX);
6350 if (target)
6351 command_print(CMD, "%s", target_name(target));
6352
6353 return ERROR_OK;
6354 }
6355
6356 COMMAND_HANDLER(handle_target_types)
6357 {
6358 if (CMD_ARGC != 0)
6359 return ERROR_COMMAND_SYNTAX_ERROR;
6360
6361 for (unsigned int x = 0; target_types[x]; x++)
6362 command_print(CMD, "%s", target_types[x]->name);
6363
6364 return ERROR_OK;
6365 }
6366
6367 COMMAND_HANDLER(handle_target_names)
6368 {
6369 if (CMD_ARGC != 0)
6370 return ERROR_COMMAND_SYNTAX_ERROR;
6371
6372 struct target *target = all_targets;
6373 while (target) {
6374 command_print(CMD, "%s", target_name(target));
6375 target = target->next;
6376 }
6377
6378 return ERROR_OK;
6379 }
6380
6381 static struct target_list *
6382 __attribute__((warn_unused_result))
6383 create_target_list_node(const char *targetname)
6384 {
6385 struct target *target = get_target(targetname);
6386 LOG_DEBUG("%s ", targetname);
6387 if (!target)
6388 return NULL;
6389
6390 struct target_list *new = malloc(sizeof(struct target_list));
6391 if (!new) {
6392 LOG_ERROR("Out of memory");
6393 return new;
6394 }
6395
6396 new->target = target;
6397 return new;
6398 }
6399
6400 static int get_target_with_common_rtos_type(struct command_invocation *cmd,
6401 struct list_head *lh, struct target **result)
6402 {
6403 struct target *target = NULL;
6404 struct target_list *curr;
6405 foreach_smp_target(curr, lh) {
6406 struct rtos *curr_rtos = curr->target->rtos;
6407 if (curr_rtos) {
6408 if (target && target->rtos && target->rtos->type != curr_rtos->type) {
6409 command_print(cmd, "Different rtos types in members of one smp target!");
6410 return ERROR_FAIL;
6411 }
6412 target = curr->target;
6413 }
6414 }
6415 *result = target;
6416 return ERROR_OK;
6417 }
6418
6419 COMMAND_HANDLER(handle_target_smp)
6420 {
6421 static int smp_group = 1;
6422
6423 if (CMD_ARGC == 0) {
6424 LOG_DEBUG("Empty SMP target");
6425 return ERROR_OK;
6426 }
6427 LOG_DEBUG("%d", CMD_ARGC);
6428 /* CMD_ARGC[0] = target to associate in smp
6429 * CMD_ARGC[1] = target to associate in smp
6430 * CMD_ARGC[2] ...
6431 */
6432
6433 struct list_head *lh = malloc(sizeof(*lh));
6434 if (!lh) {
6435 LOG_ERROR("Out of memory");
6436 return ERROR_FAIL;
6437 }
6438 INIT_LIST_HEAD(lh);
6439
6440 for (unsigned int i = 0; i < CMD_ARGC; i++) {
6441 struct target_list *new = create_target_list_node(CMD_ARGV[i]);
6442 if (new)
6443 list_add_tail(&new->lh, lh);
6444 }
6445 /* now parse the list of cpu and put the target in smp mode*/
6446 struct target_list *curr;
6447 foreach_smp_target(curr, lh) {
6448 struct target *target = curr->target;
6449 target->smp = smp_group;
6450 target->smp_targets = lh;
6451 }
6452 smp_group++;
6453
6454 struct target *rtos_target;
6455 int retval = get_target_with_common_rtos_type(CMD, lh, &rtos_target);
6456 if (retval == ERROR_OK && rtos_target)
6457 retval = rtos_smp_init(rtos_target);
6458
6459 return retval;
6460 }
6461
6462 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6463 {
6464 struct jim_getopt_info goi;
6465 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6466 if (goi.argc < 3) {
6467 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6468 "<name> <target_type> [<target_options> ...]");
6469 return JIM_ERR;
6470 }
6471 return target_create(&goi);
6472 }
6473
/* Subcommands of the top-level "target" command ("target create",
 * "target names", ...), as opposed to the per-instance handlers above. */
static const struct command_registration target_subcommand_handlers[] = {
	{
		.name = "init",
		.mode = COMMAND_CONFIG,
		.handler = handle_target_init_command,
		.help = "initialize targets",
		.usage = "",
	},
	{
		.name = "create",
		.mode = COMMAND_CONFIG,
		.jim_handler = jim_target_create,
		.usage = "name type '-chain-position' name [options ...]",
		.help = "Creates and selects a new target",
	},
	{
		.name = "current",
		.mode = COMMAND_ANY,
		.handler = handle_target_current,
		.help = "Returns the currently selected target",
		.usage = "",
	},
	{
		.name = "types",
		.mode = COMMAND_ANY,
		.handler = handle_target_types,
		.help = "Returns the available target types as "
				"a list of strings",
		.usage = "",
	},
	{
		.name = "names",
		.mode = COMMAND_ANY,
		.handler = handle_target_names,
		.help = "Returns the names of all targets as a list of strings",
		.usage = "",
	},
	{
		.name = "smp",
		.mode = COMMAND_ANY,
		.handler = handle_target_smp,
		.usage = "targetname1 targetname2 ...",
		.help = "gather several target in a smp list"
	},

	COMMAND_REGISTRATION_DONE
};
6521
/* One cached image section for the fast_load/fast_load_image commands. */
struct fast_load {
	target_addr_t address;	/* target address the data will be written to */
	uint8_t *data;		/* malloc'd copy of the section contents */
	int length;		/* number of bytes in data */

};
6528
/* Image cached by "fast_load_image" and consumed by "fast_load";
 * fastload is NULL when no image is cached. */
static int fastload_num;
static struct fast_load *fastload;
6531
6532 static void free_fastload(void)
6533 {
6534 if (fastload) {
6535 for (int i = 0; i < fastload_num; i++)
6536 free(fastload[i].data);
6537 free(fastload);
6538 fastload = NULL;
6539 }
6540 }
6541
6542 COMMAND_HANDLER(handle_fast_load_image_command)
6543 {
6544 uint8_t *buffer;
6545 size_t buf_cnt;
6546 uint32_t image_size;
6547 target_addr_t min_address = 0;
6548 target_addr_t max_address = -1;
6549
6550 struct image image;
6551
6552 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6553 &image, &min_address, &max_address);
6554 if (retval != ERROR_OK)
6555 return retval;
6556
6557 struct duration bench;
6558 duration_start(&bench);
6559
6560 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6561 if (retval != ERROR_OK)
6562 return retval;
6563
6564 image_size = 0x0;
6565 retval = ERROR_OK;
6566 fastload_num = image.num_sections;
6567 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6568 if (!fastload) {
6569 command_print(CMD, "out of memory");
6570 image_close(&image);
6571 return ERROR_FAIL;
6572 }
6573 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6574 for (unsigned int i = 0; i < image.num_sections; i++) {
6575 buffer = malloc(image.sections[i].size);
6576 if (!buffer) {
6577 command_print(CMD, "error allocating buffer for section (%d bytes)",
6578 (int)(image.sections[i].size));
6579 retval = ERROR_FAIL;
6580 break;
6581 }
6582
6583 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6584 if (retval != ERROR_OK) {
6585 free(buffer);
6586 break;
6587 }
6588
6589 uint32_t offset = 0;
6590 uint32_t length = buf_cnt;
6591
6592 /* DANGER!!! beware of unsigned comparison here!!! */
6593
6594 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6595 (image.sections[i].base_address < max_address)) {
6596 if (image.sections[i].base_address < min_address) {
6597 /* clip addresses below */
6598 offset += min_address-image.sections[i].base_address;
6599 length -= offset;
6600 }
6601
6602 if (image.sections[i].base_address + buf_cnt > max_address)
6603 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6604
6605 fastload[i].address = image.sections[i].base_address + offset;
6606 fastload[i].data = malloc(length);
6607 if (!fastload[i].data) {
6608 free(buffer);
6609 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6610 length);
6611 retval = ERROR_FAIL;
6612 break;
6613 }
6614 memcpy(fastload[i].data, buffer + offset, length);
6615 fastload[i].length = length;
6616
6617 image_size += length;
6618 command_print(CMD, "%u bytes written at address 0x%8.8x",
6619 (unsigned int)length,
6620 ((unsigned int)(image.sections[i].base_address + offset)));
6621 }
6622
6623 free(buffer);
6624 }
6625
6626 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6627 command_print(CMD, "Loaded %" PRIu32 " bytes "
6628 "in %fs (%0.3f KiB/s)", image_size,
6629 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6630
6631 command_print(CMD,
6632 "WARNING: image has not been loaded to target!"
6633 "You can issue a 'fast_load' to finish loading.");
6634 }
6635
6636 image_close(&image);
6637
6638 if (retval != ERROR_OK)
6639 free_fastload();
6640
6641 return retval;
6642 }
6643
6644 COMMAND_HANDLER(handle_fast_load_command)
6645 {
6646 if (CMD_ARGC > 0)
6647 return ERROR_COMMAND_SYNTAX_ERROR;
6648 if (!fastload) {
6649 LOG_ERROR("No image in memory");
6650 return ERROR_FAIL;
6651 }
6652 int i;
6653 int64_t ms = timeval_ms();
6654 int size = 0;
6655 int retval = ERROR_OK;
6656 for (i = 0; i < fastload_num; i++) {
6657 struct target *target = get_current_target(CMD_CTX);
6658 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6659 (unsigned int)(fastload[i].address),
6660 (unsigned int)(fastload[i].length));
6661 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6662 if (retval != ERROR_OK)
6663 break;
6664 size += fastload[i].length;
6665 }
6666 if (retval == ERROR_OK) {
6667 int64_t after = timeval_ms();
6668 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6669 }
6670 return retval;
6671 }
6672
/* Top-level 'targets' command and the config-stage 'target' command
 * group (whose subcommands live in target_subcommand_handlers). */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
6691
/* Register the configuration-stage 'target'/'targets' commands.
 * Returns the register_commands() status. */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6696
/* When true, advice is printed after each reset about configuration
 * options that could improve performance; toggled by 'reset_nag'. */
static bool target_reset_nag = true;

/* Accessor used by reset handling to decide whether to print the nag. */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
6703
/* 'reset_nag' command: enable/disable (or report) the post-reset
 * performance-advice message controlled by target_reset_nag. */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
			&target_reset_nag, "Nag after each reset about options to improve "
			"performance");
}
6710
6711 COMMAND_HANDLER(handle_ps_command)
6712 {
6713 struct target *target = get_current_target(CMD_CTX);
6714 char *display;
6715 if (target->state != TARGET_HALTED) {
6716 command_print(CMD, "Error: [%s] not halted", target_name(target));
6717 return ERROR_TARGET_NOT_HALTED;
6718 }
6719
6720 if ((target->rtos) && (target->rtos->type)
6721 && (target->rtos->type->ps_command)) {
6722 display = target->rtos->type->ps_command(target);
6723 command_print(CMD, "%s", display);
6724 free(display);
6725 return ERROR_OK;
6726 } else {
6727 LOG_INFO("failed");
6728 return ERROR_TARGET_FAILURE;
6729 }
6730 }
6731
/* Print an optional label followed by a hex dump of buf, then end the line. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
	if (text)
		command_print_sameline(cmd, "%s", text);

	int idx = 0;
	while (idx < size) {
		command_print_sameline(cmd, " %02x", buf[idx]);
		idx++;
	}

	/* Terminate the dump line. */
	command_print(cmd, " ");
}
6740
6741 COMMAND_HANDLER(handle_test_mem_access_command)
6742 {
6743 struct target *target = get_current_target(CMD_CTX);
6744 uint32_t test_size;
6745 int retval = ERROR_OK;
6746
6747 if (target->state != TARGET_HALTED) {
6748 command_print(CMD, "Error: [%s] not halted", target_name(target));
6749 return ERROR_TARGET_NOT_HALTED;
6750 }
6751
6752 if (CMD_ARGC != 1)
6753 return ERROR_COMMAND_SYNTAX_ERROR;
6754
6755 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6756
6757 /* Test reads */
6758 size_t num_bytes = test_size + 4;
6759
6760 struct working_area *wa = NULL;
6761 retval = target_alloc_working_area(target, num_bytes, &wa);
6762 if (retval != ERROR_OK) {
6763 LOG_ERROR("Not enough working area");
6764 return ERROR_FAIL;
6765 }
6766
6767 uint8_t *test_pattern = malloc(num_bytes);
6768
6769 for (size_t i = 0; i < num_bytes; i++)
6770 test_pattern[i] = rand();
6771
6772 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6773 if (retval != ERROR_OK) {
6774 LOG_ERROR("Test pattern write failed");
6775 goto out;
6776 }
6777
6778 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6779 for (int size = 1; size <= 4; size *= 2) {
6780 for (int offset = 0; offset < 4; offset++) {
6781 uint32_t count = test_size / size;
6782 size_t host_bufsiz = (count + 2) * size + host_offset;
6783 uint8_t *read_ref = malloc(host_bufsiz);
6784 uint8_t *read_buf = malloc(host_bufsiz);
6785
6786 for (size_t i = 0; i < host_bufsiz; i++) {
6787 read_ref[i] = rand();
6788 read_buf[i] = read_ref[i];
6789 }
6790 command_print_sameline(CMD,
6791 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6792 size, offset, host_offset ? "un" : "");
6793
6794 struct duration bench;
6795 duration_start(&bench);
6796
6797 retval = target_read_memory(target, wa->address + offset, size, count,
6798 read_buf + size + host_offset);
6799
6800 duration_measure(&bench);
6801
6802 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6803 command_print(CMD, "Unsupported alignment");
6804 goto next;
6805 } else if (retval != ERROR_OK) {
6806 command_print(CMD, "Memory read failed");
6807 goto next;
6808 }
6809
6810 /* replay on host */
6811 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6812
6813 /* check result */
6814 int result = memcmp(read_ref, read_buf, host_bufsiz);
6815 if (result == 0) {
6816 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6817 duration_elapsed(&bench),
6818 duration_kbps(&bench, count * size));
6819 } else {
6820 command_print(CMD, "Compare failed");
6821 binprint(CMD, "ref:", read_ref, host_bufsiz);
6822 binprint(CMD, "buf:", read_buf, host_bufsiz);
6823 }
6824 next:
6825 free(read_ref);
6826 free(read_buf);
6827 }
6828 }
6829 }
6830
6831 out:
6832 free(test_pattern);
6833
6834 target_free_working_area(target, wa);
6835
6836 /* Test writes */
6837 num_bytes = test_size + 4 + 4 + 4;
6838
6839 retval = target_alloc_working_area(target, num_bytes, &wa);
6840 if (retval != ERROR_OK) {
6841 LOG_ERROR("Not enough working area");
6842 return ERROR_FAIL;
6843 }
6844
6845 test_pattern = malloc(num_bytes);
6846
6847 for (size_t i = 0; i < num_bytes; i++)
6848 test_pattern[i] = rand();
6849
6850 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6851 for (int size = 1; size <= 4; size *= 2) {
6852 for (int offset = 0; offset < 4; offset++) {
6853 uint32_t count = test_size / size;
6854 size_t host_bufsiz = count * size + host_offset;
6855 uint8_t *read_ref = malloc(num_bytes);
6856 uint8_t *read_buf = malloc(num_bytes);
6857 uint8_t *write_buf = malloc(host_bufsiz);
6858
6859 for (size_t i = 0; i < host_bufsiz; i++)
6860 write_buf[i] = rand();
6861 command_print_sameline(CMD,
6862 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6863 size, offset, host_offset ? "un" : "");
6864
6865 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6866 if (retval != ERROR_OK) {
6867 command_print(CMD, "Test pattern write failed");
6868 goto nextw;
6869 }
6870
6871 /* replay on host */
6872 memcpy(read_ref, test_pattern, num_bytes);
6873 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6874
6875 struct duration bench;
6876 duration_start(&bench);
6877
6878 retval = target_write_memory(target, wa->address + size + offset, size, count,
6879 write_buf + host_offset);
6880
6881 duration_measure(&bench);
6882
6883 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6884 command_print(CMD, "Unsupported alignment");
6885 goto nextw;
6886 } else if (retval != ERROR_OK) {
6887 command_print(CMD, "Memory write failed");
6888 goto nextw;
6889 }
6890
6891 /* read back */
6892 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6893 if (retval != ERROR_OK) {
6894 command_print(CMD, "Test pattern write failed");
6895 goto nextw;
6896 }
6897
6898 /* check result */
6899 int result = memcmp(read_ref, read_buf, num_bytes);
6900 if (result == 0) {
6901 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6902 duration_elapsed(&bench),
6903 duration_kbps(&bench, count * size));
6904 } else {
6905 command_print(CMD, "Compare failed");
6906 binprint(CMD, "ref:", read_ref, num_bytes);
6907 binprint(CMD, "buf:", read_buf, num_bytes);
6908 }
6909 nextw:
6910 free(read_ref);
6911 free(read_buf);
6912 }
6913 }
6914 }
6915
6916 free(test_pattern);
6917
6918 target_free_working_area(target, wa);
6919 return retval;
6920 }
6921
/* Run-time (EXEC-mode) target commands, registered per command context
 * by target_register_user_commands() below. */
static const struct command_registration target_exec_command_handlers[] = {
	/* image staging / download */
	{
		.name = "fast_load_image",
		.handler = handle_fast_load_image_command,
		.mode = COMMAND_ANY,
		.help = "Load image into server memory for later use by "
			"fast_load; primarily for profiling",
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address [max_length]]",
	},
	{
		.name = "fast_load",
		.handler = handle_fast_load_command,
		.mode = COMMAND_EXEC,
		.help = "loads active fast load image to current target "
			"- mainly for profiling purposes",
		.usage = "",
	},
	{
		.name = "profile",
		.handler = handle_profile_command,
		.mode = COMMAND_EXEC,
		.usage = "seconds filename [start end]",
		.help = "profiling samples the CPU PC",
	},
	/** @todo don't register virt2phys() unless target supports it */
	{
		.name = "virt2phys",
		.handler = handle_virt2phys_command,
		.mode = COMMAND_ANY,
		.help = "translate a virtual address into a physical address",
		.usage = "virtual_address",
	},
	/* register access */
	{
		.name = "reg",
		.handler = handle_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display (reread from target with \"force\") or set a register; "
			"with no arguments, displays all registers and their values",
		.usage = "[(register_number|register_name) [(value|'force')]]",
	},
	/* execution control */
	{
		.name = "poll",
		.handler = handle_poll_command,
		.mode = COMMAND_EXEC,
		.help = "poll target state; or reconfigure background polling",
		.usage = "['on'|'off']",
	},
	{
		.name = "wait_halt",
		.handler = handle_wait_halt_command,
		.mode = COMMAND_EXEC,
		.help = "wait up to the specified number of milliseconds "
			"(default 5000) for a previously requested halt",
		.usage = "[milliseconds]",
	},
	{
		.name = "halt",
		.handler = handle_halt_command,
		.mode = COMMAND_EXEC,
		.help = "request target to halt, then wait up to the specified "
			"number of milliseconds (default 5000) for it to complete",
		.usage = "[milliseconds]",
	},
	{
		.name = "resume",
		.handler = handle_resume_command,
		.mode = COMMAND_EXEC,
		.help = "resume target execution from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "reset",
		.handler = handle_reset_command,
		.mode = COMMAND_EXEC,
		.usage = "[run|halt|init]",
		.help = "Reset all targets into the specified mode. "
			"Default reset mode is run, if not given.",
	},
	{
		.name = "soft_reset_halt",
		.handler = handle_soft_reset_halt_command,
		.mode = COMMAND_EXEC,
		.usage = "",
		.help = "halt the target and do a soft reset",
	},
	{
		.name = "step",
		.handler = handle_step_command,
		.mode = COMMAND_EXEC,
		.help = "step one instruction from current PC or address",
		.usage = "[address]",
	},
	/* memory display (md*) and write (mw*) by access width */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory double-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory half-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory bytes",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory double-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory half-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory byte",
		.usage = "['phys'] address value [count]",
	},
	/* breakpoints and watchpoints */
	{
		.name = "bp",
		.handler = handle_bp_command,
		.mode = COMMAND_EXEC,
		.help = "list or set hardware or software breakpoint",
		.usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
	},
	{
		.name = "rbp",
		.handler = handle_rbp_command,
		.mode = COMMAND_EXEC,
		.help = "remove breakpoint",
		.usage = "'all' | address",
	},
	{
		.name = "wp",
		.handler = handle_wp_command,
		.mode = COMMAND_EXEC,
		.help = "list (no params) or create watchpoints",
		.usage = "[address length [('r'|'w'|'a') [value [mask]]]]",
	},
	{
		.name = "rwp",
		.handler = handle_rwp_command,
		.mode = COMMAND_EXEC,
		.help = "remove watchpoint",
		.usage = "'all' | address",
	},
	/* image load/dump/verify */
	{
		.name = "load_image",
		.handler = handle_load_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address] [max_length]",
	},
	{
		.name = "dump_image",
		.handler = handle_dump_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address size",
	},
	{
		.name = "verify_image_checksum",
		.handler = handle_verify_image_checksum_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "verify_image",
		.handler = handle_verify_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "test_image",
		.handler = handle_test_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	/* Tcl-level register and memory access */
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.handler = handle_target_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	/* miscellaneous */
	{
		.name = "reset_nag",
		.handler = handle_target_reset_nag,
		.mode = COMMAND_ANY,
		.help = "Nag after each reset about options that could have been "
			"enabled to improve performance.",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "ps",
		.handler = handle_ps_command,
		.mode = COMMAND_EXEC,
		.help = "list all tasks",
		.usage = "",
	},
	{
		.name = "test_mem_access",
		.handler = handle_test_mem_access_command,
		.mode = COMMAND_EXEC,
		.help = "Test the target's memory access functions",
		.usage = "size",
	},

	COMMAND_REGISTRATION_DONE
};
7183 static int target_register_user_commands(struct command_context *cmd_ctx)
7184 {
7185 int retval = ERROR_OK;
7186 retval = target_request_register_commands(cmd_ctx);
7187 if (retval != ERROR_OK)
7188 return retval;
7189
7190 retval = trace_register_commands(cmd_ctx);
7191 if (retval != ERROR_OK)
7192 return retval;
7193
7194
7195 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7196 }
7197
7198 const char *target_debug_reason_str(enum target_debug_reason reason)
7199 {
7200 switch (reason) {
7201 case DBG_REASON_DBGRQ:
7202 return "DBGRQ";
7203 case DBG_REASON_BREAKPOINT:
7204 return "BREAKPOINT";
7205 case DBG_REASON_WATCHPOINT:
7206 return "WATCHPOINT";
7207 case DBG_REASON_WPTANDBKPT:
7208 return "WPTANDBKPT";
7209 case DBG_REASON_SINGLESTEP:
7210 return "SINGLESTEP";
7211 case DBG_REASON_NOTHALTED:
7212 return "NOTHALTED";
7213 case DBG_REASON_EXIT:
7214 return "EXIT";
7215 case DBG_REASON_EXC_CATCH:
7216 return "EXC_CATCH";
7217 case DBG_REASON_UNDEFINED:
7218 return "UNDEFINED";
7219 default:
7220 return "UNKNOWN!";
7221 }
7222 }

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)