target: Unify the output of "bp" command
[openocd.git] / src / target / target.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2007-2010 Øyvind Harboe *
8 * oyvind.harboe@zylin.com *
9 * *
10 * Copyright (C) 2008, Duane Ellis *
11 * openocd@duaneeellis.com *
12 * *
13 * Copyright (C) 2008 by Spencer Oliver *
14 * spen@spen-soft.co.uk *
15 * *
16 * Copyright (C) 2008 by Rick Altherr *
17 * kc8apf@kc8apf.net> *
18 * *
19 * Copyright (C) 2011 by Broadcom Corporation *
20 * Evan Hunter - ehunter@broadcom.com *
21 * *
22 * Copyright (C) ST-Ericsson SA 2011 *
23 * michel.jaouen@stericsson.com : smp minimum support *
24 * *
25 * Copyright (C) 2011 Andreas Fritiofson *
26 * andreas.fritiofson@gmail.com *
27 ***************************************************************************/
28
29 #ifdef HAVE_CONFIG_H
30 #include "config.h"
31 #endif
32
33 #include <helper/align.h>
34 #include <helper/nvp.h>
35 #include <helper/time_support.h>
36 #include <jtag/jtag.h>
37 #include <flash/nor/core.h>
38
39 #include "target.h"
40 #include "target_type.h"
41 #include "target_request.h"
42 #include "breakpoints.h"
43 #include "register.h"
44 #include "trace.h"
45 #include "image.h"
46 #include "rtos/rtos.h"
47 #include "transport/transport.h"
48 #include "arm_cti.h"
49 #include "smp.h"
50 #include "semihosting_common.h"
51
/* default halt wait timeout (ms) */
#define DEFAULT_HALT_TIMEOUT 5000

/* Forward declarations for helpers defined later in this file. */

/* Fallback read/write implementations built on the target's block accessors. */
static int target_read_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, uint8_t *buffer);
static int target_write_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, const uint8_t *buffer);
/* Jim/Tcl bridges between Tcl lists and target memory. */
static int target_array2mem(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_mem2array(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_register_user_commands(struct command_context *cmd_ctx);
/* Default (no-op style) GDB file-I/O hooks for targets without their own. */
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info);
static int target_gdb_fileio_end_default(struct target *target, int retcode,
		int fileio_errno, bool ctrl_c);
68
/* Registry of every target driver compiled into OpenOCD, terminated by a
 * NULL sentinel.  Each entry references a driver's struct target_type
 * defined in its own src/target/<driver>.c file. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&xtensa_chip_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&esp32_target,
	&esp32s2_target,
	&esp32s3_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&armv8r_target,
	&mips_mips64_target,
	NULL,
};
110
/* Head of the singly linked list of all configured targets. */
struct target *all_targets;
/* Registered event and timer callbacks, serviced from the main loop. */
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
/* Absolute time (ms) at which the earliest timer callback is next due. */
static int64_t target_timer_next_event_value;
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
/* NOTE(review): presumably the default SMP list for targets created
 * without an "smp" group — confirm against target creation code. */
static LIST_HEAD(empty_smp_targets);
119
/* Boolean-like assert/deassert states used by reset-style commands. */
enum nvp_assert {
	NVP_DEASSERT,
	NVP_ASSERT,
};

/* Name table for enum nvp_assert.  Entry order matters for value->name
 * lookups: the first entry with a matching value wins, so the canonical
 * "assert"/"deassert" spellings precede the T/F aliases. */
static const struct nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};
134
/* Maps ERROR_TARGET_* codes to short printable names; consumed by
 * target_strerror_safe() below.  NULL-name entry terminates the table. */
static const struct nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
149
150 static const char *target_strerror_safe(int err)
151 {
152 const struct nvp *n;
153
154 n = nvp_value2name(nvp_error_target, err);
155 if (!n->name)
156 return "unknown";
157 else
158 return n->name;
159 }
160
/* Name table for enum target_event, consumed by target_event_name() and
 * (presumably) by Tcl "-event" configuration lookups — verify callers. */
static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	/* Reset sequence events, in the order they fire. */
	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	/* User-defined semihosting command events (0x100..0x107). */
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X100, .name = "semihosting-user-cmd-0x100" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X101, .name = "semihosting-user-cmd-0x101" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X102, .name = "semihosting-user-cmd-0x102" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X103, .name = "semihosting-user-cmd-0x103" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X104, .name = "semihosting-user-cmd-0x104" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X105, .name = "semihosting-user-cmd-0x105" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X106, .name = "semihosting-user-cmd-0x106" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X107, .name = "semihosting-user-cmd-0x107" },

	{ .name = NULL, .value = -1 }
};
212
/* Name table for enum target_state, consumed by target_state_name(). */
static const struct nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};
221
/* Name table for enum target_debug_reason, consumed by debug_reason_name(). */
static const struct nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
234
/* Name table for target endianness.  Entry order matters for value->name
 * lookups: "big"/"little" precede the "be"/"le" aliases. */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};
242
/* Name table for enum target_reset_mode, consumed by
 * target_reset_mode_name() and target_process_reset(). */
static const struct nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};
250
251 const char *debug_reason_name(struct target *t)
252 {
253 const char *cp;
254
255 cp = nvp_value2name(nvp_target_debug_reason,
256 t->debug_reason)->name;
257 if (!cp) {
258 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
259 cp = "(*BUG*unknown*BUG*)";
260 }
261 return cp;
262 }
263
264 const char *target_state_name(struct target *t)
265 {
266 const char *cp;
267 cp = nvp_value2name(nvp_target_state, t->state)->name;
268 if (!cp) {
269 LOG_ERROR("Invalid target state: %d", (int)(t->state));
270 cp = "(*BUG*unknown*BUG*)";
271 }
272
273 if (!target_was_examined(t) && t->defer_examine)
274 cp = "examine deferred";
275
276 return cp;
277 }
278
279 const char *target_event_name(enum target_event event)
280 {
281 const char *cp;
282 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
283 if (!cp) {
284 LOG_ERROR("Invalid target event: %d", (int)(event));
285 cp = "(*BUG*unknown*BUG*)";
286 }
287 return cp;
288 }
289
290 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
291 {
292 const char *cp;
293 cp = nvp_value2name(nvp_reset_modes, reset_mode)->name;
294 if (!cp) {
295 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
296 cp = "(*BUG*unknown*BUG*)";
297 }
298 return cp;
299 }
300
301 /* determine the number of the new target */
302 static int new_target_number(void)
303 {
304 struct target *t;
305 int x;
306
307 /* number is 0 based */
308 x = -1;
309 t = all_targets;
310 while (t) {
311 if (x < t->target_number)
312 x = t->target_number;
313 t = t->next;
314 }
315 return x + 1;
316 }
317
318 static void append_to_list_all_targets(struct target *target)
319 {
320 struct target **t = &all_targets;
321
322 while (*t)
323 t = &((*t)->next);
324 *t = target;
325 }
326
327 /* read a uint64_t from a buffer in target memory endianness */
328 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
329 {
330 if (target->endianness == TARGET_LITTLE_ENDIAN)
331 return le_to_h_u64(buffer);
332 else
333 return be_to_h_u64(buffer);
334 }
335
336 /* read a uint32_t from a buffer in target memory endianness */
337 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
338 {
339 if (target->endianness == TARGET_LITTLE_ENDIAN)
340 return le_to_h_u32(buffer);
341 else
342 return be_to_h_u32(buffer);
343 }
344
345 /* read a uint24_t from a buffer in target memory endianness */
346 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
347 {
348 if (target->endianness == TARGET_LITTLE_ENDIAN)
349 return le_to_h_u24(buffer);
350 else
351 return be_to_h_u24(buffer);
352 }
353
354 /* read a uint16_t from a buffer in target memory endianness */
355 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
356 {
357 if (target->endianness == TARGET_LITTLE_ENDIAN)
358 return le_to_h_u16(buffer);
359 else
360 return be_to_h_u16(buffer);
361 }
362
363 /* write a uint64_t to a buffer in target memory endianness */
364 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
365 {
366 if (target->endianness == TARGET_LITTLE_ENDIAN)
367 h_u64_to_le(buffer, value);
368 else
369 h_u64_to_be(buffer, value);
370 }
371
372 /* write a uint32_t to a buffer in target memory endianness */
373 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
374 {
375 if (target->endianness == TARGET_LITTLE_ENDIAN)
376 h_u32_to_le(buffer, value);
377 else
378 h_u32_to_be(buffer, value);
379 }
380
381 /* write a uint24_t to a buffer in target memory endianness */
382 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
383 {
384 if (target->endianness == TARGET_LITTLE_ENDIAN)
385 h_u24_to_le(buffer, value);
386 else
387 h_u24_to_be(buffer, value);
388 }
389
390 /* write a uint16_t to a buffer in target memory endianness */
391 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
392 {
393 if (target->endianness == TARGET_LITTLE_ENDIAN)
394 h_u16_to_le(buffer, value);
395 else
396 h_u16_to_be(buffer, value);
397 }
398
/* write a uint8_t to a buffer in target memory endianness
 * (a single byte is endianness-independent; the target parameter keeps
 * the signature parallel to the wider setters) */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	*buffer = value;
}
404
/* Decode an array of uint64_t values from a target-endian buffer into
 * a host-order array.  (Original comment said "write"; this reads.) */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		dstbuf[idx] = target_buffer_get_u64(target, &buffer[idx * 8]);
}
412
/* Decode an array of uint32_t values from a target-endian buffer into
 * a host-order array. */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		dstbuf[idx] = target_buffer_get_u32(target, &buffer[idx * 4]);
}
420
/* Decode an array of uint16_t values from a target-endian buffer into
 * a host-order array. */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		dstbuf[idx] = target_buffer_get_u16(target, &buffer[idx * 2]);
}
428
/* Encode a host-order uint64_t array into a buffer in target endianness. */
void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u64(target, &buffer[idx * 8], srcbuf[idx]);
}
436
/* Encode a host-order uint32_t array into a buffer in target endianness. */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u32(target, &buffer[idx * 4], srcbuf[idx]);
}
444
/* Encode a host-order uint16_t array into a buffer in target endianness. */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u16(target, &buffer[idx * 2], srcbuf[idx]);
}
452
453 /* return a pointer to a configured target; id is name or number */
454 struct target *get_target(const char *id)
455 {
456 struct target *target;
457
458 /* try as tcltarget name */
459 for (target = all_targets; target; target = target->next) {
460 if (!target_name(target))
461 continue;
462 if (strcmp(id, target_name(target)) == 0)
463 return target;
464 }
465
466 /* It's OK to remove this fallback sometime after August 2010 or so */
467
468 /* no match, try as number */
469 unsigned num;
470 if (parse_uint(id, &num) != ERROR_OK)
471 return NULL;
472
473 for (target = all_targets; target; target = target->next) {
474 if (target->target_number == (int)num) {
475 LOG_WARNING("use '%s' as target identifier, not '%u'",
476 target_name(target), num);
477 return target;
478 }
479 }
480
481 return NULL;
482 }
483
484 /* returns a pointer to the n-th configured target */
485 struct target *get_target_by_num(int num)
486 {
487 struct target *target = all_targets;
488
489 while (target) {
490 if (target->target_number == num)
491 return target;
492 target = target->next;
493 }
494
495 return NULL;
496 }
497
/* Return the current target, aborting the process if none is set —
 * a missing current target indicates internal state corruption. */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_current_target_or_null(cmd_ctx);

	if (!target) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return target;
}
509
510 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
511 {
512 return cmd_ctx->current_target_override
513 ? cmd_ctx->current_target_override
514 : cmd_ctx->current_target;
515 }
516
517 int target_poll(struct target *target)
518 {
519 int retval;
520
521 /* We can't poll until after examine */
522 if (!target_was_examined(target)) {
523 /* Fail silently lest we pollute the log */
524 return ERROR_FAIL;
525 }
526
527 retval = target->type->poll(target);
528 if (retval != ERROR_OK)
529 return retval;
530
531 if (target->halt_issued) {
532 if (target->state == TARGET_HALTED)
533 target->halt_issued = false;
534 else {
535 int64_t t = timeval_ms() - target->halt_issued_time;
536 if (t > DEFAULT_HALT_TIMEOUT) {
537 target->halt_issued = false;
538 LOG_INFO("Halt timed out, wake up GDB.");
539 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
540 }
541 }
542 }
543
544 return ERROR_OK;
545 }
546
547 int target_halt(struct target *target)
548 {
549 int retval;
550 /* We can't poll until after examine */
551 if (!target_was_examined(target)) {
552 LOG_ERROR("Target not examined yet");
553 return ERROR_FAIL;
554 }
555
556 retval = target->type->halt(target);
557 if (retval != ERROR_OK)
558 return retval;
559
560 target->halt_issued = true;
561 target->halt_issued_time = timeval_ms();
562
563 return ERROR_OK;
564 }
565
566 /**
567 * Make the target (re)start executing using its saved execution
568 * context (possibly with some modifications).
569 *
570 * @param target Which target should start executing.
571 * @param current True to use the target's saved program counter instead
572 * of the address parameter
573 * @param address Optionally used as the program counter.
574 * @param handle_breakpoints True iff breakpoints at the resumption PC
575 * should be skipped. (For example, maybe execution was stopped by
576 * such a breakpoint, in which case it would be counterproductive to
577 * let it re-trigger.
578 * @param debug_execution False if all working areas allocated by OpenOCD
579 * should be released and/or restored to their original contents.
580 * (This would for example be true to run some downloaded "helper"
581 * algorithm code, which resides in one such working buffer and uses
582 * another for data storage.)
583 *
584 * @todo Resolve the ambiguity about what the "debug_execution" flag
585 * signifies. For example, Target implementations don't agree on how
586 * it relates to invalidation of the register cache, or to whether
587 * breakpoints and watchpoints should be enabled. (It would seem wrong
588 * to enable breakpoints when running downloaded "helper" algorithms
589 * (debug_execution true), since the breakpoints would be set to match
590 * target firmware being debugged, not the helper algorithm.... and
591 * enabling them could cause such helpers to malfunction (for example,
592 * by overwriting data with a breakpoint instruction. On the other
593 * hand the infrastructure for running such helpers might use this
594 * procedure but rely on hardware breakpoint to detect termination.)
595 */
596 int target_resume(struct target *target, int current, target_addr_t address,
597 int handle_breakpoints, int debug_execution)
598 {
599 int retval;
600
601 /* We can't poll until after examine */
602 if (!target_was_examined(target)) {
603 LOG_ERROR("Target not examined yet");
604 return ERROR_FAIL;
605 }
606
607 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
608
609 /* note that resume *must* be asynchronous. The CPU can halt before
610 * we poll. The CPU can even halt at the current PC as a result of
611 * a software breakpoint being inserted by (a bug?) the application.
612 */
613 /*
614 * resume() triggers the event 'resumed'. The execution of TCL commands
615 * in the event handler causes the polling of targets. If the target has
616 * already halted for a breakpoint, polling will run the 'halted' event
617 * handler before the pending 'resumed' handler.
618 * Disable polling during resume() to guarantee the execution of handlers
619 * in the correct order.
620 */
621 bool save_poll_mask = jtag_poll_mask();
622 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
623 jtag_poll_unmask(save_poll_mask);
624
625 if (retval != ERROR_OK)
626 return retval;
627
628 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
629
630 return retval;
631 }
632
633 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
634 {
635 char buf[100];
636 int retval;
637 const struct nvp *n;
638 n = nvp_value2name(nvp_reset_modes, reset_mode);
639 if (!n->name) {
640 LOG_ERROR("invalid reset mode");
641 return ERROR_FAIL;
642 }
643
644 struct target *target;
645 for (target = all_targets; target; target = target->next)
646 target_call_reset_callbacks(target, reset_mode);
647
648 /* disable polling during reset to make reset event scripts
649 * more predictable, i.e. dr/irscan & pathmove in events will
650 * not have JTAG operations injected into the middle of a sequence.
651 */
652 bool save_poll_mask = jtag_poll_mask();
653
654 sprintf(buf, "ocd_process_reset %s", n->name);
655 retval = Jim_Eval(cmd->ctx->interp, buf);
656
657 jtag_poll_unmask(save_poll_mask);
658
659 if (retval != JIM_OK) {
660 Jim_MakeErrorMessage(cmd->ctx->interp);
661 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
662 return ERROR_FAIL;
663 }
664
665 /* We want any events to be processed before the prompt */
666 retval = target_call_timer_callbacks_now();
667
668 for (target = all_targets; target; target = target->next) {
669 target->type->check_reset(target);
670 target->running_alg = false;
671 }
672
673 return retval;
674 }
675
/* Default virt2phys hook: identity mapping (virtual == physical). */
static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
682
/* Default mmu hook: report the MMU as disabled. */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
688
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static inline void target_reset_examined(struct target *target)
{
	target->examined = false;
}
697
/* Default examine hook: nothing to probe, just mark the target examined. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
703
/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
709
710 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
711 * Keep in sync */
712 int target_examine_one(struct target *target)
713 {
714 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
715
716 int retval = target->type->examine(target);
717 if (retval != ERROR_OK) {
718 target_reset_examined(target);
719 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
720 return retval;
721 }
722
723 target_set_examined(target);
724 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
725
726 return ERROR_OK;
727 }
728
729 static int jtag_enable_callback(enum jtag_event event, void *priv)
730 {
731 struct target *target = priv;
732
733 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
734 return ERROR_OK;
735
736 jtag_unregister_event_callback(jtag_enable_callback, target);
737
738 return target_examine_one(target);
739 }
740
741 /* Targets that correctly implement init + examine, i.e.
742 * no communication with target during init:
743 *
744 * XScale
745 */
746 int target_examine(void)
747 {
748 int retval = ERROR_OK;
749 struct target *target;
750
751 for (target = all_targets; target; target = target->next) {
752 /* defer examination, but don't skip it */
753 if (!target->tap->enabled) {
754 jtag_register_event_callback(jtag_enable_callback,
755 target);
756 continue;
757 }
758
759 if (target->defer_examine)
760 continue;
761
762 int retval2 = target_examine_one(target);
763 if (retval2 != ERROR_OK) {
764 LOG_WARNING("target %s examination failed", target_name(target));
765 retval = retval2;
766 }
767 }
768 return retval;
769 }
770
/* Return the name of the target's driver (its target_type). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
775
776 static int target_soft_reset_halt(struct target *target)
777 {
778 if (!target_was_examined(target)) {
779 LOG_ERROR("Target not examined yet");
780 return ERROR_FAIL;
781 }
782 if (!target->type->soft_reset_halt) {
783 LOG_ERROR("Target %s does not support soft_reset_halt",
784 target_name(target));
785 return ERROR_FAIL;
786 }
787 return target->type->soft_reset_halt(target);
788 }
789
790 /**
791 * Downloads a target-specific native code algorithm to the target,
792 * and executes it. * Note that some targets may need to set up, enable,
793 * and tear down a breakpoint (hard or * soft) to detect algorithm
794 * termination, while others may support lower overhead schemes where
795 * soft breakpoints embedded in the algorithm automatically terminate the
796 * algorithm.
797 *
798 * @param target used to run the algorithm
799 * @param num_mem_params
800 * @param mem_params
801 * @param num_reg_params
802 * @param reg_param
803 * @param entry_point
804 * @param exit_point
805 * @param timeout_ms
806 * @param arch_info target-specific description of the algorithm.
807 */
808 int target_run_algorithm(struct target *target,
809 int num_mem_params, struct mem_param *mem_params,
810 int num_reg_params, struct reg_param *reg_param,
811 target_addr_t entry_point, target_addr_t exit_point,
812 unsigned int timeout_ms, void *arch_info)
813 {
814 int retval = ERROR_FAIL;
815
816 if (!target_was_examined(target)) {
817 LOG_ERROR("Target not examined yet");
818 goto done;
819 }
820 if (!target->type->run_algorithm) {
821 LOG_ERROR("Target type '%s' does not support %s",
822 target_type_name(target), __func__);
823 goto done;
824 }
825
826 target->running_alg = true;
827 retval = target->type->run_algorithm(target,
828 num_mem_params, mem_params,
829 num_reg_params, reg_param,
830 entry_point, exit_point, timeout_ms, arch_info);
831 target->running_alg = false;
832
833 done:
834 return retval;
835 }
836
837 /**
838 * Executes a target-specific native code algorithm and leaves it running.
839 *
840 * @param target used to run the algorithm
841 * @param num_mem_params
842 * @param mem_params
843 * @param num_reg_params
844 * @param reg_params
845 * @param entry_point
846 * @param exit_point
847 * @param arch_info target-specific description of the algorithm.
848 */
849 int target_start_algorithm(struct target *target,
850 int num_mem_params, struct mem_param *mem_params,
851 int num_reg_params, struct reg_param *reg_params,
852 target_addr_t entry_point, target_addr_t exit_point,
853 void *arch_info)
854 {
855 int retval = ERROR_FAIL;
856
857 if (!target_was_examined(target)) {
858 LOG_ERROR("Target not examined yet");
859 goto done;
860 }
861 if (!target->type->start_algorithm) {
862 LOG_ERROR("Target type '%s' does not support %s",
863 target_type_name(target), __func__);
864 goto done;
865 }
866 if (target->running_alg) {
867 LOG_ERROR("Target is already running an algorithm");
868 goto done;
869 }
870
871 target->running_alg = true;
872 retval = target->type->start_algorithm(target,
873 num_mem_params, mem_params,
874 num_reg_params, reg_params,
875 entry_point, exit_point, arch_info);
876
877 done:
878 return retval;
879 }
880
881 /**
882 * Waits for an algorithm started with target_start_algorithm() to complete.
883 *
884 * @param target used to run the algorithm
885 * @param num_mem_params
886 * @param mem_params
887 * @param num_reg_params
888 * @param reg_params
889 * @param exit_point
890 * @param timeout_ms
891 * @param arch_info target-specific description of the algorithm.
892 */
893 int target_wait_algorithm(struct target *target,
894 int num_mem_params, struct mem_param *mem_params,
895 int num_reg_params, struct reg_param *reg_params,
896 target_addr_t exit_point, unsigned int timeout_ms,
897 void *arch_info)
898 {
899 int retval = ERROR_FAIL;
900
901 if (!target->type->wait_algorithm) {
902 LOG_ERROR("Target type '%s' does not support %s",
903 target_type_name(target), __func__);
904 goto done;
905 }
906 if (!target->running_alg) {
907 LOG_ERROR("Target is not running an algorithm");
908 goto done;
909 }
910
911 retval = target->type->wait_algorithm(target,
912 num_mem_params, mem_params,
913 num_reg_params, reg_params,
914 exit_point, timeout_ms, arch_info);
915 if (retval != ERROR_TARGET_TIMEOUT)
916 target->running_alg = false;
917
918 done:
919 return retval;
920 }
921
922 /**
923 * Streams data to a circular buffer on target intended for consumption by code
924 * running asynchronously on target.
925 *
926 * This is intended for applications where target-specific native code runs
927 * on the target, receives data from the circular buffer, does something with
928 * it (most likely writing it to a flash memory), and advances the circular
929 * buffer pointer.
930 *
931 * This assumes that the helper algorithm has already been loaded to the target,
932 * but has not been started yet. Given memory and register parameters are passed
933 * to the algorithm.
934 *
935 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
936 * following format:
937 *
938 * [buffer_start + 0, buffer_start + 4):
939 * Write Pointer address (aka head). Written and updated by this
940 * routine when new data is written to the circular buffer.
941 * [buffer_start + 4, buffer_start + 8):
942 * Read Pointer address (aka tail). Updated by code running on the
943 * target after it consumes data.
944 * [buffer_start + 8, buffer_start + buffer_size):
945 * Circular buffer contents.
946 *
947 * See contrib/loaders/flash/stm32f1x.S for an example.
948 *
949 * @param target used to run the algorithm
950 * @param buffer address on the host where data to be sent is located
951 * @param count number of blocks to send
952 * @param block_size size in bytes of each block
953 * @param num_mem_params count of memory-based params to pass to algorithm
954 * @param mem_params memory-based params to pass to algorithm
955 * @param num_reg_params count of register-based params to pass to algorithm
956 * @param reg_params memory-based params to pass to algorithm
957 * @param buffer_start address on the target of the circular buffer structure
958 * @param buffer_size size of the circular buffer structure
959 * @param entry_point address on the target to execute to start the algorithm
960 * @param exit_point address at which to set a breakpoint to catch the
961 * end of the algorithm; can be 0 if target triggers a breakpoint itself
962 * @param arch_info
963 */
964
int target_run_flash_async_algorithm(struct target *target,
		const uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	/* Remember the start of the host buffer; used only for progress logging. */
	const uint8_t *buffer_orig = buffer;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	/* Empty FIFO: both pointers start at the beginning of the data area. */
	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(IS_PWR_OF_2(block_size));

	/* Initialize the two control words on the target before starting it. */
	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target and let it idle while writing the first chunk */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash write algorithm");
		return retval;
	}

	while (count > 0) {

		retval = target_read_u32(target, rp_addr, &rp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get read pointer");
			break;
		}

		LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
			(size_t) (buffer - buffer_orig), count, wp, rp);

		/* By protocol, the target stores 0 into rp to signal an abort. */
		if (rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		/* Sanity-check rp: it must stay block-aligned inside the data area. */
		if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. Make sure to not fill it completely,
		 * because that would make wp == rp and that's the empty condition. */
		uint32_t thisrun_bytes;
		if (rp > wp)
			thisrun_bytes = rp - wp - block_size;
		else if (rp > fifo_start_addr)
			thisrun_bytes = fifo_end_addr - wp;
		else
			thisrun_bytes = fifo_end_addr - wp - block_size;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * programming. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(2);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a stellaris using the new ICDI interface */
			if (timeout++ >= 2500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to write */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Force end of large blocks to be word aligned */
		/* NOTE(review): the adjustment uses rp while the data lands at wp;
		 * harmless when block_size >= 4 (both pointers are then congruent
		 * mod 4) -- confirm intent for block sizes of 1 or 2. */
		if (thisrun_bytes >= 16)
			thisrun_bytes -= (rp + thisrun_bytes) & 0x03;

		/* Write data to fifo */
		retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap write pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		wp += thisrun_bytes;
		if (wp >= fifo_end_addr)
			wp = fifo_start_addr;

		/* Store updated write pointer to target */
		retval = target_write_u32(target, wp_addr, wp);
		if (retval != ERROR_OK)
			break;

		/* Avoid GDB timeouts */
		keep_alive();
	}

	if (retval != ERROR_OK) {
		/* abort flash write algorithm on target */
		target_write_u32(target, wp_addr, 0);
	}

	/* Let the algorithm drain the FIFO and exit, then collect its status. */
	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash write algorithm");
		retval = retval2;
	}

	if (retval == ERROR_OK) {
		/* check if algorithm set rp = 0 after fifo writer loop finished */
		retval = target_read_u32(target, rp_addr, &rp);
		if (retval == ERROR_OK && rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
		}
	}

	return retval;
}
1117
1118 int target_run_read_async_algorithm(struct target *target,
1119 uint8_t *buffer, uint32_t count, int block_size,
1120 int num_mem_params, struct mem_param *mem_params,
1121 int num_reg_params, struct reg_param *reg_params,
1122 uint32_t buffer_start, uint32_t buffer_size,
1123 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1124 {
1125 int retval;
1126 int timeout = 0;
1127
1128 const uint8_t *buffer_orig = buffer;
1129
1130 /* Set up working area. First word is write pointer, second word is read pointer,
1131 * rest is fifo data area. */
1132 uint32_t wp_addr = buffer_start;
1133 uint32_t rp_addr = buffer_start + 4;
1134 uint32_t fifo_start_addr = buffer_start + 8;
1135 uint32_t fifo_end_addr = buffer_start + buffer_size;
1136
1137 uint32_t wp = fifo_start_addr;
1138 uint32_t rp = fifo_start_addr;
1139
1140 /* validate block_size is 2^n */
1141 assert(IS_PWR_OF_2(block_size));
1142
1143 retval = target_write_u32(target, wp_addr, wp);
1144 if (retval != ERROR_OK)
1145 return retval;
1146 retval = target_write_u32(target, rp_addr, rp);
1147 if (retval != ERROR_OK)
1148 return retval;
1149
1150 /* Start up algorithm on target */
1151 retval = target_start_algorithm(target, num_mem_params, mem_params,
1152 num_reg_params, reg_params,
1153 entry_point,
1154 exit_point,
1155 arch_info);
1156
1157 if (retval != ERROR_OK) {
1158 LOG_ERROR("error starting target flash read algorithm");
1159 return retval;
1160 }
1161
1162 while (count > 0) {
1163 retval = target_read_u32(target, wp_addr, &wp);
1164 if (retval != ERROR_OK) {
1165 LOG_ERROR("failed to get write pointer");
1166 break;
1167 }
1168
1169 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1170 (size_t)(buffer - buffer_orig), count, wp, rp);
1171
1172 if (wp == 0) {
1173 LOG_ERROR("flash read algorithm aborted by target");
1174 retval = ERROR_FLASH_OPERATION_FAILED;
1175 break;
1176 }
1177
1178 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1179 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1180 break;
1181 }
1182
1183 /* Count the number of bytes available in the fifo without
1184 * crossing the wrap around. */
1185 uint32_t thisrun_bytes;
1186 if (wp >= rp)
1187 thisrun_bytes = wp - rp;
1188 else
1189 thisrun_bytes = fifo_end_addr - rp;
1190
1191 if (thisrun_bytes == 0) {
1192 /* Throttle polling a bit if transfer is (much) faster than flash
1193 * reading. The exact delay shouldn't matter as long as it's
1194 * less than buffer size / flash speed. This is very unlikely to
1195 * run when using high latency connections such as USB. */
1196 alive_sleep(2);
1197
1198 /* to stop an infinite loop on some targets check and increment a timeout
1199 * this issue was observed on a stellaris using the new ICDI interface */
1200 if (timeout++ >= 2500) {
1201 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1202 return ERROR_FLASH_OPERATION_FAILED;
1203 }
1204 continue;
1205 }
1206
1207 /* Reset our timeout */
1208 timeout = 0;
1209
1210 /* Limit to the amount of data we actually want to read */
1211 if (thisrun_bytes > count * block_size)
1212 thisrun_bytes = count * block_size;
1213
1214 /* Force end of large blocks to be word aligned */
1215 if (thisrun_bytes >= 16)
1216 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1217
1218 /* Read data from fifo */
1219 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1220 if (retval != ERROR_OK)
1221 break;
1222
1223 /* Update counters and wrap write pointer */
1224 buffer += thisrun_bytes;
1225 count -= thisrun_bytes / block_size;
1226 rp += thisrun_bytes;
1227 if (rp >= fifo_end_addr)
1228 rp = fifo_start_addr;
1229
1230 /* Store updated write pointer to target */
1231 retval = target_write_u32(target, rp_addr, rp);
1232 if (retval != ERROR_OK)
1233 break;
1234
1235 /* Avoid GDB timeouts */
1236 keep_alive();
1237
1238 }
1239
1240 if (retval != ERROR_OK) {
1241 /* abort flash write algorithm on target */
1242 target_write_u32(target, rp_addr, 0);
1243 }
1244
1245 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1246 num_reg_params, reg_params,
1247 exit_point,
1248 10000,
1249 arch_info);
1250
1251 if (retval2 != ERROR_OK) {
1252 LOG_ERROR("error waiting for target flash write algorithm");
1253 retval = retval2;
1254 }
1255
1256 if (retval == ERROR_OK) {
1257 /* check if algorithm set wp = 0 after fifo writer loop finished */
1258 retval = target_read_u32(target, wp_addr, &wp);
1259 if (retval == ERROR_OK && wp == 0) {
1260 LOG_ERROR("flash read algorithm aborted by target");
1261 retval = ERROR_FLASH_OPERATION_FAILED;
1262 }
1263 }
1264
1265 return retval;
1266 }
1267
1268 int target_read_memory(struct target *target,
1269 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1270 {
1271 if (!target_was_examined(target)) {
1272 LOG_ERROR("Target not examined yet");
1273 return ERROR_FAIL;
1274 }
1275 if (!target->type->read_memory) {
1276 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1277 return ERROR_FAIL;
1278 }
1279 return target->type->read_memory(target, address, size, count, buffer);
1280 }
1281
1282 int target_read_phys_memory(struct target *target,
1283 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1284 {
1285 if (!target_was_examined(target)) {
1286 LOG_ERROR("Target not examined yet");
1287 return ERROR_FAIL;
1288 }
1289 if (!target->type->read_phys_memory) {
1290 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1291 return ERROR_FAIL;
1292 }
1293 return target->type->read_phys_memory(target, address, size, count, buffer);
1294 }
1295
1296 int target_write_memory(struct target *target,
1297 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1298 {
1299 if (!target_was_examined(target)) {
1300 LOG_ERROR("Target not examined yet");
1301 return ERROR_FAIL;
1302 }
1303 if (!target->type->write_memory) {
1304 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1305 return ERROR_FAIL;
1306 }
1307 return target->type->write_memory(target, address, size, count, buffer);
1308 }
1309
1310 int target_write_phys_memory(struct target *target,
1311 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1312 {
1313 if (!target_was_examined(target)) {
1314 LOG_ERROR("Target not examined yet");
1315 return ERROR_FAIL;
1316 }
1317 if (!target->type->write_phys_memory) {
1318 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1319 return ERROR_FAIL;
1320 }
1321 return target->type->write_phys_memory(target, address, size, count, buffer);
1322 }
1323
1324 int target_add_breakpoint(struct target *target,
1325 struct breakpoint *breakpoint)
1326 {
1327 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1328 LOG_TARGET_ERROR(target, "not halted (add breakpoint)");
1329 return ERROR_TARGET_NOT_HALTED;
1330 }
1331 return target->type->add_breakpoint(target, breakpoint);
1332 }
1333
1334 int target_add_context_breakpoint(struct target *target,
1335 struct breakpoint *breakpoint)
1336 {
1337 if (target->state != TARGET_HALTED) {
1338 LOG_TARGET_ERROR(target, "not halted (add context breakpoint)");
1339 return ERROR_TARGET_NOT_HALTED;
1340 }
1341 return target->type->add_context_breakpoint(target, breakpoint);
1342 }
1343
1344 int target_add_hybrid_breakpoint(struct target *target,
1345 struct breakpoint *breakpoint)
1346 {
1347 if (target->state != TARGET_HALTED) {
1348 LOG_TARGET_ERROR(target, "not halted (add hybrid breakpoint)");
1349 return ERROR_TARGET_NOT_HALTED;
1350 }
1351 return target->type->add_hybrid_breakpoint(target, breakpoint);
1352 }
1353
1354 int target_remove_breakpoint(struct target *target,
1355 struct breakpoint *breakpoint)
1356 {
1357 return target->type->remove_breakpoint(target, breakpoint);
1358 }
1359
1360 int target_add_watchpoint(struct target *target,
1361 struct watchpoint *watchpoint)
1362 {
1363 if (target->state != TARGET_HALTED) {
1364 LOG_TARGET_ERROR(target, "not halted (add watchpoint)");
1365 return ERROR_TARGET_NOT_HALTED;
1366 }
1367 return target->type->add_watchpoint(target, watchpoint);
1368 }
1369 int target_remove_watchpoint(struct target *target,
1370 struct watchpoint *watchpoint)
1371 {
1372 return target->type->remove_watchpoint(target, watchpoint);
1373 }
1374 int target_hit_watchpoint(struct target *target,
1375 struct watchpoint **hit_watchpoint)
1376 {
1377 if (target->state != TARGET_HALTED) {
1378 LOG_TARGET_ERROR(target, "not halted (hit watchpoint)");
1379 return ERROR_TARGET_NOT_HALTED;
1380 }
1381
1382 if (!target->type->hit_watchpoint) {
1383 /* For backward compatible, if hit_watchpoint is not implemented,
1384 * return ERROR_FAIL such that gdb_server will not take the nonsense
1385 * information. */
1386 return ERROR_FAIL;
1387 }
1388
1389 return target->type->hit_watchpoint(target, hit_watchpoint);
1390 }
1391
1392 const char *target_get_gdb_arch(struct target *target)
1393 {
1394 if (!target->type->get_gdb_arch)
1395 return NULL;
1396 return target->type->get_gdb_arch(target);
1397 }
1398
1399 int target_get_gdb_reg_list(struct target *target,
1400 struct reg **reg_list[], int *reg_list_size,
1401 enum target_register_class reg_class)
1402 {
1403 int result = ERROR_FAIL;
1404
1405 if (!target_was_examined(target)) {
1406 LOG_ERROR("Target not examined yet");
1407 goto done;
1408 }
1409
1410 result = target->type->get_gdb_reg_list(target, reg_list,
1411 reg_list_size, reg_class);
1412
1413 done:
1414 if (result != ERROR_OK) {
1415 *reg_list = NULL;
1416 *reg_list_size = 0;
1417 }
1418 return result;
1419 }
1420
1421 int target_get_gdb_reg_list_noread(struct target *target,
1422 struct reg **reg_list[], int *reg_list_size,
1423 enum target_register_class reg_class)
1424 {
1425 if (target->type->get_gdb_reg_list_noread &&
1426 target->type->get_gdb_reg_list_noread(target, reg_list,
1427 reg_list_size, reg_class) == ERROR_OK)
1428 return ERROR_OK;
1429 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1430 }
1431
1432 bool target_supports_gdb_connection(struct target *target)
1433 {
1434 /*
1435 * exclude all the targets that don't provide get_gdb_reg_list
1436 * or that have explicit gdb_max_connection == 0
1437 */
1438 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1439 }
1440
1441 int target_step(struct target *target,
1442 int current, target_addr_t address, int handle_breakpoints)
1443 {
1444 int retval;
1445
1446 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1447
1448 retval = target->type->step(target, current, address, handle_breakpoints);
1449 if (retval != ERROR_OK)
1450 return retval;
1451
1452 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1453
1454 return retval;
1455 }
1456
1457 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1458 {
1459 if (target->state != TARGET_HALTED) {
1460 LOG_TARGET_ERROR(target, "not halted (gdb fileio)");
1461 return ERROR_TARGET_NOT_HALTED;
1462 }
1463 return target->type->get_gdb_fileio_info(target, fileio_info);
1464 }
1465
1466 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1467 {
1468 if (target->state != TARGET_HALTED) {
1469 LOG_TARGET_ERROR(target, "not halted (gdb fileio end)");
1470 return ERROR_TARGET_NOT_HALTED;
1471 }
1472 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1473 }
1474
1475 target_addr_t target_address_max(struct target *target)
1476 {
1477 unsigned bits = target_address_bits(target);
1478 if (sizeof(target_addr_t) * 8 == bits)
1479 return (target_addr_t) -1;
1480 else
1481 return (((target_addr_t) 1) << bits) - 1;
1482 }
1483
1484 unsigned target_address_bits(struct target *target)
1485 {
1486 if (target->type->address_bits)
1487 return target->type->address_bits(target);
1488 return 32;
1489 }
1490
1491 unsigned int target_data_bits(struct target *target)
1492 {
1493 if (target->type->data_bits)
1494 return target->type->data_bits(target);
1495 return 32;
1496 }
1497
1498 static int target_profiling(struct target *target, uint32_t *samples,
1499 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1500 {
1501 return target->type->profiling(target, samples, max_num_samples,
1502 num_samples, seconds);
1503 }
1504
1505 static int handle_target(void *priv);
1506
/* Per-target initialization: run the driver's init hook and install
 * default implementations for optional target_type hooks.
 *
 * NOTE(review): this writes through target->type, so the defaults appear
 * to be installed on the shared type descriptor, affecting all targets of
 * the same type -- confirm that is the intended sharing model. */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (!type->examine)
		type->examine = default_examine;

	if (!type->check_reset)
		type->check_reset = default_check_reset;

	/* Every driver must provide init_target; there is no default. */
	assert(type->init_target);

	int retval = type->init_target(cmd_ctx, target);
	if (retval != ERROR_OK) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (!type->virt2phys) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* Install generic fallbacks for the remaining optional hooks. */
	if (!target->type->read_buffer)
		target->type->read_buffer = target_read_buffer_default;

	if (!target->type->write_buffer)
		target->type->write_buffer = target_write_buffer_default;

	if (!target->type->get_gdb_fileio_info)
		target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;

	if (!target->type->gdb_fileio_end)
		target->type->gdb_fileio_end = target_gdb_fileio_end_default;

	if (!target->type->profiling)
		target->type->profiling = target_profiling_default;

	return ERROR_OK;
}
1566
1567 static int target_init(struct command_context *cmd_ctx)
1568 {
1569 struct target *target;
1570 int retval;
1571
1572 for (target = all_targets; target; target = target->next) {
1573 retval = target_init_one(cmd_ctx, target);
1574 if (retval != ERROR_OK)
1575 return retval;
1576 }
1577
1578 if (!all_targets)
1579 return ERROR_OK;
1580
1581 retval = target_register_user_commands(cmd_ctx);
1582 if (retval != ERROR_OK)
1583 return retval;
1584
1585 retval = target_register_timer_callback(&handle_target,
1586 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1587 if (retval != ERROR_OK)
1588 return retval;
1589
1590 return ERROR_OK;
1591 }
1592
1593 COMMAND_HANDLER(handle_target_init_command)
1594 {
1595 int retval;
1596
1597 if (CMD_ARGC != 0)
1598 return ERROR_COMMAND_SYNTAX_ERROR;
1599
1600 static bool target_initialized;
1601 if (target_initialized) {
1602 LOG_INFO("'target init' has already been called");
1603 return ERROR_OK;
1604 }
1605 target_initialized = true;
1606
1607 retval = command_run_line(CMD_CTX, "init_targets");
1608 if (retval != ERROR_OK)
1609 return retval;
1610
1611 retval = command_run_line(CMD_CTX, "init_target_events");
1612 if (retval != ERROR_OK)
1613 return retval;
1614
1615 retval = command_run_line(CMD_CTX, "init_board");
1616 if (retval != ERROR_OK)
1617 return retval;
1618
1619 LOG_DEBUG("Initializing targets...");
1620 return target_init(CMD_CTX);
1621 }
1622
1623 int target_register_event_callback(int (*callback)(struct target *target,
1624 enum target_event event, void *priv), void *priv)
1625 {
1626 struct target_event_callback **callbacks_p = &target_event_callbacks;
1627
1628 if (!callback)
1629 return ERROR_COMMAND_SYNTAX_ERROR;
1630
1631 if (*callbacks_p) {
1632 while ((*callbacks_p)->next)
1633 callbacks_p = &((*callbacks_p)->next);
1634 callbacks_p = &((*callbacks_p)->next);
1635 }
1636
1637 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1638 (*callbacks_p)->callback = callback;
1639 (*callbacks_p)->priv = priv;
1640 (*callbacks_p)->next = NULL;
1641
1642 return ERROR_OK;
1643 }
1644
1645 int target_register_reset_callback(int (*callback)(struct target *target,
1646 enum target_reset_mode reset_mode, void *priv), void *priv)
1647 {
1648 struct target_reset_callback *entry;
1649
1650 if (!callback)
1651 return ERROR_COMMAND_SYNTAX_ERROR;
1652
1653 entry = malloc(sizeof(struct target_reset_callback));
1654 if (!entry) {
1655 LOG_ERROR("error allocating buffer for reset callback entry");
1656 return ERROR_COMMAND_SYNTAX_ERROR;
1657 }
1658
1659 entry->callback = callback;
1660 entry->priv = priv;
1661 list_add(&entry->list, &target_reset_callback_list);
1662
1663
1664 return ERROR_OK;
1665 }
1666
1667 int target_register_trace_callback(int (*callback)(struct target *target,
1668 size_t len, uint8_t *data, void *priv), void *priv)
1669 {
1670 struct target_trace_callback *entry;
1671
1672 if (!callback)
1673 return ERROR_COMMAND_SYNTAX_ERROR;
1674
1675 entry = malloc(sizeof(struct target_trace_callback));
1676 if (!entry) {
1677 LOG_ERROR("error allocating buffer for trace callback entry");
1678 return ERROR_COMMAND_SYNTAX_ERROR;
1679 }
1680
1681 entry->callback = callback;
1682 entry->priv = priv;
1683 list_add(&entry->list, &target_trace_callback_list);
1684
1685
1686 return ERROR_OK;
1687 }
1688
1689 int target_register_timer_callback(int (*callback)(void *priv),
1690 unsigned int time_ms, enum target_timer_type type, void *priv)
1691 {
1692 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1693
1694 if (!callback)
1695 return ERROR_COMMAND_SYNTAX_ERROR;
1696
1697 if (*callbacks_p) {
1698 while ((*callbacks_p)->next)
1699 callbacks_p = &((*callbacks_p)->next);
1700 callbacks_p = &((*callbacks_p)->next);
1701 }
1702
1703 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1704 (*callbacks_p)->callback = callback;
1705 (*callbacks_p)->type = type;
1706 (*callbacks_p)->time_ms = time_ms;
1707 (*callbacks_p)->removed = false;
1708
1709 (*callbacks_p)->when = timeval_ms() + time_ms;
1710 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1711
1712 (*callbacks_p)->priv = priv;
1713 (*callbacks_p)->next = NULL;
1714
1715 return ERROR_OK;
1716 }
1717
1718 int target_unregister_event_callback(int (*callback)(struct target *target,
1719 enum target_event event, void *priv), void *priv)
1720 {
1721 struct target_event_callback **p = &target_event_callbacks;
1722 struct target_event_callback *c = target_event_callbacks;
1723
1724 if (!callback)
1725 return ERROR_COMMAND_SYNTAX_ERROR;
1726
1727 while (c) {
1728 struct target_event_callback *next = c->next;
1729 if ((c->callback == callback) && (c->priv == priv)) {
1730 *p = next;
1731 free(c);
1732 return ERROR_OK;
1733 } else
1734 p = &(c->next);
1735 c = next;
1736 }
1737
1738 return ERROR_OK;
1739 }
1740
1741 int target_unregister_reset_callback(int (*callback)(struct target *target,
1742 enum target_reset_mode reset_mode, void *priv), void *priv)
1743 {
1744 struct target_reset_callback *entry;
1745
1746 if (!callback)
1747 return ERROR_COMMAND_SYNTAX_ERROR;
1748
1749 list_for_each_entry(entry, &target_reset_callback_list, list) {
1750 if (entry->callback == callback && entry->priv == priv) {
1751 list_del(&entry->list);
1752 free(entry);
1753 break;
1754 }
1755 }
1756
1757 return ERROR_OK;
1758 }
1759
1760 int target_unregister_trace_callback(int (*callback)(struct target *target,
1761 size_t len, uint8_t *data, void *priv), void *priv)
1762 {
1763 struct target_trace_callback *entry;
1764
1765 if (!callback)
1766 return ERROR_COMMAND_SYNTAX_ERROR;
1767
1768 list_for_each_entry(entry, &target_trace_callback_list, list) {
1769 if (entry->callback == callback && entry->priv == priv) {
1770 list_del(&entry->list);
1771 free(entry);
1772 break;
1773 }
1774 }
1775
1776 return ERROR_OK;
1777 }
1778
1779 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1780 {
1781 if (!callback)
1782 return ERROR_COMMAND_SYNTAX_ERROR;
1783
1784 for (struct target_timer_callback *c = target_timer_callbacks;
1785 c; c = c->next) {
1786 if ((c->callback == callback) && (c->priv == priv)) {
1787 c->removed = true;
1788 return ERROR_OK;
1789 }
1790 }
1791
1792 return ERROR_FAIL;
1793 }
1794
1795 int target_call_event_callbacks(struct target *target, enum target_event event)
1796 {
1797 struct target_event_callback *callback = target_event_callbacks;
1798 struct target_event_callback *next_callback;
1799
1800 if (event == TARGET_EVENT_HALTED) {
1801 /* execute early halted first */
1802 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1803 }
1804
1805 LOG_DEBUG("target event %i (%s) for core %s", event,
1806 target_event_name(event),
1807 target_name(target));
1808
1809 target_handle_event(target, event);
1810
1811 while (callback) {
1812 next_callback = callback->next;
1813 callback->callback(target, event, callback->priv);
1814 callback = next_callback;
1815 }
1816
1817 return ERROR_OK;
1818 }
1819
1820 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1821 {
1822 struct target_reset_callback *callback;
1823
1824 LOG_DEBUG("target reset %i (%s)", reset_mode,
1825 nvp_value2name(nvp_reset_modes, reset_mode)->name);
1826
1827 list_for_each_entry(callback, &target_reset_callback_list, list)
1828 callback->callback(target, reset_mode, callback->priv);
1829
1830 return ERROR_OK;
1831 }
1832
1833 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1834 {
1835 struct target_trace_callback *callback;
1836
1837 list_for_each_entry(callback, &target_trace_callback_list, list)
1838 callback->callback(target, len, data, callback->priv);
1839
1840 return ERROR_OK;
1841 }
1842
/* Re-arm a periodic timer callback relative to the current dispatch time
 * (*now), not the previous deadline, so a late dispatch does not cause a
 * burst of catch-up invocations. */
static int target_timer_callback_periodic_restart(
		struct target_timer_callback *cb, int64_t *now)
{
	cb->when = *now + cb->time_ms;
	return ERROR_OK;
}
1849
1850 static int target_call_timer_callback(struct target_timer_callback *cb,
1851 int64_t *now)
1852 {
1853 cb->callback(cb->priv);
1854
1855 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1856 return target_timer_callback_periodic_restart(cb, now);
1857
1858 return target_unregister_timer_callback(cb->callback, cb->priv);
1859 }
1860
/* Dispatch timer callbacks: with checktime != 0 only those whose deadline
 * has passed fire; with checktime == 0 every periodic callback fires
 * unconditionally. Also reaps entries flagged as removed and recomputes
 * the global next-event deadline. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		/* Entries flagged by target_unregister_timer_callback() are
		 * unlinked and freed here, where iteration is safe. */
		if ((*callback)->removed) {
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* Track the soonest upcoming deadline among surviving entries. */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1908
/* Dispatch only the timer callbacks whose deadline has passed. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1913
/* invoke periodic callbacks immediately */
int target_call_timer_callbacks_now(void)
{
	/* checktime == 0: fire all periodic callbacks regardless of deadline. */
	return target_call_timer_callbacks_check_time(0);
}
1919
/* Earliest pending timer deadline, as maintained by the dispatch loop. */
int64_t target_timer_next_event(void)
{
	return target_timer_next_event_value;
}
1924
1925 /* Prints the working area layout for debug purposes */
1926 static void print_wa_layout(struct target *target)
1927 {
1928 struct working_area *c = target->working_areas;
1929
1930 while (c) {
1931 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1932 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1933 c->address, c->address + c->size - 1, c->size);
1934 c = c->next;
1935 }
1936 }
1937
1938 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1939 static void target_split_working_area(struct working_area *area, uint32_t size)
1940 {
1941 assert(area->free); /* Shouldn't split an allocated area */
1942 assert(size <= area->size); /* Caller should guarantee this */
1943
1944 /* Split only if not already the right size */
1945 if (size < area->size) {
1946 struct working_area *new_wa = malloc(sizeof(*new_wa));
1947
1948 if (!new_wa)
1949 return;
1950
1951 new_wa->next = area->next;
1952 new_wa->size = area->size - size;
1953 new_wa->address = area->address + size;
1954 new_wa->backup = NULL;
1955 new_wa->user = NULL;
1956 new_wa->free = true;
1957
1958 area->next = new_wa;
1959 area->size = size;
1960
1961 /* If backup memory was allocated to this area, it has the wrong size
1962 * now so free it and it will be reallocated if/when needed */
1963 free(area->backup);
1964 area->backup = NULL;
1965 }
1966 }
1967
1968 /* Merge all adjacent free areas into one */
1969 static void target_merge_working_areas(struct target *target)
1970 {
1971 struct working_area *c = target->working_areas;
1972
1973 while (c && c->next) {
1974 assert(c->next->address == c->address + c->size); /* This is an invariant */
1975
1976 /* Find two adjacent free areas */
1977 if (c->free && c->next->free) {
1978 /* Merge the last into the first */
1979 c->size += c->next->size;
1980
1981 /* Remove the last */
1982 struct working_area *to_be_freed = c->next;
1983 c->next = c->next->next;
1984 free(to_be_freed->backup);
1985 free(to_be_freed);
1986
1987 /* If backup memory was allocated to the remaining area, it's has
1988 * the wrong size now */
1989 free(c->backup);
1990 c->backup = NULL;
1991 } else {
1992 c = c->next;
1993 }
1994 }
1995 }
1996
/* Allocate 'size' bytes (rounded up to a multiple of 4) of target working
 * memory. On success, *area receives the descriptor of the allocated region
 * and, if the target was configured with backup_working_area, the original
 * target memory contents are saved first. Returns
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no large-enough free region
 * exists; unlike target_alloc_working_area(), no warning is logged. */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		/* Pick the physical or virtual base address to match the current
		 * MMU state; fail if the matching one was not configured. */
		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = ALIGN_DOWN(target->working_area_size, 4); /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* NOTE(review): if the malloc above failed this stores NULL, and
		 * the search below then reports ERROR_TARGET_RESOURCE_NOT_AVAILABLE */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	size = ALIGN_UP(size, 4);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
		size, c->address);

	/* Save the region's original contents so they can be restored when the
	 * area is freed; the backup buffer is (re)allocated on demand. */
	if (target->backup_working_area) {
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2090
2091 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2092 {
2093 int retval;
2094
2095 retval = target_alloc_working_area_try(target, size, area);
2096 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2097 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2098 return retval;
2099
2100 }
2101
2102 static int target_restore_working_area(struct target *target, struct working_area *area)
2103 {
2104 int retval = ERROR_OK;
2105
2106 if (target->backup_working_area && area->backup) {
2107 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2108 if (retval != ERROR_OK)
2109 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2110 area->size, area->address);
2111 }
2112
2113 return retval;
2114 }
2115
/* Restore the area's backup memory, if any, and return the area to the allocation pool.
 * Freeing NULL or an already-free area is a harmless no-op. When 'restore'
 * is nonzero and the restore fails, the area is NOT freed and the error is
 * returned. */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	if (!area || area->free)
		return ERROR_OK;

	int retval = ERROR_OK;
	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
		area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	/* Coalesce adjacent free areas before reporting the new layout */
	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}
2148
/* Return 'area' to the allocation pool, restoring its saved target memory
 * first. Convenience wrapper around target_free_working_area_restore()
 * with restore enabled. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
2153
2154 /* free resources and restore memory, if restoring memory fails,
2155 * free up resources anyway
2156 */
2157 static void target_free_all_working_areas_restore(struct target *target, int restore)
2158 {
2159 struct working_area *c = target->working_areas;
2160
2161 LOG_DEBUG("freeing all working areas");
2162
2163 /* Loop through all areas, restoring the allocated ones and marking them as free */
2164 while (c) {
2165 if (!c->free) {
2166 if (restore)
2167 target_restore_working_area(target, c);
2168 c->free = true;
2169 *c->user = NULL; /* Same as above */
2170 c->user = NULL;
2171 }
2172 c = c->next;
2173 }
2174
2175 /* Run a merge pass to combine all areas into one */
2176 target_merge_working_areas(target);
2177
2178 print_wa_layout(target);
2179 }
2180
2181 void target_free_all_working_areas(struct target *target)
2182 {
2183 target_free_all_working_areas_restore(target, 1);
2184
2185 /* Now we have none or only one working area marked as free */
2186 if (target->working_areas) {
2187 /* Free the last one to allow on-the-fly moving and resizing */
2188 free(target->working_areas->backup);
2189 free(target->working_areas);
2190 target->working_areas = NULL;
2191 }
2192 }
2193
2194 /* Find the largest number of bytes that can be allocated */
2195 uint32_t target_get_working_area_avail(struct target *target)
2196 {
2197 struct working_area *c = target->working_areas;
2198 uint32_t max_size = 0;
2199
2200 if (!c)
2201 return ALIGN_DOWN(target->working_area_size, 4);
2202
2203 while (c) {
2204 if (c->free && max_size < c->size)
2205 max_size = c->size;
2206
2207 c = c->next;
2208 }
2209
2210 return max_size;
2211 }
2212
/* Release every resource owned by 'target' and free the target itself.
 * Only used during shutdown (see target_quit()). The target-type deinit
 * hook runs first so it can still use the other target state. */
static void target_destroy(struct target *target)
{
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	if (target->semihosting)
		free(target->semihosting->basedir);
	free(target->semihosting); /* free(NULL) is a no-op */

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* Drop all event handlers registered for this target */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head, *tmp;

		list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
			list_del(&head->lh);
			head->target->smp = 0;
			free(head);
		}
		/* the shared empty list head is a static object; never free it */
		if (target->smp_targets != &empty_smp_targets)
			free(target->smp_targets);
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2257
2258 void target_quit(void)
2259 {
2260 struct target_event_callback *pe = target_event_callbacks;
2261 while (pe) {
2262 struct target_event_callback *t = pe->next;
2263 free(pe);
2264 pe = t;
2265 }
2266 target_event_callbacks = NULL;
2267
2268 struct target_timer_callback *pt = target_timer_callbacks;
2269 while (pt) {
2270 struct target_timer_callback *t = pt->next;
2271 free(pt);
2272 pt = t;
2273 }
2274 target_timer_callbacks = NULL;
2275
2276 for (struct target *target = all_targets; target;) {
2277 struct target *tmp;
2278
2279 tmp = target->next;
2280 target_destroy(target);
2281 target = tmp;
2282 }
2283
2284 all_targets = NULL;
2285 }
2286
2287 int target_arch_state(struct target *target)
2288 {
2289 int retval;
2290 if (!target) {
2291 LOG_WARNING("No target has been configured");
2292 return ERROR_OK;
2293 }
2294
2295 if (target->state != TARGET_HALTED)
2296 return ERROR_OK;
2297
2298 retval = target->type->arch_state(target);
2299 return retval;
2300 }
2301
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info)
{
	/* If a target does not support semihosting functions, it has no need
	 * to provide a .get_gdb_fileio_info callback. Returning ERROR_FAIL
	 * here makes gdb_server report a plain "Txx" stop reply each time
	 * the target halts. */
	return ERROR_FAIL;
}
2311
/* Default no-op .gdb_fileio_end handler for targets without semihosting
 * support; the retcode/errno/ctrl_c results of the File-I/O request are
 * simply ignored. */
static int target_gdb_fileio_end_default(struct target *target,
		int retcode, int fileio_errno, bool ctrl_c)
{
	return ERROR_OK;
}
2317
2318 int target_profiling_default(struct target *target, uint32_t *samples,
2319 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2320 {
2321 struct timeval timeout, now;
2322
2323 gettimeofday(&timeout, NULL);
2324 timeval_add_time(&timeout, seconds, 0);
2325
2326 LOG_INFO("Starting profiling. Halting and resuming the"
2327 " target as often as we can...");
2328
2329 uint32_t sample_count = 0;
2330 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2331 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2332
2333 int retval = ERROR_OK;
2334 for (;;) {
2335 target_poll(target);
2336 if (target->state == TARGET_HALTED) {
2337 uint32_t t = buf_get_u32(reg->value, 0, 32);
2338 samples[sample_count++] = t;
2339 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2340 retval = target_resume(target, 1, 0, 0, 0);
2341 target_poll(target);
2342 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2343 } else if (target->state == TARGET_RUNNING) {
2344 /* We want to quickly sample the PC. */
2345 retval = target_halt(target);
2346 } else {
2347 LOG_INFO("Target not halted or running");
2348 retval = ERROR_OK;
2349 break;
2350 }
2351
2352 if (retval != ERROR_OK)
2353 break;
2354
2355 gettimeofday(&now, NULL);
2356 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2357 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2358 break;
2359 }
2360 }
2361
2362 *num_samples = sample_count;
2363 return retval;
2364 }
2365
2366 /* Single aligned words are guaranteed to use 16 or 32 bit access
2367 * mode respectively, otherwise data is handled as quickly as
2368 * possible
2369 */
2370 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2371 {
2372 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2373 size, address);
2374
2375 if (!target_was_examined(target)) {
2376 LOG_ERROR("Target not examined yet");
2377 return ERROR_FAIL;
2378 }
2379
2380 if (size == 0)
2381 return ERROR_OK;
2382
2383 if ((address + size - 1) < address) {
2384 /* GDB can request this when e.g. PC is 0xfffffffc */
2385 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2386 address,
2387 size);
2388 return ERROR_FAIL;
2389 }
2390
2391 return target->type->write_buffer(target, address, size, buffer);
2392 }
2393
/* Default .write_buffer implementation.
 * Writes 'count' bytes to 'address' using the widest access width the
 * target's data bus supports (target_data_bits() / 8): first single
 * accesses of growing size until 'address' is aligned for the maximum
 * width, then the bulk with the largest width, finally progressively
 * smaller accesses for any remainder. */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* (address & size) != 0 means the address is misaligned for the
		 * next larger width: emit one access of the current width */
		if (address & size) {
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* largest prefix of the remaining data that is a whole number
		 * of 'size'-byte units */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2430
2431 /* Single aligned words are guaranteed to use 16 or 32 bit access
2432 * mode respectively, otherwise data is handled as quickly as
2433 * possible
2434 */
2435 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2436 {
2437 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2438 size, address);
2439
2440 if (!target_was_examined(target)) {
2441 LOG_ERROR("Target not examined yet");
2442 return ERROR_FAIL;
2443 }
2444
2445 if (size == 0)
2446 return ERROR_OK;
2447
2448 if ((address + size - 1) < address) {
2449 /* GDB can request this when e.g. PC is 0xfffffffc */
2450 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2451 address,
2452 size);
2453 return ERROR_FAIL;
2454 }
2455
2456 return target->type->read_buffer(target, address, size, buffer);
2457 }
2458
/* Default .read_buffer implementation.
 * Mirror image of target_write_buffer_default(): reads 'count' bytes from
 * 'address' using the widest access width the target's data bus supports,
 * aligning first, then bulk-reading, then mopping up the remainder with
 * smaller accesses. */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* (address & size) != 0 means the address is misaligned for the
		 * next larger width: emit one access of the current width */
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* largest prefix of the remaining data that is a whole number
		 * of 'size'-byte units */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2494
/* Compute a checksum over 'size' bytes of target memory at 'address' into
 * *crc. Prefers the target type's own .checksum_memory implementation
 * (typically runs on the target and is much faster); if that fails, falls
 * back to reading the memory to the host and checksumming it here. */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->checksum_memory) {
		LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* Host-side fallback */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): a get_u32 immediately followed by set_u32 on the
		 * same target round-trips the bytes unchanged, so this loop looks
		 * like a no-op — presumably a leftover of an earlier host/target
		 * byte-order swap; verify against image_calculate_checksum()'s
		 * expected byte order before changing it. */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2538
2539 int target_blank_check_memory(struct target *target,
2540 struct target_memory_check_block *blocks, int num_blocks,
2541 uint8_t erased_value)
2542 {
2543 if (!target_was_examined(target)) {
2544 LOG_ERROR("Target not examined yet");
2545 return ERROR_FAIL;
2546 }
2547
2548 if (!target->type->blank_check_memory)
2549 return ERROR_NOT_IMPLEMENTED;
2550
2551 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2552 }
2553
2554 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2555 {
2556 uint8_t value_buf[8];
2557 if (!target_was_examined(target)) {
2558 LOG_ERROR("Target not examined yet");
2559 return ERROR_FAIL;
2560 }
2561
2562 int retval = target_read_memory(target, address, 8, 1, value_buf);
2563
2564 if (retval == ERROR_OK) {
2565 *value = target_buffer_get_u64(target, value_buf);
2566 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2567 address,
2568 *value);
2569 } else {
2570 *value = 0x0;
2571 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2572 address);
2573 }
2574
2575 return retval;
2576 }
2577
2578 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2579 {
2580 uint8_t value_buf[4];
2581 if (!target_was_examined(target)) {
2582 LOG_ERROR("Target not examined yet");
2583 return ERROR_FAIL;
2584 }
2585
2586 int retval = target_read_memory(target, address, 4, 1, value_buf);
2587
2588 if (retval == ERROR_OK) {
2589 *value = target_buffer_get_u32(target, value_buf);
2590 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2591 address,
2592 *value);
2593 } else {
2594 *value = 0x0;
2595 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2596 address);
2597 }
2598
2599 return retval;
2600 }
2601
2602 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2603 {
2604 uint8_t value_buf[2];
2605 if (!target_was_examined(target)) {
2606 LOG_ERROR("Target not examined yet");
2607 return ERROR_FAIL;
2608 }
2609
2610 int retval = target_read_memory(target, address, 2, 1, value_buf);
2611
2612 if (retval == ERROR_OK) {
2613 *value = target_buffer_get_u16(target, value_buf);
2614 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2615 address,
2616 *value);
2617 } else {
2618 *value = 0x0;
2619 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2620 address);
2621 }
2622
2623 return retval;
2624 }
2625
2626 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2627 {
2628 if (!target_was_examined(target)) {
2629 LOG_ERROR("Target not examined yet");
2630 return ERROR_FAIL;
2631 }
2632
2633 int retval = target_read_memory(target, address, 1, 1, value);
2634
2635 if (retval == ERROR_OK) {
2636 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2637 address,
2638 *value);
2639 } else {
2640 *value = 0x0;
2641 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2642 address);
2643 }
2644
2645 return retval;
2646 }
2647
2648 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2649 {
2650 int retval;
2651 uint8_t value_buf[8];
2652 if (!target_was_examined(target)) {
2653 LOG_ERROR("Target not examined yet");
2654 return ERROR_FAIL;
2655 }
2656
2657 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2658 address,
2659 value);
2660
2661 target_buffer_set_u64(target, value_buf, value);
2662 retval = target_write_memory(target, address, 8, 1, value_buf);
2663 if (retval != ERROR_OK)
2664 LOG_DEBUG("failed: %i", retval);
2665
2666 return retval;
2667 }
2668
2669 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2670 {
2671 int retval;
2672 uint8_t value_buf[4];
2673 if (!target_was_examined(target)) {
2674 LOG_ERROR("Target not examined yet");
2675 return ERROR_FAIL;
2676 }
2677
2678 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2679 address,
2680 value);
2681
2682 target_buffer_set_u32(target, value_buf, value);
2683 retval = target_write_memory(target, address, 4, 1, value_buf);
2684 if (retval != ERROR_OK)
2685 LOG_DEBUG("failed: %i", retval);
2686
2687 return retval;
2688 }
2689
2690 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2691 {
2692 int retval;
2693 uint8_t value_buf[2];
2694 if (!target_was_examined(target)) {
2695 LOG_ERROR("Target not examined yet");
2696 return ERROR_FAIL;
2697 }
2698
2699 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2700 address,
2701 value);
2702
2703 target_buffer_set_u16(target, value_buf, value);
2704 retval = target_write_memory(target, address, 2, 1, value_buf);
2705 if (retval != ERROR_OK)
2706 LOG_DEBUG("failed: %i", retval);
2707
2708 return retval;
2709 }
2710
2711 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2712 {
2713 int retval;
2714 if (!target_was_examined(target)) {
2715 LOG_ERROR("Target not examined yet");
2716 return ERROR_FAIL;
2717 }
2718
2719 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2720 address, value);
2721
2722 retval = target_write_memory(target, address, 1, 1, &value);
2723 if (retval != ERROR_OK)
2724 LOG_DEBUG("failed: %i", retval);
2725
2726 return retval;
2727 }
2728
2729 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2730 {
2731 int retval;
2732 uint8_t value_buf[8];
2733 if (!target_was_examined(target)) {
2734 LOG_ERROR("Target not examined yet");
2735 return ERROR_FAIL;
2736 }
2737
2738 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2739 address,
2740 value);
2741
2742 target_buffer_set_u64(target, value_buf, value);
2743 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2744 if (retval != ERROR_OK)
2745 LOG_DEBUG("failed: %i", retval);
2746
2747 return retval;
2748 }
2749
2750 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2751 {
2752 int retval;
2753 uint8_t value_buf[4];
2754 if (!target_was_examined(target)) {
2755 LOG_ERROR("Target not examined yet");
2756 return ERROR_FAIL;
2757 }
2758
2759 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2760 address,
2761 value);
2762
2763 target_buffer_set_u32(target, value_buf, value);
2764 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2765 if (retval != ERROR_OK)
2766 LOG_DEBUG("failed: %i", retval);
2767
2768 return retval;
2769 }
2770
2771 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2772 {
2773 int retval;
2774 uint8_t value_buf[2];
2775 if (!target_was_examined(target)) {
2776 LOG_ERROR("Target not examined yet");
2777 return ERROR_FAIL;
2778 }
2779
2780 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2781 address,
2782 value);
2783
2784 target_buffer_set_u16(target, value_buf, value);
2785 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2786 if (retval != ERROR_OK)
2787 LOG_DEBUG("failed: %i", retval);
2788
2789 return retval;
2790 }
2791
2792 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2793 {
2794 int retval;
2795 if (!target_was_examined(target)) {
2796 LOG_ERROR("Target not examined yet");
2797 return ERROR_FAIL;
2798 }
2799
2800 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2801 address, value);
2802
2803 retval = target_write_phys_memory(target, address, 1, 1, &value);
2804 if (retval != ERROR_OK)
2805 LOG_DEBUG("failed: %i", retval);
2806
2807 return retval;
2808 }
2809
2810 static int find_target(struct command_invocation *cmd, const char *name)
2811 {
2812 struct target *target = get_target(name);
2813 if (!target) {
2814 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2815 return ERROR_FAIL;
2816 }
2817 if (!target->tap->enabled) {
2818 command_print(cmd, "Target: TAP %s is disabled, "
2819 "can't be the current target\n",
2820 target->tap->dotted_name);
2821 return ERROR_FAIL;
2822 }
2823
2824 cmd->ctx->current_target = target;
2825 if (cmd->ctx->current_target_override)
2826 cmd->ctx->current_target_override = target;
2827
2828 return ERROR_OK;
2829 }
2830
2831
/* 'targets' command: with one argument, select the named target as the
 * current target; with no argument (or when selection fails) print a table
 * of all configured targets, marking the current one with '*'. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	/* Fall through and dump the table; on a failed lookup 'retval' still
	 * carries the error so the command reports failure after listing. */
	struct target *target = all_targets;
	command_print(CMD, "    TargetName         Type       Endian TapName            State       ");
	command_print(CMD, "--  ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2874
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* Most recently sensed line states, updated by sense_handler() and also
 * consulted by handle_target() before polling. */
static int power_dropout;
static int srst_asserted;

/* One-shot action flags: set by sense_handler() when an edge/debounced
 * event is detected, consumed and cleared by handle_target(). */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
2884
/* Sample the power and SRST sense lines and derive the one-shot run_*
 * action flags. Current states go into the file-scope 'power_dropout' and
 * 'srst_asserted'; edges are detected against the previous sample kept in
 * local statics. Power-dropout and SRST-deassert events are rate limited
 * to at most one per 2000 ms. */
static int sense_handler(void)
{
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	static int64_t last_power;
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	static int64_t last_srst;
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2936
/* process target state changes */
/* Periodic callback: run the Tcl procs matching any pending power/SRST
 * events (guarded against recursion, since those procs can themselves
 * trigger new events), then poll every examined and enabled target for
 * state changes. A target whose poll fails is backed off exponentially
 * (capped at 5000 ms of polling interval) and re-examined before polling
 * resumes. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			if (target->backoff.times > 0) {
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target_set_examined(target);
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
							target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3049
/*
 * 'reg' command:
 *   reg                  - list all registers of the current target, cache by cache
 *   reg (<num>|<name>)   - display one register (re-reads it only if the cached
 *                          value is invalid)
 *   reg <id> force       - invalidate the cache entry and re-read from the target
 *   reg <id> <value>     - write <value> to the register
 */
COMMAND_HANDLER(handle_reg_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	struct reg *reg = NULL;

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		/* 'count' is the register's ordinal number across all caches;
		 * it keeps incrementing from one cache to the next */
		unsigned int count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* skip registers that do not exist on this core or are hidden */
				if (reg->exist == false || reg->hidden)
					continue;
				/* only print cached values if they are valid */
				if (reg->valid) {
					char *value = buf_to_hex_str(reg->value,
							reg->size);
					command_print(CMD,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk all caches until the running ordinal matches 'num' */
		struct reg_cache *cache = target->reg_cache;
		unsigned int count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);

		if (!reg)
			goto not_found;
	}

	assert(reg); /* give clang a hint that we *know* reg is != NULL here */

	if (!reg->exist)
		goto not_found;

	/* display a register: either no extra argument, or a non-numeric
	 * second argument (e.g. "force") */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		/* re-read from the target only when the cached value is stale */
		if (reg->valid == 0) {
			int retval = reg->type->get(reg);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not read register '%s'", reg->name);
				return retval;
			}
		}
		char *value = buf_to_hex_str(reg->value, reg->size);
		command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (!buf)
			return ERROR_FAIL;
		/* NOTE(review): str_to_buf() return value is not checked here, so a
		 * malformed value string goes undiagnosed — verify intended behavior */
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		int retval = reg->type->set(reg, buf);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not write to register '%s'", reg->name);
		} else {
			/* print the value actually held in the cache after the write */
			char *value = buf_to_hex_str(reg->value, reg->size);
			command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
			free(value);
		}

		free(buf);

		return retval;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;

not_found:
	command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
	return ERROR_OK;
}
3180
3181 COMMAND_HANDLER(handle_poll_command)
3182 {
3183 int retval = ERROR_OK;
3184 struct target *target = get_current_target(CMD_CTX);
3185
3186 if (CMD_ARGC == 0) {
3187 command_print(CMD, "background polling: %s",
3188 jtag_poll_get_enabled() ? "on" : "off");
3189 command_print(CMD, "TAP: %s (%s)",
3190 target->tap->dotted_name,
3191 target->tap->enabled ? "enabled" : "disabled");
3192 if (!target->tap->enabled)
3193 return ERROR_OK;
3194 retval = target_poll(target);
3195 if (retval != ERROR_OK)
3196 return retval;
3197 retval = target_arch_state(target);
3198 if (retval != ERROR_OK)
3199 return retval;
3200 } else if (CMD_ARGC == 1) {
3201 bool enable;
3202 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3203 jtag_poll_set_enabled(enable);
3204 } else
3205 return ERROR_COMMAND_SYNTAX_ERROR;
3206
3207 return retval;
3208 }
3209
3210 COMMAND_HANDLER(handle_wait_halt_command)
3211 {
3212 if (CMD_ARGC > 1)
3213 return ERROR_COMMAND_SYNTAX_ERROR;
3214
3215 unsigned ms = DEFAULT_HALT_TIMEOUT;
3216 if (1 == CMD_ARGC) {
3217 int retval = parse_uint(CMD_ARGV[0], &ms);
3218 if (retval != ERROR_OK)
3219 return ERROR_COMMAND_SYNTAX_ERROR;
3220 }
3221
3222 struct target *target = get_current_target(CMD_CTX);
3223 return target_wait_state(target, TARGET_HALTED, ms);
3224 }
3225
/* wait for target state to change. The trick here is to have a low
 * latency for short waits and not to suck up all the CPU time
 * on longer waits.
 *
 * After 500ms, keep_alive() is invoked
 */
int target_wait_state(struct target *target, enum target_state state, unsigned int ms)
{
	int retval;
	int64_t then = 0, cur;
	bool once = true;

	for (;;) {
		/* target_poll() may update target->state as a side effect */
		retval = target_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (target->state == state)
			break;
		cur = timeval_ms();
		if (once) {
			once = false;
			/* start the timeout clock on the first unsuccessful poll */
			then = timeval_ms();
			LOG_DEBUG("waiting for target %s...",
				nvp_value2name(nvp_target_state, state)->name);
		}

		/* after 500 ms, ping on every further iteration so the GDB
		 * connection does not time out while we keep waiting */
		if (cur-then > 500)
			keep_alive();

		if ((cur-then) > ms) {
			LOG_ERROR("timed out while waiting for target %s",
				nvp_value2name(nvp_target_state, state)->name);
			return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}
3264
3265 COMMAND_HANDLER(handle_halt_command)
3266 {
3267 LOG_DEBUG("-");
3268
3269 struct target *target = get_current_target(CMD_CTX);
3270
3271 target->verbose_halt_msg = true;
3272
3273 int retval = target_halt(target);
3274 if (retval != ERROR_OK)
3275 return retval;
3276
3277 if (CMD_ARGC == 1) {
3278 unsigned wait_local;
3279 retval = parse_uint(CMD_ARGV[0], &wait_local);
3280 if (retval != ERROR_OK)
3281 return ERROR_COMMAND_SYNTAX_ERROR;
3282 if (!wait_local)
3283 return ERROR_OK;
3284 }
3285
3286 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3287 }
3288
3289 COMMAND_HANDLER(handle_soft_reset_halt_command)
3290 {
3291 struct target *target = get_current_target(CMD_CTX);
3292
3293 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3294
3295 target_soft_reset_halt(target);
3296
3297 return ERROR_OK;
3298 }
3299
3300 COMMAND_HANDLER(handle_reset_command)
3301 {
3302 if (CMD_ARGC > 1)
3303 return ERROR_COMMAND_SYNTAX_ERROR;
3304
3305 enum target_reset_mode reset_mode = RESET_RUN;
3306 if (CMD_ARGC == 1) {
3307 const struct nvp *n;
3308 n = nvp_name2value(nvp_reset_modes, CMD_ARGV[0]);
3309 if ((!n->name) || (n->value == RESET_UNKNOWN))
3310 return ERROR_COMMAND_SYNTAX_ERROR;
3311 reset_mode = n->value;
3312 }
3313
3314 /* reset *all* targets */
3315 return target_process_reset(CMD, reset_mode);
3316 }
3317
3318
3319 COMMAND_HANDLER(handle_resume_command)
3320 {
3321 int current = 1;
3322 if (CMD_ARGC > 1)
3323 return ERROR_COMMAND_SYNTAX_ERROR;
3324
3325 struct target *target = get_current_target(CMD_CTX);
3326
3327 /* with no CMD_ARGV, resume from current pc, addr = 0,
3328 * with one arguments, addr = CMD_ARGV[0],
3329 * handle breakpoints, not debugging */
3330 target_addr_t addr = 0;
3331 if (CMD_ARGC == 1) {
3332 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3333 current = 0;
3334 }
3335
3336 return target_resume(target, current, addr, 1, 0);
3337 }
3338
3339 COMMAND_HANDLER(handle_step_command)
3340 {
3341 if (CMD_ARGC > 1)
3342 return ERROR_COMMAND_SYNTAX_ERROR;
3343
3344 LOG_DEBUG("-");
3345
3346 /* with no CMD_ARGV, step from current pc, addr = 0,
3347 * with one argument addr = CMD_ARGV[0],
3348 * handle breakpoints, debugging */
3349 target_addr_t addr = 0;
3350 int current_pc = 1;
3351 if (CMD_ARGC == 1) {
3352 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3353 current_pc = 0;
3354 }
3355
3356 struct target *target = get_current_target(CMD_CTX);
3357
3358 return target_step(target, current_pc, addr, 1);
3359 }
3360
/*
 * Format and print a memory dump for the md* commands: one output line per
 * 32 bytes, each line prefixed with the address and followed by 'count'
 * values of 'size' bytes (1, 2, 4 or 8) in target byte order.
 */
void target_handle_md_output(struct command_invocation *cmd,
		struct target *target, target_addr_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	/* fixed line width: 32 bytes of data per printed line */
	const unsigned line_bytecnt = 32;
	unsigned line_modulo = line_bytecnt / size;

	/* accumulate one display line here before printing it */
	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	const char *value_fmt;
	switch (size) {
	case 8:
		value_fmt = "%16.16"PRIx64" ";
		break;
	case 4:
		value_fmt = "%8.8"PRIx64" ";
		break;
	case 2:
		value_fmt = "%4.4"PRIx64" ";
		break;
	case 1:
		value_fmt = "%2.2"PRIx64" ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		/* start of a display line: emit the address prefix */
		if (i % line_modulo == 0) {
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					TARGET_ADDR_FMT ": ",
					(address + (i * size)));
		}

		/* decode the next value in target byte order */
		uint64_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		switch (size) {
		case 8:
			value = target_buffer_get_u64(target, value_ptr);
			break;
		case 4:
			value = target_buffer_get_u32(target, value_ptr);
			break;
		case 2:
			value = target_buffer_get_u16(target, value_ptr);
			break;
		case 1:
			value = *value_ptr;
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		/* flush at the end of a line or at the last value */
		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			command_print(cmd, "%s", output);
			output_len = 0;
		}
	}
}
3424
3425 COMMAND_HANDLER(handle_md_command)
3426 {
3427 if (CMD_ARGC < 1)
3428 return ERROR_COMMAND_SYNTAX_ERROR;
3429
3430 unsigned size = 0;
3431 switch (CMD_NAME[2]) {
3432 case 'd':
3433 size = 8;
3434 break;
3435 case 'w':
3436 size = 4;
3437 break;
3438 case 'h':
3439 size = 2;
3440 break;
3441 case 'b':
3442 size = 1;
3443 break;
3444 default:
3445 return ERROR_COMMAND_SYNTAX_ERROR;
3446 }
3447
3448 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3449 int (*fn)(struct target *target,
3450 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3451 if (physical) {
3452 CMD_ARGC--;
3453 CMD_ARGV++;
3454 fn = target_read_phys_memory;
3455 } else
3456 fn = target_read_memory;
3457 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3458 return ERROR_COMMAND_SYNTAX_ERROR;
3459
3460 target_addr_t address;
3461 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3462
3463 unsigned count = 1;
3464 if (CMD_ARGC == 2)
3465 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3466
3467 uint8_t *buffer = calloc(count, size);
3468 if (!buffer) {
3469 LOG_ERROR("Failed to allocate md read buffer");
3470 return ERROR_FAIL;
3471 }
3472
3473 struct target *target = get_current_target(CMD_CTX);
3474 int retval = fn(target, address, size, count, buffer);
3475 if (retval == ERROR_OK)
3476 target_handle_md_output(CMD, target, address, size, count, buffer);
3477
3478 free(buffer);
3479
3480 return retval;
3481 }
3482
/* Common signature of the memory-write routines so callers (see
 * handle_mw_command / target_fill_mem) can pick the virtual or physical
 * address space at runtime. */
typedef int (*target_write_fn)(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3485
3486 static int target_fill_mem(struct target *target,
3487 target_addr_t address,
3488 target_write_fn fn,
3489 unsigned data_size,
3490 /* value */
3491 uint64_t b,
3492 /* count */
3493 unsigned c)
3494 {
3495 /* We have to write in reasonably large chunks to be able
3496 * to fill large memory areas with any sane speed */
3497 const unsigned chunk_size = 16384;
3498 uint8_t *target_buf = malloc(chunk_size * data_size);
3499 if (!target_buf) {
3500 LOG_ERROR("Out of memory");
3501 return ERROR_FAIL;
3502 }
3503
3504 for (unsigned i = 0; i < chunk_size; i++) {
3505 switch (data_size) {
3506 case 8:
3507 target_buffer_set_u64(target, target_buf + i * data_size, b);
3508 break;
3509 case 4:
3510 target_buffer_set_u32(target, target_buf + i * data_size, b);
3511 break;
3512 case 2:
3513 target_buffer_set_u16(target, target_buf + i * data_size, b);
3514 break;
3515 case 1:
3516 target_buffer_set_u8(target, target_buf + i * data_size, b);
3517 break;
3518 default:
3519 exit(-1);
3520 }
3521 }
3522
3523 int retval = ERROR_OK;
3524
3525 for (unsigned x = 0; x < c; x += chunk_size) {
3526 unsigned current;
3527 current = c - x;
3528 if (current > chunk_size)
3529 current = chunk_size;
3530 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3531 if (retval != ERROR_OK)
3532 break;
3533 /* avoid GDB timeouts */
3534 keep_alive();
3535 }
3536 free(target_buf);
3537
3538 return retval;
3539 }
3540
3541
3542 COMMAND_HANDLER(handle_mw_command)
3543 {
3544 if (CMD_ARGC < 2)
3545 return ERROR_COMMAND_SYNTAX_ERROR;
3546 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3547 target_write_fn fn;
3548 if (physical) {
3549 CMD_ARGC--;
3550 CMD_ARGV++;
3551 fn = target_write_phys_memory;
3552 } else
3553 fn = target_write_memory;
3554 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3555 return ERROR_COMMAND_SYNTAX_ERROR;
3556
3557 target_addr_t address;
3558 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3559
3560 uint64_t value;
3561 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3562
3563 unsigned count = 1;
3564 if (CMD_ARGC == 3)
3565 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3566
3567 struct target *target = get_current_target(CMD_CTX);
3568 unsigned wordsize;
3569 switch (CMD_NAME[2]) {
3570 case 'd':
3571 wordsize = 8;
3572 break;
3573 case 'w':
3574 wordsize = 4;
3575 break;
3576 case 'h':
3577 wordsize = 2;
3578 break;
3579 case 'b':
3580 wordsize = 1;
3581 break;
3582 default:
3583 return ERROR_COMMAND_SYNTAX_ERROR;
3584 }
3585
3586 return target_fill_mem(target, address, fn, wordsize, value, count);
3587 }
3588
/*
 * Parse the common argument list of the load_image-style commands:
 *   <file> [base_address [type [min_address [size]]]]
 * Fills 'image' (base address only; type is handled by the caller) and the
 * [*min_address, *max_address) window that limits which bytes get written.
 */
static COMMAND_HELPER(parse_load_image_command, struct image *image,
		target_addr_t *min_address, target_addr_t *max_address)
{
	if (CMD_ARGC < 1 || CMD_ARGC > 5)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* a base address isn't always necessary,
	 * default to 0x0 (i.e. don't relocate) */
	if (CMD_ARGC >= 2) {
		target_addr_t addr;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
		image->base_address = addr;
		image->base_address_set = true;
	} else
		image->base_address_set = false;

	image->start_address_set = false;

	if (CMD_ARGC >= 4)
		COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
	if (CMD_ARGC == 5) {
		COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
		/* use size (given) to find max (required) */
		*max_address += *min_address;
	}

	/* a window that starts after it ends is a usage error */
	if (*min_address > *max_address)
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
3620
/*
 * 'load_image <file> [base [type [min [size]]]]': write an image file into
 * target memory, section by section, clipped to the [min, max) window and
 * reporting the download speed.
 */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* default: no upper limit */
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* only write the part of the section that overlaps the
		 * [min_address, max_address) window */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3704
3705 COMMAND_HANDLER(handle_dump_image_command)
3706 {
3707 struct fileio *fileio;
3708 uint8_t *buffer;
3709 int retval, retvaltemp;
3710 target_addr_t address, size;
3711 struct duration bench;
3712 struct target *target = get_current_target(CMD_CTX);
3713
3714 if (CMD_ARGC != 3)
3715 return ERROR_COMMAND_SYNTAX_ERROR;
3716
3717 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3718 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3719
3720 uint32_t buf_size = (size > 4096) ? 4096 : size;
3721 buffer = malloc(buf_size);
3722 if (!buffer)
3723 return ERROR_FAIL;
3724
3725 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3726 if (retval != ERROR_OK) {
3727 free(buffer);
3728 return retval;
3729 }
3730
3731 duration_start(&bench);
3732
3733 while (size > 0) {
3734 size_t size_written;
3735 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3736 retval = target_read_buffer(target, address, this_run_size, buffer);
3737 if (retval != ERROR_OK)
3738 break;
3739
3740 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3741 if (retval != ERROR_OK)
3742 break;
3743
3744 size -= this_run_size;
3745 address += this_run_size;
3746 }
3747
3748 free(buffer);
3749
3750 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3751 size_t filesize;
3752 retval = fileio_size(fileio, &filesize);
3753 if (retval != ERROR_OK)
3754 return retval;
3755 command_print(CMD,
3756 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3757 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3758 }
3759
3760 retvaltemp = fileio_close(fileio);
3761 if (retvaltemp != ERROR_OK)
3762 return retvaltemp;
3763
3764 return retval;
3765 }
3766
/* How thoroughly the verify_image-family commands compare image vs. memory. */
enum verify_mode {
	IMAGE_TEST = 0,		/* no compare: just print each section's address/length */
	IMAGE_VERIFY = 1,	/* checksum compare; binary compare + diff list on mismatch */
	IMAGE_CHECKSUM_ONLY = 2	/* checksum compare only; any mismatch is an error */
};
3772
3773 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3774 {
3775 uint8_t *buffer;
3776 size_t buf_cnt;
3777 uint32_t image_size;
3778 int retval;
3779 uint32_t checksum = 0;
3780 uint32_t mem_checksum = 0;
3781
3782 struct image image;
3783
3784 struct target *target = get_current_target(CMD_CTX);
3785
3786 if (CMD_ARGC < 1)
3787 return ERROR_COMMAND_SYNTAX_ERROR;
3788
3789 if (!target) {
3790 LOG_ERROR("no target selected");
3791 return ERROR_FAIL;
3792 }
3793
3794 struct duration bench;
3795 duration_start(&bench);
3796
3797 if (CMD_ARGC >= 2) {
3798 target_addr_t addr;
3799 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3800 image.base_address = addr;
3801 image.base_address_set = true;
3802 } else {
3803 image.base_address_set = false;
3804 image.base_address = 0x0;
3805 }
3806
3807 image.start_address_set = false;
3808
3809 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3810 if (retval != ERROR_OK)
3811 return retval;
3812
3813 image_size = 0x0;
3814 int diffs = 0;
3815 retval = ERROR_OK;
3816 for (unsigned int i = 0; i < image.num_sections; i++) {
3817 buffer = malloc(image.sections[i].size);
3818 if (!buffer) {
3819 command_print(CMD,
3820 "error allocating buffer for section (%" PRIu32 " bytes)",
3821 image.sections[i].size);
3822 break;
3823 }
3824 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3825 if (retval != ERROR_OK) {
3826 free(buffer);
3827 break;
3828 }
3829
3830 if (verify >= IMAGE_VERIFY) {
3831 /* calculate checksum of image */
3832 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3833 if (retval != ERROR_OK) {
3834 free(buffer);
3835 break;
3836 }
3837
3838 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3839 if (retval != ERROR_OK) {
3840 free(buffer);
3841 break;
3842 }
3843 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3844 LOG_ERROR("checksum mismatch");
3845 free(buffer);
3846 retval = ERROR_FAIL;
3847 goto done;
3848 }
3849 if (checksum != mem_checksum) {
3850 /* failed crc checksum, fall back to a binary compare */
3851 uint8_t *data;
3852
3853 if (diffs == 0)
3854 LOG_ERROR("checksum mismatch - attempting binary compare");
3855
3856 data = malloc(buf_cnt);
3857
3858 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3859 if (retval == ERROR_OK) {
3860 uint32_t t;
3861 for (t = 0; t < buf_cnt; t++) {
3862 if (data[t] != buffer[t]) {
3863 command_print(CMD,
3864 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3865 diffs,
3866 (unsigned)(t + image.sections[i].base_address),
3867 data[t],
3868 buffer[t]);
3869 if (diffs++ >= 127) {
3870 command_print(CMD, "More than 128 errors, the rest are not printed.");
3871 free(data);
3872 free(buffer);
3873 goto done;
3874 }
3875 }
3876 keep_alive();
3877 }
3878 }
3879 free(data);
3880 }
3881 } else {
3882 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3883 image.sections[i].base_address,
3884 buf_cnt);
3885 }
3886
3887 free(buffer);
3888 image_size += buf_cnt;
3889 }
3890 if (diffs > 0)
3891 command_print(CMD, "No more differences found.");
3892 done:
3893 if (diffs > 0)
3894 retval = ERROR_FAIL;
3895 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3896 command_print(CMD, "verified %" PRIu32 " bytes "
3897 "in %fs (%0.3f KiB/s)", image_size,
3898 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3899 }
3900
3901 image_close(&image);
3902
3903 return retval;
3904 }
3905
/* 'verify_image_checksum': CRC-compare only; any mismatch is an error. */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3910
/* 'verify_image': CRC-compare, with byte-by-byte diff listing on mismatch. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3915
/* 'test_image': no comparison; just parse the image and print its layout. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3920
3921 static int handle_bp_command_list(struct command_invocation *cmd)
3922 {
3923 struct target *target = get_current_target(cmd->ctx);
3924 struct breakpoint *breakpoint = target->breakpoints;
3925 while (breakpoint) {
3926 if (breakpoint->type == BKPT_SOFT) {
3927 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3928 breakpoint->length);
3929 command_print(cmd, "Software breakpoint(IVA): addr=" TARGET_ADDR_FMT ", len=0x%x, orig_instr=0x%s",
3930 breakpoint->address,
3931 breakpoint->length,
3932 buf);
3933 free(buf);
3934 } else {
3935 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3936 command_print(cmd, "Context breakpoint: asid=0x%8.8" PRIx32 ", len=0x%x, num=%u",
3937 breakpoint->asid,
3938 breakpoint->length, breakpoint->number);
3939 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3940 command_print(cmd, "Hybrid breakpoint(IVA): addr=" TARGET_ADDR_FMT ", len=0x%x, num=%u",
3941 breakpoint->address,
3942 breakpoint->length, breakpoint->number);
3943 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3944 breakpoint->asid);
3945 } else
3946 command_print(cmd, "Hardware breakpoint(IVA): addr=" TARGET_ADDR_FMT ", len=0x%x, num=%u",
3947 breakpoint->address,
3948 breakpoint->length, breakpoint->number);
3949 }
3950
3951 breakpoint = breakpoint->next;
3952 }
3953 return ERROR_OK;
3954 }
3955
3956 static int handle_bp_command_set(struct command_invocation *cmd,
3957 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3958 {
3959 struct target *target = get_current_target(cmd->ctx);
3960 int retval;
3961
3962 if (asid == 0) {
3963 retval = breakpoint_add(target, addr, length, hw);
3964 /* error is always logged in breakpoint_add(), do not print it again */
3965 if (retval == ERROR_OK)
3966 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3967
3968 } else if (addr == 0) {
3969 if (!target->type->add_context_breakpoint) {
3970 LOG_ERROR("Context breakpoint not available");
3971 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3972 }
3973 retval = context_breakpoint_add(target, asid, length, hw);
3974 /* error is always logged in context_breakpoint_add(), do not print it again */
3975 if (retval == ERROR_OK)
3976 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3977
3978 } else {
3979 if (!target->type->add_hybrid_breakpoint) {
3980 LOG_ERROR("Hybrid breakpoint not available");
3981 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3982 }
3983 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3984 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
3985 if (retval == ERROR_OK)
3986 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3987 }
3988 return retval;
3989 }
3990
/*
 * 'bp' command dispatcher:
 *   bp                              - list breakpoints
 *   bp <addr> <len>                 - software breakpoint
 *   bp <addr> <len> hw              - hardware breakpoint
 *   bp <asid> <len> hw_ctx          - context (ASID) breakpoint
 *   bp <addr> <asid> <len> hw       - hybrid breakpoint
 */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
	case 0:
		return handle_bp_command_list(CMD);

	case 2:
		/* bp <addr> <len>: software breakpoint */
		asid = 0;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	case 3:
		if (strcmp(CMD_ARGV[2], "hw") == 0) {
			/* bp <addr> <len> hw: hardware breakpoint */
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			asid = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
			/* bp <asid> <len> hw_ctx: context breakpoint */
			hw = BKPT_HARD;
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			addr = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		}
		/* a 3rd argument that is neither "hw" nor "hw_ctx" falls
		 * through to the 4-argument (hybrid) parsing below, which
		 * then fails on the non-numeric argument */
		/* fallthrough */
	case 4:
		/* bp <addr> <asid> <len> hw: hybrid breakpoint (the trailing
		 * "hw" keyword, when present as a 4th argument, is implied) */
		hw = BKPT_HARD;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
4034
4035 COMMAND_HANDLER(handle_rbp_command)
4036 {
4037 int retval;
4038
4039 if (CMD_ARGC != 1)
4040 return ERROR_COMMAND_SYNTAX_ERROR;
4041
4042 struct target *target = get_current_target(CMD_CTX);
4043
4044 if (!strcmp(CMD_ARGV[0], "all")) {
4045 retval = breakpoint_remove_all(target);
4046
4047 if (retval != ERROR_OK) {
4048 command_print(CMD, "Error encountered during removal of all breakpoints.");
4049 command_print(CMD, "Some breakpoints may have remained set.");
4050 }
4051 } else {
4052 target_addr_t addr;
4053 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4054
4055 retval = breakpoint_remove(target, addr);
4056
4057 if (retval != ERROR_OK)
4058 command_print(CMD, "Error during removal of breakpoint at address " TARGET_ADDR_FMT, addr);
4059 }
4060
4061 return retval;
4062 }
4063
/*
 * 'wp' command:
 *   wp                                   - list watchpoints
 *   wp <addr> <len> [r|w|a [value [mask]]] - set a watchpoint
 * The argument switch below cascades: each case parses its own argument and
 * falls through to parse the remaining (shorter) forms.
 */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	/* no arguments: list all watchpoints of the current target */
	if (CMD_ARGC == 0) {
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx64
					", mask: 0x%8.8" PRIx64,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint64_t data_value = 0x0;
	uint64_t data_mask = WATCHPOINT_IGNORE_DATA_VALUE_MASK;
	bool mask_specified = false;

	switch (CMD_ARGC) {
	case 5:
		/* 5th argument: explicit data mask */
		COMMAND_PARSE_NUMBER(u64, CMD_ARGV[4], data_mask);
		mask_specified = true;
		/* fall through */
	case 4:
		/* 4th argument: data value to match */
		COMMAND_PARSE_NUMBER(u64, CMD_ARGV[3], data_value);
		// if user specified only data value without mask - the mask should be 0
		if (!mask_specified)
			data_mask = 0;
		/* fall through */
	case 3:
		/* 3rd argument: access type (read/write/access) */
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		/* mandatory arguments: address and length */
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
4136
4137 COMMAND_HANDLER(handle_rwp_command)
4138 {
4139 if (CMD_ARGC != 1)
4140 return ERROR_COMMAND_SYNTAX_ERROR;
4141
4142 target_addr_t addr;
4143 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4144
4145 struct target *target = get_current_target(CMD_CTX);
4146 int retval = watchpoint_remove(target, addr);
4147
4148 if (retval != ERROR_OK)
4149 command_print(CMD, "Error during removal of watchpoint at address " TARGET_ADDR_FMT, addr);
4150
4151 return retval;
4152 }
4153
4154 /**
4155 * Translate a virtual address to a physical address.
4156 *
4157 * The low-level target implementation must have logged a detailed error
4158 * which is forwarded to telnet/GDB session.
4159 */
4160 COMMAND_HANDLER(handle_virt2phys_command)
4161 {
4162 if (CMD_ARGC != 1)
4163 return ERROR_COMMAND_SYNTAX_ERROR;
4164
4165 target_addr_t va;
4166 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4167 target_addr_t pa;
4168
4169 struct target *target = get_current_target(CMD_CTX);
4170 int retval = target->type->virt2phys(target, va, &pa);
4171 if (retval == ERROR_OK)
4172 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4173
4174 return retval;
4175 }
4176
/* fwrite() wrapper for the gmon writer: log (but otherwise ignore) short writes. */
static void write_data(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4183
/* Emit a 32-bit value to the gmon file in the target's byte order. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t raw[4];

	target_buffer_set_u32(target, raw, l);
	write_data(f, raw, sizeof(raw));
}
4191
/* Emit a string (without its NUL terminator) to the gmon file. */
static void write_string(FILE *f, char *s)
{
	write_data(f, s, strlen(s));
}
4196
typedef unsigned char UNIT[2]; /* unit of profiling */

/* Dump a gmon.out histogram file. */
static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
		uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
{
	uint32_t i;
	FILE *f = fopen(filename, "w");
	if (!f)
		return;
	/* gmon.out header: magic, version, 3 padding words */
	write_string(f, "gmon");
	write_long(f, 0x00000001, target); /* Version */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */

	uint8_t zero = 0;  /* GMON_TAG_TIME_HIST */
	write_data(f, &zero, 1);

	/* figure out bucket size */
	uint32_t min;
	uint32_t max;
	if (with_range) {
		min = start_address;
		max = end_address;
	} else {
		/* derive the PC range from the samples themselves.
		 * NOTE(review): samples[0] is read unconditionally here, so the
		 * caller must guarantee sample_num > 0 — verify at call sites. */
		min = samples[0];
		max = samples[0];
		for (i = 0; i < sample_num; i++) {
			if (min > samples[i])
				min = samples[i];
			if (max < samples[i])
				max = samples[i];
		}

		/* max should be (largest sample + 1)
		 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
		if (max < UINT32_MAX)
			max++;

		/* gprof requires (max - min) >= 2 */
		while ((max - min) < 2) {
			if (max < UINT32_MAX)
				max++;
			else
				min--;
		}
	}

	uint32_t address_space = max - min;

	/* FIXME: What is the reasonable number of buckets?
	 * The profiling result will be more accurate if there are enough buckets. */
	static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
	uint32_t num_buckets = address_space / sizeof(UNIT);
	if (num_buckets > max_buckets)
		num_buckets = max_buckets;
	int *buckets = malloc(sizeof(int) * num_buckets);
	if (!buckets) {
		fclose(f);
		return;
	}
	memset(buckets, 0, sizeof(int) * num_buckets);
	/* accumulate the samples into the histogram buckets */
	for (i = 0; i < sample_num; i++) {
		uint32_t address = samples[i];

		/* drop samples outside the [min, max) range */
		if ((address < min) || (max <= address))
			continue;

		/* scale the address into a bucket index using 64-bit math */
		long long a = address - min;
		long long b = num_buckets;
		long long c = address_space;
		int index_t = (a * b) / c; /* danger!!!! int32 overflows */
		buckets[index_t]++;
	}

	/* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
	write_long(f, min, target);			/* low_pc */
	write_long(f, max, target);			/* high_pc */
	write_long(f, num_buckets, target);	/* # of buckets */
	float sample_rate = sample_num / (duration_ms / 1000.0);
	write_long(f, sample_rate, target);
	/* dimension field is 15 bytes, zero-padded after "seconds" */
	write_string(f, "seconds");
	for (i = 0; i < (15-strlen("seconds")); i++)
		write_data(f, &zero, 1);
	write_string(f, "s");

	/*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */

	char *data = malloc(2 * num_buckets);
	if (data) {
		/* emit each bucket as a little-endian 16-bit count, saturated */
		for (i = 0; i < num_buckets; i++) {
			int val;
			val = buckets[i];
			if (val > 65535)
				val = 65535;
			data[i * 2] = val&0xff;
			data[i * 2 + 1] = (val >> 8) & 0xff;
		}
		free(buckets);
		write_data(f, data, num_buckets * 2);
		free(data);
	} else
		free(buckets);

	fclose(f);
}
4304
4305 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4306 * which will be used as a random sampling of PC */
4307 COMMAND_HANDLER(handle_profile_command)
4308 {
4309 struct target *target = get_current_target(CMD_CTX);
4310
4311 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4312 return ERROR_COMMAND_SYNTAX_ERROR;
4313
4314 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4315 uint32_t offset;
4316 uint32_t num_of_samples;
4317 int retval = ERROR_OK;
4318 bool halted_before_profiling = target->state == TARGET_HALTED;
4319
4320 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4321
4322 uint32_t start_address = 0;
4323 uint32_t end_address = 0;
4324 bool with_range = false;
4325 if (CMD_ARGC == 4) {
4326 with_range = true;
4327 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4328 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4329 if (start_address > end_address || (end_address - start_address) < 2) {
4330 command_print(CMD, "Error: end - start < 2");
4331 return ERROR_COMMAND_ARGUMENT_INVALID;
4332 }
4333 }
4334
4335 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4336 if (!samples) {
4337 LOG_ERROR("No memory to store samples.");
4338 return ERROR_FAIL;
4339 }
4340
4341 uint64_t timestart_ms = timeval_ms();
4342 /**
4343 * Some cores let us sample the PC without the
4344 * annoying halt/resume step; for example, ARMv7 PCSR.
4345 * Provide a way to use that more efficient mechanism.
4346 */
4347 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4348 &num_of_samples, offset);
4349 if (retval != ERROR_OK) {
4350 free(samples);
4351 return retval;
4352 }
4353 uint32_t duration_ms = timeval_ms() - timestart_ms;
4354
4355 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4356
4357 retval = target_poll(target);
4358 if (retval != ERROR_OK) {
4359 free(samples);
4360 return retval;
4361 }
4362
4363 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4364 /* The target was halted before we started and is running now. Halt it,
4365 * for consistency. */
4366 retval = target_halt(target);
4367 if (retval != ERROR_OK) {
4368 free(samples);
4369 return retval;
4370 }
4371 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4372 /* The target was running before we started and is halted now. Resume
4373 * it, for consistency. */
4374 retval = target_resume(target, 1, 0, 0, 0);
4375 if (retval != ERROR_OK) {
4376 free(samples);
4377 return retval;
4378 }
4379 }
4380
4381 retval = target_poll(target);
4382 if (retval != ERROR_OK) {
4383 free(samples);
4384 return retval;
4385 }
4386
4387 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4388 with_range, start_address, end_address, target, duration_ms);
4389 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4390
4391 free(samples);
4392 return retval;
4393 }
4394
4395 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4396 {
4397 char *namebuf;
4398 Jim_Obj *obj_name, *obj_val;
4399 int result;
4400
4401 namebuf = alloc_printf("%s(%d)", varname, idx);
4402 if (!namebuf)
4403 return JIM_ERR;
4404
4405 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4406 jim_wide wide_val = val;
4407 obj_val = Jim_NewWideObj(interp, wide_val);
4408 if (!obj_name || !obj_val) {
4409 free(namebuf);
4410 return JIM_ERR;
4411 }
4412
4413 Jim_IncrRefCount(obj_name);
4414 Jim_IncrRefCount(obj_val);
4415 result = Jim_SetVariable(interp, obj_name, obj_val);
4416 Jim_DecrRefCount(interp, obj_name);
4417 Jim_DecrRefCount(interp, obj_val);
4418 free(namebuf);
4419 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4420 return result;
4421 }
4422
/* Deprecated Tcl 'mem2array' backend: read target memory into a Tcl array
 * variable, one array element per memory element read.
 *
 * Returns JIM_OK on success, JIM_ERR on failure.  NOTE(review): the final
 * Jim_SetResult() also runs after a failed read, clearing the error text
 * appended inside the loop -- callers only see JIM_ERR; confirm intended. */
static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
{
	int e;

	LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");

	/* argv[0] = name of array to receive the data
	 * argv[1] = desired element width in bits
	 * argv[2] = memory address
	 * argv[3] = count of times to read
	 * argv[4] = optional "phys"
	 */
	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
		return JIM_ERR;
	}

	/* Arg 0: Name of the array variable */
	const char *varname = Jim_GetString(argv[0], NULL);

	/* Arg 1: Bit width of one element */
	long l;
	e = Jim_GetLong(interp, argv[1], &l);
	if (e != JIM_OK)
		return e;
	const unsigned int width_bits = l;

	if (width_bits != 8 &&
			width_bits != 16 &&
			width_bits != 32 &&
			width_bits != 64) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
		return JIM_ERR;
	}
	const unsigned int width = width_bits / 8;	/* element width in bytes */

	/* Arg 2: Memory address */
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[2], &wide_addr);
	if (e != JIM_OK)
		return e;
	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 3: Number of elements to read */
	e = Jim_GetLong(interp, argv[3], &l);
	if (e != JIM_OK)
		return e;
	size_t len = l;

	/* Arg 4: phys */
	bool is_phys = false;
	if (argc > 4) {
		int str_len = 0;
		const char *phys = Jim_GetString(argv[4], &str_len);
		if (!strncmp(phys, "phys", str_len))
			is_phys = true;
		else
			return JIM_ERR;
	}

	/* Argument checks */
	if (len == 0) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
		return JIM_ERR;
	}
	/* Reject requests whose end address wraps past the top of memory. */
	if ((addr + (len * width)) < addr) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
		return JIM_ERR;
	}
	if (len > 65536) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"mem2array: too large read request, exceeds 64K items", NULL);
		return JIM_ERR;
	}

	/* The address must be naturally aligned for the element width. */
	if ((width == 1) ||
		((width == 2) && ((addr & 1) == 0)) ||
		((width == 4) && ((addr & 3) == 0)) ||
		((width == 8) && ((addr & 7) == 0))) {
		/* alignment correct */
	} else {
		char buf[100];
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
				addr,
				width);
		Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
		return JIM_ERR;
	}

	/* Transfer loop */

	/* index counter */
	size_t idx = 0;

	/* Read in 4 KiB chunks so arbitrary counts need no large allocation. */
	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);
	if (!buffer)
		return JIM_ERR;

	/* assume ok */
	e = JIM_OK;
	while (len) {
		/* Slurp... in buffer size chunks */
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */

		int retval;
		if (is_phys)
			retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_read_memory(target, addr, width, chunk_len, buffer);
		if (retval != ERROR_OK) {
			/* BOO !*/
			LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
					  addr,
					  width,
					  chunk_len);
			Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
			Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
			e = JIM_ERR;
			break;
		} else {
			/* Decode each element using the target byte-order helpers
			 * and store it into the Tcl array. */
			for (size_t i = 0; i < chunk_len ; i++, idx++) {
				uint64_t v = 0;
				switch (width) {
					case 8:
						v = target_buffer_get_u64(target, &buffer[i*width]);
						break;
					case 4:
						v = target_buffer_get_u32(target, &buffer[i*width]);
						break;
					case 2:
						v = target_buffer_get_u16(target, &buffer[i*width]);
						break;
					case 1:
						v = buffer[i] & 0x0ff;
						break;
				}
				new_u64_array_element(interp, varname, idx, v);
			}
			len -= chunk_len;
			addr += chunk_len * width;
		}
	}

	free(buffer);

	Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));

	return e;
}
4580
4581 COMMAND_HANDLER(handle_target_read_memory)
4582 {
4583 /*
4584 * CMD_ARGV[0] = memory address
4585 * CMD_ARGV[1] = desired element width in bits
4586 * CMD_ARGV[2] = number of elements to read
4587 * CMD_ARGV[3] = optional "phys"
4588 */
4589
4590 if (CMD_ARGC < 3 || CMD_ARGC > 4)
4591 return ERROR_COMMAND_SYNTAX_ERROR;
4592
4593 /* Arg 1: Memory address. */
4594 target_addr_t addr;
4595 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], addr);
4596
4597 /* Arg 2: Bit width of one element. */
4598 unsigned int width_bits;
4599 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], width_bits);
4600
4601 /* Arg 3: Number of elements to read. */
4602 unsigned int count;
4603 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
4604
4605 /* Arg 4: Optional 'phys'. */
4606 bool is_phys = false;
4607 if (CMD_ARGC == 4) {
4608 if (strcmp(CMD_ARGV[3], "phys")) {
4609 command_print(CMD, "invalid argument '%s', must be 'phys'", CMD_ARGV[3]);
4610 return ERROR_COMMAND_ARGUMENT_INVALID;
4611 }
4612
4613 is_phys = true;
4614 }
4615
4616 switch (width_bits) {
4617 case 8:
4618 case 16:
4619 case 32:
4620 case 64:
4621 break;
4622 default:
4623 command_print(CMD, "invalid width, must be 8, 16, 32 or 64");
4624 return ERROR_COMMAND_ARGUMENT_INVALID;
4625 }
4626
4627 const unsigned int width = width_bits / 8;
4628
4629 if ((addr + (count * width)) < addr) {
4630 command_print(CMD, "read_memory: addr + count wraps to zero");
4631 return ERROR_COMMAND_ARGUMENT_INVALID;
4632 }
4633
4634 if (count > 65536) {
4635 command_print(CMD, "read_memory: too large read request, exceeds 64K elements");
4636 return ERROR_COMMAND_ARGUMENT_INVALID;
4637 }
4638
4639 struct target *target = get_current_target(CMD_CTX);
4640
4641 const size_t buffersize = 4096;
4642 uint8_t *buffer = malloc(buffersize);
4643
4644 if (!buffer) {
4645 LOG_ERROR("Failed to allocate memory");
4646 return ERROR_FAIL;
4647 }
4648
4649 char *separator = "";
4650 while (count > 0) {
4651 const unsigned int max_chunk_len = buffersize / width;
4652 const size_t chunk_len = MIN(count, max_chunk_len);
4653
4654 int retval;
4655
4656 if (is_phys)
4657 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4658 else
4659 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4660
4661 if (retval != ERROR_OK) {
4662 LOG_DEBUG("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4663 addr, width_bits, chunk_len);
4664 /*
4665 * FIXME: we append the errmsg to the list of value already read.
4666 * Add a way to flush and replace old output, but LOG_DEBUG() it
4667 */
4668 command_print(CMD, "read_memory: failed to read memory");
4669 free(buffer);
4670 return retval;
4671 }
4672
4673 for (size_t i = 0; i < chunk_len ; i++) {
4674 uint64_t v = 0;
4675
4676 switch (width) {
4677 case 8:
4678 v = target_buffer_get_u64(target, &buffer[i * width]);
4679 break;
4680 case 4:
4681 v = target_buffer_get_u32(target, &buffer[i * width]);
4682 break;
4683 case 2:
4684 v = target_buffer_get_u16(target, &buffer[i * width]);
4685 break;
4686 case 1:
4687 v = buffer[i];
4688 break;
4689 }
4690
4691 command_print_sameline(CMD, "%s0x%" PRIx64, separator, v);
4692 separator = " ";
4693 }
4694
4695 count -= chunk_len;
4696 addr += chunk_len * width;
4697 }
4698
4699 free(buffer);
4700
4701 return ERROR_OK;
4702 }
4703
4704 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4705 {
4706 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4707 if (!namebuf)
4708 return JIM_ERR;
4709
4710 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4711 if (!obj_name) {
4712 free(namebuf);
4713 return JIM_ERR;
4714 }
4715
4716 Jim_IncrRefCount(obj_name);
4717 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4718 Jim_DecrRefCount(interp, obj_name);
4719 free(namebuf);
4720 if (!obj_val)
4721 return JIM_ERR;
4722
4723 jim_wide wide_val;
4724 int result = Jim_GetWide(interp, obj_val, &wide_val);
4725 *val = wide_val;
4726 return result;
4727 }
4728
4729 static int target_array2mem(Jim_Interp *interp, struct target *target,
4730 int argc, Jim_Obj *const *argv)
4731 {
4732 int e;
4733
4734 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4735
4736 /* argv[0] = name of array from which to read the data
4737 * argv[1] = desired element width in bits
4738 * argv[2] = memory address
4739 * argv[3] = number of elements to write
4740 * argv[4] = optional "phys"
4741 */
4742 if (argc < 4 || argc > 5) {
4743 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4744 return JIM_ERR;
4745 }
4746
4747 /* Arg 0: Name of the array variable */
4748 const char *varname = Jim_GetString(argv[0], NULL);
4749
4750 /* Arg 1: Bit width of one element */
4751 long l;
4752 e = Jim_GetLong(interp, argv[1], &l);
4753 if (e != JIM_OK)
4754 return e;
4755 const unsigned int width_bits = l;
4756
4757 if (width_bits != 8 &&
4758 width_bits != 16 &&
4759 width_bits != 32 &&
4760 width_bits != 64) {
4761 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4762 Jim_AppendStrings(interp, Jim_GetResult(interp),
4763 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4764 return JIM_ERR;
4765 }
4766 const unsigned int width = width_bits / 8;
4767
4768 /* Arg 2: Memory address */
4769 jim_wide wide_addr;
4770 e = Jim_GetWide(interp, argv[2], &wide_addr);
4771 if (e != JIM_OK)
4772 return e;
4773 target_addr_t addr = (target_addr_t)wide_addr;
4774
4775 /* Arg 3: Number of elements to write */
4776 e = Jim_GetLong(interp, argv[3], &l);
4777 if (e != JIM_OK)
4778 return e;
4779 size_t len = l;
4780
4781 /* Arg 4: Phys */
4782 bool is_phys = false;
4783 if (argc > 4) {
4784 int str_len = 0;
4785 const char *phys = Jim_GetString(argv[4], &str_len);
4786 if (!strncmp(phys, "phys", str_len))
4787 is_phys = true;
4788 else
4789 return JIM_ERR;
4790 }
4791
4792 /* Argument checks */
4793 if (len == 0) {
4794 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4795 Jim_AppendStrings(interp, Jim_GetResult(interp),
4796 "array2mem: zero width read?", NULL);
4797 return JIM_ERR;
4798 }
4799
4800 if ((addr + (len * width)) < addr) {
4801 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4802 Jim_AppendStrings(interp, Jim_GetResult(interp),
4803 "array2mem: addr + len - wraps to zero?", NULL);
4804 return JIM_ERR;
4805 }
4806
4807 if (len > 65536) {
4808 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4809 Jim_AppendStrings(interp, Jim_GetResult(interp),
4810 "array2mem: too large memory write request, exceeds 64K items", NULL);
4811 return JIM_ERR;
4812 }
4813
4814 if ((width == 1) ||
4815 ((width == 2) && ((addr & 1) == 0)) ||
4816 ((width == 4) && ((addr & 3) == 0)) ||
4817 ((width == 8) && ((addr & 7) == 0))) {
4818 /* alignment correct */
4819 } else {
4820 char buf[100];
4821 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4822 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4823 addr,
4824 width);
4825 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4826 return JIM_ERR;
4827 }
4828
4829 /* Transfer loop */
4830
4831 /* assume ok */
4832 e = JIM_OK;
4833
4834 const size_t buffersize = 4096;
4835 uint8_t *buffer = malloc(buffersize);
4836 if (!buffer)
4837 return JIM_ERR;
4838
4839 /* index counter */
4840 size_t idx = 0;
4841
4842 while (len) {
4843 /* Slurp... in buffer size chunks */
4844 const unsigned int max_chunk_len = buffersize / width;
4845
4846 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4847
4848 /* Fill the buffer */
4849 for (size_t i = 0; i < chunk_len; i++, idx++) {
4850 uint64_t v = 0;
4851 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4852 free(buffer);
4853 return JIM_ERR;
4854 }
4855 switch (width) {
4856 case 8:
4857 target_buffer_set_u64(target, &buffer[i * width], v);
4858 break;
4859 case 4:
4860 target_buffer_set_u32(target, &buffer[i * width], v);
4861 break;
4862 case 2:
4863 target_buffer_set_u16(target, &buffer[i * width], v);
4864 break;
4865 case 1:
4866 buffer[i] = v & 0x0ff;
4867 break;
4868 }
4869 }
4870 len -= chunk_len;
4871
4872 /* Write the buffer to memory */
4873 int retval;
4874 if (is_phys)
4875 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4876 else
4877 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4878 if (retval != ERROR_OK) {
4879 /* BOO !*/
4880 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4881 addr,
4882 width,
4883 chunk_len);
4884 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4885 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4886 e = JIM_ERR;
4887 break;
4888 }
4889 addr += chunk_len * width;
4890 }
4891
4892 free(buffer);
4893
4894 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4895
4896 return e;
4897 }
4898
4899 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4900 Jim_Obj * const *argv)
4901 {
4902 /*
4903 * argv[1] = memory address
4904 * argv[2] = desired element width in bits
4905 * argv[3] = list of data to write
4906 * argv[4] = optional "phys"
4907 */
4908
4909 if (argc < 4 || argc > 5) {
4910 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4911 return JIM_ERR;
4912 }
4913
4914 /* Arg 1: Memory address. */
4915 int e;
4916 jim_wide wide_addr;
4917 e = Jim_GetWide(interp, argv[1], &wide_addr);
4918
4919 if (e != JIM_OK)
4920 return e;
4921
4922 target_addr_t addr = (target_addr_t)wide_addr;
4923
4924 /* Arg 2: Bit width of one element. */
4925 long l;
4926 e = Jim_GetLong(interp, argv[2], &l);
4927
4928 if (e != JIM_OK)
4929 return e;
4930
4931 const unsigned int width_bits = l;
4932 size_t count = Jim_ListLength(interp, argv[3]);
4933
4934 /* Arg 4: Optional 'phys'. */
4935 bool is_phys = false;
4936
4937 if (argc > 4) {
4938 const char *phys = Jim_GetString(argv[4], NULL);
4939
4940 if (strcmp(phys, "phys")) {
4941 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4942 return JIM_ERR;
4943 }
4944
4945 is_phys = true;
4946 }
4947
4948 switch (width_bits) {
4949 case 8:
4950 case 16:
4951 case 32:
4952 case 64:
4953 break;
4954 default:
4955 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4956 return JIM_ERR;
4957 }
4958
4959 const unsigned int width = width_bits / 8;
4960
4961 if ((addr + (count * width)) < addr) {
4962 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
4963 return JIM_ERR;
4964 }
4965
4966 if (count > 65536) {
4967 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
4968 return JIM_ERR;
4969 }
4970
4971 struct command_context *cmd_ctx = current_command_context(interp);
4972 assert(cmd_ctx != NULL);
4973 struct target *target = get_current_target(cmd_ctx);
4974
4975 const size_t buffersize = 4096;
4976 uint8_t *buffer = malloc(buffersize);
4977
4978 if (!buffer) {
4979 LOG_ERROR("Failed to allocate memory");
4980 return JIM_ERR;
4981 }
4982
4983 size_t j = 0;
4984
4985 while (count > 0) {
4986 const unsigned int max_chunk_len = buffersize / width;
4987 const size_t chunk_len = MIN(count, max_chunk_len);
4988
4989 for (size_t i = 0; i < chunk_len; i++, j++) {
4990 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
4991 jim_wide element_wide;
4992 Jim_GetWide(interp, tmp, &element_wide);
4993
4994 const uint64_t v = element_wide;
4995
4996 switch (width) {
4997 case 8:
4998 target_buffer_set_u64(target, &buffer[i * width], v);
4999 break;
5000 case 4:
5001 target_buffer_set_u32(target, &buffer[i * width], v);
5002 break;
5003 case 2:
5004 target_buffer_set_u16(target, &buffer[i * width], v);
5005 break;
5006 case 1:
5007 buffer[i] = v & 0x0ff;
5008 break;
5009 }
5010 }
5011
5012 count -= chunk_len;
5013
5014 int retval;
5015
5016 if (is_phys)
5017 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5018 else
5019 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5020
5021 if (retval != ERROR_OK) {
5022 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5023 addr, width_bits, chunk_len);
5024 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5025 e = JIM_ERR;
5026 break;
5027 }
5028
5029 addr += chunk_len * width;
5030 }
5031
5032 free(buffer);
5033
5034 return e;
5035 }
5036
/* FIX? should we propagate errors here rather than printing them
 * and continuing?
 */
/* Run every Tcl event handler registered on 'target' for event 'e'.
 * Handler bodies execute with the command context's current target
 * temporarily overridden to 'target'; handler errors are logged and
 * the interpreter's error state is cleared, but never propagated. */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	/* Multiple handlers may be bound to the same event; run them all. */
	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
					   target->target_number,
					   target_name(target),
					   target_type_name(target),
					   e,
					   target_event_name(e),
					   Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			cmd_ctx->current_target_override = saved_target_override;

			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* A 'return' inside the handler body is not an error;
			 * unwrap the real completion code. */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
						  target_event_name(e),
						  target_name(target),
						  Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
5085
/* Tcl backend for 'get_reg [-force] list': return a dict mapping each
 * requested register name to its value formatted as a "0x..." hex string.
 * With -force the register is re-read from the target instead of using
 * the cached value.  Returns JIM_OK/JIM_ERR. */
static int target_jim_get_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	bool force = false;

	/* Optional leading '-force' flag; shift it out of argv. */
	if (argc == 3) {
		const char *option = Jim_GetString(argv[1], NULL);

		if (!strcmp(option, "-force")) {
			argc--;
			argv++;
			force = true;
		} else {
			Jim_SetResultFormatted(interp, "invalid option '%s'", option);
			return JIM_ERR;
		}
	}

	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
		return JIM_ERR;
	}

	const int length = Jim_ListLength(interp, argv[1]);

	Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);

	if (!result_dict)
		return JIM_ERR;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx != NULL);
	const struct target *target = get_current_target(cmd_ctx);

	for (int i = 0; i < length; i++) {
		Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);

		if (!elem)
			return JIM_ERR;

		const char *reg_name = Jim_String(elem);

		/* false: match against the full register cache, not only
		 * the ones marked for search -- see register_get_by_name(). */
		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		if (force) {
			/* Refresh the cached value from the target. */
			int retval = reg->type->get(reg);

			if (retval != ERROR_OK) {
				Jim_SetResultFormatted(interp, "failed to read register '%s'",
					reg_name);
				return JIM_ERR;
			}
		}

		/* Format the raw register bits as a hex string. */
		char *reg_value = buf_to_hex_str(reg->value, reg->size);

		if (!reg_value) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		char *tmp = alloc_printf("0x%s", reg_value);

		free(reg_value);

		if (!tmp) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		Jim_DictAddElement(interp, result_dict, elem,
			Jim_NewStringObj(interp, tmp, -1));

		free(tmp);
	}

	Jim_SetResult(interp, result_dict);

	return JIM_OK;
}
5172
/* Tcl backend for 'set_reg dict': write each {name value} pair of the
 * dict into the corresponding register of the current target.
 * Returns JIM_OK/JIM_ERR. */
static int target_jim_set_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "dict");
		return JIM_ERR;
	}

	/* Jim_DictPairs() changed signature with Jim Tcl 0.80. */
	int tmp;
#if JIM_VERSION >= 80
	Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);

	if (!dict)
		return JIM_ERR;
#else
	Jim_Obj **dict;
	int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);

	if (ret != JIM_OK)
		return ret;
#endif

	/* dict[] holds flattened {name, value} pairs, so step by 2. */
	const unsigned int length = tmp;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	const struct target *target = get_current_target(cmd_ctx);

	for (unsigned int i = 0; i < length; i += 2) {
		const char *reg_name = Jim_String(dict[i]);
		const char *reg_value = Jim_String(dict[i + 1]);
		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));

		if (!buf) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		/* NOTE(review): str_to_buf() result is not checked -- a malformed
		 * value string may write a partially-parsed buffer; confirm whether
		 * an explicit error is wanted here. */
		str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
		int retval = reg->type->set(reg, buf);
		free(buf);

		if (retval != ERROR_OK) {
			Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
				reg_value, reg_name);
			return JIM_ERR;
		}
	}

	return JIM_OK;
}
5231
5232 /**
5233 * Returns true only if the target has a handler for the specified event.
5234 */
5235 bool target_has_event_action(struct target *target, enum target_event event)
5236 {
5237 struct target_event_action *teap;
5238
5239 for (teap = target->event_action; teap; teap = teap->next) {
5240 if (teap->event == event)
5241 return true;
5242 }
5243 return false;
5244 }
5245
/* Parameter keys understood by 'target configure' / '$target cget'.
 * Each value is mapped to its "-option" string by nvp_config_opts[]. */
enum target_cfg_param {
	TCFG_TYPE,
	TCFG_EVENT,
	TCFG_WORK_AREA_VIRT,
	TCFG_WORK_AREA_PHYS,
	TCFG_WORK_AREA_SIZE,
	TCFG_WORK_AREA_BACKUP,
	TCFG_ENDIAN,
	TCFG_COREID,
	TCFG_CHAIN_POSITION,
	TCFG_DBGBASE,
	TCFG_RTOS,
	TCFG_DEFER_EXAMINE,
	TCFG_GDB_PORT,
	TCFG_GDB_MAX_CONNECTIONS,
};
5262
/* Name/value table mapping the "-option" strings of 'target configure'
 * to enum target_cfg_param; terminated by a NULL-name sentinel entry
 * as required by jim_getopt_nvp(). */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type",			   .value = TCFG_TYPE },
	{ .name = "-event",			   .value = TCFG_EVENT },
	{ .name = "-work-area-virt",   .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys",   .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size",   .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian",		   .value = TCFG_ENDIAN },
	{ .name = "-coreid",		   .value = TCFG_COREID },
	{ .name = "-chain-position",   .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase",		   .value = TCFG_DBGBASE },
	{ .name = "-rtos",			   .value = TCFG_RTOS },
	{ .name = "-defer-examine",	   .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port",		   .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections",	.value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
5280
5281 static int target_configure(struct jim_getopt_info *goi, struct target *target)
5282 {
5283 struct jim_nvp *n;
5284 Jim_Obj *o;
5285 jim_wide w;
5286 int e;
5287
5288 /* parse config or cget options ... */
5289 while (goi->argc > 0) {
5290 Jim_SetEmptyResult(goi->interp);
5291 /* jim_getopt_debug(goi); */
5292
5293 if (target->type->target_jim_configure) {
5294 /* target defines a configure function */
5295 /* target gets first dibs on parameters */
5296 e = (*(target->type->target_jim_configure))(target, goi);
5297 if (e == JIM_OK) {
5298 /* more? */
5299 continue;
5300 }
5301 if (e == JIM_ERR) {
5302 /* An error */
5303 return e;
5304 }
5305 /* otherwise we 'continue' below */
5306 }
5307 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
5308 if (e != JIM_OK) {
5309 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
5310 return e;
5311 }
5312 switch (n->value) {
5313 case TCFG_TYPE:
5314 /* not settable */
5315 if (goi->isconfigure) {
5316 Jim_SetResultFormatted(goi->interp,
5317 "not settable: %s", n->name);
5318 return JIM_ERR;
5319 } else {
5320 no_params:
5321 if (goi->argc != 0) {
5322 Jim_WrongNumArgs(goi->interp,
5323 goi->argc, goi->argv,
5324 "NO PARAMS");
5325 return JIM_ERR;
5326 }
5327 }
5328 Jim_SetResultString(goi->interp,
5329 target_type_name(target), -1);
5330 /* loop for more */
5331 break;
5332 case TCFG_EVENT:
5333 if (goi->argc == 0) {
5334 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
5335 return JIM_ERR;
5336 }
5337
5338 e = jim_getopt_nvp(goi, nvp_target_event, &n);
5339 if (e != JIM_OK) {
5340 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
5341 return e;
5342 }
5343
5344 if (goi->isconfigure) {
5345 if (goi->argc != 1) {
5346 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
5347 return JIM_ERR;
5348 }
5349 } else {
5350 if (goi->argc != 0) {
5351 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
5352 return JIM_ERR;
5353 }
5354 }
5355
5356 {
5357 struct target_event_action *teap;
5358
5359 teap = target->event_action;
5360 /* replace existing? */
5361 while (teap) {
5362 if (teap->event == (enum target_event)n->value)
5363 break;
5364 teap = teap->next;
5365 }
5366
5367 if (goi->isconfigure) {
5368 /* START_DEPRECATED_TPIU */
5369 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5370 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5371 /* END_DEPRECATED_TPIU */
5372
5373 bool replace = true;
5374 if (!teap) {
5375 /* create new */
5376 teap = calloc(1, sizeof(*teap));
5377 replace = false;
5378 }
5379 teap->event = n->value;
5380 teap->interp = goi->interp;
5381 jim_getopt_obj(goi, &o);
5382 if (teap->body)
5383 Jim_DecrRefCount(teap->interp, teap->body);
5384 teap->body = Jim_DuplicateObj(goi->interp, o);
5385 /*
5386 * FIXME:
5387 * Tcl/TK - "tk events" have a nice feature.
5388 * See the "BIND" command.
5389 * We should support that here.
5390 * You can specify %X and %Y in the event code.
5391 * The idea is: %T - target name.
5392 * The idea is: %N - target number
5393 * The idea is: %E - event name.
5394 */
5395 Jim_IncrRefCount(teap->body);
5396
5397 if (!replace) {
5398 /* add to head of event list */
5399 teap->next = target->event_action;
5400 target->event_action = teap;
5401 }
5402 Jim_SetEmptyResult(goi->interp);
5403 } else {
5404 /* get */
5405 if (!teap)
5406 Jim_SetEmptyResult(goi->interp);
5407 else
5408 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5409 }
5410 }
5411 /* loop for more */
5412 break;
5413
5414 case TCFG_WORK_AREA_VIRT:
5415 if (goi->isconfigure) {
5416 target_free_all_working_areas(target);
5417 e = jim_getopt_wide(goi, &w);
5418 if (e != JIM_OK)
5419 return e;
5420 target->working_area_virt = w;
5421 target->working_area_virt_spec = true;
5422 } else {
5423 if (goi->argc != 0)
5424 goto no_params;
5425 }
5426 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5427 /* loop for more */
5428 break;
5429
5430 case TCFG_WORK_AREA_PHYS:
5431 if (goi->isconfigure) {
5432 target_free_all_working_areas(target);
5433 e = jim_getopt_wide(goi, &w);
5434 if (e != JIM_OK)
5435 return e;
5436 target->working_area_phys = w;
5437 target->working_area_phys_spec = true;
5438 } else {
5439 if (goi->argc != 0)
5440 goto no_params;
5441 }
5442 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5443 /* loop for more */
5444 break;
5445
5446 case TCFG_WORK_AREA_SIZE:
5447 if (goi->isconfigure) {
5448 target_free_all_working_areas(target);
5449 e = jim_getopt_wide(goi, &w);
5450 if (e != JIM_OK)
5451 return e;
5452 target->working_area_size = w;
5453 } else {
5454 if (goi->argc != 0)
5455 goto no_params;
5456 }
5457 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5458 /* loop for more */
5459 break;
5460
5461 case TCFG_WORK_AREA_BACKUP:
5462 if (goi->isconfigure) {
5463 target_free_all_working_areas(target);
5464 e = jim_getopt_wide(goi, &w);
5465 if (e != JIM_OK)
5466 return e;
5467 /* make this exactly 1 or 0 */
5468 target->backup_working_area = (!!w);
5469 } else {
5470 if (goi->argc != 0)
5471 goto no_params;
5472 }
5473 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5474 /* loop for more e*/
5475 break;
5476
5477
5478 case TCFG_ENDIAN:
5479 if (goi->isconfigure) {
5480 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5481 if (e != JIM_OK) {
5482 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5483 return e;
5484 }
5485 target->endianness = n->value;
5486 } else {
5487 if (goi->argc != 0)
5488 goto no_params;
5489 }
5490 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5491 if (!n->name) {
5492 target->endianness = TARGET_LITTLE_ENDIAN;
5493 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5494 }
5495 Jim_SetResultString(goi->interp, n->name, -1);
5496 /* loop for more */
5497 break;
5498
5499 case TCFG_COREID:
5500 if (goi->isconfigure) {
5501 e = jim_getopt_wide(goi, &w);
5502 if (e != JIM_OK)
5503 return e;
5504 target->coreid = (int32_t)w;
5505 } else {
5506 if (goi->argc != 0)
5507 goto no_params;
5508 }
5509 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5510 /* loop for more */
5511 break;
5512
5513 case TCFG_CHAIN_POSITION:
5514 if (goi->isconfigure) {
5515 Jim_Obj *o_t;
5516 struct jtag_tap *tap;
5517
5518 if (target->has_dap) {
5519 Jim_SetResultString(goi->interp,
5520 "target requires -dap parameter instead of -chain-position!", -1);
5521 return JIM_ERR;
5522 }
5523
5524 target_free_all_working_areas(target);
5525 e = jim_getopt_obj(goi, &o_t);
5526 if (e != JIM_OK)
5527 return e;
5528 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5529 if (!tap)
5530 return JIM_ERR;
5531 target->tap = tap;
5532 target->tap_configured = true;
5533 } else {
5534 if (goi->argc != 0)
5535 goto no_params;
5536 }
5537 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5538 /* loop for more e*/
5539 break;
5540 case TCFG_DBGBASE:
5541 if (goi->isconfigure) {
5542 e = jim_getopt_wide(goi, &w);
5543 if (e != JIM_OK)
5544 return e;
5545 target->dbgbase = (uint32_t)w;
5546 target->dbgbase_set = true;
5547 } else {
5548 if (goi->argc != 0)
5549 goto no_params;
5550 }
5551 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5552 /* loop for more */
5553 break;
5554 case TCFG_RTOS:
5555 /* RTOS */
5556 {
5557 int result = rtos_create(goi, target);
5558 if (result != JIM_OK)
5559 return result;
5560 }
5561 /* loop for more */
5562 break;
5563
5564 case TCFG_DEFER_EXAMINE:
5565 /* DEFER_EXAMINE */
5566 target->defer_examine = true;
5567 /* loop for more */
5568 break;
5569
5570 case TCFG_GDB_PORT:
5571 if (goi->isconfigure) {
5572 struct command_context *cmd_ctx = current_command_context(goi->interp);
5573 if (cmd_ctx->mode != COMMAND_CONFIG) {
5574 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5575 return JIM_ERR;
5576 }
5577
5578 const char *s;
5579 e = jim_getopt_string(goi, &s, NULL);
5580 if (e != JIM_OK)
5581 return e;
5582 free(target->gdb_port_override);
5583 target->gdb_port_override = strdup(s);
5584 } else {
5585 if (goi->argc != 0)
5586 goto no_params;
5587 }
5588 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5589 /* loop for more */
5590 break;
5591
5592 case TCFG_GDB_MAX_CONNECTIONS:
5593 if (goi->isconfigure) {
5594 struct command_context *cmd_ctx = current_command_context(goi->interp);
5595 if (cmd_ctx->mode != COMMAND_CONFIG) {
5596 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5597 return JIM_ERR;
5598 }
5599
5600 e = jim_getopt_wide(goi, &w);
5601 if (e != JIM_OK)
5602 return e;
5603 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5604 } else {
5605 if (goi->argc != 0)
5606 goto no_params;
5607 }
5608 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5609 break;
5610 }
5611 } /* while (goi->argc) */
5612
5613
5614 /* done - we return */
5615 return JIM_OK;
5616 }
5617
5618 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5619 {
5620 struct command *c = jim_to_command(interp);
5621 struct jim_getopt_info goi;
5622
5623 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5624 goi.isconfigure = !strcmp(c->name, "configure");
5625 if (goi.argc < 1) {
5626 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5627 "missing: -option ...");
5628 return JIM_ERR;
5629 }
5630 struct command_context *cmd_ctx = current_command_context(interp);
5631 assert(cmd_ctx);
5632 struct target *target = get_current_target(cmd_ctx);
5633 return target_configure(&goi, target);
5634 }
5635
5636 static int jim_target_mem2array(Jim_Interp *interp,
5637 int argc, Jim_Obj *const *argv)
5638 {
5639 struct command_context *cmd_ctx = current_command_context(interp);
5640 assert(cmd_ctx);
5641 struct target *target = get_current_target(cmd_ctx);
5642 return target_mem2array(interp, target, argc - 1, argv + 1);
5643 }
5644
5645 static int jim_target_array2mem(Jim_Interp *interp,
5646 int argc, Jim_Obj *const *argv)
5647 {
5648 struct command_context *cmd_ctx = current_command_context(interp);
5649 assert(cmd_ctx);
5650 struct target *target = get_current_target(cmd_ctx);
5651 return target_array2mem(interp, target, argc - 1, argv + 1);
5652 }
5653
5654 COMMAND_HANDLER(handle_target_examine)
5655 {
5656 bool allow_defer = false;
5657
5658 if (CMD_ARGC > 1)
5659 return ERROR_COMMAND_SYNTAX_ERROR;
5660
5661 if (CMD_ARGC == 1) {
5662 if (strcmp(CMD_ARGV[0], "allow-defer"))
5663 return ERROR_COMMAND_ARGUMENT_INVALID;
5664 allow_defer = true;
5665 }
5666
5667 struct target *target = get_current_target(CMD_CTX);
5668 if (!target->tap->enabled) {
5669 command_print(CMD, "[TAP is disabled]");
5670 return ERROR_FAIL;
5671 }
5672
5673 if (allow_defer && target->defer_examine) {
5674 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5675 LOG_INFO("Use arp_examine command to examine it manually!");
5676 return ERROR_OK;
5677 }
5678
5679 int retval = target->type->examine(target);
5680 if (retval != ERROR_OK) {
5681 target_reset_examined(target);
5682 return retval;
5683 }
5684
5685 target_set_examined(target);
5686
5687 return ERROR_OK;
5688 }
5689
5690 COMMAND_HANDLER(handle_target_was_examined)
5691 {
5692 if (CMD_ARGC != 0)
5693 return ERROR_COMMAND_SYNTAX_ERROR;
5694
5695 struct target *target = get_current_target(CMD_CTX);
5696
5697 command_print(CMD, "%d", target_was_examined(target) ? 1 : 0);
5698
5699 return ERROR_OK;
5700 }
5701
5702 COMMAND_HANDLER(handle_target_examine_deferred)
5703 {
5704 if (CMD_ARGC != 0)
5705 return ERROR_COMMAND_SYNTAX_ERROR;
5706
5707 struct target *target = get_current_target(CMD_CTX);
5708
5709 command_print(CMD, "%d", target->defer_examine ? 1 : 0);
5710
5711 return ERROR_OK;
5712 }
5713
5714 COMMAND_HANDLER(handle_target_halt_gdb)
5715 {
5716 if (CMD_ARGC != 0)
5717 return ERROR_COMMAND_SYNTAX_ERROR;
5718
5719 struct target *target = get_current_target(CMD_CTX);
5720
5721 return target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
5722 }
5723
5724 COMMAND_HANDLER(handle_target_poll)
5725 {
5726 if (CMD_ARGC != 0)
5727 return ERROR_COMMAND_SYNTAX_ERROR;
5728
5729 struct target *target = get_current_target(CMD_CTX);
5730 if (!target->tap->enabled) {
5731 command_print(CMD, "[TAP is disabled]");
5732 return ERROR_FAIL;
5733 }
5734
5735 if (!(target_was_examined(target)))
5736 return ERROR_TARGET_NOT_EXAMINED;
5737
5738 return target->type->poll(target);
5739 }
5740
5741 COMMAND_HANDLER(handle_target_reset)
5742 {
5743 if (CMD_ARGC != 2)
5744 return ERROR_COMMAND_SYNTAX_ERROR;
5745
5746 const struct nvp *n = nvp_name2value(nvp_assert, CMD_ARGV[0]);
5747 if (!n->name) {
5748 nvp_unknown_command_print(CMD, nvp_assert, NULL, CMD_ARGV[0]);
5749 return ERROR_COMMAND_ARGUMENT_INVALID;
5750 }
5751
5752 /* the halt or not param */
5753 int a;
5754 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], a);
5755
5756 struct target *target = get_current_target(CMD_CTX);
5757 if (!target->tap->enabled) {
5758 command_print(CMD, "[TAP is disabled]");
5759 return ERROR_FAIL;
5760 }
5761
5762 if (!target->type->assert_reset || !target->type->deassert_reset) {
5763 command_print(CMD, "No target-specific reset for %s", target_name(target));
5764 return ERROR_FAIL;
5765 }
5766
5767 if (target->defer_examine)
5768 target_reset_examined(target);
5769
5770 /* determine if we should halt or not. */
5771 target->reset_halt = (a != 0);
5772 /* When this happens - all workareas are invalid. */
5773 target_free_all_working_areas_restore(target, 0);
5774
5775 /* do the assert */
5776 if (n->value == NVP_ASSERT)
5777 return target->type->assert_reset(target);
5778 return target->type->deassert_reset(target);
5779 }
5780
5781 COMMAND_HANDLER(handle_target_halt)
5782 {
5783 if (CMD_ARGC != 0)
5784 return ERROR_COMMAND_SYNTAX_ERROR;
5785
5786 struct target *target = get_current_target(CMD_CTX);
5787 if (!target->tap->enabled) {
5788 command_print(CMD, "[TAP is disabled]");
5789 return ERROR_FAIL;
5790 }
5791
5792 return target->type->halt(target);
5793 }
5794
5795 COMMAND_HANDLER(handle_target_wait_state)
5796 {
5797 if (CMD_ARGC != 2)
5798 return ERROR_COMMAND_SYNTAX_ERROR;
5799
5800 const struct nvp *n = nvp_name2value(nvp_target_state, CMD_ARGV[0]);
5801 if (!n->name) {
5802 nvp_unknown_command_print(CMD, nvp_target_state, NULL, CMD_ARGV[0]);
5803 return ERROR_COMMAND_ARGUMENT_INVALID;
5804 }
5805
5806 unsigned int a;
5807 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], a);
5808
5809 struct target *target = get_current_target(CMD_CTX);
5810 if (!target->tap->enabled) {
5811 command_print(CMD, "[TAP is disabled]");
5812 return ERROR_FAIL;
5813 }
5814
5815 int retval = target_wait_state(target, n->value, a);
5816 if (retval != ERROR_OK) {
5817 command_print(CMD,
5818 "target: %s wait %s fails (%d) %s",
5819 target_name(target), n->name,
5820 retval, target_strerror_safe(retval));
5821 return retval;
5822 }
5823 return ERROR_OK;
5824 }
/* List, for humans, the events defined for this target.
 * Scripts/programs should use 'name cget -event NAME' instead.
 */
5828 COMMAND_HANDLER(handle_target_event_list)
5829 {
5830 struct target *target = get_current_target(CMD_CTX);
5831 struct target_event_action *teap = target->event_action;
5832
5833 command_print(CMD, "Event actions for target (%d) %s\n",
5834 target->target_number,
5835 target_name(target));
5836 command_print(CMD, "%-25s | Body", "Event");
5837 command_print(CMD, "------------------------- | "
5838 "----------------------------------------");
5839 while (teap) {
5840 command_print(CMD, "%-25s | %s",
5841 target_event_name(teap->event),
5842 Jim_GetString(teap->body, NULL));
5843 teap = teap->next;
5844 }
5845 command_print(CMD, "***END***");
5846 return ERROR_OK;
5847 }
5848
5849 COMMAND_HANDLER(handle_target_current_state)
5850 {
5851 if (CMD_ARGC != 0)
5852 return ERROR_COMMAND_SYNTAX_ERROR;
5853
5854 struct target *target = get_current_target(CMD_CTX);
5855
5856 command_print(CMD, "%s", target_state_name(target));
5857
5858 return ERROR_OK;
5859 }
5860
5861 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5862 {
5863 struct jim_getopt_info goi;
5864 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5865 if (goi.argc != 1) {
5866 const char *cmd_name = Jim_GetString(argv[0], NULL);
5867 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5868 return JIM_ERR;
5869 }
5870 struct jim_nvp *n;
5871 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5872 if (e != JIM_OK) {
5873 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5874 return e;
5875 }
5876 struct command_context *cmd_ctx = current_command_context(interp);
5877 assert(cmd_ctx);
5878 struct target *target = get_current_target(cmd_ctx);
5879 target_handle_event(target, n->value);
5880 return JIM_OK;
5881 }
5882
/*
 * Subcommands registered on every target instance, reached as
 * "<target-name> <subcommand> ...".  In target_create() this table is
 * chained together with the target-type specific commands under the
 * command named after the target.
 */
static const struct command_registration target_instance_command_handlers[] = {
	/* attribute access (configure sets, cget queries) */
	{
		.name = "configure",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_configure,
		.help = "configure a new target for use",
		.usage = "[target_attribute ...]",
	},
	{
		.name = "cget",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_configure,
		.help = "returns the specified target attribute",
		.usage = "target_attribute",
	},
	/* memory write commands (64/32/16/8 bit) */
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 64-bit word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 32-bit word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 16-bit half-word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write byte(s) to target memory",
		.usage = "address data [count]",
	},
	/* memory display commands (64/32/16/8 bit) */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 64-bit words",
		.usage = "address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 32-bit words",
		.usage = "address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 16-bit half-words",
		.usage = "address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 8-bit bytes",
		.usage = "address [count]",
	},
	/* Tcl-level memory/register access */
	{
		.name = "array2mem",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_array2mem,
		.help = "Writes Tcl array of 8/16/32 bit numbers "
			"to target memory",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "mem2array",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_mem2array,
		.help = "Loads Tcl array of 8/16/32 bit numbers "
			"from target memory",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.handler = handle_target_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	/* introspection */
	{
		.name = "eventlist",
		.handler = handle_target_event_list,
		.mode = COMMAND_EXEC,
		.help = "displays a table of events defined for this target",
		.usage = "",
	},
	{
		.name = "curstate",
		.mode = COMMAND_EXEC,
		.handler = handle_target_current_state,
		.help = "displays the current state of this target",
		.usage = "",
	},
	/* arp_* commands: internal plumbing used by the reset framework */
	{
		.name = "arp_examine",
		.mode = COMMAND_EXEC,
		.handler = handle_target_examine,
		.help = "used internally for reset processing",
		.usage = "['allow-defer']",
	},
	{
		.name = "was_examined",
		.mode = COMMAND_EXEC,
		.handler = handle_target_was_examined,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "examine_deferred",
		.mode = COMMAND_EXEC,
		.handler = handle_target_examine_deferred,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "arp_halt_gdb",
		.mode = COMMAND_EXEC,
		.handler = handle_target_halt_gdb,
		.help = "used internally for reset processing to halt GDB",
		.usage = "",
	},
	{
		.name = "arp_poll",
		.mode = COMMAND_EXEC,
		.handler = handle_target_poll,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "arp_reset",
		.mode = COMMAND_EXEC,
		.handler = handle_target_reset,
		.help = "used internally for reset processing",
		.usage = "'assert'|'deassert' halt",
	},
	{
		.name = "arp_halt",
		.mode = COMMAND_EXEC,
		.handler = handle_target_halt,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "arp_waitstate",
		.mode = COMMAND_EXEC,
		.handler = handle_target_wait_state,
		.help = "used internally for reset processing",
		.usage = "statename timeoutmsecs",
	},
	{
		.name = "invoke-event",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_invoke_event,
		.help = "invoke handler for specified event",
		.usage = "event_name",
	},
	COMMAND_REGISTRATION_DONE
};
6077
/*
 * Implementation of 'target create <name> <type> [options ...]'.
 *
 * Allocates and default-initializes a struct target, applies the remaining
 * arguments as "configure" options, runs the type-specific target_create
 * hook and registers the per-target command group named after the target.
 * On success the new target becomes the current target and JIM_OK is
 * returned; on any failure all partial allocations are released and
 * JIM_ERR (or the Jim error code) is returned.
 */
static int target_create(struct jim_getopt_info *goi)
{
	Jim_Obj *new_cmd;
	Jim_Cmd *cmd;
	const char *cp;
	int e;
	int x;
	struct target *target;
	struct command_context *cmd_ctx;

	cmd_ctx = current_command_context(goi->interp);
	assert(cmd_ctx);

	if (goi->argc < 3) {
		Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
		return JIM_ERR;
	}

	/* COMMAND: the target name must not clash with an existing command */
	jim_getopt_obj(goi, &new_cmd);
	/* does this command exist? */
	cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
	if (cmd) {
		cp = Jim_GetString(new_cmd, NULL);
		Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
		return JIM_ERR;
	}

	/* TYPE: the transport may substitute its own target type */
	e = jim_getopt_string(goi, &cp, NULL);
	if (e != JIM_OK)
		return e;
	struct transport *tr = get_current_transport();
	if (tr->override_target) {
		e = tr->override_target(&cp);
		if (e != ERROR_OK) {
			LOG_ERROR("The selected transport doesn't support this target");
			return JIM_ERR;
		}
		LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
	}
	/* now does target type exist */
	for (x = 0 ; target_types[x] ; x++) {
		if (strcmp(cp, target_types[x]->name) == 0) {
			/* found */
			break;
		}
	}
	if (!target_types[x]) {
		/* unknown type: build a helpful "try one of a, b or c" message */
		Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
		for (x = 0 ; target_types[x] ; x++) {
			if (target_types[x + 1]) {
				Jim_AppendStrings(goi->interp,
								   Jim_GetResult(goi->interp),
								   target_types[x]->name,
								   ", ", NULL);
			} else {
				Jim_AppendStrings(goi->interp,
								   Jim_GetResult(goi->interp),
								   " or ",
								   target_types[x]->name, NULL);
			}
		}
		return JIM_ERR;
	}

	/* Create it */
	target = calloc(1, sizeof(struct target));
	if (!target) {
		LOG_ERROR("Out of memory");
		return JIM_ERR;
	}

	/* set empty smp cluster */
	target->smp_targets = &empty_smp_targets;

	/* set target number */
	target->target_number = new_target_number();

	/* allocate memory for each unique target type; each target gets its
	 * own private copy of the target_type struct */
	target->type = malloc(sizeof(struct target_type));
	if (!target->type) {
		LOG_ERROR("Out of memory");
		free(target);
		return JIM_ERR;
	}

	memcpy(target->type, target_types[x], sizeof(struct target_type));

	/* default to first core, override with -coreid */
	target->coreid = 0;

	/* defaults for fields not covered by calloc's zeroing semantics */
	target->working_area = 0x0;
	target->working_area_size = 0x0;
	target->working_areas = NULL;
	target->backup_working_area = 0;

	target->state = TARGET_UNKNOWN;
	target->debug_reason = DBG_REASON_UNDEFINED;
	target->reg_cache = NULL;
	target->breakpoints = NULL;
	target->watchpoints = NULL;
	target->next = NULL;
	target->arch_info = NULL;

	target->verbose_halt_msg = true;

	target->halt_issued = false;

	/* initialize trace information */
	target->trace_info = calloc(1, sizeof(struct trace));
	if (!target->trace_info) {
		LOG_ERROR("Out of memory");
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	target->dbgmsg = NULL;
	target->dbg_msg_enabled = 0;

	/* left unknown here; defaulted to little endian further below if the
	 * configure options did not set it */
	target->endianness = TARGET_ENDIAN_UNKNOWN;

	target->rtos = NULL;
	target->rtos_auto_detect = false;

	target->gdb_port_override = NULL;
	target->gdb_max_connections = 1;

	/* Do the rest as "configure" options */
	goi->isconfigure = 1;
	e = target_configure(goi, target);

	if (e == JIM_OK) {
		/* exactly one of -dap / -chain-position must have been given,
		 * matching whether the target type is DAP-based */
		if (target->has_dap) {
			if (!target->dap_configured) {
				Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		} else {
			if (!target->tap_configured) {
				Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		}
		/* tap must be set after target was configured */
		if (!target->tap)
			e = JIM_ERR;
	}

	if (e != JIM_OK) {
		/* undo everything allocated so far, in reverse order */
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return e;
	}

	if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
		/* default endian to little if not specified */
		target->endianness = TARGET_LITTLE_ENDIAN;
	}

	cp = Jim_GetString(new_cmd, NULL);
	target->cmd_name = strdup(cp);
	if (!target->cmd_name) {
		LOG_ERROR("Out of memory");
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* type-specific construction hook (allocates arch_info etc.) */
	if (target->type->target_create) {
		e = (*(target->type->target_create))(target, goi->interp);
		if (e != ERROR_OK) {
			LOG_DEBUG("target_create failed");
			free(target->cmd_name);
			rtos_destroy(target);
			free(target->gdb_port_override);
			free(target->trace_info);
			free(target->type);
			free(target);
			return JIM_ERR;
		}
	}

	/* create the target specific commands */
	if (target->type->commands) {
		e = register_commands(cmd_ctx, NULL, target->type->commands);
		if (e != ERROR_OK)
			LOG_ERROR("unable to register '%s' commands", cp);
	}

	/* now - create the new target name command: the generic per-instance
	 * handlers chained together with the type-specific ones */
	const struct command_registration target_subcommands[] = {
		{
			.chain = target_instance_command_handlers,
		},
		{
			.chain = target->type->commands,
		},
		COMMAND_REGISTRATION_DONE
	};
	const struct command_registration target_commands[] = {
		{
			.name = cp,
			.mode = COMMAND_ANY,
			.help = "target command group",
			.usage = "",
			.chain = target_subcommands,
		},
		COMMAND_REGISTRATION_DONE
	};
	e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
	if (e != ERROR_OK) {
		if (target->type->deinit_target)
			target->type->deinit_target(target);
		free(target->cmd_name);
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* append to end of list */
	append_to_list_all_targets(target);

	/* the freshly created target becomes the current one */
	cmd_ctx->current_target = target;
	return JIM_OK;
}
6314
6315 COMMAND_HANDLER(handle_target_current)
6316 {
6317 if (CMD_ARGC != 0)
6318 return ERROR_COMMAND_SYNTAX_ERROR;
6319
6320 struct target *target = get_current_target_or_null(CMD_CTX);
6321 if (target)
6322 command_print(CMD, "%s", target_name(target));
6323
6324 return ERROR_OK;
6325 }
6326
6327 COMMAND_HANDLER(handle_target_types)
6328 {
6329 if (CMD_ARGC != 0)
6330 return ERROR_COMMAND_SYNTAX_ERROR;
6331
6332 for (unsigned int x = 0; target_types[x]; x++)
6333 command_print(CMD, "%s", target_types[x]->name);
6334
6335 return ERROR_OK;
6336 }
6337
6338 COMMAND_HANDLER(handle_target_names)
6339 {
6340 if (CMD_ARGC != 0)
6341 return ERROR_COMMAND_SYNTAX_ERROR;
6342
6343 struct target *target = all_targets;
6344 while (target) {
6345 command_print(CMD, "%s", target_name(target));
6346 target = target->next;
6347 }
6348
6349 return ERROR_OK;
6350 }
6351
6352 static struct target_list *
6353 __attribute__((warn_unused_result))
6354 create_target_list_node(const char *targetname)
6355 {
6356 struct target *target = get_target(targetname);
6357 LOG_DEBUG("%s ", targetname);
6358 if (!target)
6359 return NULL;
6360
6361 struct target_list *new = malloc(sizeof(struct target_list));
6362 if (!new) {
6363 LOG_ERROR("Out of memory");
6364 return new;
6365 }
6366
6367 new->target = target;
6368 return new;
6369 }
6370
6371 static int get_target_with_common_rtos_type(struct command_invocation *cmd,
6372 struct list_head *lh, struct target **result)
6373 {
6374 struct target *target = NULL;
6375 struct target_list *curr;
6376 foreach_smp_target(curr, lh) {
6377 struct rtos *curr_rtos = curr->target->rtos;
6378 if (curr_rtos) {
6379 if (target && target->rtos && target->rtos->type != curr_rtos->type) {
6380 command_print(cmd, "Different rtos types in members of one smp target!");
6381 return ERROR_FAIL;
6382 }
6383 target = curr->target;
6384 }
6385 }
6386 *result = target;
6387 return ERROR_OK;
6388 }
6389
6390 COMMAND_HANDLER(handle_target_smp)
6391 {
6392 static int smp_group = 1;
6393
6394 if (CMD_ARGC == 0) {
6395 LOG_DEBUG("Empty SMP target");
6396 return ERROR_OK;
6397 }
6398 LOG_DEBUG("%d", CMD_ARGC);
6399 /* CMD_ARGC[0] = target to associate in smp
6400 * CMD_ARGC[1] = target to associate in smp
6401 * CMD_ARGC[2] ...
6402 */
6403
6404 struct list_head *lh = malloc(sizeof(*lh));
6405 if (!lh) {
6406 LOG_ERROR("Out of memory");
6407 return ERROR_FAIL;
6408 }
6409 INIT_LIST_HEAD(lh);
6410
6411 for (unsigned int i = 0; i < CMD_ARGC; i++) {
6412 struct target_list *new = create_target_list_node(CMD_ARGV[i]);
6413 if (new)
6414 list_add_tail(&new->lh, lh);
6415 }
6416 /* now parse the list of cpu and put the target in smp mode*/
6417 struct target_list *curr;
6418 foreach_smp_target(curr, lh) {
6419 struct target *target = curr->target;
6420 target->smp = smp_group;
6421 target->smp_targets = lh;
6422 }
6423 smp_group++;
6424
6425 struct target *rtos_target;
6426 int retval = get_target_with_common_rtos_type(CMD, lh, &rtos_target);
6427 if (retval == ERROR_OK && rtos_target)
6428 retval = rtos_smp_init(rtos_target);
6429
6430 return retval;
6431 }
6432
6433 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6434 {
6435 struct jim_getopt_info goi;
6436 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6437 if (goi.argc < 3) {
6438 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6439 "<name> <target_type> [<target_options> ...]");
6440 return JIM_ERR;
6441 }
6442 return target_create(&goi);
6443 }
6444
/*
 * Subcommands of the global 'target' command ('target create',
 * 'target names', ...), as opposed to the per-instance handlers in
 * target_instance_command_handlers above.
 */
static const struct command_registration target_subcommand_handlers[] = {
	{
		.name = "init",
		.mode = COMMAND_CONFIG,
		.handler = handle_target_init_command,
		.help = "initialize targets",
		.usage = "",
	},
	{
		.name = "create",
		.mode = COMMAND_CONFIG,
		.jim_handler = jim_target_create,
		.usage = "name type '-chain-position' name [options ...]",
		.help = "Creates and selects a new target",
	},
	{
		.name = "current",
		.mode = COMMAND_ANY,
		.handler = handle_target_current,
		.help = "Returns the currently selected target",
		.usage = "",
	},
	{
		.name = "types",
		.mode = COMMAND_ANY,
		.handler = handle_target_types,
		.help = "Returns the available target types as "
				"a list of strings",
		.usage = "",
	},
	{
		.name = "names",
		.mode = COMMAND_ANY,
		.handler = handle_target_names,
		.help = "Returns the names of all targets as a list of strings",
		.usage = "",
	},
	{
		.name = "smp",
		.mode = COMMAND_ANY,
		.handler = handle_target_smp,
		.usage = "targetname1 targetname2 ...",
		.help = "gather several target in a smp list"
	},

	COMMAND_REGISTRATION_DONE
};
6492
/* One contiguous chunk of image data buffered in host RAM by
 * 'fast_load_image', to be written to the target later by 'fast_load'. */
struct fast_load {
	target_addr_t address;	/* target address the data belongs at */
	uint8_t *data;		/* malloc'd copy of the (clipped) section data */
	int length;		/* number of bytes in data */

};
6499
/* Image staged in RAM by 'fast_load_image', consumed by 'fast_load'. */
static int fastload_num;		/* number of entries in fastload[] */
static struct fast_load *fastload;	/* NULL when no image is staged */
6502
6503 static void free_fastload(void)
6504 {
6505 if (fastload) {
6506 for (int i = 0; i < fastload_num; i++)
6507 free(fastload[i].data);
6508 free(fastload);
6509 fastload = NULL;
6510 }
6511 }
6512
6513 COMMAND_HANDLER(handle_fast_load_image_command)
6514 {
6515 uint8_t *buffer;
6516 size_t buf_cnt;
6517 uint32_t image_size;
6518 target_addr_t min_address = 0;
6519 target_addr_t max_address = -1;
6520
6521 struct image image;
6522
6523 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6524 &image, &min_address, &max_address);
6525 if (retval != ERROR_OK)
6526 return retval;
6527
6528 struct duration bench;
6529 duration_start(&bench);
6530
6531 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6532 if (retval != ERROR_OK)
6533 return retval;
6534
6535 image_size = 0x0;
6536 retval = ERROR_OK;
6537 fastload_num = image.num_sections;
6538 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6539 if (!fastload) {
6540 command_print(CMD, "out of memory");
6541 image_close(&image);
6542 return ERROR_FAIL;
6543 }
6544 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6545 for (unsigned int i = 0; i < image.num_sections; i++) {
6546 buffer = malloc(image.sections[i].size);
6547 if (!buffer) {
6548 command_print(CMD, "error allocating buffer for section (%d bytes)",
6549 (int)(image.sections[i].size));
6550 retval = ERROR_FAIL;
6551 break;
6552 }
6553
6554 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6555 if (retval != ERROR_OK) {
6556 free(buffer);
6557 break;
6558 }
6559
6560 uint32_t offset = 0;
6561 uint32_t length = buf_cnt;
6562
6563 /* DANGER!!! beware of unsigned comparison here!!! */
6564
6565 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6566 (image.sections[i].base_address < max_address)) {
6567 if (image.sections[i].base_address < min_address) {
6568 /* clip addresses below */
6569 offset += min_address-image.sections[i].base_address;
6570 length -= offset;
6571 }
6572
6573 if (image.sections[i].base_address + buf_cnt > max_address)
6574 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6575
6576 fastload[i].address = image.sections[i].base_address + offset;
6577 fastload[i].data = malloc(length);
6578 if (!fastload[i].data) {
6579 free(buffer);
6580 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6581 length);
6582 retval = ERROR_FAIL;
6583 break;
6584 }
6585 memcpy(fastload[i].data, buffer + offset, length);
6586 fastload[i].length = length;
6587
6588 image_size += length;
6589 command_print(CMD, "%u bytes written at address 0x%8.8x",
6590 (unsigned int)length,
6591 ((unsigned int)(image.sections[i].base_address + offset)));
6592 }
6593
6594 free(buffer);
6595 }
6596
6597 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6598 command_print(CMD, "Loaded %" PRIu32 " bytes "
6599 "in %fs (%0.3f KiB/s)", image_size,
6600 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6601
6602 command_print(CMD,
6603 "WARNING: image has not been loaded to target!"
6604 "You can issue a 'fast_load' to finish loading.");
6605 }
6606
6607 image_close(&image);
6608
6609 if (retval != ERROR_OK)
6610 free_fastload();
6611
6612 return retval;
6613 }
6614
6615 COMMAND_HANDLER(handle_fast_load_command)
6616 {
6617 if (CMD_ARGC > 0)
6618 return ERROR_COMMAND_SYNTAX_ERROR;
6619 if (!fastload) {
6620 LOG_ERROR("No image in memory");
6621 return ERROR_FAIL;
6622 }
6623 int i;
6624 int64_t ms = timeval_ms();
6625 int size = 0;
6626 int retval = ERROR_OK;
6627 for (i = 0; i < fastload_num; i++) {
6628 struct target *target = get_current_target(CMD_CTX);
6629 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6630 (unsigned int)(fastload[i].address),
6631 (unsigned int)(fastload[i].length));
6632 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6633 if (retval != ERROR_OK)
6634 break;
6635 size += fastload[i].length;
6636 }
6637 if (retval == ERROR_OK) {
6638 int64_t after = timeval_ms();
6639 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6640 }
6641 return retval;
6642 }
6643
6644 static const struct command_registration target_command_handlers[] = {
6645 {
6646 .name = "targets",
6647 .handler = handle_targets_command,
6648 .mode = COMMAND_ANY,
6649 .help = "change current default target (one parameter) "
6650 "or prints table of all targets (no parameters)",
6651 .usage = "[target]",
6652 },
6653 {
6654 .name = "target",
6655 .mode = COMMAND_CONFIG,
6656 .help = "configure target",
6657 .chain = target_subcommand_handlers,
6658 .usage = "",
6659 },
6660 COMMAND_REGISTRATION_DONE
6661 };
6662
/* Register the top-level "target"/"targets" commands with the command
 * context; called once during startup. */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6667
/* When true (the default), nag after each reset about options that could
 * improve performance; toggled by the "reset_nag" command below. */
static bool target_reset_nag = true;

/* Accessor so other modules can honor the "reset_nag" setting. */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
6674
/* "reset_nag" command: enable/disable (or, with no argument, report) the
 * post-reset performance nag; argument parsing and printing are delegated
 * to the generic boolean-option handler. */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
			&target_reset_nag, "Nag after each reset about options to improve "
			"performance");
}
6681
6682 COMMAND_HANDLER(handle_ps_command)
6683 {
6684 struct target *target = get_current_target(CMD_CTX);
6685 char *display;
6686 if (target->state != TARGET_HALTED) {
6687 command_print(CMD, "Error: [%s] not halted", target_name(target));
6688 return ERROR_TARGET_NOT_HALTED;
6689 }
6690
6691 if ((target->rtos) && (target->rtos->type)
6692 && (target->rtos->type->ps_command)) {
6693 display = target->rtos->type->ps_command(target);
6694 command_print(CMD, "%s", display);
6695 free(display);
6696 return ERROR_OK;
6697 } else {
6698 LOG_INFO("failed");
6699 return ERROR_TARGET_FAILURE;
6700 }
6701 }
6702
/* Print an optional label followed by size bytes of buf as space-separated
 * hex pairs, terminating the line. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
	if (text)
		command_print_sameline(cmd, "%s", text);

	int idx = 0;
	while (idx < size) {
		command_print_sameline(cmd, " %02x", buf[idx]);
		idx++;
	}
	command_print(cmd, " ");
}
6711
/*
 * "test_mem_access" command: exercise the target's memory accessors with
 * every combination of access size (1/2/4 bytes), target address offset
 * (0..3) and host buffer misalignment (0/1), comparing each transfer
 * against a host-side replay of the same operation.
 *
 * Phase 1 writes a random pattern into a working area and verifies reads;
 * phase 2 writes from host buffers into a fresh working area and reads
 * back to verify.
 *
 * NOTE(review): pre-existing issues worth confirming upstream:
 *  - none of the malloc() results (test_pattern, read_ref, read_buf,
 *    write_buf) are checked before use;
 *  - write_buf is never freed in the write-test loop (leak per iteration);
 *  - a pattern-write failure in the read phase jumps to "out:" yet
 *    execution still falls through into the write phase.
 */
COMMAND_HANDLER(handle_test_mem_access_command)
{
	struct target *target = get_current_target(CMD_CTX);
	uint32_t test_size;
	int retval = ERROR_OK;

	if (target->state != TARGET_HALTED) {
		command_print(CMD, "Error: [%s] not halted", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}

	if (CMD_ARGC != 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);

	/* Test reads */
	/* +4 bytes of slack so accesses at offsets 0..3 stay inside the area */
	size_t num_bytes = test_size + 4;

	struct working_area *wa = NULL;
	retval = target_alloc_working_area(target, num_bytes, &wa);
	if (retval != ERROR_OK) {
		LOG_ERROR("Not enough working area");
		return ERROR_FAIL;
	}

	uint8_t *test_pattern = malloc(num_bytes);

	/* random reference pattern that the target reads will be checked against */
	for (size_t i = 0; i < num_bytes; i++)
		test_pattern[i] = rand();

	retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
	if (retval != ERROR_OK) {
		LOG_ERROR("Test pattern write failed");
		goto out;
	}

	for (int host_offset = 0; host_offset <= 1; host_offset++) {
		for (int size = 1; size <= 4; size *= 2) {
			for (int offset = 0; offset < 4; offset++) {
				uint32_t count = test_size / size;
				size_t host_bufsiz = (count + 2) * size + host_offset;
				uint8_t *read_ref = malloc(host_bufsiz);
				uint8_t *read_buf = malloc(host_bufsiz);

				/* fill both buffers identically so bytes outside the
				 * transfer window must remain untouched by the read */
				for (size_t i = 0; i < host_bufsiz; i++) {
					read_ref[i] = rand();
					read_buf[i] = read_ref[i];
				}
				command_print_sameline(CMD,
						"Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
						size, offset, host_offset ? "un" : "");

				struct duration bench;
				duration_start(&bench);

				retval = target_read_memory(target, wa->address + offset, size, count,
						read_buf + size + host_offset);

				duration_measure(&bench);

				if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
					command_print(CMD, "Unsupported alignment");
					goto next;
				} else if (retval != ERROR_OK) {
					command_print(CMD, "Memory read failed");
					goto next;
				}

				/* replay on host */
				memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);

				/* check result */
				int result = memcmp(read_ref, read_buf, host_bufsiz);
				if (result == 0) {
					command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
							duration_elapsed(&bench),
							duration_kbps(&bench, count * size));
				} else {
					command_print(CMD, "Compare failed");
					binprint(CMD, "ref:", read_ref, host_bufsiz);
					binprint(CMD, "buf:", read_buf, host_bufsiz);
				}
next:
				free(read_ref);
				free(read_buf);
			}
		}
	}

out:
	/* NOTE(review): reached on pattern-write failure above, yet execution
	 * continues into the write test below regardless. */
	free(test_pattern);

	target_free_working_area(target, wa);

	/* Test writes */
	num_bytes = test_size + 4 + 4 + 4;

	retval = target_alloc_working_area(target, num_bytes, &wa);
	if (retval != ERROR_OK) {
		LOG_ERROR("Not enough working area");
		return ERROR_FAIL;
	}

	test_pattern = malloc(num_bytes);

	for (size_t i = 0; i < num_bytes; i++)
		test_pattern[i] = rand();

	for (int host_offset = 0; host_offset <= 1; host_offset++) {
		for (int size = 1; size <= 4; size *= 2) {
			for (int offset = 0; offset < 4; offset++) {
				uint32_t count = test_size / size;
				size_t host_bufsiz = count * size + host_offset;
				uint8_t *read_ref = malloc(num_bytes);
				uint8_t *read_buf = malloc(num_bytes);
				uint8_t *write_buf = malloc(host_bufsiz);

				for (size_t i = 0; i < host_bufsiz; i++)
					write_buf[i] = rand();
				command_print_sameline(CMD,
						"Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
						size, offset, host_offset ? "un" : "");

				/* restore a known pattern so the host replay below models
				 * the full working area, not just the written window */
				retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
				if (retval != ERROR_OK) {
					command_print(CMD, "Test pattern write failed");
					goto nextw;
				}

				/* replay on host */
				memcpy(read_ref, test_pattern, num_bytes);
				memcpy(read_ref + size + offset, write_buf + host_offset, count * size);

				struct duration bench;
				duration_start(&bench);

				retval = target_write_memory(target, wa->address + size + offset, size, count,
						write_buf + host_offset);

				duration_measure(&bench);

				if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
					command_print(CMD, "Unsupported alignment");
					goto nextw;
				} else if (retval != ERROR_OK) {
					command_print(CMD, "Memory write failed");
					goto nextw;
				}

				/* read back */
				retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
				if (retval != ERROR_OK) {
					/* NOTE(review): message says "write" but this is the
					 * read-back failing — looks like a copy/paste slip */
					command_print(CMD, "Test pattern write failed");
					goto nextw;
				}

				/* check result */
				int result = memcmp(read_ref, read_buf, num_bytes);
				if (result == 0) {
					command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
							duration_elapsed(&bench),
							duration_kbps(&bench, count * size));
				} else {
					command_print(CMD, "Compare failed");
					binprint(CMD, "ref:", read_ref, num_bytes);
					binprint(CMD, "buf:", read_buf, num_bytes);
				}
nextw:
				free(read_ref);
				free(read_buf);
			}
		}
	}

	free(test_pattern);

	target_free_working_area(target, wa);
	return retval;
}
6892
/* EXEC-mode target commands: run control, memory display/write, breakpoints
 * and watchpoints, image handling, register access and diagnostics.
 * Registered by target_register_user_commands() below. */
static const struct command_registration target_exec_command_handlers[] = {
	/* staged image download (profiling aid) */
	{
		.name = "fast_load_image",
		.handler = handle_fast_load_image_command,
		.mode = COMMAND_ANY,
		.help = "Load image into server memory for later use by "
			"fast_load; primarily for profiling",
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address [max_length]]",
	},
	{
		.name = "fast_load",
		.handler = handle_fast_load_command,
		.mode = COMMAND_EXEC,
		.help = "loads active fast load image to current target "
			"- mainly for profiling purposes",
		.usage = "",
	},
	{
		.name = "profile",
		.handler = handle_profile_command,
		.mode = COMMAND_EXEC,
		.usage = "seconds filename [start end]",
		.help = "profiling samples the CPU PC",
	},
	/** @todo don't register virt2phys() unless target supports it */
	{
		.name = "virt2phys",
		.handler = handle_virt2phys_command,
		.mode = COMMAND_ANY,
		.help = "translate a virtual address into a physical address",
		.usage = "virtual_address",
	},
	{
		.name = "reg",
		.handler = handle_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display (reread from target with \"force\") or set a register; "
			"with no arguments, displays all registers and their values",
		.usage = "[(register_number|register_name) [(value|'force')]]",
	},
	/* run control */
	{
		.name = "poll",
		.handler = handle_poll_command,
		.mode = COMMAND_EXEC,
		.help = "poll target state; or reconfigure background polling",
		.usage = "['on'|'off']",
	},
	{
		.name = "wait_halt",
		.handler = handle_wait_halt_command,
		.mode = COMMAND_EXEC,
		.help = "wait up to the specified number of milliseconds "
			"(default 5000) for a previously requested halt",
		.usage = "[milliseconds]",
	},
	{
		.name = "halt",
		.handler = handle_halt_command,
		.mode = COMMAND_EXEC,
		.help = "request target to halt, then wait up to the specified "
			"number of milliseconds (default 5000) for it to complete",
		.usage = "[milliseconds]",
	},
	{
		.name = "resume",
		.handler = handle_resume_command,
		.mode = COMMAND_EXEC,
		.help = "resume target execution from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "reset",
		.handler = handle_reset_command,
		.mode = COMMAND_EXEC,
		.usage = "[run|halt|init]",
		.help = "Reset all targets into the specified mode. "
			"Default reset mode is run, if not given.",
	},
	{
		.name = "soft_reset_halt",
		.handler = handle_soft_reset_halt_command,
		.mode = COMMAND_EXEC,
		.usage = "",
		.help = "halt the target and do a soft reset",
	},
	{
		.name = "step",
		.handler = handle_step_command,
		.mode = COMMAND_EXEC,
		.help = "step one instruction from current PC or address",
		.usage = "[address]",
	},
	/* memory display: mdd/mdw/mdh/mdb share one handler, dispatching on name */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory double-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory half-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory bytes",
		.usage = "['phys'] address [count]",
	},
	/* memory write: mwd/mww/mwh/mwb likewise share one handler */
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory double-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory half-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory byte",
		.usage = "['phys'] address value [count]",
	},
	/* breakpoints and watchpoints */
	{
		.name = "bp",
		.handler = handle_bp_command,
		.mode = COMMAND_EXEC,
		.help = "list or set hardware or software breakpoint",
		.usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
	},
	{
		.name = "rbp",
		.handler = handle_rbp_command,
		.mode = COMMAND_EXEC,
		.help = "remove breakpoint",
		.usage = "'all' | address",
	},
	{
		.name = "wp",
		.handler = handle_wp_command,
		.mode = COMMAND_EXEC,
		.help = "list (no params) or create watchpoints",
		.usage = "[address length [('r'|'w'|'a') value [mask]]]",
	},
	{
		.name = "rwp",
		.handler = handle_rwp_command,
		.mode = COMMAND_EXEC,
		.help = "remove watchpoint",
		.usage = "address",
	},
	/* image load/dump/verify */
	{
		.name = "load_image",
		.handler = handle_load_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address] [max_length]",
	},
	{
		.name = "dump_image",
		.handler = handle_dump_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address size",
	},
	{
		.name = "verify_image_checksum",
		.handler = handle_verify_image_checksum_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "verify_image",
		.handler = handle_verify_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "test_image",
		.handler = handle_test_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	/* Tcl-oriented register/memory access */
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.handler = handle_target_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	/* miscellaneous */
	{
		.name = "reset_nag",
		.handler = handle_target_reset_nag,
		.mode = COMMAND_ANY,
		.help = "Nag after each reset about options that could have been "
			"enabled to improve performance.",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "ps",
		.handler = handle_ps_command,
		.mode = COMMAND_EXEC,
		.help = "list all tasks",
		.usage = "",
	},
	{
		.name = "test_mem_access",
		.handler = handle_test_mem_access_command,
		.mode = COMMAND_EXEC,
		.help = "Test the target's memory access functions",
		.usage = "size",
	},

	COMMAND_REGISTRATION_DONE
};
7154 static int target_register_user_commands(struct command_context *cmd_ctx)
7155 {
7156 int retval = ERROR_OK;
7157 retval = target_request_register_commands(cmd_ctx);
7158 if (retval != ERROR_OK)
7159 return retval;
7160
7161 retval = trace_register_commands(cmd_ctx);
7162 if (retval != ERROR_OK)
7163 return retval;
7164
7165
7166 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7167 }
7168
7169 const char *target_debug_reason_str(enum target_debug_reason reason)
7170 {
7171 switch (reason) {
7172 case DBG_REASON_DBGRQ:
7173 return "DBGRQ";
7174 case DBG_REASON_BREAKPOINT:
7175 return "BREAKPOINT";
7176 case DBG_REASON_WATCHPOINT:
7177 return "WATCHPOINT";
7178 case DBG_REASON_WPTANDBKPT:
7179 return "WPTANDBKPT";
7180 case DBG_REASON_SINGLESTEP:
7181 return "SINGLESTEP";
7182 case DBG_REASON_NOTHALTED:
7183 return "NOTHALTED";
7184 case DBG_REASON_EXIT:
7185 return "EXIT";
7186 case DBG_REASON_EXC_CATCH:
7187 return "EXC_CATCH";
7188 case DBG_REASON_UNDEFINED:
7189 return "UNDEFINED";
7190 default:
7191 return "UNKNOWN!";
7192 }
7193 }

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, and then change the URL to https://review.openocd.org/login/?link to return to this page; this time the new login method will be linked to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)