breakpoints: add rwp all command
[openocd.git] / src / target / target.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2007-2010 Øyvind Harboe *
8 * oyvind.harboe@zylin.com *
9 * *
10 * Copyright (C) 2008, Duane Ellis *
11 * openocd@duaneeellis.com *
12 * *
13 * Copyright (C) 2008 by Spencer Oliver *
14 * spen@spen-soft.co.uk *
15 * *
16 * Copyright (C) 2008 by Rick Altherr *
 *   Copyright (C) 2008 by Rick Altherr                                    *
 *   kc8apf@kc8apf.net                                                     *
18 * *
19 * Copyright (C) 2011 by Broadcom Corporation *
20 * Evan Hunter - ehunter@broadcom.com *
21 * *
22 * Copyright (C) ST-Ericsson SA 2011 *
23 * michel.jaouen@stericsson.com : smp minimum support *
24 * *
25 * Copyright (C) 2011 Andreas Fritiofson *
26 * andreas.fritiofson@gmail.com *
27 ***************************************************************************/
28
29 #ifdef HAVE_CONFIG_H
30 #include "config.h"
31 #endif
32
33 #include <helper/align.h>
34 #include <helper/nvp.h>
35 #include <helper/time_support.h>
36 #include <jtag/jtag.h>
37 #include <flash/nor/core.h>
38
39 #include "target.h"
40 #include "target_type.h"
41 #include "target_request.h"
42 #include "breakpoints.h"
43 #include "register.h"
44 #include "trace.h"
45 #include "image.h"
46 #include "rtos/rtos.h"
47 #include "transport/transport.h"
48 #include "arm_cti.h"
49 #include "smp.h"
50 #include "semihosting_common.h"
51
52 /* default halt wait timeout (ms) */
53 #define DEFAULT_HALT_TIMEOUT 5000
54
55 static int target_read_buffer_default(struct target *target, target_addr_t address,
56 uint32_t count, uint8_t *buffer);
57 static int target_write_buffer_default(struct target *target, target_addr_t address,
58 uint32_t count, const uint8_t *buffer);
59 static int target_array2mem(Jim_Interp *interp, struct target *target,
60 int argc, Jim_Obj * const *argv);
61 static int target_mem2array(Jim_Interp *interp, struct target *target,
62 int argc, Jim_Obj * const *argv);
63 static int target_register_user_commands(struct command_context *cmd_ctx);
64 static int target_get_gdb_fileio_info_default(struct target *target,
65 struct gdb_fileio_info *fileio_info);
66 static int target_gdb_fileio_end_default(struct target *target, int retcode,
67 int fileio_errno, bool ctrl_c);
68
/* All target drivers known to this build, terminated by a NULL sentinel.
 * Each entry supplies the per-architecture hooks (poll, halt, resume, ...)
 * that generic code invokes through target->type. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&xtensa_chip_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&esp32_target,
	&esp32s2_target,
	&esp32s3_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&armv8r_target,
	&mips_mips64_target,
	NULL,
};

/* Head of the singly-linked list of all configured targets. */
struct target *all_targets;
/* Registered event/timer callback chains, serviced from the main loop. */
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
/* Timestamp (ms) of the next pending timer callback. */
static int64_t target_timer_next_event_value;
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
/* Shared placeholder SMP list for targets configured without an SMP group. */
static LIST_HEAD(empty_smp_targets);
119
/* Boolean-ish assert/deassert keywords accepted by reset-line commands. */
enum nvp_assert {
	NVP_DEASSERT,
	NVP_ASSERT,
};

/* Accepted spellings for assert/deassert (including Tcl-style T/F). */
static const struct nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};

/* Symbolic names for ERROR_TARGET_* codes, used by target_strerror_safe(). */
static const struct nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
149
150 static const char *target_strerror_safe(int err)
151 {
152 const struct nvp *n;
153
154 n = nvp_value2name(nvp_error_target, err);
155 if (!n->name)
156 return "unknown";
157 else
158 return n->name;
159 }
160
/* Tcl-visible names for every target event; used when registering and
 * dispatching "-event" handlers on a target. */
static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	/* reset sequencing events, in the order they fire */
	{ .value = TARGET_EVENT_RESET_START,         .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE,    .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT,        .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST,   .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE,  .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT,          .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END,           .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END,   .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END,   .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	/* user-defined semihosting commands 0x100..0x107 */
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X100, .name = "semihosting-user-cmd-0x100" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X101, .name = "semihosting-user-cmd-0x101" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X102, .name = "semihosting-user-cmd-0x102" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X103, .name = "semihosting-user-cmd-0x103" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X104, .name = "semihosting-user-cmd-0x104" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X105, .name = "semihosting-user-cmd-0x105" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X106, .name = "semihosting-user-cmd-0x106" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X107, .name = "semihosting-user-cmd-0x107" },

	{ .name = NULL, .value = -1 }
};

/* Names for enum target_state, reported by target_state_name(). */
static const struct nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted",  .value = TARGET_HALTED },
	{ .name = "reset",   .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};

/* Names for enum target_debug_reason, reported by debug_reason_name(). */
static const struct nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};

/* Endianness keywords accepted by the "-endian" configure option. */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big",    .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be",     .value = TARGET_BIG_ENDIAN },
	{ .name = "le",     .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};

/* Modes accepted by the "reset" command. */
static const struct nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run",     .value = RESET_RUN },
	{ .name = "halt",    .value = RESET_HALT },
	{ .name = "init",    .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};
250
251 const char *debug_reason_name(struct target *t)
252 {
253 const char *cp;
254
255 cp = nvp_value2name(nvp_target_debug_reason,
256 t->debug_reason)->name;
257 if (!cp) {
258 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
259 cp = "(*BUG*unknown*BUG*)";
260 }
261 return cp;
262 }
263
264 const char *target_state_name(struct target *t)
265 {
266 const char *cp;
267 cp = nvp_value2name(nvp_target_state, t->state)->name;
268 if (!cp) {
269 LOG_ERROR("Invalid target state: %d", (int)(t->state));
270 cp = "(*BUG*unknown*BUG*)";
271 }
272
273 if (!target_was_examined(t) && t->defer_examine)
274 cp = "examine deferred";
275
276 return cp;
277 }
278
279 const char *target_event_name(enum target_event event)
280 {
281 const char *cp;
282 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
283 if (!cp) {
284 LOG_ERROR("Invalid target event: %d", (int)(event));
285 cp = "(*BUG*unknown*BUG*)";
286 }
287 return cp;
288 }
289
290 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
291 {
292 const char *cp;
293 cp = nvp_value2name(nvp_reset_modes, reset_mode)->name;
294 if (!cp) {
295 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
296 cp = "(*BUG*unknown*BUG*)";
297 }
298 return cp;
299 }
300
301 /* determine the number of the new target */
302 static int new_target_number(void)
303 {
304 struct target *t;
305 int x;
306
307 /* number is 0 based */
308 x = -1;
309 t = all_targets;
310 while (t) {
311 if (x < t->target_number)
312 x = t->target_number;
313 t = t->next;
314 }
315 return x + 1;
316 }
317
318 static void append_to_list_all_targets(struct target *target)
319 {
320 struct target **t = &all_targets;
321
322 while (*t)
323 t = &((*t)->next);
324 *t = target;
325 }
326
327 /* read a uint64_t from a buffer in target memory endianness */
328 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
329 {
330 if (target->endianness == TARGET_LITTLE_ENDIAN)
331 return le_to_h_u64(buffer);
332 else
333 return be_to_h_u64(buffer);
334 }
335
336 /* read a uint32_t from a buffer in target memory endianness */
337 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
338 {
339 if (target->endianness == TARGET_LITTLE_ENDIAN)
340 return le_to_h_u32(buffer);
341 else
342 return be_to_h_u32(buffer);
343 }
344
345 /* read a uint24_t from a buffer in target memory endianness */
346 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
347 {
348 if (target->endianness == TARGET_LITTLE_ENDIAN)
349 return le_to_h_u24(buffer);
350 else
351 return be_to_h_u24(buffer);
352 }
353
354 /* read a uint16_t from a buffer in target memory endianness */
355 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
356 {
357 if (target->endianness == TARGET_LITTLE_ENDIAN)
358 return le_to_h_u16(buffer);
359 else
360 return be_to_h_u16(buffer);
361 }
362
363 /* write a uint64_t to a buffer in target memory endianness */
364 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
365 {
366 if (target->endianness == TARGET_LITTLE_ENDIAN)
367 h_u64_to_le(buffer, value);
368 else
369 h_u64_to_be(buffer, value);
370 }
371
372 /* write a uint32_t to a buffer in target memory endianness */
373 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
374 {
375 if (target->endianness == TARGET_LITTLE_ENDIAN)
376 h_u32_to_le(buffer, value);
377 else
378 h_u32_to_be(buffer, value);
379 }
380
381 /* write a uint24_t to a buffer in target memory endianness */
382 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
383 {
384 if (target->endianness == TARGET_LITTLE_ENDIAN)
385 h_u24_to_le(buffer, value);
386 else
387 h_u24_to_be(buffer, value);
388 }
389
390 /* write a uint16_t to a buffer in target memory endianness */
391 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
392 {
393 if (target->endianness == TARGET_LITTLE_ENDIAN)
394 h_u16_to_le(buffer, value);
395 else
396 h_u16_to_be(buffer, value);
397 }
398
/* write a uint8_t to a buffer; endianness is irrelevant for a single byte
 * (the target parameter is kept for signature symmetry with the other setters) */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	buffer[0] = value;
}
404
/* read a uint64_t array from a buffer in target memory endianness
 * (the original comment said "write"; this function unpacks FROM buffer) */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
}
412
/* read a uint32_t array from a buffer in target memory endianness
 * (the original comment said "write"; this function unpacks FROM buffer) */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
}
420
/* read a uint16_t array from a buffer in target memory endianness
 * (the original comment said "write"; this function unpacks FROM buffer) */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
}
428
/* write a uint64_t array to a buffer in target memory endianness */
void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
{
	for (uint32_t i = 0; i < count; i++)
		target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
}
436
/* write a uint32_t array to a buffer in target memory endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
{
	for (uint32_t i = 0; i < count; i++)
		target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
}
444
/* write a uint16_t array to a buffer in target memory endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
{
	for (uint32_t i = 0; i < count; i++)
		target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
}
452
453 /* return a pointer to a configured target; id is name or number */
454 struct target *get_target(const char *id)
455 {
456 struct target *target;
457
458 /* try as tcltarget name */
459 for (target = all_targets; target; target = target->next) {
460 if (!target_name(target))
461 continue;
462 if (strcmp(id, target_name(target)) == 0)
463 return target;
464 }
465
466 /* It's OK to remove this fallback sometime after August 2010 or so */
467
468 /* no match, try as number */
469 unsigned num;
470 if (parse_uint(id, &num) != ERROR_OK)
471 return NULL;
472
473 for (target = all_targets; target; target = target->next) {
474 if (target->target_number == (int)num) {
475 LOG_WARNING("use '%s' as target identifier, not '%u'",
476 target_name(target), num);
477 return target;
478 }
479 }
480
481 return NULL;
482 }
483
484 /* returns a pointer to the n-th configured target */
485 struct target *get_target_by_num(int num)
486 {
487 struct target *target = all_targets;
488
489 while (target) {
490 if (target->target_number == num)
491 return target;
492 target = target->next;
493 }
494
495 return NULL;
496 }
497
/* Current target of the command context; aborts the process if none is set,
 * since that indicates an internal bookkeeping bug. */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_current_target_or_null(cmd_ctx);

	if (!target) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return target;
}
509
510 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
511 {
512 return cmd_ctx->current_target_override
513 ? cmd_ctx->current_target_override
514 : cmd_ctx->current_target;
515 }
516
517 int target_poll(struct target *target)
518 {
519 int retval;
520
521 /* We can't poll until after examine */
522 if (!target_was_examined(target)) {
523 /* Fail silently lest we pollute the log */
524 return ERROR_FAIL;
525 }
526
527 retval = target->type->poll(target);
528 if (retval != ERROR_OK)
529 return retval;
530
531 if (target->halt_issued) {
532 if (target->state == TARGET_HALTED)
533 target->halt_issued = false;
534 else {
535 int64_t t = timeval_ms() - target->halt_issued_time;
536 if (t > DEFAULT_HALT_TIMEOUT) {
537 target->halt_issued = false;
538 LOG_INFO("Halt timed out, wake up GDB.");
539 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
540 }
541 }
542 }
543
544 return ERROR_OK;
545 }
546
547 int target_halt(struct target *target)
548 {
549 int retval;
550 /* We can't poll until after examine */
551 if (!target_was_examined(target)) {
552 LOG_ERROR("Target not examined yet");
553 return ERROR_FAIL;
554 }
555
556 retval = target->type->halt(target);
557 if (retval != ERROR_OK)
558 return retval;
559
560 target->halt_issued = true;
561 target->halt_issued_time = timeval_ms();
562
563 return ERROR_OK;
564 }
565
/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 *	of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 *	should be skipped.  (For example, maybe execution was stopped by
 *	such a breakpoint, in which case it would be counterproductive to
 *	let it re-trigger.
 * @param debug_execution False if all working areas allocated by OpenOCD
 *	should be released and/or restored to their original contents.
 *	(This would for example be true to run some downloaded "helper"
 *	algorithm code, which resides in one such working buffer and uses
 *	another for data storage.)
 *
 * @return ERROR_OK on success, or the error from the per-type resume hook.
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies.  For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled.  (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm.... and
 * enabling them could cause such helpers to malfunction (for example,
 * by overwriting data with a breakpoint instruction.  On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	/*
	 * resume() triggers the event 'resumed'. The execution of TCL commands
	 * in the event handler causes the polling of targets. If the target has
	 * already halted for a breakpoint, polling will run the 'halted' event
	 * handler before the pending 'resumed' handler.
	 * Disable polling during resume() to guarantee the execution of handlers
	 * in the correct order.
	 */
	bool save_poll_mask = jtag_poll_mask();
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	jtag_poll_unmask(save_poll_mask);

	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
632
633 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
634 {
635 char buf[100];
636 int retval;
637 const struct nvp *n;
638 n = nvp_value2name(nvp_reset_modes, reset_mode);
639 if (!n->name) {
640 LOG_ERROR("invalid reset mode");
641 return ERROR_FAIL;
642 }
643
644 struct target *target;
645 for (target = all_targets; target; target = target->next)
646 target_call_reset_callbacks(target, reset_mode);
647
648 /* disable polling during reset to make reset event scripts
649 * more predictable, i.e. dr/irscan & pathmove in events will
650 * not have JTAG operations injected into the middle of a sequence.
651 */
652 bool save_poll_mask = jtag_poll_mask();
653
654 sprintf(buf, "ocd_process_reset %s", n->name);
655 retval = Jim_Eval(cmd->ctx->interp, buf);
656
657 jtag_poll_unmask(save_poll_mask);
658
659 if (retval != JIM_OK) {
660 Jim_MakeErrorMessage(cmd->ctx->interp);
661 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
662 return ERROR_FAIL;
663 }
664
665 /* We want any events to be processed before the prompt */
666 retval = target_call_timer_callbacks_now();
667
668 for (target = all_targets; target; target = target->next) {
669 target->type->check_reset(target);
670 target->running_alg = false;
671 }
672
673 return retval;
674 }
675
676 static int identity_virt2phys(struct target *target,
677 target_addr_t virtual, target_addr_t *physical)
678 {
679 *physical = virtual;
680 return ERROR_OK;
681 }
682
683 static int no_mmu(struct target *target, int *enabled)
684 {
685 *enabled = 0;
686 return ERROR_OK;
687 }
688
689 /**
690 * Reset the @c examined flag for the given target.
691 * Pure paranoia -- targets are zeroed on allocation.
692 */
693 static inline void target_reset_examined(struct target *target)
694 {
695 target->examined = false;
696 }
697
698 static int default_examine(struct target *target)
699 {
700 target_set_examined(target);
701 return ERROR_OK;
702 }
703
704 /* no check by default */
705 static int default_check_reset(struct target *target)
706 {
707 return ERROR_OK;
708 }
709
710 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
711 * Keep in sync */
712 int target_examine_one(struct target *target)
713 {
714 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
715
716 int retval = target->type->examine(target);
717 if (retval != ERROR_OK) {
718 target_reset_examined(target);
719 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
720 return retval;
721 }
722
723 target_set_examined(target);
724 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
725
726 return ERROR_OK;
727 }
728
729 static int jtag_enable_callback(enum jtag_event event, void *priv)
730 {
731 struct target *target = priv;
732
733 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
734 return ERROR_OK;
735
736 jtag_unregister_event_callback(jtag_enable_callback, target);
737
738 return target_examine_one(target);
739 }
740
741 /* Targets that correctly implement init + examine, i.e.
742 * no communication with target during init:
743 *
744 * XScale
745 */
746 int target_examine(void)
747 {
748 int retval = ERROR_OK;
749 struct target *target;
750
751 for (target = all_targets; target; target = target->next) {
752 /* defer examination, but don't skip it */
753 if (!target->tap->enabled) {
754 jtag_register_event_callback(jtag_enable_callback,
755 target);
756 continue;
757 }
758
759 if (target->defer_examine)
760 continue;
761
762 int retval2 = target_examine_one(target);
763 if (retval2 != ERROR_OK) {
764 LOG_WARNING("target %s examination failed", target_name(target));
765 retval = retval2;
766 }
767 }
768 return retval;
769 }
770
771 const char *target_type_name(struct target *target)
772 {
773 return target->type->name;
774 }
775
776 static int target_soft_reset_halt(struct target *target)
777 {
778 if (!target_was_examined(target)) {
779 LOG_ERROR("Target not examined yet");
780 return ERROR_FAIL;
781 }
782 if (!target->type->soft_reset_halt) {
783 LOG_ERROR("Target %s does not support soft_reset_halt",
784 target_name(target));
785 return ERROR_FAIL;
786 }
787 return target->type->soft_reset_halt(target);
788 }
789
790 /**
791 * Downloads a target-specific native code algorithm to the target,
792 * and executes it. * Note that some targets may need to set up, enable,
793 * and tear down a breakpoint (hard or * soft) to detect algorithm
794 * termination, while others may support lower overhead schemes where
795 * soft breakpoints embedded in the algorithm automatically terminate the
796 * algorithm.
797 *
798 * @param target used to run the algorithm
799 * @param num_mem_params
800 * @param mem_params
801 * @param num_reg_params
802 * @param reg_param
803 * @param entry_point
804 * @param exit_point
805 * @param timeout_ms
806 * @param arch_info target-specific description of the algorithm.
807 */
808 int target_run_algorithm(struct target *target,
809 int num_mem_params, struct mem_param *mem_params,
810 int num_reg_params, struct reg_param *reg_param,
811 target_addr_t entry_point, target_addr_t exit_point,
812 unsigned int timeout_ms, void *arch_info)
813 {
814 int retval = ERROR_FAIL;
815
816 if (!target_was_examined(target)) {
817 LOG_ERROR("Target not examined yet");
818 goto done;
819 }
820 if (!target->type->run_algorithm) {
821 LOG_ERROR("Target type '%s' does not support %s",
822 target_type_name(target), __func__);
823 goto done;
824 }
825
826 target->running_alg = true;
827 retval = target->type->run_algorithm(target,
828 num_mem_params, mem_params,
829 num_reg_params, reg_param,
830 entry_point, exit_point, timeout_ms, arch_info);
831 target->running_alg = false;
832
833 done:
834 return retval;
835 }
836
837 /**
838 * Executes a target-specific native code algorithm and leaves it running.
839 *
840 * @param target used to run the algorithm
841 * @param num_mem_params
842 * @param mem_params
843 * @param num_reg_params
844 * @param reg_params
845 * @param entry_point
846 * @param exit_point
847 * @param arch_info target-specific description of the algorithm.
848 */
849 int target_start_algorithm(struct target *target,
850 int num_mem_params, struct mem_param *mem_params,
851 int num_reg_params, struct reg_param *reg_params,
852 target_addr_t entry_point, target_addr_t exit_point,
853 void *arch_info)
854 {
855 int retval = ERROR_FAIL;
856
857 if (!target_was_examined(target)) {
858 LOG_ERROR("Target not examined yet");
859 goto done;
860 }
861 if (!target->type->start_algorithm) {
862 LOG_ERROR("Target type '%s' does not support %s",
863 target_type_name(target), __func__);
864 goto done;
865 }
866 if (target->running_alg) {
867 LOG_ERROR("Target is already running an algorithm");
868 goto done;
869 }
870
871 target->running_alg = true;
872 retval = target->type->start_algorithm(target,
873 num_mem_params, mem_params,
874 num_reg_params, reg_params,
875 entry_point, exit_point, arch_info);
876
877 done:
878 return retval;
879 }
880
881 /**
882 * Waits for an algorithm started with target_start_algorithm() to complete.
883 *
884 * @param target used to run the algorithm
885 * @param num_mem_params
886 * @param mem_params
887 * @param num_reg_params
888 * @param reg_params
889 * @param exit_point
890 * @param timeout_ms
891 * @param arch_info target-specific description of the algorithm.
892 */
893 int target_wait_algorithm(struct target *target,
894 int num_mem_params, struct mem_param *mem_params,
895 int num_reg_params, struct reg_param *reg_params,
896 target_addr_t exit_point, unsigned int timeout_ms,
897 void *arch_info)
898 {
899 int retval = ERROR_FAIL;
900
901 if (!target->type->wait_algorithm) {
902 LOG_ERROR("Target type '%s' does not support %s",
903 target_type_name(target), __func__);
904 goto done;
905 }
906 if (!target->running_alg) {
907 LOG_ERROR("Target is not running an algorithm");
908 goto done;
909 }
910
911 retval = target->type->wait_algorithm(target,
912 num_mem_params, mem_params,
913 num_reg_params, reg_params,
914 exit_point, timeout_ms, arch_info);
915 if (retval != ERROR_TARGET_TIMEOUT)
916 target->running_alg = false;
917
918 done:
919 return retval;
920 }
921
922 /**
923 * Streams data to a circular buffer on target intended for consumption by code
924 * running asynchronously on target.
925 *
926 * This is intended for applications where target-specific native code runs
927 * on the target, receives data from the circular buffer, does something with
928 * it (most likely writing it to a flash memory), and advances the circular
929 * buffer pointer.
930 *
931 * This assumes that the helper algorithm has already been loaded to the target,
932 * but has not been started yet. Given memory and register parameters are passed
933 * to the algorithm.
934 *
935 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
936 * following format:
937 *
938 * [buffer_start + 0, buffer_start + 4):
939 * Write Pointer address (aka head). Written and updated by this
940 * routine when new data is written to the circular buffer.
941 * [buffer_start + 4, buffer_start + 8):
942 * Read Pointer address (aka tail). Updated by code running on the
943 * target after it consumes data.
944 * [buffer_start + 8, buffer_start + buffer_size):
945 * Circular buffer contents.
946 *
947 * See contrib/loaders/flash/stm32f1x.S for an example.
948 *
949 * @param target used to run the algorithm
950 * @param buffer address on the host where data to be sent is located
951 * @param count number of blocks to send
952 * @param block_size size in bytes of each block
953 * @param num_mem_params count of memory-based params to pass to algorithm
954 * @param mem_params memory-based params to pass to algorithm
955 * @param num_reg_params count of register-based params to pass to algorithm
956 * @param reg_params memory-based params to pass to algorithm
957 * @param buffer_start address on the target of the circular buffer structure
958 * @param buffer_size size of the circular buffer structure
959 * @param entry_point address on the target to execute to start the algorithm
960 * @param exit_point address at which to set a breakpoint to catch the
961 * end of the algorithm; can be 0 if target triggers a breakpoint itself
962 * @param arch_info
963 */
964
965 int target_run_flash_async_algorithm(struct target *target,
966 const uint8_t *buffer, uint32_t count, int block_size,
967 int num_mem_params, struct mem_param *mem_params,
968 int num_reg_params, struct reg_param *reg_params,
969 uint32_t buffer_start, uint32_t buffer_size,
970 uint32_t entry_point, uint32_t exit_point, void *arch_info)
971 {
972 int retval;
973 int timeout = 0;
974
975 const uint8_t *buffer_orig = buffer;
976
977 /* Set up working area. First word is write pointer, second word is read pointer,
978 * rest is fifo data area. */
979 uint32_t wp_addr = buffer_start;
980 uint32_t rp_addr = buffer_start + 4;
981 uint32_t fifo_start_addr = buffer_start + 8;
982 uint32_t fifo_end_addr = buffer_start + buffer_size;
983
984 uint32_t wp = fifo_start_addr;
985 uint32_t rp = fifo_start_addr;
986
987 /* validate block_size is 2^n */
988 assert(IS_PWR_OF_2(block_size));
989
990 retval = target_write_u32(target, wp_addr, wp);
991 if (retval != ERROR_OK)
992 return retval;
993 retval = target_write_u32(target, rp_addr, rp);
994 if (retval != ERROR_OK)
995 return retval;
996
997 /* Start up algorithm on target and let it idle while writing the first chunk */
998 retval = target_start_algorithm(target, num_mem_params, mem_params,
999 num_reg_params, reg_params,
1000 entry_point,
1001 exit_point,
1002 arch_info);
1003
1004 if (retval != ERROR_OK) {
1005 LOG_ERROR("error starting target flash write algorithm");
1006 return retval;
1007 }
1008
1009 while (count > 0) {
1010
1011 retval = target_read_u32(target, rp_addr, &rp);
1012 if (retval != ERROR_OK) {
1013 LOG_ERROR("failed to get read pointer");
1014 break;
1015 }
1016
1017 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1018 (size_t) (buffer - buffer_orig), count, wp, rp);
1019
1020 if (rp == 0) {
1021 LOG_ERROR("flash write algorithm aborted by target");
1022 retval = ERROR_FLASH_OPERATION_FAILED;
1023 break;
1024 }
1025
1026 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1027 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1028 break;
1029 }
1030
1031 /* Count the number of bytes available in the fifo without
1032 * crossing the wrap around. Make sure to not fill it completely,
1033 * because that would make wp == rp and that's the empty condition. */
1034 uint32_t thisrun_bytes;
1035 if (rp > wp)
1036 thisrun_bytes = rp - wp - block_size;
1037 else if (rp > fifo_start_addr)
1038 thisrun_bytes = fifo_end_addr - wp;
1039 else
1040 thisrun_bytes = fifo_end_addr - wp - block_size;
1041
1042 if (thisrun_bytes == 0) {
1043 /* Throttle polling a bit if transfer is (much) faster than flash
1044 * programming. The exact delay shouldn't matter as long as it's
1045 * less than buffer size / flash speed. This is very unlikely to
1046 * run when using high latency connections such as USB. */
1047 alive_sleep(2);
1048
1049 /* to stop an infinite loop on some targets check and increment a timeout
1050 * this issue was observed on a stellaris using the new ICDI interface */
1051 if (timeout++ >= 2500) {
1052 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1053 return ERROR_FLASH_OPERATION_FAILED;
1054 }
1055 continue;
1056 }
1057
1058 /* reset our timeout */
1059 timeout = 0;
1060
1061 /* Limit to the amount of data we actually want to write */
1062 if (thisrun_bytes > count * block_size)
1063 thisrun_bytes = count * block_size;
1064
1065 /* Force end of large blocks to be word aligned */
1066 if (thisrun_bytes >= 16)
1067 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1068
1069 /* Write data to fifo */
1070 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1071 if (retval != ERROR_OK)
1072 break;
1073
1074 /* Update counters and wrap write pointer */
1075 buffer += thisrun_bytes;
1076 count -= thisrun_bytes / block_size;
1077 wp += thisrun_bytes;
1078 if (wp >= fifo_end_addr)
1079 wp = fifo_start_addr;
1080
1081 /* Store updated write pointer to target */
1082 retval = target_write_u32(target, wp_addr, wp);
1083 if (retval != ERROR_OK)
1084 break;
1085
1086 /* Avoid GDB timeouts */
1087 keep_alive();
1088 }
1089
1090 if (retval != ERROR_OK) {
1091 /* abort flash write algorithm on target */
1092 target_write_u32(target, wp_addr, 0);
1093 }
1094
1095 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1096 num_reg_params, reg_params,
1097 exit_point,
1098 10000,
1099 arch_info);
1100
1101 if (retval2 != ERROR_OK) {
1102 LOG_ERROR("error waiting for target flash write algorithm");
1103 retval = retval2;
1104 }
1105
1106 if (retval == ERROR_OK) {
1107 /* check if algorithm set rp = 0 after fifo writer loop finished */
1108 retval = target_read_u32(target, rp_addr, &rp);
1109 if (retval == ERROR_OK && rp == 0) {
1110 LOG_ERROR("flash write algorithm aborted by target");
1111 retval = ERROR_FLASH_OPERATION_FAILED;
1112 }
1113 }
1114
1115 return retval;
1116 }
1117
1118 int target_run_read_async_algorithm(struct target *target,
1119 uint8_t *buffer, uint32_t count, int block_size,
1120 int num_mem_params, struct mem_param *mem_params,
1121 int num_reg_params, struct reg_param *reg_params,
1122 uint32_t buffer_start, uint32_t buffer_size,
1123 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1124 {
1125 int retval;
1126 int timeout = 0;
1127
1128 const uint8_t *buffer_orig = buffer;
1129
1130 /* Set up working area. First word is write pointer, second word is read pointer,
1131 * rest is fifo data area. */
1132 uint32_t wp_addr = buffer_start;
1133 uint32_t rp_addr = buffer_start + 4;
1134 uint32_t fifo_start_addr = buffer_start + 8;
1135 uint32_t fifo_end_addr = buffer_start + buffer_size;
1136
1137 uint32_t wp = fifo_start_addr;
1138 uint32_t rp = fifo_start_addr;
1139
1140 /* validate block_size is 2^n */
1141 assert(IS_PWR_OF_2(block_size));
1142
1143 retval = target_write_u32(target, wp_addr, wp);
1144 if (retval != ERROR_OK)
1145 return retval;
1146 retval = target_write_u32(target, rp_addr, rp);
1147 if (retval != ERROR_OK)
1148 return retval;
1149
1150 /* Start up algorithm on target */
1151 retval = target_start_algorithm(target, num_mem_params, mem_params,
1152 num_reg_params, reg_params,
1153 entry_point,
1154 exit_point,
1155 arch_info);
1156
1157 if (retval != ERROR_OK) {
1158 LOG_ERROR("error starting target flash read algorithm");
1159 return retval;
1160 }
1161
1162 while (count > 0) {
1163 retval = target_read_u32(target, wp_addr, &wp);
1164 if (retval != ERROR_OK) {
1165 LOG_ERROR("failed to get write pointer");
1166 break;
1167 }
1168
1169 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1170 (size_t)(buffer - buffer_orig), count, wp, rp);
1171
1172 if (wp == 0) {
1173 LOG_ERROR("flash read algorithm aborted by target");
1174 retval = ERROR_FLASH_OPERATION_FAILED;
1175 break;
1176 }
1177
1178 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1179 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1180 break;
1181 }
1182
1183 /* Count the number of bytes available in the fifo without
1184 * crossing the wrap around. */
1185 uint32_t thisrun_bytes;
1186 if (wp >= rp)
1187 thisrun_bytes = wp - rp;
1188 else
1189 thisrun_bytes = fifo_end_addr - rp;
1190
1191 if (thisrun_bytes == 0) {
1192 /* Throttle polling a bit if transfer is (much) faster than flash
1193 * reading. The exact delay shouldn't matter as long as it's
1194 * less than buffer size / flash speed. This is very unlikely to
1195 * run when using high latency connections such as USB. */
1196 alive_sleep(2);
1197
1198 /* to stop an infinite loop on some targets check and increment a timeout
1199 * this issue was observed on a stellaris using the new ICDI interface */
1200 if (timeout++ >= 2500) {
1201 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1202 return ERROR_FLASH_OPERATION_FAILED;
1203 }
1204 continue;
1205 }
1206
1207 /* Reset our timeout */
1208 timeout = 0;
1209
1210 /* Limit to the amount of data we actually want to read */
1211 if (thisrun_bytes > count * block_size)
1212 thisrun_bytes = count * block_size;
1213
1214 /* Force end of large blocks to be word aligned */
1215 if (thisrun_bytes >= 16)
1216 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1217
1218 /* Read data from fifo */
1219 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1220 if (retval != ERROR_OK)
1221 break;
1222
1223 /* Update counters and wrap write pointer */
1224 buffer += thisrun_bytes;
1225 count -= thisrun_bytes / block_size;
1226 rp += thisrun_bytes;
1227 if (rp >= fifo_end_addr)
1228 rp = fifo_start_addr;
1229
1230 /* Store updated write pointer to target */
1231 retval = target_write_u32(target, rp_addr, rp);
1232 if (retval != ERROR_OK)
1233 break;
1234
1235 /* Avoid GDB timeouts */
1236 keep_alive();
1237
1238 }
1239
1240 if (retval != ERROR_OK) {
1241 /* abort flash write algorithm on target */
1242 target_write_u32(target, rp_addr, 0);
1243 }
1244
1245 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1246 num_reg_params, reg_params,
1247 exit_point,
1248 10000,
1249 arch_info);
1250
1251 if (retval2 != ERROR_OK) {
1252 LOG_ERROR("error waiting for target flash write algorithm");
1253 retval = retval2;
1254 }
1255
1256 if (retval == ERROR_OK) {
1257 /* check if algorithm set wp = 0 after fifo writer loop finished */
1258 retval = target_read_u32(target, wp_addr, &wp);
1259 if (retval == ERROR_OK && wp == 0) {
1260 LOG_ERROR("flash read algorithm aborted by target");
1261 retval = ERROR_FLASH_OPERATION_FAILED;
1262 }
1263 }
1264
1265 return retval;
1266 }
1267
1268 int target_read_memory(struct target *target,
1269 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1270 {
1271 if (!target_was_examined(target)) {
1272 LOG_ERROR("Target not examined yet");
1273 return ERROR_FAIL;
1274 }
1275 if (!target->type->read_memory) {
1276 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1277 return ERROR_FAIL;
1278 }
1279 return target->type->read_memory(target, address, size, count, buffer);
1280 }
1281
1282 int target_read_phys_memory(struct target *target,
1283 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1284 {
1285 if (!target_was_examined(target)) {
1286 LOG_ERROR("Target not examined yet");
1287 return ERROR_FAIL;
1288 }
1289 if (!target->type->read_phys_memory) {
1290 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1291 return ERROR_FAIL;
1292 }
1293 return target->type->read_phys_memory(target, address, size, count, buffer);
1294 }
1295
1296 int target_write_memory(struct target *target,
1297 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1298 {
1299 if (!target_was_examined(target)) {
1300 LOG_ERROR("Target not examined yet");
1301 return ERROR_FAIL;
1302 }
1303 if (!target->type->write_memory) {
1304 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1305 return ERROR_FAIL;
1306 }
1307 return target->type->write_memory(target, address, size, count, buffer);
1308 }
1309
1310 int target_write_phys_memory(struct target *target,
1311 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1312 {
1313 if (!target_was_examined(target)) {
1314 LOG_ERROR("Target not examined yet");
1315 return ERROR_FAIL;
1316 }
1317 if (!target->type->write_phys_memory) {
1318 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1319 return ERROR_FAIL;
1320 }
1321 return target->type->write_phys_memory(target, address, size, count, buffer);
1322 }
1323
1324 int target_add_breakpoint(struct target *target,
1325 struct breakpoint *breakpoint)
1326 {
1327 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1328 LOG_TARGET_ERROR(target, "not halted (add breakpoint)");
1329 return ERROR_TARGET_NOT_HALTED;
1330 }
1331 return target->type->add_breakpoint(target, breakpoint);
1332 }
1333
1334 int target_add_context_breakpoint(struct target *target,
1335 struct breakpoint *breakpoint)
1336 {
1337 if (target->state != TARGET_HALTED) {
1338 LOG_TARGET_ERROR(target, "not halted (add context breakpoint)");
1339 return ERROR_TARGET_NOT_HALTED;
1340 }
1341 return target->type->add_context_breakpoint(target, breakpoint);
1342 }
1343
1344 int target_add_hybrid_breakpoint(struct target *target,
1345 struct breakpoint *breakpoint)
1346 {
1347 if (target->state != TARGET_HALTED) {
1348 LOG_TARGET_ERROR(target, "not halted (add hybrid breakpoint)");
1349 return ERROR_TARGET_NOT_HALTED;
1350 }
1351 return target->type->add_hybrid_breakpoint(target, breakpoint);
1352 }
1353
1354 int target_remove_breakpoint(struct target *target,
1355 struct breakpoint *breakpoint)
1356 {
1357 return target->type->remove_breakpoint(target, breakpoint);
1358 }
1359
1360 int target_add_watchpoint(struct target *target,
1361 struct watchpoint *watchpoint)
1362 {
1363 if (target->state != TARGET_HALTED) {
1364 LOG_TARGET_ERROR(target, "not halted (add watchpoint)");
1365 return ERROR_TARGET_NOT_HALTED;
1366 }
1367 return target->type->add_watchpoint(target, watchpoint);
1368 }
1369 int target_remove_watchpoint(struct target *target,
1370 struct watchpoint *watchpoint)
1371 {
1372 return target->type->remove_watchpoint(target, watchpoint);
1373 }
1374 int target_hit_watchpoint(struct target *target,
1375 struct watchpoint **hit_watchpoint)
1376 {
1377 if (target->state != TARGET_HALTED) {
1378 LOG_TARGET_ERROR(target, "not halted (hit watchpoint)");
1379 return ERROR_TARGET_NOT_HALTED;
1380 }
1381
1382 if (!target->type->hit_watchpoint) {
1383 /* For backward compatible, if hit_watchpoint is not implemented,
1384 * return ERROR_FAIL such that gdb_server will not take the nonsense
1385 * information. */
1386 return ERROR_FAIL;
1387 }
1388
1389 return target->type->hit_watchpoint(target, hit_watchpoint);
1390 }
1391
1392 const char *target_get_gdb_arch(struct target *target)
1393 {
1394 if (!target->type->get_gdb_arch)
1395 return NULL;
1396 return target->type->get_gdb_arch(target);
1397 }
1398
1399 int target_get_gdb_reg_list(struct target *target,
1400 struct reg **reg_list[], int *reg_list_size,
1401 enum target_register_class reg_class)
1402 {
1403 int result = ERROR_FAIL;
1404
1405 if (!target_was_examined(target)) {
1406 LOG_ERROR("Target not examined yet");
1407 goto done;
1408 }
1409
1410 result = target->type->get_gdb_reg_list(target, reg_list,
1411 reg_list_size, reg_class);
1412
1413 done:
1414 if (result != ERROR_OK) {
1415 *reg_list = NULL;
1416 *reg_list_size = 0;
1417 }
1418 return result;
1419 }
1420
1421 int target_get_gdb_reg_list_noread(struct target *target,
1422 struct reg **reg_list[], int *reg_list_size,
1423 enum target_register_class reg_class)
1424 {
1425 if (target->type->get_gdb_reg_list_noread &&
1426 target->type->get_gdb_reg_list_noread(target, reg_list,
1427 reg_list_size, reg_class) == ERROR_OK)
1428 return ERROR_OK;
1429 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1430 }
1431
1432 bool target_supports_gdb_connection(struct target *target)
1433 {
1434 /*
1435 * exclude all the targets that don't provide get_gdb_reg_list
1436 * or that have explicit gdb_max_connection == 0
1437 */
1438 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1439 }
1440
1441 int target_step(struct target *target,
1442 int current, target_addr_t address, int handle_breakpoints)
1443 {
1444 int retval;
1445
1446 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1447
1448 retval = target->type->step(target, current, address, handle_breakpoints);
1449 if (retval != ERROR_OK)
1450 return retval;
1451
1452 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1453
1454 return retval;
1455 }
1456
1457 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1458 {
1459 if (target->state != TARGET_HALTED) {
1460 LOG_TARGET_ERROR(target, "not halted (gdb fileio)");
1461 return ERROR_TARGET_NOT_HALTED;
1462 }
1463 return target->type->get_gdb_fileio_info(target, fileio_info);
1464 }
1465
1466 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1467 {
1468 if (target->state != TARGET_HALTED) {
1469 LOG_TARGET_ERROR(target, "not halted (gdb fileio end)");
1470 return ERROR_TARGET_NOT_HALTED;
1471 }
1472 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1473 }
1474
1475 target_addr_t target_address_max(struct target *target)
1476 {
1477 unsigned bits = target_address_bits(target);
1478 if (sizeof(target_addr_t) * 8 == bits)
1479 return (target_addr_t) -1;
1480 else
1481 return (((target_addr_t) 1) << bits) - 1;
1482 }
1483
1484 unsigned target_address_bits(struct target *target)
1485 {
1486 if (target->type->address_bits)
1487 return target->type->address_bits(target);
1488 return 32;
1489 }
1490
1491 unsigned int target_data_bits(struct target *target)
1492 {
1493 if (target->type->data_bits)
1494 return target->type->data_bits(target);
1495 return 32;
1496 }
1497
1498 static int target_profiling(struct target *target, uint32_t *samples,
1499 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1500 {
1501 return target->type->profiling(target, samples, max_num_samples,
1502 num_samples, seconds);
1503 }
1504
1505 static int handle_target(void *priv);
1506
/* Prepare one target for use: clear its examined flag, stub in defaults for
 * optional target_type hooks the driver left unset, run the driver's
 * init_target(), and sanity-check/normalize its MMU-related hooks. */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (!type->examine)
		type->examine = default_examine;

	if (!type->check_reset)
		type->check_reset = default_check_reset;

	/* init_target is mandatory for every target type. */
	assert(type->init_target);

	int retval = type->init_target(cmd_ctx, target);
	if (retval != ERROR_OK) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (!type->virt2phys) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* Install generic fallbacks for the remaining optional hooks. */
	if (!target->type->read_buffer)
		target->type->read_buffer = target_read_buffer_default;

	if (!target->type->write_buffer)
		target->type->write_buffer = target_write_buffer_default;

	if (!target->type->get_gdb_fileio_info)
		target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;

	if (!target->type->gdb_fileio_end)
		target->type->gdb_fileio_end = target_gdb_fileio_end_default;

	if (!target->type->profiling)
		target->type->profiling = target_profiling_default;

	return ERROR_OK;
}
1566
1567 static int target_init(struct command_context *cmd_ctx)
1568 {
1569 struct target *target;
1570 int retval;
1571
1572 for (target = all_targets; target; target = target->next) {
1573 retval = target_init_one(cmd_ctx, target);
1574 if (retval != ERROR_OK)
1575 return retval;
1576 }
1577
1578 if (!all_targets)
1579 return ERROR_OK;
1580
1581 retval = target_register_user_commands(cmd_ctx);
1582 if (retval != ERROR_OK)
1583 return retval;
1584
1585 retval = target_register_timer_callback(&handle_target,
1586 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1587 if (retval != ERROR_OK)
1588 return retval;
1589
1590 return ERROR_OK;
1591 }
1592
/* 'target init' command handler: run the user-overridable init_targets,
 * init_target_events and init_board procs (in that order), then initialize
 * all configured targets. A second invocation is a harmless no-op. */
COMMAND_HANDLER(handle_target_init_command)
{
	int retval;

	if (CMD_ARGC != 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Run-once guard: scripts may issue 'target init' more than once. */
	static bool target_initialized;
	if (target_initialized) {
		LOG_INFO("'target init' has already been called");
		return ERROR_OK;
	}
	target_initialized = true;

	retval = command_run_line(CMD_CTX, "init_targets");
	if (retval != ERROR_OK)
		return retval;

	retval = command_run_line(CMD_CTX, "init_target_events");
	if (retval != ERROR_OK)
		return retval;

	retval = command_run_line(CMD_CTX, "init_board");
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("Initializing targets...");
	return target_init(CMD_CTX);
}
1622
1623 int target_register_event_callback(int (*callback)(struct target *target,
1624 enum target_event event, void *priv), void *priv)
1625 {
1626 struct target_event_callback **callbacks_p = &target_event_callbacks;
1627
1628 if (!callback)
1629 return ERROR_COMMAND_SYNTAX_ERROR;
1630
1631 if (*callbacks_p) {
1632 while ((*callbacks_p)->next)
1633 callbacks_p = &((*callbacks_p)->next);
1634 callbacks_p = &((*callbacks_p)->next);
1635 }
1636
1637 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1638 (*callbacks_p)->callback = callback;
1639 (*callbacks_p)->priv = priv;
1640 (*callbacks_p)->next = NULL;
1641
1642 return ERROR_OK;
1643 }
1644
1645 int target_register_reset_callback(int (*callback)(struct target *target,
1646 enum target_reset_mode reset_mode, void *priv), void *priv)
1647 {
1648 struct target_reset_callback *entry;
1649
1650 if (!callback)
1651 return ERROR_COMMAND_SYNTAX_ERROR;
1652
1653 entry = malloc(sizeof(struct target_reset_callback));
1654 if (!entry) {
1655 LOG_ERROR("error allocating buffer for reset callback entry");
1656 return ERROR_COMMAND_SYNTAX_ERROR;
1657 }
1658
1659 entry->callback = callback;
1660 entry->priv = priv;
1661 list_add(&entry->list, &target_reset_callback_list);
1662
1663
1664 return ERROR_OK;
1665 }
1666
1667 int target_register_trace_callback(int (*callback)(struct target *target,
1668 size_t len, uint8_t *data, void *priv), void *priv)
1669 {
1670 struct target_trace_callback *entry;
1671
1672 if (!callback)
1673 return ERROR_COMMAND_SYNTAX_ERROR;
1674
1675 entry = malloc(sizeof(struct target_trace_callback));
1676 if (!entry) {
1677 LOG_ERROR("error allocating buffer for trace callback entry");
1678 return ERROR_COMMAND_SYNTAX_ERROR;
1679 }
1680
1681 entry->callback = callback;
1682 entry->priv = priv;
1683 list_add(&entry->list, &target_trace_callback_list);
1684
1685
1686 return ERROR_OK;
1687 }
1688
1689 int target_register_timer_callback(int (*callback)(void *priv),
1690 unsigned int time_ms, enum target_timer_type type, void *priv)
1691 {
1692 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1693
1694 if (!callback)
1695 return ERROR_COMMAND_SYNTAX_ERROR;
1696
1697 if (*callbacks_p) {
1698 while ((*callbacks_p)->next)
1699 callbacks_p = &((*callbacks_p)->next);
1700 callbacks_p = &((*callbacks_p)->next);
1701 }
1702
1703 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1704 (*callbacks_p)->callback = callback;
1705 (*callbacks_p)->type = type;
1706 (*callbacks_p)->time_ms = time_ms;
1707 (*callbacks_p)->removed = false;
1708
1709 (*callbacks_p)->when = timeval_ms() + time_ms;
1710 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1711
1712 (*callbacks_p)->priv = priv;
1713 (*callbacks_p)->next = NULL;
1714
1715 return ERROR_OK;
1716 }
1717
1718 int target_unregister_event_callback(int (*callback)(struct target *target,
1719 enum target_event event, void *priv), void *priv)
1720 {
1721 struct target_event_callback **p = &target_event_callbacks;
1722 struct target_event_callback *c = target_event_callbacks;
1723
1724 if (!callback)
1725 return ERROR_COMMAND_SYNTAX_ERROR;
1726
1727 while (c) {
1728 struct target_event_callback *next = c->next;
1729 if ((c->callback == callback) && (c->priv == priv)) {
1730 *p = next;
1731 free(c);
1732 return ERROR_OK;
1733 } else
1734 p = &(c->next);
1735 c = next;
1736 }
1737
1738 return ERROR_OK;
1739 }
1740
1741 int target_unregister_reset_callback(int (*callback)(struct target *target,
1742 enum target_reset_mode reset_mode, void *priv), void *priv)
1743 {
1744 struct target_reset_callback *entry;
1745
1746 if (!callback)
1747 return ERROR_COMMAND_SYNTAX_ERROR;
1748
1749 list_for_each_entry(entry, &target_reset_callback_list, list) {
1750 if (entry->callback == callback && entry->priv == priv) {
1751 list_del(&entry->list);
1752 free(entry);
1753 break;
1754 }
1755 }
1756
1757 return ERROR_OK;
1758 }
1759
1760 int target_unregister_trace_callback(int (*callback)(struct target *target,
1761 size_t len, uint8_t *data, void *priv), void *priv)
1762 {
1763 struct target_trace_callback *entry;
1764
1765 if (!callback)
1766 return ERROR_COMMAND_SYNTAX_ERROR;
1767
1768 list_for_each_entry(entry, &target_trace_callback_list, list) {
1769 if (entry->callback == callback && entry->priv == priv) {
1770 list_del(&entry->list);
1771 free(entry);
1772 break;
1773 }
1774 }
1775
1776 return ERROR_OK;
1777 }
1778
1779 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1780 {
1781 if (!callback)
1782 return ERROR_COMMAND_SYNTAX_ERROR;
1783
1784 for (struct target_timer_callback *c = target_timer_callbacks;
1785 c; c = c->next) {
1786 if ((c->callback == callback) && (c->priv == priv)) {
1787 c->removed = true;
1788 return ERROR_OK;
1789 }
1790 }
1791
1792 return ERROR_FAIL;
1793 }
1794
/* Deliver 'event' for 'target': first the Tcl event bodies, then every
 * registered C callback. For TARGET_EVENT_HALTED the GDB halt event is
 * delivered first (via recursion) before the halted event itself. */
int target_call_event_callbacks(struct target *target, enum target_event event)
{
	struct target_event_callback *callback = target_event_callbacks;
	struct target_event_callback *next_callback;

	if (event == TARGET_EVENT_HALTED) {
		/* execute early halted first */
		target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
	}

	LOG_DEBUG("target event %i (%s) for core %s", event,
			target_event_name(event),
			target_name(target));

	/* Tcl event handlers run before the registered C callbacks. */
	target_handle_event(target, event);

	/* Save 'next' before each call so a callback may safely
	 * unregister itself while being invoked. */
	while (callback) {
		next_callback = callback->next;
		callback->callback(target, event, callback->priv);
		callback = next_callback;
	}

	return ERROR_OK;
}
1819
1820 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1821 {
1822 struct target_reset_callback *callback;
1823
1824 LOG_DEBUG("target reset %i (%s)", reset_mode,
1825 nvp_value2name(nvp_reset_modes, reset_mode)->name);
1826
1827 list_for_each_entry(callback, &target_reset_callback_list, list)
1828 callback->callback(target, reset_mode, callback->priv);
1829
1830 return ERROR_OK;
1831 }
1832
1833 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1834 {
1835 struct target_trace_callback *callback;
1836
1837 list_for_each_entry(callback, &target_trace_callback_list, list)
1838 callback->callback(target, len, data, callback->priv);
1839
1840 return ERROR_OK;
1841 }
1842
1843 static int target_timer_callback_periodic_restart(
1844 struct target_timer_callback *cb, int64_t *now)
1845 {
1846 cb->when = *now + cb->time_ms;
1847 return ERROR_OK;
1848 }
1849
1850 static int target_call_timer_callback(struct target_timer_callback *cb,
1851 int64_t *now)
1852 {
1853 cb->callback(cb->priv);
1854
1855 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1856 return target_timer_callback_periodic_restart(cb, now);
1857
1858 return target_unregister_timer_callback(cb->callback, cb->priv);
1859 }
1860
/* Run timer callbacks that are due; with checktime == 0, run every periodic
 * callback regardless of its deadline. Also garbage-collects entries that
 * target_unregister_timer_callback() flagged as removed, and recomputes
 * target_timer_next_event_value for target_timer_next_event(). */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		/* Unlink and free entries flagged for removal before
		 * considering them for dispatch. */
		if ((*callback)->removed) {
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* Track the soonest pending deadline (skipping entries the
		 * call above may have just flagged as removed). */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1908
/* Run only the timer callbacks whose deadline has passed. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1913
/* Invoke all periodic callbacks immediately, ignoring their deadlines. */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1919
/* Absolute time (ms) at which the next timer callback is due; maintained
 * by target_call_timer_callbacks_check_time(). */
int64_t target_timer_next_event(void)
{
	return target_timer_next_event_value;
}
1924
1925 /* Prints the working area layout for debug purposes */
1926 static void print_wa_layout(struct target *target)
1927 {
1928 struct working_area *c = target->working_areas;
1929
1930 while (c) {
1931 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1932 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1933 c->address, c->address + c->size - 1, c->size);
1934 c = c->next;
1935 }
1936 }
1937
1938 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1939 static void target_split_working_area(struct working_area *area, uint32_t size)
1940 {
1941 assert(area->free); /* Shouldn't split an allocated area */
1942 assert(size <= area->size); /* Caller should guarantee this */
1943
1944 /* Split only if not already the right size */
1945 if (size < area->size) {
1946 struct working_area *new_wa = malloc(sizeof(*new_wa));
1947
1948 if (!new_wa)
1949 return;
1950
1951 new_wa->next = area->next;
1952 new_wa->size = area->size - size;
1953 new_wa->address = area->address + size;
1954 new_wa->backup = NULL;
1955 new_wa->user = NULL;
1956 new_wa->free = true;
1957
1958 area->next = new_wa;
1959 area->size = size;
1960
1961 /* If backup memory was allocated to this area, it has the wrong size
1962 * now so free it and it will be reallocated if/when needed */
1963 free(area->backup);
1964 area->backup = NULL;
1965 }
1966 }
1967
1968 /* Merge all adjacent free areas into one */
1969 static void target_merge_working_areas(struct target *target)
1970 {
1971 struct working_area *c = target->working_areas;
1972
1973 while (c && c->next) {
1974 assert(c->next->address == c->address + c->size); /* This is an invariant */
1975
1976 /* Find two adjacent free areas */
1977 if (c->free && c->next->free) {
1978 /* Merge the last into the first */
1979 c->size += c->next->size;
1980
1981 /* Remove the last */
1982 struct working_area *to_be_freed = c->next;
1983 c->next = c->next->next;
1984 free(to_be_freed->backup);
1985 free(to_be_freed);
1986
1987 /* If backup memory was allocated to the remaining area, it's has
1988 * the wrong size now */
1989 free(c->backup);
1990 c->backup = NULL;
1991 } else {
1992 c = c->next;
1993 }
1994 }
1995 }
1996
/* Try to allocate 'size' bytes of target working area.
 * On success *area points at the allocated descriptor and ERROR_OK is
 * returned; on failure ERROR_TARGET_RESOURCE_NOT_AVAILABLE (or the error of a
 * failing sub-operation) is returned without logging a user warning — see
 * target_alloc_working_area() for the warning variant. */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		/* Ask the target driver whether the MMU is currently on */
		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		/* Pick the physical or virtual base depending on MMU state;
		 * fail if the matching -work-area-* option was not given */
		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = ALIGN_DOWN(target->working_area_size, 4); /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* NOTE(review): if malloc failed, new_wa is NULL here and the
		 * search below simply finds no area and reports
		 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	size = ALIGN_UP(size, 4);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
		  size, c->address);

	/* Save the current target memory contents so they can be restored
	 * when the area is freed (only when backup is enabled) */
	if (target->backup_working_area) {
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2090
2091 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2092 {
2093 int retval;
2094
2095 retval = target_alloc_working_area_try(target, size, area);
2096 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2097 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2098 return retval;
2099
2100 }
2101
2102 static int target_restore_working_area(struct target *target, struct working_area *area)
2103 {
2104 int retval = ERROR_OK;
2105
2106 if (target->backup_working_area && area->backup) {
2107 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2108 if (retval != ERROR_OK)
2109 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2110 area->size, area->address);
2111 }
2112
2113 return retval;
2114 }
2115
2116 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
/* Restore the area's backup memory, if any, and return the area to the allocation pool */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	/* Freeing NULL or an already-free area is a harmless no-op */
	if (!area || area->free)
		return ERROR_OK;

	int retval = ERROR_OK;
	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
		area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	/* Coalesce neighbouring free areas and show the resulting layout */
	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}
2148
/* Public wrapper: free a working area, restoring its backed-up contents. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	const int restore = 1;
	return target_free_working_area_restore(target, area, restore);
}
2153
2154 /* free resources and restore memory, if restoring memory fails,
2155 * free up resources anyway
2156 */
2157 static void target_free_all_working_areas_restore(struct target *target, int restore)
2158 {
2159 struct working_area *c = target->working_areas;
2160
2161 LOG_DEBUG("freeing all working areas");
2162
2163 /* Loop through all areas, restoring the allocated ones and marking them as free */
2164 while (c) {
2165 if (!c->free) {
2166 if (restore)
2167 target_restore_working_area(target, c);
2168 c->free = true;
2169 *c->user = NULL; /* Same as above */
2170 c->user = NULL;
2171 }
2172 c = c->next;
2173 }
2174
2175 /* Run a merge pass to combine all areas into one */
2176 target_merge_working_areas(target);
2177
2178 print_wa_layout(target);
2179 }
2180
2181 void target_free_all_working_areas(struct target *target)
2182 {
2183 target_free_all_working_areas_restore(target, 1);
2184
2185 /* Now we have none or only one working area marked as free */
2186 if (target->working_areas) {
2187 /* Free the last one to allow on-the-fly moving and resizing */
2188 free(target->working_areas->backup);
2189 free(target->working_areas);
2190 target->working_areas = NULL;
2191 }
2192 }
2193
2194 /* Find the largest number of bytes that can be allocated */
2195 uint32_t target_get_working_area_avail(struct target *target)
2196 {
2197 struct working_area *c = target->working_areas;
2198 uint32_t max_size = 0;
2199
2200 if (!c)
2201 return ALIGN_DOWN(target->working_area_size, 4);
2202
2203 while (c) {
2204 if (c->free && max_size < c->size)
2205 max_size = c->size;
2206
2207 c = c->next;
2208 }
2209
2210 return max_size;
2211 }
2212
/* Release every resource owned by a target: driver state, semihosting,
 * event actions, working areas, SMP list membership, RTOS state and finally
 * the target structure itself. The order mirrors construction in reverse. */
static void target_destroy(struct target *target)
{
	/* Let the driver tear down its private state first */
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	if (target->semihosting)
		free(target->semihosting->basedir);
	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* Free the linked list of Tcl event handlers, dropping each body's
	 * interpreter reference */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head, *tmp;

		/* Clear the smp flag on every member so siblings do not try
		 * to use the list we are about to free */
		list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
			list_del(&head->lh);
			head->target->smp = 0;
			free(head);
		}
		/* The shared sentinel list must not be freed */
		if (target->smp_targets != &empty_smp_targets)
			free(target->smp_targets);
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2257
2258 void target_quit(void)
2259 {
2260 struct target_event_callback *pe = target_event_callbacks;
2261 while (pe) {
2262 struct target_event_callback *t = pe->next;
2263 free(pe);
2264 pe = t;
2265 }
2266 target_event_callbacks = NULL;
2267
2268 struct target_timer_callback *pt = target_timer_callbacks;
2269 while (pt) {
2270 struct target_timer_callback *t = pt->next;
2271 free(pt);
2272 pt = t;
2273 }
2274 target_timer_callbacks = NULL;
2275
2276 for (struct target *target = all_targets; target;) {
2277 struct target *tmp;
2278
2279 tmp = target->next;
2280 target_destroy(target);
2281 target = tmp;
2282 }
2283
2284 all_targets = NULL;
2285 }
2286
2287 int target_arch_state(struct target *target)
2288 {
2289 int retval;
2290 if (!target) {
2291 LOG_WARNING("No target has been configured");
2292 return ERROR_OK;
2293 }
2294
2295 if (target->state != TARGET_HALTED)
2296 return ERROR_OK;
2297
2298 retval = target->type->arch_state(target);
2299 return retval;
2300 }
2301
2302 static int target_get_gdb_fileio_info_default(struct target *target,
2303 struct gdb_fileio_info *fileio_info)
2304 {
2305 /* If target does not support semi-hosting function, target
2306 has no need to provide .get_gdb_fileio_info callback.
2307 It just return ERROR_FAIL and gdb_server will return "Txx"
2308 as target halted every time. */
2309 return ERROR_FAIL;
2310 }
2311
2312 static int target_gdb_fileio_end_default(struct target *target,
2313 int retcode, int fileio_errno, bool ctrl_c)
2314 {
2315 return ERROR_OK;
2316 }
2317
2318 int target_profiling_default(struct target *target, uint32_t *samples,
2319 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2320 {
2321 struct timeval timeout, now;
2322
2323 gettimeofday(&timeout, NULL);
2324 timeval_add_time(&timeout, seconds, 0);
2325
2326 LOG_INFO("Starting profiling. Halting and resuming the"
2327 " target as often as we can...");
2328
2329 uint32_t sample_count = 0;
2330 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2331 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2332
2333 int retval = ERROR_OK;
2334 for (;;) {
2335 target_poll(target);
2336 if (target->state == TARGET_HALTED) {
2337 uint32_t t = buf_get_u32(reg->value, 0, 32);
2338 samples[sample_count++] = t;
2339 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2340 retval = target_resume(target, 1, 0, 0, 0);
2341 target_poll(target);
2342 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2343 } else if (target->state == TARGET_RUNNING) {
2344 /* We want to quickly sample the PC. */
2345 retval = target_halt(target);
2346 } else {
2347 LOG_INFO("Target not halted or running");
2348 retval = ERROR_OK;
2349 break;
2350 }
2351
2352 if (retval != ERROR_OK)
2353 break;
2354
2355 gettimeofday(&now, NULL);
2356 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2357 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2358 break;
2359 }
2360 }
2361
2362 *num_samples = sample_count;
2363 return retval;
2364 }
2365
2366 /* Single aligned words are guaranteed to use 16 or 32 bit access
2367 * mode respectively, otherwise data is handled as quickly as
2368 * possible
2369 */
2370 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2371 {
2372 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2373 size, address);
2374
2375 if (!target_was_examined(target)) {
2376 LOG_ERROR("Target not examined yet");
2377 return ERROR_FAIL;
2378 }
2379
2380 if (size == 0)
2381 return ERROR_OK;
2382
2383 if ((address + size - 1) < address) {
2384 /* GDB can request this when e.g. PC is 0xfffffffc */
2385 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2386 address,
2387 size);
2388 return ERROR_FAIL;
2389 }
2390
2391 return target->type->write_buffer(target, address, size, buffer);
2392 }
2393
/* Generic write_buffer implementation: align the start address up to the
 * target's natural data width with small writes, then write the bulk with
 * progressively smaller access sizes. */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* (address & size) != 0 means the address is misaligned at
		 * this power-of-two boundary: fix it with one 'size'-wide write */
		if (address & size) {
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Largest multiple of 'size' still remaining */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2430
2431 /* Single aligned words are guaranteed to use 16 or 32 bit access
2432 * mode respectively, otherwise data is handled as quickly as
2433 * possible
2434 */
2435 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2436 {
2437 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2438 size, address);
2439
2440 if (!target_was_examined(target)) {
2441 LOG_ERROR("Target not examined yet");
2442 return ERROR_FAIL;
2443 }
2444
2445 if (size == 0)
2446 return ERROR_OK;
2447
2448 if ((address + size - 1) < address) {
2449 /* GDB can request this when e.g. PC is 0xfffffffc */
2450 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2451 address,
2452 size);
2453 return ERROR_FAIL;
2454 }
2455
2456 return target->type->read_buffer(target, address, size, buffer);
2457 }
2458
/* Generic read_buffer implementation: mirror of target_write_buffer_default()
 * — align the start address with small reads, then read the bulk with
 * progressively smaller access sizes. */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* (address & size) != 0 means the address is misaligned at
		 * this power-of-two boundary: fix it with one 'size'-wide read */
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Largest multiple of 'size' still remaining */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2494
/* Compute a CRC over 'size' bytes of target memory, preferring the driver's
 * on-target checksum routine and falling back to reading the memory to the
 * host and checksumming locally. Result is stored in *crc. */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->checksum_memory) {
		LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* Fallback: read the whole region and checksum on the host */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			/* NOTE(review): an out-of-memory condition reported as a
			 * syntax error looks odd — verify callers before changing */
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): get_u32 followed by set_u32 with the same
		 * value appears to round-trip the bytes unchanged — confirm
		 * intended effect before simplifying */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2538
2539 int target_blank_check_memory(struct target *target,
2540 struct target_memory_check_block *blocks, int num_blocks,
2541 uint8_t erased_value)
2542 {
2543 if (!target_was_examined(target)) {
2544 LOG_ERROR("Target not examined yet");
2545 return ERROR_FAIL;
2546 }
2547
2548 if (!target->type->blank_check_memory)
2549 return ERROR_NOT_IMPLEMENTED;
2550
2551 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2552 }
2553
2554 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2555 {
2556 uint8_t value_buf[8];
2557 if (!target_was_examined(target)) {
2558 LOG_ERROR("Target not examined yet");
2559 return ERROR_FAIL;
2560 }
2561
2562 int retval = target_read_memory(target, address, 8, 1, value_buf);
2563
2564 if (retval == ERROR_OK) {
2565 *value = target_buffer_get_u64(target, value_buf);
2566 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2567 address,
2568 *value);
2569 } else {
2570 *value = 0x0;
2571 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2572 address);
2573 }
2574
2575 return retval;
2576 }
2577
2578 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2579 {
2580 uint8_t value_buf[4];
2581 if (!target_was_examined(target)) {
2582 LOG_ERROR("Target not examined yet");
2583 return ERROR_FAIL;
2584 }
2585
2586 int retval = target_read_memory(target, address, 4, 1, value_buf);
2587
2588 if (retval == ERROR_OK) {
2589 *value = target_buffer_get_u32(target, value_buf);
2590 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2591 address,
2592 *value);
2593 } else {
2594 *value = 0x0;
2595 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2596 address);
2597 }
2598
2599 return retval;
2600 }
2601
2602 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2603 {
2604 uint8_t value_buf[2];
2605 if (!target_was_examined(target)) {
2606 LOG_ERROR("Target not examined yet");
2607 return ERROR_FAIL;
2608 }
2609
2610 int retval = target_read_memory(target, address, 2, 1, value_buf);
2611
2612 if (retval == ERROR_OK) {
2613 *value = target_buffer_get_u16(target, value_buf);
2614 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2615 address,
2616 *value);
2617 } else {
2618 *value = 0x0;
2619 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2620 address);
2621 }
2622
2623 return retval;
2624 }
2625
2626 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2627 {
2628 if (!target_was_examined(target)) {
2629 LOG_ERROR("Target not examined yet");
2630 return ERROR_FAIL;
2631 }
2632
2633 int retval = target_read_memory(target, address, 1, 1, value);
2634
2635 if (retval == ERROR_OK) {
2636 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2637 address,
2638 *value);
2639 } else {
2640 *value = 0x0;
2641 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2642 address);
2643 }
2644
2645 return retval;
2646 }
2647
2648 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2649 {
2650 int retval;
2651 uint8_t value_buf[8];
2652 if (!target_was_examined(target)) {
2653 LOG_ERROR("Target not examined yet");
2654 return ERROR_FAIL;
2655 }
2656
2657 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2658 address,
2659 value);
2660
2661 target_buffer_set_u64(target, value_buf, value);
2662 retval = target_write_memory(target, address, 8, 1, value_buf);
2663 if (retval != ERROR_OK)
2664 LOG_DEBUG("failed: %i", retval);
2665
2666 return retval;
2667 }
2668
2669 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2670 {
2671 int retval;
2672 uint8_t value_buf[4];
2673 if (!target_was_examined(target)) {
2674 LOG_ERROR("Target not examined yet");
2675 return ERROR_FAIL;
2676 }
2677
2678 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2679 address,
2680 value);
2681
2682 target_buffer_set_u32(target, value_buf, value);
2683 retval = target_write_memory(target, address, 4, 1, value_buf);
2684 if (retval != ERROR_OK)
2685 LOG_DEBUG("failed: %i", retval);
2686
2687 return retval;
2688 }
2689
2690 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2691 {
2692 int retval;
2693 uint8_t value_buf[2];
2694 if (!target_was_examined(target)) {
2695 LOG_ERROR("Target not examined yet");
2696 return ERROR_FAIL;
2697 }
2698
2699 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2700 address,
2701 value);
2702
2703 target_buffer_set_u16(target, value_buf, value);
2704 retval = target_write_memory(target, address, 2, 1, value_buf);
2705 if (retval != ERROR_OK)
2706 LOG_DEBUG("failed: %i", retval);
2707
2708 return retval;
2709 }
2710
2711 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2712 {
2713 int retval;
2714 if (!target_was_examined(target)) {
2715 LOG_ERROR("Target not examined yet");
2716 return ERROR_FAIL;
2717 }
2718
2719 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2720 address, value);
2721
2722 retval = target_write_memory(target, address, 1, 1, &value);
2723 if (retval != ERROR_OK)
2724 LOG_DEBUG("failed: %i", retval);
2725
2726 return retval;
2727 }
2728
2729 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2730 {
2731 int retval;
2732 uint8_t value_buf[8];
2733 if (!target_was_examined(target)) {
2734 LOG_ERROR("Target not examined yet");
2735 return ERROR_FAIL;
2736 }
2737
2738 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2739 address,
2740 value);
2741
2742 target_buffer_set_u64(target, value_buf, value);
2743 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2744 if (retval != ERROR_OK)
2745 LOG_DEBUG("failed: %i", retval);
2746
2747 return retval;
2748 }
2749
2750 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2751 {
2752 int retval;
2753 uint8_t value_buf[4];
2754 if (!target_was_examined(target)) {
2755 LOG_ERROR("Target not examined yet");
2756 return ERROR_FAIL;
2757 }
2758
2759 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2760 address,
2761 value);
2762
2763 target_buffer_set_u32(target, value_buf, value);
2764 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2765 if (retval != ERROR_OK)
2766 LOG_DEBUG("failed: %i", retval);
2767
2768 return retval;
2769 }
2770
2771 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2772 {
2773 int retval;
2774 uint8_t value_buf[2];
2775 if (!target_was_examined(target)) {
2776 LOG_ERROR("Target not examined yet");
2777 return ERROR_FAIL;
2778 }
2779
2780 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2781 address,
2782 value);
2783
2784 target_buffer_set_u16(target, value_buf, value);
2785 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2786 if (retval != ERROR_OK)
2787 LOG_DEBUG("failed: %i", retval);
2788
2789 return retval;
2790 }
2791
2792 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2793 {
2794 int retval;
2795 if (!target_was_examined(target)) {
2796 LOG_ERROR("Target not examined yet");
2797 return ERROR_FAIL;
2798 }
2799
2800 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2801 address, value);
2802
2803 retval = target_write_phys_memory(target, address, 1, 1, &value);
2804 if (retval != ERROR_OK)
2805 LOG_DEBUG("failed: %i", retval);
2806
2807 return retval;
2808 }
2809
2810 static int find_target(struct command_invocation *cmd, const char *name)
2811 {
2812 struct target *target = get_target(name);
2813 if (!target) {
2814 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2815 return ERROR_FAIL;
2816 }
2817 if (!target->tap->enabled) {
2818 command_print(cmd, "Target: TAP %s is disabled, "
2819 "can't be the current target\n",
2820 target->tap->dotted_name);
2821 return ERROR_FAIL;
2822 }
2823
2824 cmd->ctx->current_target = target;
2825 if (cmd->ctx->current_target_override)
2826 cmd->ctx->current_target_override = target;
2827
2828 return ERROR_OK;
2829 }
2830
2831
/* 'targets' command: with one argument, switch the current target; with no
 * arguments (or if switching fails), print a table of all targets. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		/* Try to select the named target as current */
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	/* Fall through to listing all targets (also reached after a failed
	 * selection, so the user sees the available names) */
	struct target *target = all_targets;
	command_print(CMD, "    TargetName         Type       Endian TapName            State       ");
	command_print(CMD, "--  ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		/* '*' marks the current target */
		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2874
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* Current sensed conditions, updated by sense_handler() */
static int power_dropout;
static int srst_asserted;

/* Edge-triggered action flags: set by sense_handler(), consumed (and
 * cleared) by handle_target() which runs the matching Tcl procs */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
2884
/* Sample power-dropout and SRST state from the adapter, detect edges against
 * the previous sample, and set the run_* action flags. Dropout/deassert
 * events are rate-limited to one per 2 seconds via static timestamps. */
static int sense_handler(void)
{
	/* Previous samples, kept across calls for edge detection */
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	/* Falling edge of power_dropout == power restored */
	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	static int64_t last_power;
	/* Rate-limit dropout events to one per 2000 ms */
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	/* Falling edge of srst_asserted == SRST released */
	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	static int64_t last_srst;
	/* Rate-limit deassert events to one per 2000 ms */
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	/* Rising edge of srst_asserted */
	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2936
/* process target state changes */
/* Periodic callback: run sense-driven Tcl event procs (srst_asserted,
 * power_dropout, ...) and poll every enabled, examined target, backing off
 * exponentially on poll failures. 'priv' is the Jim interpreter. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		/* Exponential backoff: skip 'times' polls after failures */
		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			if (target->backoff.times > 0) {
				/* Poll failed recently: try a fresh examination */
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target_set_examined(target);
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
						 target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3049
/* 'reg' command:
 *   reg                 — list all registers of the current target
 *   reg <num|name>      — display one register (refreshing if not cached)
 *   reg <reg> force     — force a fresh read before displaying
 *   reg <reg> <value>   — write a value to the register */
COMMAND_HANDLER(handle_reg_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	struct reg *reg = NULL;

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		unsigned int count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* Skip registers absent on this device or hidden */
				if (reg->exist == false || reg->hidden)
					continue;
				/* only print cached values if they are valid */
				if (reg->valid) {
					char *value = buf_to_hex_str(reg->value,
							reg->size);
					command_print(CMD,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* Walk all caches counting registers until we reach 'num' */
		struct reg_cache *cache = target->reg_cache;
		unsigned int count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_FAIL;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);

		if (!reg)
			goto not_found;
	}

	assert(reg); /* give clang a hint that we *know* reg is != NULL here */

	if (!reg->exist)
		goto not_found;

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		/* 'force' invalidates the cached value to trigger a fresh read */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0) {
			int retval = reg->type->get(reg);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not read register '%s'", reg->name);
				return retval;
			}
		}
		char *value = buf_to_hex_str(reg->value, reg->size);
		command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (!buf)
			return ERROR_FAIL;
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		int retval = reg->type->set(reg, buf);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not write to register '%s'", reg->name);
		} else {
			/* Echo back the value as it now appears in the cache */
			char *value = buf_to_hex_str(reg->value, reg->size);
			command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
			free(value);
		}

		free(buf);

		return retval;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;

not_found:
	command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
	return ERROR_FAIL;
}
3180
3181 COMMAND_HANDLER(handle_poll_command)
3182 {
3183 int retval = ERROR_OK;
3184 struct target *target = get_current_target(CMD_CTX);
3185
3186 if (CMD_ARGC == 0) {
3187 command_print(CMD, "background polling: %s",
3188 jtag_poll_get_enabled() ? "on" : "off");
3189 command_print(CMD, "TAP: %s (%s)",
3190 target->tap->dotted_name,
3191 target->tap->enabled ? "enabled" : "disabled");
3192 if (!target->tap->enabled)
3193 return ERROR_OK;
3194 retval = target_poll(target);
3195 if (retval != ERROR_OK)
3196 return retval;
3197 retval = target_arch_state(target);
3198 if (retval != ERROR_OK)
3199 return retval;
3200 } else if (CMD_ARGC == 1) {
3201 bool enable;
3202 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3203 jtag_poll_set_enabled(enable);
3204 } else
3205 return ERROR_COMMAND_SYNTAX_ERROR;
3206
3207 return retval;
3208 }
3209
3210 COMMAND_HANDLER(handle_wait_halt_command)
3211 {
3212 if (CMD_ARGC > 1)
3213 return ERROR_COMMAND_SYNTAX_ERROR;
3214
3215 unsigned ms = DEFAULT_HALT_TIMEOUT;
3216 if (1 == CMD_ARGC) {
3217 int retval = parse_uint(CMD_ARGV[0], &ms);
3218 if (retval != ERROR_OK)
3219 return ERROR_COMMAND_SYNTAX_ERROR;
3220 }
3221
3222 struct target *target = get_current_target(CMD_CTX);
3223 return target_wait_state(target, TARGET_HALTED, ms);
3224 }
3225
3226 /* wait for target state to change. The trick here is to have a low
3227 * latency for short waits and not to suck up all the CPU time
3228 * on longer waits.
3229 *
3230 * After 500ms, keep_alive() is invoked
3231 */
3232 int target_wait_state(struct target *target, enum target_state state, unsigned int ms)
3233 {
3234 int retval;
3235 int64_t then = 0, cur;
3236 bool once = true;
3237
3238 for (;;) {
3239 retval = target_poll(target);
3240 if (retval != ERROR_OK)
3241 return retval;
3242 if (target->state == state)
3243 break;
3244 cur = timeval_ms();
3245 if (once) {
3246 once = false;
3247 then = timeval_ms();
3248 LOG_DEBUG("waiting for target %s...",
3249 nvp_value2name(nvp_target_state, state)->name);
3250 }
3251
3252 if (cur-then > 500)
3253 keep_alive();
3254
3255 if ((cur-then) > ms) {
3256 LOG_ERROR("timed out while waiting for target %s",
3257 nvp_value2name(nvp_target_state, state)->name);
3258 return ERROR_FAIL;
3259 }
3260 }
3261
3262 return ERROR_OK;
3263 }
3264
3265 COMMAND_HANDLER(handle_halt_command)
3266 {
3267 LOG_DEBUG("-");
3268
3269 struct target *target = get_current_target(CMD_CTX);
3270
3271 target->verbose_halt_msg = true;
3272
3273 int retval = target_halt(target);
3274 if (retval != ERROR_OK)
3275 return retval;
3276
3277 if (CMD_ARGC == 1) {
3278 unsigned wait_local;
3279 retval = parse_uint(CMD_ARGV[0], &wait_local);
3280 if (retval != ERROR_OK)
3281 return ERROR_COMMAND_SYNTAX_ERROR;
3282 if (!wait_local)
3283 return ERROR_OK;
3284 }
3285
3286 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3287 }
3288
3289 COMMAND_HANDLER(handle_soft_reset_halt_command)
3290 {
3291 struct target *target = get_current_target(CMD_CTX);
3292
3293 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3294
3295 target_soft_reset_halt(target);
3296
3297 return ERROR_OK;
3298 }
3299
3300 COMMAND_HANDLER(handle_reset_command)
3301 {
3302 if (CMD_ARGC > 1)
3303 return ERROR_COMMAND_SYNTAX_ERROR;
3304
3305 enum target_reset_mode reset_mode = RESET_RUN;
3306 if (CMD_ARGC == 1) {
3307 const struct nvp *n;
3308 n = nvp_name2value(nvp_reset_modes, CMD_ARGV[0]);
3309 if ((!n->name) || (n->value == RESET_UNKNOWN))
3310 return ERROR_COMMAND_SYNTAX_ERROR;
3311 reset_mode = n->value;
3312 }
3313
3314 /* reset *all* targets */
3315 return target_process_reset(CMD, reset_mode);
3316 }
3317
3318
3319 COMMAND_HANDLER(handle_resume_command)
3320 {
3321 int current = 1;
3322 if (CMD_ARGC > 1)
3323 return ERROR_COMMAND_SYNTAX_ERROR;
3324
3325 struct target *target = get_current_target(CMD_CTX);
3326
3327 /* with no CMD_ARGV, resume from current pc, addr = 0,
3328 * with one arguments, addr = CMD_ARGV[0],
3329 * handle breakpoints, not debugging */
3330 target_addr_t addr = 0;
3331 if (CMD_ARGC == 1) {
3332 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3333 current = 0;
3334 }
3335
3336 return target_resume(target, current, addr, 1, 0);
3337 }
3338
3339 COMMAND_HANDLER(handle_step_command)
3340 {
3341 if (CMD_ARGC > 1)
3342 return ERROR_COMMAND_SYNTAX_ERROR;
3343
3344 LOG_DEBUG("-");
3345
3346 /* with no CMD_ARGV, step from current pc, addr = 0,
3347 * with one argument addr = CMD_ARGV[0],
3348 * handle breakpoints, debugging */
3349 target_addr_t addr = 0;
3350 int current_pc = 1;
3351 if (CMD_ARGC == 1) {
3352 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3353 current_pc = 0;
3354 }
3355
3356 struct target *target = get_current_target(CMD_CTX);
3357
3358 return target_step(target, current_pc, addr, 1);
3359 }
3360
3361 void target_handle_md_output(struct command_invocation *cmd,
3362 struct target *target, target_addr_t address, unsigned size,
3363 unsigned count, const uint8_t *buffer)
3364 {
3365 const unsigned line_bytecnt = 32;
3366 unsigned line_modulo = line_bytecnt / size;
3367
3368 char output[line_bytecnt * 4 + 1];
3369 unsigned output_len = 0;
3370
3371 const char *value_fmt;
3372 switch (size) {
3373 case 8:
3374 value_fmt = "%16.16"PRIx64" ";
3375 break;
3376 case 4:
3377 value_fmt = "%8.8"PRIx64" ";
3378 break;
3379 case 2:
3380 value_fmt = "%4.4"PRIx64" ";
3381 break;
3382 case 1:
3383 value_fmt = "%2.2"PRIx64" ";
3384 break;
3385 default:
3386 /* "can't happen", caller checked */
3387 LOG_ERROR("invalid memory read size: %u", size);
3388 return;
3389 }
3390
3391 for (unsigned i = 0; i < count; i++) {
3392 if (i % line_modulo == 0) {
3393 output_len += snprintf(output + output_len,
3394 sizeof(output) - output_len,
3395 TARGET_ADDR_FMT ": ",
3396 (address + (i * size)));
3397 }
3398
3399 uint64_t value = 0;
3400 const uint8_t *value_ptr = buffer + i * size;
3401 switch (size) {
3402 case 8:
3403 value = target_buffer_get_u64(target, value_ptr);
3404 break;
3405 case 4:
3406 value = target_buffer_get_u32(target, value_ptr);
3407 break;
3408 case 2:
3409 value = target_buffer_get_u16(target, value_ptr);
3410 break;
3411 case 1:
3412 value = *value_ptr;
3413 }
3414 output_len += snprintf(output + output_len,
3415 sizeof(output) - output_len,
3416 value_fmt, value);
3417
3418 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3419 command_print(cmd, "%s", output);
3420 output_len = 0;
3421 }
3422 }
3423 }
3424
3425 COMMAND_HANDLER(handle_md_command)
3426 {
3427 if (CMD_ARGC < 1)
3428 return ERROR_COMMAND_SYNTAX_ERROR;
3429
3430 unsigned size = 0;
3431 switch (CMD_NAME[2]) {
3432 case 'd':
3433 size = 8;
3434 break;
3435 case 'w':
3436 size = 4;
3437 break;
3438 case 'h':
3439 size = 2;
3440 break;
3441 case 'b':
3442 size = 1;
3443 break;
3444 default:
3445 return ERROR_COMMAND_SYNTAX_ERROR;
3446 }
3447
3448 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3449 int (*fn)(struct target *target,
3450 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3451 if (physical) {
3452 CMD_ARGC--;
3453 CMD_ARGV++;
3454 fn = target_read_phys_memory;
3455 } else
3456 fn = target_read_memory;
3457 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3458 return ERROR_COMMAND_SYNTAX_ERROR;
3459
3460 target_addr_t address;
3461 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3462
3463 unsigned count = 1;
3464 if (CMD_ARGC == 2)
3465 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3466
3467 uint8_t *buffer = calloc(count, size);
3468 if (!buffer) {
3469 LOG_ERROR("Failed to allocate md read buffer");
3470 return ERROR_FAIL;
3471 }
3472
3473 struct target *target = get_current_target(CMD_CTX);
3474 int retval = fn(target, address, size, count, buffer);
3475 if (retval == ERROR_OK)
3476 target_handle_md_output(CMD, target, address, size, count, buffer);
3477
3478 free(buffer);
3479
3480 return retval;
3481 }
3482
/* Pointer to a target memory-write routine (target_write_memory or
 * target_write_phys_memory); lets the mw/fill helpers share one path. */
typedef int (*target_write_fn)(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3485
3486 static int target_fill_mem(struct target *target,
3487 target_addr_t address,
3488 target_write_fn fn,
3489 unsigned data_size,
3490 /* value */
3491 uint64_t b,
3492 /* count */
3493 unsigned c)
3494 {
3495 /* We have to write in reasonably large chunks to be able
3496 * to fill large memory areas with any sane speed */
3497 const unsigned chunk_size = 16384;
3498 uint8_t *target_buf = malloc(chunk_size * data_size);
3499 if (!target_buf) {
3500 LOG_ERROR("Out of memory");
3501 return ERROR_FAIL;
3502 }
3503
3504 for (unsigned i = 0; i < chunk_size; i++) {
3505 switch (data_size) {
3506 case 8:
3507 target_buffer_set_u64(target, target_buf + i * data_size, b);
3508 break;
3509 case 4:
3510 target_buffer_set_u32(target, target_buf + i * data_size, b);
3511 break;
3512 case 2:
3513 target_buffer_set_u16(target, target_buf + i * data_size, b);
3514 break;
3515 case 1:
3516 target_buffer_set_u8(target, target_buf + i * data_size, b);
3517 break;
3518 default:
3519 exit(-1);
3520 }
3521 }
3522
3523 int retval = ERROR_OK;
3524
3525 for (unsigned x = 0; x < c; x += chunk_size) {
3526 unsigned current;
3527 current = c - x;
3528 if (current > chunk_size)
3529 current = chunk_size;
3530 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3531 if (retval != ERROR_OK)
3532 break;
3533 /* avoid GDB timeouts */
3534 keep_alive();
3535 }
3536 free(target_buf);
3537
3538 return retval;
3539 }
3540
3541
3542 COMMAND_HANDLER(handle_mw_command)
3543 {
3544 if (CMD_ARGC < 2)
3545 return ERROR_COMMAND_SYNTAX_ERROR;
3546 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3547 target_write_fn fn;
3548 if (physical) {
3549 CMD_ARGC--;
3550 CMD_ARGV++;
3551 fn = target_write_phys_memory;
3552 } else
3553 fn = target_write_memory;
3554 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3555 return ERROR_COMMAND_SYNTAX_ERROR;
3556
3557 target_addr_t address;
3558 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3559
3560 uint64_t value;
3561 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3562
3563 unsigned count = 1;
3564 if (CMD_ARGC == 3)
3565 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3566
3567 struct target *target = get_current_target(CMD_CTX);
3568 unsigned wordsize;
3569 switch (CMD_NAME[2]) {
3570 case 'd':
3571 wordsize = 8;
3572 break;
3573 case 'w':
3574 wordsize = 4;
3575 break;
3576 case 'h':
3577 wordsize = 2;
3578 break;
3579 case 'b':
3580 wordsize = 1;
3581 break;
3582 default:
3583 return ERROR_COMMAND_SYNTAX_ERROR;
3584 }
3585
3586 return target_fill_mem(target, address, fn, wordsize, value, count);
3587 }
3588
3589 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3590 target_addr_t *min_address, target_addr_t *max_address)
3591 {
3592 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3593 return ERROR_COMMAND_SYNTAX_ERROR;
3594
3595 /* a base address isn't always necessary,
3596 * default to 0x0 (i.e. don't relocate) */
3597 if (CMD_ARGC >= 2) {
3598 target_addr_t addr;
3599 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3600 image->base_address = addr;
3601 image->base_address_set = true;
3602 } else
3603 image->base_address_set = false;
3604
3605 image->start_address_set = false;
3606
3607 if (CMD_ARGC >= 4)
3608 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3609 if (CMD_ARGC == 5) {
3610 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3611 /* use size (given) to find max (required) */
3612 *max_address += *min_address;
3613 }
3614
3615 if (*min_address > *max_address)
3616 return ERROR_COMMAND_SYNTAX_ERROR;
3617
3618 return ERROR_OK;
3619 }
3620
/* 'load_image' command: copy each section of an image file into target
 * memory, optionally relocated by a base address and clipped to the
 * [min_address, max_address] window parsed by parse_load_image_command. */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* default window: entire address space */
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* write only the part of this section that overlaps the
		 * [min_address, max_address] window, clipping both ends */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	/* report throughput only for a fully successful download */
	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3704
3705 COMMAND_HANDLER(handle_dump_image_command)
3706 {
3707 struct fileio *fileio;
3708 uint8_t *buffer;
3709 int retval, retvaltemp;
3710 target_addr_t address, size;
3711 struct duration bench;
3712 struct target *target = get_current_target(CMD_CTX);
3713
3714 if (CMD_ARGC != 3)
3715 return ERROR_COMMAND_SYNTAX_ERROR;
3716
3717 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3718 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3719
3720 uint32_t buf_size = (size > 4096) ? 4096 : size;
3721 buffer = malloc(buf_size);
3722 if (!buffer)
3723 return ERROR_FAIL;
3724
3725 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3726 if (retval != ERROR_OK) {
3727 free(buffer);
3728 return retval;
3729 }
3730
3731 duration_start(&bench);
3732
3733 while (size > 0) {
3734 size_t size_written;
3735 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3736 retval = target_read_buffer(target, address, this_run_size, buffer);
3737 if (retval != ERROR_OK)
3738 break;
3739
3740 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3741 if (retval != ERROR_OK)
3742 break;
3743
3744 size -= this_run_size;
3745 address += this_run_size;
3746 }
3747
3748 free(buffer);
3749
3750 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3751 size_t filesize;
3752 retval = fileio_size(fileio, &filesize);
3753 if (retval != ERROR_OK)
3754 return retval;
3755 command_print(CMD,
3756 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3757 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3758 }
3759
3760 retvaltemp = fileio_close(fileio);
3761 if (retvaltemp != ERROR_OK)
3762 return retvaltemp;
3763
3764 return retval;
3765 }
3766
/* Behavior selector for the test_image/verify_image command family
 * (see handle_verify_image_command_internal). */
enum verify_mode {
	IMAGE_TEST = 0,		/* read sections, print addr/len, no comparison */
	IMAGE_VERIFY = 1,	/* checksum; binary compare on mismatch */
	IMAGE_CHECKSUM_ONLY = 2	/* checksum only; mismatch is a hard error */
};
3772
3773 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3774 {
3775 uint8_t *buffer;
3776 size_t buf_cnt;
3777 uint32_t image_size;
3778 int retval;
3779 uint32_t checksum = 0;
3780 uint32_t mem_checksum = 0;
3781
3782 struct image image;
3783
3784 struct target *target = get_current_target(CMD_CTX);
3785
3786 if (CMD_ARGC < 1)
3787 return ERROR_COMMAND_SYNTAX_ERROR;
3788
3789 if (!target) {
3790 LOG_ERROR("no target selected");
3791 return ERROR_FAIL;
3792 }
3793
3794 struct duration bench;
3795 duration_start(&bench);
3796
3797 if (CMD_ARGC >= 2) {
3798 target_addr_t addr;
3799 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3800 image.base_address = addr;
3801 image.base_address_set = true;
3802 } else {
3803 image.base_address_set = false;
3804 image.base_address = 0x0;
3805 }
3806
3807 image.start_address_set = false;
3808
3809 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3810 if (retval != ERROR_OK)
3811 return retval;
3812
3813 image_size = 0x0;
3814 int diffs = 0;
3815 retval = ERROR_OK;
3816 for (unsigned int i = 0; i < image.num_sections; i++) {
3817 buffer = malloc(image.sections[i].size);
3818 if (!buffer) {
3819 command_print(CMD,
3820 "error allocating buffer for section (%" PRIu32 " bytes)",
3821 image.sections[i].size);
3822 break;
3823 }
3824 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3825 if (retval != ERROR_OK) {
3826 free(buffer);
3827 break;
3828 }
3829
3830 if (verify >= IMAGE_VERIFY) {
3831 /* calculate checksum of image */
3832 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3833 if (retval != ERROR_OK) {
3834 free(buffer);
3835 break;
3836 }
3837
3838 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3839 if (retval != ERROR_OK) {
3840 free(buffer);
3841 break;
3842 }
3843 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3844 LOG_ERROR("checksum mismatch");
3845 free(buffer);
3846 retval = ERROR_FAIL;
3847 goto done;
3848 }
3849 if (checksum != mem_checksum) {
3850 /* failed crc checksum, fall back to a binary compare */
3851 uint8_t *data;
3852
3853 if (diffs == 0)
3854 LOG_ERROR("checksum mismatch - attempting binary compare");
3855
3856 data = malloc(buf_cnt);
3857
3858 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3859 if (retval == ERROR_OK) {
3860 uint32_t t;
3861 for (t = 0; t < buf_cnt; t++) {
3862 if (data[t] != buffer[t]) {
3863 command_print(CMD,
3864 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3865 diffs,
3866 (unsigned)(t + image.sections[i].base_address),
3867 data[t],
3868 buffer[t]);
3869 if (diffs++ >= 127) {
3870 command_print(CMD, "More than 128 errors, the rest are not printed.");
3871 free(data);
3872 free(buffer);
3873 goto done;
3874 }
3875 }
3876 keep_alive();
3877 }
3878 }
3879 free(data);
3880 }
3881 } else {
3882 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3883 image.sections[i].base_address,
3884 buf_cnt);
3885 }
3886
3887 free(buffer);
3888 image_size += buf_cnt;
3889 }
3890 if (diffs > 0)
3891 command_print(CMD, "No more differences found.");
3892 done:
3893 if (diffs > 0)
3894 retval = ERROR_FAIL;
3895 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3896 command_print(CMD, "verified %" PRIu32 " bytes "
3897 "in %fs (%0.3f KiB/s)", image_size,
3898 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3899 }
3900
3901 image_close(&image);
3902
3903 return retval;
3904 }
3905
/* 'verify_image_checksum' command: CRC-only comparison; any mismatch fails. */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3910
/* 'verify_image' command: CRC comparison with binary-compare fallback. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3915
/* 'test_image' command: read the image and print section info, no comparison. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3920
/* Print one line per breakpoint set on the current target. Software
 * breakpoints show the saved original instruction; hardware breakpoints
 * are distinguished into context (ASID only), hybrid (address + ASID)
 * and plain address breakpoints. */
static int handle_bp_command_list(struct command_invocation *cmd)
{
	struct target *target = get_current_target(cmd->ctx);
	struct breakpoint *breakpoint = target->breakpoints;
	while (breakpoint) {
		if (breakpoint->type == BKPT_SOFT) {
			char *buf = buf_to_hex_str(breakpoint->orig_instr,
					breakpoint->length);
			command_print(cmd, "Software breakpoint(IVA): addr=" TARGET_ADDR_FMT ", len=0x%x, orig_instr=0x%s",
					breakpoint->address,
					breakpoint->length,
					buf);
			free(buf);
		} else {
			/* hardware breakpoint: classify by which of addr/asid are set */
			if ((breakpoint->address == 0) && (breakpoint->asid != 0))
				command_print(cmd, "Context breakpoint: asid=0x%8.8" PRIx32 ", len=0x%x, num=%u",
						breakpoint->asid,
						breakpoint->length, breakpoint->number);
			else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
				command_print(cmd, "Hybrid breakpoint(IVA): addr=" TARGET_ADDR_FMT ", len=0x%x, num=%u",
						breakpoint->address,
						breakpoint->length, breakpoint->number);
				command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
						breakpoint->asid);
			} else
				command_print(cmd, "Hardware breakpoint(IVA): addr=" TARGET_ADDR_FMT ", len=0x%x, num=%u",
						breakpoint->address,
						breakpoint->length, breakpoint->number);
		}

		breakpoint = breakpoint->next;
	}
	return ERROR_OK;
}
3955
3956 static int handle_bp_command_set(struct command_invocation *cmd,
3957 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3958 {
3959 struct target *target = get_current_target(cmd->ctx);
3960 int retval;
3961
3962 if (asid == 0) {
3963 retval = breakpoint_add(target, addr, length, hw);
3964 /* error is always logged in breakpoint_add(), do not print it again */
3965 if (retval == ERROR_OK)
3966 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3967
3968 } else if (addr == 0) {
3969 if (!target->type->add_context_breakpoint) {
3970 LOG_ERROR("Context breakpoint not available");
3971 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3972 }
3973 retval = context_breakpoint_add(target, asid, length, hw);
3974 /* error is always logged in context_breakpoint_add(), do not print it again */
3975 if (retval == ERROR_OK)
3976 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3977
3978 } else {
3979 if (!target->type->add_hybrid_breakpoint) {
3980 LOG_ERROR("Hybrid breakpoint not available");
3981 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3982 }
3983 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3984 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
3985 if (retval == ERROR_OK)
3986 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3987 }
3988 return retval;
3989 }
3990
/* 'bp' command:
 *   bp                       - list all breakpoints
 *   bp <addr> <len>          - software breakpoint
 *   bp <addr> <len> hw       - hardware breakpoint
 *   bp <asid> <len> hw_ctx   - context breakpoint (ASID match)
 *   bp <addr> <asid> <len>   - hybrid hardware breakpoint
 */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
	case 0:
		return handle_bp_command_list(CMD);

	case 2:
		asid = 0;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	case 3:
		if (strcmp(CMD_ARGV[2], "hw") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			asid = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			addr = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		}
		/* three arguments without a recognized keyword are treated as
		 * the hybrid <addr> <asid> <len> form below */
		/* fallthrough */
	case 4:
		/* NOTE(review): when CMD_ARGC == 4 only the first three
		 * arguments are parsed and CMD_ARGV[3] is silently ignored —
		 * confirm whether a fourth argument should be rejected. */
		hw = BKPT_HARD;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
4034
4035 COMMAND_HANDLER(handle_rbp_command)
4036 {
4037 int retval;
4038
4039 if (CMD_ARGC != 1)
4040 return ERROR_COMMAND_SYNTAX_ERROR;
4041
4042 struct target *target = get_current_target(CMD_CTX);
4043
4044 if (!strcmp(CMD_ARGV[0], "all")) {
4045 retval = breakpoint_remove_all(target);
4046
4047 if (retval != ERROR_OK) {
4048 command_print(CMD, "Error encountered during removal of all breakpoints.");
4049 command_print(CMD, "Some breakpoints may have remained set.");
4050 }
4051 } else {
4052 target_addr_t addr;
4053 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4054
4055 retval = breakpoint_remove(target, addr);
4056
4057 if (retval != ERROR_OK)
4058 command_print(CMD, "Error during removal of breakpoint at address " TARGET_ADDR_FMT, addr);
4059 }
4060
4061 return retval;
4062 }
4063
/* 'wp' command:
 *   wp                                      - list watchpoints
 *   wp <addr> <len> [r|w|a [value [mask]]]  - add a watchpoint
 *
 * The switch cases below intentionally fall through: each extra argument
 * is parsed, then control drops into the shorter forms. */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	/* no arguments: print every watchpoint on the current target */
	if (CMD_ARGC == 0) {
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx64
					", mask: 0x%8.8" PRIx64,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint64_t data_value = 0x0;
	uint64_t data_mask = WATCHPOINT_IGNORE_DATA_VALUE_MASK;
	bool mask_specified = false;

	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u64, CMD_ARGV[4], data_mask);
		mask_specified = true;
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u64, CMD_ARGV[3], data_value);
		/* if the user specified only a data value without a mask,
		 * the mask should be 0 (match the value exactly) */
		if (!mask_specified)
			data_mask = 0;
		/* fall through */
	case 3:
		/* third argument selects the trigger: read/write/access */
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
4136
4137 COMMAND_HANDLER(handle_rwp_command)
4138 {
4139 int retval;
4140
4141 if (CMD_ARGC != 1)
4142 return ERROR_COMMAND_SYNTAX_ERROR;
4143
4144 struct target *target = get_current_target(CMD_CTX);
4145 if (!strcmp(CMD_ARGV[0], "all")) {
4146 retval = watchpoint_remove_all(target);
4147
4148 if (retval != ERROR_OK) {
4149 command_print(CMD, "Error encountered during removal of all watchpoints.");
4150 command_print(CMD, "Some watchpoints may have remained set.");
4151 }
4152 } else {
4153 target_addr_t addr;
4154 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4155
4156 retval = watchpoint_remove(target, addr);
4157
4158 if (retval != ERROR_OK)
4159 command_print(CMD, "Error during removal of watchpoint at address " TARGET_ADDR_FMT, addr);
4160 }
4161
4162 return retval;
4163 }
4164
4165 /**
4166 * Translate a virtual address to a physical address.
4167 *
4168 * The low-level target implementation must have logged a detailed error
4169 * which is forwarded to telnet/GDB session.
4170 */
4171 COMMAND_HANDLER(handle_virt2phys_command)
4172 {
4173 if (CMD_ARGC != 1)
4174 return ERROR_COMMAND_SYNTAX_ERROR;
4175
4176 target_addr_t va;
4177 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4178 target_addr_t pa;
4179
4180 struct target *target = get_current_target(CMD_CTX);
4181 int retval = target->type->virt2phys(target, va, &pa);
4182 if (retval == ERROR_OK)
4183 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4184
4185 return retval;
4186 }
4187
/* Write len bytes to f; short writes are logged but not propagated. */
static void write_data(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4194
/* Write a 32-bit word to f in the target's byte order. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t raw[4];

	target_buffer_set_u32(target, raw, l);
	write_data(f, raw, sizeof(raw));
}
4202
/* Write the bytes of a string to f, without the NUL terminator. */
static void write_string(FILE *out, char *str)
{
	size_t n = strlen(str);
	write_data(out, str, n);
}
4207
/* gmon histogram bucket: one 16-bit little-endian sample counter. */
typedef unsigned char UNIT[2]; /* unit of profiling */
4209
/* Dump a gmon.out histogram file.
 *
 * Converts an array of sampled PC values into the binary "gmon" format
 * consumed by gprof: the "gmon" magic + version header, one time-histogram
 * record header (low_pc/high_pc/bucket count/sample rate/dimension), then
 * one 16-bit counter per bucket.
 *
 * samples/sample_num: raw PC samples to histogram
 * with_range:         if true, the histogram covers
 *                     [start_address, end_address); otherwise the range is
 *                     derived from the samples themselves
 * duration_ms:        wall time over which the samples were collected,
 *                     used to compute the sample rate written to the file
 *
 * Failures (fopen/malloc) are silent: the function simply returns.
 */
static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
		uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
{
	uint32_t i;
	FILE *f = fopen(filename, "w");
	if (!f)
		return;
	write_string(f, "gmon");
	write_long(f, 0x00000001, target); /* Version */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */

	uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
	write_data(f, &zero, 1);

	/* figure out bucket size */
	uint32_t min;
	uint32_t max;
	if (with_range) {
		min = start_address;
		max = end_address;
	} else {
		min = samples[0];
		max = samples[0];
		for (i = 0; i < sample_num; i++) {
			if (min > samples[i])
				min = samples[i];
			if (max < samples[i])
				max = samples[i];
		}

		/* max should be (largest sample + 1)
		 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
		if (max < UINT32_MAX)
			max++;

		/* gprof requires (max - min) >= 2 */
		while ((max - min) < 2) {
			if (max < UINT32_MAX)
				max++;
			else
				min--;
		}
	}

	uint32_t address_space = max - min;

	/* FIXME: What is the reasonable number of buckets?
	 * The profiling result will be more accurate if there are enough buckets. */
	static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
	uint32_t num_buckets = address_space / sizeof(UNIT);
	if (num_buckets > max_buckets)
		num_buckets = max_buckets;
	int *buckets = malloc(sizeof(int) * num_buckets);
	if (!buckets) {
		fclose(f);
		return;
	}
	memset(buckets, 0, sizeof(int) * num_buckets);
	for (i = 0; i < sample_num; i++) {
		uint32_t address = samples[i];

		/* samples outside the histogram range are dropped */
		if ((address < min) || (max <= address))
			continue;

		long long a = address - min;
		long long b = num_buckets;
		long long c = address_space;
		int index_t = (a * b) / c; /* danger!!!! int32 overflows */
		buckets[index_t]++;
	}

	/* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
	write_long(f, min, target); /* low_pc */
	write_long(f, max, target); /* high_pc */
	write_long(f, num_buckets, target); /* # of buckets */
	float sample_rate = sample_num / (duration_ms / 1000.0);
	write_long(f, sample_rate, target);
	write_string(f, "seconds");
	/* pad the 15-byte dimension field with NULs after "seconds" */
	for (i = 0; i < (15-strlen("seconds")); i++)
		write_data(f, &zero, 1);
	write_string(f, "s");

	/*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */

	char *data = malloc(2 * num_buckets);
	if (data) {
		for (i = 0; i < num_buckets; i++) {
			int val;
			val = buckets[i];
			/* each counter is saturated to 16 bits, little-endian */
			if (val > 65535)
				val = 65535;
			data[i * 2] = val&0xff;
			data[i * 2 + 1] = (val >> 8) & 0xff;
		}
		free(buckets);
		write_data(f, data, num_buckets * 2);
		free(data);
	} else
		free(buckets);

	fclose(f);
}
4315
/* profiling samples the CPU PC as quickly as OpenOCD is able,
 * which will be used as a random sampling of PC */
COMMAND_HANDLER(handle_profile_command)
{
	struct target *target = get_current_target(CMD_CTX);

	/* Two forms: "profile <arg0> <file>" or "profile <arg0> <file> <start> <end>". */
	if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
		return ERROR_COMMAND_SYNTAX_ERROR;

	const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
	uint32_t offset;
	uint32_t num_of_samples;
	int retval = ERROR_OK;
	/* Remember the run state so it can be restored after sampling. */
	bool halted_before_profiling = target->state == TARGET_HALTED;

	/* Arg 0 is passed through to target_profiling() as "offset";
	 * its exact meaning is defined by the per-target implementation. */
	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);

	uint32_t start_address = 0;
	uint32_t end_address = 0;
	bool with_range = false;
	if (CMD_ARGC == 4) {
		/* Optional PC range restricting the histogram. */
		with_range = true;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
		if (start_address > end_address || (end_address - start_address) < 2) {
			command_print(CMD, "Error: end - start < 2");
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
	}

	uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
	if (!samples) {
		LOG_ERROR("No memory to store samples.");
		return ERROR_FAIL;
	}

	uint64_t timestart_ms = timeval_ms();
	/**
	 * Some cores let us sample the PC without the
	 * annoying halt/resume step; for example, ARMv7 PCSR.
	 * Provide a way to use that more efficient mechanism.
	 */
	retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
			&num_of_samples, offset);
	if (retval != ERROR_OK) {
		free(samples);
		return retval;
	}
	/* Wall-clock duration of the sampling run, used for the sample rate. */
	uint32_t duration_ms = timeval_ms() - timestart_ms;

	assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);

	/* Refresh target->state before deciding whether to halt/resume. */
	retval = target_poll(target);
	if (retval != ERROR_OK) {
		free(samples);
		return retval;
	}

	if (target->state == TARGET_RUNNING && halted_before_profiling) {
		/* The target was halted before we started and is running now. Halt it,
		 * for consistency. */
		retval = target_halt(target);
		if (retval != ERROR_OK) {
			free(samples);
			return retval;
		}
	} else if (target->state == TARGET_HALTED && !halted_before_profiling) {
		/* The target was running before we started and is halted now. Resume
		 * it, for consistency. */
		retval = target_resume(target, 1, 0, 0, 0);
		if (retval != ERROR_OK) {
			free(samples);
			return retval;
		}
	}

	/* Final poll so the reported state matches reality. */
	retval = target_poll(target);
	if (retval != ERROR_OK) {
		free(samples);
		return retval;
	}

	/* Convert the raw PC samples into a gmon.out histogram file. */
	write_gmon(samples, num_of_samples, CMD_ARGV[1],
			with_range, start_address, end_address, target, duration_ms);
	command_print(CMD, "Wrote %s", CMD_ARGV[1]);

	free(samples);
	return retval;
}
4405
4406 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4407 {
4408 char *namebuf;
4409 Jim_Obj *obj_name, *obj_val;
4410 int result;
4411
4412 namebuf = alloc_printf("%s(%d)", varname, idx);
4413 if (!namebuf)
4414 return JIM_ERR;
4415
4416 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4417 jim_wide wide_val = val;
4418 obj_val = Jim_NewWideObj(interp, wide_val);
4419 if (!obj_name || !obj_val) {
4420 free(namebuf);
4421 return JIM_ERR;
4422 }
4423
4424 Jim_IncrRefCount(obj_name);
4425 Jim_IncrRefCount(obj_val);
4426 result = Jim_SetVariable(interp, obj_name, obj_val);
4427 Jim_DecrRefCount(interp, obj_name);
4428 Jim_DecrRefCount(interp, obj_val);
4429 free(namebuf);
4430 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4431 return result;
4432 }
4433
4434 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4435 {
4436 int e;
4437
4438 LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");
4439
4440 /* argv[0] = name of array to receive the data
4441 * argv[1] = desired element width in bits
4442 * argv[2] = memory address
4443 * argv[3] = count of times to read
4444 * argv[4] = optional "phys"
4445 */
4446 if (argc < 4 || argc > 5) {
4447 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4448 return JIM_ERR;
4449 }
4450
4451 /* Arg 0: Name of the array variable */
4452 const char *varname = Jim_GetString(argv[0], NULL);
4453
4454 /* Arg 1: Bit width of one element */
4455 long l;
4456 e = Jim_GetLong(interp, argv[1], &l);
4457 if (e != JIM_OK)
4458 return e;
4459 const unsigned int width_bits = l;
4460
4461 if (width_bits != 8 &&
4462 width_bits != 16 &&
4463 width_bits != 32 &&
4464 width_bits != 64) {
4465 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4466 Jim_AppendStrings(interp, Jim_GetResult(interp),
4467 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4468 return JIM_ERR;
4469 }
4470 const unsigned int width = width_bits / 8;
4471
4472 /* Arg 2: Memory address */
4473 jim_wide wide_addr;
4474 e = Jim_GetWide(interp, argv[2], &wide_addr);
4475 if (e != JIM_OK)
4476 return e;
4477 target_addr_t addr = (target_addr_t)wide_addr;
4478
4479 /* Arg 3: Number of elements to read */
4480 e = Jim_GetLong(interp, argv[3], &l);
4481 if (e != JIM_OK)
4482 return e;
4483 size_t len = l;
4484
4485 /* Arg 4: phys */
4486 bool is_phys = false;
4487 if (argc > 4) {
4488 int str_len = 0;
4489 const char *phys = Jim_GetString(argv[4], &str_len);
4490 if (!strncmp(phys, "phys", str_len))
4491 is_phys = true;
4492 else
4493 return JIM_ERR;
4494 }
4495
4496 /* Argument checks */
4497 if (len == 0) {
4498 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4499 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
4500 return JIM_ERR;
4501 }
4502 if ((addr + (len * width)) < addr) {
4503 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4504 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4505 return JIM_ERR;
4506 }
4507 if (len > 65536) {
4508 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4509 Jim_AppendStrings(interp, Jim_GetResult(interp),
4510 "mem2array: too large read request, exceeds 64K items", NULL);
4511 return JIM_ERR;
4512 }
4513
4514 if ((width == 1) ||
4515 ((width == 2) && ((addr & 1) == 0)) ||
4516 ((width == 4) && ((addr & 3) == 0)) ||
4517 ((width == 8) && ((addr & 7) == 0))) {
4518 /* alignment correct */
4519 } else {
4520 char buf[100];
4521 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4522 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4523 addr,
4524 width);
4525 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4526 return JIM_ERR;
4527 }
4528
4529 /* Transfer loop */
4530
4531 /* index counter */
4532 size_t idx = 0;
4533
4534 const size_t buffersize = 4096;
4535 uint8_t *buffer = malloc(buffersize);
4536 if (!buffer)
4537 return JIM_ERR;
4538
4539 /* assume ok */
4540 e = JIM_OK;
4541 while (len) {
4542 /* Slurp... in buffer size chunks */
4543 const unsigned int max_chunk_len = buffersize / width;
4544 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4545
4546 int retval;
4547 if (is_phys)
4548 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4549 else
4550 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4551 if (retval != ERROR_OK) {
4552 /* BOO !*/
4553 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4554 addr,
4555 width,
4556 chunk_len);
4557 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4558 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4559 e = JIM_ERR;
4560 break;
4561 } else {
4562 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4563 uint64_t v = 0;
4564 switch (width) {
4565 case 8:
4566 v = target_buffer_get_u64(target, &buffer[i*width]);
4567 break;
4568 case 4:
4569 v = target_buffer_get_u32(target, &buffer[i*width]);
4570 break;
4571 case 2:
4572 v = target_buffer_get_u16(target, &buffer[i*width]);
4573 break;
4574 case 1:
4575 v = buffer[i] & 0x0ff;
4576 break;
4577 }
4578 new_u64_array_element(interp, varname, idx, v);
4579 }
4580 len -= chunk_len;
4581 addr += chunk_len * width;
4582 }
4583 }
4584
4585 free(buffer);
4586
4587 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4588
4589 return e;
4590 }
4591
/* 'read_memory' command: read target memory and print the values.
 *
 * CMD_ARGV[0] = memory address
 * CMD_ARGV[1] = desired element width in bits (8/16/32/64)
 * CMD_ARGV[2] = number of elements to read (max 65536)
 * CMD_ARGV[3] = optional "phys" to use the physical address space
 */
COMMAND_HANDLER(handle_target_read_memory)
{
	/*
	 * CMD_ARGV[0] = memory address
	 * CMD_ARGV[1] = desired element width in bits
	 * CMD_ARGV[2] = number of elements to read
	 * CMD_ARGV[3] = optional "phys"
	 */

	if (CMD_ARGC < 3 || CMD_ARGC > 4)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Arg 1: Memory address. */
	target_addr_t addr;
	COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], addr);

	/* Arg 2: Bit width of one element. */
	unsigned int width_bits;
	COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], width_bits);

	/* Arg 3: Number of elements to read. */
	unsigned int count;
	COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);

	/* Arg 4: Optional 'phys'. */
	bool is_phys = false;
	if (CMD_ARGC == 4) {
		if (strcmp(CMD_ARGV[3], "phys")) {
			command_print(CMD, "invalid argument '%s', must be 'phys'", CMD_ARGV[3]);
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}

		is_phys = true;
	}

	switch (width_bits) {
	case 8:
	case 16:
	case 32:
	case 64:
		break;
	default:
		command_print(CMD, "invalid width, must be 8, 16, 32 or 64");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	const unsigned int width = width_bits / 8;

	/* Reject requests whose end address wraps around zero. */
	if ((addr + (count * width)) < addr) {
		command_print(CMD, "read_memory: addr + count wraps to zero");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	if (count > 65536) {
		command_print(CMD, "read_memory: too large read request, exceeds 64K elements");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	struct target *target = get_current_target(CMD_CTX);

	/* Read in bounded chunks so large requests don't need a large buffer. */
	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);

	if (!buffer) {
		LOG_ERROR("Failed to allocate memory");
		return ERROR_FAIL;
	}

	char *separator = "";
	while (count > 0) {
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(count, max_chunk_len);

		int retval;

		if (is_phys)
			retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_read_memory(target, addr, width, chunk_len, buffer);

		if (retval != ERROR_OK) {
			LOG_DEBUG("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
					addr, width_bits, chunk_len);
			/*
			 * FIXME: we append the errmsg to the list of value already read.
			 * Add a way to flush and replace old output, but LOG_DEBUG() it
			 */
			command_print(CMD, "read_memory: failed to read memory");
			free(buffer);
			return retval;
		}

		/* Decode each element in target byte order and print it. */
		for (size_t i = 0; i < chunk_len ; i++) {
			uint64_t v = 0;

			switch (width) {
			case 8:
				v = target_buffer_get_u64(target, &buffer[i * width]);
				break;
			case 4:
				v = target_buffer_get_u32(target, &buffer[i * width]);
				break;
			case 2:
				v = target_buffer_get_u16(target, &buffer[i * width]);
				break;
			case 1:
				v = buffer[i];
				break;
			}

			/* Space-separate values; no leading space before the first. */
			command_print_sameline(CMD, "%s0x%" PRIx64, separator, v);
			separator = " ";
		}

		count -= chunk_len;
		addr += chunk_len * width;
	}

	free(buffer);

	return ERROR_OK;
}
4714
4715 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4716 {
4717 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4718 if (!namebuf)
4719 return JIM_ERR;
4720
4721 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4722 if (!obj_name) {
4723 free(namebuf);
4724 return JIM_ERR;
4725 }
4726
4727 Jim_IncrRefCount(obj_name);
4728 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4729 Jim_DecrRefCount(interp, obj_name);
4730 free(namebuf);
4731 if (!obj_val)
4732 return JIM_ERR;
4733
4734 jim_wide wide_val;
4735 int result = Jim_GetWide(interp, obj_val, &wide_val);
4736 *val = wide_val;
4737 return result;
4738 }
4739
4740 static int target_array2mem(Jim_Interp *interp, struct target *target,
4741 int argc, Jim_Obj *const *argv)
4742 {
4743 int e;
4744
4745 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4746
4747 /* argv[0] = name of array from which to read the data
4748 * argv[1] = desired element width in bits
4749 * argv[2] = memory address
4750 * argv[3] = number of elements to write
4751 * argv[4] = optional "phys"
4752 */
4753 if (argc < 4 || argc > 5) {
4754 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4755 return JIM_ERR;
4756 }
4757
4758 /* Arg 0: Name of the array variable */
4759 const char *varname = Jim_GetString(argv[0], NULL);
4760
4761 /* Arg 1: Bit width of one element */
4762 long l;
4763 e = Jim_GetLong(interp, argv[1], &l);
4764 if (e != JIM_OK)
4765 return e;
4766 const unsigned int width_bits = l;
4767
4768 if (width_bits != 8 &&
4769 width_bits != 16 &&
4770 width_bits != 32 &&
4771 width_bits != 64) {
4772 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4773 Jim_AppendStrings(interp, Jim_GetResult(interp),
4774 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4775 return JIM_ERR;
4776 }
4777 const unsigned int width = width_bits / 8;
4778
4779 /* Arg 2: Memory address */
4780 jim_wide wide_addr;
4781 e = Jim_GetWide(interp, argv[2], &wide_addr);
4782 if (e != JIM_OK)
4783 return e;
4784 target_addr_t addr = (target_addr_t)wide_addr;
4785
4786 /* Arg 3: Number of elements to write */
4787 e = Jim_GetLong(interp, argv[3], &l);
4788 if (e != JIM_OK)
4789 return e;
4790 size_t len = l;
4791
4792 /* Arg 4: Phys */
4793 bool is_phys = false;
4794 if (argc > 4) {
4795 int str_len = 0;
4796 const char *phys = Jim_GetString(argv[4], &str_len);
4797 if (!strncmp(phys, "phys", str_len))
4798 is_phys = true;
4799 else
4800 return JIM_ERR;
4801 }
4802
4803 /* Argument checks */
4804 if (len == 0) {
4805 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4806 Jim_AppendStrings(interp, Jim_GetResult(interp),
4807 "array2mem: zero width read?", NULL);
4808 return JIM_ERR;
4809 }
4810
4811 if ((addr + (len * width)) < addr) {
4812 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4813 Jim_AppendStrings(interp, Jim_GetResult(interp),
4814 "array2mem: addr + len - wraps to zero?", NULL);
4815 return JIM_ERR;
4816 }
4817
4818 if (len > 65536) {
4819 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4820 Jim_AppendStrings(interp, Jim_GetResult(interp),
4821 "array2mem: too large memory write request, exceeds 64K items", NULL);
4822 return JIM_ERR;
4823 }
4824
4825 if ((width == 1) ||
4826 ((width == 2) && ((addr & 1) == 0)) ||
4827 ((width == 4) && ((addr & 3) == 0)) ||
4828 ((width == 8) && ((addr & 7) == 0))) {
4829 /* alignment correct */
4830 } else {
4831 char buf[100];
4832 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4833 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4834 addr,
4835 width);
4836 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4837 return JIM_ERR;
4838 }
4839
4840 /* Transfer loop */
4841
4842 /* assume ok */
4843 e = JIM_OK;
4844
4845 const size_t buffersize = 4096;
4846 uint8_t *buffer = malloc(buffersize);
4847 if (!buffer)
4848 return JIM_ERR;
4849
4850 /* index counter */
4851 size_t idx = 0;
4852
4853 while (len) {
4854 /* Slurp... in buffer size chunks */
4855 const unsigned int max_chunk_len = buffersize / width;
4856
4857 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4858
4859 /* Fill the buffer */
4860 for (size_t i = 0; i < chunk_len; i++, idx++) {
4861 uint64_t v = 0;
4862 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4863 free(buffer);
4864 return JIM_ERR;
4865 }
4866 switch (width) {
4867 case 8:
4868 target_buffer_set_u64(target, &buffer[i * width], v);
4869 break;
4870 case 4:
4871 target_buffer_set_u32(target, &buffer[i * width], v);
4872 break;
4873 case 2:
4874 target_buffer_set_u16(target, &buffer[i * width], v);
4875 break;
4876 case 1:
4877 buffer[i] = v & 0x0ff;
4878 break;
4879 }
4880 }
4881 len -= chunk_len;
4882
4883 /* Write the buffer to memory */
4884 int retval;
4885 if (is_phys)
4886 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4887 else
4888 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4889 if (retval != ERROR_OK) {
4890 /* BOO !*/
4891 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4892 addr,
4893 width,
4894 chunk_len);
4895 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4896 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4897 e = JIM_ERR;
4898 break;
4899 }
4900 addr += chunk_len * width;
4901 }
4902
4903 free(buffer);
4904
4905 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4906
4907 return e;
4908 }
4909
4910 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4911 Jim_Obj * const *argv)
4912 {
4913 /*
4914 * argv[1] = memory address
4915 * argv[2] = desired element width in bits
4916 * argv[3] = list of data to write
4917 * argv[4] = optional "phys"
4918 */
4919
4920 if (argc < 4 || argc > 5) {
4921 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4922 return JIM_ERR;
4923 }
4924
4925 /* Arg 1: Memory address. */
4926 int e;
4927 jim_wide wide_addr;
4928 e = Jim_GetWide(interp, argv[1], &wide_addr);
4929
4930 if (e != JIM_OK)
4931 return e;
4932
4933 target_addr_t addr = (target_addr_t)wide_addr;
4934
4935 /* Arg 2: Bit width of one element. */
4936 long l;
4937 e = Jim_GetLong(interp, argv[2], &l);
4938
4939 if (e != JIM_OK)
4940 return e;
4941
4942 const unsigned int width_bits = l;
4943 size_t count = Jim_ListLength(interp, argv[3]);
4944
4945 /* Arg 4: Optional 'phys'. */
4946 bool is_phys = false;
4947
4948 if (argc > 4) {
4949 const char *phys = Jim_GetString(argv[4], NULL);
4950
4951 if (strcmp(phys, "phys")) {
4952 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4953 return JIM_ERR;
4954 }
4955
4956 is_phys = true;
4957 }
4958
4959 switch (width_bits) {
4960 case 8:
4961 case 16:
4962 case 32:
4963 case 64:
4964 break;
4965 default:
4966 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4967 return JIM_ERR;
4968 }
4969
4970 const unsigned int width = width_bits / 8;
4971
4972 if ((addr + (count * width)) < addr) {
4973 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
4974 return JIM_ERR;
4975 }
4976
4977 if (count > 65536) {
4978 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
4979 return JIM_ERR;
4980 }
4981
4982 struct command_context *cmd_ctx = current_command_context(interp);
4983 assert(cmd_ctx != NULL);
4984 struct target *target = get_current_target(cmd_ctx);
4985
4986 const size_t buffersize = 4096;
4987 uint8_t *buffer = malloc(buffersize);
4988
4989 if (!buffer) {
4990 LOG_ERROR("Failed to allocate memory");
4991 return JIM_ERR;
4992 }
4993
4994 size_t j = 0;
4995
4996 while (count > 0) {
4997 const unsigned int max_chunk_len = buffersize / width;
4998 const size_t chunk_len = MIN(count, max_chunk_len);
4999
5000 for (size_t i = 0; i < chunk_len; i++, j++) {
5001 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
5002 jim_wide element_wide;
5003 Jim_GetWide(interp, tmp, &element_wide);
5004
5005 const uint64_t v = element_wide;
5006
5007 switch (width) {
5008 case 8:
5009 target_buffer_set_u64(target, &buffer[i * width], v);
5010 break;
5011 case 4:
5012 target_buffer_set_u32(target, &buffer[i * width], v);
5013 break;
5014 case 2:
5015 target_buffer_set_u16(target, &buffer[i * width], v);
5016 break;
5017 case 1:
5018 buffer[i] = v & 0x0ff;
5019 break;
5020 }
5021 }
5022
5023 count -= chunk_len;
5024
5025 int retval;
5026
5027 if (is_phys)
5028 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5029 else
5030 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5031
5032 if (retval != ERROR_OK) {
5033 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5034 addr, width_bits, chunk_len);
5035 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5036 e = JIM_ERR;
5037 break;
5038 }
5039
5040 addr += chunk_len * width;
5041 }
5042
5043 free(buffer);
5044
5045 return e;
5046 }
5047
/* FIX? should we propagate errors here rather than printing them
 * and continuing?
 */
/* Run every registered Tcl handler for event e on the given target. */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	/* A target may register multiple actions for the same event;
	 * all matching entries are executed. */
	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
					target->target_number,
					target_name(target),
					target_type_name(target),
					e,
					target_event_name(e),
					Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			cmd_ctx->current_target_override = saved_target_override;

			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* A 'return' inside the handler body is not an error;
			 * unwrap the interpreter's actual return code. */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
						target_event_name(e),
						target_name(target),
						Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
5096
/* 'get_reg' backend: read a list of registers and return a Tcl dict
 * mapping register name -> "0x..." hex value string.
 *
 * Usage: get_reg [-force] {name ...}
 * With -force, each register is re-read from the target instead of
 * returning the cached value.
 */
static int target_jim_get_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	bool force = false;

	if (argc == 3) {
		const char *option = Jim_GetString(argv[1], NULL);

		if (!strcmp(option, "-force")) {
			/* Consume the option so argv[1] is the register list below. */
			argc--;
			argv++;
			force = true;
		} else {
			Jim_SetResultFormatted(interp, "invalid option '%s'", option);
			return JIM_ERR;
		}
	}

	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
		return JIM_ERR;
	}

	const int length = Jim_ListLength(interp, argv[1]);

	/* NOTE(review): result_dict is not explicitly released on the
	 * early-error returns below; presumably Jim reclaims unreferenced
	 * objects — verify against the Jim Tcl object lifetime rules. */
	Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);

	if (!result_dict)
		return JIM_ERR;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx != NULL);
	const struct target *target = get_current_target(cmd_ctx);

	for (int i = 0; i < length; i++) {
		Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);

		if (!elem)
			return JIM_ERR;

		const char *reg_name = Jim_String(elem);

		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
				false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		if (force) {
			/* Re-read the register from the target instead of
			 * trusting the cached value. */
			int retval = reg->type->get(reg);

			if (retval != ERROR_OK) {
				Jim_SetResultFormatted(interp, "failed to read register '%s'",
						reg_name);
				return JIM_ERR;
			}
		}

		/* Format the (possibly cached) value as a hex string. */
		char *reg_value = buf_to_hex_str(reg->value, reg->size);

		if (!reg_value) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		char *tmp = alloc_printf("0x%s", reg_value);

		free(reg_value);

		if (!tmp) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		Jim_DictAddElement(interp, result_dict, elem,
				Jim_NewStringObj(interp, tmp, -1));

		free(tmp);
	}

	Jim_SetResult(interp, result_dict);

	return JIM_OK;
}
5183
/* 'set_reg' backend: write registers from a Tcl dict of
 * register-name -> value pairs.
 *
 * Usage: set_reg {name value ...}
 */
static int target_jim_set_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "dict");
		return JIM_ERR;
	}

	int tmp;
	/* Jim_DictPairs changed signature in Jim 0.80; both variants yield
	 * a flat key/value array and its length in tmp. */
#if JIM_VERSION >= 80
	Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);

	if (!dict)
		return JIM_ERR;
#else
	Jim_Obj **dict;
	int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);

	if (ret != JIM_OK)
		return ret;
#endif

	const unsigned int length = tmp;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	const struct target *target = get_current_target(cmd_ctx);

	/* Pairs are laid out [name0, value0, name1, value1, ...]. */
	for (unsigned int i = 0; i < length; i += 2) {
		const char *reg_name = Jim_String(dict[i]);
		const char *reg_value = Jim_String(dict[i + 1]);
		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
				false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));

		if (!buf) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		/* NOTE(review): str_to_buf's return value is ignored here, so a
		 * malformed value string may go undetected — confirm intended. */
		str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
		int retval = reg->type->set(reg, buf);
		free(buf);

		if (retval != ERROR_OK) {
			Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
					reg_value, reg_name);
			return JIM_ERR;
		}
	}

	return JIM_OK;
}
5242
5243 /**
5244 * Returns true only if the target has a handler for the specified event.
5245 */
5246 bool target_has_event_action(struct target *target, enum target_event event)
5247 {
5248 struct target_event_action *teap;
5249
5250 for (teap = target->event_action; teap; teap = teap->next) {
5251 if (teap->event == event)
5252 return true;
5253 }
5254 return false;
5255 }
5256
/* Option keys accepted by 'target configure'/'cget'; each entry pairs
 * with the same-named "-..." string in nvp_config_opts[] below. */
enum target_cfg_param {
	TCFG_TYPE,
	TCFG_EVENT,
	TCFG_WORK_AREA_VIRT,
	TCFG_WORK_AREA_PHYS,
	TCFG_WORK_AREA_SIZE,
	TCFG_WORK_AREA_BACKUP,
	TCFG_ENDIAN,
	TCFG_COREID,
	TCFG_CHAIN_POSITION,
	TCFG_DBGBASE,
	TCFG_RTOS,
	TCFG_DEFER_EXAMINE,
	TCFG_GDB_PORT,
	TCFG_GDB_MAX_CONNECTIONS,
};
5273
/* Name/value table mapping 'target configure' option strings to the
 * enum target_cfg_param keys consumed by target_configure(). */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type", .value = TCFG_TYPE },
	{ .name = "-event", .value = TCFG_EVENT },
	{ .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian", .value = TCFG_ENDIAN },
	{ .name = "-coreid", .value = TCFG_COREID },
	{ .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase", .value = TCFG_DBGBASE },
	{ .name = "-rtos", .value = TCFG_RTOS },
	{ .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port", .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
	/* terminator */
	{ .name = NULL, .value = -1 }
};
5291
5292 static int target_configure(struct jim_getopt_info *goi, struct target *target)
5293 {
5294 struct jim_nvp *n;
5295 Jim_Obj *o;
5296 jim_wide w;
5297 int e;
5298
5299 /* parse config or cget options ... */
5300 while (goi->argc > 0) {
5301 Jim_SetEmptyResult(goi->interp);
5302 /* jim_getopt_debug(goi); */
5303
5304 if (target->type->target_jim_configure) {
5305 /* target defines a configure function */
5306 /* target gets first dibs on parameters */
5307 e = (*(target->type->target_jim_configure))(target, goi);
5308 if (e == JIM_OK) {
5309 /* more? */
5310 continue;
5311 }
5312 if (e == JIM_ERR) {
5313 /* An error */
5314 return e;
5315 }
5316 /* otherwise we 'continue' below */
5317 }
5318 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
5319 if (e != JIM_OK) {
5320 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
5321 return e;
5322 }
5323 switch (n->value) {
5324 case TCFG_TYPE:
5325 /* not settable */
5326 if (goi->isconfigure) {
5327 Jim_SetResultFormatted(goi->interp,
5328 "not settable: %s", n->name);
5329 return JIM_ERR;
5330 } else {
5331 no_params:
5332 if (goi->argc != 0) {
5333 Jim_WrongNumArgs(goi->interp,
5334 goi->argc, goi->argv,
5335 "NO PARAMS");
5336 return JIM_ERR;
5337 }
5338 }
5339 Jim_SetResultString(goi->interp,
5340 target_type_name(target), -1);
5341 /* loop for more */
5342 break;
5343 case TCFG_EVENT:
5344 if (goi->argc == 0) {
5345 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
5346 return JIM_ERR;
5347 }
5348
5349 e = jim_getopt_nvp(goi, nvp_target_event, &n);
5350 if (e != JIM_OK) {
5351 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
5352 return e;
5353 }
5354
5355 if (goi->isconfigure) {
5356 if (goi->argc != 1) {
5357 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
5358 return JIM_ERR;
5359 }
5360 } else {
5361 if (goi->argc != 0) {
5362 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
5363 return JIM_ERR;
5364 }
5365 }
5366
5367 {
5368 struct target_event_action *teap;
5369
5370 teap = target->event_action;
5371 /* replace existing? */
5372 while (teap) {
5373 if (teap->event == (enum target_event)n->value)
5374 break;
5375 teap = teap->next;
5376 }
5377
5378 if (goi->isconfigure) {
5379 /* START_DEPRECATED_TPIU */
5380 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5381 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5382 /* END_DEPRECATED_TPIU */
5383
5384 bool replace = true;
5385 if (!teap) {
5386 /* create new */
5387 teap = calloc(1, sizeof(*teap));
5388 replace = false;
5389 }
5390 teap->event = n->value;
5391 teap->interp = goi->interp;
5392 jim_getopt_obj(goi, &o);
5393 if (teap->body)
5394 Jim_DecrRefCount(teap->interp, teap->body);
5395 teap->body = Jim_DuplicateObj(goi->interp, o);
5396 /*
5397 * FIXME:
5398 * Tcl/TK - "tk events" have a nice feature.
5399 * See the "BIND" command.
5400 * We should support that here.
5401 * You can specify %X and %Y in the event code.
5402 * The idea is: %T - target name.
5403 * The idea is: %N - target number
5404 * The idea is: %E - event name.
5405 */
5406 Jim_IncrRefCount(teap->body);
5407
5408 if (!replace) {
5409 /* add to head of event list */
5410 teap->next = target->event_action;
5411 target->event_action = teap;
5412 }
5413 Jim_SetEmptyResult(goi->interp);
5414 } else {
5415 /* get */
5416 if (!teap)
5417 Jim_SetEmptyResult(goi->interp);
5418 else
5419 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5420 }
5421 }
5422 /* loop for more */
5423 break;
5424
5425 case TCFG_WORK_AREA_VIRT:
5426 if (goi->isconfigure) {
5427 target_free_all_working_areas(target);
5428 e = jim_getopt_wide(goi, &w);
5429 if (e != JIM_OK)
5430 return e;
5431 target->working_area_virt = w;
5432 target->working_area_virt_spec = true;
5433 } else {
5434 if (goi->argc != 0)
5435 goto no_params;
5436 }
5437 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5438 /* loop for more */
5439 break;
5440
5441 case TCFG_WORK_AREA_PHYS:
5442 if (goi->isconfigure) {
5443 target_free_all_working_areas(target);
5444 e = jim_getopt_wide(goi, &w);
5445 if (e != JIM_OK)
5446 return e;
5447 target->working_area_phys = w;
5448 target->working_area_phys_spec = true;
5449 } else {
5450 if (goi->argc != 0)
5451 goto no_params;
5452 }
5453 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5454 /* loop for more */
5455 break;
5456
5457 case TCFG_WORK_AREA_SIZE:
5458 if (goi->isconfigure) {
5459 target_free_all_working_areas(target);
5460 e = jim_getopt_wide(goi, &w);
5461 if (e != JIM_OK)
5462 return e;
5463 target->working_area_size = w;
5464 } else {
5465 if (goi->argc != 0)
5466 goto no_params;
5467 }
5468 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5469 /* loop for more */
5470 break;
5471
5472 case TCFG_WORK_AREA_BACKUP:
5473 if (goi->isconfigure) {
5474 target_free_all_working_areas(target);
5475 e = jim_getopt_wide(goi, &w);
5476 if (e != JIM_OK)
5477 return e;
5478 /* make this exactly 1 or 0 */
5479 target->backup_working_area = (!!w);
5480 } else {
5481 if (goi->argc != 0)
5482 goto no_params;
5483 }
5484 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5485 /* loop for more e*/
5486 break;
5487
5488
5489 case TCFG_ENDIAN:
5490 if (goi->isconfigure) {
5491 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5492 if (e != JIM_OK) {
5493 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5494 return e;
5495 }
5496 target->endianness = n->value;
5497 } else {
5498 if (goi->argc != 0)
5499 goto no_params;
5500 }
5501 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5502 if (!n->name) {
5503 target->endianness = TARGET_LITTLE_ENDIAN;
5504 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5505 }
5506 Jim_SetResultString(goi->interp, n->name, -1);
5507 /* loop for more */
5508 break;
5509
5510 case TCFG_COREID:
5511 if (goi->isconfigure) {
5512 e = jim_getopt_wide(goi, &w);
5513 if (e != JIM_OK)
5514 return e;
5515 target->coreid = (int32_t)w;
5516 } else {
5517 if (goi->argc != 0)
5518 goto no_params;
5519 }
5520 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5521 /* loop for more */
5522 break;
5523
5524 case TCFG_CHAIN_POSITION:
5525 if (goi->isconfigure) {
5526 Jim_Obj *o_t;
5527 struct jtag_tap *tap;
5528
5529 if (target->has_dap) {
5530 Jim_SetResultString(goi->interp,
5531 "target requires -dap parameter instead of -chain-position!", -1);
5532 return JIM_ERR;
5533 }
5534
5535 target_free_all_working_areas(target);
5536 e = jim_getopt_obj(goi, &o_t);
5537 if (e != JIM_OK)
5538 return e;
5539 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5540 if (!tap)
5541 return JIM_ERR;
5542 target->tap = tap;
5543 target->tap_configured = true;
5544 } else {
5545 if (goi->argc != 0)
5546 goto no_params;
5547 }
5548 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5549 /* loop for more e*/
5550 break;
5551 case TCFG_DBGBASE:
5552 if (goi->isconfigure) {
5553 e = jim_getopt_wide(goi, &w);
5554 if (e != JIM_OK)
5555 return e;
5556 target->dbgbase = (uint32_t)w;
5557 target->dbgbase_set = true;
5558 } else {
5559 if (goi->argc != 0)
5560 goto no_params;
5561 }
5562 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5563 /* loop for more */
5564 break;
5565 case TCFG_RTOS:
5566 /* RTOS */
5567 {
5568 int result = rtos_create(goi, target);
5569 if (result != JIM_OK)
5570 return result;
5571 }
5572 /* loop for more */
5573 break;
5574
5575 case TCFG_DEFER_EXAMINE:
5576 /* DEFER_EXAMINE */
5577 target->defer_examine = true;
5578 /* loop for more */
5579 break;
5580
5581 case TCFG_GDB_PORT:
5582 if (goi->isconfigure) {
5583 struct command_context *cmd_ctx = current_command_context(goi->interp);
5584 if (cmd_ctx->mode != COMMAND_CONFIG) {
5585 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5586 return JIM_ERR;
5587 }
5588
5589 const char *s;
5590 e = jim_getopt_string(goi, &s, NULL);
5591 if (e != JIM_OK)
5592 return e;
5593 free(target->gdb_port_override);
5594 target->gdb_port_override = strdup(s);
5595 } else {
5596 if (goi->argc != 0)
5597 goto no_params;
5598 }
5599 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5600 /* loop for more */
5601 break;
5602
5603 case TCFG_GDB_MAX_CONNECTIONS:
5604 if (goi->isconfigure) {
5605 struct command_context *cmd_ctx = current_command_context(goi->interp);
5606 if (cmd_ctx->mode != COMMAND_CONFIG) {
5607 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5608 return JIM_ERR;
5609 }
5610
5611 e = jim_getopt_wide(goi, &w);
5612 if (e != JIM_OK)
5613 return e;
5614 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5615 } else {
5616 if (goi->argc != 0)
5617 goto no_params;
5618 }
5619 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5620 break;
5621 }
5622 } /* while (goi->argc) */
5623
5624
5625 /* done - we return */
5626 return JIM_OK;
5627 }
5628
5629 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5630 {
5631 struct command *c = jim_to_command(interp);
5632 struct jim_getopt_info goi;
5633
5634 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5635 goi.isconfigure = !strcmp(c->name, "configure");
5636 if (goi.argc < 1) {
5637 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5638 "missing: -option ...");
5639 return JIM_ERR;
5640 }
5641 struct command_context *cmd_ctx = current_command_context(interp);
5642 assert(cmd_ctx);
5643 struct target *target = get_current_target(cmd_ctx);
5644 return target_configure(&goi, target);
5645 }
5646
5647 static int jim_target_mem2array(Jim_Interp *interp,
5648 int argc, Jim_Obj *const *argv)
5649 {
5650 struct command_context *cmd_ctx = current_command_context(interp);
5651 assert(cmd_ctx);
5652 struct target *target = get_current_target(cmd_ctx);
5653 return target_mem2array(interp, target, argc - 1, argv + 1);
5654 }
5655
5656 static int jim_target_array2mem(Jim_Interp *interp,
5657 int argc, Jim_Obj *const *argv)
5658 {
5659 struct command_context *cmd_ctx = current_command_context(interp);
5660 assert(cmd_ctx);
5661 struct target *target = get_current_target(cmd_ctx);
5662 return target_array2mem(interp, target, argc - 1, argv + 1);
5663 }
5664
5665 COMMAND_HANDLER(handle_target_examine)
5666 {
5667 bool allow_defer = false;
5668
5669 if (CMD_ARGC > 1)
5670 return ERROR_COMMAND_SYNTAX_ERROR;
5671
5672 if (CMD_ARGC == 1) {
5673 if (strcmp(CMD_ARGV[0], "allow-defer"))
5674 return ERROR_COMMAND_ARGUMENT_INVALID;
5675 allow_defer = true;
5676 }
5677
5678 struct target *target = get_current_target(CMD_CTX);
5679 if (!target->tap->enabled) {
5680 command_print(CMD, "[TAP is disabled]");
5681 return ERROR_FAIL;
5682 }
5683
5684 if (allow_defer && target->defer_examine) {
5685 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5686 LOG_INFO("Use arp_examine command to examine it manually!");
5687 return ERROR_OK;
5688 }
5689
5690 int retval = target->type->examine(target);
5691 if (retval != ERROR_OK) {
5692 target_reset_examined(target);
5693 return retval;
5694 }
5695
5696 target_set_examined(target);
5697
5698 return ERROR_OK;
5699 }
5700
5701 COMMAND_HANDLER(handle_target_was_examined)
5702 {
5703 if (CMD_ARGC != 0)
5704 return ERROR_COMMAND_SYNTAX_ERROR;
5705
5706 struct target *target = get_current_target(CMD_CTX);
5707
5708 command_print(CMD, "%d", target_was_examined(target) ? 1 : 0);
5709
5710 return ERROR_OK;
5711 }
5712
5713 COMMAND_HANDLER(handle_target_examine_deferred)
5714 {
5715 if (CMD_ARGC != 0)
5716 return ERROR_COMMAND_SYNTAX_ERROR;
5717
5718 struct target *target = get_current_target(CMD_CTX);
5719
5720 command_print(CMD, "%d", target->defer_examine ? 1 : 0);
5721
5722 return ERROR_OK;
5723 }
5724
5725 COMMAND_HANDLER(handle_target_halt_gdb)
5726 {
5727 if (CMD_ARGC != 0)
5728 return ERROR_COMMAND_SYNTAX_ERROR;
5729
5730 struct target *target = get_current_target(CMD_CTX);
5731
5732 return target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
5733 }
5734
5735 COMMAND_HANDLER(handle_target_poll)
5736 {
5737 if (CMD_ARGC != 0)
5738 return ERROR_COMMAND_SYNTAX_ERROR;
5739
5740 struct target *target = get_current_target(CMD_CTX);
5741 if (!target->tap->enabled) {
5742 command_print(CMD, "[TAP is disabled]");
5743 return ERROR_FAIL;
5744 }
5745
5746 if (!(target_was_examined(target)))
5747 return ERROR_TARGET_NOT_EXAMINED;
5748
5749 return target->type->poll(target);
5750 }
5751
5752 COMMAND_HANDLER(handle_target_reset)
5753 {
5754 if (CMD_ARGC != 2)
5755 return ERROR_COMMAND_SYNTAX_ERROR;
5756
5757 const struct nvp *n = nvp_name2value(nvp_assert, CMD_ARGV[0]);
5758 if (!n->name) {
5759 nvp_unknown_command_print(CMD, nvp_assert, NULL, CMD_ARGV[0]);
5760 return ERROR_COMMAND_ARGUMENT_INVALID;
5761 }
5762
5763 /* the halt or not param */
5764 int a;
5765 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], a);
5766
5767 struct target *target = get_current_target(CMD_CTX);
5768 if (!target->tap->enabled) {
5769 command_print(CMD, "[TAP is disabled]");
5770 return ERROR_FAIL;
5771 }
5772
5773 if (!target->type->assert_reset || !target->type->deassert_reset) {
5774 command_print(CMD, "No target-specific reset for %s", target_name(target));
5775 return ERROR_FAIL;
5776 }
5777
5778 if (target->defer_examine)
5779 target_reset_examined(target);
5780
5781 /* determine if we should halt or not. */
5782 target->reset_halt = (a != 0);
5783 /* When this happens - all workareas are invalid. */
5784 target_free_all_working_areas_restore(target, 0);
5785
5786 /* do the assert */
5787 if (n->value == NVP_ASSERT)
5788 return target->type->assert_reset(target);
5789 return target->type->deassert_reset(target);
5790 }
5791
5792 COMMAND_HANDLER(handle_target_halt)
5793 {
5794 if (CMD_ARGC != 0)
5795 return ERROR_COMMAND_SYNTAX_ERROR;
5796
5797 struct target *target = get_current_target(CMD_CTX);
5798 if (!target->tap->enabled) {
5799 command_print(CMD, "[TAP is disabled]");
5800 return ERROR_FAIL;
5801 }
5802
5803 return target->type->halt(target);
5804 }
5805
5806 COMMAND_HANDLER(handle_target_wait_state)
5807 {
5808 if (CMD_ARGC != 2)
5809 return ERROR_COMMAND_SYNTAX_ERROR;
5810
5811 const struct nvp *n = nvp_name2value(nvp_target_state, CMD_ARGV[0]);
5812 if (!n->name) {
5813 nvp_unknown_command_print(CMD, nvp_target_state, NULL, CMD_ARGV[0]);
5814 return ERROR_COMMAND_ARGUMENT_INVALID;
5815 }
5816
5817 unsigned int a;
5818 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], a);
5819
5820 struct target *target = get_current_target(CMD_CTX);
5821 if (!target->tap->enabled) {
5822 command_print(CMD, "[TAP is disabled]");
5823 return ERROR_FAIL;
5824 }
5825
5826 int retval = target_wait_state(target, n->value, a);
5827 if (retval != ERROR_OK) {
5828 command_print(CMD,
5829 "target: %s wait %s fails (%d) %s",
5830 target_name(target), n->name,
5831 retval, target_strerror_safe(retval));
5832 return retval;
5833 }
5834 return ERROR_OK;
5835 }
/* List, for humans, the events defined for this target.
 * Scripts/programs should use 'name cget -event NAME' instead.
 */
5839 COMMAND_HANDLER(handle_target_event_list)
5840 {
5841 struct target *target = get_current_target(CMD_CTX);
5842 struct target_event_action *teap = target->event_action;
5843
5844 command_print(CMD, "Event actions for target (%d) %s\n",
5845 target->target_number,
5846 target_name(target));
5847 command_print(CMD, "%-25s | Body", "Event");
5848 command_print(CMD, "------------------------- | "
5849 "----------------------------------------");
5850 while (teap) {
5851 command_print(CMD, "%-25s | %s",
5852 target_event_name(teap->event),
5853 Jim_GetString(teap->body, NULL));
5854 teap = teap->next;
5855 }
5856 command_print(CMD, "***END***");
5857 return ERROR_OK;
5858 }
5859
5860 COMMAND_HANDLER(handle_target_current_state)
5861 {
5862 if (CMD_ARGC != 0)
5863 return ERROR_COMMAND_SYNTAX_ERROR;
5864
5865 struct target *target = get_current_target(CMD_CTX);
5866
5867 command_print(CMD, "%s", target_state_name(target));
5868
5869 return ERROR_OK;
5870 }
5871
5872 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5873 {
5874 struct jim_getopt_info goi;
5875 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5876 if (goi.argc != 1) {
5877 const char *cmd_name = Jim_GetString(argv[0], NULL);
5878 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5879 return JIM_ERR;
5880 }
5881 struct jim_nvp *n;
5882 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5883 if (e != JIM_OK) {
5884 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5885 return e;
5886 }
5887 struct command_context *cmd_ctx = current_command_context(interp);
5888 assert(cmd_ctx);
5889 struct target *target = get_current_target(cmd_ctx);
5890 target_handle_event(target, n->value);
5891 return JIM_OK;
5892 }
5893
/* Per-instance subcommands, registered under each target's own command
 * name (e.g. "mytarget.cpu configure ..."); chained into the target's
 * command group by target_create(). */
static const struct command_registration target_instance_command_handlers[] = {
	{
		.name = "configure",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_configure,
		.help = "configure a new target for use",
		.usage = "[target_attribute ...]",
	},
	{
		.name = "cget",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_configure,
		.help = "returns the specified target attribute",
		.usage = "target_attribute",
	},
	/* memory write commands, 64/32/16/8-bit wide */
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 64-bit word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 32-bit word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 16-bit half-word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write byte(s) to target memory",
		.usage = "address data [count]",
	},
	/* memory display commands, 64/32/16/8-bit wide */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 64-bit words",
		.usage = "address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 32-bit words",
		.usage = "address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 16-bit half-words",
		.usage = "address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 8-bit bytes",
		.usage = "address [count]",
	},
	{
		.name = "array2mem",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_array2mem,
		.help = "Writes Tcl array of 8/16/32 bit numbers "
			"to target memory",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "mem2array",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_mem2array,
		.help = "Loads Tcl array of 8/16/32 bit numbers "
			"from target memory",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.handler = handle_target_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	{
		.name = "eventlist",
		.handler = handle_target_event_list,
		.mode = COMMAND_EXEC,
		.help = "displays a table of events defined for this target",
		.usage = "",
	},
	{
		.name = "curstate",
		.mode = COMMAND_EXEC,
		.handler = handle_target_current_state,
		.help = "displays the current state of this target",
		.usage = "",
	},
	/* "arp_*" commands are internal helpers for the reset framework */
	{
		.name = "arp_examine",
		.mode = COMMAND_EXEC,
		.handler = handle_target_examine,
		.help = "used internally for reset processing",
		.usage = "['allow-defer']",
	},
	{
		.name = "was_examined",
		.mode = COMMAND_EXEC,
		.handler = handle_target_was_examined,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "examine_deferred",
		.mode = COMMAND_EXEC,
		.handler = handle_target_examine_deferred,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "arp_halt_gdb",
		.mode = COMMAND_EXEC,
		.handler = handle_target_halt_gdb,
		.help = "used internally for reset processing to halt GDB",
		.usage = "",
	},
	{
		.name = "arp_poll",
		.mode = COMMAND_EXEC,
		.handler = handle_target_poll,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "arp_reset",
		.mode = COMMAND_EXEC,
		.handler = handle_target_reset,
		.help = "used internally for reset processing",
		.usage = "'assert'|'deassert' halt",
	},
	{
		.name = "arp_halt",
		.mode = COMMAND_EXEC,
		.handler = handle_target_halt,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "arp_waitstate",
		.mode = COMMAND_EXEC,
		.handler = handle_target_wait_state,
		.help = "used internally for reset processing",
		.usage = "statename timeoutmsecs",
	},
	{
		.name = "invoke-event",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_invoke_event,
		.help = "invoke handler for specified event",
		.usage = "event_name",
	},
	COMMAND_REGISTRATION_DONE
};
6088
/* Implements "target create <name> <type> [options ...]": allocates and
 * initializes a new struct target, applies the remaining arguments as
 * "configure" options, runs the type-specific target_create() hook and
 * finally registers the per-instance command group named after the
 * target.  On any failure the partially built target is torn down and
 * a Jim error is returned. */
static int target_create(struct jim_getopt_info *goi)
{
	Jim_Obj *new_cmd;
	Jim_Cmd *cmd;
	const char *cp;
	int e;
	int x;
	struct target *target;
	struct command_context *cmd_ctx;

	cmd_ctx = current_command_context(goi->interp);
	assert(cmd_ctx);

	if (goi->argc < 3) {
		Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
		return JIM_ERR;
	}

	/* COMMAND */
	jim_getopt_obj(goi, &new_cmd);
	/* does this command exist?  The target name must not collide with
	 * any existing Jim command. */
	cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
	if (cmd) {
		cp = Jim_GetString(new_cmd, NULL);
		Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
		return JIM_ERR;
	}

	/* TYPE: the transport may substitute its own target type string */
	e = jim_getopt_string(goi, &cp, NULL);
	if (e != JIM_OK)
		return e;
	struct transport *tr = get_current_transport();
	if (tr->override_target) {
		e = tr->override_target(&cp);
		if (e != ERROR_OK) {
			LOG_ERROR("The selected transport doesn't support this target");
			return JIM_ERR;
		}
		LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
	}
	/* now does target type exist */
	for (x = 0 ; target_types[x] ; x++) {
		if (strcmp(cp, target_types[x]->name) == 0) {
			/* found */
			break;
		}
	}
	if (!target_types[x]) {
		/* build a helpful error listing all known type names */
		Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
		for (x = 0 ; target_types[x] ; x++) {
			if (target_types[x + 1]) {
				Jim_AppendStrings(goi->interp,
						   Jim_GetResult(goi->interp),
						   target_types[x]->name,
						   ", ", NULL);
			} else {
				Jim_AppendStrings(goi->interp,
						   Jim_GetResult(goi->interp),
						   " or ",
						   target_types[x]->name, NULL);
			}
		}
		return JIM_ERR;
	}

	/* Create it */
	target = calloc(1, sizeof(struct target));
	if (!target) {
		LOG_ERROR("Out of memory");
		return JIM_ERR;
	}

	/* set empty smp cluster */
	target->smp_targets = &empty_smp_targets;

	/* set target number */
	target->target_number = new_target_number();

	/* allocate memory for each unique target type; the copy lets a
	 * driver patch its own type vtable per instance */
	target->type = malloc(sizeof(struct target_type));
	if (!target->type) {
		LOG_ERROR("Out of memory");
		free(target);
		return JIM_ERR;
	}

	memcpy(target->type, target_types[x], sizeof(struct target_type));

	/* default to first core, override with -coreid */
	target->coreid = 0;

	target->working_area = 0x0;
	target->working_area_size = 0x0;
	target->working_areas = NULL;
	target->backup_working_area = 0;

	target->state = TARGET_UNKNOWN;
	target->debug_reason = DBG_REASON_UNDEFINED;
	target->reg_cache = NULL;
	target->breakpoints = NULL;
	target->watchpoints = NULL;
	target->next = NULL;
	target->arch_info = NULL;

	target->verbose_halt_msg = true;

	target->halt_issued = false;

	/* initialize trace information */
	target->trace_info = calloc(1, sizeof(struct trace));
	if (!target->trace_info) {
		LOG_ERROR("Out of memory");
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	target->dbgmsg = NULL;
	target->dbg_msg_enabled = 0;

	target->endianness = TARGET_ENDIAN_UNKNOWN;

	target->rtos = NULL;
	target->rtos_auto_detect = false;

	target->gdb_port_override = NULL;
	target->gdb_max_connections = 1;

	/* Do the rest as "configure" options */
	goi->isconfigure = 1;
	e = target_configure(goi, target);

	if (e == JIM_OK) {
		/* exactly one of -dap / -chain-position must have been given */
		if (target->has_dap) {
			if (!target->dap_configured) {
				Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		} else {
			if (!target->tap_configured) {
				Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		}
		/* tap must be set after target was configured */
		if (!target->tap)
			e = JIM_ERR;
	}

	if (e != JIM_OK) {
		/* undo everything allocated so far */
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return e;
	}

	if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
		/* default endian to little if not specified */
		target->endianness = TARGET_LITTLE_ENDIAN;
	}

	cp = Jim_GetString(new_cmd, NULL);
	target->cmd_name = strdup(cp);
	if (!target->cmd_name) {
		LOG_ERROR("Out of memory");
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* run the driver's per-instance creation hook, if any */
	if (target->type->target_create) {
		e = (*(target->type->target_create))(target, goi->interp);
		if (e != ERROR_OK) {
			LOG_DEBUG("target_create failed");
			free(target->cmd_name);
			rtos_destroy(target);
			free(target->gdb_port_override);
			free(target->trace_info);
			free(target->type);
			free(target);
			return JIM_ERR;
		}
	}

	/* create the target specific commands */
	if (target->type->commands) {
		e = register_commands(cmd_ctx, NULL, target->type->commands);
		if (e != ERROR_OK)
			LOG_ERROR("unable to register '%s' commands", cp);
	}

	/* now - create the new target name command, chaining in both the
	 * generic per-instance handlers and the driver's own commands */
	const struct command_registration target_subcommands[] = {
		{
			.chain = target_instance_command_handlers,
		},
		{
			.chain = target->type->commands,
		},
		COMMAND_REGISTRATION_DONE
	};
	const struct command_registration target_commands[] = {
		{
			.name = cp,
			.mode = COMMAND_ANY,
			.help = "target command group",
			.usage = "",
			.chain = target_subcommands,
		},
		COMMAND_REGISTRATION_DONE
	};
	e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
	if (e != ERROR_OK) {
		if (target->type->deinit_target)
			target->type->deinit_target(target);
		free(target->cmd_name);
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* append to end of list */
	append_to_list_all_targets(target);

	/* a freshly created target becomes the current one */
	cmd_ctx->current_target = target;
	return JIM_OK;
}
6325
6326 COMMAND_HANDLER(handle_target_current)
6327 {
6328 if (CMD_ARGC != 0)
6329 return ERROR_COMMAND_SYNTAX_ERROR;
6330
6331 struct target *target = get_current_target_or_null(CMD_CTX);
6332 if (target)
6333 command_print(CMD, "%s", target_name(target));
6334
6335 return ERROR_OK;
6336 }
6337
6338 COMMAND_HANDLER(handle_target_types)
6339 {
6340 if (CMD_ARGC != 0)
6341 return ERROR_COMMAND_SYNTAX_ERROR;
6342
6343 for (unsigned int x = 0; target_types[x]; x++)
6344 command_print(CMD, "%s", target_types[x]->name);
6345
6346 return ERROR_OK;
6347 }
6348
6349 COMMAND_HANDLER(handle_target_names)
6350 {
6351 if (CMD_ARGC != 0)
6352 return ERROR_COMMAND_SYNTAX_ERROR;
6353
6354 struct target *target = all_targets;
6355 while (target) {
6356 command_print(CMD, "%s", target_name(target));
6357 target = target->next;
6358 }
6359
6360 return ERROR_OK;
6361 }
6362
6363 static struct target_list *
6364 __attribute__((warn_unused_result))
6365 create_target_list_node(const char *targetname)
6366 {
6367 struct target *target = get_target(targetname);
6368 LOG_DEBUG("%s ", targetname);
6369 if (!target)
6370 return NULL;
6371
6372 struct target_list *new = malloc(sizeof(struct target_list));
6373 if (!new) {
6374 LOG_ERROR("Out of memory");
6375 return new;
6376 }
6377
6378 new->target = target;
6379 return new;
6380 }
6381
6382 static int get_target_with_common_rtos_type(struct command_invocation *cmd,
6383 struct list_head *lh, struct target **result)
6384 {
6385 struct target *target = NULL;
6386 struct target_list *curr;
6387 foreach_smp_target(curr, lh) {
6388 struct rtos *curr_rtos = curr->target->rtos;
6389 if (curr_rtos) {
6390 if (target && target->rtos && target->rtos->type != curr_rtos->type) {
6391 command_print(cmd, "Different rtos types in members of one smp target!");
6392 return ERROR_FAIL;
6393 }
6394 target = curr->target;
6395 }
6396 }
6397 *result = target;
6398 return ERROR_OK;
6399 }
6400
6401 COMMAND_HANDLER(handle_target_smp)
6402 {
6403 static int smp_group = 1;
6404
6405 if (CMD_ARGC == 0) {
6406 LOG_DEBUG("Empty SMP target");
6407 return ERROR_OK;
6408 }
6409 LOG_DEBUG("%d", CMD_ARGC);
6410 /* CMD_ARGC[0] = target to associate in smp
6411 * CMD_ARGC[1] = target to associate in smp
6412 * CMD_ARGC[2] ...
6413 */
6414
6415 struct list_head *lh = malloc(sizeof(*lh));
6416 if (!lh) {
6417 LOG_ERROR("Out of memory");
6418 return ERROR_FAIL;
6419 }
6420 INIT_LIST_HEAD(lh);
6421
6422 for (unsigned int i = 0; i < CMD_ARGC; i++) {
6423 struct target_list *new = create_target_list_node(CMD_ARGV[i]);
6424 if (new)
6425 list_add_tail(&new->lh, lh);
6426 }
6427 /* now parse the list of cpu and put the target in smp mode*/
6428 struct target_list *curr;
6429 foreach_smp_target(curr, lh) {
6430 struct target *target = curr->target;
6431 target->smp = smp_group;
6432 target->smp_targets = lh;
6433 }
6434 smp_group++;
6435
6436 struct target *rtos_target;
6437 int retval = get_target_with_common_rtos_type(CMD, lh, &rtos_target);
6438 if (retval == ERROR_OK && rtos_target)
6439 retval = rtos_smp_init(rtos_target);
6440
6441 return retval;
6442 }
6443
6444 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6445 {
6446 struct jim_getopt_info goi;
6447 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6448 if (goi.argc < 3) {
6449 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6450 "<name> <target_type> [<target_options> ...]");
6451 return JIM_ERR;
6452 }
6453 return target_create(&goi);
6454 }
6455
/* Subcommands of the global "target" command
 * ("target create", "target names", "target smp", ...). */
static const struct command_registration target_subcommand_handlers[] = {
	{
		.name = "init",
		.mode = COMMAND_CONFIG,
		.handler = handle_target_init_command,
		.help = "initialize targets",
		.usage = "",
	},
	{
		.name = "create",
		.mode = COMMAND_CONFIG,
		.jim_handler = jim_target_create,
		.usage = "name type '-chain-position' name [options ...]",
		.help = "Creates and selects a new target",
	},
	{
		.name = "current",
		.mode = COMMAND_ANY,
		.handler = handle_target_current,
		.help = "Returns the currently selected target",
		.usage = "",
	},
	{
		.name = "types",
		.mode = COMMAND_ANY,
		.handler = handle_target_types,
		.help = "Returns the available target types as "
				"a list of strings",
		.usage = "",
	},
	{
		.name = "names",
		.mode = COMMAND_ANY,
		.handler = handle_target_names,
		.help = "Returns the names of all targets as a list of strings",
		.usage = "",
	},
	{
		.name = "smp",
		.mode = COMMAND_ANY,
		.handler = handle_target_smp,
		.usage = "targetname1 targetname2 ...",
		.help = "gather several target in a smp list"
	},

	COMMAND_REGISTRATION_DONE
};
6503
/* One chunk of a pre-loaded image kept in host memory by
 * "fast_load_image" for later transfer with "fast_load". */
struct fast_load {
	target_addr_t address;	/* target address this chunk is written to */
	uint8_t *data;		/* host-side copy of the section data */
	int length;		/* number of bytes in data */

};

/* Image staged by "fast_load_image"; released by free_fastload(). */
static int fastload_num;
static struct fast_load *fastload;
6513
6514 static void free_fastload(void)
6515 {
6516 if (fastload) {
6517 for (int i = 0; i < fastload_num; i++)
6518 free(fastload[i].data);
6519 free(fastload);
6520 fastload = NULL;
6521 }
6522 }
6523
/* "fast_load_image" - parse an image file and stage its sections in server
 * (host) memory so that a later "fast_load" can write them to the target;
 * primarily intended for profiling the download path. */
COMMAND_HANDLER(handle_fast_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* unsigned wrap: highest possible address */

	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
		&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct duration bench;
	duration_start(&bench);

	/* optional third argument selects the image type ('bin', 'ihex', ...) */
	retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
	if (retval != ERROR_OK)
		return retval;

	image_size = 0x0;
	retval = ERROR_OK;
	fastload_num = image.num_sections;
	fastload = malloc(sizeof(struct fast_load)*image.num_sections);
	if (!fastload) {
		command_print(CMD, "out of memory");
		image_close(&image);
		return ERROR_FAIL;
	}
	/* zero-fill so free_fastload() can safely free() untouched entries */
	memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD, "error allocating buffer for section (%d bytes)",
				(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* keep only the part of the section that overlaps
		 * [min_address, max_address) */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
			(image.sections[i].base_address < max_address)) {
			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above max_address */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			fastload[i].address = image.sections[i].base_address + offset;
			fastload[i].data = malloc(length);
			if (!fastload[i].data) {
				free(buffer);
				command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
					length);
				retval = ERROR_FAIL;
				break;
			}
			memcpy(fastload[i].data, buffer + offset, length);
			fastload[i].length = length;

			image_size += length;
			command_print(CMD, "%u bytes written at address 0x%8.8x",
				(unsigned int)length,
				((unsigned int)(image.sections[i].base_address + offset)));
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "Loaded %" PRIu32 " bytes "
			"in %fs (%0.3f KiB/s)", image_size,
			duration_elapsed(&bench), duration_kbps(&bench, image_size));

		command_print(CMD,
			"WARNING: image has not been loaded to target!"
			"You can issue a 'fast_load' to finish loading.");
	}

	image_close(&image);

	/* on any failure, drop the partially staged image */
	if (retval != ERROR_OK)
		free_fastload();

	return retval;
}
6625
6626 COMMAND_HANDLER(handle_fast_load_command)
6627 {
6628 if (CMD_ARGC > 0)
6629 return ERROR_COMMAND_SYNTAX_ERROR;
6630 if (!fastload) {
6631 LOG_ERROR("No image in memory");
6632 return ERROR_FAIL;
6633 }
6634 int i;
6635 int64_t ms = timeval_ms();
6636 int size = 0;
6637 int retval = ERROR_OK;
6638 for (i = 0; i < fastload_num; i++) {
6639 struct target *target = get_current_target(CMD_CTX);
6640 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6641 (unsigned int)(fastload[i].address),
6642 (unsigned int)(fastload[i].length));
6643 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6644 if (retval != ERROR_OK)
6645 break;
6646 size += fastload[i].length;
6647 }
6648 if (retval == ERROR_OK) {
6649 int64_t after = timeval_ms();
6650 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6651 }
6652 return retval;
6653 }
6654
/* Top-level command table: the "targets" listing/selection command and the
 * configuration-stage "target" command group (see target_subcommand_handlers). */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
6673
/* Register the top-level "targets" and "target" commands with the
 * command context. Called once during server startup. */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6678
/* When true, print a hint after each reset about configuration options
 * that could improve performance; toggled by the "reset_nag" command. */
static bool target_reset_nag = true;

/* Accessor used by reset handling to decide whether to print the nag. */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
6685
/* "reset_nag ['enable'|'disable']" - query or set the reset-nag flag. */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
		&target_reset_nag, "Nag after each reset about options to improve "
		"performance");
}
6692
6693 COMMAND_HANDLER(handle_ps_command)
6694 {
6695 struct target *target = get_current_target(CMD_CTX);
6696 char *display;
6697 if (target->state != TARGET_HALTED) {
6698 command_print(CMD, "Error: [%s] not halted", target_name(target));
6699 return ERROR_TARGET_NOT_HALTED;
6700 }
6701
6702 if ((target->rtos) && (target->rtos->type)
6703 && (target->rtos->type->ps_command)) {
6704 display = target->rtos->type->ps_command(target);
6705 command_print(CMD, "%s", display);
6706 free(display);
6707 return ERROR_OK;
6708 } else {
6709 LOG_INFO("failed");
6710 return ERROR_TARGET_FAILURE;
6711 }
6712 }
6713
/* Print an optional label followed by 'size' bytes of 'buf' as
 * space-separated hex pairs, then terminate the output line. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
	if (text)
		command_print_sameline(cmd, "%s", text);

	int idx;
	for (idx = 0; idx < size; idx++)
		command_print_sameline(cmd, " %02x", buf[idx]);

	/* flush the line */
	command_print(cmd, " ");
}
6722
6723 COMMAND_HANDLER(handle_test_mem_access_command)
6724 {
6725 struct target *target = get_current_target(CMD_CTX);
6726 uint32_t test_size;
6727 int retval = ERROR_OK;
6728
6729 if (target->state != TARGET_HALTED) {
6730 command_print(CMD, "Error: [%s] not halted", target_name(target));
6731 return ERROR_TARGET_NOT_HALTED;
6732 }
6733
6734 if (CMD_ARGC != 1)
6735 return ERROR_COMMAND_SYNTAX_ERROR;
6736
6737 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6738
6739 /* Test reads */
6740 size_t num_bytes = test_size + 4;
6741
6742 struct working_area *wa = NULL;
6743 retval = target_alloc_working_area(target, num_bytes, &wa);
6744 if (retval != ERROR_OK) {
6745 LOG_ERROR("Not enough working area");
6746 return ERROR_FAIL;
6747 }
6748
6749 uint8_t *test_pattern = malloc(num_bytes);
6750
6751 for (size_t i = 0; i < num_bytes; i++)
6752 test_pattern[i] = rand();
6753
6754 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6755 if (retval != ERROR_OK) {
6756 LOG_ERROR("Test pattern write failed");
6757 goto out;
6758 }
6759
6760 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6761 for (int size = 1; size <= 4; size *= 2) {
6762 for (int offset = 0; offset < 4; offset++) {
6763 uint32_t count = test_size / size;
6764 size_t host_bufsiz = (count + 2) * size + host_offset;
6765 uint8_t *read_ref = malloc(host_bufsiz);
6766 uint8_t *read_buf = malloc(host_bufsiz);
6767
6768 for (size_t i = 0; i < host_bufsiz; i++) {
6769 read_ref[i] = rand();
6770 read_buf[i] = read_ref[i];
6771 }
6772 command_print_sameline(CMD,
6773 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6774 size, offset, host_offset ? "un" : "");
6775
6776 struct duration bench;
6777 duration_start(&bench);
6778
6779 retval = target_read_memory(target, wa->address + offset, size, count,
6780 read_buf + size + host_offset);
6781
6782 duration_measure(&bench);
6783
6784 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6785 command_print(CMD, "Unsupported alignment");
6786 goto next;
6787 } else if (retval != ERROR_OK) {
6788 command_print(CMD, "Memory read failed");
6789 goto next;
6790 }
6791
6792 /* replay on host */
6793 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6794
6795 /* check result */
6796 int result = memcmp(read_ref, read_buf, host_bufsiz);
6797 if (result == 0) {
6798 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6799 duration_elapsed(&bench),
6800 duration_kbps(&bench, count * size));
6801 } else {
6802 command_print(CMD, "Compare failed");
6803 binprint(CMD, "ref:", read_ref, host_bufsiz);
6804 binprint(CMD, "buf:", read_buf, host_bufsiz);
6805 }
6806 next:
6807 free(read_ref);
6808 free(read_buf);
6809 }
6810 }
6811 }
6812
6813 out:
6814 free(test_pattern);
6815
6816 target_free_working_area(target, wa);
6817
6818 /* Test writes */
6819 num_bytes = test_size + 4 + 4 + 4;
6820
6821 retval = target_alloc_working_area(target, num_bytes, &wa);
6822 if (retval != ERROR_OK) {
6823 LOG_ERROR("Not enough working area");
6824 return ERROR_FAIL;
6825 }
6826
6827 test_pattern = malloc(num_bytes);
6828
6829 for (size_t i = 0; i < num_bytes; i++)
6830 test_pattern[i] = rand();
6831
6832 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6833 for (int size = 1; size <= 4; size *= 2) {
6834 for (int offset = 0; offset < 4; offset++) {
6835 uint32_t count = test_size / size;
6836 size_t host_bufsiz = count * size + host_offset;
6837 uint8_t *read_ref = malloc(num_bytes);
6838 uint8_t *read_buf = malloc(num_bytes);
6839 uint8_t *write_buf = malloc(host_bufsiz);
6840
6841 for (size_t i = 0; i < host_bufsiz; i++)
6842 write_buf[i] = rand();
6843 command_print_sameline(CMD,
6844 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6845 size, offset, host_offset ? "un" : "");
6846
6847 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6848 if (retval != ERROR_OK) {
6849 command_print(CMD, "Test pattern write failed");
6850 goto nextw;
6851 }
6852
6853 /* replay on host */
6854 memcpy(read_ref, test_pattern, num_bytes);
6855 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6856
6857 struct duration bench;
6858 duration_start(&bench);
6859
6860 retval = target_write_memory(target, wa->address + size + offset, size, count,
6861 write_buf + host_offset);
6862
6863 duration_measure(&bench);
6864
6865 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6866 command_print(CMD, "Unsupported alignment");
6867 goto nextw;
6868 } else if (retval != ERROR_OK) {
6869 command_print(CMD, "Memory write failed");
6870 goto nextw;
6871 }
6872
6873 /* read back */
6874 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6875 if (retval != ERROR_OK) {
6876 command_print(CMD, "Test pattern write failed");
6877 goto nextw;
6878 }
6879
6880 /* check result */
6881 int result = memcmp(read_ref, read_buf, num_bytes);
6882 if (result == 0) {
6883 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6884 duration_elapsed(&bench),
6885 duration_kbps(&bench, count * size));
6886 } else {
6887 command_print(CMD, "Compare failed");
6888 binprint(CMD, "ref:", read_ref, num_bytes);
6889 binprint(CMD, "buf:", read_buf, num_bytes);
6890 }
6891 nextw:
6892 free(read_ref);
6893 free(read_buf);
6894 }
6895 }
6896 }
6897
6898 free(test_pattern);
6899
6900 target_free_working_area(target, wa);
6901 return retval;
6902 }
6903
/* Per-target run-time command table, registered by
 * target_register_user_commands(): image download, run control,
 * memory display/write, breakpoints/watchpoints and diagnostics. */
static const struct command_registration target_exec_command_handlers[] = {
	{
		.name = "fast_load_image",
		.handler = handle_fast_load_image_command,
		.mode = COMMAND_ANY,
		.help = "Load image into server memory for later use by "
			"fast_load; primarily for profiling",
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address [max_length]]",
	},
	{
		.name = "fast_load",
		.handler = handle_fast_load_command,
		.mode = COMMAND_EXEC,
		.help = "loads active fast load image to current target "
			"- mainly for profiling purposes",
		.usage = "",
	},
	{
		.name = "profile",
		.handler = handle_profile_command,
		.mode = COMMAND_EXEC,
		.usage = "seconds filename [start end]",
		.help = "profiling samples the CPU PC",
	},
	/** @todo don't register virt2phys() unless target supports it */
	{
		.name = "virt2phys",
		.handler = handle_virt2phys_command,
		.mode = COMMAND_ANY,
		.help = "translate a virtual address into a physical address",
		.usage = "virtual_address",
	},
	{
		.name = "reg",
		.handler = handle_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display (reread from target with \"force\") or set a register; "
			"with no arguments, displays all registers and their values",
		.usage = "[(register_number|register_name) [(value|'force')]]",
	},
	{
		.name = "poll",
		.handler = handle_poll_command,
		.mode = COMMAND_EXEC,
		.help = "poll target state; or reconfigure background polling",
		.usage = "['on'|'off']",
	},
	{
		.name = "wait_halt",
		.handler = handle_wait_halt_command,
		.mode = COMMAND_EXEC,
		.help = "wait up to the specified number of milliseconds "
			"(default 5000) for a previously requested halt",
		.usage = "[milliseconds]",
	},
	{
		.name = "halt",
		.handler = handle_halt_command,
		.mode = COMMAND_EXEC,
		.help = "request target to halt, then wait up to the specified "
			"number of milliseconds (default 5000) for it to complete",
		.usage = "[milliseconds]",
	},
	{
		.name = "resume",
		.handler = handle_resume_command,
		.mode = COMMAND_EXEC,
		.help = "resume target execution from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "reset",
		.handler = handle_reset_command,
		.mode = COMMAND_EXEC,
		.usage = "[run|halt|init]",
		.help = "Reset all targets into the specified mode. "
			"Default reset mode is run, if not given.",
	},
	{
		.name = "soft_reset_halt",
		.handler = handle_soft_reset_halt_command,
		.mode = COMMAND_EXEC,
		.usage = "",
		.help = "halt the target and do a soft reset",
	},
	{
		.name = "step",
		.handler = handle_step_command,
		.mode = COMMAND_EXEC,
		.help = "step one instruction from current PC or address",
		.usage = "[address]",
	},
	/* memory display commands: mdd/mdw/mdh/mdb share one handler and
	 * dispatch on the command name for the access width */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory double-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory half-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory bytes",
		.usage = "['phys'] address [count]",
	},
	/* memory write commands: mwd/mww/mwh/mwb likewise share a handler */
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory double-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory half-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory byte",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "bp",
		.handler = handle_bp_command,
		.mode = COMMAND_EXEC,
		.help = "list or set hardware or software breakpoint",
		.usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
	},
	{
		.name = "rbp",
		.handler = handle_rbp_command,
		.mode = COMMAND_EXEC,
		.help = "remove breakpoint",
		.usage = "'all' | address",
	},
	{
		.name = "wp",
		.handler = handle_wp_command,
		.mode = COMMAND_EXEC,
		.help = "list (no params) or create watchpoints",
		.usage = "[address length [('r'|'w'|'a') value [mask]]]",
	},
	{
		.name = "rwp",
		.handler = handle_rwp_command,
		.mode = COMMAND_EXEC,
		.help = "remove watchpoint",
		.usage = "'all' | address",
	},
	{
		.name = "load_image",
		.handler = handle_load_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address] [max_length]",
	},
	{
		.name = "dump_image",
		.handler = handle_dump_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address size",
	},
	{
		.name = "verify_image_checksum",
		.handler = handle_verify_image_checksum_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "verify_image",
		.handler = handle_verify_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "test_image",
		.handler = handle_test_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.handler = handle_target_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	{
		.name = "reset_nag",
		.handler = handle_target_reset_nag,
		.mode = COMMAND_ANY,
		.help = "Nag after each reset about options that could have been "
			"enabled to improve performance.",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "ps",
		.handler = handle_ps_command,
		.mode = COMMAND_EXEC,
		.help = "list all tasks",
		.usage = "",
	},
	{
		.name = "test_mem_access",
		.handler = handle_test_mem_access_command,
		.mode = COMMAND_EXEC,
		.help = "Test the target's memory access functions",
		.usage = "size",
	},

	COMMAND_REGISTRATION_DONE
};
7165 static int target_register_user_commands(struct command_context *cmd_ctx)
7166 {
7167 int retval = ERROR_OK;
7168 retval = target_request_register_commands(cmd_ctx);
7169 if (retval != ERROR_OK)
7170 return retval;
7171
7172 retval = trace_register_commands(cmd_ctx);
7173 if (retval != ERROR_OK)
7174 return retval;
7175
7176
7177 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7178 }
7179
7180 const char *target_debug_reason_str(enum target_debug_reason reason)
7181 {
7182 switch (reason) {
7183 case DBG_REASON_DBGRQ:
7184 return "DBGRQ";
7185 case DBG_REASON_BREAKPOINT:
7186 return "BREAKPOINT";
7187 case DBG_REASON_WATCHPOINT:
7188 return "WATCHPOINT";
7189 case DBG_REASON_WPTANDBKPT:
7190 return "WPTANDBKPT";
7191 case DBG_REASON_SINGLESTEP:
7192 return "SINGLESTEP";
7193 case DBG_REASON_NOTHALTED:
7194 return "NOTHALTED";
7195 case DBG_REASON_EXIT:
7196 return "EXIT";
7197 case DBG_REASON_EXC_CATCH:
7198 return "EXC_CATCH";
7199 case DBG_REASON_UNDEFINED:
7200 return "UNDEFINED";
7201 default:
7202 return "UNKNOWN!";
7203 }
7204 }

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to reach this page again; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)