target: Remove break/watchpoints in target_destroy()
[openocd.git] / src / target / target.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2007-2010 Øyvind Harboe *
8 * oyvind.harboe@zylin.com *
9 * *
10 * Copyright (C) 2008, Duane Ellis *
11 * openocd@duaneeellis.com *
12 * *
13 * Copyright (C) 2008 by Spencer Oliver *
14 * spen@spen-soft.co.uk *
15 * *
16 * Copyright (C) 2008 by Rick Altherr *
17 * kc8apf@kc8apf.net> *
18 * *
19 * Copyright (C) 2011 by Broadcom Corporation *
20 * Evan Hunter - ehunter@broadcom.com *
21 * *
22 * Copyright (C) ST-Ericsson SA 2011 *
23 * michel.jaouen@stericsson.com : smp minimum support *
24 * *
25 * Copyright (C) 2011 Andreas Fritiofson *
26 * andreas.fritiofson@gmail.com *
27 ***************************************************************************/
28
29 #ifdef HAVE_CONFIG_H
30 #include "config.h"
31 #endif
32
33 #include <helper/align.h>
34 #include <helper/nvp.h>
35 #include <helper/time_support.h>
36 #include <jtag/jtag.h>
37 #include <flash/nor/core.h>
38
39 #include "target.h"
40 #include "target_type.h"
41 #include "target_request.h"
42 #include "breakpoints.h"
43 #include "register.h"
44 #include "trace.h"
45 #include "image.h"
46 #include "rtos/rtos.h"
47 #include "transport/transport.h"
48 #include "arm_cti.h"
49 #include "smp.h"
50 #include "semihosting_common.h"
51
/* default halt wait timeout (ms) */
#define DEFAULT_HALT_TIMEOUT 5000

/* Forward declarations for static helpers defined later in this file. */
static int target_read_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, uint8_t *buffer);
static int target_write_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, const uint8_t *buffer);
static int target_array2mem(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_mem2array(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_register_user_commands(struct command_context *cmd_ctx);
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info);
static int target_gdb_fileio_end_default(struct target *target, int retcode,
		int fileio_errno, bool ctrl_c);
68
/* All target drivers compiled into this build; the list is NULL-terminated
 * so callers can iterate without knowing its length. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&xtensa_chip_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&esp32_target,
	&esp32s2_target,
	&esp32s3_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&armv8r_target,
	&mips_mips64_target,
	NULL,
};
110
/* Head of the singly linked list of all configured targets (via target->next). */
struct target *all_targets;
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
/* Absolute time (ms) at which the next timer callback is due. */
static int64_t target_timer_next_event_value;
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
static LIST_HEAD(empty_smp_targets);

enum nvp_assert {
	NVP_DEASSERT,
	NVP_ASSERT,
};

/* Accepted spellings for assert/deassert command arguments. */
static const struct nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};

/* Symbolic names for the ERROR_TARGET_* codes, used by target_strerror_safe(). */
static const struct nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
149
150 static const char *target_strerror_safe(int err)
151 {
152 const struct nvp *n;
153
154 n = nvp_value2name(nvp_error_target, err);
155 if (!n->name)
156 return "unknown";
157 else
158 return n->name;
159 }
160
/* Event names accepted by the Tcl "configure -event" interface and
 * reported by target_event_name(). */
static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START,         .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE,    .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT,        .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST,   .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE,  .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT,          .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END,           .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X100, .name = "semihosting-user-cmd-0x100" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X101, .name = "semihosting-user-cmd-0x101" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X102, .name = "semihosting-user-cmd-0x102" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X103, .name = "semihosting-user-cmd-0x103" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X104, .name = "semihosting-user-cmd-0x104" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X105, .name = "semihosting-user-cmd-0x105" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X106, .name = "semihosting-user-cmd-0x106" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X107, .name = "semihosting-user-cmd-0x107" },

	{ .name = NULL, .value = -1 }
};

/* Human-readable names for enum target_state (see target_state_name()). */
static const struct nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted",  .value = TARGET_HALTED },
	{ .name = "reset",   .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};

/* Human-readable names for enum target_debug_reason (see debug_reason_name()). */
static const struct nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request",             .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint",                .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint",                .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step",               .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted",         .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit",              .value = DBG_REASON_EXIT },
	{ .name = "exception-catch",           .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined",                 .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};

/* Endianness keywords accepted by target configuration ("be"/"le" are aliases). */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big",    .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be",     .value = TARGET_BIG_ENDIAN },
	{ .name = "le",     .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL,     .value = -1 },
};

/* Reset mode keywords (see target_reset_mode_name() and target_process_reset()). */
static const struct nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run",     .value = RESET_RUN },
	{ .name = "halt",    .value = RESET_HALT },
	{ .name = "init",    .value = RESET_INIT },
	{ .name = NULL,      .value = -1 },
};
250
251 const char *debug_reason_name(struct target *t)
252 {
253 const char *cp;
254
255 cp = nvp_value2name(nvp_target_debug_reason,
256 t->debug_reason)->name;
257 if (!cp) {
258 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
259 cp = "(*BUG*unknown*BUG*)";
260 }
261 return cp;
262 }
263
264 const char *target_state_name(struct target *t)
265 {
266 const char *cp;
267 cp = nvp_value2name(nvp_target_state, t->state)->name;
268 if (!cp) {
269 LOG_ERROR("Invalid target state: %d", (int)(t->state));
270 cp = "(*BUG*unknown*BUG*)";
271 }
272
273 if (!target_was_examined(t) && t->defer_examine)
274 cp = "examine deferred";
275
276 return cp;
277 }
278
279 const char *target_event_name(enum target_event event)
280 {
281 const char *cp;
282 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
283 if (!cp) {
284 LOG_ERROR("Invalid target event: %d", (int)(event));
285 cp = "(*BUG*unknown*BUG*)";
286 }
287 return cp;
288 }
289
290 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
291 {
292 const char *cp;
293 cp = nvp_value2name(nvp_reset_modes, reset_mode)->name;
294 if (!cp) {
295 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
296 cp = "(*BUG*unknown*BUG*)";
297 }
298 return cp;
299 }
300
301 /* determine the number of the new target */
302 static int new_target_number(void)
303 {
304 struct target *t;
305 int x;
306
307 /* number is 0 based */
308 x = -1;
309 t = all_targets;
310 while (t) {
311 if (x < t->target_number)
312 x = t->target_number;
313 t = t->next;
314 }
315 return x + 1;
316 }
317
318 static void append_to_list_all_targets(struct target *target)
319 {
320 struct target **t = &all_targets;
321
322 while (*t)
323 t = &((*t)->next);
324 *t = target;
325 }
326
327 /* read a uint64_t from a buffer in target memory endianness */
328 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
329 {
330 if (target->endianness == TARGET_LITTLE_ENDIAN)
331 return le_to_h_u64(buffer);
332 else
333 return be_to_h_u64(buffer);
334 }
335
336 /* read a uint32_t from a buffer in target memory endianness */
337 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
338 {
339 if (target->endianness == TARGET_LITTLE_ENDIAN)
340 return le_to_h_u32(buffer);
341 else
342 return be_to_h_u32(buffer);
343 }
344
345 /* read a uint24_t from a buffer in target memory endianness */
346 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
347 {
348 if (target->endianness == TARGET_LITTLE_ENDIAN)
349 return le_to_h_u24(buffer);
350 else
351 return be_to_h_u24(buffer);
352 }
353
354 /* read a uint16_t from a buffer in target memory endianness */
355 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
356 {
357 if (target->endianness == TARGET_LITTLE_ENDIAN)
358 return le_to_h_u16(buffer);
359 else
360 return be_to_h_u16(buffer);
361 }
362
363 /* write a uint64_t to a buffer in target memory endianness */
364 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
365 {
366 if (target->endianness == TARGET_LITTLE_ENDIAN)
367 h_u64_to_le(buffer, value);
368 else
369 h_u64_to_be(buffer, value);
370 }
371
372 /* write a uint32_t to a buffer in target memory endianness */
373 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
374 {
375 if (target->endianness == TARGET_LITTLE_ENDIAN)
376 h_u32_to_le(buffer, value);
377 else
378 h_u32_to_be(buffer, value);
379 }
380
381 /* write a uint24_t to a buffer in target memory endianness */
382 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
383 {
384 if (target->endianness == TARGET_LITTLE_ENDIAN)
385 h_u24_to_le(buffer, value);
386 else
387 h_u24_to_be(buffer, value);
388 }
389
390 /* write a uint16_t to a buffer in target memory endianness */
391 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
392 {
393 if (target->endianness == TARGET_LITTLE_ENDIAN)
394 h_u16_to_le(buffer, value);
395 else
396 h_u16_to_be(buffer, value);
397 }
398
/* write a uint8_t to a buffer in target memory endianness */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	/* single byte: endianness is irrelevant */
	*buffer = value;
}
404
/* read a uint64_t array from a buffer in target memory endianness
 * (original header comment said "write", which was wrong for a get_ helper) */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
}
412
/* read a uint32_t array from a buffer in target memory endianness
 * (original header comment said "write", which was wrong for a get_ helper) */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
}
420
/* read a uint16_t array from a buffer in target memory endianness
 * (original header comment said "write", which was wrong for a get_ helper) */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
}
428
/* write a uint64_t array to a buffer in target memory endianness */
void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u64(target, &buffer[idx * 8], srcbuf[idx]);
}
436
/* write a uint32_t array to a buffer in target memory endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u32(target, &buffer[idx * 4], srcbuf[idx]);
}
444
/* write a uint16_t array to a buffer in target memory endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u16(target, &buffer[idx * 2], srcbuf[idx]);
}
452
453 /* return a pointer to a configured target; id is name or number */
454 struct target *get_target(const char *id)
455 {
456 struct target *target;
457
458 /* try as tcltarget name */
459 for (target = all_targets; target; target = target->next) {
460 if (!target_name(target))
461 continue;
462 if (strcmp(id, target_name(target)) == 0)
463 return target;
464 }
465
466 /* It's OK to remove this fallback sometime after August 2010 or so */
467
468 /* no match, try as number */
469 unsigned num;
470 if (parse_uint(id, &num) != ERROR_OK)
471 return NULL;
472
473 for (target = all_targets; target; target = target->next) {
474 if (target->target_number == (int)num) {
475 LOG_WARNING("use '%s' as target identifier, not '%u'",
476 target_name(target), num);
477 return target;
478 }
479 }
480
481 return NULL;
482 }
483
484 /* returns a pointer to the n-th configured target */
485 struct target *get_target_by_num(int num)
486 {
487 struct target *target = all_targets;
488
489 while (target) {
490 if (target->target_number == num)
491 return target;
492 target = target->next;
493 }
494
495 return NULL;
496 }
497
/* Return the current target; a missing current target is a fatal bug. */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *result = get_current_target_or_null(cmd_ctx);

	if (!result) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return result;
}
509
510 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
511 {
512 return cmd_ctx->current_target_override
513 ? cmd_ctx->current_target_override
514 : cmd_ctx->current_target;
515 }
516
517 int target_poll(struct target *target)
518 {
519 int retval;
520
521 /* We can't poll until after examine */
522 if (!target_was_examined(target)) {
523 /* Fail silently lest we pollute the log */
524 return ERROR_FAIL;
525 }
526
527 retval = target->type->poll(target);
528 if (retval != ERROR_OK)
529 return retval;
530
531 if (target->halt_issued) {
532 if (target->state == TARGET_HALTED)
533 target->halt_issued = false;
534 else {
535 int64_t t = timeval_ms() - target->halt_issued_time;
536 if (t > DEFAULT_HALT_TIMEOUT) {
537 target->halt_issued = false;
538 LOG_INFO("Halt timed out, wake up GDB.");
539 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
540 }
541 }
542 }
543
544 return ERROR_OK;
545 }
546
547 int target_halt(struct target *target)
548 {
549 int retval;
550 /* We can't poll until after examine */
551 if (!target_was_examined(target)) {
552 LOG_ERROR("Target not examined yet");
553 return ERROR_FAIL;
554 }
555
556 retval = target->type->halt(target);
557 if (retval != ERROR_OK)
558 return retval;
559
560 target->halt_issued = true;
561 target->halt_issued_time = timeval_ms();
562
563 return ERROR_OK;
564 }
565
566 /**
567 * Make the target (re)start executing using its saved execution
568 * context (possibly with some modifications).
569 *
570 * @param target Which target should start executing.
571 * @param current True to use the target's saved program counter instead
572 * of the address parameter
573 * @param address Optionally used as the program counter.
574 * @param handle_breakpoints True iff breakpoints at the resumption PC
575 * should be skipped. (For example, maybe execution was stopped by
576 * such a breakpoint, in which case it would be counterproductive to
577 * let it re-trigger.
578 * @param debug_execution False if all working areas allocated by OpenOCD
579 * should be released and/or restored to their original contents.
580 * (This would for example be true to run some downloaded "helper"
581 * algorithm code, which resides in one such working buffer and uses
582 * another for data storage.)
583 *
584 * @todo Resolve the ambiguity about what the "debug_execution" flag
585 * signifies. For example, Target implementations don't agree on how
586 * it relates to invalidation of the register cache, or to whether
587 * breakpoints and watchpoints should be enabled. (It would seem wrong
588 * to enable breakpoints when running downloaded "helper" algorithms
589 * (debug_execution true), since the breakpoints would be set to match
590 * target firmware being debugged, not the helper algorithm.... and
591 * enabling them could cause such helpers to malfunction (for example,
592 * by overwriting data with a breakpoint instruction. On the other
593 * hand the infrastructure for running such helpers might use this
594 * procedure but rely on hardware breakpoint to detect termination.)
595 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	/*
	 * resume() triggers the event 'resumed'. The execution of TCL commands
	 * in the event handler causes the polling of targets. If the target has
	 * already halted for a breakpoint, polling will run the 'halted' event
	 * handler before the pending 'resumed' handler.
	 * Disable polling during resume() to guarantee the execution of handlers
	 * in the correct order.
	 */
	bool save_poll_mask = jtag_poll_mask();
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	jtag_poll_unmask(save_poll_mask);

	/* resume failed: RESUME_END is deliberately not fired */
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	/* retval is ERROR_OK here */
	return retval;
}
632
633 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
634 {
635 char buf[100];
636 int retval;
637 const struct nvp *n;
638 n = nvp_value2name(nvp_reset_modes, reset_mode);
639 if (!n->name) {
640 LOG_ERROR("invalid reset mode");
641 return ERROR_FAIL;
642 }
643
644 struct target *target;
645 for (target = all_targets; target; target = target->next)
646 target_call_reset_callbacks(target, reset_mode);
647
648 /* disable polling during reset to make reset event scripts
649 * more predictable, i.e. dr/irscan & pathmove in events will
650 * not have JTAG operations injected into the middle of a sequence.
651 */
652 bool save_poll_mask = jtag_poll_mask();
653
654 sprintf(buf, "ocd_process_reset %s", n->name);
655 retval = Jim_Eval(cmd->ctx->interp, buf);
656
657 jtag_poll_unmask(save_poll_mask);
658
659 if (retval != JIM_OK) {
660 Jim_MakeErrorMessage(cmd->ctx->interp);
661 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
662 return ERROR_FAIL;
663 }
664
665 /* We want any events to be processed before the prompt */
666 retval = target_call_timer_callbacks_now();
667
668 for (target = all_targets; target; target = target->next) {
669 target->type->check_reset(target);
670 target->running_alg = false;
671 }
672
673 return retval;
674 }
675
/* Default virt2phys for MMU-less targets: physical == virtual. */
static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
682
/* Default mmu query for targets without an MMU: always reports disabled. */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
688
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static inline void target_reset_examined(struct target *target)
{
	target->examined = false;
}
697
/* Default examine handler: nothing to probe, just mark the target examined. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
703
/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
709
710 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
711 * Keep in sync */
712 int target_examine_one(struct target *target)
713 {
714 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
715
716 int retval = target->type->examine(target);
717 if (retval != ERROR_OK) {
718 target_reset_examined(target);
719 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
720 return retval;
721 }
722
723 target_set_examined(target);
724 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
725
726 return ERROR_OK;
727 }
728
729 static int jtag_enable_callback(enum jtag_event event, void *priv)
730 {
731 struct target *target = priv;
732
733 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
734 return ERROR_OK;
735
736 jtag_unregister_event_callback(jtag_enable_callback, target);
737
738 return target_examine_one(target);
739 }
740
741 /* Targets that correctly implement init + examine, i.e.
742 * no communication with target during init:
743 *
744 * XScale
745 */
/* Examine every configured target; keeps going past failures and
 * returns the last non-OK result (ERROR_OK if all succeeded). */
int target_examine(void)
{
	int retval = ERROR_OK;
	struct target *target;

	for (target = all_targets; target; target = target->next) {
		/* defer examination, but don't skip it */
		if (!target->tap->enabled) {
			/* examine later, once the TAP is enabled (see jtag_enable_callback) */
			jtag_register_event_callback(jtag_enable_callback,
					target);
			continue;
		}

		if (target->defer_examine)
			continue;

		int retval2 = target_examine_one(target);
		if (retval2 != ERROR_OK) {
			LOG_WARNING("target %s examination failed", target_name(target));
			retval = retval2;
		}
	}
	return retval;
}
770
/* Return the name of the target's driver type (e.g. "cortex_m"). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
775
776 static int target_soft_reset_halt(struct target *target)
777 {
778 if (!target_was_examined(target)) {
779 LOG_ERROR("Target not examined yet");
780 return ERROR_FAIL;
781 }
782 if (!target->type->soft_reset_halt) {
783 LOG_ERROR("Target %s does not support soft_reset_halt",
784 target_name(target));
785 return ERROR_FAIL;
786 }
787 return target->type->soft_reset_halt(target);
788 }
789
790 /**
791 * Downloads a target-specific native code algorithm to the target,
792 * and executes it. * Note that some targets may need to set up, enable,
793 * and tear down a breakpoint (hard or * soft) to detect algorithm
794 * termination, while others may support lower overhead schemes where
795 * soft breakpoints embedded in the algorithm automatically terminate the
796 * algorithm.
797 *
798 * @param target used to run the algorithm
799 * @param num_mem_params
800 * @param mem_params
801 * @param num_reg_params
802 * @param reg_param
803 * @param entry_point
804 * @param exit_point
805 * @param timeout_ms
806 * @param arch_info target-specific description of the algorithm.
807 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		target_addr_t entry_point, target_addr_t exit_point,
		unsigned int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	/* running_alg guards against a concurrent start (checked by
	 * target_start_algorithm) and is cleared on reset processing */
	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
836
837 /**
838 * Executes a target-specific native code algorithm and leaves it running.
839 *
840 * @param target used to run the algorithm
841 * @param num_mem_params
842 * @param mem_params
843 * @param num_reg_params
844 * @param reg_params
845 * @param entry_point
846 * @param exit_point
847 * @param arch_info target-specific description of the algorithm.
848 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t entry_point, target_addr_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	/* only one algorithm may run at a time per target */
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	/* set before starting; cleared later by target_wait_algorithm */
	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}
880
881 /**
882 * Waits for an algorithm started with target_start_algorithm() to complete.
883 *
884 * @param target used to run the algorithm
885 * @param num_mem_params
886 * @param mem_params
887 * @param num_reg_params
888 * @param reg_params
889 * @param exit_point
890 * @param timeout_ms
891 * @param arch_info target-specific description of the algorithm.
892 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t exit_point, unsigned int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	/* on timeout the algorithm is still running, so keep running_alg set
	 * and let the caller wait again */
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}
921
922 /**
923 * Streams data to a circular buffer on target intended for consumption by code
924 * running asynchronously on target.
925 *
926 * This is intended for applications where target-specific native code runs
927 * on the target, receives data from the circular buffer, does something with
928 * it (most likely writing it to a flash memory), and advances the circular
929 * buffer pointer.
930 *
931 * This assumes that the helper algorithm has already been loaded to the target,
932 * but has not been started yet. Given memory and register parameters are passed
933 * to the algorithm.
934 *
935 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
936 * following format:
937 *
938 * [buffer_start + 0, buffer_start + 4):
939 * Write Pointer address (aka head). Written and updated by this
940 * routine when new data is written to the circular buffer.
941 * [buffer_start + 4, buffer_start + 8):
942 * Read Pointer address (aka tail). Updated by code running on the
943 * target after it consumes data.
944 * [buffer_start + 8, buffer_start + buffer_size):
945 * Circular buffer contents.
946 *
947 * See contrib/loaders/flash/stm32f1x.S for an example.
948 *
949 * @param target used to run the algorithm
950 * @param buffer address on the host where data to be sent is located
951 * @param count number of blocks to send
952 * @param block_size size in bytes of each block
953 * @param num_mem_params count of memory-based params to pass to algorithm
954 * @param mem_params memory-based params to pass to algorithm
955 * @param num_reg_params count of register-based params to pass to algorithm
956 * @param reg_params memory-based params to pass to algorithm
957 * @param buffer_start address on the target of the circular buffer structure
958 * @param buffer_size size of the circular buffer structure
959 * @param entry_point address on the target to execute to start the algorithm
960 * @param exit_point address at which to set a breakpoint to catch the
961 * end of the algorithm; can be 0 if target triggers a breakpoint itself
962 * @param arch_info
963 */
964
965 int target_run_flash_async_algorithm(struct target *target,
966 const uint8_t *buffer, uint32_t count, int block_size,
967 int num_mem_params, struct mem_param *mem_params,
968 int num_reg_params, struct reg_param *reg_params,
969 uint32_t buffer_start, uint32_t buffer_size,
970 uint32_t entry_point, uint32_t exit_point, void *arch_info)
971 {
972 int retval;
973 int timeout = 0;
974
975 const uint8_t *buffer_orig = buffer;
976
977 /* Set up working area. First word is write pointer, second word is read pointer,
978 * rest is fifo data area. */
979 uint32_t wp_addr = buffer_start;
980 uint32_t rp_addr = buffer_start + 4;
981 uint32_t fifo_start_addr = buffer_start + 8;
982 uint32_t fifo_end_addr = buffer_start + buffer_size;
983
984 uint32_t wp = fifo_start_addr;
985 uint32_t rp = fifo_start_addr;
986
987 /* validate block_size is 2^n */
988 assert(IS_PWR_OF_2(block_size));
989
990 retval = target_write_u32(target, wp_addr, wp);
991 if (retval != ERROR_OK)
992 return retval;
993 retval = target_write_u32(target, rp_addr, rp);
994 if (retval != ERROR_OK)
995 return retval;
996
997 /* Start up algorithm on target and let it idle while writing the first chunk */
998 retval = target_start_algorithm(target, num_mem_params, mem_params,
999 num_reg_params, reg_params,
1000 entry_point,
1001 exit_point,
1002 arch_info);
1003
1004 if (retval != ERROR_OK) {
1005 LOG_ERROR("error starting target flash write algorithm");
1006 return retval;
1007 }
1008
1009 while (count > 0) {
1010
1011 retval = target_read_u32(target, rp_addr, &rp);
1012 if (retval != ERROR_OK) {
1013 LOG_ERROR("failed to get read pointer");
1014 break;
1015 }
1016
1017 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1018 (size_t) (buffer - buffer_orig), count, wp, rp);
1019
1020 if (rp == 0) {
1021 LOG_ERROR("flash write algorithm aborted by target");
1022 retval = ERROR_FLASH_OPERATION_FAILED;
1023 break;
1024 }
1025
1026 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1027 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1028 break;
1029 }
1030
1031 /* Count the number of bytes available in the fifo without
1032 * crossing the wrap around. Make sure to not fill it completely,
1033 * because that would make wp == rp and that's the empty condition. */
1034 uint32_t thisrun_bytes;
1035 if (rp > wp)
1036 thisrun_bytes = rp - wp - block_size;
1037 else if (rp > fifo_start_addr)
1038 thisrun_bytes = fifo_end_addr - wp;
1039 else
1040 thisrun_bytes = fifo_end_addr - wp - block_size;
1041
1042 if (thisrun_bytes == 0) {
1043 /* Throttle polling a bit if transfer is (much) faster than flash
1044 * programming. The exact delay shouldn't matter as long as it's
1045 * less than buffer size / flash speed. This is very unlikely to
1046 * run when using high latency connections such as USB. */
1047 alive_sleep(2);
1048
1049 /* to stop an infinite loop on some targets check and increment a timeout
1050 * this issue was observed on a stellaris using the new ICDI interface */
1051 if (timeout++ >= 2500) {
1052 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1053 return ERROR_FLASH_OPERATION_FAILED;
1054 }
1055 continue;
1056 }
1057
1058 /* reset our timeout */
1059 timeout = 0;
1060
1061 /* Limit to the amount of data we actually want to write */
1062 if (thisrun_bytes > count * block_size)
1063 thisrun_bytes = count * block_size;
1064
1065 /* Force end of large blocks to be word aligned */
1066 if (thisrun_bytes >= 16)
1067 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1068
1069 /* Write data to fifo */
1070 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1071 if (retval != ERROR_OK)
1072 break;
1073
1074 /* Update counters and wrap write pointer */
1075 buffer += thisrun_bytes;
1076 count -= thisrun_bytes / block_size;
1077 wp += thisrun_bytes;
1078 if (wp >= fifo_end_addr)
1079 wp = fifo_start_addr;
1080
1081 /* Store updated write pointer to target */
1082 retval = target_write_u32(target, wp_addr, wp);
1083 if (retval != ERROR_OK)
1084 break;
1085
1086 /* Avoid GDB timeouts */
1087 keep_alive();
1088 }
1089
1090 if (retval != ERROR_OK) {
1091 /* abort flash write algorithm on target */
1092 target_write_u32(target, wp_addr, 0);
1093 }
1094
1095 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1096 num_reg_params, reg_params,
1097 exit_point,
1098 10000,
1099 arch_info);
1100
1101 if (retval2 != ERROR_OK) {
1102 LOG_ERROR("error waiting for target flash write algorithm");
1103 retval = retval2;
1104 }
1105
1106 if (retval == ERROR_OK) {
1107 /* check if algorithm set rp = 0 after fifo writer loop finished */
1108 retval = target_read_u32(target, rp_addr, &rp);
1109 if (retval == ERROR_OK && rp == 0) {
1110 LOG_ERROR("flash write algorithm aborted by target");
1111 retval = ERROR_FLASH_OPERATION_FAILED;
1112 }
1113 }
1114
1115 return retval;
1116 }
1117
1118 int target_run_read_async_algorithm(struct target *target,
1119 uint8_t *buffer, uint32_t count, int block_size,
1120 int num_mem_params, struct mem_param *mem_params,
1121 int num_reg_params, struct reg_param *reg_params,
1122 uint32_t buffer_start, uint32_t buffer_size,
1123 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1124 {
1125 int retval;
1126 int timeout = 0;
1127
1128 const uint8_t *buffer_orig = buffer;
1129
1130 /* Set up working area. First word is write pointer, second word is read pointer,
1131 * rest is fifo data area. */
1132 uint32_t wp_addr = buffer_start;
1133 uint32_t rp_addr = buffer_start + 4;
1134 uint32_t fifo_start_addr = buffer_start + 8;
1135 uint32_t fifo_end_addr = buffer_start + buffer_size;
1136
1137 uint32_t wp = fifo_start_addr;
1138 uint32_t rp = fifo_start_addr;
1139
1140 /* validate block_size is 2^n */
1141 assert(IS_PWR_OF_2(block_size));
1142
1143 retval = target_write_u32(target, wp_addr, wp);
1144 if (retval != ERROR_OK)
1145 return retval;
1146 retval = target_write_u32(target, rp_addr, rp);
1147 if (retval != ERROR_OK)
1148 return retval;
1149
1150 /* Start up algorithm on target */
1151 retval = target_start_algorithm(target, num_mem_params, mem_params,
1152 num_reg_params, reg_params,
1153 entry_point,
1154 exit_point,
1155 arch_info);
1156
1157 if (retval != ERROR_OK) {
1158 LOG_ERROR("error starting target flash read algorithm");
1159 return retval;
1160 }
1161
1162 while (count > 0) {
1163 retval = target_read_u32(target, wp_addr, &wp);
1164 if (retval != ERROR_OK) {
1165 LOG_ERROR("failed to get write pointer");
1166 break;
1167 }
1168
1169 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1170 (size_t)(buffer - buffer_orig), count, wp, rp);
1171
1172 if (wp == 0) {
1173 LOG_ERROR("flash read algorithm aborted by target");
1174 retval = ERROR_FLASH_OPERATION_FAILED;
1175 break;
1176 }
1177
1178 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1179 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1180 break;
1181 }
1182
1183 /* Count the number of bytes available in the fifo without
1184 * crossing the wrap around. */
1185 uint32_t thisrun_bytes;
1186 if (wp >= rp)
1187 thisrun_bytes = wp - rp;
1188 else
1189 thisrun_bytes = fifo_end_addr - rp;
1190
1191 if (thisrun_bytes == 0) {
1192 /* Throttle polling a bit if transfer is (much) faster than flash
1193 * reading. The exact delay shouldn't matter as long as it's
1194 * less than buffer size / flash speed. This is very unlikely to
1195 * run when using high latency connections such as USB. */
1196 alive_sleep(2);
1197
1198 /* to stop an infinite loop on some targets check and increment a timeout
1199 * this issue was observed on a stellaris using the new ICDI interface */
1200 if (timeout++ >= 2500) {
1201 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1202 return ERROR_FLASH_OPERATION_FAILED;
1203 }
1204 continue;
1205 }
1206
1207 /* Reset our timeout */
1208 timeout = 0;
1209
1210 /* Limit to the amount of data we actually want to read */
1211 if (thisrun_bytes > count * block_size)
1212 thisrun_bytes = count * block_size;
1213
1214 /* Force end of large blocks to be word aligned */
1215 if (thisrun_bytes >= 16)
1216 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1217
1218 /* Read data from fifo */
1219 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1220 if (retval != ERROR_OK)
1221 break;
1222
1223 /* Update counters and wrap write pointer */
1224 buffer += thisrun_bytes;
1225 count -= thisrun_bytes / block_size;
1226 rp += thisrun_bytes;
1227 if (rp >= fifo_end_addr)
1228 rp = fifo_start_addr;
1229
1230 /* Store updated write pointer to target */
1231 retval = target_write_u32(target, rp_addr, rp);
1232 if (retval != ERROR_OK)
1233 break;
1234
1235 /* Avoid GDB timeouts */
1236 keep_alive();
1237
1238 }
1239
1240 if (retval != ERROR_OK) {
1241 /* abort flash write algorithm on target */
1242 target_write_u32(target, rp_addr, 0);
1243 }
1244
1245 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1246 num_reg_params, reg_params,
1247 exit_point,
1248 10000,
1249 arch_info);
1250
1251 if (retval2 != ERROR_OK) {
1252 LOG_ERROR("error waiting for target flash write algorithm");
1253 retval = retval2;
1254 }
1255
1256 if (retval == ERROR_OK) {
1257 /* check if algorithm set wp = 0 after fifo writer loop finished */
1258 retval = target_read_u32(target, wp_addr, &wp);
1259 if (retval == ERROR_OK && wp == 0) {
1260 LOG_ERROR("flash read algorithm aborted by target");
1261 retval = ERROR_FLASH_OPERATION_FAILED;
1262 }
1263 }
1264
1265 return retval;
1266 }
1267
1268 int target_read_memory(struct target *target,
1269 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1270 {
1271 if (!target_was_examined(target)) {
1272 LOG_ERROR("Target not examined yet");
1273 return ERROR_FAIL;
1274 }
1275 if (!target->type->read_memory) {
1276 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1277 return ERROR_FAIL;
1278 }
1279 return target->type->read_memory(target, address, size, count, buffer);
1280 }
1281
1282 int target_read_phys_memory(struct target *target,
1283 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1284 {
1285 if (!target_was_examined(target)) {
1286 LOG_ERROR("Target not examined yet");
1287 return ERROR_FAIL;
1288 }
1289 if (!target->type->read_phys_memory) {
1290 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1291 return ERROR_FAIL;
1292 }
1293 return target->type->read_phys_memory(target, address, size, count, buffer);
1294 }
1295
1296 int target_write_memory(struct target *target,
1297 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1298 {
1299 if (!target_was_examined(target)) {
1300 LOG_ERROR("Target not examined yet");
1301 return ERROR_FAIL;
1302 }
1303 if (!target->type->write_memory) {
1304 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1305 return ERROR_FAIL;
1306 }
1307 return target->type->write_memory(target, address, size, count, buffer);
1308 }
1309
1310 int target_write_phys_memory(struct target *target,
1311 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1312 {
1313 if (!target_was_examined(target)) {
1314 LOG_ERROR("Target not examined yet");
1315 return ERROR_FAIL;
1316 }
1317 if (!target->type->write_phys_memory) {
1318 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1319 return ERROR_FAIL;
1320 }
1321 return target->type->write_phys_memory(target, address, size, count, buffer);
1322 }
1323
1324 int target_add_breakpoint(struct target *target,
1325 struct breakpoint *breakpoint)
1326 {
1327 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1328 LOG_TARGET_ERROR(target, "not halted (add breakpoint)");
1329 return ERROR_TARGET_NOT_HALTED;
1330 }
1331 return target->type->add_breakpoint(target, breakpoint);
1332 }
1333
1334 int target_add_context_breakpoint(struct target *target,
1335 struct breakpoint *breakpoint)
1336 {
1337 if (target->state != TARGET_HALTED) {
1338 LOG_TARGET_ERROR(target, "not halted (add context breakpoint)");
1339 return ERROR_TARGET_NOT_HALTED;
1340 }
1341 return target->type->add_context_breakpoint(target, breakpoint);
1342 }
1343
1344 int target_add_hybrid_breakpoint(struct target *target,
1345 struct breakpoint *breakpoint)
1346 {
1347 if (target->state != TARGET_HALTED) {
1348 LOG_TARGET_ERROR(target, "not halted (add hybrid breakpoint)");
1349 return ERROR_TARGET_NOT_HALTED;
1350 }
1351 return target->type->add_hybrid_breakpoint(target, breakpoint);
1352 }
1353
1354 int target_remove_breakpoint(struct target *target,
1355 struct breakpoint *breakpoint)
1356 {
1357 return target->type->remove_breakpoint(target, breakpoint);
1358 }
1359
1360 int target_add_watchpoint(struct target *target,
1361 struct watchpoint *watchpoint)
1362 {
1363 if (target->state != TARGET_HALTED) {
1364 LOG_TARGET_ERROR(target, "not halted (add watchpoint)");
1365 return ERROR_TARGET_NOT_HALTED;
1366 }
1367 return target->type->add_watchpoint(target, watchpoint);
1368 }
1369 int target_remove_watchpoint(struct target *target,
1370 struct watchpoint *watchpoint)
1371 {
1372 return target->type->remove_watchpoint(target, watchpoint);
1373 }
1374 int target_hit_watchpoint(struct target *target,
1375 struct watchpoint **hit_watchpoint)
1376 {
1377 if (target->state != TARGET_HALTED) {
1378 LOG_TARGET_ERROR(target, "not halted (hit watchpoint)");
1379 return ERROR_TARGET_NOT_HALTED;
1380 }
1381
1382 if (!target->type->hit_watchpoint) {
1383 /* For backward compatible, if hit_watchpoint is not implemented,
1384 * return ERROR_FAIL such that gdb_server will not take the nonsense
1385 * information. */
1386 return ERROR_FAIL;
1387 }
1388
1389 return target->type->hit_watchpoint(target, hit_watchpoint);
1390 }
1391
1392 const char *target_get_gdb_arch(struct target *target)
1393 {
1394 if (!target->type->get_gdb_arch)
1395 return NULL;
1396 return target->type->get_gdb_arch(target);
1397 }
1398
1399 int target_get_gdb_reg_list(struct target *target,
1400 struct reg **reg_list[], int *reg_list_size,
1401 enum target_register_class reg_class)
1402 {
1403 int result = ERROR_FAIL;
1404
1405 if (!target_was_examined(target)) {
1406 LOG_ERROR("Target not examined yet");
1407 goto done;
1408 }
1409
1410 result = target->type->get_gdb_reg_list(target, reg_list,
1411 reg_list_size, reg_class);
1412
1413 done:
1414 if (result != ERROR_OK) {
1415 *reg_list = NULL;
1416 *reg_list_size = 0;
1417 }
1418 return result;
1419 }
1420
1421 int target_get_gdb_reg_list_noread(struct target *target,
1422 struct reg **reg_list[], int *reg_list_size,
1423 enum target_register_class reg_class)
1424 {
1425 if (target->type->get_gdb_reg_list_noread &&
1426 target->type->get_gdb_reg_list_noread(target, reg_list,
1427 reg_list_size, reg_class) == ERROR_OK)
1428 return ERROR_OK;
1429 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1430 }
1431
1432 bool target_supports_gdb_connection(struct target *target)
1433 {
1434 /*
1435 * exclude all the targets that don't provide get_gdb_reg_list
1436 * or that have explicit gdb_max_connection == 0
1437 */
1438 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1439 }
1440
1441 int target_step(struct target *target,
1442 int current, target_addr_t address, int handle_breakpoints)
1443 {
1444 int retval;
1445
1446 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1447
1448 retval = target->type->step(target, current, address, handle_breakpoints);
1449 if (retval != ERROR_OK)
1450 return retval;
1451
1452 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1453
1454 return retval;
1455 }
1456
1457 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1458 {
1459 if (target->state != TARGET_HALTED) {
1460 LOG_TARGET_ERROR(target, "not halted (gdb fileio)");
1461 return ERROR_TARGET_NOT_HALTED;
1462 }
1463 return target->type->get_gdb_fileio_info(target, fileio_info);
1464 }
1465
1466 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1467 {
1468 if (target->state != TARGET_HALTED) {
1469 LOG_TARGET_ERROR(target, "not halted (gdb fileio end)");
1470 return ERROR_TARGET_NOT_HALTED;
1471 }
1472 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1473 }
1474
1475 target_addr_t target_address_max(struct target *target)
1476 {
1477 unsigned bits = target_address_bits(target);
1478 if (sizeof(target_addr_t) * 8 == bits)
1479 return (target_addr_t) -1;
1480 else
1481 return (((target_addr_t) 1) << bits) - 1;
1482 }
1483
1484 unsigned target_address_bits(struct target *target)
1485 {
1486 if (target->type->address_bits)
1487 return target->type->address_bits(target);
1488 return 32;
1489 }
1490
1491 unsigned int target_data_bits(struct target *target)
1492 {
1493 if (target->type->data_bits)
1494 return target->type->data_bits(target);
1495 return 32;
1496 }
1497
/* Dispatch to the driver's sampling-based profiling hook.  A default hook is
 * installed by target_init_one() when the driver provides none, so the
 * pointer is always valid here. */
static int target_profiling(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	return target->type->profiling(target, samples, max_num_samples,
			num_samples, seconds);
}
1504
1505 static int handle_target(void *priv);
1506
/* Prepare a single target for use: reset its examined flag, install default
 * implementations for any optional driver hooks left unset, run the driver's
 * mandatory init_target(), and sanity-check / stub the MMU-related hooks.
 * Note this mutates the (possibly shared) target_type vtable in place. */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (!type->examine)
		type->examine = default_examine;

	if (!type->check_reset)
		type->check_reset = default_check_reset;

	/* init_target is mandatory for every driver. */
	assert(type->init_target);

	int retval = type->init_target(cmd_ctx, target);
	if (retval != ERROR_OK) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (!type->virt2phys) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* Install generic fallbacks for the remaining optional hooks. */
	if (!target->type->read_buffer)
		target->type->read_buffer = target_read_buffer_default;

	if (!target->type->write_buffer)
		target->type->write_buffer = target_write_buffer_default;

	if (!target->type->get_gdb_fileio_info)
		target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;

	if (!target->type->gdb_fileio_end)
		target->type->gdb_fileio_end = target_gdb_fileio_end_default;

	if (!target->type->profiling)
		target->type->profiling = target_profiling_default;

	return ERROR_OK;
}
1566
1567 static int target_init(struct command_context *cmd_ctx)
1568 {
1569 struct target *target;
1570 int retval;
1571
1572 for (target = all_targets; target; target = target->next) {
1573 retval = target_init_one(cmd_ctx, target);
1574 if (retval != ERROR_OK)
1575 return retval;
1576 }
1577
1578 if (!all_targets)
1579 return ERROR_OK;
1580
1581 retval = target_register_user_commands(cmd_ctx);
1582 if (retval != ERROR_OK)
1583 return retval;
1584
1585 retval = target_register_timer_callback(&handle_target,
1586 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1587 if (retval != ERROR_OK)
1588 return retval;
1589
1590 return ERROR_OK;
1591 }
1592
1593 COMMAND_HANDLER(handle_target_init_command)
1594 {
1595 int retval;
1596
1597 if (CMD_ARGC != 0)
1598 return ERROR_COMMAND_SYNTAX_ERROR;
1599
1600 static bool target_initialized;
1601 if (target_initialized) {
1602 LOG_INFO("'target init' has already been called");
1603 return ERROR_OK;
1604 }
1605 target_initialized = true;
1606
1607 retval = command_run_line(CMD_CTX, "init_targets");
1608 if (retval != ERROR_OK)
1609 return retval;
1610
1611 retval = command_run_line(CMD_CTX, "init_target_events");
1612 if (retval != ERROR_OK)
1613 return retval;
1614
1615 retval = command_run_line(CMD_CTX, "init_board");
1616 if (retval != ERROR_OK)
1617 return retval;
1618
1619 LOG_DEBUG("Initializing targets...");
1620 return target_init(CMD_CTX);
1621 }
1622
1623 int target_register_event_callback(int (*callback)(struct target *target,
1624 enum target_event event, void *priv), void *priv)
1625 {
1626 struct target_event_callback **callbacks_p = &target_event_callbacks;
1627
1628 if (!callback)
1629 return ERROR_COMMAND_SYNTAX_ERROR;
1630
1631 if (*callbacks_p) {
1632 while ((*callbacks_p)->next)
1633 callbacks_p = &((*callbacks_p)->next);
1634 callbacks_p = &((*callbacks_p)->next);
1635 }
1636
1637 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1638 (*callbacks_p)->callback = callback;
1639 (*callbacks_p)->priv = priv;
1640 (*callbacks_p)->next = NULL;
1641
1642 return ERROR_OK;
1643 }
1644
1645 int target_register_reset_callback(int (*callback)(struct target *target,
1646 enum target_reset_mode reset_mode, void *priv), void *priv)
1647 {
1648 struct target_reset_callback *entry;
1649
1650 if (!callback)
1651 return ERROR_COMMAND_SYNTAX_ERROR;
1652
1653 entry = malloc(sizeof(struct target_reset_callback));
1654 if (!entry) {
1655 LOG_ERROR("error allocating buffer for reset callback entry");
1656 return ERROR_COMMAND_SYNTAX_ERROR;
1657 }
1658
1659 entry->callback = callback;
1660 entry->priv = priv;
1661 list_add(&entry->list, &target_reset_callback_list);
1662
1663
1664 return ERROR_OK;
1665 }
1666
1667 int target_register_trace_callback(int (*callback)(struct target *target,
1668 size_t len, uint8_t *data, void *priv), void *priv)
1669 {
1670 struct target_trace_callback *entry;
1671
1672 if (!callback)
1673 return ERROR_COMMAND_SYNTAX_ERROR;
1674
1675 entry = malloc(sizeof(struct target_trace_callback));
1676 if (!entry) {
1677 LOG_ERROR("error allocating buffer for trace callback entry");
1678 return ERROR_COMMAND_SYNTAX_ERROR;
1679 }
1680
1681 entry->callback = callback;
1682 entry->priv = priv;
1683 list_add(&entry->list, &target_trace_callback_list);
1684
1685
1686 return ERROR_OK;
1687 }
1688
1689 int target_register_timer_callback(int (*callback)(void *priv),
1690 unsigned int time_ms, enum target_timer_type type, void *priv)
1691 {
1692 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1693
1694 if (!callback)
1695 return ERROR_COMMAND_SYNTAX_ERROR;
1696
1697 if (*callbacks_p) {
1698 while ((*callbacks_p)->next)
1699 callbacks_p = &((*callbacks_p)->next);
1700 callbacks_p = &((*callbacks_p)->next);
1701 }
1702
1703 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1704 (*callbacks_p)->callback = callback;
1705 (*callbacks_p)->type = type;
1706 (*callbacks_p)->time_ms = time_ms;
1707 (*callbacks_p)->removed = false;
1708
1709 (*callbacks_p)->when = timeval_ms() + time_ms;
1710 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1711
1712 (*callbacks_p)->priv = priv;
1713 (*callbacks_p)->next = NULL;
1714
1715 return ERROR_OK;
1716 }
1717
1718 int target_unregister_event_callback(int (*callback)(struct target *target,
1719 enum target_event event, void *priv), void *priv)
1720 {
1721 struct target_event_callback **p = &target_event_callbacks;
1722 struct target_event_callback *c = target_event_callbacks;
1723
1724 if (!callback)
1725 return ERROR_COMMAND_SYNTAX_ERROR;
1726
1727 while (c) {
1728 struct target_event_callback *next = c->next;
1729 if ((c->callback == callback) && (c->priv == priv)) {
1730 *p = next;
1731 free(c);
1732 return ERROR_OK;
1733 } else
1734 p = &(c->next);
1735 c = next;
1736 }
1737
1738 return ERROR_OK;
1739 }
1740
1741 int target_unregister_reset_callback(int (*callback)(struct target *target,
1742 enum target_reset_mode reset_mode, void *priv), void *priv)
1743 {
1744 struct target_reset_callback *entry;
1745
1746 if (!callback)
1747 return ERROR_COMMAND_SYNTAX_ERROR;
1748
1749 list_for_each_entry(entry, &target_reset_callback_list, list) {
1750 if (entry->callback == callback && entry->priv == priv) {
1751 list_del(&entry->list);
1752 free(entry);
1753 break;
1754 }
1755 }
1756
1757 return ERROR_OK;
1758 }
1759
1760 int target_unregister_trace_callback(int (*callback)(struct target *target,
1761 size_t len, uint8_t *data, void *priv), void *priv)
1762 {
1763 struct target_trace_callback *entry;
1764
1765 if (!callback)
1766 return ERROR_COMMAND_SYNTAX_ERROR;
1767
1768 list_for_each_entry(entry, &target_trace_callback_list, list) {
1769 if (entry->callback == callback && entry->priv == priv) {
1770 list_del(&entry->list);
1771 free(entry);
1772 break;
1773 }
1774 }
1775
1776 return ERROR_OK;
1777 }
1778
1779 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1780 {
1781 if (!callback)
1782 return ERROR_COMMAND_SYNTAX_ERROR;
1783
1784 for (struct target_timer_callback *c = target_timer_callbacks;
1785 c; c = c->next) {
1786 if ((c->callback == callback) && (c->priv == priv)) {
1787 c->removed = true;
1788 return ERROR_OK;
1789 }
1790 }
1791
1792 return ERROR_FAIL;
1793 }
1794
1795 int target_call_event_callbacks(struct target *target, enum target_event event)
1796 {
1797 struct target_event_callback *callback = target_event_callbacks;
1798 struct target_event_callback *next_callback;
1799
1800 if (event == TARGET_EVENT_HALTED) {
1801 /* execute early halted first */
1802 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1803 }
1804
1805 LOG_DEBUG("target event %i (%s) for core %s", event,
1806 target_event_name(event),
1807 target_name(target));
1808
1809 target_handle_event(target, event);
1810
1811 while (callback) {
1812 next_callback = callback->next;
1813 callback->callback(target, event, callback->priv);
1814 callback = next_callback;
1815 }
1816
1817 return ERROR_OK;
1818 }
1819
1820 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1821 {
1822 struct target_reset_callback *callback;
1823
1824 LOG_DEBUG("target reset %i (%s)", reset_mode,
1825 nvp_value2name(nvp_reset_modes, reset_mode)->name);
1826
1827 list_for_each_entry(callback, &target_reset_callback_list, list)
1828 callback->callback(target, reset_mode, callback->priv);
1829
1830 return ERROR_OK;
1831 }
1832
1833 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1834 {
1835 struct target_trace_callback *callback;
1836
1837 list_for_each_entry(callback, &target_trace_callback_list, list)
1838 callback->callback(target, len, data, callback->priv);
1839
1840 return ERROR_OK;
1841 }
1842
1843 static int target_timer_callback_periodic_restart(
1844 struct target_timer_callback *cb, int64_t *now)
1845 {
1846 cb->when = *now + cb->time_ms;
1847 return ERROR_OK;
1848 }
1849
1850 static int target_call_timer_callback(struct target_timer_callback *cb,
1851 int64_t *now)
1852 {
1853 cb->callback(cb->priv);
1854
1855 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1856 return target_timer_callback_periodic_restart(cb, now);
1857
1858 return target_unregister_timer_callback(cb->callback, cb->priv);
1859 }
1860
/* Walk the timer-callback list: free entries flagged as removed, fire every
 * callback that is due (or, with checktime == 0, every periodic callback
 * regardless of deadline), and recompute target_timer_next_event_value as
 * the earliest surviving deadline. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		/* Entries are only flagged for removal elsewhere (possibly from
		 * inside a callback); they are actually unlinked and freed here. */
		if ((*callback)->removed) {
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* Track the earliest deadline among entries still alive (a
		 * one-shot callback flags itself removed when it fires). */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1908
/* Run all timer callbacks whose deadline has passed. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1913
/* invoke periodic callbacks immediately, regardless of their deadlines */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1919
/* Return the earliest pending timer-callback deadline (in the timeval_ms()
 * time base), as recomputed by the most recent dispatch pass. */
int64_t target_timer_next_event(void)
{
	return target_timer_next_event_value;
}
1924
1925 /* Prints the working area layout for debug purposes */
1926 static void print_wa_layout(struct target *target)
1927 {
1928 struct working_area *c = target->working_areas;
1929
1930 while (c) {
1931 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1932 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1933 c->address, c->address + c->size - 1, c->size);
1934 c = c->next;
1935 }
1936 }
1937
1938 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1939 static void target_split_working_area(struct working_area *area, uint32_t size)
1940 {
1941 assert(area->free); /* Shouldn't split an allocated area */
1942 assert(size <= area->size); /* Caller should guarantee this */
1943
1944 /* Split only if not already the right size */
1945 if (size < area->size) {
1946 struct working_area *new_wa = malloc(sizeof(*new_wa));
1947
1948 if (!new_wa)
1949 return;
1950
1951 new_wa->next = area->next;
1952 new_wa->size = area->size - size;
1953 new_wa->address = area->address + size;
1954 new_wa->backup = NULL;
1955 new_wa->user = NULL;
1956 new_wa->free = true;
1957
1958 area->next = new_wa;
1959 area->size = size;
1960
1961 /* If backup memory was allocated to this area, it has the wrong size
1962 * now so free it and it will be reallocated if/when needed */
1963 free(area->backup);
1964 area->backup = NULL;
1965 }
1966 }
1967
1968 /* Merge all adjacent free areas into one */
1969 static void target_merge_working_areas(struct target *target)
1970 {
1971 struct working_area *c = target->working_areas;
1972
1973 while (c && c->next) {
1974 assert(c->next->address == c->address + c->size); /* This is an invariant */
1975
1976 /* Find two adjacent free areas */
1977 if (c->free && c->next->free) {
1978 /* Merge the last into the first */
1979 c->size += c->next->size;
1980
1981 /* Remove the last */
1982 struct working_area *to_be_freed = c->next;
1983 c->next = c->next->next;
1984 free(to_be_freed->backup);
1985 free(to_be_freed);
1986
1987 /* If backup memory was allocated to the remaining area, it's has
1988 * the wrong size now */
1989 free(c->backup);
1990 c->backup = NULL;
1991 } else {
1992 c = c->next;
1993 }
1994 }
1995 }
1996
/**
 * Try to allocate a chunk of the target's working area.
 *
 * On the first call (empty area list) the working area base address is
 * selected from -work-area-phys or -work-area-virt according to the current
 * MMU state, and a single free area covering the whole region is created.
 * Subsequent calls carve allocations out of the list, first-fit.
 *
 * @param target target to allocate on
 * @param size requested size in bytes (rounded up to a multiple of 4)
 * @param area out parameter: receives the allocated area descriptor
 * @returns ERROR_OK, ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no fitting
 * free area exists, or an error from the MMU query / backup read.
 */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = ALIGN_DOWN(target->working_area_size, 4); /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* NOTE: if malloc failed, new_wa is NULL and the list stays
		 * empty; the search below then fails with
		 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	size = ALIGN_UP(size, 4);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
		  size, c->address);

	/* Save the original contents so they can be restored when the area
	 * is freed, if backups are enabled for this target */
	if (target->backup_working_area) {
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2090
2091 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2092 {
2093 int retval;
2094
2095 retval = target_alloc_working_area_try(target, size, area);
2096 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2097 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2098 return retval;
2099
2100 }
2101
2102 static int target_restore_working_area(struct target *target, struct working_area *area)
2103 {
2104 int retval = ERROR_OK;
2105
2106 if (target->backup_working_area && area->backup) {
2107 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2108 if (retval != ERROR_OK)
2109 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2110 area->size, area->address);
2111 }
2112
2113 return retval;
2114 }
2115
/* Restore the area's backup memory, if any, and return the area to the allocation pool */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	/* Freeing NULL or an already-free area is a no-op */
	if (!area || area->free)
		return ERROR_OK;

	int retval = ERROR_OK;
	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
			area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	/* Coalesce adjacent free areas so large requests can succeed later */
	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}
2148
/* Return @area to the pool, restoring its saved memory contents first. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	const int restore = 1;
	return target_free_working_area_restore(target, area, restore);
}
2153
/* free resources and restore memory, if restoring memory fails,
 * free up resources anyway
 */
static void target_free_all_working_areas_restore(struct target *target, int restore)
{
	struct working_area *c = target->working_areas;

	LOG_DEBUG("freeing all working areas");

	/* Loop through all areas, restoring the allocated ones and marking them as free */
	while (c) {
		if (!c->free) {
			if (restore)
				target_restore_working_area(target, c);
			/* restore errors are deliberately ignored here: the
			 * resources are released regardless */
			c->free = true;
			*c->user = NULL; /* Same as above */
			c->user = NULL;
		}
		c = c->next;
	}

	/* Run a merge pass to combine all areas into one */
	target_merge_working_areas(target);

	print_wa_layout(target);
}
2180
2181 void target_free_all_working_areas(struct target *target)
2182 {
2183 target_free_all_working_areas_restore(target, 1);
2184
2185 /* Now we have none or only one working area marked as free */
2186 if (target->working_areas) {
2187 /* Free the last one to allow on-the-fly moving and resizing */
2188 free(target->working_areas->backup);
2189 free(target->working_areas);
2190 target->working_areas = NULL;
2191 }
2192 }
2193
2194 /* Find the largest number of bytes that can be allocated */
2195 uint32_t target_get_working_area_avail(struct target *target)
2196 {
2197 struct working_area *c = target->working_areas;
2198 uint32_t max_size = 0;
2199
2200 if (!c)
2201 return ALIGN_DOWN(target->working_area_size, 4);
2202
2203 while (c) {
2204 if (c->free && max_size < c->size)
2205 max_size = c->size;
2206
2207 c = c->next;
2208 }
2209
2210 return max_size;
2211 }
2212
/* Free every resource owned by @target, then the target struct itself.
 * Called per-target from target_quit() at shutdown. */
static void target_destroy(struct target *target)
{
	/* Remove all remaining break- and watchpoints so their resources
	 * are released before the target goes away */
	breakpoint_remove_all(target);
	watchpoint_remove_all(target);

	/* Give the target driver a chance to clean up its private state */
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	if (target->semihosting)
		free(target->semihosting->basedir);
	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* Free the Tcl event-handler chain, dropping each script body's
	 * interpreter reference */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head, *tmp;

		list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
			list_del(&head->lh);
			head->target->smp = 0;
			free(head);
		}
		/* the shared static empty list must not be freed */
		if (target->smp_targets != &empty_smp_targets)
			free(target->smp_targets);
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2260
2261 void target_quit(void)
2262 {
2263 struct target_event_callback *pe = target_event_callbacks;
2264 while (pe) {
2265 struct target_event_callback *t = pe->next;
2266 free(pe);
2267 pe = t;
2268 }
2269 target_event_callbacks = NULL;
2270
2271 struct target_timer_callback *pt = target_timer_callbacks;
2272 while (pt) {
2273 struct target_timer_callback *t = pt->next;
2274 free(pt);
2275 pt = t;
2276 }
2277 target_timer_callbacks = NULL;
2278
2279 for (struct target *target = all_targets; target;) {
2280 struct target *tmp;
2281
2282 tmp = target->next;
2283 target_destroy(target);
2284 target = tmp;
2285 }
2286
2287 all_targets = NULL;
2288 }
2289
2290 int target_arch_state(struct target *target)
2291 {
2292 int retval;
2293 if (!target) {
2294 LOG_WARNING("No target has been configured");
2295 return ERROR_OK;
2296 }
2297
2298 if (target->state != TARGET_HALTED)
2299 return ERROR_OK;
2300
2301 retval = target->type->arch_state(target);
2302 return retval;
2303 }
2304
2305 static int target_get_gdb_fileio_info_default(struct target *target,
2306 struct gdb_fileio_info *fileio_info)
2307 {
2308 /* If target does not support semi-hosting function, target
2309 has no need to provide .get_gdb_fileio_info callback.
2310 It just return ERROR_FAIL and gdb_server will return "Txx"
2311 as target halted every time. */
2312 return ERROR_FAIL;
2313 }
2314
2315 static int target_gdb_fileio_end_default(struct target *target,
2316 int retcode, int fileio_errno, bool ctrl_c)
2317 {
2318 return ERROR_OK;
2319 }
2320
2321 int target_profiling_default(struct target *target, uint32_t *samples,
2322 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2323 {
2324 struct timeval timeout, now;
2325
2326 gettimeofday(&timeout, NULL);
2327 timeval_add_time(&timeout, seconds, 0);
2328
2329 LOG_INFO("Starting profiling. Halting and resuming the"
2330 " target as often as we can...");
2331
2332 uint32_t sample_count = 0;
2333 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2334 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2335
2336 int retval = ERROR_OK;
2337 for (;;) {
2338 target_poll(target);
2339 if (target->state == TARGET_HALTED) {
2340 uint32_t t = buf_get_u32(reg->value, 0, 32);
2341 samples[sample_count++] = t;
2342 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2343 retval = target_resume(target, 1, 0, 0, 0);
2344 target_poll(target);
2345 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2346 } else if (target->state == TARGET_RUNNING) {
2347 /* We want to quickly sample the PC. */
2348 retval = target_halt(target);
2349 } else {
2350 LOG_INFO("Target not halted or running");
2351 retval = ERROR_OK;
2352 break;
2353 }
2354
2355 if (retval != ERROR_OK)
2356 break;
2357
2358 gettimeofday(&now, NULL);
2359 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2360 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2361 break;
2362 }
2363 }
2364
2365 *num_samples = sample_count;
2366 return retval;
2367 }
2368
2369 /* Single aligned words are guaranteed to use 16 or 32 bit access
2370 * mode respectively, otherwise data is handled as quickly as
2371 * possible
2372 */
2373 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2374 {
2375 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2376 size, address);
2377
2378 if (!target_was_examined(target)) {
2379 LOG_ERROR("Target not examined yet");
2380 return ERROR_FAIL;
2381 }
2382
2383 if (size == 0)
2384 return ERROR_OK;
2385
2386 if ((address + size - 1) < address) {
2387 /* GDB can request this when e.g. PC is 0xfffffffc */
2388 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2389 address,
2390 size);
2391 return ERROR_FAIL;
2392 }
2393
2394 return target->type->write_buffer(target, address, size, buffer);
2395 }
2396
/* Generic fallback for .write_buffer: splits the transfer into the widest
 * naturally-aligned accesses the target supports.
 * @param count number of BYTES to write (not elements) */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	/* widest single memory access the target supports, in bytes */
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* address misaligned at this power of two: emit a single
		 * access of this width to fix the alignment */
		if (address & size) {
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* number of bytes coverable by accesses of this width */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2433
2434 /* Single aligned words are guaranteed to use 16 or 32 bit access
2435 * mode respectively, otherwise data is handled as quickly as
2436 * possible
2437 */
2438 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2439 {
2440 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2441 size, address);
2442
2443 if (!target_was_examined(target)) {
2444 LOG_ERROR("Target not examined yet");
2445 return ERROR_FAIL;
2446 }
2447
2448 if (size == 0)
2449 return ERROR_OK;
2450
2451 if ((address + size - 1) < address) {
2452 /* GDB can request this when e.g. PC is 0xfffffffc */
2453 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2454 address,
2455 size);
2456 return ERROR_FAIL;
2457 }
2458
2459 return target->type->read_buffer(target, address, size, buffer);
2460 }
2461
/* Generic fallback for .read_buffer: splits the transfer into the widest
 * naturally-aligned accesses the target supports.
 * @param count number of BYTES to read (not elements) */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	/* widest single memory access the target supports, in bytes */
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* address misaligned at this power of two: emit a single
		 * access of this width to fix the alignment */
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* number of bytes coverable by accesses of this width */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2497
/**
 * Compute a CRC over @size bytes of target memory at @address.
 *
 * Prefers the target's own (typically on-target, much faster)
 * checksum_memory implementation; if that fails, falls back to reading
 * the data to the host and checksumming it there.
 *
 * @param crc out: the resulting checksum
 */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->checksum_memory) {
		LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* fallback: read the memory out and checksum on the host */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): get_u32 immediately followed by set_u32 on the
		 * same bytes looks like a round-trip no-op; confirm whether any
		 * byte swapping is actually intended here */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2541
2542 int target_blank_check_memory(struct target *target,
2543 struct target_memory_check_block *blocks, int num_blocks,
2544 uint8_t erased_value)
2545 {
2546 if (!target_was_examined(target)) {
2547 LOG_ERROR("Target not examined yet");
2548 return ERROR_FAIL;
2549 }
2550
2551 if (!target->type->blank_check_memory)
2552 return ERROR_NOT_IMPLEMENTED;
2553
2554 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2555 }
2556
2557 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2558 {
2559 uint8_t value_buf[8];
2560 if (!target_was_examined(target)) {
2561 LOG_ERROR("Target not examined yet");
2562 return ERROR_FAIL;
2563 }
2564
2565 int retval = target_read_memory(target, address, 8, 1, value_buf);
2566
2567 if (retval == ERROR_OK) {
2568 *value = target_buffer_get_u64(target, value_buf);
2569 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2570 address,
2571 *value);
2572 } else {
2573 *value = 0x0;
2574 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2575 address);
2576 }
2577
2578 return retval;
2579 }
2580
2581 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2582 {
2583 uint8_t value_buf[4];
2584 if (!target_was_examined(target)) {
2585 LOG_ERROR("Target not examined yet");
2586 return ERROR_FAIL;
2587 }
2588
2589 int retval = target_read_memory(target, address, 4, 1, value_buf);
2590
2591 if (retval == ERROR_OK) {
2592 *value = target_buffer_get_u32(target, value_buf);
2593 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2594 address,
2595 *value);
2596 } else {
2597 *value = 0x0;
2598 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2599 address);
2600 }
2601
2602 return retval;
2603 }
2604
2605 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2606 {
2607 uint8_t value_buf[2];
2608 if (!target_was_examined(target)) {
2609 LOG_ERROR("Target not examined yet");
2610 return ERROR_FAIL;
2611 }
2612
2613 int retval = target_read_memory(target, address, 2, 1, value_buf);
2614
2615 if (retval == ERROR_OK) {
2616 *value = target_buffer_get_u16(target, value_buf);
2617 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2618 address,
2619 *value);
2620 } else {
2621 *value = 0x0;
2622 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2623 address);
2624 }
2625
2626 return retval;
2627 }
2628
2629 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2630 {
2631 if (!target_was_examined(target)) {
2632 LOG_ERROR("Target not examined yet");
2633 return ERROR_FAIL;
2634 }
2635
2636 int retval = target_read_memory(target, address, 1, 1, value);
2637
2638 if (retval == ERROR_OK) {
2639 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2640 address,
2641 *value);
2642 } else {
2643 *value = 0x0;
2644 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2645 address);
2646 }
2647
2648 return retval;
2649 }
2650
2651 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2652 {
2653 int retval;
2654 uint8_t value_buf[8];
2655 if (!target_was_examined(target)) {
2656 LOG_ERROR("Target not examined yet");
2657 return ERROR_FAIL;
2658 }
2659
2660 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2661 address,
2662 value);
2663
2664 target_buffer_set_u64(target, value_buf, value);
2665 retval = target_write_memory(target, address, 8, 1, value_buf);
2666 if (retval != ERROR_OK)
2667 LOG_DEBUG("failed: %i", retval);
2668
2669 return retval;
2670 }
2671
2672 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2673 {
2674 int retval;
2675 uint8_t value_buf[4];
2676 if (!target_was_examined(target)) {
2677 LOG_ERROR("Target not examined yet");
2678 return ERROR_FAIL;
2679 }
2680
2681 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2682 address,
2683 value);
2684
2685 target_buffer_set_u32(target, value_buf, value);
2686 retval = target_write_memory(target, address, 4, 1, value_buf);
2687 if (retval != ERROR_OK)
2688 LOG_DEBUG("failed: %i", retval);
2689
2690 return retval;
2691 }
2692
2693 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2694 {
2695 int retval;
2696 uint8_t value_buf[2];
2697 if (!target_was_examined(target)) {
2698 LOG_ERROR("Target not examined yet");
2699 return ERROR_FAIL;
2700 }
2701
2702 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2703 address,
2704 value);
2705
2706 target_buffer_set_u16(target, value_buf, value);
2707 retval = target_write_memory(target, address, 2, 1, value_buf);
2708 if (retval != ERROR_OK)
2709 LOG_DEBUG("failed: %i", retval);
2710
2711 return retval;
2712 }
2713
2714 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2715 {
2716 int retval;
2717 if (!target_was_examined(target)) {
2718 LOG_ERROR("Target not examined yet");
2719 return ERROR_FAIL;
2720 }
2721
2722 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2723 address, value);
2724
2725 retval = target_write_memory(target, address, 1, 1, &value);
2726 if (retval != ERROR_OK)
2727 LOG_DEBUG("failed: %i", retval);
2728
2729 return retval;
2730 }
2731
2732 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2733 {
2734 int retval;
2735 uint8_t value_buf[8];
2736 if (!target_was_examined(target)) {
2737 LOG_ERROR("Target not examined yet");
2738 return ERROR_FAIL;
2739 }
2740
2741 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2742 address,
2743 value);
2744
2745 target_buffer_set_u64(target, value_buf, value);
2746 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2747 if (retval != ERROR_OK)
2748 LOG_DEBUG("failed: %i", retval);
2749
2750 return retval;
2751 }
2752
2753 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2754 {
2755 int retval;
2756 uint8_t value_buf[4];
2757 if (!target_was_examined(target)) {
2758 LOG_ERROR("Target not examined yet");
2759 return ERROR_FAIL;
2760 }
2761
2762 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2763 address,
2764 value);
2765
2766 target_buffer_set_u32(target, value_buf, value);
2767 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2768 if (retval != ERROR_OK)
2769 LOG_DEBUG("failed: %i", retval);
2770
2771 return retval;
2772 }
2773
2774 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2775 {
2776 int retval;
2777 uint8_t value_buf[2];
2778 if (!target_was_examined(target)) {
2779 LOG_ERROR("Target not examined yet");
2780 return ERROR_FAIL;
2781 }
2782
2783 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2784 address,
2785 value);
2786
2787 target_buffer_set_u16(target, value_buf, value);
2788 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2789 if (retval != ERROR_OK)
2790 LOG_DEBUG("failed: %i", retval);
2791
2792 return retval;
2793 }
2794
2795 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2796 {
2797 int retval;
2798 if (!target_was_examined(target)) {
2799 LOG_ERROR("Target not examined yet");
2800 return ERROR_FAIL;
2801 }
2802
2803 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2804 address, value);
2805
2806 retval = target_write_phys_memory(target, address, 1, 1, &value);
2807 if (retval != ERROR_OK)
2808 LOG_DEBUG("failed: %i", retval);
2809
2810 return retval;
2811 }
2812
2813 static int find_target(struct command_invocation *cmd, const char *name)
2814 {
2815 struct target *target = get_target(name);
2816 if (!target) {
2817 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2818 return ERROR_FAIL;
2819 }
2820 if (!target->tap->enabled) {
2821 command_print(cmd, "Target: TAP %s is disabled, "
2822 "can't be the current target\n",
2823 target->tap->dotted_name);
2824 return ERROR_FAIL;
2825 }
2826
2827 cmd->ctx->current_target = target;
2828 if (cmd->ctx->current_target_override)
2829 cmd->ctx->current_target_override = target;
2830
2831 return ERROR_OK;
2832 }
2833
2834
/* 'targets' command: with one argument, select that target as current;
 * otherwise (or if selection fails) print a table of all targets. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	/* fall through: list every configured target */
	struct target *target = all_targets;
	command_print(CMD, "    TargetName         Type       Endian TapName            State       ");
	command_print(CMD, "--  ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		/* '*' marks the context's current target */
		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2877
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* Latest sensed line state, updated by sense_handler() */
static int power_dropout;
static int srst_asserted;

/* Pending-event flags set by sense_handler() and consumed (then cleared)
 * by handle_target() */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
2887
/* Sample the power/SRST sense lines, detect edges against the previous
 * sample, and latch the corresponding run_* event flags. Power-dropout and
 * SRST-deassert events are rate-limited to one per 2 seconds. */
static int sense_handler(void)
{
	/* previous sample, for edge detection */
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	/* falling edge of power_dropout == power came back */
	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	/* debounce: report at most one power dropout per 2000ms */
	int64_t current = timeval_ms();
	static int64_t last_power;
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	/* falling edge of srst_asserted == SRST released */
	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	/* debounce: report at most one SRST deassert per 2000ms */
	static int64_t last_srst;
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2939
/* process target state changes */
/* Periodic timer callback: run power/SRST event scripts and poll every
 * enabled, examined target, with exponential backoff on poll failures.
 * @param priv the Jim interpreter used to run the event procs */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			if (target->backoff.times > 0) {
				/* a previous poll failed: try a full re-examination */
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target_set_examined(target);
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
						 target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3052
/* 'reg' command:
 *   reg                  - list all registers of the current target
 *   reg <num|name>       - display one register (add "force" to re-read)
 *   reg <num|name> <val> - write a register */
COMMAND_HANDLER(handle_reg_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_TARGET_NOT_EXAMINED;
	}
	struct reg *reg = NULL;

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		/* running ordinal across all caches, matching the numbers
		 * accepted by 'reg <num>' below */
		unsigned int count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				if (reg->exist == false || reg->hidden)
					continue;
				/* only print cached values if they are valid */
				if (reg->valid) {
					char *value = buf_to_hex_str(reg->value,
							reg->size);
					command_print(CMD,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
								? " (dirty)"
								: "");
					free(value);
				} else {
					command_print(CMD, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk the caches until the num-th register is reached */
		struct reg_cache *cache = target->reg_cache;
		unsigned int count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_FAIL;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);

		if (!reg)
			goto not_found;
	}

	assert(reg); /* give clang a hint that we *know* reg is != NULL here */

	if (!reg->exist)
		goto not_found;

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		/* "force" invalidates the cached value so it is re-read */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = false;

		if (!reg->valid) {
			int retval = reg->type->get(reg);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not read register '%s'", reg->name);
				return retval;
			}
		}
		char *value = buf_to_hex_str(reg->value, reg->size);
		command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (!buf)
			return ERROR_FAIL;
		/* NOTE(review): str_to_buf's return value is ignored here, so a
		 * malformed value argument may go unreported — confirm intended */
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		int retval = reg->type->set(reg, buf);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not write to register '%s'", reg->name);
		} else {
			/* echo back the (possibly masked) value actually set */
			char *value = buf_to_hex_str(reg->value, reg->size);
			command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
			free(value);
		}

		free(buf);

		return retval;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;

not_found:
	command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
	return ERROR_FAIL;
}
3187
3188 COMMAND_HANDLER(handle_poll_command)
3189 {
3190 int retval = ERROR_OK;
3191 struct target *target = get_current_target(CMD_CTX);
3192
3193 if (CMD_ARGC == 0) {
3194 command_print(CMD, "background polling: %s",
3195 jtag_poll_get_enabled() ? "on" : "off");
3196 command_print(CMD, "TAP: %s (%s)",
3197 target->tap->dotted_name,
3198 target->tap->enabled ? "enabled" : "disabled");
3199 if (!target->tap->enabled)
3200 return ERROR_OK;
3201 retval = target_poll(target);
3202 if (retval != ERROR_OK)
3203 return retval;
3204 retval = target_arch_state(target);
3205 if (retval != ERROR_OK)
3206 return retval;
3207 } else if (CMD_ARGC == 1) {
3208 bool enable;
3209 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3210 jtag_poll_set_enabled(enable);
3211 } else
3212 return ERROR_COMMAND_SYNTAX_ERROR;
3213
3214 return retval;
3215 }
3216
3217 COMMAND_HANDLER(handle_wait_halt_command)
3218 {
3219 if (CMD_ARGC > 1)
3220 return ERROR_COMMAND_SYNTAX_ERROR;
3221
3222 unsigned ms = DEFAULT_HALT_TIMEOUT;
3223 if (1 == CMD_ARGC) {
3224 int retval = parse_uint(CMD_ARGV[0], &ms);
3225 if (retval != ERROR_OK)
3226 return ERROR_COMMAND_SYNTAX_ERROR;
3227 }
3228
3229 struct target *target = get_current_target(CMD_CTX);
3230 return target_wait_state(target, TARGET_HALTED, ms);
3231 }
3232
3233 /* wait for target state to change. The trick here is to have a low
3234 * latency for short waits and not to suck up all the CPU time
3235 * on longer waits.
3236 *
3237 * After 500ms, keep_alive() is invoked
3238 */
3239 int target_wait_state(struct target *target, enum target_state state, unsigned int ms)
3240 {
3241 int retval;
3242 int64_t then = 0, cur;
3243 bool once = true;
3244
3245 for (;;) {
3246 retval = target_poll(target);
3247 if (retval != ERROR_OK)
3248 return retval;
3249 if (target->state == state)
3250 break;
3251 cur = timeval_ms();
3252 if (once) {
3253 once = false;
3254 then = timeval_ms();
3255 LOG_DEBUG("waiting for target %s...",
3256 nvp_value2name(nvp_target_state, state)->name);
3257 }
3258
3259 if (cur-then > 500)
3260 keep_alive();
3261
3262 if ((cur-then) > ms) {
3263 LOG_ERROR("timed out while waiting for target %s",
3264 nvp_value2name(nvp_target_state, state)->name);
3265 return ERROR_FAIL;
3266 }
3267 }
3268
3269 return ERROR_OK;
3270 }
3271
3272 COMMAND_HANDLER(handle_halt_command)
3273 {
3274 LOG_DEBUG("-");
3275
3276 struct target *target = get_current_target(CMD_CTX);
3277
3278 target->verbose_halt_msg = true;
3279
3280 int retval = target_halt(target);
3281 if (retval != ERROR_OK)
3282 return retval;
3283
3284 if (CMD_ARGC == 1) {
3285 unsigned wait_local;
3286 retval = parse_uint(CMD_ARGV[0], &wait_local);
3287 if (retval != ERROR_OK)
3288 return ERROR_COMMAND_SYNTAX_ERROR;
3289 if (!wait_local)
3290 return ERROR_OK;
3291 }
3292
3293 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3294 }
3295
3296 COMMAND_HANDLER(handle_soft_reset_halt_command)
3297 {
3298 struct target *target = get_current_target(CMD_CTX);
3299
3300 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3301
3302 target_soft_reset_halt(target);
3303
3304 return ERROR_OK;
3305 }
3306
3307 COMMAND_HANDLER(handle_reset_command)
3308 {
3309 if (CMD_ARGC > 1)
3310 return ERROR_COMMAND_SYNTAX_ERROR;
3311
3312 enum target_reset_mode reset_mode = RESET_RUN;
3313 if (CMD_ARGC == 1) {
3314 const struct nvp *n;
3315 n = nvp_name2value(nvp_reset_modes, CMD_ARGV[0]);
3316 if ((!n->name) || (n->value == RESET_UNKNOWN))
3317 return ERROR_COMMAND_SYNTAX_ERROR;
3318 reset_mode = n->value;
3319 }
3320
3321 /* reset *all* targets */
3322 return target_process_reset(CMD, reset_mode);
3323 }
3324
3325
3326 COMMAND_HANDLER(handle_resume_command)
3327 {
3328 int current = 1;
3329 if (CMD_ARGC > 1)
3330 return ERROR_COMMAND_SYNTAX_ERROR;
3331
3332 struct target *target = get_current_target(CMD_CTX);
3333
3334 /* with no CMD_ARGV, resume from current pc, addr = 0,
3335 * with one arguments, addr = CMD_ARGV[0],
3336 * handle breakpoints, not debugging */
3337 target_addr_t addr = 0;
3338 if (CMD_ARGC == 1) {
3339 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3340 current = 0;
3341 }
3342
3343 return target_resume(target, current, addr, 1, 0);
3344 }
3345
3346 COMMAND_HANDLER(handle_step_command)
3347 {
3348 if (CMD_ARGC > 1)
3349 return ERROR_COMMAND_SYNTAX_ERROR;
3350
3351 LOG_DEBUG("-");
3352
3353 /* with no CMD_ARGV, step from current pc, addr = 0,
3354 * with one argument addr = CMD_ARGV[0],
3355 * handle breakpoints, debugging */
3356 target_addr_t addr = 0;
3357 int current_pc = 1;
3358 if (CMD_ARGC == 1) {
3359 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3360 current_pc = 0;
3361 }
3362
3363 struct target *target = get_current_target(CMD_CTX);
3364
3365 return target_step(target, current_pc, addr, 1);
3366 }
3367
/* Pretty-print 'count' memory elements of 'size' bytes each (size must be
 * 1, 2, 4 or 8), 32 bytes per output line, each line prefixed with its
 * target address. Values are decoded in the target's byte order. */
void target_handle_md_output(struct command_invocation *cmd,
		struct target *target, target_addr_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	const unsigned line_bytecnt = 32;	/* bytes of data shown per line */
	unsigned line_modulo = line_bytecnt / size;	/* elements per line */

	/* one formatted line is accumulated here, then flushed */
	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	/* pick a fixed-width hex format matching the element size */
	const char *value_fmt;
	switch (size) {
	case 8:
		value_fmt = "%16.16"PRIx64" ";
		break;
	case 4:
		value_fmt = "%8.8"PRIx64" ";
		break;
	case 2:
		value_fmt = "%4.4"PRIx64" ";
		break;
	case 1:
		value_fmt = "%2.2"PRIx64" ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		/* start of a new line: print the address prefix */
		if (i % line_modulo == 0) {
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					TARGET_ADDR_FMT ": ",
					(address + (i * size)));
		}

		/* decode one element in target byte order */
		uint64_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		switch (size) {
		case 8:
			value = target_buffer_get_u64(target, value_ptr);
			break;
		case 4:
			value = target_buffer_get_u32(target, value_ptr);
			break;
		case 2:
			value = target_buffer_get_u16(target, value_ptr);
			break;
		case 1:
			value = *value_ptr;
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		/* flush at end of line or end of data */
		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			command_print(cmd, "%s", output);
			output_len = 0;
		}
	}
}
3431
3432 COMMAND_HANDLER(handle_md_command)
3433 {
3434 if (CMD_ARGC < 1)
3435 return ERROR_COMMAND_SYNTAX_ERROR;
3436
3437 unsigned size = 0;
3438 switch (CMD_NAME[2]) {
3439 case 'd':
3440 size = 8;
3441 break;
3442 case 'w':
3443 size = 4;
3444 break;
3445 case 'h':
3446 size = 2;
3447 break;
3448 case 'b':
3449 size = 1;
3450 break;
3451 default:
3452 return ERROR_COMMAND_SYNTAX_ERROR;
3453 }
3454
3455 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3456 int (*fn)(struct target *target,
3457 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3458 if (physical) {
3459 CMD_ARGC--;
3460 CMD_ARGV++;
3461 fn = target_read_phys_memory;
3462 } else
3463 fn = target_read_memory;
3464 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3465 return ERROR_COMMAND_SYNTAX_ERROR;
3466
3467 target_addr_t address;
3468 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3469
3470 unsigned count = 1;
3471 if (CMD_ARGC == 2)
3472 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3473
3474 uint8_t *buffer = calloc(count, size);
3475 if (!buffer) {
3476 LOG_ERROR("Failed to allocate md read buffer");
3477 return ERROR_FAIL;
3478 }
3479
3480 struct target *target = get_current_target(CMD_CTX);
3481 int retval = fn(target, address, size, count, buffer);
3482 if (retval == ERROR_OK)
3483 target_handle_md_output(CMD, target, address, size, count, buffer);
3484
3485 free(buffer);
3486
3487 return retval;
3488 }
3489
3490 typedef int (*target_write_fn)(struct target *target,
3491 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3492
3493 static int target_fill_mem(struct target *target,
3494 target_addr_t address,
3495 target_write_fn fn,
3496 unsigned data_size,
3497 /* value */
3498 uint64_t b,
3499 /* count */
3500 unsigned c)
3501 {
3502 /* We have to write in reasonably large chunks to be able
3503 * to fill large memory areas with any sane speed */
3504 const unsigned chunk_size = 16384;
3505 uint8_t *target_buf = malloc(chunk_size * data_size);
3506 if (!target_buf) {
3507 LOG_ERROR("Out of memory");
3508 return ERROR_FAIL;
3509 }
3510
3511 for (unsigned i = 0; i < chunk_size; i++) {
3512 switch (data_size) {
3513 case 8:
3514 target_buffer_set_u64(target, target_buf + i * data_size, b);
3515 break;
3516 case 4:
3517 target_buffer_set_u32(target, target_buf + i * data_size, b);
3518 break;
3519 case 2:
3520 target_buffer_set_u16(target, target_buf + i * data_size, b);
3521 break;
3522 case 1:
3523 target_buffer_set_u8(target, target_buf + i * data_size, b);
3524 break;
3525 default:
3526 exit(-1);
3527 }
3528 }
3529
3530 int retval = ERROR_OK;
3531
3532 for (unsigned x = 0; x < c; x += chunk_size) {
3533 unsigned current;
3534 current = c - x;
3535 if (current > chunk_size)
3536 current = chunk_size;
3537 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3538 if (retval != ERROR_OK)
3539 break;
3540 /* avoid GDB timeouts */
3541 keep_alive();
3542 }
3543 free(target_buf);
3544
3545 return retval;
3546 }
3547
3548
3549 COMMAND_HANDLER(handle_mw_command)
3550 {
3551 if (CMD_ARGC < 2)
3552 return ERROR_COMMAND_SYNTAX_ERROR;
3553 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3554 target_write_fn fn;
3555 if (physical) {
3556 CMD_ARGC--;
3557 CMD_ARGV++;
3558 fn = target_write_phys_memory;
3559 } else
3560 fn = target_write_memory;
3561 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3562 return ERROR_COMMAND_SYNTAX_ERROR;
3563
3564 target_addr_t address;
3565 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3566
3567 uint64_t value;
3568 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3569
3570 unsigned count = 1;
3571 if (CMD_ARGC == 3)
3572 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3573
3574 struct target *target = get_current_target(CMD_CTX);
3575 unsigned wordsize;
3576 switch (CMD_NAME[2]) {
3577 case 'd':
3578 wordsize = 8;
3579 break;
3580 case 'w':
3581 wordsize = 4;
3582 break;
3583 case 'h':
3584 wordsize = 2;
3585 break;
3586 case 'b':
3587 wordsize = 1;
3588 break;
3589 default:
3590 return ERROR_COMMAND_SYNTAX_ERROR;
3591 }
3592
3593 return target_fill_mem(target, address, fn, wordsize, value, count);
3594 }
3595
/* Parse the shared arguments of load_image-style commands:
 *   CMD_ARGV[0]  file name (handled by the caller)
 *   CMD_ARGV[1]  optional base address for relocation
 *   CMD_ARGV[2]  optional image type (handled by the caller)
 *   CMD_ARGV[3]  optional minimum address to load
 *   CMD_ARGV[4]  optional size; max_address is derived as min + size */
static COMMAND_HELPER(parse_load_image_command, struct image *image,
		target_addr_t *min_address, target_addr_t *max_address)
{
	if (CMD_ARGC < 1 || CMD_ARGC > 5)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* a base address isn't always necessary,
	 * default to 0x0 (i.e. don't relocate) */
	if (CMD_ARGC >= 2) {
		target_addr_t addr;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
		image->base_address = addr;
		image->base_address_set = true;
	} else
		image->base_address_set = false;

	image->start_address_set = false;

	if (CMD_ARGC >= 4)
		COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
	if (CMD_ARGC == 5) {
		COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
		/* use size (given) to find max (required) */
		*max_address += *min_address;
	}

	/* reject a window where the derived max lies below min */
	if (*min_address > *max_address)
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
3627
/* 'load_image' command: read an image file section by section and write
 * each section (clipped to the optional [min_address, max_address) window)
 * into target memory, reporting bytes written and throughput. */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* default: no upper clip */
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		/* portion of this section actually written */
		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* only touch sections overlapping the [min, max) window */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3711
3712 COMMAND_HANDLER(handle_dump_image_command)
3713 {
3714 struct fileio *fileio;
3715 uint8_t *buffer;
3716 int retval, retvaltemp;
3717 target_addr_t address, size;
3718 struct duration bench;
3719 struct target *target = get_current_target(CMD_CTX);
3720
3721 if (CMD_ARGC != 3)
3722 return ERROR_COMMAND_SYNTAX_ERROR;
3723
3724 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3725 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3726
3727 uint32_t buf_size = (size > 4096) ? 4096 : size;
3728 buffer = malloc(buf_size);
3729 if (!buffer)
3730 return ERROR_FAIL;
3731
3732 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3733 if (retval != ERROR_OK) {
3734 free(buffer);
3735 return retval;
3736 }
3737
3738 duration_start(&bench);
3739
3740 while (size > 0) {
3741 size_t size_written;
3742 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3743 retval = target_read_buffer(target, address, this_run_size, buffer);
3744 if (retval != ERROR_OK)
3745 break;
3746
3747 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3748 if (retval != ERROR_OK)
3749 break;
3750
3751 size -= this_run_size;
3752 address += this_run_size;
3753 }
3754
3755 free(buffer);
3756
3757 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3758 size_t filesize;
3759 retval = fileio_size(fileio, &filesize);
3760 if (retval != ERROR_OK)
3761 return retval;
3762 command_print(CMD,
3763 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3764 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3765 }
3766
3767 retvaltemp = fileio_close(fileio);
3768 if (retvaltemp != ERROR_OK)
3769 return retvaltemp;
3770
3771 return retval;
3772 }
3773
/* How thoroughly handle_verify_image_command_internal() compares the image
 * against target memory. */
enum verify_mode {
	IMAGE_TEST = 0,		/* only print section addresses/lengths, no compare */
	IMAGE_VERIFY = 1,	/* compare checksums; binary diff on mismatch */
	IMAGE_CHECKSUM_ONLY = 2	/* compare checksums; fail on first mismatch */
};
3779
3780 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3781 {
3782 uint8_t *buffer;
3783 size_t buf_cnt;
3784 uint32_t image_size;
3785 int retval;
3786 uint32_t checksum = 0;
3787 uint32_t mem_checksum = 0;
3788
3789 struct image image;
3790
3791 struct target *target = get_current_target(CMD_CTX);
3792
3793 if (CMD_ARGC < 1)
3794 return ERROR_COMMAND_SYNTAX_ERROR;
3795
3796 if (!target) {
3797 LOG_ERROR("no target selected");
3798 return ERROR_FAIL;
3799 }
3800
3801 struct duration bench;
3802 duration_start(&bench);
3803
3804 if (CMD_ARGC >= 2) {
3805 target_addr_t addr;
3806 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3807 image.base_address = addr;
3808 image.base_address_set = true;
3809 } else {
3810 image.base_address_set = false;
3811 image.base_address = 0x0;
3812 }
3813
3814 image.start_address_set = false;
3815
3816 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3817 if (retval != ERROR_OK)
3818 return retval;
3819
3820 image_size = 0x0;
3821 int diffs = 0;
3822 retval = ERROR_OK;
3823 for (unsigned int i = 0; i < image.num_sections; i++) {
3824 buffer = malloc(image.sections[i].size);
3825 if (!buffer) {
3826 command_print(CMD,
3827 "error allocating buffer for section (%" PRIu32 " bytes)",
3828 image.sections[i].size);
3829 break;
3830 }
3831 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3832 if (retval != ERROR_OK) {
3833 free(buffer);
3834 break;
3835 }
3836
3837 if (verify >= IMAGE_VERIFY) {
3838 /* calculate checksum of image */
3839 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3840 if (retval != ERROR_OK) {
3841 free(buffer);
3842 break;
3843 }
3844
3845 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3846 if (retval != ERROR_OK) {
3847 free(buffer);
3848 break;
3849 }
3850 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3851 LOG_ERROR("checksum mismatch");
3852 free(buffer);
3853 retval = ERROR_FAIL;
3854 goto done;
3855 }
3856 if (checksum != mem_checksum) {
3857 /* failed crc checksum, fall back to a binary compare */
3858 uint8_t *data;
3859
3860 if (diffs == 0)
3861 LOG_ERROR("checksum mismatch - attempting binary compare");
3862
3863 data = malloc(buf_cnt);
3864
3865 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3866 if (retval == ERROR_OK) {
3867 uint32_t t;
3868 for (t = 0; t < buf_cnt; t++) {
3869 if (data[t] != buffer[t]) {
3870 command_print(CMD,
3871 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3872 diffs,
3873 (unsigned)(t + image.sections[i].base_address),
3874 data[t],
3875 buffer[t]);
3876 if (diffs++ >= 127) {
3877 command_print(CMD, "More than 128 errors, the rest are not printed.");
3878 free(data);
3879 free(buffer);
3880 goto done;
3881 }
3882 }
3883 keep_alive();
3884 }
3885 }
3886 free(data);
3887 }
3888 } else {
3889 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3890 image.sections[i].base_address,
3891 buf_cnt);
3892 }
3893
3894 free(buffer);
3895 image_size += buf_cnt;
3896 }
3897 if (diffs > 0)
3898 command_print(CMD, "No more differences found.");
3899 done:
3900 if (diffs > 0)
3901 retval = ERROR_FAIL;
3902 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3903 command_print(CMD, "verified %" PRIu32 " bytes "
3904 "in %fs (%0.3f KiB/s)", image_size,
3905 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3906 }
3907
3908 image_close(&image);
3909
3910 return retval;
3911 }
3912
/* 'verify_image_checksum' command: checksum-only comparison, fails fast. */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3917
/* 'verify_image' command: checksum compare with binary diff on mismatch. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3922
/* 'test_image' command: list section addresses/lengths, no comparison. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3927
3928 static int handle_bp_command_list(struct command_invocation *cmd)
3929 {
3930 struct target *target = get_current_target(cmd->ctx);
3931 struct breakpoint *breakpoint = target->breakpoints;
3932 while (breakpoint) {
3933 if (breakpoint->type == BKPT_SOFT) {
3934 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3935 breakpoint->length);
3936 command_print(cmd, "Software breakpoint(IVA): addr=" TARGET_ADDR_FMT ", len=0x%x, orig_instr=0x%s",
3937 breakpoint->address,
3938 breakpoint->length,
3939 buf);
3940 free(buf);
3941 } else {
3942 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3943 command_print(cmd, "Context breakpoint: asid=0x%8.8" PRIx32 ", len=0x%x, num=%u",
3944 breakpoint->asid,
3945 breakpoint->length, breakpoint->number);
3946 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3947 command_print(cmd, "Hybrid breakpoint(IVA): addr=" TARGET_ADDR_FMT ", len=0x%x, num=%u",
3948 breakpoint->address,
3949 breakpoint->length, breakpoint->number);
3950 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3951 breakpoint->asid);
3952 } else
3953 command_print(cmd, "Hardware breakpoint(IVA): addr=" TARGET_ADDR_FMT ", len=0x%x, num=%u",
3954 breakpoint->address,
3955 breakpoint->length, breakpoint->number);
3956 }
3957
3958 breakpoint = breakpoint->next;
3959 }
3960 return ERROR_OK;
3961 }
3962
3963 static int handle_bp_command_set(struct command_invocation *cmd,
3964 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3965 {
3966 struct target *target = get_current_target(cmd->ctx);
3967 int retval;
3968
3969 if (asid == 0) {
3970 retval = breakpoint_add(target, addr, length, hw);
3971 /* error is always logged in breakpoint_add(), do not print it again */
3972 if (retval == ERROR_OK)
3973 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3974
3975 } else if (addr == 0) {
3976 if (!target->type->add_context_breakpoint) {
3977 LOG_ERROR("Context breakpoint not available");
3978 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3979 }
3980 retval = context_breakpoint_add(target, asid, length, hw);
3981 /* error is always logged in context_breakpoint_add(), do not print it again */
3982 if (retval == ERROR_OK)
3983 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3984
3985 } else {
3986 if (!target->type->add_hybrid_breakpoint) {
3987 LOG_ERROR("Hybrid breakpoint not available");
3988 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3989 }
3990 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3991 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
3992 if (retval == ERROR_OK)
3993 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3994 }
3995 return retval;
3996 }
3997
/* 'bp' command:
 *   bp                                   - list breakpoints
 *   bp <address> <length> ['hw']         - software (default) or hardware
 *   bp <asid> <length> 'hw_ctx'          - context breakpoint
 *   bp <address> <asid> <length> ['hw']  - hybrid breakpoint */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
	case 0:
		return handle_bp_command_list(CMD);

	case 2:
		/* <address> <length>: plain software breakpoint */
		asid = 0;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	case 3:
		if (strcmp(CMD_ARGV[2], "hw") == 0) {
			/* <address> <length> hw: hardware breakpoint */
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			asid = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
			/* <asid> <length> hw_ctx: context breakpoint */
			hw = BKPT_HARD;
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			addr = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		}
		/* fallthrough */
	case 4:
		/* <address> <asid> <length> [hw]: hybrid breakpoint (always HW);
		 * the 3-argument form without hw/hw_ctx falls through here */
		hw = BKPT_HARD;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
4041
4042 COMMAND_HANDLER(handle_rbp_command)
4043 {
4044 int retval;
4045
4046 if (CMD_ARGC != 1)
4047 return ERROR_COMMAND_SYNTAX_ERROR;
4048
4049 struct target *target = get_current_target(CMD_CTX);
4050
4051 if (!strcmp(CMD_ARGV[0], "all")) {
4052 retval = breakpoint_remove_all(target);
4053
4054 if (retval != ERROR_OK) {
4055 command_print(CMD, "Error encountered during removal of all breakpoints.");
4056 command_print(CMD, "Some breakpoints may have remained set.");
4057 }
4058 } else {
4059 target_addr_t addr;
4060 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4061
4062 retval = breakpoint_remove(target, addr);
4063
4064 if (retval != ERROR_OK)
4065 command_print(CMD, "Error during removal of breakpoint at address " TARGET_ADDR_FMT, addr);
4066 }
4067
4068 return retval;
4069 }
4070
/* 'wp' command: with no arguments, list watchpoints; otherwise set one:
 *   wp <address> <length> [r|w|a [value [mask]]]
 * The argument cases cascade: each extra argument refines the defaults. */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	/* no arguments: list all watchpoints of the current target */
	if (CMD_ARGC == 0) {
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			/* r = read, w = write, a = access (read or write) */
			char wp_type = (watchpoint->rw == WPT_READ ? 'r' : (watchpoint->rw == WPT_WRITE ? 'w' : 'a'));
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %c, value: 0x%8.8" PRIx64
					", mask: 0x%8.8" PRIx64,
					watchpoint->address,
					watchpoint->length,
					wp_type,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	/* defaults for the optional arguments */
	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint64_t data_value = 0x0;
	uint64_t data_mask = WATCHPOINT_IGNORE_DATA_VALUE_MASK;
	bool mask_specified = false;

	/* deliberate fallthrough chain: 5 args parse the mask, 4+ parse the
	 * value, 3+ parse the mode, 2+ parse length and address */
	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u64, CMD_ARGV[4], data_mask);
		mask_specified = true;
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u64, CMD_ARGV[3], data_value);
		// if user specified only data value without mask - the mask should be 0
		if (!mask_specified)
			data_mask = 0;
		/* fall through */
	case 3:
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
4144
4145 COMMAND_HANDLER(handle_rwp_command)
4146 {
4147 int retval;
4148
4149 if (CMD_ARGC != 1)
4150 return ERROR_COMMAND_SYNTAX_ERROR;
4151
4152 struct target *target = get_current_target(CMD_CTX);
4153 if (!strcmp(CMD_ARGV[0], "all")) {
4154 retval = watchpoint_remove_all(target);
4155
4156 if (retval != ERROR_OK) {
4157 command_print(CMD, "Error encountered during removal of all watchpoints.");
4158 command_print(CMD, "Some watchpoints may have remained set.");
4159 }
4160 } else {
4161 target_addr_t addr;
4162 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4163
4164 retval = watchpoint_remove(target, addr);
4165
4166 if (retval != ERROR_OK)
4167 command_print(CMD, "Error during removal of watchpoint at address " TARGET_ADDR_FMT, addr);
4168 }
4169
4170 return retval;
4171 }
4172
4173 /**
4174 * Translate a virtual address to a physical address.
4175 *
4176 * The low-level target implementation must have logged a detailed error
4177 * which is forwarded to telnet/GDB session.
4178 */
4179 COMMAND_HANDLER(handle_virt2phys_command)
4180 {
4181 if (CMD_ARGC != 1)
4182 return ERROR_COMMAND_SYNTAX_ERROR;
4183
4184 target_addr_t va;
4185 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4186 target_addr_t pa;
4187
4188 struct target *target = get_current_target(CMD_CTX);
4189 int retval = target->type->virt2phys(target, va, &pa);
4190 if (retval == ERROR_OK)
4191 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4192
4193 return retval;
4194 }
4195
/* Write 'len' raw bytes to f, logging (but not propagating) short writes. */
static void write_data(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4202
/* Write a 32-bit value to f in the target's byte order. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t raw[4];

	target_buffer_set_u32(target, raw, l);
	write_data(f, raw, sizeof(raw));
}
4210
/* Write the string's characters (without the trailing NUL) to f. */
static void write_string(FILE *f, char *s)
{
	write_data(f, s, strlen(s));
}
4215
4216 typedef unsigned char UNIT[2]; /* unit of profiling */
4217
/* Dump a gmon.out histogram file.
 *
 * Buckets the PC samples into a gprof time histogram over either the given
 * [start_address, end_address] range (with_range) or the min/max of the
 * samples themselves, then writes a gmon header, histogram header and the
 * 16-bit saturated bucket counts. Errors are handled by silently
 * returning; partial files are possible on allocation failure. */
static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
		uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
{
	uint32_t i;
	FILE *f = fopen(filename, "w");
	if (!f)
		return;
	/* gmon.out header: magic "gmon", version 1, three padding words */
	write_string(f, "gmon");
	write_long(f, 0x00000001, target); /* Version */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */

	uint8_t zero = 0;  /* GMON_TAG_TIME_HIST */
	write_data(f, &zero, 1);

	/* figure out bucket size */
	uint32_t min;
	uint32_t max;
	if (with_range) {
		/* explicit address range was supplied by the caller */
		min = start_address;
		max = end_address;
	} else {
		/* derive the range from the samples themselves.
		 * NOTE(review): reads samples[0] before the loop — assumes
		 * sample_num > 0; confirm callers guarantee that */
		min = samples[0];
		max = samples[0];
		for (i = 0; i < sample_num; i++) {
			if (min > samples[i])
				min = samples[i];
			if (max < samples[i])
				max = samples[i];
		}

		/* max should be (largest sample + 1)
		 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
		if (max < UINT32_MAX)
			max++;

		/* gprof requires (max - min) >= 2 */
		while ((max - min) < 2) {
			if (max < UINT32_MAX)
				max++;
			else
				min--;
		}
	}

	uint32_t address_space = max - min;

	/* FIXME: What is the reasonable number of buckets?
	 * The profiling result will be more accurate if there are enough buckets. */
	static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
	uint32_t num_buckets = address_space / sizeof(UNIT);
	if (num_buckets > max_buckets)
		num_buckets = max_buckets;
	int *buckets = malloc(sizeof(int) * num_buckets);
	if (!buckets) {
		fclose(f);
		return;
	}
	memset(buckets, 0, sizeof(int) * num_buckets);
	/* count the samples that fall into each bucket */
	for (i = 0; i < sample_num; i++) {
		uint32_t address = samples[i];

		/* drop samples outside the histogram range */
		if ((address < min) || (max <= address))
			continue;

		long long a = address - min;
		long long b = num_buckets;
		long long c = address_space;
		int index_t = (a * b) / c; /* danger!!!! int32 overflows */
		buckets[index_t]++;
	}

	/* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
	write_long(f, min, target);			/* low_pc */
	write_long(f, max, target);			/* high_pc */
	write_long(f, num_buckets, target);	/* # of buckets */
	float sample_rate = sample_num / (duration_ms / 1000.0);
	write_long(f, sample_rate, target);
	/* dimension field: "seconds" zero-padded to 15 bytes, then the
	 * one-byte dimension abbreviation "s" */
	write_string(f, "seconds");
	for (i = 0; i < (15-strlen("seconds")); i++)
		write_data(f, &zero, 1);
	write_string(f, "s");

	/*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */

	char *data = malloc(2 * num_buckets);
	if (data) {
		/* bucket counts are emitted as 16-bit little-endian values,
		 * saturated at 65535 */
		for (i = 0; i < num_buckets; i++) {
			int val;
			val = buckets[i];
			if (val > 65535)
				val = 65535;
			data[i * 2] = val&0xff;
			data[i * 2 + 1] = (val >> 8) & 0xff;
		}
		free(buckets);
		write_data(f, data, num_buckets * 2);
		free(data);
	} else
		free(buckets);

	fclose(f);
}
4323
4324 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4325 * which will be used as a random sampling of PC */
4326 COMMAND_HANDLER(handle_profile_command)
4327 {
4328 struct target *target = get_current_target(CMD_CTX);
4329
4330 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4331 return ERROR_COMMAND_SYNTAX_ERROR;
4332
4333 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4334 uint32_t offset;
4335 uint32_t num_of_samples;
4336 int retval = ERROR_OK;
4337 bool halted_before_profiling = target->state == TARGET_HALTED;
4338
4339 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4340
4341 uint32_t start_address = 0;
4342 uint32_t end_address = 0;
4343 bool with_range = false;
4344 if (CMD_ARGC == 4) {
4345 with_range = true;
4346 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4347 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4348 if (start_address > end_address || (end_address - start_address) < 2) {
4349 command_print(CMD, "Error: end - start < 2");
4350 return ERROR_COMMAND_ARGUMENT_INVALID;
4351 }
4352 }
4353
4354 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4355 if (!samples) {
4356 LOG_ERROR("No memory to store samples.");
4357 return ERROR_FAIL;
4358 }
4359
4360 uint64_t timestart_ms = timeval_ms();
4361 /**
4362 * Some cores let us sample the PC without the
4363 * annoying halt/resume step; for example, ARMv7 PCSR.
4364 * Provide a way to use that more efficient mechanism.
4365 */
4366 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4367 &num_of_samples, offset);
4368 if (retval != ERROR_OK) {
4369 free(samples);
4370 return retval;
4371 }
4372 uint32_t duration_ms = timeval_ms() - timestart_ms;
4373
4374 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4375
4376 retval = target_poll(target);
4377 if (retval != ERROR_OK) {
4378 free(samples);
4379 return retval;
4380 }
4381
4382 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4383 /* The target was halted before we started and is running now. Halt it,
4384 * for consistency. */
4385 retval = target_halt(target);
4386 if (retval != ERROR_OK) {
4387 free(samples);
4388 return retval;
4389 }
4390 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4391 /* The target was running before we started and is halted now. Resume
4392 * it, for consistency. */
4393 retval = target_resume(target, 1, 0, 0, 0);
4394 if (retval != ERROR_OK) {
4395 free(samples);
4396 return retval;
4397 }
4398 }
4399
4400 retval = target_poll(target);
4401 if (retval != ERROR_OK) {
4402 free(samples);
4403 return retval;
4404 }
4405
4406 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4407 with_range, start_address, end_address, target, duration_ms);
4408 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4409
4410 free(samples);
4411 return retval;
4412 }
4413
/**
 * Store @a val into the Tcl array element "varname(idx)" as a wide integer.
 *
 * @param interp  Jim interpreter owning the variable.
 * @param varname Name of the Tcl array variable.
 * @param idx     Element index.
 * @param val     Value to store.
 * @returns JIM_OK on success, JIM_ERR on allocation or interpreter failure.
 */
static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
{
	char *namebuf;
	Jim_Obj *obj_name, *obj_val;
	int result;

	/* Tcl array element syntax: "name(index)" */
	namebuf = alloc_printf("%s(%d)", varname, idx);
	if (!namebuf)
		return JIM_ERR;

	obj_name = Jim_NewStringObj(interp, namebuf, -1);
	jim_wide wide_val = val;
	obj_val = Jim_NewWideObj(interp, wide_val);
	if (!obj_name || !obj_val) {
		/* NOTE(review): if exactly one of the two objects was created,
		 * it is left at refcount zero here — presumably reclaimed by
		 * the interpreter eventually; verify against Jim semantics. */
		free(namebuf);
		return JIM_ERR;
	}

	/* Hold references across Jim_SetVariable(), then drop both so the
	 * interpreter owns (or frees) the objects. */
	Jim_IncrRefCount(obj_name);
	Jim_IncrRefCount(obj_val);
	result = Jim_SetVariable(interp, obj_name, obj_val);
	Jim_DecrRefCount(interp, obj_name);
	Jim_DecrRefCount(interp, obj_val);
	free(namebuf);
	/* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
	return result;
}
4441
/**
 * Tcl "mem2array" backend: read target memory into a Tcl array variable,
 * one array element per memory element.  Deprecated in favor of
 * "read_memory"; kept for script compatibility.
 *
 * Reads proceed in chunks of at most 4 KiB; on a failed chunk the elements
 * already stored remain in the Tcl variable.
 */
static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
{
	int e;

	LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");

	/* argv[0] = name of array to receive the data
	 * argv[1] = desired element width in bits
	 * argv[2] = memory address
	 * argv[3] = count of times to read
	 * argv[4] = optional "phys"
	 */
	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
		return JIM_ERR;
	}

	/* Arg 0: Name of the array variable */
	const char *varname = Jim_GetString(argv[0], NULL);

	/* Arg 1: Bit width of one element */
	long l;
	e = Jim_GetLong(interp, argv[1], &l);
	if (e != JIM_OK)
		return e;
	const unsigned int width_bits = l;

	if (width_bits != 8 &&
			width_bits != 16 &&
			width_bits != 32 &&
			width_bits != 64) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
		return JIM_ERR;
	}
	const unsigned int width = width_bits / 8;	/* element width in bytes */

	/* Arg 2: Memory address */
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[2], &wide_addr);
	if (e != JIM_OK)
		return e;
	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 3: Number of elements to read */
	e = Jim_GetLong(interp, argv[3], &l);
	if (e != JIM_OK)
		return e;
	size_t len = l;

	/* Arg 4: phys */
	bool is_phys = false;
	if (argc > 4) {
		int str_len = 0;
		const char *phys = Jim_GetString(argv[4], &str_len);
		if (!strncmp(phys, "phys", str_len))
			is_phys = true;
		else
			return JIM_ERR;
	}

	/* Argument checks */
	if (len == 0) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
		return JIM_ERR;
	}
	/* reject requests whose end address wraps around the address space */
	if ((addr + (len * width)) < addr) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
		return JIM_ERR;
	}
	if (len > 65536) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"mem2array: too large read request, exceeds 64K items", NULL);
		return JIM_ERR;
	}

	/* the start address must be naturally aligned for the element width */
	if ((width == 1) ||
		((width == 2) && ((addr & 1) == 0)) ||
		((width == 4) && ((addr & 3) == 0)) ||
		((width == 8) && ((addr & 7) == 0))) {
		/* alignment correct */
	} else {
		char buf[100];
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
				addr,
				width);
		Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
		return JIM_ERR;
	}

	/* Transfer loop */

	/* index counter */
	size_t idx = 0;

	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);
	if (!buffer)
		return JIM_ERR;

	/* assume ok */
	e = JIM_OK;
	while (len) {
		/* Slurp... in buffer size chunks */
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */

		int retval;
		if (is_phys)
			retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_read_memory(target, addr, width, chunk_len, buffer);
		if (retval != ERROR_OK) {
			/* BOO !*/
			LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
					  addr,
					  width,
					  chunk_len);
			Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
			Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
			e = JIM_ERR;
			break;
		} else {
			/* decode the chunk in target byte order and store each
			 * element into varname(idx) */
			for (size_t i = 0; i < chunk_len ; i++, idx++) {
				uint64_t v = 0;
				switch (width) {
					case 8:
						v = target_buffer_get_u64(target, &buffer[i*width]);
						break;
					case 4:
						v = target_buffer_get_u32(target, &buffer[i*width]);
						break;
					case 2:
						v = target_buffer_get_u16(target, &buffer[i*width]);
						break;
					case 1:
						v = buffer[i] & 0x0ff;
						break;
				}
				new_u64_array_element(interp, varname, idx, v);
			}
			len -= chunk_len;
			addr += chunk_len * width;
		}
	}

	free(buffer);

	Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));

	return e;
}
4599
4600 COMMAND_HANDLER(handle_target_read_memory)
4601 {
4602 /*
4603 * CMD_ARGV[0] = memory address
4604 * CMD_ARGV[1] = desired element width in bits
4605 * CMD_ARGV[2] = number of elements to read
4606 * CMD_ARGV[3] = optional "phys"
4607 */
4608
4609 if (CMD_ARGC < 3 || CMD_ARGC > 4)
4610 return ERROR_COMMAND_SYNTAX_ERROR;
4611
4612 /* Arg 1: Memory address. */
4613 target_addr_t addr;
4614 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], addr);
4615
4616 /* Arg 2: Bit width of one element. */
4617 unsigned int width_bits;
4618 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], width_bits);
4619
4620 /* Arg 3: Number of elements to read. */
4621 unsigned int count;
4622 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
4623
4624 /* Arg 4: Optional 'phys'. */
4625 bool is_phys = false;
4626 if (CMD_ARGC == 4) {
4627 if (strcmp(CMD_ARGV[3], "phys")) {
4628 command_print(CMD, "invalid argument '%s', must be 'phys'", CMD_ARGV[3]);
4629 return ERROR_COMMAND_ARGUMENT_INVALID;
4630 }
4631
4632 is_phys = true;
4633 }
4634
4635 switch (width_bits) {
4636 case 8:
4637 case 16:
4638 case 32:
4639 case 64:
4640 break;
4641 default:
4642 command_print(CMD, "invalid width, must be 8, 16, 32 or 64");
4643 return ERROR_COMMAND_ARGUMENT_INVALID;
4644 }
4645
4646 const unsigned int width = width_bits / 8;
4647
4648 if ((addr + (count * width)) < addr) {
4649 command_print(CMD, "read_memory: addr + count wraps to zero");
4650 return ERROR_COMMAND_ARGUMENT_INVALID;
4651 }
4652
4653 if (count > 65536) {
4654 command_print(CMD, "read_memory: too large read request, exceeds 64K elements");
4655 return ERROR_COMMAND_ARGUMENT_INVALID;
4656 }
4657
4658 struct target *target = get_current_target(CMD_CTX);
4659
4660 const size_t buffersize = 4096;
4661 uint8_t *buffer = malloc(buffersize);
4662
4663 if (!buffer) {
4664 LOG_ERROR("Failed to allocate memory");
4665 return ERROR_FAIL;
4666 }
4667
4668 char *separator = "";
4669 while (count > 0) {
4670 const unsigned int max_chunk_len = buffersize / width;
4671 const size_t chunk_len = MIN(count, max_chunk_len);
4672
4673 int retval;
4674
4675 if (is_phys)
4676 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4677 else
4678 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4679
4680 if (retval != ERROR_OK) {
4681 LOG_DEBUG("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4682 addr, width_bits, chunk_len);
4683 /*
4684 * FIXME: we append the errmsg to the list of value already read.
4685 * Add a way to flush and replace old output, but LOG_DEBUG() it
4686 */
4687 command_print(CMD, "read_memory: failed to read memory");
4688 free(buffer);
4689 return retval;
4690 }
4691
4692 for (size_t i = 0; i < chunk_len ; i++) {
4693 uint64_t v = 0;
4694
4695 switch (width) {
4696 case 8:
4697 v = target_buffer_get_u64(target, &buffer[i * width]);
4698 break;
4699 case 4:
4700 v = target_buffer_get_u32(target, &buffer[i * width]);
4701 break;
4702 case 2:
4703 v = target_buffer_get_u16(target, &buffer[i * width]);
4704 break;
4705 case 1:
4706 v = buffer[i];
4707 break;
4708 }
4709
4710 command_print_sameline(CMD, "%s0x%" PRIx64, separator, v);
4711 separator = " ";
4712 }
4713
4714 count -= chunk_len;
4715 addr += chunk_len * width;
4716 }
4717
4718 free(buffer);
4719
4720 return ERROR_OK;
4721 }
4722
4723 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4724 {
4725 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4726 if (!namebuf)
4727 return JIM_ERR;
4728
4729 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4730 if (!obj_name) {
4731 free(namebuf);
4732 return JIM_ERR;
4733 }
4734
4735 Jim_IncrRefCount(obj_name);
4736 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4737 Jim_DecrRefCount(interp, obj_name);
4738 free(namebuf);
4739 if (!obj_val)
4740 return JIM_ERR;
4741
4742 jim_wide wide_val;
4743 int result = Jim_GetWide(interp, obj_val, &wide_val);
4744 *val = wide_val;
4745 return result;
4746 }
4747
4748 static int target_array2mem(Jim_Interp *interp, struct target *target,
4749 int argc, Jim_Obj *const *argv)
4750 {
4751 int e;
4752
4753 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4754
4755 /* argv[0] = name of array from which to read the data
4756 * argv[1] = desired element width in bits
4757 * argv[2] = memory address
4758 * argv[3] = number of elements to write
4759 * argv[4] = optional "phys"
4760 */
4761 if (argc < 4 || argc > 5) {
4762 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4763 return JIM_ERR;
4764 }
4765
4766 /* Arg 0: Name of the array variable */
4767 const char *varname = Jim_GetString(argv[0], NULL);
4768
4769 /* Arg 1: Bit width of one element */
4770 long l;
4771 e = Jim_GetLong(interp, argv[1], &l);
4772 if (e != JIM_OK)
4773 return e;
4774 const unsigned int width_bits = l;
4775
4776 if (width_bits != 8 &&
4777 width_bits != 16 &&
4778 width_bits != 32 &&
4779 width_bits != 64) {
4780 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4781 Jim_AppendStrings(interp, Jim_GetResult(interp),
4782 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4783 return JIM_ERR;
4784 }
4785 const unsigned int width = width_bits / 8;
4786
4787 /* Arg 2: Memory address */
4788 jim_wide wide_addr;
4789 e = Jim_GetWide(interp, argv[2], &wide_addr);
4790 if (e != JIM_OK)
4791 return e;
4792 target_addr_t addr = (target_addr_t)wide_addr;
4793
4794 /* Arg 3: Number of elements to write */
4795 e = Jim_GetLong(interp, argv[3], &l);
4796 if (e != JIM_OK)
4797 return e;
4798 size_t len = l;
4799
4800 /* Arg 4: Phys */
4801 bool is_phys = false;
4802 if (argc > 4) {
4803 int str_len = 0;
4804 const char *phys = Jim_GetString(argv[4], &str_len);
4805 if (!strncmp(phys, "phys", str_len))
4806 is_phys = true;
4807 else
4808 return JIM_ERR;
4809 }
4810
4811 /* Argument checks */
4812 if (len == 0) {
4813 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4814 Jim_AppendStrings(interp, Jim_GetResult(interp),
4815 "array2mem: zero width read?", NULL);
4816 return JIM_ERR;
4817 }
4818
4819 if ((addr + (len * width)) < addr) {
4820 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4821 Jim_AppendStrings(interp, Jim_GetResult(interp),
4822 "array2mem: addr + len - wraps to zero?", NULL);
4823 return JIM_ERR;
4824 }
4825
4826 if (len > 65536) {
4827 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4828 Jim_AppendStrings(interp, Jim_GetResult(interp),
4829 "array2mem: too large memory write request, exceeds 64K items", NULL);
4830 return JIM_ERR;
4831 }
4832
4833 if ((width == 1) ||
4834 ((width == 2) && ((addr & 1) == 0)) ||
4835 ((width == 4) && ((addr & 3) == 0)) ||
4836 ((width == 8) && ((addr & 7) == 0))) {
4837 /* alignment correct */
4838 } else {
4839 char buf[100];
4840 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4841 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4842 addr,
4843 width);
4844 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4845 return JIM_ERR;
4846 }
4847
4848 /* Transfer loop */
4849
4850 /* assume ok */
4851 e = JIM_OK;
4852
4853 const size_t buffersize = 4096;
4854 uint8_t *buffer = malloc(buffersize);
4855 if (!buffer)
4856 return JIM_ERR;
4857
4858 /* index counter */
4859 size_t idx = 0;
4860
4861 while (len) {
4862 /* Slurp... in buffer size chunks */
4863 const unsigned int max_chunk_len = buffersize / width;
4864
4865 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4866
4867 /* Fill the buffer */
4868 for (size_t i = 0; i < chunk_len; i++, idx++) {
4869 uint64_t v = 0;
4870 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4871 free(buffer);
4872 return JIM_ERR;
4873 }
4874 switch (width) {
4875 case 8:
4876 target_buffer_set_u64(target, &buffer[i * width], v);
4877 break;
4878 case 4:
4879 target_buffer_set_u32(target, &buffer[i * width], v);
4880 break;
4881 case 2:
4882 target_buffer_set_u16(target, &buffer[i * width], v);
4883 break;
4884 case 1:
4885 buffer[i] = v & 0x0ff;
4886 break;
4887 }
4888 }
4889 len -= chunk_len;
4890
4891 /* Write the buffer to memory */
4892 int retval;
4893 if (is_phys)
4894 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4895 else
4896 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4897 if (retval != ERROR_OK) {
4898 /* BOO !*/
4899 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4900 addr,
4901 width,
4902 chunk_len);
4903 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4904 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4905 e = JIM_ERR;
4906 break;
4907 }
4908 addr += chunk_len * width;
4909 }
4910
4911 free(buffer);
4912
4913 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4914
4915 return e;
4916 }
4917
4918 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4919 Jim_Obj * const *argv)
4920 {
4921 /*
4922 * argv[1] = memory address
4923 * argv[2] = desired element width in bits
4924 * argv[3] = list of data to write
4925 * argv[4] = optional "phys"
4926 */
4927
4928 if (argc < 4 || argc > 5) {
4929 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4930 return JIM_ERR;
4931 }
4932
4933 /* Arg 1: Memory address. */
4934 int e;
4935 jim_wide wide_addr;
4936 e = Jim_GetWide(interp, argv[1], &wide_addr);
4937
4938 if (e != JIM_OK)
4939 return e;
4940
4941 target_addr_t addr = (target_addr_t)wide_addr;
4942
4943 /* Arg 2: Bit width of one element. */
4944 long l;
4945 e = Jim_GetLong(interp, argv[2], &l);
4946
4947 if (e != JIM_OK)
4948 return e;
4949
4950 const unsigned int width_bits = l;
4951 size_t count = Jim_ListLength(interp, argv[3]);
4952
4953 /* Arg 4: Optional 'phys'. */
4954 bool is_phys = false;
4955
4956 if (argc > 4) {
4957 const char *phys = Jim_GetString(argv[4], NULL);
4958
4959 if (strcmp(phys, "phys")) {
4960 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4961 return JIM_ERR;
4962 }
4963
4964 is_phys = true;
4965 }
4966
4967 switch (width_bits) {
4968 case 8:
4969 case 16:
4970 case 32:
4971 case 64:
4972 break;
4973 default:
4974 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4975 return JIM_ERR;
4976 }
4977
4978 const unsigned int width = width_bits / 8;
4979
4980 if ((addr + (count * width)) < addr) {
4981 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
4982 return JIM_ERR;
4983 }
4984
4985 if (count > 65536) {
4986 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
4987 return JIM_ERR;
4988 }
4989
4990 struct command_context *cmd_ctx = current_command_context(interp);
4991 assert(cmd_ctx != NULL);
4992 struct target *target = get_current_target(cmd_ctx);
4993
4994 const size_t buffersize = 4096;
4995 uint8_t *buffer = malloc(buffersize);
4996
4997 if (!buffer) {
4998 LOG_ERROR("Failed to allocate memory");
4999 return JIM_ERR;
5000 }
5001
5002 size_t j = 0;
5003
5004 while (count > 0) {
5005 const unsigned int max_chunk_len = buffersize / width;
5006 const size_t chunk_len = MIN(count, max_chunk_len);
5007
5008 for (size_t i = 0; i < chunk_len; i++, j++) {
5009 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
5010 jim_wide element_wide;
5011 Jim_GetWide(interp, tmp, &element_wide);
5012
5013 const uint64_t v = element_wide;
5014
5015 switch (width) {
5016 case 8:
5017 target_buffer_set_u64(target, &buffer[i * width], v);
5018 break;
5019 case 4:
5020 target_buffer_set_u32(target, &buffer[i * width], v);
5021 break;
5022 case 2:
5023 target_buffer_set_u16(target, &buffer[i * width], v);
5024 break;
5025 case 1:
5026 buffer[i] = v & 0x0ff;
5027 break;
5028 }
5029 }
5030
5031 count -= chunk_len;
5032
5033 int retval;
5034
5035 if (is_phys)
5036 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5037 else
5038 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5039
5040 if (retval != ERROR_OK) {
5041 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5042 addr, width_bits, chunk_len);
5043 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5044 e = JIM_ERR;
5045 break;
5046 }
5047
5048 addr += chunk_len * width;
5049 }
5050
5051 free(buffer);
5052
5053 return e;
5054 }
5055
/* FIX? should we propagate errors here rather than printing them
 * and continuing?
 */
/**
 * Run every event handler body registered on @a target for event @a e.
 * Handler errors are reported to the user but do not stop iteration,
 * except for ERROR_COMMAND_CLOSE_CONNECTION which aborts immediately.
 */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
					   target->target_number,
					   target_name(target),
					   target_type_name(target),
					   e,
					   target_event_name(e),
					   Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			/* restore before any early return below */
			cmd_ctx->current_target_override = saved_target_override;

			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* a Tcl "return" inside the body carries the real code */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
						  target_event_name(e),
						  target_name(target),
						  Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
5104
/**
 * Tcl "get_reg" backend: read the registers named in a Tcl list and return
 * them as a dict of name -> "0x..." hex string.
 *
 * With "-force", cached register values are bypassed and re-read from the
 * target; otherwise a cached (valid) value is returned as-is.
 */
static int target_jim_get_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	bool force = false;

	if (argc == 3) {
		const char *option = Jim_GetString(argv[1], NULL);

		if (!strcmp(option, "-force")) {
			/* consume the option so argv[1] below is the register list */
			argc--;
			argv++;
			force = true;
		} else {
			Jim_SetResultFormatted(interp, "invalid option '%s'", option);
			return JIM_ERR;
		}
	}

	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
		return JIM_ERR;
	}

	const int length = Jim_ListLength(interp, argv[1]);

	/* NOTE(review): on the error returns below, result_dict is abandoned
	 * at refcount zero — presumably reclaimed by the interpreter; verify. */
	Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);

	if (!result_dict)
		return JIM_ERR;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx != NULL);
	const struct target *target = get_current_target(cmd_ctx);

	for (int i = 0; i < length; i++) {
		Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);

		if (!elem)
			return JIM_ERR;

		const char *reg_name = Jim_String(elem);

		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		/* refresh from the target when forced or when the cache is stale */
		if (force || !reg->valid) {
			int retval = reg->type->get(reg);

			if (retval != ERROR_OK) {
				Jim_SetResultFormatted(interp, "failed to read register '%s'",
					reg_name);
				return JIM_ERR;
			}
		}

		char *reg_value = buf_to_hex_str(reg->value, reg->size);

		if (!reg_value) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		char *tmp = alloc_printf("0x%s", reg_value);

		free(reg_value);

		if (!tmp) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		Jim_DictAddElement(interp, result_dict, elem,
			Jim_NewStringObj(interp, tmp, -1));

		free(tmp);
	}

	Jim_SetResult(interp, result_dict);

	return JIM_OK;
}
5191
/**
 * Tcl "set_reg" backend: write registers from a dict of name -> value.
 * Values are parsed as numeric strings and truncated/extended to the
 * register width; processing stops at the first unknown register or
 * failed write.
 */
static int target_jim_set_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "dict");
		return JIM_ERR;
	}

	int tmp;
#if JIM_VERSION >= 80
	/* NOTE(review): newer Jim returns an allocated pair array here —
	 * presumably it should be released after use; verify ownership
	 * against the Jim_DictPairs() contract for this Jim version. */
	Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);

	if (!dict)
		return JIM_ERR;
#else
	Jim_Obj **dict;
	int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);

	if (ret != JIM_OK)
		return ret;
#endif

	/* tmp holds the flat pair count: key at [i], value at [i + 1] */
	const unsigned int length = tmp;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	const struct target *target = get_current_target(cmd_ctx);

	for (unsigned int i = 0; i < length; i += 2) {
		const char *reg_name = Jim_String(dict[i]);
		const char *reg_value = Jim_String(dict[i + 1]);
		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));

		if (!buf) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		/* NOTE(review): str_to_buf() return value is ignored — a
		 * malformed value string proceeds to the register write;
		 * confirm whether that is intentional. */
		str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
		int retval = reg->type->set(reg, buf);
		free(buf);

		if (retval != ERROR_OK) {
			Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
				reg_value, reg_name);
			return JIM_ERR;
		}
	}

	return JIM_OK;
}
5250
5251 /**
5252 * Returns true only if the target has a handler for the specified event.
5253 */
5254 bool target_has_event_action(struct target *target, enum target_event event)
5255 {
5256 struct target_event_action *teap;
5257
5258 for (teap = target->event_action; teap; teap = teap->next) {
5259 if (teap->event == event)
5260 return true;
5261 }
5262 return false;
5263 }
5264
/* Keys accepted by the "$target configure"/"$target cget" options;
 * each value must have a matching entry in nvp_config_opts[]. */
enum target_cfg_param {
	TCFG_TYPE,
	TCFG_EVENT,
	TCFG_WORK_AREA_VIRT,
	TCFG_WORK_AREA_PHYS,
	TCFG_WORK_AREA_SIZE,
	TCFG_WORK_AREA_BACKUP,
	TCFG_ENDIAN,
	TCFG_COREID,
	TCFG_CHAIN_POSITION,
	TCFG_DBGBASE,
	TCFG_RTOS,
	TCFG_DEFER_EXAMINE,
	TCFG_GDB_PORT,
	TCFG_GDB_MAX_CONNECTIONS,
};
5281
/* Option-name/value lookup table for target_configure(); terminated by a
 * NULL name.  Keep in sync with enum target_cfg_param. */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type",             .value = TCFG_TYPE },
	{ .name = "-event",            .value = TCFG_EVENT },
	{ .name = "-work-area-virt",   .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys",   .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size",   .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian",           .value = TCFG_ENDIAN },
	{ .name = "-coreid",           .value = TCFG_COREID },
	{ .name = "-chain-position",   .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase",          .value = TCFG_DBGBASE },
	{ .name = "-rtos",             .value = TCFG_RTOS },
	{ .name = "-defer-examine",    .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port",         .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections",   .value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
5299
5300 static int target_configure(struct jim_getopt_info *goi, struct target *target)
5301 {
5302 struct jim_nvp *n;
5303 Jim_Obj *o;
5304 jim_wide w;
5305 int e;
5306
5307 /* parse config or cget options ... */
5308 while (goi->argc > 0) {
5309 Jim_SetEmptyResult(goi->interp);
5310 /* jim_getopt_debug(goi); */
5311
5312 if (target->type->target_jim_configure) {
5313 /* target defines a configure function */
5314 /* target gets first dibs on parameters */
5315 e = (*(target->type->target_jim_configure))(target, goi);
5316 if (e == JIM_OK) {
5317 /* more? */
5318 continue;
5319 }
5320 if (e == JIM_ERR) {
5321 /* An error */
5322 return e;
5323 }
5324 /* otherwise we 'continue' below */
5325 }
5326 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
5327 if (e != JIM_OK) {
5328 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
5329 return e;
5330 }
5331 switch (n->value) {
5332 case TCFG_TYPE:
5333 /* not settable */
5334 if (goi->isconfigure) {
5335 Jim_SetResultFormatted(goi->interp,
5336 "not settable: %s", n->name);
5337 return JIM_ERR;
5338 } else {
5339 no_params:
5340 if (goi->argc != 0) {
5341 Jim_WrongNumArgs(goi->interp,
5342 goi->argc, goi->argv,
5343 "NO PARAMS");
5344 return JIM_ERR;
5345 }
5346 }
5347 Jim_SetResultString(goi->interp,
5348 target_type_name(target), -1);
5349 /* loop for more */
5350 break;
5351 case TCFG_EVENT:
5352 if (goi->argc == 0) {
5353 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
5354 return JIM_ERR;
5355 }
5356
5357 e = jim_getopt_nvp(goi, nvp_target_event, &n);
5358 if (e != JIM_OK) {
5359 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
5360 return e;
5361 }
5362
5363 if (goi->isconfigure) {
5364 if (goi->argc != 1) {
5365 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
5366 return JIM_ERR;
5367 }
5368 } else {
5369 if (goi->argc != 0) {
5370 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
5371 return JIM_ERR;
5372 }
5373 }
5374
5375 {
5376 struct target_event_action *teap;
5377
5378 teap = target->event_action;
5379 /* replace existing? */
5380 while (teap) {
5381 if (teap->event == (enum target_event)n->value)
5382 break;
5383 teap = teap->next;
5384 }
5385
5386 if (goi->isconfigure) {
5387 /* START_DEPRECATED_TPIU */
5388 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5389 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5390 /* END_DEPRECATED_TPIU */
5391
5392 bool replace = true;
5393 if (!teap) {
5394 /* create new */
5395 teap = calloc(1, sizeof(*teap));
5396 replace = false;
5397 }
5398 teap->event = n->value;
5399 teap->interp = goi->interp;
5400 jim_getopt_obj(goi, &o);
5401 if (teap->body)
5402 Jim_DecrRefCount(teap->interp, teap->body);
5403 teap->body = Jim_DuplicateObj(goi->interp, o);
5404 /*
5405 * FIXME:
5406 * Tcl/TK - "tk events" have a nice feature.
5407 * See the "BIND" command.
5408 * We should support that here.
5409 * You can specify %X and %Y in the event code.
5410 * The idea is: %T - target name.
5411 * The idea is: %N - target number
5412 * The idea is: %E - event name.
5413 */
5414 Jim_IncrRefCount(teap->body);
5415
5416 if (!replace) {
5417 /* add to head of event list */
5418 teap->next = target->event_action;
5419 target->event_action = teap;
5420 }
5421 Jim_SetEmptyResult(goi->interp);
5422 } else {
5423 /* get */
5424 if (!teap)
5425 Jim_SetEmptyResult(goi->interp);
5426 else
5427 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5428 }
5429 }
5430 /* loop for more */
5431 break;
5432
5433 case TCFG_WORK_AREA_VIRT:
5434 if (goi->isconfigure) {
5435 target_free_all_working_areas(target);
5436 e = jim_getopt_wide(goi, &w);
5437 if (e != JIM_OK)
5438 return e;
5439 target->working_area_virt = w;
5440 target->working_area_virt_spec = true;
5441 } else {
5442 if (goi->argc != 0)
5443 goto no_params;
5444 }
5445 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5446 /* loop for more */
5447 break;
5448
5449 case TCFG_WORK_AREA_PHYS:
5450 if (goi->isconfigure) {
5451 target_free_all_working_areas(target);
5452 e = jim_getopt_wide(goi, &w);
5453 if (e != JIM_OK)
5454 return e;
5455 target->working_area_phys = w;
5456 target->working_area_phys_spec = true;
5457 } else {
5458 if (goi->argc != 0)
5459 goto no_params;
5460 }
5461 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5462 /* loop for more */
5463 break;
5464
5465 case TCFG_WORK_AREA_SIZE:
5466 if (goi->isconfigure) {
5467 target_free_all_working_areas(target);
5468 e = jim_getopt_wide(goi, &w);
5469 if (e != JIM_OK)
5470 return e;
5471 target->working_area_size = w;
5472 } else {
5473 if (goi->argc != 0)
5474 goto no_params;
5475 }
5476 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5477 /* loop for more */
5478 break;
5479
5480 case TCFG_WORK_AREA_BACKUP:
5481 if (goi->isconfigure) {
5482 target_free_all_working_areas(target);
5483 e = jim_getopt_wide(goi, &w);
5484 if (e != JIM_OK)
5485 return e;
5486 /* make this exactly 1 or 0 */
5487 target->backup_working_area = (!!w);
5488 } else {
5489 if (goi->argc != 0)
5490 goto no_params;
5491 }
5492 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5493 /* loop for more e*/
5494 break;
5495
5496
5497 case TCFG_ENDIAN:
5498 if (goi->isconfigure) {
5499 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5500 if (e != JIM_OK) {
5501 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5502 return e;
5503 }
5504 target->endianness = n->value;
5505 } else {
5506 if (goi->argc != 0)
5507 goto no_params;
5508 }
5509 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5510 if (!n->name) {
5511 target->endianness = TARGET_LITTLE_ENDIAN;
5512 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5513 }
5514 Jim_SetResultString(goi->interp, n->name, -1);
5515 /* loop for more */
5516 break;
5517
5518 case TCFG_COREID:
5519 if (goi->isconfigure) {
5520 e = jim_getopt_wide(goi, &w);
5521 if (e != JIM_OK)
5522 return e;
5523 target->coreid = (int32_t)w;
5524 } else {
5525 if (goi->argc != 0)
5526 goto no_params;
5527 }
5528 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5529 /* loop for more */
5530 break;
5531
5532 case TCFG_CHAIN_POSITION:
5533 if (goi->isconfigure) {
5534 Jim_Obj *o_t;
5535 struct jtag_tap *tap;
5536
5537 if (target->has_dap) {
5538 Jim_SetResultString(goi->interp,
5539 "target requires -dap parameter instead of -chain-position!", -1);
5540 return JIM_ERR;
5541 }
5542
5543 target_free_all_working_areas(target);
5544 e = jim_getopt_obj(goi, &o_t);
5545 if (e != JIM_OK)
5546 return e;
5547 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5548 if (!tap)
5549 return JIM_ERR;
5550 target->tap = tap;
5551 target->tap_configured = true;
5552 } else {
5553 if (goi->argc != 0)
5554 goto no_params;
5555 }
5556 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5557 /* loop for more e*/
5558 break;
5559 case TCFG_DBGBASE:
5560 if (goi->isconfigure) {
5561 e = jim_getopt_wide(goi, &w);
5562 if (e != JIM_OK)
5563 return e;
5564 target->dbgbase = (uint32_t)w;
5565 target->dbgbase_set = true;
5566 } else {
5567 if (goi->argc != 0)
5568 goto no_params;
5569 }
5570 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5571 /* loop for more */
5572 break;
5573 case TCFG_RTOS:
5574 /* RTOS */
5575 {
5576 int result = rtos_create(goi, target);
5577 if (result != JIM_OK)
5578 return result;
5579 }
5580 /* loop for more */
5581 break;
5582
5583 case TCFG_DEFER_EXAMINE:
5584 /* DEFER_EXAMINE */
5585 target->defer_examine = true;
5586 /* loop for more */
5587 break;
5588
5589 case TCFG_GDB_PORT:
5590 if (goi->isconfigure) {
5591 struct command_context *cmd_ctx = current_command_context(goi->interp);
5592 if (cmd_ctx->mode != COMMAND_CONFIG) {
5593 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5594 return JIM_ERR;
5595 }
5596
5597 const char *s;
5598 e = jim_getopt_string(goi, &s, NULL);
5599 if (e != JIM_OK)
5600 return e;
5601 free(target->gdb_port_override);
5602 target->gdb_port_override = strdup(s);
5603 } else {
5604 if (goi->argc != 0)
5605 goto no_params;
5606 }
5607 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5608 /* loop for more */
5609 break;
5610
5611 case TCFG_GDB_MAX_CONNECTIONS:
5612 if (goi->isconfigure) {
5613 struct command_context *cmd_ctx = current_command_context(goi->interp);
5614 if (cmd_ctx->mode != COMMAND_CONFIG) {
5615 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5616 return JIM_ERR;
5617 }
5618
5619 e = jim_getopt_wide(goi, &w);
5620 if (e != JIM_OK)
5621 return e;
5622 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5623 } else {
5624 if (goi->argc != 0)
5625 goto no_params;
5626 }
5627 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5628 break;
5629 }
5630 } /* while (goi->argc) */
5631
5632
5633 /* done - we return */
5634 return JIM_OK;
5635 }
5636
5637 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5638 {
5639 struct command *c = jim_to_command(interp);
5640 struct jim_getopt_info goi;
5641
5642 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5643 goi.isconfigure = !strcmp(c->name, "configure");
5644 if (goi.argc < 1) {
5645 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5646 "missing: -option ...");
5647 return JIM_ERR;
5648 }
5649 struct command_context *cmd_ctx = current_command_context(interp);
5650 assert(cmd_ctx);
5651 struct target *target = get_current_target(cmd_ctx);
5652 return target_configure(&goi, target);
5653 }
5654
5655 static int jim_target_mem2array(Jim_Interp *interp,
5656 int argc, Jim_Obj *const *argv)
5657 {
5658 struct command_context *cmd_ctx = current_command_context(interp);
5659 assert(cmd_ctx);
5660 struct target *target = get_current_target(cmd_ctx);
5661 return target_mem2array(interp, target, argc - 1, argv + 1);
5662 }
5663
5664 static int jim_target_array2mem(Jim_Interp *interp,
5665 int argc, Jim_Obj *const *argv)
5666 {
5667 struct command_context *cmd_ctx = current_command_context(interp);
5668 assert(cmd_ctx);
5669 struct target *target = get_current_target(cmd_ctx);
5670 return target_array2mem(interp, target, argc - 1, argv + 1);
5671 }
5672
5673 COMMAND_HANDLER(handle_target_examine)
5674 {
5675 bool allow_defer = false;
5676
5677 if (CMD_ARGC > 1)
5678 return ERROR_COMMAND_SYNTAX_ERROR;
5679
5680 if (CMD_ARGC == 1) {
5681 if (strcmp(CMD_ARGV[0], "allow-defer"))
5682 return ERROR_COMMAND_ARGUMENT_INVALID;
5683 allow_defer = true;
5684 }
5685
5686 struct target *target = get_current_target(CMD_CTX);
5687 if (!target->tap->enabled) {
5688 command_print(CMD, "[TAP is disabled]");
5689 return ERROR_FAIL;
5690 }
5691
5692 if (allow_defer && target->defer_examine) {
5693 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5694 LOG_INFO("Use arp_examine command to examine it manually!");
5695 return ERROR_OK;
5696 }
5697
5698 int retval = target->type->examine(target);
5699 if (retval != ERROR_OK) {
5700 target_reset_examined(target);
5701 return retval;
5702 }
5703
5704 target_set_examined(target);
5705
5706 return ERROR_OK;
5707 }
5708
5709 COMMAND_HANDLER(handle_target_was_examined)
5710 {
5711 if (CMD_ARGC != 0)
5712 return ERROR_COMMAND_SYNTAX_ERROR;
5713
5714 struct target *target = get_current_target(CMD_CTX);
5715
5716 command_print(CMD, "%d", target_was_examined(target) ? 1 : 0);
5717
5718 return ERROR_OK;
5719 }
5720
5721 COMMAND_HANDLER(handle_target_examine_deferred)
5722 {
5723 if (CMD_ARGC != 0)
5724 return ERROR_COMMAND_SYNTAX_ERROR;
5725
5726 struct target *target = get_current_target(CMD_CTX);
5727
5728 command_print(CMD, "%d", target->defer_examine ? 1 : 0);
5729
5730 return ERROR_OK;
5731 }
5732
5733 COMMAND_HANDLER(handle_target_halt_gdb)
5734 {
5735 if (CMD_ARGC != 0)
5736 return ERROR_COMMAND_SYNTAX_ERROR;
5737
5738 struct target *target = get_current_target(CMD_CTX);
5739
5740 return target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
5741 }
5742
5743 COMMAND_HANDLER(handle_target_poll)
5744 {
5745 if (CMD_ARGC != 0)
5746 return ERROR_COMMAND_SYNTAX_ERROR;
5747
5748 struct target *target = get_current_target(CMD_CTX);
5749 if (!target->tap->enabled) {
5750 command_print(CMD, "[TAP is disabled]");
5751 return ERROR_FAIL;
5752 }
5753
5754 if (!(target_was_examined(target)))
5755 return ERROR_TARGET_NOT_EXAMINED;
5756
5757 return target->type->poll(target);
5758 }
5759
5760 COMMAND_HANDLER(handle_target_reset)
5761 {
5762 if (CMD_ARGC != 2)
5763 return ERROR_COMMAND_SYNTAX_ERROR;
5764
5765 const struct nvp *n = nvp_name2value(nvp_assert, CMD_ARGV[0]);
5766 if (!n->name) {
5767 nvp_unknown_command_print(CMD, nvp_assert, NULL, CMD_ARGV[0]);
5768 return ERROR_COMMAND_ARGUMENT_INVALID;
5769 }
5770
5771 /* the halt or not param */
5772 int a;
5773 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], a);
5774
5775 struct target *target = get_current_target(CMD_CTX);
5776 if (!target->tap->enabled) {
5777 command_print(CMD, "[TAP is disabled]");
5778 return ERROR_FAIL;
5779 }
5780
5781 if (!target->type->assert_reset || !target->type->deassert_reset) {
5782 command_print(CMD, "No target-specific reset for %s", target_name(target));
5783 return ERROR_FAIL;
5784 }
5785
5786 if (target->defer_examine)
5787 target_reset_examined(target);
5788
5789 /* determine if we should halt or not. */
5790 target->reset_halt = (a != 0);
5791 /* When this happens - all workareas are invalid. */
5792 target_free_all_working_areas_restore(target, 0);
5793
5794 /* do the assert */
5795 if (n->value == NVP_ASSERT)
5796 return target->type->assert_reset(target);
5797 return target->type->deassert_reset(target);
5798 }
5799
5800 COMMAND_HANDLER(handle_target_halt)
5801 {
5802 if (CMD_ARGC != 0)
5803 return ERROR_COMMAND_SYNTAX_ERROR;
5804
5805 struct target *target = get_current_target(CMD_CTX);
5806 if (!target->tap->enabled) {
5807 command_print(CMD, "[TAP is disabled]");
5808 return ERROR_FAIL;
5809 }
5810
5811 return target->type->halt(target);
5812 }
5813
5814 COMMAND_HANDLER(handle_target_wait_state)
5815 {
5816 if (CMD_ARGC != 2)
5817 return ERROR_COMMAND_SYNTAX_ERROR;
5818
5819 const struct nvp *n = nvp_name2value(nvp_target_state, CMD_ARGV[0]);
5820 if (!n->name) {
5821 nvp_unknown_command_print(CMD, nvp_target_state, NULL, CMD_ARGV[0]);
5822 return ERROR_COMMAND_ARGUMENT_INVALID;
5823 }
5824
5825 unsigned int a;
5826 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], a);
5827
5828 struct target *target = get_current_target(CMD_CTX);
5829 if (!target->tap->enabled) {
5830 command_print(CMD, "[TAP is disabled]");
5831 return ERROR_FAIL;
5832 }
5833
5834 int retval = target_wait_state(target, n->value, a);
5835 if (retval != ERROR_OK) {
5836 command_print(CMD,
5837 "target: %s wait %s fails (%d) %s",
5838 target_name(target), n->name,
5839 retval, target_strerror_safe(retval));
5840 return retval;
5841 }
5842 return ERROR_OK;
5843 }
/* List, in human-readable form, the events defined for this target.
 * Scripts/programs should use 'name cget -event NAME' instead.
 */
5847 COMMAND_HANDLER(handle_target_event_list)
5848 {
5849 struct target *target = get_current_target(CMD_CTX);
5850 struct target_event_action *teap = target->event_action;
5851
5852 command_print(CMD, "Event actions for target (%d) %s\n",
5853 target->target_number,
5854 target_name(target));
5855 command_print(CMD, "%-25s | Body", "Event");
5856 command_print(CMD, "------------------------- | "
5857 "----------------------------------------");
5858 while (teap) {
5859 command_print(CMD, "%-25s | %s",
5860 target_event_name(teap->event),
5861 Jim_GetString(teap->body, NULL));
5862 teap = teap->next;
5863 }
5864 command_print(CMD, "***END***");
5865 return ERROR_OK;
5866 }
5867
5868 COMMAND_HANDLER(handle_target_current_state)
5869 {
5870 if (CMD_ARGC != 0)
5871 return ERROR_COMMAND_SYNTAX_ERROR;
5872
5873 struct target *target = get_current_target(CMD_CTX);
5874
5875 command_print(CMD, "%s", target_state_name(target));
5876
5877 return ERROR_OK;
5878 }
5879
5880 COMMAND_HANDLER(handle_target_debug_reason)
5881 {
5882 if (CMD_ARGC != 0)
5883 return ERROR_COMMAND_SYNTAX_ERROR;
5884
5885 struct target *target = get_current_target(CMD_CTX);
5886
5887 command_print(CMD, "%s", debug_reason_name(target));
5888
5889 return ERROR_OK;
5890 }
5891
5892 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5893 {
5894 struct jim_getopt_info goi;
5895 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5896 if (goi.argc != 1) {
5897 const char *cmd_name = Jim_GetString(argv[0], NULL);
5898 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5899 return JIM_ERR;
5900 }
5901 struct jim_nvp *n;
5902 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5903 if (e != JIM_OK) {
5904 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5905 return e;
5906 }
5907 struct command_context *cmd_ctx = current_command_context(interp);
5908 assert(cmd_ctx);
5909 struct target *target = get_current_target(cmd_ctx);
5910 target_handle_event(target, n->value);
5911 return JIM_OK;
5912 }
5913
/* Subcommands registered under every created target's own command name
 * (e.g. 'mytarget configure ...'); chained into each target's command
 * group by target_create(). */
static const struct command_registration target_instance_command_handlers[] = {
	/* attribute access: 'configure' sets, 'cget' reads */
	{
		.name = "configure",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_configure,
		.help = "configure a new target for use",
		.usage = "[target_attribute ...]",
	},
	{
		.name = "cget",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_configure,
		.help = "returns the specified target attribute",
		.usage = "target_attribute",
	},
	/* memory write commands, one per access width (64/32/16/8 bit) */
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 64-bit word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 32-bit word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 16-bit half-word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write byte(s) to target memory",
		.usage = "address data [count]",
	},
	/* memory display commands, one per access width (64/32/16/8 bit) */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 64-bit words",
		.usage = "address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 32-bit words",
		.usage = "address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 16-bit half-words",
		.usage = "address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 8-bit bytes",
		.usage = "address [count]",
	},
	/* Tcl-level memory transfer helpers */
	{
		.name = "array2mem",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_array2mem,
		.help = "Writes Tcl array of 8/16/32 bit numbers "
			"to target memory",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "mem2array",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_mem2array,
		.help = "Loads Tcl array of 8/16/32 bit numbers "
			"from target memory",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.handler = handle_target_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	/* introspection commands */
	{
		.name = "eventlist",
		.handler = handle_target_event_list,
		.mode = COMMAND_EXEC,
		.help = "displays a table of events defined for this target",
		.usage = "",
	},
	{
		.name = "curstate",
		.mode = COMMAND_EXEC,
		.handler = handle_target_current_state,
		.help = "displays the current state of this target",
		.usage = "",
	},
	{
		.name = "debug_reason",
		.mode = COMMAND_EXEC,
		.handler = handle_target_debug_reason,
		.help = "displays the debug reason of this target",
		.usage = "",
	},
	/* 'arp_*' and friends: internal hooks used by the reset machinery */
	{
		.name = "arp_examine",
		.mode = COMMAND_EXEC,
		.handler = handle_target_examine,
		.help = "used internally for reset processing",
		.usage = "['allow-defer']",
	},
	{
		.name = "was_examined",
		.mode = COMMAND_EXEC,
		.handler = handle_target_was_examined,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "examine_deferred",
		.mode = COMMAND_EXEC,
		.handler = handle_target_examine_deferred,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "arp_halt_gdb",
		.mode = COMMAND_EXEC,
		.handler = handle_target_halt_gdb,
		.help = "used internally for reset processing to halt GDB",
		.usage = "",
	},
	{
		.name = "arp_poll",
		.mode = COMMAND_EXEC,
		.handler = handle_target_poll,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "arp_reset",
		.mode = COMMAND_EXEC,
		.handler = handle_target_reset,
		.help = "used internally for reset processing",
		.usage = "'assert'|'deassert' halt",
	},
	{
		.name = "arp_halt",
		.mode = COMMAND_EXEC,
		.handler = handle_target_halt,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "arp_waitstate",
		.mode = COMMAND_EXEC,
		.handler = handle_target_wait_state,
		.help = "used internally for reset processing",
		.usage = "statename timeoutmsecs",
	},
	{
		.name = "invoke-event",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_invoke_event,
		.help = "invoke handler for specified event",
		.usage = "event_name",
	},
	COMMAND_REGISTRATION_DONE
};
6115
6116 static int target_create(struct jim_getopt_info *goi)
6117 {
6118 Jim_Obj *new_cmd;
6119 Jim_Cmd *cmd;
6120 const char *cp;
6121 int e;
6122 int x;
6123 struct target *target;
6124 struct command_context *cmd_ctx;
6125
6126 cmd_ctx = current_command_context(goi->interp);
6127 assert(cmd_ctx);
6128
6129 if (goi->argc < 3) {
6130 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
6131 return JIM_ERR;
6132 }
6133
6134 /* COMMAND */
6135 jim_getopt_obj(goi, &new_cmd);
6136 /* does this command exist? */
6137 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
6138 if (cmd) {
6139 cp = Jim_GetString(new_cmd, NULL);
6140 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
6141 return JIM_ERR;
6142 }
6143
6144 /* TYPE */
6145 e = jim_getopt_string(goi, &cp, NULL);
6146 if (e != JIM_OK)
6147 return e;
6148 struct transport *tr = get_current_transport();
6149 if (tr && tr->override_target) {
6150 e = tr->override_target(&cp);
6151 if (e != ERROR_OK) {
6152 LOG_ERROR("The selected transport doesn't support this target");
6153 return JIM_ERR;
6154 }
6155 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
6156 }
6157 /* now does target type exist */
6158 for (x = 0 ; target_types[x] ; x++) {
6159 if (strcmp(cp, target_types[x]->name) == 0) {
6160 /* found */
6161 break;
6162 }
6163 }
6164 if (!target_types[x]) {
6165 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
6166 for (x = 0 ; target_types[x] ; x++) {
6167 if (target_types[x + 1]) {
6168 Jim_AppendStrings(goi->interp,
6169 Jim_GetResult(goi->interp),
6170 target_types[x]->name,
6171 ", ", NULL);
6172 } else {
6173 Jim_AppendStrings(goi->interp,
6174 Jim_GetResult(goi->interp),
6175 " or ",
6176 target_types[x]->name, NULL);
6177 }
6178 }
6179 return JIM_ERR;
6180 }
6181
6182 /* Create it */
6183 target = calloc(1, sizeof(struct target));
6184 if (!target) {
6185 LOG_ERROR("Out of memory");
6186 return JIM_ERR;
6187 }
6188
6189 /* set empty smp cluster */
6190 target->smp_targets = &empty_smp_targets;
6191
6192 /* set target number */
6193 target->target_number = new_target_number();
6194
6195 /* allocate memory for each unique target type */
6196 target->type = malloc(sizeof(struct target_type));
6197 if (!target->type) {
6198 LOG_ERROR("Out of memory");
6199 free(target);
6200 return JIM_ERR;
6201 }
6202
6203 memcpy(target->type, target_types[x], sizeof(struct target_type));
6204
6205 /* default to first core, override with -coreid */
6206 target->coreid = 0;
6207
6208 target->working_area = 0x0;
6209 target->working_area_size = 0x0;
6210 target->working_areas = NULL;
6211 target->backup_working_area = 0;
6212
6213 target->state = TARGET_UNKNOWN;
6214 target->debug_reason = DBG_REASON_UNDEFINED;
6215 target->reg_cache = NULL;
6216 target->breakpoints = NULL;
6217 target->watchpoints = NULL;
6218 target->next = NULL;
6219 target->arch_info = NULL;
6220
6221 target->verbose_halt_msg = true;
6222
6223 target->halt_issued = false;
6224
6225 /* initialize trace information */
6226 target->trace_info = calloc(1, sizeof(struct trace));
6227 if (!target->trace_info) {
6228 LOG_ERROR("Out of memory");
6229 free(target->type);
6230 free(target);
6231 return JIM_ERR;
6232 }
6233
6234 target->dbgmsg = NULL;
6235 target->dbg_msg_enabled = 0;
6236
6237 target->endianness = TARGET_ENDIAN_UNKNOWN;
6238
6239 target->rtos = NULL;
6240 target->rtos_auto_detect = false;
6241
6242 target->gdb_port_override = NULL;
6243 target->gdb_max_connections = 1;
6244
6245 /* Do the rest as "configure" options */
6246 goi->isconfigure = 1;
6247 e = target_configure(goi, target);
6248
6249 if (e == JIM_OK) {
6250 if (target->has_dap) {
6251 if (!target->dap_configured) {
6252 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
6253 e = JIM_ERR;
6254 }
6255 } else {
6256 if (!target->tap_configured) {
6257 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
6258 e = JIM_ERR;
6259 }
6260 }
6261 /* tap must be set after target was configured */
6262 if (!target->tap)
6263 e = JIM_ERR;
6264 }
6265
6266 if (e != JIM_OK) {
6267 rtos_destroy(target);
6268 free(target->gdb_port_override);
6269 free(target->trace_info);
6270 free(target->type);
6271 free(target);
6272 return e;
6273 }
6274
6275 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
6276 /* default endian to little if not specified */
6277 target->endianness = TARGET_LITTLE_ENDIAN;
6278 }
6279
6280 cp = Jim_GetString(new_cmd, NULL);
6281 target->cmd_name = strdup(cp);
6282 if (!target->cmd_name) {
6283 LOG_ERROR("Out of memory");
6284 rtos_destroy(target);
6285 free(target->gdb_port_override);
6286 free(target->trace_info);
6287 free(target->type);
6288 free(target);
6289 return JIM_ERR;
6290 }
6291
6292 if (target->type->target_create) {
6293 e = (*(target->type->target_create))(target, goi->interp);
6294 if (e != ERROR_OK) {
6295 LOG_DEBUG("target_create failed");
6296 free(target->cmd_name);
6297 rtos_destroy(target);
6298 free(target->gdb_port_override);
6299 free(target->trace_info);
6300 free(target->type);
6301 free(target);
6302 return JIM_ERR;
6303 }
6304 }
6305
6306 /* create the target specific commands */
6307 if (target->type->commands) {
6308 e = register_commands(cmd_ctx, NULL, target->type->commands);
6309 if (e != ERROR_OK)
6310 LOG_ERROR("unable to register '%s' commands", cp);
6311 }
6312
6313 /* now - create the new target name command */
6314 const struct command_registration target_subcommands[] = {
6315 {
6316 .chain = target_instance_command_handlers,
6317 },
6318 {
6319 .chain = target->type->commands,
6320 },
6321 COMMAND_REGISTRATION_DONE
6322 };
6323 const struct command_registration target_commands[] = {
6324 {
6325 .name = cp,
6326 .mode = COMMAND_ANY,
6327 .help = "target command group",
6328 .usage = "",
6329 .chain = target_subcommands,
6330 },
6331 COMMAND_REGISTRATION_DONE
6332 };
6333 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
6334 if (e != ERROR_OK) {
6335 if (target->type->deinit_target)
6336 target->type->deinit_target(target);
6337 free(target->cmd_name);
6338 rtos_destroy(target);
6339 free(target->gdb_port_override);
6340 free(target->trace_info);
6341 free(target->type);
6342 free(target);
6343 return JIM_ERR;
6344 }
6345
6346 /* append to end of list */
6347 append_to_list_all_targets(target);
6348
6349 cmd_ctx->current_target = target;
6350 return JIM_OK;
6351 }
6352
6353 COMMAND_HANDLER(handle_target_current)
6354 {
6355 if (CMD_ARGC != 0)
6356 return ERROR_COMMAND_SYNTAX_ERROR;
6357
6358 struct target *target = get_current_target_or_null(CMD_CTX);
6359 if (target)
6360 command_print(CMD, "%s", target_name(target));
6361
6362 return ERROR_OK;
6363 }
6364
6365 COMMAND_HANDLER(handle_target_types)
6366 {
6367 if (CMD_ARGC != 0)
6368 return ERROR_COMMAND_SYNTAX_ERROR;
6369
6370 for (unsigned int x = 0; target_types[x]; x++)
6371 command_print(CMD, "%s", target_types[x]->name);
6372
6373 return ERROR_OK;
6374 }
6375
6376 COMMAND_HANDLER(handle_target_names)
6377 {
6378 if (CMD_ARGC != 0)
6379 return ERROR_COMMAND_SYNTAX_ERROR;
6380
6381 struct target *target = all_targets;
6382 while (target) {
6383 command_print(CMD, "%s", target_name(target));
6384 target = target->next;
6385 }
6386
6387 return ERROR_OK;
6388 }
6389
6390 static struct target_list *
6391 __attribute__((warn_unused_result))
6392 create_target_list_node(const char *targetname)
6393 {
6394 struct target *target = get_target(targetname);
6395 LOG_DEBUG("%s ", targetname);
6396 if (!target)
6397 return NULL;
6398
6399 struct target_list *new = malloc(sizeof(struct target_list));
6400 if (!new) {
6401 LOG_ERROR("Out of memory");
6402 return new;
6403 }
6404
6405 new->target = target;
6406 return new;
6407 }
6408
6409 static int get_target_with_common_rtos_type(struct command_invocation *cmd,
6410 struct list_head *lh, struct target **result)
6411 {
6412 struct target *target = NULL;
6413 struct target_list *curr;
6414 foreach_smp_target(curr, lh) {
6415 struct rtos *curr_rtos = curr->target->rtos;
6416 if (curr_rtos) {
6417 if (target && target->rtos && target->rtos->type != curr_rtos->type) {
6418 command_print(cmd, "Different rtos types in members of one smp target!");
6419 return ERROR_FAIL;
6420 }
6421 target = curr->target;
6422 }
6423 }
6424 *result = target;
6425 return ERROR_OK;
6426 }
6427
6428 COMMAND_HANDLER(handle_target_smp)
6429 {
6430 static int smp_group = 1;
6431
6432 if (CMD_ARGC == 0) {
6433 LOG_DEBUG("Empty SMP target");
6434 return ERROR_OK;
6435 }
6436 LOG_DEBUG("%d", CMD_ARGC);
6437 /* CMD_ARGC[0] = target to associate in smp
6438 * CMD_ARGC[1] = target to associate in smp
6439 * CMD_ARGC[2] ...
6440 */
6441
6442 struct list_head *lh = malloc(sizeof(*lh));
6443 if (!lh) {
6444 LOG_ERROR("Out of memory");
6445 return ERROR_FAIL;
6446 }
6447 INIT_LIST_HEAD(lh);
6448
6449 for (unsigned int i = 0; i < CMD_ARGC; i++) {
6450 struct target_list *new = create_target_list_node(CMD_ARGV[i]);
6451 if (new)
6452 list_add_tail(&new->lh, lh);
6453 }
6454 /* now parse the list of cpu and put the target in smp mode*/
6455 struct target_list *curr;
6456 foreach_smp_target(curr, lh) {
6457 struct target *target = curr->target;
6458 target->smp = smp_group;
6459 target->smp_targets = lh;
6460 }
6461 smp_group++;
6462
6463 struct target *rtos_target;
6464 int retval = get_target_with_common_rtos_type(CMD, lh, &rtos_target);
6465 if (retval == ERROR_OK && rtos_target)
6466 retval = rtos_smp_init(rtos_target);
6467
6468 return retval;
6469 }
6470
6471 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6472 {
6473 struct jim_getopt_info goi;
6474 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6475 if (goi.argc < 3) {
6476 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6477 "<name> <target_type> [<target_options> ...]");
6478 return JIM_ERR;
6479 }
6480 return target_create(&goi);
6481 }
6482
/* Subcommands of the global 'target' command (target init/create/current/
 * types/names/smp). */
static const struct command_registration target_subcommand_handlers[] = {
	{
		.name = "init",
		.mode = COMMAND_CONFIG,
		.handler = handle_target_init_command,
		.help = "initialize targets",
		.usage = "",
	},
	{
		.name = "create",
		.mode = COMMAND_CONFIG,
		.jim_handler = jim_target_create,
		.usage = "name type '-chain-position' name [options ...]",
		.help = "Creates and selects a new target",
	},
	{
		.name = "current",
		.mode = COMMAND_ANY,
		.handler = handle_target_current,
		.help = "Returns the currently selected target",
		.usage = "",
	},
	{
		.name = "types",
		.mode = COMMAND_ANY,
		.handler = handle_target_types,
		.help = "Returns the available target types as "
				"a list of strings",
		.usage = "",
	},
	{
		.name = "names",
		.mode = COMMAND_ANY,
		.handler = handle_target_names,
		.help = "Returns the names of all targets as a list of strings",
		.usage = "",
	},
	{
		.name = "smp",
		.mode = COMMAND_ANY,
		.handler = handle_target_smp,
		.usage = "targetname1 targetname2 ...",
		.help = "gather several target in a smp list"
	},

	COMMAND_REGISTRATION_DONE
};
6530
/* One image section staged in host memory by 'fast_load_image', ready to be
 * written to the target later by 'fast_load'. */
struct fast_load {
	target_addr_t address;	/* target address the data belongs at */
	uint8_t *data;		/* heap-allocated section contents */
	int length;		/* number of bytes in data */

};

/* Staged image: fastload is a heap array of fastload_num entries, or NULL
 * when no image has been staged (see free_fastload()). */
static int fastload_num;
static struct fast_load *fastload;
6540
6541 static void free_fastload(void)
6542 {
6543 if (fastload) {
6544 for (int i = 0; i < fastload_num; i++)
6545 free(fastload[i].data);
6546 free(fastload);
6547 fastload = NULL;
6548 }
6549 }
6550
/*
 * "fast_load_image" handler: parse an image file and stage its sections in
 * server memory (the fastload array) for a later "fast_load" command. The
 * image is NOT written to the target here. Sections are clipped to the
 * [min_address, max_address] window. Primarily intended for profiling.
 */
COMMAND_HANDLER(handle_fast_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* all-ones: no upper clipping limit */

	struct image image;

	/* Parses filename/address/type plus the optional min/max window. */
	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct duration bench;
	duration_start(&bench);

	retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
	if (retval != ERROR_OK)
		return retval;

	image_size = 0x0;
	retval = ERROR_OK;
	fastload_num = image.num_sections;
	fastload = malloc(sizeof(struct fast_load)*image.num_sections);
	if (!fastload) {
		command_print(CMD, "out of memory");
		image_close(&image);
		return ERROR_FAIL;
	}
	/* Zero all entries: sections clipped away below keep data == NULL and
	 * length == 0, which "fast_load" then writes as a harmless no-op. */
	memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD, "error allocating buffer for section (%d bytes)",
				(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* Keep only sections overlapping [min_address, max_address]. */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {
			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above max_address */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			fastload[i].address = image.sections[i].base_address + offset;
			fastload[i].data = malloc(length);
			if (!fastload[i].data) {
				free(buffer);
				command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
					length);
				retval = ERROR_FAIL;
				break;
			}
			memcpy(fastload[i].data, buffer + offset, length);
			fastload[i].length = length;

			image_size += length;
			/* "written" here means staged in server memory, not on target */
			command_print(CMD, "%u bytes written at address 0x%8.8x",
					(unsigned int)length,
					((unsigned int)(image.sections[i].base_address + offset)));
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "Loaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));

		command_print(CMD,
				"WARNING: image has not been loaded to target!"
				"You can issue a 'fast_load' to finish loading.");
	}

	image_close(&image);

	/* On any failure discard the partially staged image. */
	if (retval != ERROR_OK)
		free_fastload();

	return retval;
}
6652
6653 COMMAND_HANDLER(handle_fast_load_command)
6654 {
6655 if (CMD_ARGC > 0)
6656 return ERROR_COMMAND_SYNTAX_ERROR;
6657 if (!fastload) {
6658 LOG_ERROR("No image in memory");
6659 return ERROR_FAIL;
6660 }
6661 int i;
6662 int64_t ms = timeval_ms();
6663 int size = 0;
6664 int retval = ERROR_OK;
6665 for (i = 0; i < fastload_num; i++) {
6666 struct target *target = get_current_target(CMD_CTX);
6667 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6668 (unsigned int)(fastload[i].address),
6669 (unsigned int)(fastload[i].length));
6670 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6671 if (retval != ERROR_OK)
6672 break;
6673 size += fastload[i].length;
6674 }
6675 if (retval == ERROR_OK) {
6676 int64_t after = timeval_ms();
6677 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6678 }
6679 return retval;
6680 }
6681
/* Top-level commands: "targets" (list or switch the default target) and the
 * config-time "target" command group chained to target_subcommand_handlers. */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
6700
/* Register the top-level "target" and "targets" commands with the
 * command context; called once at startup. */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6705
/* When true, nag the user after each reset about performance options. */
static bool target_reset_nag = true;

/* Accessor for the reset-nag flag, used by reset handling code. */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}

/* "reset_nag ['enable'|'disable']" — set or report the reset-nag flag. */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
		&target_reset_nag, "Nag after each reset about options to improve "
		"performance");
}
6719
6720 COMMAND_HANDLER(handle_ps_command)
6721 {
6722 struct target *target = get_current_target(CMD_CTX);
6723 char *display;
6724 if (target->state != TARGET_HALTED) {
6725 command_print(CMD, "Error: [%s] not halted", target_name(target));
6726 return ERROR_TARGET_NOT_HALTED;
6727 }
6728
6729 if ((target->rtos) && (target->rtos->type)
6730 && (target->rtos->type->ps_command)) {
6731 display = target->rtos->type->ps_command(target);
6732 command_print(CMD, "%s", display);
6733 free(display);
6734 return ERROR_OK;
6735 } else {
6736 LOG_INFO("failed");
6737 return ERROR_TARGET_FAILURE;
6738 }
6739 }
6740
/* Print an optional label followed by size bytes of buf as two-digit hex
 * values, then terminate the line. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
	if (text)
		command_print_sameline(cmd, "%s", text);

	int idx = 0;
	while (idx < size) {
		command_print_sameline(cmd, " %02x", buf[idx]);
		idx++;
	}
	command_print(cmd, " ");
}
6749
6750 COMMAND_HANDLER(handle_test_mem_access_command)
6751 {
6752 struct target *target = get_current_target(CMD_CTX);
6753 uint32_t test_size;
6754 int retval = ERROR_OK;
6755
6756 if (target->state != TARGET_HALTED) {
6757 command_print(CMD, "Error: [%s] not halted", target_name(target));
6758 return ERROR_TARGET_NOT_HALTED;
6759 }
6760
6761 if (CMD_ARGC != 1)
6762 return ERROR_COMMAND_SYNTAX_ERROR;
6763
6764 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6765
6766 /* Test reads */
6767 size_t num_bytes = test_size + 4;
6768
6769 struct working_area *wa = NULL;
6770 retval = target_alloc_working_area(target, num_bytes, &wa);
6771 if (retval != ERROR_OK) {
6772 LOG_ERROR("Not enough working area");
6773 return ERROR_FAIL;
6774 }
6775
6776 uint8_t *test_pattern = malloc(num_bytes);
6777
6778 for (size_t i = 0; i < num_bytes; i++)
6779 test_pattern[i] = rand();
6780
6781 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6782 if (retval != ERROR_OK) {
6783 LOG_ERROR("Test pattern write failed");
6784 goto out;
6785 }
6786
6787 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6788 for (int size = 1; size <= 4; size *= 2) {
6789 for (int offset = 0; offset < 4; offset++) {
6790 uint32_t count = test_size / size;
6791 size_t host_bufsiz = (count + 2) * size + host_offset;
6792 uint8_t *read_ref = malloc(host_bufsiz);
6793 uint8_t *read_buf = malloc(host_bufsiz);
6794
6795 for (size_t i = 0; i < host_bufsiz; i++) {
6796 read_ref[i] = rand();
6797 read_buf[i] = read_ref[i];
6798 }
6799 command_print_sameline(CMD,
6800 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6801 size, offset, host_offset ? "un" : "");
6802
6803 struct duration bench;
6804 duration_start(&bench);
6805
6806 retval = target_read_memory(target, wa->address + offset, size, count,
6807 read_buf + size + host_offset);
6808
6809 duration_measure(&bench);
6810
6811 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6812 command_print(CMD, "Unsupported alignment");
6813 goto next;
6814 } else if (retval != ERROR_OK) {
6815 command_print(CMD, "Memory read failed");
6816 goto next;
6817 }
6818
6819 /* replay on host */
6820 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6821
6822 /* check result */
6823 int result = memcmp(read_ref, read_buf, host_bufsiz);
6824 if (result == 0) {
6825 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6826 duration_elapsed(&bench),
6827 duration_kbps(&bench, count * size));
6828 } else {
6829 command_print(CMD, "Compare failed");
6830 binprint(CMD, "ref:", read_ref, host_bufsiz);
6831 binprint(CMD, "buf:", read_buf, host_bufsiz);
6832 }
6833 next:
6834 free(read_ref);
6835 free(read_buf);
6836 }
6837 }
6838 }
6839
6840 out:
6841 free(test_pattern);
6842
6843 target_free_working_area(target, wa);
6844
6845 /* Test writes */
6846 num_bytes = test_size + 4 + 4 + 4;
6847
6848 retval = target_alloc_working_area(target, num_bytes, &wa);
6849 if (retval != ERROR_OK) {
6850 LOG_ERROR("Not enough working area");
6851 return ERROR_FAIL;
6852 }
6853
6854 test_pattern = malloc(num_bytes);
6855
6856 for (size_t i = 0; i < num_bytes; i++)
6857 test_pattern[i] = rand();
6858
6859 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6860 for (int size = 1; size <= 4; size *= 2) {
6861 for (int offset = 0; offset < 4; offset++) {
6862 uint32_t count = test_size / size;
6863 size_t host_bufsiz = count * size + host_offset;
6864 uint8_t *read_ref = malloc(num_bytes);
6865 uint8_t *read_buf = malloc(num_bytes);
6866 uint8_t *write_buf = malloc(host_bufsiz);
6867
6868 for (size_t i = 0; i < host_bufsiz; i++)
6869 write_buf[i] = rand();
6870 command_print_sameline(CMD,
6871 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6872 size, offset, host_offset ? "un" : "");
6873
6874 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6875 if (retval != ERROR_OK) {
6876 command_print(CMD, "Test pattern write failed");
6877 goto nextw;
6878 }
6879
6880 /* replay on host */
6881 memcpy(read_ref, test_pattern, num_bytes);
6882 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6883
6884 struct duration bench;
6885 duration_start(&bench);
6886
6887 retval = target_write_memory(target, wa->address + size + offset, size, count,
6888 write_buf + host_offset);
6889
6890 duration_measure(&bench);
6891
6892 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6893 command_print(CMD, "Unsupported alignment");
6894 goto nextw;
6895 } else if (retval != ERROR_OK) {
6896 command_print(CMD, "Memory write failed");
6897 goto nextw;
6898 }
6899
6900 /* read back */
6901 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6902 if (retval != ERROR_OK) {
6903 command_print(CMD, "Test pattern write failed");
6904 goto nextw;
6905 }
6906
6907 /* check result */
6908 int result = memcmp(read_ref, read_buf, num_bytes);
6909 if (result == 0) {
6910 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6911 duration_elapsed(&bench),
6912 duration_kbps(&bench, count * size));
6913 } else {
6914 command_print(CMD, "Compare failed");
6915 binprint(CMD, "ref:", read_ref, num_bytes);
6916 binprint(CMD, "buf:", read_buf, num_bytes);
6917 }
6918 nextw:
6919 free(read_ref);
6920 free(read_buf);
6921 }
6922 }
6923 }
6924
6925 free(test_pattern);
6926
6927 target_free_working_area(target, wa);
6928 return retval;
6929 }
6930
/* Run-time (mostly EXEC mode) target commands — halt/resume/step, memory
 * display and write, breakpoints/watchpoints, image load/verify, register
 * access and self-test helpers. Registered by target_register_user_commands()
 * once targets exist. */
static const struct command_registration target_exec_command_handlers[] = {
	{
		.name = "fast_load_image",
		.handler = handle_fast_load_image_command,
		.mode = COMMAND_ANY,
		.help = "Load image into server memory for later use by "
			"fast_load; primarily for profiling",
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address [max_length]]",
	},
	{
		.name = "fast_load",
		.handler = handle_fast_load_command,
		.mode = COMMAND_EXEC,
		.help = "loads active fast load image to current target "
			"- mainly for profiling purposes",
		.usage = "",
	},
	{
		.name = "profile",
		.handler = handle_profile_command,
		.mode = COMMAND_EXEC,
		.usage = "seconds filename [start end]",
		.help = "profiling samples the CPU PC",
	},
	/** @todo don't register virt2phys() unless target supports it */
	{
		.name = "virt2phys",
		.handler = handle_virt2phys_command,
		.mode = COMMAND_ANY,
		.help = "translate a virtual address into a physical address",
		.usage = "virtual_address",
	},
	{
		.name = "reg",
		.handler = handle_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display (reread from target with \"force\") or set a register; "
			"with no arguments, displays all registers and their values",
		.usage = "[(register_number|register_name) [(value|'force')]]",
	},
	{
		.name = "poll",
		.handler = handle_poll_command,
		.mode = COMMAND_EXEC,
		.help = "poll target state; or reconfigure background polling",
		.usage = "['on'|'off']",
	},
	{
		.name = "wait_halt",
		.handler = handle_wait_halt_command,
		.mode = COMMAND_EXEC,
		.help = "wait up to the specified number of milliseconds "
			"(default 5000) for a previously requested halt",
		.usage = "[milliseconds]",
	},
	{
		.name = "halt",
		.handler = handle_halt_command,
		.mode = COMMAND_EXEC,
		.help = "request target to halt, then wait up to the specified "
			"number of milliseconds (default 5000) for it to complete",
		.usage = "[milliseconds]",
	},
	{
		.name = "resume",
		.handler = handle_resume_command,
		.mode = COMMAND_EXEC,
		.help = "resume target execution from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "reset",
		.handler = handle_reset_command,
		.mode = COMMAND_EXEC,
		.usage = "[run|halt|init]",
		.help = "Reset all targets into the specified mode. "
			"Default reset mode is run, if not given.",
	},
	{
		.name = "soft_reset_halt",
		.handler = handle_soft_reset_halt_command,
		.mode = COMMAND_EXEC,
		.usage = "",
		.help = "halt the target and do a soft reset",
	},
	{
		.name = "step",
		.handler = handle_step_command,
		.mode = COMMAND_EXEC,
		.help = "step one instruction from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory double-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory half-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory bytes",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory double-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory half-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory byte",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "bp",
		.handler = handle_bp_command,
		.mode = COMMAND_EXEC,
		.help = "list or set hardware or software breakpoint",
		.usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
	},
	{
		.name = "rbp",
		.handler = handle_rbp_command,
		.mode = COMMAND_EXEC,
		.help = "remove breakpoint",
		.usage = "'all' | address",
	},
	{
		.name = "wp",
		.handler = handle_wp_command,
		.mode = COMMAND_EXEC,
		.help = "list (no params) or create watchpoints",
		.usage = "[address length [('r'|'w'|'a') value [mask]]]",
	},
	{
		.name = "rwp",
		.handler = handle_rwp_command,
		.mode = COMMAND_EXEC,
		.help = "remove watchpoint",
		.usage = "'all' | address",
	},
	{
		.name = "load_image",
		.handler = handle_load_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address] [max_length]",
	},
	{
		.name = "dump_image",
		.handler = handle_dump_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address size",
	},
	{
		.name = "verify_image_checksum",
		.handler = handle_verify_image_checksum_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "verify_image",
		.handler = handle_verify_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "test_image",
		.handler = handle_test_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.handler = handle_target_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	{
		.name = "reset_nag",
		.handler = handle_target_reset_nag,
		.mode = COMMAND_ANY,
		.help = "Nag after each reset about options that could have been "
			"enabled to improve performance.",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "ps",
		.handler = handle_ps_command,
		.mode = COMMAND_EXEC,
		.help = "list all tasks",
		.usage = "",
	},
	{
		.name = "test_mem_access",
		.handler = handle_test_mem_access_command,
		.mode = COMMAND_EXEC,
		.help = "Test the target's memory access functions",
		.usage = "size",
	},

	COMMAND_REGISTRATION_DONE
};
7192 static int target_register_user_commands(struct command_context *cmd_ctx)
7193 {
7194 int retval = ERROR_OK;
7195 retval = target_request_register_commands(cmd_ctx);
7196 if (retval != ERROR_OK)
7197 return retval;
7198
7199 retval = trace_register_commands(cmd_ctx);
7200 if (retval != ERROR_OK)
7201 return retval;
7202
7203
7204 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7205 }
7206
7207 const char *target_debug_reason_str(enum target_debug_reason reason)
7208 {
7209 switch (reason) {
7210 case DBG_REASON_DBGRQ:
7211 return "DBGRQ";
7212 case DBG_REASON_BREAKPOINT:
7213 return "BREAKPOINT";
7214 case DBG_REASON_WATCHPOINT:
7215 return "WATCHPOINT";
7216 case DBG_REASON_WPTANDBKPT:
7217 return "WPTANDBKPT";
7218 case DBG_REASON_SINGLESTEP:
7219 return "SINGLESTEP";
7220 case DBG_REASON_NOTHALTED:
7221 return "NOTHALTED";
7222 case DBG_REASON_EXIT:
7223 return "EXIT";
7224 case DBG_REASON_EXC_CATCH:
7225 return "EXC_CATCH";
7226 case DBG_REASON_UNDEFINED:
7227 return "UNDEFINED";
7228 default:
7229 return "UNKNOWN!";
7230 }
7231 }

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time the new login method will be linked to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)