breakpoints: use 64-bit type for watchpoint mask and value
[openocd.git] / src / target / target.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2007-2010 Øyvind Harboe *
8 * oyvind.harboe@zylin.com *
9 * *
10 * Copyright (C) 2008, Duane Ellis *
11 * openocd@duaneeellis.com *
12 * *
13 * Copyright (C) 2008 by Spencer Oliver *
14 * spen@spen-soft.co.uk *
15 * *
16 * Copyright (C) 2008 by Rick Altherr *
17 * kc8apf@kc8apf.net> *
18 * *
19 * Copyright (C) 2011 by Broadcom Corporation *
20 * Evan Hunter - ehunter@broadcom.com *
21 * *
22 * Copyright (C) ST-Ericsson SA 2011 *
23 * michel.jaouen@stericsson.com : smp minimum support *
24 * *
25 * Copyright (C) 2011 Andreas Fritiofson *
26 * andreas.fritiofson@gmail.com *
27 ***************************************************************************/
28
29 #ifdef HAVE_CONFIG_H
30 #include "config.h"
31 #endif
32
33 #include <helper/align.h>
34 #include <helper/nvp.h>
35 #include <helper/time_support.h>
36 #include <jtag/jtag.h>
37 #include <flash/nor/core.h>
38
39 #include "target.h"
40 #include "target_type.h"
41 #include "target_request.h"
42 #include "breakpoints.h"
43 #include "register.h"
44 #include "trace.h"
45 #include "image.h"
46 #include "rtos/rtos.h"
47 #include "transport/transport.h"
48 #include "arm_cti.h"
49 #include "smp.h"
50 #include "semihosting_common.h"
51
52 /* default halt wait timeout (ms) */
53 #define DEFAULT_HALT_TIMEOUT 5000
54
55 static int target_read_buffer_default(struct target *target, target_addr_t address,
56 uint32_t count, uint8_t *buffer);
57 static int target_write_buffer_default(struct target *target, target_addr_t address,
58 uint32_t count, const uint8_t *buffer);
59 static int target_array2mem(Jim_Interp *interp, struct target *target,
60 int argc, Jim_Obj * const *argv);
61 static int target_mem2array(Jim_Interp *interp, struct target *target,
62 int argc, Jim_Obj * const *argv);
63 static int target_register_user_commands(struct command_context *cmd_ctx);
64 static int target_get_gdb_fileio_info_default(struct target *target,
65 struct gdb_fileio_info *fileio_info);
66 static int target_gdb_fileio_end_default(struct target *target, int retcode,
67 int fileio_errno, bool ctrl_c);
68
/* Registry of every target type OpenOCD was built with; NULL-terminated.
 * Each entry is defined in the corresponding src/target/ implementation. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&xtensa_chip_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&esp32_target,
	&esp32s2_target,
	&esp32s3_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&armv8r_target,
	&mips_mips64_target,
	NULL,
};
110
/* Head of the singly-linked list of all configured targets. */
struct target *all_targets;
/* Registered event and timer callbacks (linked lists). */
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
/* Bookkeeping for the timer-callback machinery — presumably the next
 * scheduled callback time; maintained elsewhere in this file. */
static int64_t target_timer_next_event_value;
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
/* Default polling period used when a target does not override it. */
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
/* Placeholder SMP group for targets not configured into an SMP cluster. */
static LIST_HEAD(empty_smp_targets);
119
/* Assert/deassert polarity used by reset-line style commands. */
enum nvp_assert {
	NVP_DEASSERT,
	NVP_ASSERT,
};

/* Accepted user spellings for assert/deassert (true/false aliases too). */
static const struct nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};
134
/* Printable names for ERROR_TARGET_* codes; see target_strerror_safe(). */
static const struct nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
149
150 static const char *target_strerror_safe(int err)
151 {
152 const struct nvp *n;
153
154 n = nvp_value2name(nvp_error_target, err);
155 if (!n->name)
156 return "unknown";
157 else
158 return n->name;
159 }
160
/* Event names exposed to Tcl event handlers; see target_event_name(). */
static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X100, .name = "semihosting-user-cmd-0x100" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X101, .name = "semihosting-user-cmd-0x101" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X102, .name = "semihosting-user-cmd-0x102" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X103, .name = "semihosting-user-cmd-0x103" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X104, .name = "semihosting-user-cmd-0x104" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X105, .name = "semihosting-user-cmd-0x105" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X106, .name = "semihosting-user-cmd-0x106" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X107, .name = "semihosting-user-cmd-0x107" },

	{ .name = NULL, .value = -1 }
};
212
/* Printable names for enum target_state; see target_state_name(). */
static const struct nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};

/* Printable names for enum target_debug_reason; see debug_reason_name(). */
static const struct nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
234
/* Endianness keywords accepted by target configuration ("be"/"le" aliases). */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};

/* Reset mode keywords; see target_reset_mode_name() and target_process_reset(). */
static const struct nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};
250
251 const char *debug_reason_name(struct target *t)
252 {
253 const char *cp;
254
255 cp = nvp_value2name(nvp_target_debug_reason,
256 t->debug_reason)->name;
257 if (!cp) {
258 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
259 cp = "(*BUG*unknown*BUG*)";
260 }
261 return cp;
262 }
263
264 const char *target_state_name(struct target *t)
265 {
266 const char *cp;
267 cp = nvp_value2name(nvp_target_state, t->state)->name;
268 if (!cp) {
269 LOG_ERROR("Invalid target state: %d", (int)(t->state));
270 cp = "(*BUG*unknown*BUG*)";
271 }
272
273 if (!target_was_examined(t) && t->defer_examine)
274 cp = "examine deferred";
275
276 return cp;
277 }
278
279 const char *target_event_name(enum target_event event)
280 {
281 const char *cp;
282 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
283 if (!cp) {
284 LOG_ERROR("Invalid target event: %d", (int)(event));
285 cp = "(*BUG*unknown*BUG*)";
286 }
287 return cp;
288 }
289
290 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
291 {
292 const char *cp;
293 cp = nvp_value2name(nvp_reset_modes, reset_mode)->name;
294 if (!cp) {
295 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
296 cp = "(*BUG*unknown*BUG*)";
297 }
298 return cp;
299 }
300
301 /* determine the number of the new target */
302 static int new_target_number(void)
303 {
304 struct target *t;
305 int x;
306
307 /* number is 0 based */
308 x = -1;
309 t = all_targets;
310 while (t) {
311 if (x < t->target_number)
312 x = t->target_number;
313 t = t->next;
314 }
315 return x + 1;
316 }
317
318 static void append_to_list_all_targets(struct target *target)
319 {
320 struct target **t = &all_targets;
321
322 while (*t)
323 t = &((*t)->next);
324 *t = target;
325 }
326
327 /* read a uint64_t from a buffer in target memory endianness */
328 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
329 {
330 if (target->endianness == TARGET_LITTLE_ENDIAN)
331 return le_to_h_u64(buffer);
332 else
333 return be_to_h_u64(buffer);
334 }
335
336 /* read a uint32_t from a buffer in target memory endianness */
337 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
338 {
339 if (target->endianness == TARGET_LITTLE_ENDIAN)
340 return le_to_h_u32(buffer);
341 else
342 return be_to_h_u32(buffer);
343 }
344
345 /* read a uint24_t from a buffer in target memory endianness */
346 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
347 {
348 if (target->endianness == TARGET_LITTLE_ENDIAN)
349 return le_to_h_u24(buffer);
350 else
351 return be_to_h_u24(buffer);
352 }
353
354 /* read a uint16_t from a buffer in target memory endianness */
355 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
356 {
357 if (target->endianness == TARGET_LITTLE_ENDIAN)
358 return le_to_h_u16(buffer);
359 else
360 return be_to_h_u16(buffer);
361 }
362
363 /* write a uint64_t to a buffer in target memory endianness */
364 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
365 {
366 if (target->endianness == TARGET_LITTLE_ENDIAN)
367 h_u64_to_le(buffer, value);
368 else
369 h_u64_to_be(buffer, value);
370 }
371
372 /* write a uint32_t to a buffer in target memory endianness */
373 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
374 {
375 if (target->endianness == TARGET_LITTLE_ENDIAN)
376 h_u32_to_le(buffer, value);
377 else
378 h_u32_to_be(buffer, value);
379 }
380
381 /* write a uint24_t to a buffer in target memory endianness */
382 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
383 {
384 if (target->endianness == TARGET_LITTLE_ENDIAN)
385 h_u24_to_le(buffer, value);
386 else
387 h_u24_to_be(buffer, value);
388 }
389
390 /* write a uint16_t to a buffer in target memory endianness */
391 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
392 {
393 if (target->endianness == TARGET_LITTLE_ENDIAN)
394 h_u16_to_le(buffer, value);
395 else
396 h_u16_to_be(buffer, value);
397 }
398
399 /* write a uint8_t to a buffer in target memory endianness */
400 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
401 {
402 *buffer = value;
403 }
404
/* read a uint64_t array from a buffer in target memory endianness */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
}

/* read a uint32_t array from a buffer in target memory endianness */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
}

/* read a uint16_t array from a buffer in target memory endianness */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
}
428
429 /* write a uint64_t array to a buffer in target memory endianness */
430 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
431 {
432 uint32_t i;
433 for (i = 0; i < count; i++)
434 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
435 }
436
437 /* write a uint32_t array to a buffer in target memory endianness */
438 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
439 {
440 uint32_t i;
441 for (i = 0; i < count; i++)
442 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
443 }
444
445 /* write a uint16_t array to a buffer in target memory endianness */
446 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
447 {
448 uint32_t i;
449 for (i = 0; i < count; i++)
450 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
451 }
452
453 /* return a pointer to a configured target; id is name or number */
454 struct target *get_target(const char *id)
455 {
456 struct target *target;
457
458 /* try as tcltarget name */
459 for (target = all_targets; target; target = target->next) {
460 if (!target_name(target))
461 continue;
462 if (strcmp(id, target_name(target)) == 0)
463 return target;
464 }
465
466 /* It's OK to remove this fallback sometime after August 2010 or so */
467
468 /* no match, try as number */
469 unsigned num;
470 if (parse_uint(id, &num) != ERROR_OK)
471 return NULL;
472
473 for (target = all_targets; target; target = target->next) {
474 if (target->target_number == (int)num) {
475 LOG_WARNING("use '%s' as target identifier, not '%u'",
476 target_name(target), num);
477 return target;
478 }
479 }
480
481 return NULL;
482 }
483
484 /* returns a pointer to the n-th configured target */
485 struct target *get_target_by_num(int num)
486 {
487 struct target *target = all_targets;
488
489 while (target) {
490 if (target->target_number == num)
491 return target;
492 target = target->next;
493 }
494
495 return NULL;
496 }
497
498 struct target *get_current_target(struct command_context *cmd_ctx)
499 {
500 struct target *target = get_current_target_or_null(cmd_ctx);
501
502 if (!target) {
503 LOG_ERROR("BUG: current_target out of bounds");
504 exit(-1);
505 }
506
507 return target;
508 }
509
510 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
511 {
512 return cmd_ctx->current_target_override
513 ? cmd_ctx->current_target_override
514 : cmd_ctx->current_target;
515 }
516
/* Poll the target's state once via its driver, and watch for a pending
 * halt request (see target_halt()) timing out; on timeout GDB is notified
 * through the TARGET_EVENT_GDB_HALT event. */
int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->halt_issued) {
		if (target->state == TARGET_HALTED)
			target->halt_issued = false;
		else {
			/* Still not halted: check elapsed time since the request. */
			int64_t t = timeval_ms() - target->halt_issued_time;
			if (t > DEFAULT_HALT_TIMEOUT) {
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}
546
547 int target_halt(struct target *target)
548 {
549 int retval;
550 /* We can't poll until after examine */
551 if (!target_was_examined(target)) {
552 LOG_ERROR("Target not examined yet");
553 return ERROR_FAIL;
554 }
555
556 retval = target->type->halt(target);
557 if (retval != ERROR_OK)
558 return retval;
559
560 target->halt_issued = true;
561 target->halt_issued_time = timeval_ms();
562
563 return ERROR_OK;
564 }
565
/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 *	of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 *	should be skipped.  (For example, maybe execution was stopped by
 *	such a breakpoint, in which case it would be counterproductive to
 *	let it re-trigger.)
 * @param debug_execution False if all working areas allocated by OpenOCD
 *	should be released and/or restored to their original contents.
 *	(This would for example be true to run some downloaded "helper"
 *	algorithm code, which resides in one such working buffer and uses
 *	another for data storage.)
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies.  For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled.  (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm.... and
 * enabling them could cause such helpers to malfunction (for example,
 * by overwriting data with a breakpoint instruction.  On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	/*
	 * resume() triggers the event 'resumed'. The execution of TCL commands
	 * in the event handler causes the polling of targets. If the target has
	 * already halted for a breakpoint, polling will run the 'halted' event
	 * handler before the pending 'resumed' handler.
	 * Disable polling during resume() to guarantee the execution of handlers
	 * in the correct order.
	 */
	bool save_poll_mask = jtag_poll_mask();
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	jtag_poll_unmask(save_poll_mask);

	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
632
633 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
634 {
635 char buf[100];
636 int retval;
637 const struct nvp *n;
638 n = nvp_value2name(nvp_reset_modes, reset_mode);
639 if (!n->name) {
640 LOG_ERROR("invalid reset mode");
641 return ERROR_FAIL;
642 }
643
644 struct target *target;
645 for (target = all_targets; target; target = target->next)
646 target_call_reset_callbacks(target, reset_mode);
647
648 /* disable polling during reset to make reset event scripts
649 * more predictable, i.e. dr/irscan & pathmove in events will
650 * not have JTAG operations injected into the middle of a sequence.
651 */
652 bool save_poll_mask = jtag_poll_mask();
653
654 sprintf(buf, "ocd_process_reset %s", n->name);
655 retval = Jim_Eval(cmd->ctx->interp, buf);
656
657 jtag_poll_unmask(save_poll_mask);
658
659 if (retval != JIM_OK) {
660 Jim_MakeErrorMessage(cmd->ctx->interp);
661 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
662 return ERROR_FAIL;
663 }
664
665 /* We want any events to be processed before the prompt */
666 retval = target_call_timer_callbacks_now();
667
668 for (target = all_targets; target; target = target->next) {
669 target->type->check_reset(target);
670 target->running_alg = false;
671 }
672
673 return retval;
674 }
675
676 static int identity_virt2phys(struct target *target,
677 target_addr_t virtual, target_addr_t *physical)
678 {
679 *physical = virtual;
680 return ERROR_OK;
681 }
682
683 static int no_mmu(struct target *target, int *enabled)
684 {
685 *enabled = 0;
686 return ERROR_OK;
687 }
688
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static inline void target_reset_examined(struct target *target)
{
	target->examined = false;
}

/* Default examine() callback: nothing to probe, just mark examined. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}

/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
709
/* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
 * Keep in sync */
/* Examine a single target: fires EXAMINE_START, runs the driver's
 * examine(), then fires EXAMINE_END on success or EXAMINE_FAIL (after
 * clearing the examined flag) on failure. */
int target_examine_one(struct target *target)
{
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

	int retval = target->type->examine(target);
	if (retval != ERROR_OK) {
		/* Examine failed: make sure the flag is not left set. */
		target_reset_examined(target);
		target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
		return retval;
	}

	target_set_examined(target);
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);

	return ERROR_OK;
}
728
/* One-shot JTAG event callback: once the target's TAP becomes enabled,
 * deregister ourselves and run the deferred examination. Registered by
 * target_examine() for targets whose TAP is disabled at startup. */
static int jtag_enable_callback(enum jtag_event event, void *priv)
{
	struct target *target = priv;

	/* Ignore everything until the TAP is actually enabled. */
	if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
		return ERROR_OK;

	jtag_unregister_event_callback(jtag_enable_callback, target);

	return target_examine_one(target);
}
740
/* Targets that correctly implement init + examine, i.e.
 * no communication with target during init:
 *
 * XScale
 */
/* Examine all configured targets. Targets with a disabled TAP are
 * examined later via jtag_enable_callback(); targets with defer_examine
 * set are skipped. Returns the last failure, or ERROR_OK. */
int target_examine(void)
{
	int retval = ERROR_OK;
	struct target *target;

	for (target = all_targets; target; target = target->next) {
		/* defer examination, but don't skip it */
		if (!target->tap->enabled) {
			jtag_register_event_callback(jtag_enable_callback,
					target);
			continue;
		}

		if (target->defer_examine)
			continue;

		/* Keep going after a failure so other targets still get examined;
		 * remember the failure to report it to the caller. */
		int retval2 = target_examine_one(target);
		if (retval2 != ERROR_OK) {
			LOG_WARNING("target %s examination failed", target_name(target));
			retval = retval2;
		}
	}
	return retval;
}
770
/* Name of the target's driver type (e.g. for diagnostics). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}

/* Invoke the driver's soft_reset_halt() if the target is examined and
 * the driver provides one; otherwise log an error and fail. */
static int target_soft_reset_halt(struct target *target)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->soft_reset_halt) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt(target);
}
789
/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param num_mem_params count of memory-based params to pass
 * @param mem_params memory-based params
 * @param num_reg_params count of register-based params to pass
 * @param reg_param register-based params
 * @param entry_point address on the target where execution starts
 * @param exit_point address marking the end of the algorithm
 * @param timeout_ms maximum time to wait for completion
 * @param arch_info target-specific description of the algorithm.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		target_addr_t entry_point, target_addr_t exit_point,
		unsigned int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	/* Flag that an algorithm is in flight for the whole synchronous run. */
	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
836
/**
 * Executes a target-specific native code algorithm and leaves it running.
 * Pair with target_wait_algorithm() to collect the result.
 *
 * @param target used to run the algorithm
 * @param num_mem_params count of memory-based params to pass
 * @param mem_params memory-based params
 * @param num_reg_params count of register-based params to pass
 * @param reg_params register-based params
 * @param entry_point address on the target where execution starts
 * @param exit_point address marking the end of the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t entry_point, target_addr_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	/* Only one algorithm may run at a time. */
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	/* running_alg stays set until target_wait_algorithm() completes. */
	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}
880
/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 *
 * @param target used to run the algorithm
 * @param num_mem_params count of memory-based params to collect
 * @param mem_params memory-based params
 * @param num_reg_params count of register-based params to collect
 * @param reg_params register-based params
 * @param exit_point address marking the end of the algorithm
 * @param timeout_ms maximum time to wait for completion
 * @param arch_info target-specific description of the algorithm.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t exit_point, unsigned int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	/* On timeout the algorithm is still considered running, so the
	 * caller may call this function again to keep waiting. */
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}
921
922 /**
923 * Streams data to a circular buffer on target intended for consumption by code
924 * running asynchronously on target.
925 *
926 * This is intended for applications where target-specific native code runs
927 * on the target, receives data from the circular buffer, does something with
928 * it (most likely writing it to a flash memory), and advances the circular
929 * buffer pointer.
930 *
931 * This assumes that the helper algorithm has already been loaded to the target,
932 * but has not been started yet. Given memory and register parameters are passed
933 * to the algorithm.
934 *
935 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
936 * following format:
937 *
938 * [buffer_start + 0, buffer_start + 4):
939 * Write Pointer address (aka head). Written and updated by this
940 * routine when new data is written to the circular buffer.
941 * [buffer_start + 4, buffer_start + 8):
942 * Read Pointer address (aka tail). Updated by code running on the
943 * target after it consumes data.
944 * [buffer_start + 8, buffer_start + buffer_size):
945 * Circular buffer contents.
946 *
947 * See contrib/loaders/flash/stm32f1x.S for an example.
948 *
949 * @param target used to run the algorithm
950 * @param buffer address on the host where data to be sent is located
951 * @param count number of blocks to send
952 * @param block_size size in bytes of each block
953 * @param num_mem_params count of memory-based params to pass to algorithm
954 * @param mem_params memory-based params to pass to algorithm
955 * @param num_reg_params count of register-based params to pass to algorithm
956 * @param reg_params memory-based params to pass to algorithm
957 * @param buffer_start address on the target of the circular buffer structure
958 * @param buffer_size size of the circular buffer structure
959 * @param entry_point address on the target to execute to start the algorithm
960 * @param exit_point address at which to set a breakpoint to catch the
961 * end of the algorithm; can be 0 if target triggers a breakpoint itself
962 * @param arch_info
963 */
964
965 int target_run_flash_async_algorithm(struct target *target,
966 const uint8_t *buffer, uint32_t count, int block_size,
967 int num_mem_params, struct mem_param *mem_params,
968 int num_reg_params, struct reg_param *reg_params,
969 uint32_t buffer_start, uint32_t buffer_size,
970 uint32_t entry_point, uint32_t exit_point, void *arch_info)
971 {
972 int retval;
973 int timeout = 0;
974
975 const uint8_t *buffer_orig = buffer;
976
977 /* Set up working area. First word is write pointer, second word is read pointer,
978 * rest is fifo data area. */
979 uint32_t wp_addr = buffer_start;
980 uint32_t rp_addr = buffer_start + 4;
981 uint32_t fifo_start_addr = buffer_start + 8;
982 uint32_t fifo_end_addr = buffer_start + buffer_size;
983
984 uint32_t wp = fifo_start_addr;
985 uint32_t rp = fifo_start_addr;
986
987 /* validate block_size is 2^n */
988 assert(IS_PWR_OF_2(block_size));
989
990 retval = target_write_u32(target, wp_addr, wp);
991 if (retval != ERROR_OK)
992 return retval;
993 retval = target_write_u32(target, rp_addr, rp);
994 if (retval != ERROR_OK)
995 return retval;
996
997 /* Start up algorithm on target and let it idle while writing the first chunk */
998 retval = target_start_algorithm(target, num_mem_params, mem_params,
999 num_reg_params, reg_params,
1000 entry_point,
1001 exit_point,
1002 arch_info);
1003
1004 if (retval != ERROR_OK) {
1005 LOG_ERROR("error starting target flash write algorithm");
1006 return retval;
1007 }
1008
1009 while (count > 0) {
1010
1011 retval = target_read_u32(target, rp_addr, &rp);
1012 if (retval != ERROR_OK) {
1013 LOG_ERROR("failed to get read pointer");
1014 break;
1015 }
1016
1017 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1018 (size_t) (buffer - buffer_orig), count, wp, rp);
1019
1020 if (rp == 0) {
1021 LOG_ERROR("flash write algorithm aborted by target");
1022 retval = ERROR_FLASH_OPERATION_FAILED;
1023 break;
1024 }
1025
1026 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1027 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1028 break;
1029 }
1030
1031 /* Count the number of bytes available in the fifo without
1032 * crossing the wrap around. Make sure to not fill it completely,
1033 * because that would make wp == rp and that's the empty condition. */
1034 uint32_t thisrun_bytes;
1035 if (rp > wp)
1036 thisrun_bytes = rp - wp - block_size;
1037 else if (rp > fifo_start_addr)
1038 thisrun_bytes = fifo_end_addr - wp;
1039 else
1040 thisrun_bytes = fifo_end_addr - wp - block_size;
1041
1042 if (thisrun_bytes == 0) {
1043 /* Throttle polling a bit if transfer is (much) faster than flash
1044 * programming. The exact delay shouldn't matter as long as it's
1045 * less than buffer size / flash speed. This is very unlikely to
1046 * run when using high latency connections such as USB. */
1047 alive_sleep(2);
1048
1049 /* to stop an infinite loop on some targets check and increment a timeout
1050 * this issue was observed on a stellaris using the new ICDI interface */
1051 if (timeout++ >= 2500) {
1052 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1053 return ERROR_FLASH_OPERATION_FAILED;
1054 }
1055 continue;
1056 }
1057
1058 /* reset our timeout */
1059 timeout = 0;
1060
1061 /* Limit to the amount of data we actually want to write */
1062 if (thisrun_bytes > count * block_size)
1063 thisrun_bytes = count * block_size;
1064
1065 /* Force end of large blocks to be word aligned */
1066 if (thisrun_bytes >= 16)
1067 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1068
1069 /* Write data to fifo */
1070 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1071 if (retval != ERROR_OK)
1072 break;
1073
1074 /* Update counters and wrap write pointer */
1075 buffer += thisrun_bytes;
1076 count -= thisrun_bytes / block_size;
1077 wp += thisrun_bytes;
1078 if (wp >= fifo_end_addr)
1079 wp = fifo_start_addr;
1080
1081 /* Store updated write pointer to target */
1082 retval = target_write_u32(target, wp_addr, wp);
1083 if (retval != ERROR_OK)
1084 break;
1085
1086 /* Avoid GDB timeouts */
1087 keep_alive();
1088 }
1089
1090 if (retval != ERROR_OK) {
1091 /* abort flash write algorithm on target */
1092 target_write_u32(target, wp_addr, 0);
1093 }
1094
1095 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1096 num_reg_params, reg_params,
1097 exit_point,
1098 10000,
1099 arch_info);
1100
1101 if (retval2 != ERROR_OK) {
1102 LOG_ERROR("error waiting for target flash write algorithm");
1103 retval = retval2;
1104 }
1105
1106 if (retval == ERROR_OK) {
1107 /* check if algorithm set rp = 0 after fifo writer loop finished */
1108 retval = target_read_u32(target, rp_addr, &rp);
1109 if (retval == ERROR_OK && rp == 0) {
1110 LOG_ERROR("flash write algorithm aborted by target");
1111 retval = ERROR_FLASH_OPERATION_FAILED;
1112 }
1113 }
1114
1115 return retval;
1116 }
1117
1118 int target_run_read_async_algorithm(struct target *target,
1119 uint8_t *buffer, uint32_t count, int block_size,
1120 int num_mem_params, struct mem_param *mem_params,
1121 int num_reg_params, struct reg_param *reg_params,
1122 uint32_t buffer_start, uint32_t buffer_size,
1123 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1124 {
1125 int retval;
1126 int timeout = 0;
1127
1128 const uint8_t *buffer_orig = buffer;
1129
1130 /* Set up working area. First word is write pointer, second word is read pointer,
1131 * rest is fifo data area. */
1132 uint32_t wp_addr = buffer_start;
1133 uint32_t rp_addr = buffer_start + 4;
1134 uint32_t fifo_start_addr = buffer_start + 8;
1135 uint32_t fifo_end_addr = buffer_start + buffer_size;
1136
1137 uint32_t wp = fifo_start_addr;
1138 uint32_t rp = fifo_start_addr;
1139
1140 /* validate block_size is 2^n */
1141 assert(IS_PWR_OF_2(block_size));
1142
1143 retval = target_write_u32(target, wp_addr, wp);
1144 if (retval != ERROR_OK)
1145 return retval;
1146 retval = target_write_u32(target, rp_addr, rp);
1147 if (retval != ERROR_OK)
1148 return retval;
1149
1150 /* Start up algorithm on target */
1151 retval = target_start_algorithm(target, num_mem_params, mem_params,
1152 num_reg_params, reg_params,
1153 entry_point,
1154 exit_point,
1155 arch_info);
1156
1157 if (retval != ERROR_OK) {
1158 LOG_ERROR("error starting target flash read algorithm");
1159 return retval;
1160 }
1161
1162 while (count > 0) {
1163 retval = target_read_u32(target, wp_addr, &wp);
1164 if (retval != ERROR_OK) {
1165 LOG_ERROR("failed to get write pointer");
1166 break;
1167 }
1168
1169 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1170 (size_t)(buffer - buffer_orig), count, wp, rp);
1171
1172 if (wp == 0) {
1173 LOG_ERROR("flash read algorithm aborted by target");
1174 retval = ERROR_FLASH_OPERATION_FAILED;
1175 break;
1176 }
1177
1178 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1179 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1180 break;
1181 }
1182
1183 /* Count the number of bytes available in the fifo without
1184 * crossing the wrap around. */
1185 uint32_t thisrun_bytes;
1186 if (wp >= rp)
1187 thisrun_bytes = wp - rp;
1188 else
1189 thisrun_bytes = fifo_end_addr - rp;
1190
1191 if (thisrun_bytes == 0) {
1192 /* Throttle polling a bit if transfer is (much) faster than flash
1193 * reading. The exact delay shouldn't matter as long as it's
1194 * less than buffer size / flash speed. This is very unlikely to
1195 * run when using high latency connections such as USB. */
1196 alive_sleep(2);
1197
1198 /* to stop an infinite loop on some targets check and increment a timeout
1199 * this issue was observed on a stellaris using the new ICDI interface */
1200 if (timeout++ >= 2500) {
1201 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1202 return ERROR_FLASH_OPERATION_FAILED;
1203 }
1204 continue;
1205 }
1206
1207 /* Reset our timeout */
1208 timeout = 0;
1209
1210 /* Limit to the amount of data we actually want to read */
1211 if (thisrun_bytes > count * block_size)
1212 thisrun_bytes = count * block_size;
1213
1214 /* Force end of large blocks to be word aligned */
1215 if (thisrun_bytes >= 16)
1216 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1217
1218 /* Read data from fifo */
1219 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1220 if (retval != ERROR_OK)
1221 break;
1222
1223 /* Update counters and wrap write pointer */
1224 buffer += thisrun_bytes;
1225 count -= thisrun_bytes / block_size;
1226 rp += thisrun_bytes;
1227 if (rp >= fifo_end_addr)
1228 rp = fifo_start_addr;
1229
1230 /* Store updated write pointer to target */
1231 retval = target_write_u32(target, rp_addr, rp);
1232 if (retval != ERROR_OK)
1233 break;
1234
1235 /* Avoid GDB timeouts */
1236 keep_alive();
1237
1238 }
1239
1240 if (retval != ERROR_OK) {
1241 /* abort flash write algorithm on target */
1242 target_write_u32(target, rp_addr, 0);
1243 }
1244
1245 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1246 num_reg_params, reg_params,
1247 exit_point,
1248 10000,
1249 arch_info);
1250
1251 if (retval2 != ERROR_OK) {
1252 LOG_ERROR("error waiting for target flash write algorithm");
1253 retval = retval2;
1254 }
1255
1256 if (retval == ERROR_OK) {
1257 /* check if algorithm set wp = 0 after fifo writer loop finished */
1258 retval = target_read_u32(target, wp_addr, &wp);
1259 if (retval == ERROR_OK && wp == 0) {
1260 LOG_ERROR("flash read algorithm aborted by target");
1261 retval = ERROR_FLASH_OPERATION_FAILED;
1262 }
1263 }
1264
1265 return retval;
1266 }
1267
1268 int target_read_memory(struct target *target,
1269 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1270 {
1271 if (!target_was_examined(target)) {
1272 LOG_ERROR("Target not examined yet");
1273 return ERROR_FAIL;
1274 }
1275 if (!target->type->read_memory) {
1276 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1277 return ERROR_FAIL;
1278 }
1279 return target->type->read_memory(target, address, size, count, buffer);
1280 }
1281
1282 int target_read_phys_memory(struct target *target,
1283 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1284 {
1285 if (!target_was_examined(target)) {
1286 LOG_ERROR("Target not examined yet");
1287 return ERROR_FAIL;
1288 }
1289 if (!target->type->read_phys_memory) {
1290 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1291 return ERROR_FAIL;
1292 }
1293 return target->type->read_phys_memory(target, address, size, count, buffer);
1294 }
1295
1296 int target_write_memory(struct target *target,
1297 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1298 {
1299 if (!target_was_examined(target)) {
1300 LOG_ERROR("Target not examined yet");
1301 return ERROR_FAIL;
1302 }
1303 if (!target->type->write_memory) {
1304 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1305 return ERROR_FAIL;
1306 }
1307 return target->type->write_memory(target, address, size, count, buffer);
1308 }
1309
1310 int target_write_phys_memory(struct target *target,
1311 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1312 {
1313 if (!target_was_examined(target)) {
1314 LOG_ERROR("Target not examined yet");
1315 return ERROR_FAIL;
1316 }
1317 if (!target->type->write_phys_memory) {
1318 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1319 return ERROR_FAIL;
1320 }
1321 return target->type->write_phys_memory(target, address, size, count, buffer);
1322 }
1323
1324 int target_add_breakpoint(struct target *target,
1325 struct breakpoint *breakpoint)
1326 {
1327 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1328 LOG_TARGET_ERROR(target, "not halted (add breakpoint)");
1329 return ERROR_TARGET_NOT_HALTED;
1330 }
1331 return target->type->add_breakpoint(target, breakpoint);
1332 }
1333
1334 int target_add_context_breakpoint(struct target *target,
1335 struct breakpoint *breakpoint)
1336 {
1337 if (target->state != TARGET_HALTED) {
1338 LOG_TARGET_ERROR(target, "not halted (add context breakpoint)");
1339 return ERROR_TARGET_NOT_HALTED;
1340 }
1341 return target->type->add_context_breakpoint(target, breakpoint);
1342 }
1343
1344 int target_add_hybrid_breakpoint(struct target *target,
1345 struct breakpoint *breakpoint)
1346 {
1347 if (target->state != TARGET_HALTED) {
1348 LOG_TARGET_ERROR(target, "not halted (add hybrid breakpoint)");
1349 return ERROR_TARGET_NOT_HALTED;
1350 }
1351 return target->type->add_hybrid_breakpoint(target, breakpoint);
1352 }
1353
1354 int target_remove_breakpoint(struct target *target,
1355 struct breakpoint *breakpoint)
1356 {
1357 return target->type->remove_breakpoint(target, breakpoint);
1358 }
1359
1360 int target_add_watchpoint(struct target *target,
1361 struct watchpoint *watchpoint)
1362 {
1363 if (target->state != TARGET_HALTED) {
1364 LOG_TARGET_ERROR(target, "not halted (add watchpoint)");
1365 return ERROR_TARGET_NOT_HALTED;
1366 }
1367 return target->type->add_watchpoint(target, watchpoint);
1368 }
1369 int target_remove_watchpoint(struct target *target,
1370 struct watchpoint *watchpoint)
1371 {
1372 return target->type->remove_watchpoint(target, watchpoint);
1373 }
1374 int target_hit_watchpoint(struct target *target,
1375 struct watchpoint **hit_watchpoint)
1376 {
1377 if (target->state != TARGET_HALTED) {
1378 LOG_TARGET_ERROR(target, "not halted (hit watchpoint)");
1379 return ERROR_TARGET_NOT_HALTED;
1380 }
1381
1382 if (!target->type->hit_watchpoint) {
1383 /* For backward compatible, if hit_watchpoint is not implemented,
1384 * return ERROR_FAIL such that gdb_server will not take the nonsense
1385 * information. */
1386 return ERROR_FAIL;
1387 }
1388
1389 return target->type->hit_watchpoint(target, hit_watchpoint);
1390 }
1391
1392 const char *target_get_gdb_arch(struct target *target)
1393 {
1394 if (!target->type->get_gdb_arch)
1395 return NULL;
1396 return target->type->get_gdb_arch(target);
1397 }
1398
1399 int target_get_gdb_reg_list(struct target *target,
1400 struct reg **reg_list[], int *reg_list_size,
1401 enum target_register_class reg_class)
1402 {
1403 int result = ERROR_FAIL;
1404
1405 if (!target_was_examined(target)) {
1406 LOG_ERROR("Target not examined yet");
1407 goto done;
1408 }
1409
1410 result = target->type->get_gdb_reg_list(target, reg_list,
1411 reg_list_size, reg_class);
1412
1413 done:
1414 if (result != ERROR_OK) {
1415 *reg_list = NULL;
1416 *reg_list_size = 0;
1417 }
1418 return result;
1419 }
1420
1421 int target_get_gdb_reg_list_noread(struct target *target,
1422 struct reg **reg_list[], int *reg_list_size,
1423 enum target_register_class reg_class)
1424 {
1425 if (target->type->get_gdb_reg_list_noread &&
1426 target->type->get_gdb_reg_list_noread(target, reg_list,
1427 reg_list_size, reg_class) == ERROR_OK)
1428 return ERROR_OK;
1429 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1430 }
1431
1432 bool target_supports_gdb_connection(struct target *target)
1433 {
1434 /*
1435 * exclude all the targets that don't provide get_gdb_reg_list
1436 * or that have explicit gdb_max_connection == 0
1437 */
1438 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1439 }
1440
1441 int target_step(struct target *target,
1442 int current, target_addr_t address, int handle_breakpoints)
1443 {
1444 int retval;
1445
1446 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1447
1448 retval = target->type->step(target, current, address, handle_breakpoints);
1449 if (retval != ERROR_OK)
1450 return retval;
1451
1452 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1453
1454 return retval;
1455 }
1456
1457 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1458 {
1459 if (target->state != TARGET_HALTED) {
1460 LOG_TARGET_ERROR(target, "not halted (gdb fileio)");
1461 return ERROR_TARGET_NOT_HALTED;
1462 }
1463 return target->type->get_gdb_fileio_info(target, fileio_info);
1464 }
1465
1466 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1467 {
1468 if (target->state != TARGET_HALTED) {
1469 LOG_TARGET_ERROR(target, "not halted (gdb fileio end)");
1470 return ERROR_TARGET_NOT_HALTED;
1471 }
1472 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1473 }
1474
1475 target_addr_t target_address_max(struct target *target)
1476 {
1477 unsigned bits = target_address_bits(target);
1478 if (sizeof(target_addr_t) * 8 == bits)
1479 return (target_addr_t) -1;
1480 else
1481 return (((target_addr_t) 1) << bits) - 1;
1482 }
1483
1484 unsigned target_address_bits(struct target *target)
1485 {
1486 if (target->type->address_bits)
1487 return target->type->address_bits(target);
1488 return 32;
1489 }
1490
1491 unsigned int target_data_bits(struct target *target)
1492 {
1493 if (target->type->data_bits)
1494 return target->type->data_bits(target);
1495 return 32;
1496 }
1497
1498 static int target_profiling(struct target *target, uint32_t *samples,
1499 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1500 {
1501 return target->type->profiling(target, samples, max_num_samples,
1502 num_samples, seconds);
1503 }
1504
1505 static int handle_target(void *priv);
1506
1507 static int target_init_one(struct command_context *cmd_ctx,
1508 struct target *target)
1509 {
1510 target_reset_examined(target);
1511
1512 struct target_type *type = target->type;
1513 if (!type->examine)
1514 type->examine = default_examine;
1515
1516 if (!type->check_reset)
1517 type->check_reset = default_check_reset;
1518
1519 assert(type->init_target);
1520
1521 int retval = type->init_target(cmd_ctx, target);
1522 if (retval != ERROR_OK) {
1523 LOG_ERROR("target '%s' init failed", target_name(target));
1524 return retval;
1525 }
1526
1527 /* Sanity-check MMU support ... stub in what we must, to help
1528 * implement it in stages, but warn if we need to do so.
1529 */
1530 if (type->mmu) {
1531 if (!type->virt2phys) {
1532 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1533 type->virt2phys = identity_virt2phys;
1534 }
1535 } else {
1536 /* Make sure no-MMU targets all behave the same: make no
1537 * distinction between physical and virtual addresses, and
1538 * ensure that virt2phys() is always an identity mapping.
1539 */
1540 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1541 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1542
1543 type->mmu = no_mmu;
1544 type->write_phys_memory = type->write_memory;
1545 type->read_phys_memory = type->read_memory;
1546 type->virt2phys = identity_virt2phys;
1547 }
1548
1549 if (!target->type->read_buffer)
1550 target->type->read_buffer = target_read_buffer_default;
1551
1552 if (!target->type->write_buffer)
1553 target->type->write_buffer = target_write_buffer_default;
1554
1555 if (!target->type->get_gdb_fileio_info)
1556 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1557
1558 if (!target->type->gdb_fileio_end)
1559 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1560
1561 if (!target->type->profiling)
1562 target->type->profiling = target_profiling_default;
1563
1564 return ERROR_OK;
1565 }
1566
1567 static int target_init(struct command_context *cmd_ctx)
1568 {
1569 struct target *target;
1570 int retval;
1571
1572 for (target = all_targets; target; target = target->next) {
1573 retval = target_init_one(cmd_ctx, target);
1574 if (retval != ERROR_OK)
1575 return retval;
1576 }
1577
1578 if (!all_targets)
1579 return ERROR_OK;
1580
1581 retval = target_register_user_commands(cmd_ctx);
1582 if (retval != ERROR_OK)
1583 return retval;
1584
1585 retval = target_register_timer_callback(&handle_target,
1586 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1587 if (retval != ERROR_OK)
1588 return retval;
1589
1590 return ERROR_OK;
1591 }
1592
1593 COMMAND_HANDLER(handle_target_init_command)
1594 {
1595 int retval;
1596
1597 if (CMD_ARGC != 0)
1598 return ERROR_COMMAND_SYNTAX_ERROR;
1599
1600 static bool target_initialized;
1601 if (target_initialized) {
1602 LOG_INFO("'target init' has already been called");
1603 return ERROR_OK;
1604 }
1605 target_initialized = true;
1606
1607 retval = command_run_line(CMD_CTX, "init_targets");
1608 if (retval != ERROR_OK)
1609 return retval;
1610
1611 retval = command_run_line(CMD_CTX, "init_target_events");
1612 if (retval != ERROR_OK)
1613 return retval;
1614
1615 retval = command_run_line(CMD_CTX, "init_board");
1616 if (retval != ERROR_OK)
1617 return retval;
1618
1619 LOG_DEBUG("Initializing targets...");
1620 return target_init(CMD_CTX);
1621 }
1622
1623 int target_register_event_callback(int (*callback)(struct target *target,
1624 enum target_event event, void *priv), void *priv)
1625 {
1626 struct target_event_callback **callbacks_p = &target_event_callbacks;
1627
1628 if (!callback)
1629 return ERROR_COMMAND_SYNTAX_ERROR;
1630
1631 if (*callbacks_p) {
1632 while ((*callbacks_p)->next)
1633 callbacks_p = &((*callbacks_p)->next);
1634 callbacks_p = &((*callbacks_p)->next);
1635 }
1636
1637 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1638 (*callbacks_p)->callback = callback;
1639 (*callbacks_p)->priv = priv;
1640 (*callbacks_p)->next = NULL;
1641
1642 return ERROR_OK;
1643 }
1644
1645 int target_register_reset_callback(int (*callback)(struct target *target,
1646 enum target_reset_mode reset_mode, void *priv), void *priv)
1647 {
1648 struct target_reset_callback *entry;
1649
1650 if (!callback)
1651 return ERROR_COMMAND_SYNTAX_ERROR;
1652
1653 entry = malloc(sizeof(struct target_reset_callback));
1654 if (!entry) {
1655 LOG_ERROR("error allocating buffer for reset callback entry");
1656 return ERROR_COMMAND_SYNTAX_ERROR;
1657 }
1658
1659 entry->callback = callback;
1660 entry->priv = priv;
1661 list_add(&entry->list, &target_reset_callback_list);
1662
1663
1664 return ERROR_OK;
1665 }
1666
1667 int target_register_trace_callback(int (*callback)(struct target *target,
1668 size_t len, uint8_t *data, void *priv), void *priv)
1669 {
1670 struct target_trace_callback *entry;
1671
1672 if (!callback)
1673 return ERROR_COMMAND_SYNTAX_ERROR;
1674
1675 entry = malloc(sizeof(struct target_trace_callback));
1676 if (!entry) {
1677 LOG_ERROR("error allocating buffer for trace callback entry");
1678 return ERROR_COMMAND_SYNTAX_ERROR;
1679 }
1680
1681 entry->callback = callback;
1682 entry->priv = priv;
1683 list_add(&entry->list, &target_trace_callback_list);
1684
1685
1686 return ERROR_OK;
1687 }
1688
1689 int target_register_timer_callback(int (*callback)(void *priv),
1690 unsigned int time_ms, enum target_timer_type type, void *priv)
1691 {
1692 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1693
1694 if (!callback)
1695 return ERROR_COMMAND_SYNTAX_ERROR;
1696
1697 if (*callbacks_p) {
1698 while ((*callbacks_p)->next)
1699 callbacks_p = &((*callbacks_p)->next);
1700 callbacks_p = &((*callbacks_p)->next);
1701 }
1702
1703 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1704 (*callbacks_p)->callback = callback;
1705 (*callbacks_p)->type = type;
1706 (*callbacks_p)->time_ms = time_ms;
1707 (*callbacks_p)->removed = false;
1708
1709 (*callbacks_p)->when = timeval_ms() + time_ms;
1710 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1711
1712 (*callbacks_p)->priv = priv;
1713 (*callbacks_p)->next = NULL;
1714
1715 return ERROR_OK;
1716 }
1717
1718 int target_unregister_event_callback(int (*callback)(struct target *target,
1719 enum target_event event, void *priv), void *priv)
1720 {
1721 struct target_event_callback **p = &target_event_callbacks;
1722 struct target_event_callback *c = target_event_callbacks;
1723
1724 if (!callback)
1725 return ERROR_COMMAND_SYNTAX_ERROR;
1726
1727 while (c) {
1728 struct target_event_callback *next = c->next;
1729 if ((c->callback == callback) && (c->priv == priv)) {
1730 *p = next;
1731 free(c);
1732 return ERROR_OK;
1733 } else
1734 p = &(c->next);
1735 c = next;
1736 }
1737
1738 return ERROR_OK;
1739 }
1740
1741 int target_unregister_reset_callback(int (*callback)(struct target *target,
1742 enum target_reset_mode reset_mode, void *priv), void *priv)
1743 {
1744 struct target_reset_callback *entry;
1745
1746 if (!callback)
1747 return ERROR_COMMAND_SYNTAX_ERROR;
1748
1749 list_for_each_entry(entry, &target_reset_callback_list, list) {
1750 if (entry->callback == callback && entry->priv == priv) {
1751 list_del(&entry->list);
1752 free(entry);
1753 break;
1754 }
1755 }
1756
1757 return ERROR_OK;
1758 }
1759
1760 int target_unregister_trace_callback(int (*callback)(struct target *target,
1761 size_t len, uint8_t *data, void *priv), void *priv)
1762 {
1763 struct target_trace_callback *entry;
1764
1765 if (!callback)
1766 return ERROR_COMMAND_SYNTAX_ERROR;
1767
1768 list_for_each_entry(entry, &target_trace_callback_list, list) {
1769 if (entry->callback == callback && entry->priv == priv) {
1770 list_del(&entry->list);
1771 free(entry);
1772 break;
1773 }
1774 }
1775
1776 return ERROR_OK;
1777 }
1778
1779 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1780 {
1781 if (!callback)
1782 return ERROR_COMMAND_SYNTAX_ERROR;
1783
1784 for (struct target_timer_callback *c = target_timer_callbacks;
1785 c; c = c->next) {
1786 if ((c->callback == callback) && (c->priv == priv)) {
1787 c->removed = true;
1788 return ERROR_OK;
1789 }
1790 }
1791
1792 return ERROR_FAIL;
1793 }
1794
1795 int target_call_event_callbacks(struct target *target, enum target_event event)
1796 {
1797 struct target_event_callback *callback = target_event_callbacks;
1798 struct target_event_callback *next_callback;
1799
1800 if (event == TARGET_EVENT_HALTED) {
1801 /* execute early halted first */
1802 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1803 }
1804
1805 LOG_DEBUG("target event %i (%s) for core %s", event,
1806 target_event_name(event),
1807 target_name(target));
1808
1809 target_handle_event(target, event);
1810
1811 while (callback) {
1812 next_callback = callback->next;
1813 callback->callback(target, event, callback->priv);
1814 callback = next_callback;
1815 }
1816
1817 return ERROR_OK;
1818 }
1819
1820 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1821 {
1822 struct target_reset_callback *callback;
1823
1824 LOG_DEBUG("target reset %i (%s)", reset_mode,
1825 nvp_value2name(nvp_reset_modes, reset_mode)->name);
1826
1827 list_for_each_entry(callback, &target_reset_callback_list, list)
1828 callback->callback(target, reset_mode, callback->priv);
1829
1830 return ERROR_OK;
1831 }
1832
1833 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1834 {
1835 struct target_trace_callback *callback;
1836
1837 list_for_each_entry(callback, &target_trace_callback_list, list)
1838 callback->callback(target, len, data, callback->priv);
1839
1840 return ERROR_OK;
1841 }
1842
1843 static int target_timer_callback_periodic_restart(
1844 struct target_timer_callback *cb, int64_t *now)
1845 {
1846 cb->when = *now + cb->time_ms;
1847 return ERROR_OK;
1848 }
1849
1850 static int target_call_timer_callback(struct target_timer_callback *cb,
1851 int64_t *now)
1852 {
1853 cb->callback(cb->priv);
1854
1855 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1856 return target_timer_callback_periodic_restart(cb, now);
1857
1858 return target_unregister_timer_callback(cb->callback, cb->priv);
1859 }
1860
/* Walk the timer-callback list: reap entries flagged ->removed, run the
 * callbacks that are due, and recompute target_timer_next_event_value.
 * With checktime == 0, periodic callbacks run regardless of deadline.
 * Always returns ERROR_OK; a nested invocation is a no-op. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	/* Re-entrancy guard: a callback may end up back here indirectly;
	 * the nested call must not walk the list again. */
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		/* Lazily unlink and free entries flagged by
		 * target_unregister_timer_callback(). */
		if ((*callback)->removed) {
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		/* Due now, or unconditionally for periodic timers when
		 * checktime == 0 ("run immediately" mode). */
		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* Track the earliest pending deadline for target_timer_next_event().
		 * The call above may have flagged this entry removed. */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1908
/* Run the timer callbacks that are due (deadline-based dispatch). */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1913
/* invoke periodic callbacks immediately */
int target_call_timer_callbacks_now(void)
{
	/* checktime == 0: periodic callbacks fire regardless of deadline. */
	return target_call_timer_callbacks_check_time(0);
}
1919
/* Earliest pending timer deadline as absolute milliseconds, maintained by
 * target_call_timer_callbacks_check_time() and callback registration. */
int64_t target_timer_next_event(void)
{
	return target_timer_next_event_value;
}
1924
1925 /* Prints the working area layout for debug purposes */
1926 static void print_wa_layout(struct target *target)
1927 {
1928 struct working_area *c = target->working_areas;
1929
1930 while (c) {
1931 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1932 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1933 c->address, c->address + c->size - 1, c->size);
1934 c = c->next;
1935 }
1936 }
1937
1938 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1939 static void target_split_working_area(struct working_area *area, uint32_t size)
1940 {
1941 assert(area->free); /* Shouldn't split an allocated area */
1942 assert(size <= area->size); /* Caller should guarantee this */
1943
1944 /* Split only if not already the right size */
1945 if (size < area->size) {
1946 struct working_area *new_wa = malloc(sizeof(*new_wa));
1947
1948 if (!new_wa)
1949 return;
1950
1951 new_wa->next = area->next;
1952 new_wa->size = area->size - size;
1953 new_wa->address = area->address + size;
1954 new_wa->backup = NULL;
1955 new_wa->user = NULL;
1956 new_wa->free = true;
1957
1958 area->next = new_wa;
1959 area->size = size;
1960
1961 /* If backup memory was allocated to this area, it has the wrong size
1962 * now so free it and it will be reallocated if/when needed */
1963 free(area->backup);
1964 area->backup = NULL;
1965 }
1966 }
1967
1968 /* Merge all adjacent free areas into one */
1969 static void target_merge_working_areas(struct target *target)
1970 {
1971 struct working_area *c = target->working_areas;
1972
1973 while (c && c->next) {
1974 assert(c->next->address == c->address + c->size); /* This is an invariant */
1975
1976 /* Find two adjacent free areas */
1977 if (c->free && c->next->free) {
1978 /* Merge the last into the first */
1979 c->size += c->next->size;
1980
1981 /* Remove the last */
1982 struct working_area *to_be_freed = c->next;
1983 c->next = c->next->next;
1984 free(to_be_freed->backup);
1985 free(to_be_freed);
1986
1987 /* If backup memory was allocated to the remaining area, it's has
1988 * the wrong size now */
1989 free(c->backup);
1990 c->backup = NULL;
1991 } else {
1992 c = c->next;
1993 }
1994 }
1995 }
1996
/* Allocate a chunk of the target's working area.
 *
 * On the first call (empty area list) the base address is (re)chosen from
 * -work-area-phys or -work-area-virt depending on the current MMU state,
 * so the working area location can change if the MMU state changed while
 * no areas were allocated.
 *
 * On success stores the allocated area in *area and records 'area' as the
 * user pointer (cleared again when the area is freed). Returns ERROR_OK,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no large enough free chunk
 * exists, or a lower-level error code.
 */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = ALIGN_DOWN(target->working_area_size, 4); /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* NOTE: if malloc failed this stores NULL and the search below
		 * falls through to ERROR_TARGET_RESOURCE_NOT_AVAILABLE */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	size = ALIGN_UP(size, 4);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
		size, c->address);

	if (target->backup_working_area) {
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		/* Save current target memory so it can be restored on free */
		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2090
2091 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2092 {
2093 int retval;
2094
2095 retval = target_alloc_working_area_try(target, size, area);
2096 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2097 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2098 return retval;
2099
2100 }
2101
2102 static int target_restore_working_area(struct target *target, struct working_area *area)
2103 {
2104 int retval = ERROR_OK;
2105
2106 if (target->backup_working_area && area->backup) {
2107 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2108 if (retval != ERROR_OK)
2109 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2110 area->size, area->address);
2111 }
2112
2113 return retval;
2114 }
2115
2116 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2117 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2118 {
2119 if (!area || area->free)
2120 return ERROR_OK;
2121
2122 int retval = ERROR_OK;
2123 if (restore) {
2124 retval = target_restore_working_area(target, area);
2125 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2126 if (retval != ERROR_OK)
2127 return retval;
2128 }
2129
2130 area->free = true;
2131
2132 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2133 area->size, area->address);
2134
2135 /* mark user pointer invalid */
2136 /* TODO: Is this really safe? It points to some previous caller's memory.
2137 * How could we know that the area pointer is still in that place and not
2138 * some other vital data? What's the purpose of this, anyway? */
2139 *area->user = NULL;
2140 area->user = NULL;
2141
2142 target_merge_working_areas(target);
2143
2144 print_wa_layout(target);
2145
2146 return retval;
2147 }
2148
/* Free a working area, restoring any backed-up target memory first. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	const int restore = 1;
	return target_free_working_area_restore(target, area, restore);
}
2153
2154 /* free resources and restore memory, if restoring memory fails,
2155 * free up resources anyway
2156 */
2157 static void target_free_all_working_areas_restore(struct target *target, int restore)
2158 {
2159 struct working_area *c = target->working_areas;
2160
2161 LOG_DEBUG("freeing all working areas");
2162
2163 /* Loop through all areas, restoring the allocated ones and marking them as free */
2164 while (c) {
2165 if (!c->free) {
2166 if (restore)
2167 target_restore_working_area(target, c);
2168 c->free = true;
2169 *c->user = NULL; /* Same as above */
2170 c->user = NULL;
2171 }
2172 c = c->next;
2173 }
2174
2175 /* Run a merge pass to combine all areas into one */
2176 target_merge_working_areas(target);
2177
2178 print_wa_layout(target);
2179 }
2180
2181 void target_free_all_working_areas(struct target *target)
2182 {
2183 target_free_all_working_areas_restore(target, 1);
2184
2185 /* Now we have none or only one working area marked as free */
2186 if (target->working_areas) {
2187 /* Free the last one to allow on-the-fly moving and resizing */
2188 free(target->working_areas->backup);
2189 free(target->working_areas);
2190 target->working_areas = NULL;
2191 }
2192 }
2193
2194 /* Find the largest number of bytes that can be allocated */
2195 uint32_t target_get_working_area_avail(struct target *target)
2196 {
2197 struct working_area *c = target->working_areas;
2198 uint32_t max_size = 0;
2199
2200 if (!c)
2201 return ALIGN_DOWN(target->working_area_size, 4);
2202
2203 while (c) {
2204 if (c->free && max_size < c->size)
2205 max_size = c->size;
2206
2207 c = c->next;
2208 }
2209
2210 return max_size;
2211 }
2212
/* Release all resources owned by a single target.
 *
 * The target type's deinit hook runs first, while the struct is still
 * intact; then semihosting state, JTAG callbacks, Tcl event handlers,
 * working areas, the SMP list, RTOS support and finally the target
 * struct itself are torn down, in that order.
 */
static void target_destroy(struct target *target)
{
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	if (target->semihosting)
		free(target->semihosting->basedir);
	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* Drop every registered Tcl event handler for this target */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head, *tmp;

		/* Clear the smp flag on every member so no other target
		 * tries to walk this list again */
		list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
			list_del(&head->lh);
			head->target->smp = 0;
			free(head);
		}
		if (target->smp_targets != &empty_smp_targets)
			free(target->smp_targets);
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2257
2258 void target_quit(void)
2259 {
2260 struct target_event_callback *pe = target_event_callbacks;
2261 while (pe) {
2262 struct target_event_callback *t = pe->next;
2263 free(pe);
2264 pe = t;
2265 }
2266 target_event_callbacks = NULL;
2267
2268 struct target_timer_callback *pt = target_timer_callbacks;
2269 while (pt) {
2270 struct target_timer_callback *t = pt->next;
2271 free(pt);
2272 pt = t;
2273 }
2274 target_timer_callbacks = NULL;
2275
2276 for (struct target *target = all_targets; target;) {
2277 struct target *tmp;
2278
2279 tmp = target->next;
2280 target_destroy(target);
2281 target = tmp;
2282 }
2283
2284 all_targets = NULL;
2285 }
2286
2287 int target_arch_state(struct target *target)
2288 {
2289 int retval;
2290 if (!target) {
2291 LOG_WARNING("No target has been configured");
2292 return ERROR_OK;
2293 }
2294
2295 if (target->state != TARGET_HALTED)
2296 return ERROR_OK;
2297
2298 retval = target->type->arch_state(target);
2299 return retval;
2300 }
2301
2302 static int target_get_gdb_fileio_info_default(struct target *target,
2303 struct gdb_fileio_info *fileio_info)
2304 {
2305 /* If target does not support semi-hosting function, target
2306 has no need to provide .get_gdb_fileio_info callback.
2307 It just return ERROR_FAIL and gdb_server will return "Txx"
2308 as target halted every time. */
2309 return ERROR_FAIL;
2310 }
2311
2312 static int target_gdb_fileio_end_default(struct target *target,
2313 int retcode, int fileio_errno, bool ctrl_c)
2314 {
2315 return ERROR_OK;
2316 }
2317
/* Default PC-sampling profiler: repeatedly halt the target, read the
 * cached PC register, and resume, until 'max_num_samples' samples have
 * been collected or 'seconds' have elapsed.
 *
 * PC values are stored in 'samples'; the number collected is written to
 * '*num_samples'. Returns ERROR_OK or the first halt/resume error.
 */
int target_profiling_default(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	struct timeval timeout, now;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, seconds, 0);

	LOG_INFO("Starting profiling. Halting and resuming the"
			" target as often as we can...");

	uint32_t sample_count = 0;
	/* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
	struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);

	int retval = ERROR_OK;
	for (;;) {
		target_poll(target);
		if (target->state == TARGET_HALTED) {
			/* NOTE(review): assumes the cached 'pc' value was refreshed
			 * by the poll above -- confirm per target type */
			uint32_t t = buf_get_u32(reg->value, 0, 32);
			samples[sample_count++] = t;
			/* current pc, addr = 0, do not handle breakpoints, not debugging */
			retval = target_resume(target, 1, 0, 0, 0);
			target_poll(target);
			alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
		} else if (target->state == TARGET_RUNNING) {
			/* We want to quickly sample the PC. */
			retval = target_halt(target);
		} else {
			LOG_INFO("Target not halted or running");
			retval = ERROR_OK;
			break;
		}

		if (retval != ERROR_OK)
			break;

		gettimeofday(&now, NULL);
		if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
			LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
			break;
		}
	}

	*num_samples = sample_count;
	return retval;
}
2365
2366 /* Single aligned words are guaranteed to use 16 or 32 bit access
2367 * mode respectively, otherwise data is handled as quickly as
2368 * possible
2369 */
2370 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2371 {
2372 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2373 size, address);
2374
2375 if (!target_was_examined(target)) {
2376 LOG_ERROR("Target not examined yet");
2377 return ERROR_FAIL;
2378 }
2379
2380 if (size == 0)
2381 return ERROR_OK;
2382
2383 if ((address + size - 1) < address) {
2384 /* GDB can request this when e.g. PC is 0xfffffffc */
2385 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2386 address,
2387 size);
2388 return ERROR_FAIL;
2389 }
2390
2391 return target->type->write_buffer(target, address, size, buffer);
2392 }
2393
/* Default implementation of target.type->write_buffer.
 *
 * First writes a leading unaligned prefix with progressively larger
 * accesses until 'address' is aligned to the target's maximum data
 * width, then writes the bulk with the largest access size, finishing
 * the tail with progressively smaller accesses.
 */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	/* Maximum single-access width supported by this target, in bytes */
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* (address & size) != 0 means the address is misaligned at this
		 * power-of-two level, so emit one access of 'size' bytes */
		if (address & size) {
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Number of bytes that form whole 'size'-wide accesses */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2430
2431 /* Single aligned words are guaranteed to use 16 or 32 bit access
2432 * mode respectively, otherwise data is handled as quickly as
2433 * possible
2434 */
2435 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2436 {
2437 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2438 size, address);
2439
2440 if (!target_was_examined(target)) {
2441 LOG_ERROR("Target not examined yet");
2442 return ERROR_FAIL;
2443 }
2444
2445 if (size == 0)
2446 return ERROR_OK;
2447
2448 if ((address + size - 1) < address) {
2449 /* GDB can request this when e.g. PC is 0xfffffffc */
2450 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2451 address,
2452 size);
2453 return ERROR_FAIL;
2454 }
2455
2456 return target->type->read_buffer(target, address, size, buffer);
2457 }
2458
/* Default implementation of target.type->read_buffer.
 *
 * Mirror of target_write_buffer_default(): reads an unaligned prefix
 * with progressively larger accesses, then the bulk with the largest
 * access size, then the tail with smaller ones.
 */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	/* Maximum single-access width supported by this target, in bytes */
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* (address & size) != 0 means the address is misaligned at this
		 * power-of-two level, so emit one access of 'size' bytes */
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Number of bytes that form whole 'size'-wide accesses */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2494
/* Compute a CRC32 checksum over a region of target memory.
 *
 * Tries the target's own checksum_memory routine first (typically an
 * on-target algorithm, much faster than reading everything back); if
 * that fails, falls back to reading the region to the host and using
 * image_calculate_checksum().
 */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->checksum_memory) {
		LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* Fall back to a host-side checksum */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): target_buffer_get_u32() followed by
		 * target_buffer_set_u32() on the same target looks like a
		 * round-trip no-op -- verify whether this conversion is
		 * still required */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2538
2539 int target_blank_check_memory(struct target *target,
2540 struct target_memory_check_block *blocks, int num_blocks,
2541 uint8_t erased_value)
2542 {
2543 if (!target_was_examined(target)) {
2544 LOG_ERROR("Target not examined yet");
2545 return ERROR_FAIL;
2546 }
2547
2548 if (!target->type->blank_check_memory)
2549 return ERROR_NOT_IMPLEMENTED;
2550
2551 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2552 }
2553
2554 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2555 {
2556 uint8_t value_buf[8];
2557 if (!target_was_examined(target)) {
2558 LOG_ERROR("Target not examined yet");
2559 return ERROR_FAIL;
2560 }
2561
2562 int retval = target_read_memory(target, address, 8, 1, value_buf);
2563
2564 if (retval == ERROR_OK) {
2565 *value = target_buffer_get_u64(target, value_buf);
2566 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2567 address,
2568 *value);
2569 } else {
2570 *value = 0x0;
2571 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2572 address);
2573 }
2574
2575 return retval;
2576 }
2577
2578 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2579 {
2580 uint8_t value_buf[4];
2581 if (!target_was_examined(target)) {
2582 LOG_ERROR("Target not examined yet");
2583 return ERROR_FAIL;
2584 }
2585
2586 int retval = target_read_memory(target, address, 4, 1, value_buf);
2587
2588 if (retval == ERROR_OK) {
2589 *value = target_buffer_get_u32(target, value_buf);
2590 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2591 address,
2592 *value);
2593 } else {
2594 *value = 0x0;
2595 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2596 address);
2597 }
2598
2599 return retval;
2600 }
2601
2602 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2603 {
2604 uint8_t value_buf[2];
2605 if (!target_was_examined(target)) {
2606 LOG_ERROR("Target not examined yet");
2607 return ERROR_FAIL;
2608 }
2609
2610 int retval = target_read_memory(target, address, 2, 1, value_buf);
2611
2612 if (retval == ERROR_OK) {
2613 *value = target_buffer_get_u16(target, value_buf);
2614 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2615 address,
2616 *value);
2617 } else {
2618 *value = 0x0;
2619 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2620 address);
2621 }
2622
2623 return retval;
2624 }
2625
2626 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2627 {
2628 if (!target_was_examined(target)) {
2629 LOG_ERROR("Target not examined yet");
2630 return ERROR_FAIL;
2631 }
2632
2633 int retval = target_read_memory(target, address, 1, 1, value);
2634
2635 if (retval == ERROR_OK) {
2636 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2637 address,
2638 *value);
2639 } else {
2640 *value = 0x0;
2641 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2642 address);
2643 }
2644
2645 return retval;
2646 }
2647
2648 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2649 {
2650 int retval;
2651 uint8_t value_buf[8];
2652 if (!target_was_examined(target)) {
2653 LOG_ERROR("Target not examined yet");
2654 return ERROR_FAIL;
2655 }
2656
2657 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2658 address,
2659 value);
2660
2661 target_buffer_set_u64(target, value_buf, value);
2662 retval = target_write_memory(target, address, 8, 1, value_buf);
2663 if (retval != ERROR_OK)
2664 LOG_DEBUG("failed: %i", retval);
2665
2666 return retval;
2667 }
2668
2669 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2670 {
2671 int retval;
2672 uint8_t value_buf[4];
2673 if (!target_was_examined(target)) {
2674 LOG_ERROR("Target not examined yet");
2675 return ERROR_FAIL;
2676 }
2677
2678 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2679 address,
2680 value);
2681
2682 target_buffer_set_u32(target, value_buf, value);
2683 retval = target_write_memory(target, address, 4, 1, value_buf);
2684 if (retval != ERROR_OK)
2685 LOG_DEBUG("failed: %i", retval);
2686
2687 return retval;
2688 }
2689
2690 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2691 {
2692 int retval;
2693 uint8_t value_buf[2];
2694 if (!target_was_examined(target)) {
2695 LOG_ERROR("Target not examined yet");
2696 return ERROR_FAIL;
2697 }
2698
2699 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2700 address,
2701 value);
2702
2703 target_buffer_set_u16(target, value_buf, value);
2704 retval = target_write_memory(target, address, 2, 1, value_buf);
2705 if (retval != ERROR_OK)
2706 LOG_DEBUG("failed: %i", retval);
2707
2708 return retval;
2709 }
2710
2711 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2712 {
2713 int retval;
2714 if (!target_was_examined(target)) {
2715 LOG_ERROR("Target not examined yet");
2716 return ERROR_FAIL;
2717 }
2718
2719 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2720 address, value);
2721
2722 retval = target_write_memory(target, address, 1, 1, &value);
2723 if (retval != ERROR_OK)
2724 LOG_DEBUG("failed: %i", retval);
2725
2726 return retval;
2727 }
2728
2729 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2730 {
2731 int retval;
2732 uint8_t value_buf[8];
2733 if (!target_was_examined(target)) {
2734 LOG_ERROR("Target not examined yet");
2735 return ERROR_FAIL;
2736 }
2737
2738 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2739 address,
2740 value);
2741
2742 target_buffer_set_u64(target, value_buf, value);
2743 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2744 if (retval != ERROR_OK)
2745 LOG_DEBUG("failed: %i", retval);
2746
2747 return retval;
2748 }
2749
2750 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2751 {
2752 int retval;
2753 uint8_t value_buf[4];
2754 if (!target_was_examined(target)) {
2755 LOG_ERROR("Target not examined yet");
2756 return ERROR_FAIL;
2757 }
2758
2759 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2760 address,
2761 value);
2762
2763 target_buffer_set_u32(target, value_buf, value);
2764 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2765 if (retval != ERROR_OK)
2766 LOG_DEBUG("failed: %i", retval);
2767
2768 return retval;
2769 }
2770
2771 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2772 {
2773 int retval;
2774 uint8_t value_buf[2];
2775 if (!target_was_examined(target)) {
2776 LOG_ERROR("Target not examined yet");
2777 return ERROR_FAIL;
2778 }
2779
2780 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2781 address,
2782 value);
2783
2784 target_buffer_set_u16(target, value_buf, value);
2785 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2786 if (retval != ERROR_OK)
2787 LOG_DEBUG("failed: %i", retval);
2788
2789 return retval;
2790 }
2791
2792 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2793 {
2794 int retval;
2795 if (!target_was_examined(target)) {
2796 LOG_ERROR("Target not examined yet");
2797 return ERROR_FAIL;
2798 }
2799
2800 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2801 address, value);
2802
2803 retval = target_write_phys_memory(target, address, 1, 1, &value);
2804 if (retval != ERROR_OK)
2805 LOG_DEBUG("failed: %i", retval);
2806
2807 return retval;
2808 }
2809
2810 static int find_target(struct command_invocation *cmd, const char *name)
2811 {
2812 struct target *target = get_target(name);
2813 if (!target) {
2814 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2815 return ERROR_FAIL;
2816 }
2817 if (!target->tap->enabled) {
2818 command_print(cmd, "Target: TAP %s is disabled, "
2819 "can't be the current target\n",
2820 target->tap->dotted_name);
2821 return ERROR_FAIL;
2822 }
2823
2824 cmd->ctx->current_target = target;
2825 if (cmd->ctx->current_target_override)
2826 cmd->ctx->current_target_override = target;
2827
2828 return ERROR_OK;
2829 }
2830
2831
/* 'targets' command: with one argument, switch the current target;
 * with no argument (or when the switch fails), print a table of all
 * configured targets with their type, endianness, TAP and state. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	/* One argument: try to make that target current */
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	struct target *target = all_targets;
	command_print(CMD, " TargetName Type Endian TapName State ");
	command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		/* '*' marks the current target */
		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2874
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* Current sensed levels, updated by sense_handler() */
static int power_dropout;
static int srst_asserted;

/* Pending actions latched by sense_handler() on level transitions;
 * consumed (and cleared) by handle_target() */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
2884
/* Sample the power-dropout and SRST sense lines and latch edge events.
 *
 * Updates the file-scope level flags (power_dropout, srst_asserted) and
 * sets the corresponding run_* action flags on transitions. The
 * power-dropout and SRST-deassert actions are rate limited to at most
 * once per 2 seconds.
 */
static int sense_handler(void)
{
	/* Previous samples, for edge detection */
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	/* Power restored = dropout line went inactive since last sample */
	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	static int64_t last_power;
	/* Rate limit: at most one power-dropout action per 2 seconds */
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	/* SRST deasserted = line went inactive since last sample */
	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	static int64_t last_srst;
	/* Rate limit: at most one srst-deassert action per 2 seconds */
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2936
/* process target state changes */
/* Periodic callback: run the sense-handler actions, then poll all targets.
 *
 * 'priv' is the Jim interpreter used to evaluate the srst/power Tcl
 * procs. Poll failures back a target off exponentially (up to 5000ms
 * worth of skipped polls) and trigger a re-examination attempt.
 */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		/* Back off: skip 'times' polls after a previous failure */
		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			if (target->backoff.times > 0) {
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target_set_examined(target);
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
						target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3049
/* 'reg' command: list registers, or access one register by ordinal
 * number or name.
 *
 * Usage:
 *   reg                    - list all registers with cached values
 *   reg <num|name>         - read one register (from cache when valid)
 *   reg <num|name> force   - invalidate the cache and re-read
 *   reg <num|name> <value> - write a new value to the register
 */
COMMAND_HANDLER(handle_reg_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	struct reg *reg = NULL;

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		unsigned int count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				if (reg->exist == false || reg->hidden)
					continue;
				/* only print cached values if they are valid */
				if (reg->valid) {
					char *value = buf_to_hex_str(reg->value,
							reg->size);
					command_print(CMD,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* Walk every cache, counting registers until we reach 'num' */
		struct reg_cache *cache = target->reg_cache;
		unsigned int count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);

		if (!reg)
			goto not_found;
	}

	assert(reg); /* give clang a hint that we *know* reg is != NULL here */

	if (!reg->exist)
		goto not_found;

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		/* "force" invalidates the cached value so it is re-read below */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0) {
			int retval = reg->type->get(reg);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not read register '%s'", reg->name);
				return retval;
			}
		}
		char *value = buf_to_hex_str(reg->value, reg->size);
		command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (!buf)
			return ERROR_FAIL;
		/* NOTE(review): the str_to_buf() return value is ignored, so a
		 * malformed value may silently write a partial buffer -- confirm */
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		int retval = reg->type->set(reg, buf);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not write to register '%s'", reg->name);
		} else {
			char *value = buf_to_hex_str(reg->value, reg->size);
			command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
			free(value);
		}

		free(buf);

		return retval;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;

not_found:
	command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
	return ERROR_OK;
}
3180
3181 COMMAND_HANDLER(handle_poll_command)
3182 {
3183 int retval = ERROR_OK;
3184 struct target *target = get_current_target(CMD_CTX);
3185
3186 if (CMD_ARGC == 0) {
3187 command_print(CMD, "background polling: %s",
3188 jtag_poll_get_enabled() ? "on" : "off");
3189 command_print(CMD, "TAP: %s (%s)",
3190 target->tap->dotted_name,
3191 target->tap->enabled ? "enabled" : "disabled");
3192 if (!target->tap->enabled)
3193 return ERROR_OK;
3194 retval = target_poll(target);
3195 if (retval != ERROR_OK)
3196 return retval;
3197 retval = target_arch_state(target);
3198 if (retval != ERROR_OK)
3199 return retval;
3200 } else if (CMD_ARGC == 1) {
3201 bool enable;
3202 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3203 jtag_poll_set_enabled(enable);
3204 } else
3205 return ERROR_COMMAND_SYNTAX_ERROR;
3206
3207 return retval;
3208 }
3209
3210 COMMAND_HANDLER(handle_wait_halt_command)
3211 {
3212 if (CMD_ARGC > 1)
3213 return ERROR_COMMAND_SYNTAX_ERROR;
3214
3215 unsigned ms = DEFAULT_HALT_TIMEOUT;
3216 if (1 == CMD_ARGC) {
3217 int retval = parse_uint(CMD_ARGV[0], &ms);
3218 if (retval != ERROR_OK)
3219 return ERROR_COMMAND_SYNTAX_ERROR;
3220 }
3221
3222 struct target *target = get_current_target(CMD_CTX);
3223 return target_wait_state(target, TARGET_HALTED, ms);
3224 }
3225
3226 /* wait for target state to change. The trick here is to have a low
3227 * latency for short waits and not to suck up all the CPU time
3228 * on longer waits.
3229 *
3230 * After 500ms, keep_alive() is invoked
3231 */
3232 int target_wait_state(struct target *target, enum target_state state, unsigned int ms)
3233 {
3234 int retval;
3235 int64_t then = 0, cur;
3236 bool once = true;
3237
3238 for (;;) {
3239 retval = target_poll(target);
3240 if (retval != ERROR_OK)
3241 return retval;
3242 if (target->state == state)
3243 break;
3244 cur = timeval_ms();
3245 if (once) {
3246 once = false;
3247 then = timeval_ms();
3248 LOG_DEBUG("waiting for target %s...",
3249 nvp_value2name(nvp_target_state, state)->name);
3250 }
3251
3252 if (cur-then > 500)
3253 keep_alive();
3254
3255 if ((cur-then) > ms) {
3256 LOG_ERROR("timed out while waiting for target %s",
3257 nvp_value2name(nvp_target_state, state)->name);
3258 return ERROR_FAIL;
3259 }
3260 }
3261
3262 return ERROR_OK;
3263 }
3264
3265 COMMAND_HANDLER(handle_halt_command)
3266 {
3267 LOG_DEBUG("-");
3268
3269 struct target *target = get_current_target(CMD_CTX);
3270
3271 target->verbose_halt_msg = true;
3272
3273 int retval = target_halt(target);
3274 if (retval != ERROR_OK)
3275 return retval;
3276
3277 if (CMD_ARGC == 1) {
3278 unsigned wait_local;
3279 retval = parse_uint(CMD_ARGV[0], &wait_local);
3280 if (retval != ERROR_OK)
3281 return ERROR_COMMAND_SYNTAX_ERROR;
3282 if (!wait_local)
3283 return ERROR_OK;
3284 }
3285
3286 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3287 }
3288
3289 COMMAND_HANDLER(handle_soft_reset_halt_command)
3290 {
3291 struct target *target = get_current_target(CMD_CTX);
3292
3293 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3294
3295 target_soft_reset_halt(target);
3296
3297 return ERROR_OK;
3298 }
3299
3300 COMMAND_HANDLER(handle_reset_command)
3301 {
3302 if (CMD_ARGC > 1)
3303 return ERROR_COMMAND_SYNTAX_ERROR;
3304
3305 enum target_reset_mode reset_mode = RESET_RUN;
3306 if (CMD_ARGC == 1) {
3307 const struct nvp *n;
3308 n = nvp_name2value(nvp_reset_modes, CMD_ARGV[0]);
3309 if ((!n->name) || (n->value == RESET_UNKNOWN))
3310 return ERROR_COMMAND_SYNTAX_ERROR;
3311 reset_mode = n->value;
3312 }
3313
3314 /* reset *all* targets */
3315 return target_process_reset(CMD, reset_mode);
3316 }
3317
3318
3319 COMMAND_HANDLER(handle_resume_command)
3320 {
3321 int current = 1;
3322 if (CMD_ARGC > 1)
3323 return ERROR_COMMAND_SYNTAX_ERROR;
3324
3325 struct target *target = get_current_target(CMD_CTX);
3326
3327 /* with no CMD_ARGV, resume from current pc, addr = 0,
3328 * with one arguments, addr = CMD_ARGV[0],
3329 * handle breakpoints, not debugging */
3330 target_addr_t addr = 0;
3331 if (CMD_ARGC == 1) {
3332 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3333 current = 0;
3334 }
3335
3336 return target_resume(target, current, addr, 1, 0);
3337 }
3338
3339 COMMAND_HANDLER(handle_step_command)
3340 {
3341 if (CMD_ARGC > 1)
3342 return ERROR_COMMAND_SYNTAX_ERROR;
3343
3344 LOG_DEBUG("-");
3345
3346 /* with no CMD_ARGV, step from current pc, addr = 0,
3347 * with one argument addr = CMD_ARGV[0],
3348 * handle breakpoints, debugging */
3349 target_addr_t addr = 0;
3350 int current_pc = 1;
3351 if (CMD_ARGC == 1) {
3352 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3353 current_pc = 0;
3354 }
3355
3356 struct target *target = get_current_target(CMD_CTX);
3357
3358 return target_step(target, current_pc, addr, 1);
3359 }
3360
/**
 * Pretty-print target memory that has already been read into @a buffer.
 *
 * Output is a hex dump with at most 32 bytes per line, each line prefixed
 * by its target address.  @a size is the element width in bytes (1, 2, 4
 * or 8) and @a count the number of elements; multi-byte values are decoded
 * in the target's byte order.
 */
void target_handle_md_output(struct command_invocation *cmd,
		struct target *target, target_addr_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	const unsigned line_bytecnt = 32;
	unsigned line_modulo = line_bytecnt / size;

	/* scratch line buffer; sized for the widest formatted line */
	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	const char *value_fmt;
	switch (size) {
	case 8:
		value_fmt = "%16.16"PRIx64" ";
		break;
	case 4:
		value_fmt = "%8.8"PRIx64" ";
		break;
	case 2:
		value_fmt = "%4.4"PRIx64" ";
		break;
	case 1:
		value_fmt = "%2.2"PRIx64" ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		if (i % line_modulo == 0) {
			/* start of a dump line: emit the address prefix */
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					TARGET_ADDR_FMT ": ",
					(address + (i * size)));
		}

		uint64_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		/* decode one element using the target's endianness */
		switch (size) {
		case 8:
			value = target_buffer_get_u64(target, value_ptr);
			break;
		case 4:
			value = target_buffer_get_u32(target, value_ptr);
			break;
		case 2:
			value = target_buffer_get_u16(target, value_ptr);
			break;
		case 1:
			value = *value_ptr;
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		/* flush at the end of each line and after the final element */
		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			command_print(cmd, "%s", output);
			output_len = 0;
		}
	}
}
3424
3425 COMMAND_HANDLER(handle_md_command)
3426 {
3427 if (CMD_ARGC < 1)
3428 return ERROR_COMMAND_SYNTAX_ERROR;
3429
3430 unsigned size = 0;
3431 switch (CMD_NAME[2]) {
3432 case 'd':
3433 size = 8;
3434 break;
3435 case 'w':
3436 size = 4;
3437 break;
3438 case 'h':
3439 size = 2;
3440 break;
3441 case 'b':
3442 size = 1;
3443 break;
3444 default:
3445 return ERROR_COMMAND_SYNTAX_ERROR;
3446 }
3447
3448 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3449 int (*fn)(struct target *target,
3450 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3451 if (physical) {
3452 CMD_ARGC--;
3453 CMD_ARGV++;
3454 fn = target_read_phys_memory;
3455 } else
3456 fn = target_read_memory;
3457 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3458 return ERROR_COMMAND_SYNTAX_ERROR;
3459
3460 target_addr_t address;
3461 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3462
3463 unsigned count = 1;
3464 if (CMD_ARGC == 2)
3465 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3466
3467 uint8_t *buffer = calloc(count, size);
3468 if (!buffer) {
3469 LOG_ERROR("Failed to allocate md read buffer");
3470 return ERROR_FAIL;
3471 }
3472
3473 struct target *target = get_current_target(CMD_CTX);
3474 int retval = fn(target, address, size, count, buffer);
3475 if (retval == ERROR_OK)
3476 target_handle_md_output(CMD, target, address, size, count, buffer);
3477
3478 free(buffer);
3479
3480 return retval;
3481 }
3482
3483 typedef int (*target_write_fn)(struct target *target,
3484 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3485
3486 static int target_fill_mem(struct target *target,
3487 target_addr_t address,
3488 target_write_fn fn,
3489 unsigned data_size,
3490 /* value */
3491 uint64_t b,
3492 /* count */
3493 unsigned c)
3494 {
3495 /* We have to write in reasonably large chunks to be able
3496 * to fill large memory areas with any sane speed */
3497 const unsigned chunk_size = 16384;
3498 uint8_t *target_buf = malloc(chunk_size * data_size);
3499 if (!target_buf) {
3500 LOG_ERROR("Out of memory");
3501 return ERROR_FAIL;
3502 }
3503
3504 for (unsigned i = 0; i < chunk_size; i++) {
3505 switch (data_size) {
3506 case 8:
3507 target_buffer_set_u64(target, target_buf + i * data_size, b);
3508 break;
3509 case 4:
3510 target_buffer_set_u32(target, target_buf + i * data_size, b);
3511 break;
3512 case 2:
3513 target_buffer_set_u16(target, target_buf + i * data_size, b);
3514 break;
3515 case 1:
3516 target_buffer_set_u8(target, target_buf + i * data_size, b);
3517 break;
3518 default:
3519 exit(-1);
3520 }
3521 }
3522
3523 int retval = ERROR_OK;
3524
3525 for (unsigned x = 0; x < c; x += chunk_size) {
3526 unsigned current;
3527 current = c - x;
3528 if (current > chunk_size)
3529 current = chunk_size;
3530 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3531 if (retval != ERROR_OK)
3532 break;
3533 /* avoid GDB timeouts */
3534 keep_alive();
3535 }
3536 free(target_buf);
3537
3538 return retval;
3539 }
3540
3541
3542 COMMAND_HANDLER(handle_mw_command)
3543 {
3544 if (CMD_ARGC < 2)
3545 return ERROR_COMMAND_SYNTAX_ERROR;
3546 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3547 target_write_fn fn;
3548 if (physical) {
3549 CMD_ARGC--;
3550 CMD_ARGV++;
3551 fn = target_write_phys_memory;
3552 } else
3553 fn = target_write_memory;
3554 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3555 return ERROR_COMMAND_SYNTAX_ERROR;
3556
3557 target_addr_t address;
3558 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3559
3560 uint64_t value;
3561 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3562
3563 unsigned count = 1;
3564 if (CMD_ARGC == 3)
3565 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3566
3567 struct target *target = get_current_target(CMD_CTX);
3568 unsigned wordsize;
3569 switch (CMD_NAME[2]) {
3570 case 'd':
3571 wordsize = 8;
3572 break;
3573 case 'w':
3574 wordsize = 4;
3575 break;
3576 case 'h':
3577 wordsize = 2;
3578 break;
3579 case 'b':
3580 wordsize = 1;
3581 break;
3582 default:
3583 return ERROR_COMMAND_SYNTAX_ERROR;
3584 }
3585
3586 return target_fill_mem(target, address, fn, wordsize, value, count);
3587 }
3588
3589 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3590 target_addr_t *min_address, target_addr_t *max_address)
3591 {
3592 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3593 return ERROR_COMMAND_SYNTAX_ERROR;
3594
3595 /* a base address isn't always necessary,
3596 * default to 0x0 (i.e. don't relocate) */
3597 if (CMD_ARGC >= 2) {
3598 target_addr_t addr;
3599 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3600 image->base_address = addr;
3601 image->base_address_set = true;
3602 } else
3603 image->base_address_set = false;
3604
3605 image->start_address_set = false;
3606
3607 if (CMD_ARGC >= 4)
3608 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3609 if (CMD_ARGC == 5) {
3610 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3611 /* use size (given) to find max (required) */
3612 *max_address += *min_address;
3613 }
3614
3615 if (*min_address > *max_address)
3616 return ERROR_COMMAND_SYNTAX_ERROR;
3617
3618 return ERROR_OK;
3619 }
3620
/**
 * 'load_image' command: download an image file into target memory.
 *
 * Arguments (see parse_load_image_command): filename [address [type
 * [min_address [size]]]].  Each section is clipped to the half-open
 * window [min_address, max_address) before being written.
 */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* skip sections entirely outside [min_address, max_address) */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above the window */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3704
3705 COMMAND_HANDLER(handle_dump_image_command)
3706 {
3707 struct fileio *fileio;
3708 uint8_t *buffer;
3709 int retval, retvaltemp;
3710 target_addr_t address, size;
3711 struct duration bench;
3712 struct target *target = get_current_target(CMD_CTX);
3713
3714 if (CMD_ARGC != 3)
3715 return ERROR_COMMAND_SYNTAX_ERROR;
3716
3717 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3718 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3719
3720 uint32_t buf_size = (size > 4096) ? 4096 : size;
3721 buffer = malloc(buf_size);
3722 if (!buffer)
3723 return ERROR_FAIL;
3724
3725 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3726 if (retval != ERROR_OK) {
3727 free(buffer);
3728 return retval;
3729 }
3730
3731 duration_start(&bench);
3732
3733 while (size > 0) {
3734 size_t size_written;
3735 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3736 retval = target_read_buffer(target, address, this_run_size, buffer);
3737 if (retval != ERROR_OK)
3738 break;
3739
3740 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3741 if (retval != ERROR_OK)
3742 break;
3743
3744 size -= this_run_size;
3745 address += this_run_size;
3746 }
3747
3748 free(buffer);
3749
3750 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3751 size_t filesize;
3752 retval = fileio_size(fileio, &filesize);
3753 if (retval != ERROR_OK)
3754 return retval;
3755 command_print(CMD,
3756 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3757 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3758 }
3759
3760 retvaltemp = fileio_close(fileio);
3761 if (retvaltemp != ERROR_OK)
3762 return retvaltemp;
3763
3764 return retval;
3765 }
3766
/* How thoroughly handle_verify_image_command_internal() checks the image. */
enum verify_mode {
	IMAGE_TEST = 0,		/* read the image and report section layout only */
	IMAGE_VERIFY = 1,	/* CRC check, falling back to a byte-wise compare */
	IMAGE_CHECKSUM_ONLY = 2	/* CRC check only; fail on first mismatch */
};
3772
3773 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3774 {
3775 uint8_t *buffer;
3776 size_t buf_cnt;
3777 uint32_t image_size;
3778 int retval;
3779 uint32_t checksum = 0;
3780 uint32_t mem_checksum = 0;
3781
3782 struct image image;
3783
3784 struct target *target = get_current_target(CMD_CTX);
3785
3786 if (CMD_ARGC < 1)
3787 return ERROR_COMMAND_SYNTAX_ERROR;
3788
3789 if (!target) {
3790 LOG_ERROR("no target selected");
3791 return ERROR_FAIL;
3792 }
3793
3794 struct duration bench;
3795 duration_start(&bench);
3796
3797 if (CMD_ARGC >= 2) {
3798 target_addr_t addr;
3799 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3800 image.base_address = addr;
3801 image.base_address_set = true;
3802 } else {
3803 image.base_address_set = false;
3804 image.base_address = 0x0;
3805 }
3806
3807 image.start_address_set = false;
3808
3809 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3810 if (retval != ERROR_OK)
3811 return retval;
3812
3813 image_size = 0x0;
3814 int diffs = 0;
3815 retval = ERROR_OK;
3816 for (unsigned int i = 0; i < image.num_sections; i++) {
3817 buffer = malloc(image.sections[i].size);
3818 if (!buffer) {
3819 command_print(CMD,
3820 "error allocating buffer for section (%" PRIu32 " bytes)",
3821 image.sections[i].size);
3822 break;
3823 }
3824 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3825 if (retval != ERROR_OK) {
3826 free(buffer);
3827 break;
3828 }
3829
3830 if (verify >= IMAGE_VERIFY) {
3831 /* calculate checksum of image */
3832 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3833 if (retval != ERROR_OK) {
3834 free(buffer);
3835 break;
3836 }
3837
3838 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3839 if (retval != ERROR_OK) {
3840 free(buffer);
3841 break;
3842 }
3843 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3844 LOG_ERROR("checksum mismatch");
3845 free(buffer);
3846 retval = ERROR_FAIL;
3847 goto done;
3848 }
3849 if (checksum != mem_checksum) {
3850 /* failed crc checksum, fall back to a binary compare */
3851 uint8_t *data;
3852
3853 if (diffs == 0)
3854 LOG_ERROR("checksum mismatch - attempting binary compare");
3855
3856 data = malloc(buf_cnt);
3857
3858 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3859 if (retval == ERROR_OK) {
3860 uint32_t t;
3861 for (t = 0; t < buf_cnt; t++) {
3862 if (data[t] != buffer[t]) {
3863 command_print(CMD,
3864 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3865 diffs,
3866 (unsigned)(t + image.sections[i].base_address),
3867 data[t],
3868 buffer[t]);
3869 if (diffs++ >= 127) {
3870 command_print(CMD, "More than 128 errors, the rest are not printed.");
3871 free(data);
3872 free(buffer);
3873 goto done;
3874 }
3875 }
3876 keep_alive();
3877 }
3878 }
3879 free(data);
3880 }
3881 } else {
3882 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3883 image.sections[i].base_address,
3884 buf_cnt);
3885 }
3886
3887 free(buffer);
3888 image_size += buf_cnt;
3889 }
3890 if (diffs > 0)
3891 command_print(CMD, "No more differences found.");
3892 done:
3893 if (diffs > 0)
3894 retval = ERROR_FAIL;
3895 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3896 command_print(CMD, "verified %" PRIu32 " bytes "
3897 "in %fs (%0.3f KiB/s)", image_size,
3898 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3899 }
3900
3901 image_close(&image);
3902
3903 return retval;
3904 }
3905
/* 'verify_image_checksum': CRC-compare an image against target memory only. */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3910
/* 'verify_image': CRC-compare, with a byte-wise fallback on mismatch. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3915
/* 'test_image': read the image and report its sections; no memory compare. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3920
3921 static int handle_bp_command_list(struct command_invocation *cmd)
3922 {
3923 struct target *target = get_current_target(cmd->ctx);
3924 struct breakpoint *breakpoint = target->breakpoints;
3925 while (breakpoint) {
3926 if (breakpoint->type == BKPT_SOFT) {
3927 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3928 breakpoint->length);
3929 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, 0x%s",
3930 breakpoint->address,
3931 breakpoint->length,
3932 buf);
3933 free(buf);
3934 } else {
3935 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3936 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %u",
3937 breakpoint->asid,
3938 breakpoint->length, breakpoint->number);
3939 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3940 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3941 breakpoint->address,
3942 breakpoint->length, breakpoint->number);
3943 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3944 breakpoint->asid);
3945 } else
3946 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3947 breakpoint->address,
3948 breakpoint->length, breakpoint->number);
3949 }
3950
3951 breakpoint = breakpoint->next;
3952 }
3953 return ERROR_OK;
3954 }
3955
3956 static int handle_bp_command_set(struct command_invocation *cmd,
3957 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3958 {
3959 struct target *target = get_current_target(cmd->ctx);
3960 int retval;
3961
3962 if (asid == 0) {
3963 retval = breakpoint_add(target, addr, length, hw);
3964 /* error is always logged in breakpoint_add(), do not print it again */
3965 if (retval == ERROR_OK)
3966 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3967
3968 } else if (addr == 0) {
3969 if (!target->type->add_context_breakpoint) {
3970 LOG_ERROR("Context breakpoint not available");
3971 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3972 }
3973 retval = context_breakpoint_add(target, asid, length, hw);
3974 /* error is always logged in context_breakpoint_add(), do not print it again */
3975 if (retval == ERROR_OK)
3976 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3977
3978 } else {
3979 if (!target->type->add_hybrid_breakpoint) {
3980 LOG_ERROR("Hybrid breakpoint not available");
3981 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3982 }
3983 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3984 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
3985 if (retval == ERROR_OK)
3986 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3987 }
3988 return retval;
3989 }
3990
/**
 * 'bp' command: list breakpoints, or set one of:
 *   bp <addr> <len>          software breakpoint
 *   bp <addr> <len> hw       hardware breakpoint
 *   bp <asid> <len> hw_ctx   context (ASID) breakpoint
 *   bp <addr> <asid> <len>   hybrid breakpoint (4 args)
 */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
		case 0:
			/* no arguments: list all breakpoints */
			return handle_bp_command_list(CMD);

		case 2:
			/* <addr> <len>: software breakpoint */
			asid = 0;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			return handle_bp_command_set(CMD, addr, asid, length, hw);

		case 3:
			/* third argument selects hw or hw_ctx; any other third
			 * argument intentionally falls through to the hybrid form */
			if (strcmp(CMD_ARGV[2], "hw") == 0) {
				hw = BKPT_HARD;
				COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
				COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
				asid = 0;
				return handle_bp_command_set(CMD, addr, asid, length, hw);
			} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
				hw = BKPT_HARD;
				COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
				COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
				addr = 0;
				return handle_bp_command_set(CMD, addr, asid, length, hw);
			}
		/* fallthrough */
		case 4:
			/* <addr> <asid> <len>: hybrid hardware breakpoint */
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
			return handle_bp_command_set(CMD, addr, asid, length, hw);

		default:
			return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
4034
4035 COMMAND_HANDLER(handle_rbp_command)
4036 {
4037 if (CMD_ARGC != 1)
4038 return ERROR_COMMAND_SYNTAX_ERROR;
4039
4040 struct target *target = get_current_target(CMD_CTX);
4041
4042 if (!strcmp(CMD_ARGV[0], "all")) {
4043 breakpoint_remove_all(target);
4044 } else {
4045 target_addr_t addr;
4046 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4047
4048 breakpoint_remove(target, addr);
4049 }
4050
4051 return ERROR_OK;
4052 }
4053
/**
 * 'wp' command: with no arguments list all watchpoints; otherwise set one:
 *   wp <address> <length> [r|w|a [value [mask]]]
 */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC == 0) {
		/* list all watchpoints of the current target */
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx64
					", mask: 0x%8.8" PRIx64,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint64_t data_value = 0x0;
	uint64_t data_mask = WATCHPOINT_IGNORE_DATA_VALUE_MASK;
	bool mask_specified = false;

	/* parse from the most specific form downwards; each case
	 * intentionally falls through to the shorter forms */
	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u64, CMD_ARGV[4], data_mask);
		mask_specified = true;
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u64, CMD_ARGV[3], data_value);
		// if user specified only data value without mask - the mask should be 0
		if (!mask_specified)
			data_mask = 0;
		/* fall through */
	case 3:
		/* access type: read, write, or any access */
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
4126
4127 COMMAND_HANDLER(handle_rwp_command)
4128 {
4129 if (CMD_ARGC != 1)
4130 return ERROR_COMMAND_SYNTAX_ERROR;
4131
4132 target_addr_t addr;
4133 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4134
4135 struct target *target = get_current_target(CMD_CTX);
4136 watchpoint_remove(target, addr);
4137
4138 return ERROR_OK;
4139 }
4140
4141 /**
4142 * Translate a virtual address to a physical address.
4143 *
4144 * The low-level target implementation must have logged a detailed error
4145 * which is forwarded to telnet/GDB session.
4146 */
4147 COMMAND_HANDLER(handle_virt2phys_command)
4148 {
4149 if (CMD_ARGC != 1)
4150 return ERROR_COMMAND_SYNTAX_ERROR;
4151
4152 target_addr_t va;
4153 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4154 target_addr_t pa;
4155
4156 struct target *target = get_current_target(CMD_CTX);
4157 int retval = target->type->virt2phys(target, va, &pa);
4158 if (retval == ERROR_OK)
4159 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4160
4161 return retval;
4162 }
4163
/* Write @a len bytes from @a data to @a f; short writes are logged, not fatal. */
static void write_data(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4170
/* Write @a l to @a f as a 32-bit value in the target's byte order. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t val[4];

	target_buffer_set_u32(target, val, l);
	write_data(f, val, 4);
}
4178
/* Write the bytes of NUL-terminated @a s to @a f (terminator excluded). */
static void write_string(FILE *f, char *s)
{
	write_data(f, s, strlen(s));
}
4183
typedef unsigned char UNIT[2]; /* unit of profiling */

/* Dump a gmon.out histogram file.
 *
 * Buckets PC samples into a histogram covering [min, max) and writes a
 * gprof-compatible file: "gmon" magic + version header, a TIME_HIST tag,
 * the histogram header (low_pc/high_pc/bucket count/sample rate), and one
 * 16-bit little-endian counter per bucket. */
static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
		uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
{
	uint32_t i;
	FILE *f = fopen(filename, "w");
	if (!f)
		return;
	/* file header: magic, version, three words of padding */
	write_string(f, "gmon");
	write_long(f, 0x00000001, target); /* Version */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */

	uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
	write_data(f, &zero, 1);

	/* figure out bucket size */
	uint32_t min;
	uint32_t max;
	if (with_range) {
		min = start_address;
		max = end_address;
	} else {
		/* no explicit range: derive it from the samples themselves */
		min = samples[0];
		max = samples[0];
		for (i = 0; i < sample_num; i++) {
			if (min > samples[i])
				min = samples[i];
			if (max < samples[i])
				max = samples[i];
		}

		/* max should be (largest sample + 1)
		 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
		if (max < UINT32_MAX)
			max++;

		/* gprof requires (max - min) >= 2 */
		while ((max - min) < 2) {
			if (max < UINT32_MAX)
				max++;
			else
				min--;
		}
	}

	uint32_t address_space = max - min;

	/* FIXME: What is the reasonable number of buckets?
	 * The profiling result will be more accurate if there are enough buckets. */
	static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
	uint32_t num_buckets = address_space / sizeof(UNIT);
	if (num_buckets > max_buckets)
		num_buckets = max_buckets;
	int *buckets = malloc(sizeof(int) * num_buckets);
	if (!buckets) {
		fclose(f);
		return;
	}
	memset(buckets, 0, sizeof(int) * num_buckets);
	for (i = 0; i < sample_num; i++) {
		uint32_t address = samples[i];

		/* drop samples that fall outside the histogram range */
		if ((address < min) || (max <= address))
			continue;

		/* widen to long long so the scaling multiply cannot overflow */
		long long a = address - min;
		long long b = num_buckets;
		long long c = address_space;
		int index_t = (a * b) / c; /* danger!!!! int32 overflows */
		buckets[index_t]++;
	}

	/* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
	write_long(f, min, target); /* low_pc */
	write_long(f, max, target); /* high_pc */
	write_long(f, num_buckets, target); /* # of buckets */
	float sample_rate = sample_num / (duration_ms / 1000.0);
	write_long(f, sample_rate, target);
	write_string(f, "seconds");
	/* pad the 15-byte dimension field with NULs */
	for (i = 0; i < (15-strlen("seconds")); i++)
		write_data(f, &zero, 1);
	write_string(f, "s");

	/*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */

	char *data = malloc(2 * num_buckets);
	if (data) {
		for (i = 0; i < num_buckets; i++) {
			int val;
			val = buckets[i];
			/* histogram counters are 16-bit; clamp instead of wrapping */
			if (val > 65535)
				val = 65535;
			data[i * 2] = val&0xff;
			data[i * 2 + 1] = (val >> 8) & 0xff;
		}
		free(buckets);
		write_data(f, data, num_buckets * 2);
		free(data);
	} else
		free(buckets);

	fclose(f);
}
4291
4292 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4293 * which will be used as a random sampling of PC */
4294 COMMAND_HANDLER(handle_profile_command)
4295 {
4296 struct target *target = get_current_target(CMD_CTX);
4297
4298 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4299 return ERROR_COMMAND_SYNTAX_ERROR;
4300
4301 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4302 uint32_t offset;
4303 uint32_t num_of_samples;
4304 int retval = ERROR_OK;
4305 bool halted_before_profiling = target->state == TARGET_HALTED;
4306
4307 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4308
4309 uint32_t start_address = 0;
4310 uint32_t end_address = 0;
4311 bool with_range = false;
4312 if (CMD_ARGC == 4) {
4313 with_range = true;
4314 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4315 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4316 if (start_address > end_address || (end_address - start_address) < 2) {
4317 command_print(CMD, "Error: end - start < 2");
4318 return ERROR_COMMAND_ARGUMENT_INVALID;
4319 }
4320 }
4321
4322 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4323 if (!samples) {
4324 LOG_ERROR("No memory to store samples.");
4325 return ERROR_FAIL;
4326 }
4327
4328 uint64_t timestart_ms = timeval_ms();
4329 /**
4330 * Some cores let us sample the PC without the
4331 * annoying halt/resume step; for example, ARMv7 PCSR.
4332 * Provide a way to use that more efficient mechanism.
4333 */
4334 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4335 &num_of_samples, offset);
4336 if (retval != ERROR_OK) {
4337 free(samples);
4338 return retval;
4339 }
4340 uint32_t duration_ms = timeval_ms() - timestart_ms;
4341
4342 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4343
4344 retval = target_poll(target);
4345 if (retval != ERROR_OK) {
4346 free(samples);
4347 return retval;
4348 }
4349
4350 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4351 /* The target was halted before we started and is running now. Halt it,
4352 * for consistency. */
4353 retval = target_halt(target);
4354 if (retval != ERROR_OK) {
4355 free(samples);
4356 return retval;
4357 }
4358 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4359 /* The target was running before we started and is halted now. Resume
4360 * it, for consistency. */
4361 retval = target_resume(target, 1, 0, 0, 0);
4362 if (retval != ERROR_OK) {
4363 free(samples);
4364 return retval;
4365 }
4366 }
4367
4368 retval = target_poll(target);
4369 if (retval != ERROR_OK) {
4370 free(samples);
4371 return retval;
4372 }
4373
4374 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4375 with_range, start_address, end_address, target, duration_ms);
4376 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4377
4378 free(samples);
4379 return retval;
4380 }
4381
4382 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4383 {
4384 char *namebuf;
4385 Jim_Obj *obj_name, *obj_val;
4386 int result;
4387
4388 namebuf = alloc_printf("%s(%d)", varname, idx);
4389 if (!namebuf)
4390 return JIM_ERR;
4391
4392 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4393 jim_wide wide_val = val;
4394 obj_val = Jim_NewWideObj(interp, wide_val);
4395 if (!obj_name || !obj_val) {
4396 free(namebuf);
4397 return JIM_ERR;
4398 }
4399
4400 Jim_IncrRefCount(obj_name);
4401 Jim_IncrRefCount(obj_val);
4402 result = Jim_SetVariable(interp, obj_name, obj_val);
4403 Jim_DecrRefCount(interp, obj_name);
4404 Jim_DecrRefCount(interp, obj_val);
4405 free(namebuf);
4406 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4407 return result;
4408 }
4409
/**
 * Tcl backend of the deprecated 'mem2array' command: reads @p argv[3]
 * elements of @p argv[1] bits each, starting at address @p argv[2], and
 * stores them into the Tcl array variable named @p argv[0], indexed from 0.
 *
 * Returns JIM_OK on success; JIM_ERR with a message in the interpreter
 * result on bad arguments or read failure.
 */
static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
{
	int e;

	LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");

	/* argv[0] = name of array to receive the data
	 * argv[1] = desired element width in bits
	 * argv[2] = memory address
	 * argv[3] = count of times to read
	 * argv[4] = optional "phys"
	 */
	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
		return JIM_ERR;
	}

	/* Arg 0: Name of the array variable */
	const char *varname = Jim_GetString(argv[0], NULL);

	/* Arg 1: Bit width of one element */
	long l;
	e = Jim_GetLong(interp, argv[1], &l);
	if (e != JIM_OK)
		return e;
	const unsigned int width_bits = l;

	if (width_bits != 8 &&
			width_bits != 16 &&
			width_bits != 32 &&
			width_bits != 64) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
		return JIM_ERR;
	}
	/* Element width in bytes. */
	const unsigned int width = width_bits / 8;

	/* Arg 2: Memory address */
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[2], &wide_addr);
	if (e != JIM_OK)
		return e;
	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 3: Number of elements to read */
	e = Jim_GetLong(interp, argv[3], &l);
	if (e != JIM_OK)
		return e;
	size_t len = l;

	/* Arg 4: phys */
	/* "phys" selects the physical-address read path (bypassing the MMU). */
	bool is_phys = false;
	if (argc > 4) {
		int str_len = 0;
		const char *phys = Jim_GetString(argv[4], &str_len);
		if (!strncmp(phys, "phys", str_len))
			is_phys = true;
		else
			return JIM_ERR;
	}

	/* Argument checks */
	if (len == 0) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
		return JIM_ERR;
	}
	/* Reject requests whose end address wraps past the address space. */
	if ((addr + (len * width)) < addr) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
		return JIM_ERR;
	}
	if (len > 65536) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"mem2array: too large read request, exceeds 64K items", NULL);
		return JIM_ERR;
	}

	/* The start address must be naturally aligned for the element width. */
	if ((width == 1) ||
			((width == 2) && ((addr & 1) == 0)) ||
			((width == 4) && ((addr & 3) == 0)) ||
			((width == 8) && ((addr & 7) == 0))) {
		/* alignment correct */
	} else {
		char buf[100];
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
				addr,
				width);
		Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
		return JIM_ERR;
	}

	/* Transfer loop */

	/* index counter */
	size_t idx = 0;

	/* Read in fixed-size chunks so arbitrarily large requests do not
	 * need one big allocation. */
	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);
	if (!buffer)
		return JIM_ERR;

	/* assume ok */
	e = JIM_OK;
	while (len) {
		/* Slurp... in buffer size chunks */
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */

		int retval;
		if (is_phys)
			retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_read_memory(target, addr, width, chunk_len, buffer);
		if (retval != ERROR_OK) {
			/* BOO !*/
			LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
					addr,
					width,
					chunk_len);
			Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
			Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
			e = JIM_ERR;
			break;
		} else {
			/* Decode each element per the target's endianness and
			 * append it to the Tcl array. */
			for (size_t i = 0; i < chunk_len ; i++, idx++) {
				uint64_t v = 0;
				switch (width) {
					case 8:
						v = target_buffer_get_u64(target, &buffer[i*width]);
						break;
					case 4:
						v = target_buffer_get_u32(target, &buffer[i*width]);
						break;
					case 2:
						v = target_buffer_get_u16(target, &buffer[i*width]);
						break;
					case 1:
						v = buffer[i] & 0x0ff;
						break;
				}
				new_u64_array_element(interp, varname, idx, v);
			}
			len -= chunk_len;
			addr += chunk_len * width;
		}
	}

	free(buffer);

	Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));

	return e;
}
4567
4568 COMMAND_HANDLER(handle_target_read_memory)
4569 {
4570 /*
4571 * CMD_ARGV[0] = memory address
4572 * CMD_ARGV[1] = desired element width in bits
4573 * CMD_ARGV[2] = number of elements to read
4574 * CMD_ARGV[3] = optional "phys"
4575 */
4576
4577 if (CMD_ARGC < 3 || CMD_ARGC > 4)
4578 return ERROR_COMMAND_SYNTAX_ERROR;
4579
4580 /* Arg 1: Memory address. */
4581 target_addr_t addr;
4582 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], addr);
4583
4584 /* Arg 2: Bit width of one element. */
4585 unsigned int width_bits;
4586 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], width_bits);
4587
4588 /* Arg 3: Number of elements to read. */
4589 unsigned int count;
4590 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
4591
4592 /* Arg 4: Optional 'phys'. */
4593 bool is_phys = false;
4594 if (CMD_ARGC == 4) {
4595 if (strcmp(CMD_ARGV[3], "phys")) {
4596 command_print(CMD, "invalid argument '%s', must be 'phys'", CMD_ARGV[3]);
4597 return ERROR_COMMAND_ARGUMENT_INVALID;
4598 }
4599
4600 is_phys = true;
4601 }
4602
4603 switch (width_bits) {
4604 case 8:
4605 case 16:
4606 case 32:
4607 case 64:
4608 break;
4609 default:
4610 command_print(CMD, "invalid width, must be 8, 16, 32 or 64");
4611 return ERROR_COMMAND_ARGUMENT_INVALID;
4612 }
4613
4614 const unsigned int width = width_bits / 8;
4615
4616 if ((addr + (count * width)) < addr) {
4617 command_print(CMD, "read_memory: addr + count wraps to zero");
4618 return ERROR_COMMAND_ARGUMENT_INVALID;
4619 }
4620
4621 if (count > 65536) {
4622 command_print(CMD, "read_memory: too large read request, exceeds 64K elements");
4623 return ERROR_COMMAND_ARGUMENT_INVALID;
4624 }
4625
4626 struct target *target = get_current_target(CMD_CTX);
4627
4628 const size_t buffersize = 4096;
4629 uint8_t *buffer = malloc(buffersize);
4630
4631 if (!buffer) {
4632 LOG_ERROR("Failed to allocate memory");
4633 return ERROR_FAIL;
4634 }
4635
4636 char *separator = "";
4637 while (count > 0) {
4638 const unsigned int max_chunk_len = buffersize / width;
4639 const size_t chunk_len = MIN(count, max_chunk_len);
4640
4641 int retval;
4642
4643 if (is_phys)
4644 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4645 else
4646 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4647
4648 if (retval != ERROR_OK) {
4649 LOG_DEBUG("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4650 addr, width_bits, chunk_len);
4651 /*
4652 * FIXME: we append the errmsg to the list of value already read.
4653 * Add a way to flush and replace old output, but LOG_DEBUG() it
4654 */
4655 command_print(CMD, "read_memory: failed to read memory");
4656 free(buffer);
4657 return retval;
4658 }
4659
4660 for (size_t i = 0; i < chunk_len ; i++) {
4661 uint64_t v = 0;
4662
4663 switch (width) {
4664 case 8:
4665 v = target_buffer_get_u64(target, &buffer[i * width]);
4666 break;
4667 case 4:
4668 v = target_buffer_get_u32(target, &buffer[i * width]);
4669 break;
4670 case 2:
4671 v = target_buffer_get_u16(target, &buffer[i * width]);
4672 break;
4673 case 1:
4674 v = buffer[i];
4675 break;
4676 }
4677
4678 command_print_sameline(CMD, "%s0x%" PRIx64, separator, v);
4679 separator = " ";
4680 }
4681
4682 count -= chunk_len;
4683 addr += chunk_len * width;
4684 }
4685
4686 free(buffer);
4687
4688 return ERROR_OK;
4689 }
4690
4691 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4692 {
4693 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4694 if (!namebuf)
4695 return JIM_ERR;
4696
4697 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4698 if (!obj_name) {
4699 free(namebuf);
4700 return JIM_ERR;
4701 }
4702
4703 Jim_IncrRefCount(obj_name);
4704 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4705 Jim_DecrRefCount(interp, obj_name);
4706 free(namebuf);
4707 if (!obj_val)
4708 return JIM_ERR;
4709
4710 jim_wide wide_val;
4711 int result = Jim_GetWide(interp, obj_val, &wide_val);
4712 *val = wide_val;
4713 return result;
4714 }
4715
/**
 * Tcl backend of the deprecated 'array2mem' command: writes @p argv[3]
 * elements of @p argv[1] bits each, taken from the Tcl array variable
 * named @p argv[0] (indexed from 0), to memory starting at @p argv[2].
 *
 * Returns JIM_OK on success; JIM_ERR with a message in the interpreter
 * result on bad arguments or write failure.
 */
static int target_array2mem(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj *const *argv)
{
	int e;

	LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");

	/* argv[0] = name of array from which to read the data
	 * argv[1] = desired element width in bits
	 * argv[2] = memory address
	 * argv[3] = number of elements to write
	 * argv[4] = optional "phys"
	 */
	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
		return JIM_ERR;
	}

	/* Arg 0: Name of the array variable */
	const char *varname = Jim_GetString(argv[0], NULL);

	/* Arg 1: Bit width of one element */
	long l;
	e = Jim_GetLong(interp, argv[1], &l);
	if (e != JIM_OK)
		return e;
	const unsigned int width_bits = l;

	if (width_bits != 8 &&
			width_bits != 16 &&
			width_bits != 32 &&
			width_bits != 64) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
		return JIM_ERR;
	}
	/* Element width in bytes. */
	const unsigned int width = width_bits / 8;

	/* Arg 2: Memory address */
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[2], &wide_addr);
	if (e != JIM_OK)
		return e;
	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 3: Number of elements to write */
	e = Jim_GetLong(interp, argv[3], &l);
	if (e != JIM_OK)
		return e;
	size_t len = l;

	/* Arg 4: Phys */
	/* "phys" selects the physical-address write path (bypassing the MMU). */
	bool is_phys = false;
	if (argc > 4) {
		int str_len = 0;
		const char *phys = Jim_GetString(argv[4], &str_len);
		if (!strncmp(phys, "phys", str_len))
			is_phys = true;
		else
			return JIM_ERR;
	}

	/* Argument checks */
	if (len == 0) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"array2mem: zero width read?", NULL);
		return JIM_ERR;
	}

	/* Reject requests whose end address wraps past the address space. */
	if ((addr + (len * width)) < addr) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"array2mem: addr + len - wraps to zero?", NULL);
		return JIM_ERR;
	}

	if (len > 65536) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"array2mem: too large memory write request, exceeds 64K items", NULL);
		return JIM_ERR;
	}

	/* The start address must be naturally aligned for the element width. */
	if ((width == 1) ||
			((width == 2) && ((addr & 1) == 0)) ||
			((width == 4) && ((addr & 3) == 0)) ||
			((width == 8) && ((addr & 7) == 0))) {
		/* alignment correct */
	} else {
		char buf[100];
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
				addr,
				width);
		Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
		return JIM_ERR;
	}

	/* Transfer loop */

	/* assume ok */
	e = JIM_OK;

	/* Write in fixed-size chunks so arbitrarily large requests do not
	 * need one big allocation. */
	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);
	if (!buffer)
		return JIM_ERR;

	/* index counter */
	size_t idx = 0;

	while (len) {
		/* Slurp... in buffer size chunks */
		const unsigned int max_chunk_len = buffersize / width;

		const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */

		/* Fill the buffer */
		/* Marshal array elements into raw bytes, honoring the
		 * target's endianness. */
		for (size_t i = 0; i < chunk_len; i++, idx++) {
			uint64_t v = 0;
			if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
				free(buffer);
				return JIM_ERR;
			}
			switch (width) {
			case 8:
				target_buffer_set_u64(target, &buffer[i * width], v);
				break;
			case 4:
				target_buffer_set_u32(target, &buffer[i * width], v);
				break;
			case 2:
				target_buffer_set_u16(target, &buffer[i * width], v);
				break;
			case 1:
				buffer[i] = v & 0x0ff;
				break;
			}
		}
		len -= chunk_len;

		/* Write the buffer to memory */
		int retval;
		if (is_phys)
			retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_write_memory(target, addr, width, chunk_len, buffer);
		if (retval != ERROR_OK) {
			/* BOO !*/
			LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
					addr,
					width,
					chunk_len);
			Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
			Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
			e = JIM_ERR;
			break;
		}
		addr += chunk_len * width;
	}

	free(buffer);

	Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));

	return e;
}
4885
4886 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4887 Jim_Obj * const *argv)
4888 {
4889 /*
4890 * argv[1] = memory address
4891 * argv[2] = desired element width in bits
4892 * argv[3] = list of data to write
4893 * argv[4] = optional "phys"
4894 */
4895
4896 if (argc < 4 || argc > 5) {
4897 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4898 return JIM_ERR;
4899 }
4900
4901 /* Arg 1: Memory address. */
4902 int e;
4903 jim_wide wide_addr;
4904 e = Jim_GetWide(interp, argv[1], &wide_addr);
4905
4906 if (e != JIM_OK)
4907 return e;
4908
4909 target_addr_t addr = (target_addr_t)wide_addr;
4910
4911 /* Arg 2: Bit width of one element. */
4912 long l;
4913 e = Jim_GetLong(interp, argv[2], &l);
4914
4915 if (e != JIM_OK)
4916 return e;
4917
4918 const unsigned int width_bits = l;
4919 size_t count = Jim_ListLength(interp, argv[3]);
4920
4921 /* Arg 4: Optional 'phys'. */
4922 bool is_phys = false;
4923
4924 if (argc > 4) {
4925 const char *phys = Jim_GetString(argv[4], NULL);
4926
4927 if (strcmp(phys, "phys")) {
4928 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4929 return JIM_ERR;
4930 }
4931
4932 is_phys = true;
4933 }
4934
4935 switch (width_bits) {
4936 case 8:
4937 case 16:
4938 case 32:
4939 case 64:
4940 break;
4941 default:
4942 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4943 return JIM_ERR;
4944 }
4945
4946 const unsigned int width = width_bits / 8;
4947
4948 if ((addr + (count * width)) < addr) {
4949 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
4950 return JIM_ERR;
4951 }
4952
4953 if (count > 65536) {
4954 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
4955 return JIM_ERR;
4956 }
4957
4958 struct command_context *cmd_ctx = current_command_context(interp);
4959 assert(cmd_ctx != NULL);
4960 struct target *target = get_current_target(cmd_ctx);
4961
4962 const size_t buffersize = 4096;
4963 uint8_t *buffer = malloc(buffersize);
4964
4965 if (!buffer) {
4966 LOG_ERROR("Failed to allocate memory");
4967 return JIM_ERR;
4968 }
4969
4970 size_t j = 0;
4971
4972 while (count > 0) {
4973 const unsigned int max_chunk_len = buffersize / width;
4974 const size_t chunk_len = MIN(count, max_chunk_len);
4975
4976 for (size_t i = 0; i < chunk_len; i++, j++) {
4977 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
4978 jim_wide element_wide;
4979 Jim_GetWide(interp, tmp, &element_wide);
4980
4981 const uint64_t v = element_wide;
4982
4983 switch (width) {
4984 case 8:
4985 target_buffer_set_u64(target, &buffer[i * width], v);
4986 break;
4987 case 4:
4988 target_buffer_set_u32(target, &buffer[i * width], v);
4989 break;
4990 case 2:
4991 target_buffer_set_u16(target, &buffer[i * width], v);
4992 break;
4993 case 1:
4994 buffer[i] = v & 0x0ff;
4995 break;
4996 }
4997 }
4998
4999 count -= chunk_len;
5000
5001 int retval;
5002
5003 if (is_phys)
5004 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5005 else
5006 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5007
5008 if (retval != ERROR_OK) {
5009 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5010 addr, width_bits, chunk_len);
5011 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5012 e = JIM_ERR;
5013 break;
5014 }
5015
5016 addr += chunk_len * width;
5017 }
5018
5019 free(buffer);
5020
5021 return e;
5022 }
5023
/* Run every Tcl handler the target has registered for event @p e.
 *
 * FIX? should we propagate errors here rather than printing them
 * and continuing?
 */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	/* Multiple handlers may be registered for the same event; all of
	 * them run, in list order. */
	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
					   target->target_number,
					   target_name(target),
					   target_type_name(target),
					   e,
					   target_event_name(e),
					   Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			cmd_ctx->current_target_override = saved_target_override;

			/* A closed connection aborts handler processing entirely. */
			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* A script-level 'return' is not an error; unwrap its code. */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
						  target_event_name(e),
						  target_name(target),
						  Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
5072
5073 static int target_jim_get_reg(Jim_Interp *interp, int argc,
5074 Jim_Obj * const *argv)
5075 {
5076 bool force = false;
5077
5078 if (argc == 3) {
5079 const char *option = Jim_GetString(argv[1], NULL);
5080
5081 if (!strcmp(option, "-force")) {
5082 argc--;
5083 argv++;
5084 force = true;
5085 } else {
5086 Jim_SetResultFormatted(interp, "invalid option '%s'", option);
5087 return JIM_ERR;
5088 }
5089 }
5090
5091 if (argc != 2) {
5092 Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
5093 return JIM_ERR;
5094 }
5095
5096 const int length = Jim_ListLength(interp, argv[1]);
5097
5098 Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);
5099
5100 if (!result_dict)
5101 return JIM_ERR;
5102
5103 struct command_context *cmd_ctx = current_command_context(interp);
5104 assert(cmd_ctx != NULL);
5105 const struct target *target = get_current_target(cmd_ctx);
5106
5107 for (int i = 0; i < length; i++) {
5108 Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);
5109
5110 if (!elem)
5111 return JIM_ERR;
5112
5113 const char *reg_name = Jim_String(elem);
5114
5115 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5116 false);
5117
5118 if (!reg || !reg->exist) {
5119 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5120 return JIM_ERR;
5121 }
5122
5123 if (force) {
5124 int retval = reg->type->get(reg);
5125
5126 if (retval != ERROR_OK) {
5127 Jim_SetResultFormatted(interp, "failed to read register '%s'",
5128 reg_name);
5129 return JIM_ERR;
5130 }
5131 }
5132
5133 char *reg_value = buf_to_hex_str(reg->value, reg->size);
5134
5135 if (!reg_value) {
5136 LOG_ERROR("Failed to allocate memory");
5137 return JIM_ERR;
5138 }
5139
5140 char *tmp = alloc_printf("0x%s", reg_value);
5141
5142 free(reg_value);
5143
5144 if (!tmp) {
5145 LOG_ERROR("Failed to allocate memory");
5146 return JIM_ERR;
5147 }
5148
5149 Jim_DictAddElement(interp, result_dict, elem,
5150 Jim_NewStringObj(interp, tmp, -1));
5151
5152 free(tmp);
5153 }
5154
5155 Jim_SetResult(interp, result_dict);
5156
5157 return JIM_OK;
5158 }
5159
5160 static int target_jim_set_reg(Jim_Interp *interp, int argc,
5161 Jim_Obj * const *argv)
5162 {
5163 if (argc != 2) {
5164 Jim_WrongNumArgs(interp, 1, argv, "dict");
5165 return JIM_ERR;
5166 }
5167
5168 int tmp;
5169 #if JIM_VERSION >= 80
5170 Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);
5171
5172 if (!dict)
5173 return JIM_ERR;
5174 #else
5175 Jim_Obj **dict;
5176 int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);
5177
5178 if (ret != JIM_OK)
5179 return ret;
5180 #endif
5181
5182 const unsigned int length = tmp;
5183 struct command_context *cmd_ctx = current_command_context(interp);
5184 assert(cmd_ctx);
5185 const struct target *target = get_current_target(cmd_ctx);
5186
5187 for (unsigned int i = 0; i < length; i += 2) {
5188 const char *reg_name = Jim_String(dict[i]);
5189 const char *reg_value = Jim_String(dict[i + 1]);
5190 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5191 false);
5192
5193 if (!reg || !reg->exist) {
5194 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5195 return JIM_ERR;
5196 }
5197
5198 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
5199
5200 if (!buf) {
5201 LOG_ERROR("Failed to allocate memory");
5202 return JIM_ERR;
5203 }
5204
5205 str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
5206 int retval = reg->type->set(reg, buf);
5207 free(buf);
5208
5209 if (retval != ERROR_OK) {
5210 Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
5211 reg_value, reg_name);
5212 return JIM_ERR;
5213 }
5214 }
5215
5216 return JIM_OK;
5217 }
5218
5219 /**
5220 * Returns true only if the target has a handler for the specified event.
5221 */
5222 bool target_has_event_action(struct target *target, enum target_event event)
5223 {
5224 struct target_event_action *teap;
5225
5226 for (teap = target->event_action; teap; teap = teap->next) {
5227 if (teap->event == event)
5228 return true;
5229 }
5230 return false;
5231 }
5232
/* Option keys accepted by 'target configure' / '$target cget'; each
 * enumerator corresponds to one "-option" entry in nvp_config_opts. */
enum target_cfg_param {
	TCFG_TYPE,
	TCFG_EVENT,
	TCFG_WORK_AREA_VIRT,
	TCFG_WORK_AREA_PHYS,
	TCFG_WORK_AREA_SIZE,
	TCFG_WORK_AREA_BACKUP,
	TCFG_ENDIAN,
	TCFG_COREID,
	TCFG_CHAIN_POSITION,
	TCFG_DBGBASE,
	TCFG_RTOS,
	TCFG_DEFER_EXAMINE,
	TCFG_GDB_PORT,
	TCFG_GDB_MAX_CONNECTIONS,
};
5249
/* Name/value table mapping 'target configure' option strings to the
 * enum target_cfg_param keys; terminated by the NULL-name sentinel. */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type",             .value = TCFG_TYPE },
	{ .name = "-event",            .value = TCFG_EVENT },
	{ .name = "-work-area-virt",   .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys",   .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size",   .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian",           .value = TCFG_ENDIAN },
	{ .name = "-coreid",           .value = TCFG_COREID },
	{ .name = "-chain-position",   .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase",          .value = TCFG_DBGBASE },
	{ .name = "-rtos",             .value = TCFG_RTOS },
	{ .name = "-defer-examine",    .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port",         .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
5267
5268 static int target_configure(struct jim_getopt_info *goi, struct target *target)
5269 {
5270 struct jim_nvp *n;
5271 Jim_Obj *o;
5272 jim_wide w;
5273 int e;
5274
5275 /* parse config or cget options ... */
5276 while (goi->argc > 0) {
5277 Jim_SetEmptyResult(goi->interp);
5278 /* jim_getopt_debug(goi); */
5279
5280 if (target->type->target_jim_configure) {
5281 /* target defines a configure function */
5282 /* target gets first dibs on parameters */
5283 e = (*(target->type->target_jim_configure))(target, goi);
5284 if (e == JIM_OK) {
5285 /* more? */
5286 continue;
5287 }
5288 if (e == JIM_ERR) {
5289 /* An error */
5290 return e;
5291 }
5292 /* otherwise we 'continue' below */
5293 }
5294 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
5295 if (e != JIM_OK) {
5296 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
5297 return e;
5298 }
5299 switch (n->value) {
5300 case TCFG_TYPE:
5301 /* not settable */
5302 if (goi->isconfigure) {
5303 Jim_SetResultFormatted(goi->interp,
5304 "not settable: %s", n->name);
5305 return JIM_ERR;
5306 } else {
5307 no_params:
5308 if (goi->argc != 0) {
5309 Jim_WrongNumArgs(goi->interp,
5310 goi->argc, goi->argv,
5311 "NO PARAMS");
5312 return JIM_ERR;
5313 }
5314 }
5315 Jim_SetResultString(goi->interp,
5316 target_type_name(target), -1);
5317 /* loop for more */
5318 break;
5319 case TCFG_EVENT:
5320 if (goi->argc == 0) {
5321 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
5322 return JIM_ERR;
5323 }
5324
5325 e = jim_getopt_nvp(goi, nvp_target_event, &n);
5326 if (e != JIM_OK) {
5327 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
5328 return e;
5329 }
5330
5331 if (goi->isconfigure) {
5332 if (goi->argc != 1) {
5333 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
5334 return JIM_ERR;
5335 }
5336 } else {
5337 if (goi->argc != 0) {
5338 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
5339 return JIM_ERR;
5340 }
5341 }
5342
5343 {
5344 struct target_event_action *teap;
5345
5346 teap = target->event_action;
5347 /* replace existing? */
5348 while (teap) {
5349 if (teap->event == (enum target_event)n->value)
5350 break;
5351 teap = teap->next;
5352 }
5353
5354 if (goi->isconfigure) {
5355 /* START_DEPRECATED_TPIU */
5356 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5357 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5358 /* END_DEPRECATED_TPIU */
5359
5360 bool replace = true;
5361 if (!teap) {
5362 /* create new */
5363 teap = calloc(1, sizeof(*teap));
5364 replace = false;
5365 }
5366 teap->event = n->value;
5367 teap->interp = goi->interp;
5368 jim_getopt_obj(goi, &o);
5369 if (teap->body)
5370 Jim_DecrRefCount(teap->interp, teap->body);
5371 teap->body = Jim_DuplicateObj(goi->interp, o);
5372 /*
5373 * FIXME:
5374 * Tcl/TK - "tk events" have a nice feature.
5375 * See the "BIND" command.
5376 * We should support that here.
5377 * You can specify %X and %Y in the event code.
5378 * The idea is: %T - target name.
5379 * The idea is: %N - target number
5380 * The idea is: %E - event name.
5381 */
5382 Jim_IncrRefCount(teap->body);
5383
5384 if (!replace) {
5385 /* add to head of event list */
5386 teap->next = target->event_action;
5387 target->event_action = teap;
5388 }
5389 Jim_SetEmptyResult(goi->interp);
5390 } else {
5391 /* get */
5392 if (!teap)
5393 Jim_SetEmptyResult(goi->interp);
5394 else
5395 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5396 }
5397 }
5398 /* loop for more */
5399 break;
5400
5401 case TCFG_WORK_AREA_VIRT:
5402 if (goi->isconfigure) {
5403 target_free_all_working_areas(target);
5404 e = jim_getopt_wide(goi, &w);
5405 if (e != JIM_OK)
5406 return e;
5407 target->working_area_virt = w;
5408 target->working_area_virt_spec = true;
5409 } else {
5410 if (goi->argc != 0)
5411 goto no_params;
5412 }
5413 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5414 /* loop for more */
5415 break;
5416
5417 case TCFG_WORK_AREA_PHYS:
5418 if (goi->isconfigure) {
5419 target_free_all_working_areas(target);
5420 e = jim_getopt_wide(goi, &w);
5421 if (e != JIM_OK)
5422 return e;
5423 target->working_area_phys = w;
5424 target->working_area_phys_spec = true;
5425 } else {
5426 if (goi->argc != 0)
5427 goto no_params;
5428 }
5429 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5430 /* loop for more */
5431 break;
5432
5433 case TCFG_WORK_AREA_SIZE:
5434 if (goi->isconfigure) {
5435 target_free_all_working_areas(target);
5436 e = jim_getopt_wide(goi, &w);
5437 if (e != JIM_OK)
5438 return e;
5439 target->working_area_size = w;
5440 } else {
5441 if (goi->argc != 0)
5442 goto no_params;
5443 }
5444 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5445 /* loop for more */
5446 break;
5447
5448 case TCFG_WORK_AREA_BACKUP:
5449 if (goi->isconfigure) {
5450 target_free_all_working_areas(target);
5451 e = jim_getopt_wide(goi, &w);
5452 if (e != JIM_OK)
5453 return e;
5454 /* make this exactly 1 or 0 */
5455 target->backup_working_area = (!!w);
5456 } else {
5457 if (goi->argc != 0)
5458 goto no_params;
5459 }
5460 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5461 /* loop for more e*/
5462 break;
5463
5464
5465 case TCFG_ENDIAN:
5466 if (goi->isconfigure) {
5467 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5468 if (e != JIM_OK) {
5469 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5470 return e;
5471 }
5472 target->endianness = n->value;
5473 } else {
5474 if (goi->argc != 0)
5475 goto no_params;
5476 }
5477 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5478 if (!n->name) {
5479 target->endianness = TARGET_LITTLE_ENDIAN;
5480 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5481 }
5482 Jim_SetResultString(goi->interp, n->name, -1);
5483 /* loop for more */
5484 break;
5485
5486 case TCFG_COREID:
5487 if (goi->isconfigure) {
5488 e = jim_getopt_wide(goi, &w);
5489 if (e != JIM_OK)
5490 return e;
5491 target->coreid = (int32_t)w;
5492 } else {
5493 if (goi->argc != 0)
5494 goto no_params;
5495 }
5496 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5497 /* loop for more */
5498 break;
5499
5500 case TCFG_CHAIN_POSITION:
5501 if (goi->isconfigure) {
5502 Jim_Obj *o_t;
5503 struct jtag_tap *tap;
5504
5505 if (target->has_dap) {
5506 Jim_SetResultString(goi->interp,
5507 "target requires -dap parameter instead of -chain-position!", -1);
5508 return JIM_ERR;
5509 }
5510
5511 target_free_all_working_areas(target);
5512 e = jim_getopt_obj(goi, &o_t);
5513 if (e != JIM_OK)
5514 return e;
5515 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5516 if (!tap)
5517 return JIM_ERR;
5518 target->tap = tap;
5519 target->tap_configured = true;
5520 } else {
5521 if (goi->argc != 0)
5522 goto no_params;
5523 }
5524 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5525 /* loop for more e*/
5526 break;
5527 case TCFG_DBGBASE:
5528 if (goi->isconfigure) {
5529 e = jim_getopt_wide(goi, &w);
5530 if (e != JIM_OK)
5531 return e;
5532 target->dbgbase = (uint32_t)w;
5533 target->dbgbase_set = true;
5534 } else {
5535 if (goi->argc != 0)
5536 goto no_params;
5537 }
5538 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5539 /* loop for more */
5540 break;
5541 case TCFG_RTOS:
5542 /* RTOS */
5543 {
5544 int result = rtos_create(goi, target);
5545 if (result != JIM_OK)
5546 return result;
5547 }
5548 /* loop for more */
5549 break;
5550
5551 case TCFG_DEFER_EXAMINE:
5552 /* DEFER_EXAMINE */
5553 target->defer_examine = true;
5554 /* loop for more */
5555 break;
5556
5557 case TCFG_GDB_PORT:
5558 if (goi->isconfigure) {
5559 struct command_context *cmd_ctx = current_command_context(goi->interp);
5560 if (cmd_ctx->mode != COMMAND_CONFIG) {
5561 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5562 return JIM_ERR;
5563 }
5564
5565 const char *s;
5566 e = jim_getopt_string(goi, &s, NULL);
5567 if (e != JIM_OK)
5568 return e;
5569 free(target->gdb_port_override);
5570 target->gdb_port_override = strdup(s);
5571 } else {
5572 if (goi->argc != 0)
5573 goto no_params;
5574 }
5575 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5576 /* loop for more */
5577 break;
5578
5579 case TCFG_GDB_MAX_CONNECTIONS:
5580 if (goi->isconfigure) {
5581 struct command_context *cmd_ctx = current_command_context(goi->interp);
5582 if (cmd_ctx->mode != COMMAND_CONFIG) {
5583 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5584 return JIM_ERR;
5585 }
5586
5587 e = jim_getopt_wide(goi, &w);
5588 if (e != JIM_OK)
5589 return e;
5590 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5591 } else {
5592 if (goi->argc != 0)
5593 goto no_params;
5594 }
5595 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5596 break;
5597 }
5598 } /* while (goi->argc) */
5599
5600
5601 /* done - we return */
5602 return JIM_OK;
5603 }
5604
5605 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5606 {
5607 struct command *c = jim_to_command(interp);
5608 struct jim_getopt_info goi;
5609
5610 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5611 goi.isconfigure = !strcmp(c->name, "configure");
5612 if (goi.argc < 1) {
5613 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5614 "missing: -option ...");
5615 return JIM_ERR;
5616 }
5617 struct command_context *cmd_ctx = current_command_context(interp);
5618 assert(cmd_ctx);
5619 struct target *target = get_current_target(cmd_ctx);
5620 return target_configure(&goi, target);
5621 }
5622
5623 static int jim_target_mem2array(Jim_Interp *interp,
5624 int argc, Jim_Obj *const *argv)
5625 {
5626 struct command_context *cmd_ctx = current_command_context(interp);
5627 assert(cmd_ctx);
5628 struct target *target = get_current_target(cmd_ctx);
5629 return target_mem2array(interp, target, argc - 1, argv + 1);
5630 }
5631
5632 static int jim_target_array2mem(Jim_Interp *interp,
5633 int argc, Jim_Obj *const *argv)
5634 {
5635 struct command_context *cmd_ctx = current_command_context(interp);
5636 assert(cmd_ctx);
5637 struct target *target = get_current_target(cmd_ctx);
5638 return target_array2mem(interp, target, argc - 1, argv + 1);
5639 }
5640
5641 COMMAND_HANDLER(handle_target_examine)
5642 {
5643 bool allow_defer = false;
5644
5645 if (CMD_ARGC > 1)
5646 return ERROR_COMMAND_SYNTAX_ERROR;
5647
5648 if (CMD_ARGC == 1) {
5649 if (strcmp(CMD_ARGV[0], "allow-defer"))
5650 return ERROR_COMMAND_ARGUMENT_INVALID;
5651 allow_defer = true;
5652 }
5653
5654 struct target *target = get_current_target(CMD_CTX);
5655 if (!target->tap->enabled) {
5656 command_print(CMD, "[TAP is disabled]");
5657 return ERROR_FAIL;
5658 }
5659
5660 if (allow_defer && target->defer_examine) {
5661 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5662 LOG_INFO("Use arp_examine command to examine it manually!");
5663 return ERROR_OK;
5664 }
5665
5666 int retval = target->type->examine(target);
5667 if (retval != ERROR_OK) {
5668 target_reset_examined(target);
5669 return retval;
5670 }
5671
5672 target_set_examined(target);
5673
5674 return ERROR_OK;
5675 }
5676
5677 COMMAND_HANDLER(handle_target_was_examined)
5678 {
5679 if (CMD_ARGC != 0)
5680 return ERROR_COMMAND_SYNTAX_ERROR;
5681
5682 struct target *target = get_current_target(CMD_CTX);
5683
5684 command_print(CMD, "%d", target_was_examined(target) ? 1 : 0);
5685
5686 return ERROR_OK;
5687 }
5688
5689 COMMAND_HANDLER(handle_target_examine_deferred)
5690 {
5691 if (CMD_ARGC != 0)
5692 return ERROR_COMMAND_SYNTAX_ERROR;
5693
5694 struct target *target = get_current_target(CMD_CTX);
5695
5696 command_print(CMD, "%d", target->defer_examine ? 1 : 0);
5697
5698 return ERROR_OK;
5699 }
5700
5701 COMMAND_HANDLER(handle_target_halt_gdb)
5702 {
5703 if (CMD_ARGC != 0)
5704 return ERROR_COMMAND_SYNTAX_ERROR;
5705
5706 struct target *target = get_current_target(CMD_CTX);
5707
5708 return target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
5709 }
5710
5711 COMMAND_HANDLER(handle_target_poll)
5712 {
5713 if (CMD_ARGC != 0)
5714 return ERROR_COMMAND_SYNTAX_ERROR;
5715
5716 struct target *target = get_current_target(CMD_CTX);
5717 if (!target->tap->enabled) {
5718 command_print(CMD, "[TAP is disabled]");
5719 return ERROR_FAIL;
5720 }
5721
5722 if (!(target_was_examined(target)))
5723 return ERROR_TARGET_NOT_EXAMINED;
5724
5725 return target->type->poll(target);
5726 }
5727
5728 COMMAND_HANDLER(handle_target_reset)
5729 {
5730 if (CMD_ARGC != 2)
5731 return ERROR_COMMAND_SYNTAX_ERROR;
5732
5733 const struct nvp *n = nvp_name2value(nvp_assert, CMD_ARGV[0]);
5734 if (!n->name) {
5735 nvp_unknown_command_print(CMD, nvp_assert, NULL, CMD_ARGV[0]);
5736 return ERROR_COMMAND_ARGUMENT_INVALID;
5737 }
5738
5739 /* the halt or not param */
5740 int a;
5741 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], a);
5742
5743 struct target *target = get_current_target(CMD_CTX);
5744 if (!target->tap->enabled) {
5745 command_print(CMD, "[TAP is disabled]");
5746 return ERROR_FAIL;
5747 }
5748
5749 if (!target->type->assert_reset || !target->type->deassert_reset) {
5750 command_print(CMD, "No target-specific reset for %s", target_name(target));
5751 return ERROR_FAIL;
5752 }
5753
5754 if (target->defer_examine)
5755 target_reset_examined(target);
5756
5757 /* determine if we should halt or not. */
5758 target->reset_halt = (a != 0);
5759 /* When this happens - all workareas are invalid. */
5760 target_free_all_working_areas_restore(target, 0);
5761
5762 /* do the assert */
5763 if (n->value == NVP_ASSERT)
5764 return target->type->assert_reset(target);
5765 return target->type->deassert_reset(target);
5766 }
5767
5768 COMMAND_HANDLER(handle_target_halt)
5769 {
5770 if (CMD_ARGC != 0)
5771 return ERROR_COMMAND_SYNTAX_ERROR;
5772
5773 struct target *target = get_current_target(CMD_CTX);
5774 if (!target->tap->enabled) {
5775 command_print(CMD, "[TAP is disabled]");
5776 return ERROR_FAIL;
5777 }
5778
5779 return target->type->halt(target);
5780 }
5781
5782 COMMAND_HANDLER(handle_target_wait_state)
5783 {
5784 if (CMD_ARGC != 2)
5785 return ERROR_COMMAND_SYNTAX_ERROR;
5786
5787 const struct nvp *n = nvp_name2value(nvp_target_state, CMD_ARGV[0]);
5788 if (!n->name) {
5789 nvp_unknown_command_print(CMD, nvp_target_state, NULL, CMD_ARGV[0]);
5790 return ERROR_COMMAND_ARGUMENT_INVALID;
5791 }
5792
5793 unsigned int a;
5794 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], a);
5795
5796 struct target *target = get_current_target(CMD_CTX);
5797 if (!target->tap->enabled) {
5798 command_print(CMD, "[TAP is disabled]");
5799 return ERROR_FAIL;
5800 }
5801
5802 int retval = target_wait_state(target, n->value, a);
5803 if (retval != ERROR_OK) {
5804 command_print(CMD,
5805 "target: %s wait %s fails (%d) %s",
5806 target_name(target), n->name,
5807 retval, target_strerror_safe(retval));
5808 return retval;
5809 }
5810 return ERROR_OK;
5811 }
5812 /* List for human, Events defined for this target.
5813 * scripts/programs should use 'name cget -event NAME'
5814 */
5815 COMMAND_HANDLER(handle_target_event_list)
5816 {
5817 struct target *target = get_current_target(CMD_CTX);
5818 struct target_event_action *teap = target->event_action;
5819
5820 command_print(CMD, "Event actions for target (%d) %s\n",
5821 target->target_number,
5822 target_name(target));
5823 command_print(CMD, "%-25s | Body", "Event");
5824 command_print(CMD, "------------------------- | "
5825 "----------------------------------------");
5826 while (teap) {
5827 command_print(CMD, "%-25s | %s",
5828 target_event_name(teap->event),
5829 Jim_GetString(teap->body, NULL));
5830 teap = teap->next;
5831 }
5832 command_print(CMD, "***END***");
5833 return ERROR_OK;
5834 }
5835
5836 COMMAND_HANDLER(handle_target_current_state)
5837 {
5838 if (CMD_ARGC != 0)
5839 return ERROR_COMMAND_SYNTAX_ERROR;
5840
5841 struct target *target = get_current_target(CMD_CTX);
5842
5843 command_print(CMD, "%s", target_state_name(target));
5844
5845 return ERROR_OK;
5846 }
5847
5848 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5849 {
5850 struct jim_getopt_info goi;
5851 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5852 if (goi.argc != 1) {
5853 const char *cmd_name = Jim_GetString(argv[0], NULL);
5854 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5855 return JIM_ERR;
5856 }
5857 struct jim_nvp *n;
5858 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5859 if (e != JIM_OK) {
5860 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5861 return e;
5862 }
5863 struct command_context *cmd_ctx = current_command_context(interp);
5864 assert(cmd_ctx);
5865 struct target *target = get_current_target(cmd_ctx);
5866 target_handle_event(target, n->value);
5867 return JIM_OK;
5868 }
5869
/*
 * Subcommands registered under every individual target's command group
 * (e.g. 'targetname configure ...').  Chained into the per-target
 * command created by target_create().
 */
static const struct command_registration target_instance_command_handlers[] = {
	{
		.name = "configure",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_configure,
		.help = "configure a new target for use",
		.usage = "[target_attribute ...]",
	},
	{
		.name = "cget",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_configure,
		.help = "returns the specified target attribute",
		.usage = "target_attribute",
	},
	/* memory write commands share one handler; it dispatches on name */
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 64-bit word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 32-bit word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 16-bit half-word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write byte(s) to target memory",
		.usage = "address data [count]",
	},
	/* memory display commands likewise share one handler */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 64-bit words",
		.usage = "address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 32-bit words",
		.usage = "address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 16-bit half-words",
		.usage = "address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 8-bit bytes",
		.usage = "address [count]",
	},
	{
		.name = "array2mem",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_array2mem,
		.help = "Writes Tcl array of 8/16/32 bit numbers "
			"to target memory",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "mem2array",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_mem2array,
		.help = "Loads Tcl array of 8/16/32 bit numbers "
			"from target memory",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.handler = handle_target_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	{
		.name = "eventlist",
		.handler = handle_target_event_list,
		.mode = COMMAND_EXEC,
		.help = "displays a table of events defined for this target",
		.usage = "",
	},
	{
		.name = "curstate",
		.mode = COMMAND_EXEC,
		.handler = handle_target_current_state,
		.help = "displays the current state of this target",
		.usage = "",
	},
	/* 'arp_*' commands are internal plumbing used by the reset machinery */
	{
		.name = "arp_examine",
		.mode = COMMAND_EXEC,
		.handler = handle_target_examine,
		.help = "used internally for reset processing",
		.usage = "['allow-defer']",
	},
	{
		.name = "was_examined",
		.mode = COMMAND_EXEC,
		.handler = handle_target_was_examined,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "examine_deferred",
		.mode = COMMAND_EXEC,
		.handler = handle_target_examine_deferred,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "arp_halt_gdb",
		.mode = COMMAND_EXEC,
		.handler = handle_target_halt_gdb,
		.help = "used internally for reset processing to halt GDB",
		.usage = "",
	},
	{
		.name = "arp_poll",
		.mode = COMMAND_EXEC,
		.handler = handle_target_poll,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "arp_reset",
		.mode = COMMAND_EXEC,
		.handler = handle_target_reset,
		.help = "used internally for reset processing",
		.usage = "'assert'|'deassert' halt",
	},
	{
		.name = "arp_halt",
		.mode = COMMAND_EXEC,
		.handler = handle_target_halt,
		.help = "used internally for reset processing",
		.usage = "",
	},
	{
		.name = "arp_waitstate",
		.mode = COMMAND_EXEC,
		.handler = handle_target_wait_state,
		.help = "used internally for reset processing",
		.usage = "statename timeoutmsecs",
	},
	{
		.name = "invoke-event",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_invoke_event,
		.help = "invoke handler for specified event",
		.usage = "event_name",
	},
	COMMAND_REGISTRATION_DONE
};
6064
6065 static int target_create(struct jim_getopt_info *goi)
6066 {
6067 Jim_Obj *new_cmd;
6068 Jim_Cmd *cmd;
6069 const char *cp;
6070 int e;
6071 int x;
6072 struct target *target;
6073 struct command_context *cmd_ctx;
6074
6075 cmd_ctx = current_command_context(goi->interp);
6076 assert(cmd_ctx);
6077
6078 if (goi->argc < 3) {
6079 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
6080 return JIM_ERR;
6081 }
6082
6083 /* COMMAND */
6084 jim_getopt_obj(goi, &new_cmd);
6085 /* does this command exist? */
6086 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
6087 if (cmd) {
6088 cp = Jim_GetString(new_cmd, NULL);
6089 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
6090 return JIM_ERR;
6091 }
6092
6093 /* TYPE */
6094 e = jim_getopt_string(goi, &cp, NULL);
6095 if (e != JIM_OK)
6096 return e;
6097 struct transport *tr = get_current_transport();
6098 if (tr->override_target) {
6099 e = tr->override_target(&cp);
6100 if (e != ERROR_OK) {
6101 LOG_ERROR("The selected transport doesn't support this target");
6102 return JIM_ERR;
6103 }
6104 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
6105 }
6106 /* now does target type exist */
6107 for (x = 0 ; target_types[x] ; x++) {
6108 if (strcmp(cp, target_types[x]->name) == 0) {
6109 /* found */
6110 break;
6111 }
6112 }
6113 if (!target_types[x]) {
6114 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
6115 for (x = 0 ; target_types[x] ; x++) {
6116 if (target_types[x + 1]) {
6117 Jim_AppendStrings(goi->interp,
6118 Jim_GetResult(goi->interp),
6119 target_types[x]->name,
6120 ", ", NULL);
6121 } else {
6122 Jim_AppendStrings(goi->interp,
6123 Jim_GetResult(goi->interp),
6124 " or ",
6125 target_types[x]->name, NULL);
6126 }
6127 }
6128 return JIM_ERR;
6129 }
6130
6131 /* Create it */
6132 target = calloc(1, sizeof(struct target));
6133 if (!target) {
6134 LOG_ERROR("Out of memory");
6135 return JIM_ERR;
6136 }
6137
6138 /* set empty smp cluster */
6139 target->smp_targets = &empty_smp_targets;
6140
6141 /* set target number */
6142 target->target_number = new_target_number();
6143
6144 /* allocate memory for each unique target type */
6145 target->type = malloc(sizeof(struct target_type));
6146 if (!target->type) {
6147 LOG_ERROR("Out of memory");
6148 free(target);
6149 return JIM_ERR;
6150 }
6151
6152 memcpy(target->type, target_types[x], sizeof(struct target_type));
6153
6154 /* default to first core, override with -coreid */
6155 target->coreid = 0;
6156
6157 target->working_area = 0x0;
6158 target->working_area_size = 0x0;
6159 target->working_areas = NULL;
6160 target->backup_working_area = 0;
6161
6162 target->state = TARGET_UNKNOWN;
6163 target->debug_reason = DBG_REASON_UNDEFINED;
6164 target->reg_cache = NULL;
6165 target->breakpoints = NULL;
6166 target->watchpoints = NULL;
6167 target->next = NULL;
6168 target->arch_info = NULL;
6169
6170 target->verbose_halt_msg = true;
6171
6172 target->halt_issued = false;
6173
6174 /* initialize trace information */
6175 target->trace_info = calloc(1, sizeof(struct trace));
6176 if (!target->trace_info) {
6177 LOG_ERROR("Out of memory");
6178 free(target->type);
6179 free(target);
6180 return JIM_ERR;
6181 }
6182
6183 target->dbgmsg = NULL;
6184 target->dbg_msg_enabled = 0;
6185
6186 target->endianness = TARGET_ENDIAN_UNKNOWN;
6187
6188 target->rtos = NULL;
6189 target->rtos_auto_detect = false;
6190
6191 target->gdb_port_override = NULL;
6192 target->gdb_max_connections = 1;
6193
6194 /* Do the rest as "configure" options */
6195 goi->isconfigure = 1;
6196 e = target_configure(goi, target);
6197
6198 if (e == JIM_OK) {
6199 if (target->has_dap) {
6200 if (!target->dap_configured) {
6201 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
6202 e = JIM_ERR;
6203 }
6204 } else {
6205 if (!target->tap_configured) {
6206 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
6207 e = JIM_ERR;
6208 }
6209 }
6210 /* tap must be set after target was configured */
6211 if (!target->tap)
6212 e = JIM_ERR;
6213 }
6214
6215 if (e != JIM_OK) {
6216 rtos_destroy(target);
6217 free(target->gdb_port_override);
6218 free(target->trace_info);
6219 free(target->type);
6220 free(target);
6221 return e;
6222 }
6223
6224 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
6225 /* default endian to little if not specified */
6226 target->endianness = TARGET_LITTLE_ENDIAN;
6227 }
6228
6229 cp = Jim_GetString(new_cmd, NULL);
6230 target->cmd_name = strdup(cp);
6231 if (!target->cmd_name) {
6232 LOG_ERROR("Out of memory");
6233 rtos_destroy(target);
6234 free(target->gdb_port_override);
6235 free(target->trace_info);
6236 free(target->type);
6237 free(target);
6238 return JIM_ERR;
6239 }
6240
6241 if (target->type->target_create) {
6242 e = (*(target->type->target_create))(target, goi->interp);
6243 if (e != ERROR_OK) {
6244 LOG_DEBUG("target_create failed");
6245 free(target->cmd_name);
6246 rtos_destroy(target);
6247 free(target->gdb_port_override);
6248 free(target->trace_info);
6249 free(target->type);
6250 free(target);
6251 return JIM_ERR;
6252 }
6253 }
6254
6255 /* create the target specific commands */
6256 if (target->type->commands) {
6257 e = register_commands(cmd_ctx, NULL, target->type->commands);
6258 if (e != ERROR_OK)
6259 LOG_ERROR("unable to register '%s' commands", cp);
6260 }
6261
6262 /* now - create the new target name command */
6263 const struct command_registration target_subcommands[] = {
6264 {
6265 .chain = target_instance_command_handlers,
6266 },
6267 {
6268 .chain = target->type->commands,
6269 },
6270 COMMAND_REGISTRATION_DONE
6271 };
6272 const struct command_registration target_commands[] = {
6273 {
6274 .name = cp,
6275 .mode = COMMAND_ANY,
6276 .help = "target command group",
6277 .usage = "",
6278 .chain = target_subcommands,
6279 },
6280 COMMAND_REGISTRATION_DONE
6281 };
6282 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
6283 if (e != ERROR_OK) {
6284 if (target->type->deinit_target)
6285 target->type->deinit_target(target);
6286 free(target->cmd_name);
6287 rtos_destroy(target);
6288 free(target->gdb_port_override);
6289 free(target->trace_info);
6290 free(target->type);
6291 free(target);
6292 return JIM_ERR;
6293 }
6294
6295 /* append to end of list */
6296 append_to_list_all_targets(target);
6297
6298 cmd_ctx->current_target = target;
6299 return JIM_OK;
6300 }
6301
6302 COMMAND_HANDLER(handle_target_current)
6303 {
6304 if (CMD_ARGC != 0)
6305 return ERROR_COMMAND_SYNTAX_ERROR;
6306
6307 struct target *target = get_current_target_or_null(CMD_CTX);
6308 if (target)
6309 command_print(CMD, "%s", target_name(target));
6310
6311 return ERROR_OK;
6312 }
6313
6314 COMMAND_HANDLER(handle_target_types)
6315 {
6316 if (CMD_ARGC != 0)
6317 return ERROR_COMMAND_SYNTAX_ERROR;
6318
6319 for (unsigned int x = 0; target_types[x]; x++)
6320 command_print(CMD, "%s", target_types[x]->name);
6321
6322 return ERROR_OK;
6323 }
6324
6325 COMMAND_HANDLER(handle_target_names)
6326 {
6327 if (CMD_ARGC != 0)
6328 return ERROR_COMMAND_SYNTAX_ERROR;
6329
6330 struct target *target = all_targets;
6331 while (target) {
6332 command_print(CMD, "%s", target_name(target));
6333 target = target->next;
6334 }
6335
6336 return ERROR_OK;
6337 }
6338
6339 static struct target_list *
6340 __attribute__((warn_unused_result))
6341 create_target_list_node(const char *targetname)
6342 {
6343 struct target *target = get_target(targetname);
6344 LOG_DEBUG("%s ", targetname);
6345 if (!target)
6346 return NULL;
6347
6348 struct target_list *new = malloc(sizeof(struct target_list));
6349 if (!new) {
6350 LOG_ERROR("Out of memory");
6351 return new;
6352 }
6353
6354 new->target = target;
6355 return new;
6356 }
6357
6358 static int get_target_with_common_rtos_type(struct command_invocation *cmd,
6359 struct list_head *lh, struct target **result)
6360 {
6361 struct target *target = NULL;
6362 struct target_list *curr;
6363 foreach_smp_target(curr, lh) {
6364 struct rtos *curr_rtos = curr->target->rtos;
6365 if (curr_rtos) {
6366 if (target && target->rtos && target->rtos->type != curr_rtos->type) {
6367 command_print(cmd, "Different rtos types in members of one smp target!");
6368 return ERROR_FAIL;
6369 }
6370 target = curr->target;
6371 }
6372 }
6373 *result = target;
6374 return ERROR_OK;
6375 }
6376
6377 COMMAND_HANDLER(handle_target_smp)
6378 {
6379 static int smp_group = 1;
6380
6381 if (CMD_ARGC == 0) {
6382 LOG_DEBUG("Empty SMP target");
6383 return ERROR_OK;
6384 }
6385 LOG_DEBUG("%d", CMD_ARGC);
6386 /* CMD_ARGC[0] = target to associate in smp
6387 * CMD_ARGC[1] = target to associate in smp
6388 * CMD_ARGC[2] ...
6389 */
6390
6391 struct list_head *lh = malloc(sizeof(*lh));
6392 if (!lh) {
6393 LOG_ERROR("Out of memory");
6394 return ERROR_FAIL;
6395 }
6396 INIT_LIST_HEAD(lh);
6397
6398 for (unsigned int i = 0; i < CMD_ARGC; i++) {
6399 struct target_list *new = create_target_list_node(CMD_ARGV[i]);
6400 if (new)
6401 list_add_tail(&new->lh, lh);
6402 }
6403 /* now parse the list of cpu and put the target in smp mode*/
6404 struct target_list *curr;
6405 foreach_smp_target(curr, lh) {
6406 struct target *target = curr->target;
6407 target->smp = smp_group;
6408 target->smp_targets = lh;
6409 }
6410 smp_group++;
6411
6412 struct target *rtos_target;
6413 int retval = get_target_with_common_rtos_type(CMD, lh, &rtos_target);
6414 if (retval == ERROR_OK && rtos_target)
6415 retval = rtos_smp_init(rtos_target);
6416
6417 return retval;
6418 }
6419
6420 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6421 {
6422 struct jim_getopt_info goi;
6423 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6424 if (goi.argc < 3) {
6425 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6426 "<name> <target_type> [<target_options> ...]");
6427 return JIM_ERR;
6428 }
6429 return target_create(&goi);
6430 }
6431
/*
 * Subcommands of the top-level 'target' command ('target init',
 * 'target create', 'target current', ...).
 */
static const struct command_registration target_subcommand_handlers[] = {
	{
		.name = "init",
		.mode = COMMAND_CONFIG,
		.handler = handle_target_init_command,
		.help = "initialize targets",
		.usage = "",
	},
	{
		.name = "create",
		.mode = COMMAND_CONFIG,
		.jim_handler = jim_target_create,
		.usage = "name type '-chain-position' name [options ...]",
		.help = "Creates and selects a new target",
	},
	{
		.name = "current",
		.mode = COMMAND_ANY,
		.handler = handle_target_current,
		.help = "Returns the currently selected target",
		.usage = "",
	},
	{
		.name = "types",
		.mode = COMMAND_ANY,
		.handler = handle_target_types,
		.help = "Returns the available target types as "
				"a list of strings",
		.usage = "",
	},
	{
		.name = "names",
		.mode = COMMAND_ANY,
		.handler = handle_target_names,
		.help = "Returns the names of all targets as a list of strings",
		.usage = "",
	},
	{
		.name = "smp",
		.mode = COMMAND_ANY,
		.handler = handle_target_smp,
		.usage = "targetname1 targetname2 ...",
		.help = "gather several target in a smp list"
	},

	COMMAND_REGISTRATION_DONE
};
6479
/*
 * One image section staged in host memory by 'fast_load_image',
 * waiting to be written to the target by 'fast_load'.
 */
struct fast_load {
	target_addr_t address;	/* target address the data will be written to */
	uint8_t *data;		/* heap copy of the (possibly clipped) section */
	int length;		/* number of bytes in data */

};

/* staged image: array of fastload_num sections, or NULL when empty */
static int fastload_num;
static struct fast_load *fastload;
6489
6490 static void free_fastload(void)
6491 {
6492 if (fastload) {
6493 for (int i = 0; i < fastload_num; i++)
6494 free(fastload[i].data);
6495 free(fastload);
6496 fastload = NULL;
6497 }
6498 }
6499
/*
 * 'fast_load_image' handler: read an image file and stage its sections
 * in host memory (the module-level 'fastload' array) without touching
 * the target.  A later 'fast_load' performs the actual download.
 * Sections are clipped to the optional [min_address, max_address)
 * window parsed by parse_load_image_command.
 */
COMMAND_HANDLER(handle_fast_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	/* window defaults: whole address space (max_address = all-ones) */
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;

	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct duration bench;
	duration_start(&bench);

	retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
	if (retval != ERROR_OK)
		return retval;

	image_size = 0x0;
	retval = ERROR_OK;
	/* one fast_load slot per section; zeroed so skipped/clipped-out
	 * sections have data == NULL and length == 0 */
	fastload_num = image.num_sections;
	fastload = malloc(sizeof(struct fast_load)*image.num_sections);
	if (!fastload) {
		command_print(CMD, "out of memory");
		image_close(&image);
		return ERROR_FAIL;
	}
	memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
	for (unsigned int i = 0; i < image.num_sections; i++) {
		/* temporary buffer holding the full, unclipped section */
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD, "error allocating buffer for section (%d bytes)",
						  (int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* keep only sections overlapping [min_address, max_address) */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {
			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above the window */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			fastload[i].address = image.sections[i].base_address + offset;
			fastload[i].data = malloc(length);
			if (!fastload[i].data) {
				free(buffer);
				command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
							  length);
				retval = ERROR_FAIL;
				break;
			}
			memcpy(fastload[i].data, buffer + offset, length);
			fastload[i].length = length;

			image_size += length;
			command_print(CMD, "%u bytes written at address 0x%8.8x",
						  (unsigned int)length,
						  ((unsigned int)(image.sections[i].base_address + offset)));
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "Loaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));

		command_print(CMD,
				"WARNING: image has not been loaded to target!"
				"You can issue a 'fast_load' to finish loading.");
	}

	image_close(&image);

	/* on any failure, discard all sections staged so far */
	if (retval != ERROR_OK)
		free_fastload();

	return retval;
}
6601
6602 COMMAND_HANDLER(handle_fast_load_command)
6603 {
6604 if (CMD_ARGC > 0)
6605 return ERROR_COMMAND_SYNTAX_ERROR;
6606 if (!fastload) {
6607 LOG_ERROR("No image in memory");
6608 return ERROR_FAIL;
6609 }
6610 int i;
6611 int64_t ms = timeval_ms();
6612 int size = 0;
6613 int retval = ERROR_OK;
6614 for (i = 0; i < fastload_num; i++) {
6615 struct target *target = get_current_target(CMD_CTX);
6616 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6617 (unsigned int)(fastload[i].address),
6618 (unsigned int)(fastload[i].length));
6619 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6620 if (retval != ERROR_OK)
6621 break;
6622 size += fastload[i].length;
6623 }
6624 if (retval == ERROR_OK) {
6625 int64_t after = timeval_ms();
6626 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6627 }
6628 return retval;
6629 }
6630
/* Top-level command registrations: the config-mode 'target' command tree
 * and the 'targets' selection/listing command. */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
6649
/* Register the top-level target commands with the command context.
 * @return ERROR_OK on success, an ERROR_* code otherwise. */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6654
/* When true, nag the user after each reset about options that could
 * improve performance (toggled by the 'reset_nag' command). */
static bool target_reset_nag = true;

/* @return whether the post-reset performance nag is enabled */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
6661
/* 'reset_nag' - enable/disable (or query) the post-reset performance nag. */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
			&target_reset_nag, "Nag after each reset about options to improve "
			"performance");
}
6668
6669 COMMAND_HANDLER(handle_ps_command)
6670 {
6671 struct target *target = get_current_target(CMD_CTX);
6672 char *display;
6673 if (target->state != TARGET_HALTED) {
6674 command_print(CMD, "Error: [%s] not halted", target_name(target));
6675 return ERROR_TARGET_NOT_HALTED;
6676 }
6677
6678 if ((target->rtos) && (target->rtos->type)
6679 && (target->rtos->type->ps_command)) {
6680 display = target->rtos->type->ps_command(target);
6681 command_print(CMD, "%s", display);
6682 free(display);
6683 return ERROR_OK;
6684 } else {
6685 LOG_INFO("failed");
6686 return ERROR_TARGET_FAILURE;
6687 }
6688 }
6689
/* Print an optional label followed by 'size' bytes of 'buf' as
 * space-separated hex pairs, ending the line. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
	if (text)
		command_print_sameline(cmd, "%s", text);

	int idx = 0;
	while (idx < size) {
		command_print_sameline(cmd, " %02x", buf[idx]);
		idx++;
	}
	command_print(cmd, " ");
}
6698
/*
 * 'test_mem_access' - exercise the target's memory access functions.
 *
 * Sweeps every combination of access size (1/2/4 bytes), target address
 * offset (0..3) and host buffer alignment (aligned/unaligned), first for
 * reads and then for writes, comparing the result against a host-side
 * replay of the expected memory contents.  Requires a halted target and
 * enough working area for test_size plus alignment slack.
 */
COMMAND_HANDLER(handle_test_mem_access_command)
{
	struct target *target = get_current_target(CMD_CTX);
	uint32_t test_size;
	int retval = ERROR_OK;

	if (target->state != TARGET_HALTED) {
		command_print(CMD, "Error: [%s] not halted", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}

	if (CMD_ARGC != 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);

	/* Test reads */
	/* +4 bytes of slack so the offset sweep stays inside the area */
	size_t num_bytes = test_size + 4;

	struct working_area *wa = NULL;
	retval = target_alloc_working_area(target, num_bytes, &wa);
	if (retval != ERROR_OK) {
		LOG_ERROR("Not enough working area");
		return ERROR_FAIL;
	}

	/* NOTE(review): malloc results in this function are not checked before use */
	uint8_t *test_pattern = malloc(num_bytes);

	/* fill the working area with a random reference pattern */
	for (size_t i = 0; i < num_bytes; i++)
		test_pattern[i] = rand();

	retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
	if (retval != ERROR_OK) {
		LOG_ERROR("Test pattern write failed");
		/* NOTE(review): 'goto out' skips the read tests but still falls
		 * through into the write tests below — confirm this is intended */
		goto out;
	}

	for (int host_offset = 0; host_offset <= 1; host_offset++) {
		for (int size = 1; size <= 4; size *= 2) {
			for (int offset = 0; offset < 4; offset++) {
				uint32_t count = test_size / size;
				/* extra element on each side to detect out-of-bounds writes */
				size_t host_bufsiz = (count + 2) * size + host_offset;
				uint8_t *read_ref = malloc(host_bufsiz);
				uint8_t *read_buf = malloc(host_bufsiz);

				/* seed both buffers identically so untouched bytes still match */
				for (size_t i = 0; i < host_bufsiz; i++) {
					read_ref[i] = rand();
					read_buf[i] = read_ref[i];
				}
				command_print_sameline(CMD,
						"Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
						size, offset, host_offset ? "un" : "");

				struct duration bench;
				duration_start(&bench);

				retval = target_read_memory(target, wa->address + offset, size, count,
						read_buf + size + host_offset);

				duration_measure(&bench);

				if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
					command_print(CMD, "Unsupported alignment");
					goto next;
				} else if (retval != ERROR_OK) {
					command_print(CMD, "Memory read failed");
					goto next;
				}

				/* replay on host */
				memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);

				/* check result */
				int result = memcmp(read_ref, read_buf, host_bufsiz);
				if (result == 0) {
					command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
							duration_elapsed(&bench),
							duration_kbps(&bench, count * size));
				} else {
					command_print(CMD, "Compare failed");
					binprint(CMD, "ref:", read_ref, host_bufsiz);
					binprint(CMD, "buf:", read_buf, host_bufsiz);
				}
next:
				free(read_ref);
				free(read_buf);
			}
		}
	}

out:
	free(test_pattern);

	target_free_working_area(target, wa);

	/* Test writes */
	/* slack for the size/offset sweep plus guard bytes on either side */
	num_bytes = test_size + 4 + 4 + 4;

	retval = target_alloc_working_area(target, num_bytes, &wa);
	if (retval != ERROR_OK) {
		LOG_ERROR("Not enough working area");
		return ERROR_FAIL;
	}

	test_pattern = malloc(num_bytes);

	for (size_t i = 0; i < num_bytes; i++)
		test_pattern[i] = rand();

	for (int host_offset = 0; host_offset <= 1; host_offset++) {
		for (int size = 1; size <= 4; size *= 2) {
			for (int offset = 0; offset < 4; offset++) {
				uint32_t count = test_size / size;
				size_t host_bufsiz = count * size + host_offset;
				uint8_t *read_ref = malloc(num_bytes);
				uint8_t *read_buf = malloc(num_bytes);
				uint8_t *write_buf = malloc(host_bufsiz);

				for (size_t i = 0; i < host_bufsiz; i++)
					write_buf[i] = rand();
				command_print_sameline(CMD,
						"Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
						size, offset, host_offset ? "un" : "");

				/* re-seed the whole working area before each write test */
				retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
				if (retval != ERROR_OK) {
					command_print(CMD, "Test pattern write failed");
					goto nextw;
				}

				/* replay on host */
				memcpy(read_ref, test_pattern, num_bytes);
				memcpy(read_ref + size + offset, write_buf + host_offset, count * size);

				struct duration bench;
				duration_start(&bench);

				retval = target_write_memory(target, wa->address + size + offset, size, count,
						write_buf + host_offset);

				duration_measure(&bench);

				if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
					command_print(CMD, "Unsupported alignment");
					goto nextw;
				} else if (retval != ERROR_OK) {
					command_print(CMD, "Memory write failed");
					goto nextw;
				}

				/* read back */
				retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
				if (retval != ERROR_OK) {
					command_print(CMD, "Test pattern write failed");
					goto nextw;
				}

				/* check result */
				int result = memcmp(read_ref, read_buf, num_bytes);
				if (result == 0) {
					command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
							duration_elapsed(&bench),
							duration_kbps(&bench, count * size));
				} else {
					command_print(CMD, "Compare failed");
					binprint(CMD, "ref:", read_ref, num_bytes);
					binprint(CMD, "buf:", read_buf, num_bytes);
				}
nextw:
				/* NOTE(review): write_buf is never freed here — leaks each iteration */
				free(read_ref);
				free(read_buf);
			}
		}
	}

	free(test_pattern);

	target_free_working_area(target, wa);
	return retval;
}
6879
/* Per-target user commands, registered once the configuration stage is
 * done: run control, memory access, breakpoints/watchpoints, image
 * load/verify and diagnostic helpers. */
static const struct command_registration target_exec_command_handlers[] = {
	/* image pre-loading / profiling helpers */
	{
		.name = "fast_load_image",
		.handler = handle_fast_load_image_command,
		.mode = COMMAND_ANY,
		.help = "Load image into server memory for later use by "
			"fast_load; primarily for profiling",
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address [max_length]]",
	},
	{
		.name = "fast_load",
		.handler = handle_fast_load_command,
		.mode = COMMAND_EXEC,
		.help = "loads active fast load image to current target "
			"- mainly for profiling purposes",
		.usage = "",
	},
	{
		.name = "profile",
		.handler = handle_profile_command,
		.mode = COMMAND_EXEC,
		.usage = "seconds filename [start end]",
		.help = "profiling samples the CPU PC",
	},
	/** @todo don't register virt2phys() unless target supports it */
	{
		.name = "virt2phys",
		.handler = handle_virt2phys_command,
		.mode = COMMAND_ANY,
		.help = "translate a virtual address into a physical address",
		.usage = "virtual_address",
	},
	{
		.name = "reg",
		.handler = handle_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display (reread from target with \"force\") or set a register; "
			"with no arguments, displays all registers and their values",
		.usage = "[(register_number|register_name) [(value|'force')]]",
	},
	/* run control */
	{
		.name = "poll",
		.handler = handle_poll_command,
		.mode = COMMAND_EXEC,
		.help = "poll target state; or reconfigure background polling",
		.usage = "['on'|'off']",
	},
	{
		.name = "wait_halt",
		.handler = handle_wait_halt_command,
		.mode = COMMAND_EXEC,
		.help = "wait up to the specified number of milliseconds "
			"(default 5000) for a previously requested halt",
		.usage = "[milliseconds]",
	},
	{
		.name = "halt",
		.handler = handle_halt_command,
		.mode = COMMAND_EXEC,
		.help = "request target to halt, then wait up to the specified "
			"number of milliseconds (default 5000) for it to complete",
		.usage = "[milliseconds]",
	},
	{
		.name = "resume",
		.handler = handle_resume_command,
		.mode = COMMAND_EXEC,
		.help = "resume target execution from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "reset",
		.handler = handle_reset_command,
		.mode = COMMAND_EXEC,
		.usage = "[run|halt|init]",
		.help = "Reset all targets into the specified mode. "
			"Default reset mode is run, if not given.",
	},
	{
		.name = "soft_reset_halt",
		.handler = handle_soft_reset_halt_command,
		.mode = COMMAND_EXEC,
		.usage = "",
		.help = "halt the target and do a soft reset",
	},
	{
		.name = "step",
		.handler = handle_step_command,
		.mode = COMMAND_EXEC,
		.help = "step one instruction from current PC or address",
		.usage = "[address]",
	},
	/* memory display/write; one shared handler per direction, the
	 * command name selects the access width */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory double-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory half-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory bytes",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory double-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory half-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory byte",
		.usage = "['phys'] address value [count]",
	},
	/* breakpoints and watchpoints */
	{
		.name = "bp",
		.handler = handle_bp_command,
		.mode = COMMAND_EXEC,
		.help = "list or set hardware or software breakpoint",
		.usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
	},
	{
		.name = "rbp",
		.handler = handle_rbp_command,
		.mode = COMMAND_EXEC,
		.help = "remove breakpoint",
		.usage = "'all' | address",
	},
	{
		.name = "wp",
		.handler = handle_wp_command,
		.mode = COMMAND_EXEC,
		.help = "list (no params) or create watchpoints",
		.usage = "[address length [('r'|'w'|'a') value [mask]]]",
	},
	{
		.name = "rwp",
		.handler = handle_rwp_command,
		.mode = COMMAND_EXEC,
		.help = "remove watchpoint",
		.usage = "address",
	},
	/* image load/dump/verify */
	{
		.name = "load_image",
		.handler = handle_load_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address] [max_length]",
	},
	{
		.name = "dump_image",
		.handler = handle_dump_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address size",
	},
	{
		.name = "verify_image_checksum",
		.handler = handle_verify_image_checksum_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "verify_image",
		.handler = handle_verify_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "test_image",
		.handler = handle_test_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	/* Tcl-friendly register and memory access */
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.handler = handle_target_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	{
		.name = "reset_nag",
		.handler = handle_target_reset_nag,
		.mode = COMMAND_ANY,
		.help = "Nag after each reset about options that could have been "
			"enabled to improve performance.",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "ps",
		.handler = handle_ps_command,
		.mode = COMMAND_EXEC,
		.help = "list all tasks",
		.usage = "",
	},
	{
		.name = "test_mem_access",
		.handler = handle_test_mem_access_command,
		.mode = COMMAND_EXEC,
		.help = "Test the target's memory access functions",
		.usage = "size",
	},

	COMMAND_REGISTRATION_DONE
};
7141 static int target_register_user_commands(struct command_context *cmd_ctx)
7142 {
7143 int retval = ERROR_OK;
7144 retval = target_request_register_commands(cmd_ctx);
7145 if (retval != ERROR_OK)
7146 return retval;
7147
7148 retval = trace_register_commands(cmd_ctx);
7149 if (retval != ERROR_OK)
7150 return retval;
7151
7152
7153 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7154 }
7155
7156 const char *target_debug_reason_str(enum target_debug_reason reason)
7157 {
7158 switch (reason) {
7159 case DBG_REASON_DBGRQ:
7160 return "DBGRQ";
7161 case DBG_REASON_BREAKPOINT:
7162 return "BREAKPOINT";
7163 case DBG_REASON_WATCHPOINT:
7164 return "WATCHPOINT";
7165 case DBG_REASON_WPTANDBKPT:
7166 return "WPTANDBKPT";
7167 case DBG_REASON_SINGLESTEP:
7168 return "SINGLESTEP";
7169 case DBG_REASON_NOTHALTED:
7170 return "NOTHALTED";
7171 case DBG_REASON_EXIT:
7172 return "EXIT";
7173 case DBG_REASON_EXC_CATCH:
7174 return "EXC_CATCH";
7175 case DBG_REASON_UNDEFINED:
7176 return "UNDEFINED";
7177 default:
7178 return "UNKNOWN!";
7179 }
7180 }

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)