nds32: drop it, together with aice adapter driver
[openocd.git] / src / target / target.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2007-2010 Øyvind Harboe *
8 * oyvind.harboe@zylin.com *
9 * *
10 * Copyright (C) 2008, Duane Ellis *
11 * openocd@duaneeellis.com *
12 * *
13 * Copyright (C) 2008 by Spencer Oliver *
14 * spen@spen-soft.co.uk *
15 * *
16 * Copyright (C) 2008 by Rick Altherr *
17 * kc8apf@kc8apf.net> *
18 * *
19 * Copyright (C) 2011 by Broadcom Corporation *
20 * Evan Hunter - ehunter@broadcom.com *
21 * *
22 * Copyright (C) ST-Ericsson SA 2011 *
23 * michel.jaouen@stericsson.com : smp minimum support *
24 * *
25 * Copyright (C) 2011 Andreas Fritiofson *
26 * andreas.fritiofson@gmail.com *
27 ***************************************************************************/
28
29 #ifdef HAVE_CONFIG_H
30 #include "config.h"
31 #endif
32
33 #include <helper/align.h>
34 #include <helper/time_support.h>
35 #include <jtag/jtag.h>
36 #include <flash/nor/core.h>
37
38 #include "target.h"
39 #include "target_type.h"
40 #include "target_request.h"
41 #include "breakpoints.h"
42 #include "register.h"
43 #include "trace.h"
44 #include "image.h"
45 #include "rtos/rtos.h"
46 #include "transport/transport.h"
47 #include "arm_cti.h"
48 #include "smp.h"
49 #include "semihosting_common.h"
50
/* default halt wait timeout (ms) */
#define DEFAULT_HALT_TIMEOUT 5000

/* Forward declarations for default target-type hooks and Jim-Tcl helpers
 * defined later in this file. */
static int target_read_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, uint8_t *buffer);
static int target_write_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, const uint8_t *buffer);
static int target_array2mem(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_mem2array(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_register_user_commands(struct command_context *cmd_ctx);
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info);
static int target_gdb_fileio_end_default(struct target *target, int retcode,
		int fileio_errno, bool ctrl_c);

/* targets: drivers implemented elsewhere; each one selectable from
 * configuration files must also appear in target_types[] below. */
extern struct target_type arm7tdmi_target;
extern struct target_type arm720t_target;
extern struct target_type arm9tdmi_target;
extern struct target_type arm920t_target;
extern struct target_type arm966e_target;
extern struct target_type arm946e_target;
extern struct target_type arm926ejs_target;
extern struct target_type fa526_target;
extern struct target_type feroceon_target;
extern struct target_type dragonite_target;
extern struct target_type xscale_target;
extern struct target_type xtensa_chip_target;
extern struct target_type cortexm_target;
extern struct target_type cortexa_target;
extern struct target_type aarch64_target;
extern struct target_type cortexr4_target;
extern struct target_type arm11_target;
extern struct target_type ls1_sap_target;
extern struct target_type mips_m4k_target;
extern struct target_type mips_mips64_target;
extern struct target_type avr_target;
extern struct target_type dsp563xx_target;
extern struct target_type dsp5680xx_target;
extern struct target_type testee_target;
extern struct target_type avr32_ap7k_target;
extern struct target_type hla_target;
extern struct target_type esp32_target;
extern struct target_type esp32s2_target;
extern struct target_type esp32s3_target;
extern struct target_type or1k_target;
extern struct target_type quark_x10xx_target;
extern struct target_type quark_d20xx_target;
extern struct target_type stm8_target;
extern struct target_type riscv_target;
extern struct target_type mem_ap_target;
extern struct target_type esirisc_target;
extern struct target_type arcv2_target;

/* All supported target types; NULL-terminated. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&xtensa_chip_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&esp32_target,
	&esp32s2_target,
	&esp32s3_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&mips_mips64_target,
	NULL,
};

/* Head of the singly linked list of all configured targets. */
struct target *all_targets;
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
/* Earliest deadline among registered timer callbacks (ms timestamp). */
static int64_t target_timer_next_event_value;
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
/* Shared empty SMP list for targets that are not part of an SMP group. */
static LIST_HEAD(empty_smp_targets);
156
/* Name/value pairs accepted wherever an assert/deassert flag is parsed. */
static const struct jim_nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};

/* Printable names for ERROR_TARGET_* codes; used by target_strerror_safe(). */
static const struct jim_nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
181
182 static const char *target_strerror_safe(int err)
183 {
184 const struct jim_nvp *n;
185
186 n = jim_nvp_value2name_simple(nvp_error_target, err);
187 if (!n->name)
188 return "unknown";
189 else
190 return n->name;
191 }
192
/* Printable names for target events, as exposed to Tcl event handlers
 * ("$_TARGETNAME configure -event <name> ..."). */
static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X100, .name = "semihosting-user-cmd-0x100" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X101, .name = "semihosting-user-cmd-0x101" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X102, .name = "semihosting-user-cmd-0x102" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X103, .name = "semihosting-user-cmd-0x103" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X104, .name = "semihosting-user-cmd-0x104" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X105, .name = "semihosting-user-cmd-0x105" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X106, .name = "semihosting-user-cmd-0x106" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X107, .name = "semihosting-user-cmd-0x107" },

	{ .name = NULL, .value = -1 }
};

/* Printable names for the target run-state machine. */
static const struct jim_nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};

/* Printable names for the reason a target entered debug state. */
static const struct jim_nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};

/* Endianness keywords accepted by configuration; long and short forms. */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};

/* Reset-mode keywords used by the "reset" command and related Tcl code. */
static const struct jim_nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};
282
283 const char *debug_reason_name(struct target *t)
284 {
285 const char *cp;
286
287 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
288 t->debug_reason)->name;
289 if (!cp) {
290 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
291 cp = "(*BUG*unknown*BUG*)";
292 }
293 return cp;
294 }
295
296 const char *target_state_name(struct target *t)
297 {
298 const char *cp;
299 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
300 if (!cp) {
301 LOG_ERROR("Invalid target state: %d", (int)(t->state));
302 cp = "(*BUG*unknown*BUG*)";
303 }
304
305 if (!target_was_examined(t) && t->defer_examine)
306 cp = "examine deferred";
307
308 return cp;
309 }
310
311 const char *target_event_name(enum target_event event)
312 {
313 const char *cp;
314 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
315 if (!cp) {
316 LOG_ERROR("Invalid target event: %d", (int)(event));
317 cp = "(*BUG*unknown*BUG*)";
318 }
319 return cp;
320 }
321
322 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
323 {
324 const char *cp;
325 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
326 if (!cp) {
327 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
328 cp = "(*BUG*unknown*BUG*)";
329 }
330 return cp;
331 }
332
333 /* determine the number of the new target */
334 static int new_target_number(void)
335 {
336 struct target *t;
337 int x;
338
339 /* number is 0 based */
340 x = -1;
341 t = all_targets;
342 while (t) {
343 if (x < t->target_number)
344 x = t->target_number;
345 t = t->next;
346 }
347 return x + 1;
348 }
349
350 static void append_to_list_all_targets(struct target *target)
351 {
352 struct target **t = &all_targets;
353
354 while (*t)
355 t = &((*t)->next);
356 *t = target;
357 }
358
359 /* read a uint64_t from a buffer in target memory endianness */
360 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
361 {
362 if (target->endianness == TARGET_LITTLE_ENDIAN)
363 return le_to_h_u64(buffer);
364 else
365 return be_to_h_u64(buffer);
366 }
367
368 /* read a uint32_t from a buffer in target memory endianness */
369 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
370 {
371 if (target->endianness == TARGET_LITTLE_ENDIAN)
372 return le_to_h_u32(buffer);
373 else
374 return be_to_h_u32(buffer);
375 }
376
377 /* read a uint24_t from a buffer in target memory endianness */
378 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
379 {
380 if (target->endianness == TARGET_LITTLE_ENDIAN)
381 return le_to_h_u24(buffer);
382 else
383 return be_to_h_u24(buffer);
384 }
385
386 /* read a uint16_t from a buffer in target memory endianness */
387 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
388 {
389 if (target->endianness == TARGET_LITTLE_ENDIAN)
390 return le_to_h_u16(buffer);
391 else
392 return be_to_h_u16(buffer);
393 }
394
395 /* write a uint64_t to a buffer in target memory endianness */
396 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
397 {
398 if (target->endianness == TARGET_LITTLE_ENDIAN)
399 h_u64_to_le(buffer, value);
400 else
401 h_u64_to_be(buffer, value);
402 }
403
404 /* write a uint32_t to a buffer in target memory endianness */
405 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
406 {
407 if (target->endianness == TARGET_LITTLE_ENDIAN)
408 h_u32_to_le(buffer, value);
409 else
410 h_u32_to_be(buffer, value);
411 }
412
413 /* write a uint24_t to a buffer in target memory endianness */
414 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
415 {
416 if (target->endianness == TARGET_LITTLE_ENDIAN)
417 h_u24_to_le(buffer, value);
418 else
419 h_u24_to_be(buffer, value);
420 }
421
422 /* write a uint16_t to a buffer in target memory endianness */
423 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
424 {
425 if (target->endianness == TARGET_LITTLE_ENDIAN)
426 h_u16_to_le(buffer, value);
427 else
428 h_u16_to_be(buffer, value);
429 }
430
/* write a uint8_t to a buffer in target memory endianness
 * (a single byte is endianness-independent; the target parameter is kept
 * only for signature symmetry with the wider variants). */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	*buffer = value;
}
436
437 /* write a uint64_t array to a buffer in target memory endianness */
438 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
439 {
440 uint32_t i;
441 for (i = 0; i < count; i++)
442 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
443 }
444
445 /* write a uint32_t array to a buffer in target memory endianness */
446 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
447 {
448 uint32_t i;
449 for (i = 0; i < count; i++)
450 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
451 }
452
453 /* write a uint16_t array to a buffer in target memory endianness */
454 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
455 {
456 uint32_t i;
457 for (i = 0; i < count; i++)
458 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
459 }
460
461 /* write a uint64_t array to a buffer in target memory endianness */
462 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
463 {
464 uint32_t i;
465 for (i = 0; i < count; i++)
466 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
467 }
468
469 /* write a uint32_t array to a buffer in target memory endianness */
470 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
471 {
472 uint32_t i;
473 for (i = 0; i < count; i++)
474 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
475 }
476
477 /* write a uint16_t array to a buffer in target memory endianness */
478 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
479 {
480 uint32_t i;
481 for (i = 0; i < count; i++)
482 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
483 }
484
485 /* return a pointer to a configured target; id is name or number */
486 struct target *get_target(const char *id)
487 {
488 struct target *target;
489
490 /* try as tcltarget name */
491 for (target = all_targets; target; target = target->next) {
492 if (!target_name(target))
493 continue;
494 if (strcmp(id, target_name(target)) == 0)
495 return target;
496 }
497
498 /* It's OK to remove this fallback sometime after August 2010 or so */
499
500 /* no match, try as number */
501 unsigned num;
502 if (parse_uint(id, &num) != ERROR_OK)
503 return NULL;
504
505 for (target = all_targets; target; target = target->next) {
506 if (target->target_number == (int)num) {
507 LOG_WARNING("use '%s' as target identifier, not '%u'",
508 target_name(target), num);
509 return target;
510 }
511 }
512
513 return NULL;
514 }
515
516 /* returns a pointer to the n-th configured target */
517 struct target *get_target_by_num(int num)
518 {
519 struct target *target = all_targets;
520
521 while (target) {
522 if (target->target_number == num)
523 return target;
524 target = target->next;
525 }
526
527 return NULL;
528 }
529
/* Like get_current_target_or_null(), but a missing current target is an
 * internal error and terminates the process. */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *current = get_current_target_or_null(cmd_ctx);

	if (!current) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return current;
}
541
542 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
543 {
544 return cmd_ctx->current_target_override
545 ? cmd_ctx->current_target_override
546 : cmd_ctx->current_target;
547 }
548
/* Poll the target's state via its driver. Also watches a pending halt
 * request (set by target_halt()) and notifies GDB if the halt does not
 * complete within DEFAULT_HALT_TIMEOUT ms. */
int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	/* Track an outstanding halt request until it completes or times out. */
	if (target->halt_issued) {
		if (target->state == TARGET_HALTED)
			target->halt_issued = false;
		else {
			int64_t t = timeval_ms() - target->halt_issued_time;
			if (t > DEFAULT_HALT_TIMEOUT) {
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				/* Let GDB stop waiting for the halt to happen. */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}
578
579 int target_halt(struct target *target)
580 {
581 int retval;
582 /* We can't poll until after examine */
583 if (!target_was_examined(target)) {
584 LOG_ERROR("Target not examined yet");
585 return ERROR_FAIL;
586 }
587
588 retval = target->type->halt(target);
589 if (retval != ERROR_OK)
590 return retval;
591
592 target->halt_issued = true;
593 target->halt_issued_time = timeval_ms();
594
595 return ERROR_OK;
596 }
597
/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 *	of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 *	should be skipped.  (For example, maybe execution was stopped by
 *	such a breakpoint, in which case it would be counterproductive to
 *	let it re-trigger.)
 * @param debug_execution False if all working areas allocated by OpenOCD
 *	should be released and/or restored to their original contents.
 *	(This would for example be true to run some downloaded "helper"
 *	algorithm code, which resides in one such working buffer and uses
 *	another for data storage.)
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies.  For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled.  (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm.... and
 * enabling them could cause such helpers to malfunction (for example,
 * by overwriting data with a breakpoint instruction.  On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	/*
	 * resume() triggers the event 'resumed'. The execution of TCL commands
	 * in the event handler causes the polling of targets. If the target has
	 * already halted for a breakpoint, polling will run the 'halted' event
	 * handler before the pending 'resumed' handler.
	 * Disable polling during resume() to guarantee the execution of handlers
	 * in the correct order.
	 */
	bool save_poll_mask = jtag_poll_mask();
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	jtag_poll_unmask(save_poll_mask);

	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
664
665 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
666 {
667 char buf[100];
668 int retval;
669 struct jim_nvp *n;
670 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
671 if (!n->name) {
672 LOG_ERROR("invalid reset mode");
673 return ERROR_FAIL;
674 }
675
676 struct target *target;
677 for (target = all_targets; target; target = target->next)
678 target_call_reset_callbacks(target, reset_mode);
679
680 /* disable polling during reset to make reset event scripts
681 * more predictable, i.e. dr/irscan & pathmove in events will
682 * not have JTAG operations injected into the middle of a sequence.
683 */
684 bool save_poll_mask = jtag_poll_mask();
685
686 sprintf(buf, "ocd_process_reset %s", n->name);
687 retval = Jim_Eval(cmd->ctx->interp, buf);
688
689 jtag_poll_unmask(save_poll_mask);
690
691 if (retval != JIM_OK) {
692 Jim_MakeErrorMessage(cmd->ctx->interp);
693 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
694 return ERROR_FAIL;
695 }
696
697 /* We want any events to be processed before the prompt */
698 retval = target_call_timer_callbacks_now();
699
700 for (target = all_targets; target; target = target->next) {
701 target->type->check_reset(target);
702 target->running_alg = false;
703 }
704
705 return retval;
706 }
707
/* Default virt2phys hook for targets without an MMU: the physical
 * address is the virtual address, unchanged. */
static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
714
/* Default mmu hook for targets without an MMU: always reports disabled. */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
720
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static inline void target_reset_examined(struct target *target)
{
	target->examined = false;
}
729
/* Default examine hook: nothing to probe, simply mark the target examined. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
735
/* no check by default: check_reset hook that always succeeds */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
741
/* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
 * Keep in sync */
/* Examine a single target: fires examine-start, invokes the driver's
 * examine hook, then fires examine-end on success or examine-fail (after
 * clearing the examined flag) on error. */
int target_examine_one(struct target *target)
{
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

	int retval = target->type->examine(target);
	if (retval != ERROR_OK) {
		/* failed examination must not leave the target marked examined */
		target_reset_examined(target);
		target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
		return retval;
	}

	target_set_examined(target);
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);

	return ERROR_OK;
}
760
/* One-shot JTAG event callback: once this target's TAP becomes enabled,
 * unregister itself and run the deferred examination. Registered by
 * target_examine() for targets whose TAP was disabled at startup. */
static int jtag_enable_callback(enum jtag_event event, void *priv)
{
	struct target *target = priv;

	if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
		return ERROR_OK;

	/* unregister first so a failing examine cannot re-trigger this hook */
	jtag_unregister_event_callback(jtag_enable_callback, target);

	return target_examine_one(target);
}
772
773 /* Targets that correctly implement init + examine, i.e.
774 * no communication with target during init:
775 *
776 * XScale
777 */
778 int target_examine(void)
779 {
780 int retval = ERROR_OK;
781 struct target *target;
782
783 for (target = all_targets; target; target = target->next) {
784 /* defer examination, but don't skip it */
785 if (!target->tap->enabled) {
786 jtag_register_event_callback(jtag_enable_callback,
787 target);
788 continue;
789 }
790
791 if (target->defer_examine)
792 continue;
793
794 int retval2 = target_examine_one(target);
795 if (retval2 != ERROR_OK) {
796 LOG_WARNING("target %s examination failed", target_name(target));
797 retval = retval2;
798 }
799 }
800 return retval;
801 }
802
/* Return the name of the driver (target type) backing this target. */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
807
808 static int target_soft_reset_halt(struct target *target)
809 {
810 if (!target_was_examined(target)) {
811 LOG_ERROR("Target not examined yet");
812 return ERROR_FAIL;
813 }
814 if (!target->type->soft_reset_halt) {
815 LOG_ERROR("Target %s does not support soft_reset_halt",
816 target_name(target));
817 return ERROR_FAIL;
818 }
819 return target->type->soft_reset_halt(target);
820 }
821
/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param num_mem_params number of entries in @a mem_params
 * @param mem_params memory-based parameters passed to the algorithm
 * @param num_reg_params number of entries in @a reg_param
 * @param reg_param register-based parameters passed to the algorithm
 * @param entry_point address on the target where execution starts
 * @param exit_point address whose reach marks algorithm completion
 * @param timeout_ms how long to wait for completion, in milliseconds
 * @param arch_info target-specific description of the algorithm.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		target_addr_t entry_point, target_addr_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	/* running_alg guards against overlapping algorithm runs; see also
	 * target_start_algorithm()/target_wait_algorithm(). */
	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
868
/**
 * Executes a target-specific native code algorithm and leaves it running.
 * Pair with target_wait_algorithm() to collect the result; while an
 * algorithm is running, starting another one is rejected.
 *
 * @param target used to run the algorithm
 * @param num_mem_params number of entries in @a mem_params
 * @param mem_params memory-based parameters passed to the algorithm
 * @param num_reg_params number of entries in @a reg_params
 * @param reg_params register-based parameters passed to the algorithm
 * @param entry_point address on the target where execution starts
 * @param exit_point address whose reach marks algorithm completion
 * @param arch_info target-specific description of the algorithm.
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t entry_point, target_addr_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	/* flag stays set until target_wait_algorithm() completes */
	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}
912
/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 * On ERROR_TARGET_TIMEOUT the running_alg flag is left set, so the caller
 * may invoke this function again to keep waiting.
 *
 * @param target used to run the algorithm
 * @param num_mem_params number of entries in @a mem_params
 * @param mem_params memory-based parameters to read back from the algorithm
 * @param num_reg_params number of entries in @a reg_params
 * @param reg_params register-based parameters to read back from the algorithm
 * @param exit_point address whose reach marks algorithm completion
 * @param timeout_ms how long to wait for completion, in milliseconds
 * @param arch_info target-specific description of the algorithm.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	/* keep running_alg set on timeout so the wait can be retried */
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}
953
954 /**
955 * Streams data to a circular buffer on target intended for consumption by code
956 * running asynchronously on target.
957 *
958 * This is intended for applications where target-specific native code runs
959 * on the target, receives data from the circular buffer, does something with
960 * it (most likely writing it to a flash memory), and advances the circular
961 * buffer pointer.
962 *
963 * This assumes that the helper algorithm has already been loaded to the target,
964 * but has not been started yet. Given memory and register parameters are passed
965 * to the algorithm.
966 *
967 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
968 * following format:
969 *
970 * [buffer_start + 0, buffer_start + 4):
971 * Write Pointer address (aka head). Written and updated by this
972 * routine when new data is written to the circular buffer.
973 * [buffer_start + 4, buffer_start + 8):
974 * Read Pointer address (aka tail). Updated by code running on the
975 * target after it consumes data.
976 * [buffer_start + 8, buffer_start + buffer_size):
977 * Circular buffer contents.
978 *
979 * See contrib/loaders/flash/stm32f1x.S for an example.
980 *
981 * @param target used to run the algorithm
982 * @param buffer address on the host where data to be sent is located
983 * @param count number of blocks to send
984 * @param block_size size in bytes of each block
985 * @param num_mem_params count of memory-based params to pass to algorithm
986 * @param mem_params memory-based params to pass to algorithm
987 * @param num_reg_params count of register-based params to pass to algorithm
988 * @param reg_params memory-based params to pass to algorithm
989 * @param buffer_start address on the target of the circular buffer structure
990 * @param buffer_size size of the circular buffer structure
991 * @param entry_point address on the target to execute to start the algorithm
992 * @param exit_point address at which to set a breakpoint to catch the
993 * end of the algorithm; can be 0 if target triggers a breakpoint itself
994 * @param arch_info
995 */
996
int target_run_flash_async_algorithm(struct target *target,
		const uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	/* Start of the host buffer, kept only for progress logging. */
	const uint8_t *buffer_orig = buffer;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	/* Both pointers start at the data area: wp == rp is the empty condition. */
	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(IS_PWR_OF_2(block_size));

	/* Publish the initial pointers before the target algorithm starts. */
	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target and let it idle while writing the first chunk */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash write algorithm");
		return retval;
	}

	while (count > 0) {

		retval = target_read_u32(target, rp_addr, &rp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get read pointer");
			break;
		}

		LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
				(size_t) (buffer - buffer_orig), count, wp, rp);

		/* rp == 0 is the target's abort signal. */
		if (rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		/* Sanity-check rp: must lie inside the data area, block-aligned. */
		if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. Make sure to not fill it completely,
		 * because that would make wp == rp and that's the empty condition. */
		uint32_t thisrun_bytes;
		if (rp > wp)
			thisrun_bytes = rp - wp - block_size;
		else if (rp > fifo_start_addr)
			thisrun_bytes = fifo_end_addr - wp;
		else
			thisrun_bytes = fifo_end_addr - wp - block_size;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * programming. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(2);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a stellaris using the new ICDI interface */
			if (timeout++ >= 2500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to write */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Force end of large blocks to be word aligned.
		 * NOTE(review): the adjustment is computed from rp although data is
		 * written at wp — confirm this matches the loaders' expectations. */
		if (thisrun_bytes >= 16)
			thisrun_bytes -= (rp + thisrun_bytes) & 0x03;

		/* Write data to fifo */
		retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap write pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		wp += thisrun_bytes;
		if (wp >= fifo_end_addr)
			wp = fifo_start_addr;

		/* Store updated write pointer to target */
		retval = target_write_u32(target, wp_addr, wp);
		if (retval != ERROR_OK)
			break;

		/* Avoid GDB timeouts */
		keep_alive();
	}

	if (retval != ERROR_OK) {
		/* abort flash write algorithm on target: wp = 0 signals abort */
		target_write_u32(target, wp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash write algorithm");
		retval = retval2;
	}

	if (retval == ERROR_OK) {
		/* check if algorithm set rp = 0 after fifo writer loop finished */
		retval = target_read_u32(target, rp_addr, &rp);
		if (retval == ERROR_OK && rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
		}
	}

	return retval;
}
1149
/* Counterpart of target_run_flash_async_algorithm(): the target produces data
 * into the circular buffer and this routine drains it into the host buffer.
 * The roles of wp (target-updated) and rp (host-updated) are swapped. */
int target_run_read_async_algorithm(struct target *target,
		uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	/* Start of the host buffer, kept only for progress logging. */
	const uint8_t *buffer_orig = buffer;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	/* Both pointers start at the data area: wp == rp is the empty condition. */
	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(IS_PWR_OF_2(block_size));

	/* Publish the initial pointers before the target algorithm starts. */
	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash read algorithm");
		return retval;
	}

	while (count > 0) {
		retval = target_read_u32(target, wp_addr, &wp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get write pointer");
			break;
		}

		LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
				(size_t)(buffer - buffer_orig), count, wp, rp);

		/* wp == 0 is the target's abort signal. */
		if (wp == 0) {
			LOG_ERROR("flash read algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		/* Sanity-check wp: must lie inside the data area, block-aligned. */
		if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. */
		uint32_t thisrun_bytes;
		if (wp >= rp)
			thisrun_bytes = wp - rp;
		else
			thisrun_bytes = fifo_end_addr - rp;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * reading. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(2);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a stellaris using the new ICDI interface */
			if (timeout++ >= 2500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* Reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to read */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Force end of large blocks to be word aligned */
		if (thisrun_bytes >= 16)
			thisrun_bytes -= (rp + thisrun_bytes) & 0x03;

		/* Read data from fifo */
		retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap read pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		rp += thisrun_bytes;
		if (rp >= fifo_end_addr)
			rp = fifo_start_addr;

		/* Store updated read pointer to target */
		retval = target_write_u32(target, rp_addr, rp);
		if (retval != ERROR_OK)
			break;

		/* Avoid GDB timeouts */
		keep_alive();

	}

	if (retval != ERROR_OK) {
		/* abort flash read algorithm on target: rp = 0 signals abort */
		target_write_u32(target, rp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		/* NOTE(review): message says "write" in the read path — candidate cleanup */
		LOG_ERROR("error waiting for target flash write algorithm");
		retval = retval2;
	}

	if (retval == ERROR_OK) {
		/* check if algorithm set wp = 0 after fifo writer loop finished */
		retval = target_read_u32(target, wp_addr, &wp);
		if (retval == ERROR_OK && wp == 0) {
			LOG_ERROR("flash read algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
		}
	}

	return retval;
}
1299
1300 int target_read_memory(struct target *target,
1301 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1302 {
1303 if (!target_was_examined(target)) {
1304 LOG_ERROR("Target not examined yet");
1305 return ERROR_FAIL;
1306 }
1307 if (!target->type->read_memory) {
1308 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1309 return ERROR_FAIL;
1310 }
1311 return target->type->read_memory(target, address, size, count, buffer);
1312 }
1313
1314 int target_read_phys_memory(struct target *target,
1315 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1316 {
1317 if (!target_was_examined(target)) {
1318 LOG_ERROR("Target not examined yet");
1319 return ERROR_FAIL;
1320 }
1321 if (!target->type->read_phys_memory) {
1322 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1323 return ERROR_FAIL;
1324 }
1325 return target->type->read_phys_memory(target, address, size, count, buffer);
1326 }
1327
1328 int target_write_memory(struct target *target,
1329 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1330 {
1331 if (!target_was_examined(target)) {
1332 LOG_ERROR("Target not examined yet");
1333 return ERROR_FAIL;
1334 }
1335 if (!target->type->write_memory) {
1336 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1337 return ERROR_FAIL;
1338 }
1339 return target->type->write_memory(target, address, size, count, buffer);
1340 }
1341
1342 int target_write_phys_memory(struct target *target,
1343 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1344 {
1345 if (!target_was_examined(target)) {
1346 LOG_ERROR("Target not examined yet");
1347 return ERROR_FAIL;
1348 }
1349 if (!target->type->write_phys_memory) {
1350 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1351 return ERROR_FAIL;
1352 }
1353 return target->type->write_phys_memory(target, address, size, count, buffer);
1354 }
1355
1356 int target_add_breakpoint(struct target *target,
1357 struct breakpoint *breakpoint)
1358 {
1359 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1360 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1361 return ERROR_TARGET_NOT_HALTED;
1362 }
1363 return target->type->add_breakpoint(target, breakpoint);
1364 }
1365
1366 int target_add_context_breakpoint(struct target *target,
1367 struct breakpoint *breakpoint)
1368 {
1369 if (target->state != TARGET_HALTED) {
1370 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1371 return ERROR_TARGET_NOT_HALTED;
1372 }
1373 return target->type->add_context_breakpoint(target, breakpoint);
1374 }
1375
1376 int target_add_hybrid_breakpoint(struct target *target,
1377 struct breakpoint *breakpoint)
1378 {
1379 if (target->state != TARGET_HALTED) {
1380 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1381 return ERROR_TARGET_NOT_HALTED;
1382 }
1383 return target->type->add_hybrid_breakpoint(target, breakpoint);
1384 }
1385
/* Remove a previously added breakpoint; delegates directly to the driver. */
int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}
1391
1392 int target_add_watchpoint(struct target *target,
1393 struct watchpoint *watchpoint)
1394 {
1395 if (target->state != TARGET_HALTED) {
1396 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1397 return ERROR_TARGET_NOT_HALTED;
1398 }
1399 return target->type->add_watchpoint(target, watchpoint);
1400 }
/* Remove a previously added watchpoint; delegates directly to the driver. */
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}
1406 int target_hit_watchpoint(struct target *target,
1407 struct watchpoint **hit_watchpoint)
1408 {
1409 if (target->state != TARGET_HALTED) {
1410 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1411 return ERROR_TARGET_NOT_HALTED;
1412 }
1413
1414 if (!target->type->hit_watchpoint) {
1415 /* For backward compatible, if hit_watchpoint is not implemented,
1416 * return ERROR_FAIL such that gdb_server will not take the nonsense
1417 * information. */
1418 return ERROR_FAIL;
1419 }
1420
1421 return target->type->hit_watchpoint(target, hit_watchpoint);
1422 }
1423
1424 const char *target_get_gdb_arch(struct target *target)
1425 {
1426 if (!target->type->get_gdb_arch)
1427 return NULL;
1428 return target->type->get_gdb_arch(target);
1429 }
1430
1431 int target_get_gdb_reg_list(struct target *target,
1432 struct reg **reg_list[], int *reg_list_size,
1433 enum target_register_class reg_class)
1434 {
1435 int result = ERROR_FAIL;
1436
1437 if (!target_was_examined(target)) {
1438 LOG_ERROR("Target not examined yet");
1439 goto done;
1440 }
1441
1442 result = target->type->get_gdb_reg_list(target, reg_list,
1443 reg_list_size, reg_class);
1444
1445 done:
1446 if (result != ERROR_OK) {
1447 *reg_list = NULL;
1448 *reg_list_size = 0;
1449 }
1450 return result;
1451 }
1452
1453 int target_get_gdb_reg_list_noread(struct target *target,
1454 struct reg **reg_list[], int *reg_list_size,
1455 enum target_register_class reg_class)
1456 {
1457 if (target->type->get_gdb_reg_list_noread &&
1458 target->type->get_gdb_reg_list_noread(target, reg_list,
1459 reg_list_size, reg_class) == ERROR_OK)
1460 return ERROR_OK;
1461 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1462 }
1463
1464 bool target_supports_gdb_connection(struct target *target)
1465 {
1466 /*
1467 * exclude all the targets that don't provide get_gdb_reg_list
1468 * or that have explicit gdb_max_connection == 0
1469 */
1470 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1471 }
1472
1473 int target_step(struct target *target,
1474 int current, target_addr_t address, int handle_breakpoints)
1475 {
1476 int retval;
1477
1478 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1479
1480 retval = target->type->step(target, current, address, handle_breakpoints);
1481 if (retval != ERROR_OK)
1482 return retval;
1483
1484 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1485
1486 return retval;
1487 }
1488
1489 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1490 {
1491 if (target->state != TARGET_HALTED) {
1492 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1493 return ERROR_TARGET_NOT_HALTED;
1494 }
1495 return target->type->get_gdb_fileio_info(target, fileio_info);
1496 }
1497
1498 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1499 {
1500 if (target->state != TARGET_HALTED) {
1501 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1502 return ERROR_TARGET_NOT_HALTED;
1503 }
1504 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1505 }
1506
1507 target_addr_t target_address_max(struct target *target)
1508 {
1509 unsigned bits = target_address_bits(target);
1510 if (sizeof(target_addr_t) * 8 == bits)
1511 return (target_addr_t) -1;
1512 else
1513 return (((target_addr_t) 1) << bits) - 1;
1514 }
1515
1516 unsigned target_address_bits(struct target *target)
1517 {
1518 if (target->type->address_bits)
1519 return target->type->address_bits(target);
1520 return 32;
1521 }
1522
1523 unsigned int target_data_bits(struct target *target)
1524 {
1525 if (target->type->data_bits)
1526 return target->type->data_bits(target);
1527 return 32;
1528 }
1529
/* Collect profiling samples via the driver's profiling hook (always set:
 * target_init_one() installs target_profiling_default when absent). */
static int target_profiling(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	return target->type->profiling(target, samples, max_num_samples,
			num_samples, seconds);
}
1536
1537 static int handle_target(void *priv);
1538
/* Prepare a single target for use: install fallbacks for optional driver
 * hooks, run the driver's mandatory init_target(), and normalize MMU hooks. */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	/* Examination state does not survive re-init; start from scratch. */
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (!type->examine)
		type->examine = default_examine;

	if (!type->check_reset)
		type->check_reset = default_check_reset;

	/* Every driver must provide init_target. */
	assert(type->init_target);

	int retval = type->init_target(cmd_ctx, target);
	if (retval != ERROR_OK) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (!type->virt2phys) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* Fall back to generic helpers for the remaining optional hooks. */
	if (!target->type->read_buffer)
		target->type->read_buffer = target_read_buffer_default;

	if (!target->type->write_buffer)
		target->type->write_buffer = target_write_buffer_default;

	if (!target->type->get_gdb_fileio_info)
		target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;

	if (!target->type->gdb_fileio_end)
		target->type->gdb_fileio_end = target_gdb_fileio_end_default;

	if (!target->type->profiling)
		target->type->profiling = target_profiling_default;

	return ERROR_OK;
}
1598
1599 static int target_init(struct command_context *cmd_ctx)
1600 {
1601 struct target *target;
1602 int retval;
1603
1604 for (target = all_targets; target; target = target->next) {
1605 retval = target_init_one(cmd_ctx, target);
1606 if (retval != ERROR_OK)
1607 return retval;
1608 }
1609
1610 if (!all_targets)
1611 return ERROR_OK;
1612
1613 retval = target_register_user_commands(cmd_ctx);
1614 if (retval != ERROR_OK)
1615 return retval;
1616
1617 retval = target_register_timer_callback(&handle_target,
1618 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1619 if (retval != ERROR_OK)
1620 return retval;
1621
1622 return ERROR_OK;
1623 }
1624
/* 'target init' command handler: runs the overridable init_targets,
 * init_target_events and init_board procs, then initializes all targets.
 * A second invocation is a harmless no-op. */
COMMAND_HANDLER(handle_target_init_command)
{
	int retval;

	if (CMD_ARGC != 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Guard against repeated invocation; only the first call does work. */
	static bool target_initialized;
	if (target_initialized) {
		LOG_INFO("'target init' has already been called");
		return ERROR_OK;
	}
	target_initialized = true;

	retval = command_run_line(CMD_CTX, "init_targets");
	if (retval != ERROR_OK)
		return retval;

	retval = command_run_line(CMD_CTX, "init_target_events");
	if (retval != ERROR_OK)
		return retval;

	retval = command_run_line(CMD_CTX, "init_board");
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("Initializing targets...");
	return target_init(CMD_CTX);
}
1654
1655 int target_register_event_callback(int (*callback)(struct target *target,
1656 enum target_event event, void *priv), void *priv)
1657 {
1658 struct target_event_callback **callbacks_p = &target_event_callbacks;
1659
1660 if (!callback)
1661 return ERROR_COMMAND_SYNTAX_ERROR;
1662
1663 if (*callbacks_p) {
1664 while ((*callbacks_p)->next)
1665 callbacks_p = &((*callbacks_p)->next);
1666 callbacks_p = &((*callbacks_p)->next);
1667 }
1668
1669 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1670 (*callbacks_p)->callback = callback;
1671 (*callbacks_p)->priv = priv;
1672 (*callbacks_p)->next = NULL;
1673
1674 return ERROR_OK;
1675 }
1676
1677 int target_register_reset_callback(int (*callback)(struct target *target,
1678 enum target_reset_mode reset_mode, void *priv), void *priv)
1679 {
1680 struct target_reset_callback *entry;
1681
1682 if (!callback)
1683 return ERROR_COMMAND_SYNTAX_ERROR;
1684
1685 entry = malloc(sizeof(struct target_reset_callback));
1686 if (!entry) {
1687 LOG_ERROR("error allocating buffer for reset callback entry");
1688 return ERROR_COMMAND_SYNTAX_ERROR;
1689 }
1690
1691 entry->callback = callback;
1692 entry->priv = priv;
1693 list_add(&entry->list, &target_reset_callback_list);
1694
1695
1696 return ERROR_OK;
1697 }
1698
1699 int target_register_trace_callback(int (*callback)(struct target *target,
1700 size_t len, uint8_t *data, void *priv), void *priv)
1701 {
1702 struct target_trace_callback *entry;
1703
1704 if (!callback)
1705 return ERROR_COMMAND_SYNTAX_ERROR;
1706
1707 entry = malloc(sizeof(struct target_trace_callback));
1708 if (!entry) {
1709 LOG_ERROR("error allocating buffer for trace callback entry");
1710 return ERROR_COMMAND_SYNTAX_ERROR;
1711 }
1712
1713 entry->callback = callback;
1714 entry->priv = priv;
1715 list_add(&entry->list, &target_trace_callback_list);
1716
1717
1718 return ERROR_OK;
1719 }
1720
1721 int target_register_timer_callback(int (*callback)(void *priv),
1722 unsigned int time_ms, enum target_timer_type type, void *priv)
1723 {
1724 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1725
1726 if (!callback)
1727 return ERROR_COMMAND_SYNTAX_ERROR;
1728
1729 if (*callbacks_p) {
1730 while ((*callbacks_p)->next)
1731 callbacks_p = &((*callbacks_p)->next);
1732 callbacks_p = &((*callbacks_p)->next);
1733 }
1734
1735 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1736 (*callbacks_p)->callback = callback;
1737 (*callbacks_p)->type = type;
1738 (*callbacks_p)->time_ms = time_ms;
1739 (*callbacks_p)->removed = false;
1740
1741 (*callbacks_p)->when = timeval_ms() + time_ms;
1742 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1743
1744 (*callbacks_p)->priv = priv;
1745 (*callbacks_p)->next = NULL;
1746
1747 return ERROR_OK;
1748 }
1749
1750 int target_unregister_event_callback(int (*callback)(struct target *target,
1751 enum target_event event, void *priv), void *priv)
1752 {
1753 struct target_event_callback **p = &target_event_callbacks;
1754 struct target_event_callback *c = target_event_callbacks;
1755
1756 if (!callback)
1757 return ERROR_COMMAND_SYNTAX_ERROR;
1758
1759 while (c) {
1760 struct target_event_callback *next = c->next;
1761 if ((c->callback == callback) && (c->priv == priv)) {
1762 *p = next;
1763 free(c);
1764 return ERROR_OK;
1765 } else
1766 p = &(c->next);
1767 c = next;
1768 }
1769
1770 return ERROR_OK;
1771 }
1772
1773 int target_unregister_reset_callback(int (*callback)(struct target *target,
1774 enum target_reset_mode reset_mode, void *priv), void *priv)
1775 {
1776 struct target_reset_callback *entry;
1777
1778 if (!callback)
1779 return ERROR_COMMAND_SYNTAX_ERROR;
1780
1781 list_for_each_entry(entry, &target_reset_callback_list, list) {
1782 if (entry->callback == callback && entry->priv == priv) {
1783 list_del(&entry->list);
1784 free(entry);
1785 break;
1786 }
1787 }
1788
1789 return ERROR_OK;
1790 }
1791
1792 int target_unregister_trace_callback(int (*callback)(struct target *target,
1793 size_t len, uint8_t *data, void *priv), void *priv)
1794 {
1795 struct target_trace_callback *entry;
1796
1797 if (!callback)
1798 return ERROR_COMMAND_SYNTAX_ERROR;
1799
1800 list_for_each_entry(entry, &target_trace_callback_list, list) {
1801 if (entry->callback == callback && entry->priv == priv) {
1802 list_del(&entry->list);
1803 free(entry);
1804 break;
1805 }
1806 }
1807
1808 return ERROR_OK;
1809 }
1810
1811 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1812 {
1813 if (!callback)
1814 return ERROR_COMMAND_SYNTAX_ERROR;
1815
1816 for (struct target_timer_callback *c = target_timer_callbacks;
1817 c; c = c->next) {
1818 if ((c->callback == callback) && (c->priv == priv)) {
1819 c->removed = true;
1820 return ERROR_OK;
1821 }
1822 }
1823
1824 return ERROR_FAIL;
1825 }
1826
/* Fire 'event' for 'target': for HALTED, first recurse to deliver the
 * GDB_HALT pre-event; then run the target's configured event handler and
 * every registered C callback, in registration order. */
int target_call_event_callbacks(struct target *target, enum target_event event)
{
	struct target_event_callback *callback = target_event_callbacks;
	struct target_event_callback *next_callback;

	if (event == TARGET_EVENT_HALTED) {
		/* execute early halted first */
		target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
	}

	LOG_DEBUG("target event %i (%s) for core %s", event,
			target_event_name(event),
			target_name(target));

	/* Run the target-specific handler configured for this event, if any. */
	target_handle_event(target, event);

	/* Snapshot 'next' before each call so a callback may unregister itself. */
	while (callback) {
		next_callback = callback->next;
		callback->callback(target, event, callback->priv);
		callback = next_callback;
	}

	return ERROR_OK;
}
1851
1852 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1853 {
1854 struct target_reset_callback *callback;
1855
1856 LOG_DEBUG("target reset %i (%s)", reset_mode,
1857 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1858
1859 list_for_each_entry(callback, &target_reset_callback_list, list)
1860 callback->callback(target, reset_mode, callback->priv);
1861
1862 return ERROR_OK;
1863 }
1864
1865 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1866 {
1867 struct target_trace_callback *callback;
1868
1869 list_for_each_entry(callback, &target_trace_callback_list, list)
1870 callback->callback(target, len, data, callback->priv);
1871
1872 return ERROR_OK;
1873 }
1874
1875 static int target_timer_callback_periodic_restart(
1876 struct target_timer_callback *cb, int64_t *now)
1877 {
1878 cb->when = *now + cb->time_ms;
1879 return ERROR_OK;
1880 }
1881
1882 static int target_call_timer_callback(struct target_timer_callback *cb,
1883 int64_t *now)
1884 {
1885 cb->callback(cb->priv);
1886
1887 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1888 return target_timer_callback_periodic_restart(cb, now);
1889
1890 return target_unregister_timer_callback(cb->callback, cb->priv);
1891 }
1892
/* Walk the timer callback list: free entries flagged 'removed', invoke
 * callbacks that are due (or every periodic one when checktime == 0), and
 * recompute target_timer_next_event_value as the earliest pending deadline. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		if ((*callback)->removed) {
			/* Deferred deletion requested by target_unregister_timer_callback(). */
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* Track the soonest pending deadline for target_timer_next_event(). */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1940
/* Dispatch only the timer callbacks whose deadline has passed. */
int target_call_timer_callbacks()
{
	return target_call_timer_callbacks_check_time(1);
}
1945
/* invoke periodic callbacks immediately, regardless of their deadline */
int target_call_timer_callbacks_now()
{
	return target_call_timer_callbacks_check_time(0);
}
1951
/* Earliest absolute time (ms) at which a registered timer callback is due,
 * as cached by the timer registration/dispatch code. */
int64_t target_timer_next_event(void)
{
	return target_timer_next_event_value;
}
1956
1957 /* Prints the working area layout for debug purposes */
1958 static void print_wa_layout(struct target *target)
1959 {
1960 struct working_area *c = target->working_areas;
1961
1962 while (c) {
1963 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1964 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1965 c->address, c->address + c->size - 1, c->size);
1966 c = c->next;
1967 }
1968 }
1969
/* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
static void target_split_working_area(struct working_area *area, uint32_t size)
{
	assert(area->free); /* Shouldn't split an allocated area */
	assert(size <= area->size); /* Caller should guarantee this */

	/* Split only if not already the right size */
	if (size < area->size) {
		struct working_area *new_wa = malloc(sizeof(*new_wa));

		/* On allocation failure, silently keep the oversized area:
		 * the caller still gets at least 'size' bytes. */
		if (!new_wa)
			return;

		/* The new free area covers the tail of the original one. */
		new_wa->next = area->next;
		new_wa->size = area->size - size;
		new_wa->address = area->address + size;
		new_wa->backup = NULL;
		new_wa->user = NULL;
		new_wa->free = true;

		area->next = new_wa;
		area->size = size;

		/* If backup memory was allocated to this area, it has the wrong size
		 * now so free it and it will be reallocated if/when needed */
		free(area->backup);
		area->backup = NULL;
	}
}
1999
2000 /* Merge all adjacent free areas into one */
2001 static void target_merge_working_areas(struct target *target)
2002 {
2003 struct working_area *c = target->working_areas;
2004
2005 while (c && c->next) {
2006 assert(c->next->address == c->address + c->size); /* This is an invariant */
2007
2008 /* Find two adjacent free areas */
2009 if (c->free && c->next->free) {
2010 /* Merge the last into the first */
2011 c->size += c->next->size;
2012
2013 /* Remove the last */
2014 struct working_area *to_be_freed = c->next;
2015 c->next = c->next->next;
2016 free(to_be_freed->backup);
2017 free(to_be_freed);
2018
2019 /* If backup memory was allocated to the remaining area, it's has
2020 * the wrong size now */
2021 free(c->backup);
2022 c->backup = NULL;
2023 } else {
2024 c = c->next;
2025 }
2026 }
2027 }
2028
/* Try to allocate a 4-byte-aligned chunk of at least 'size' bytes from the
 * target's working memory pool.  On success, *area points to the allocated
 * area descriptor, and 'area' itself is recorded so the caller's pointer
 * can be invalidated when the area is later freed.  Returns
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no free chunk is large enough
 * (callers may retry with a smaller size without a warning being logged). */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		/* Pick the physical or virtual base address depending on whether
		 * the MMU is currently enabled; each is only usable if the
		 * corresponding -work-area-* option was given in the config. */
		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call: one big free area
		 * covering the whole configured pool. */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = ALIGN_DOWN(target->working_area_size, 4); /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* Note: if malloc failed, working_areas stays NULL and the
		 * search below reports resource-not-available. */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	size = ALIGN_UP(size, 4);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area (first-fit) */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
			  size, c->address);

	/* Save the target memory we are about to overwrite so it can be
	 * restored when the area is freed. */
	if (target->backup_working_area) {
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer: remembered so target_free_working_area_restore() can
	 * NULL the caller's copy when the area is released */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2122
2123 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2124 {
2125 int retval;
2126
2127 retval = target_alloc_working_area_try(target, size, area);
2128 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2129 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2130 return retval;
2131
2132 }
2133
2134 static int target_restore_working_area(struct target *target, struct working_area *area)
2135 {
2136 int retval = ERROR_OK;
2137
2138 if (target->backup_working_area && area->backup) {
2139 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2140 if (retval != ERROR_OK)
2141 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2142 area->size, area->address);
2143 }
2144
2145 return retval;
2146 }
2147
2148 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2149 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2150 {
2151 if (!area || area->free)
2152 return ERROR_OK;
2153
2154 int retval = ERROR_OK;
2155 if (restore) {
2156 retval = target_restore_working_area(target, area);
2157 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2158 if (retval != ERROR_OK)
2159 return retval;
2160 }
2161
2162 area->free = true;
2163
2164 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2165 area->size, area->address);
2166
2167 /* mark user pointer invalid */
2168 /* TODO: Is this really safe? It points to some previous caller's memory.
2169 * How could we know that the area pointer is still in that place and not
2170 * some other vital data? What's the purpose of this, anyway? */
2171 *area->user = NULL;
2172 area->user = NULL;
2173
2174 target_merge_working_areas(target);
2175
2176 print_wa_layout(target);
2177
2178 return retval;
2179 }
2180
/* Free a working area, restoring its original target memory contents. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
2185
2186 /* free resources and restore memory, if restoring memory fails,
2187 * free up resources anyway
2188 */
2189 static void target_free_all_working_areas_restore(struct target *target, int restore)
2190 {
2191 struct working_area *c = target->working_areas;
2192
2193 LOG_DEBUG("freeing all working areas");
2194
2195 /* Loop through all areas, restoring the allocated ones and marking them as free */
2196 while (c) {
2197 if (!c->free) {
2198 if (restore)
2199 target_restore_working_area(target, c);
2200 c->free = true;
2201 *c->user = NULL; /* Same as above */
2202 c->user = NULL;
2203 }
2204 c = c->next;
2205 }
2206
2207 /* Run a merge pass to combine all areas into one */
2208 target_merge_working_areas(target);
2209
2210 print_wa_layout(target);
2211 }
2212
2213 void target_free_all_working_areas(struct target *target)
2214 {
2215 target_free_all_working_areas_restore(target, 1);
2216
2217 /* Now we have none or only one working area marked as free */
2218 if (target->working_areas) {
2219 /* Free the last one to allow on-the-fly moving and resizing */
2220 free(target->working_areas->backup);
2221 free(target->working_areas);
2222 target->working_areas = NULL;
2223 }
2224 }
2225
2226 /* Find the largest number of bytes that can be allocated */
2227 uint32_t target_get_working_area_avail(struct target *target)
2228 {
2229 struct working_area *c = target->working_areas;
2230 uint32_t max_size = 0;
2231
2232 if (!c)
2233 return ALIGN_DOWN(target->working_area_size, 4);
2234
2235 while (c) {
2236 if (c->free && max_size < c->size)
2237 max_size = c->size;
2238
2239 c = c->next;
2240 }
2241
2242 return max_size;
2243 }
2244
/* Release every resource owned by 'target' and finally the struct itself.
 * Order matters: the per-type deinit hook runs first, while the target is
 * still fully intact, then generic resources are torn down. */
static void target_destroy(struct target *target)
{
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	/* Semihosting state; basedir is a separately owned string. */
	if (target->semihosting)
		free(target->semihosting->basedir);
	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* Free the chain of Tcl event handlers registered on this target,
	 * dropping each handler body's interpreter reference. */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head, *tmp;

		list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
			list_del(&head->lh);
			head->target->smp = 0;
			free(head);
		}
		/* The shared empty list is a static sentinel; never free it. */
		if (target->smp_targets != &empty_smp_targets)
			free(target->smp_targets);
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2289
2290 void target_quit(void)
2291 {
2292 struct target_event_callback *pe = target_event_callbacks;
2293 while (pe) {
2294 struct target_event_callback *t = pe->next;
2295 free(pe);
2296 pe = t;
2297 }
2298 target_event_callbacks = NULL;
2299
2300 struct target_timer_callback *pt = target_timer_callbacks;
2301 while (pt) {
2302 struct target_timer_callback *t = pt->next;
2303 free(pt);
2304 pt = t;
2305 }
2306 target_timer_callbacks = NULL;
2307
2308 for (struct target *target = all_targets; target;) {
2309 struct target *tmp;
2310
2311 tmp = target->next;
2312 target_destroy(target);
2313 target = tmp;
2314 }
2315
2316 all_targets = NULL;
2317 }
2318
2319 int target_arch_state(struct target *target)
2320 {
2321 int retval;
2322 if (!target) {
2323 LOG_WARNING("No target has been configured");
2324 return ERROR_OK;
2325 }
2326
2327 if (target->state != TARGET_HALTED)
2328 return ERROR_OK;
2329
2330 retval = target->type->arch_state(target);
2331 return retval;
2332 }
2333
2334 static int target_get_gdb_fileio_info_default(struct target *target,
2335 struct gdb_fileio_info *fileio_info)
2336 {
2337 /* If target does not support semi-hosting function, target
2338 has no need to provide .get_gdb_fileio_info callback.
2339 It just return ERROR_FAIL and gdb_server will return "Txx"
2340 as target halted every time. */
2341 return ERROR_FAIL;
2342 }
2343
2344 static int target_gdb_fileio_end_default(struct target *target,
2345 int retcode, int fileio_errno, bool ctrl_c)
2346 {
2347 return ERROR_OK;
2348 }
2349
2350 int target_profiling_default(struct target *target, uint32_t *samples,
2351 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2352 {
2353 struct timeval timeout, now;
2354
2355 gettimeofday(&timeout, NULL);
2356 timeval_add_time(&timeout, seconds, 0);
2357
2358 LOG_INFO("Starting profiling. Halting and resuming the"
2359 " target as often as we can...");
2360
2361 uint32_t sample_count = 0;
2362 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2363 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2364
2365 int retval = ERROR_OK;
2366 for (;;) {
2367 target_poll(target);
2368 if (target->state == TARGET_HALTED) {
2369 uint32_t t = buf_get_u32(reg->value, 0, 32);
2370 samples[sample_count++] = t;
2371 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2372 retval = target_resume(target, 1, 0, 0, 0);
2373 target_poll(target);
2374 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2375 } else if (target->state == TARGET_RUNNING) {
2376 /* We want to quickly sample the PC. */
2377 retval = target_halt(target);
2378 } else {
2379 LOG_INFO("Target not halted or running");
2380 retval = ERROR_OK;
2381 break;
2382 }
2383
2384 if (retval != ERROR_OK)
2385 break;
2386
2387 gettimeofday(&now, NULL);
2388 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2389 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2390 break;
2391 }
2392 }
2393
2394 *num_samples = sample_count;
2395 return retval;
2396 }
2397
2398 /* Single aligned words are guaranteed to use 16 or 32 bit access
2399 * mode respectively, otherwise data is handled as quickly as
2400 * possible
2401 */
2402 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2403 {
2404 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2405 size, address);
2406
2407 if (!target_was_examined(target)) {
2408 LOG_ERROR("Target not examined yet");
2409 return ERROR_FAIL;
2410 }
2411
2412 if (size == 0)
2413 return ERROR_OK;
2414
2415 if ((address + size - 1) < address) {
2416 /* GDB can request this when e.g. PC is 0xfffffffc */
2417 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2418 address,
2419 size);
2420 return ERROR_FAIL;
2421 }
2422
2423 return target->type->write_buffer(target, address, size, buffer);
2424 }
2425
/* Generic write_buffer implementation: transfers 'count' bytes using the
 * widest naturally-aligned accesses the target supports (up to its data
 * bus width), fixing up any misaligned head bytes first. */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	/* Widest single access this target supports, in bytes. */
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* If the address is misaligned for the next doubled width, emit
		 * one access of the current width to fix that up. */
		if (address & size) {
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Longest prefix of the remainder expressible in 'size' units. */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2462
2463 /* Single aligned words are guaranteed to use 16 or 32 bit access
2464 * mode respectively, otherwise data is handled as quickly as
2465 * possible
2466 */
2467 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2468 {
2469 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2470 size, address);
2471
2472 if (!target_was_examined(target)) {
2473 LOG_ERROR("Target not examined yet");
2474 return ERROR_FAIL;
2475 }
2476
2477 if (size == 0)
2478 return ERROR_OK;
2479
2480 if ((address + size - 1) < address) {
2481 /* GDB can request this when e.g. PC is 0xfffffffc */
2482 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2483 address,
2484 size);
2485 return ERROR_FAIL;
2486 }
2487
2488 return target->type->read_buffer(target, address, size, buffer);
2489 }
2490
/* Generic read_buffer implementation: transfers 'count' bytes using the
 * widest naturally-aligned accesses the target supports (up to its data
 * bus width), fixing up any misaligned head bytes first. */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	/* Widest single access this target supports, in bytes. */
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* If the address is misaligned for the next doubled width, emit
		 * one access of the current width to fix that up. */
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Longest prefix of the remainder expressible in 'size' units. */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2526
/* Compute a CRC over 'size' bytes of target memory at 'address'.
 * Prefers the target's on-target checksum_memory algorithm; if that
 * fails, falls back to reading the memory to the host and checksumming
 * it with image_calculate_checksum(). */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->checksum_memory) {
		LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* Host-side fallback path. */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): get_u32 immediately followed by set_u32 on the
		 * same target looks like a round-trip no-op on the buffer --
		 * presumably retained for historical reasons; confirm before
		 * changing or relying on it. */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2570
2571 int target_blank_check_memory(struct target *target,
2572 struct target_memory_check_block *blocks, int num_blocks,
2573 uint8_t erased_value)
2574 {
2575 if (!target_was_examined(target)) {
2576 LOG_ERROR("Target not examined yet");
2577 return ERROR_FAIL;
2578 }
2579
2580 if (!target->type->blank_check_memory)
2581 return ERROR_NOT_IMPLEMENTED;
2582
2583 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2584 }
2585
2586 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2587 {
2588 uint8_t value_buf[8];
2589 if (!target_was_examined(target)) {
2590 LOG_ERROR("Target not examined yet");
2591 return ERROR_FAIL;
2592 }
2593
2594 int retval = target_read_memory(target, address, 8, 1, value_buf);
2595
2596 if (retval == ERROR_OK) {
2597 *value = target_buffer_get_u64(target, value_buf);
2598 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2599 address,
2600 *value);
2601 } else {
2602 *value = 0x0;
2603 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2604 address);
2605 }
2606
2607 return retval;
2608 }
2609
2610 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2611 {
2612 uint8_t value_buf[4];
2613 if (!target_was_examined(target)) {
2614 LOG_ERROR("Target not examined yet");
2615 return ERROR_FAIL;
2616 }
2617
2618 int retval = target_read_memory(target, address, 4, 1, value_buf);
2619
2620 if (retval == ERROR_OK) {
2621 *value = target_buffer_get_u32(target, value_buf);
2622 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2623 address,
2624 *value);
2625 } else {
2626 *value = 0x0;
2627 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2628 address);
2629 }
2630
2631 return retval;
2632 }
2633
2634 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2635 {
2636 uint8_t value_buf[2];
2637 if (!target_was_examined(target)) {
2638 LOG_ERROR("Target not examined yet");
2639 return ERROR_FAIL;
2640 }
2641
2642 int retval = target_read_memory(target, address, 2, 1, value_buf);
2643
2644 if (retval == ERROR_OK) {
2645 *value = target_buffer_get_u16(target, value_buf);
2646 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2647 address,
2648 *value);
2649 } else {
2650 *value = 0x0;
2651 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2652 address);
2653 }
2654
2655 return retval;
2656 }
2657
2658 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2659 {
2660 if (!target_was_examined(target)) {
2661 LOG_ERROR("Target not examined yet");
2662 return ERROR_FAIL;
2663 }
2664
2665 int retval = target_read_memory(target, address, 1, 1, value);
2666
2667 if (retval == ERROR_OK) {
2668 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2669 address,
2670 *value);
2671 } else {
2672 *value = 0x0;
2673 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2674 address);
2675 }
2676
2677 return retval;
2678 }
2679
2680 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2681 {
2682 int retval;
2683 uint8_t value_buf[8];
2684 if (!target_was_examined(target)) {
2685 LOG_ERROR("Target not examined yet");
2686 return ERROR_FAIL;
2687 }
2688
2689 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2690 address,
2691 value);
2692
2693 target_buffer_set_u64(target, value_buf, value);
2694 retval = target_write_memory(target, address, 8, 1, value_buf);
2695 if (retval != ERROR_OK)
2696 LOG_DEBUG("failed: %i", retval);
2697
2698 return retval;
2699 }
2700
2701 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2702 {
2703 int retval;
2704 uint8_t value_buf[4];
2705 if (!target_was_examined(target)) {
2706 LOG_ERROR("Target not examined yet");
2707 return ERROR_FAIL;
2708 }
2709
2710 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2711 address,
2712 value);
2713
2714 target_buffer_set_u32(target, value_buf, value);
2715 retval = target_write_memory(target, address, 4, 1, value_buf);
2716 if (retval != ERROR_OK)
2717 LOG_DEBUG("failed: %i", retval);
2718
2719 return retval;
2720 }
2721
2722 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2723 {
2724 int retval;
2725 uint8_t value_buf[2];
2726 if (!target_was_examined(target)) {
2727 LOG_ERROR("Target not examined yet");
2728 return ERROR_FAIL;
2729 }
2730
2731 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2732 address,
2733 value);
2734
2735 target_buffer_set_u16(target, value_buf, value);
2736 retval = target_write_memory(target, address, 2, 1, value_buf);
2737 if (retval != ERROR_OK)
2738 LOG_DEBUG("failed: %i", retval);
2739
2740 return retval;
2741 }
2742
2743 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2744 {
2745 int retval;
2746 if (!target_was_examined(target)) {
2747 LOG_ERROR("Target not examined yet");
2748 return ERROR_FAIL;
2749 }
2750
2751 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2752 address, value);
2753
2754 retval = target_write_memory(target, address, 1, 1, &value);
2755 if (retval != ERROR_OK)
2756 LOG_DEBUG("failed: %i", retval);
2757
2758 return retval;
2759 }
2760
2761 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2762 {
2763 int retval;
2764 uint8_t value_buf[8];
2765 if (!target_was_examined(target)) {
2766 LOG_ERROR("Target not examined yet");
2767 return ERROR_FAIL;
2768 }
2769
2770 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2771 address,
2772 value);
2773
2774 target_buffer_set_u64(target, value_buf, value);
2775 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2776 if (retval != ERROR_OK)
2777 LOG_DEBUG("failed: %i", retval);
2778
2779 return retval;
2780 }
2781
2782 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2783 {
2784 int retval;
2785 uint8_t value_buf[4];
2786 if (!target_was_examined(target)) {
2787 LOG_ERROR("Target not examined yet");
2788 return ERROR_FAIL;
2789 }
2790
2791 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2792 address,
2793 value);
2794
2795 target_buffer_set_u32(target, value_buf, value);
2796 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2797 if (retval != ERROR_OK)
2798 LOG_DEBUG("failed: %i", retval);
2799
2800 return retval;
2801 }
2802
2803 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2804 {
2805 int retval;
2806 uint8_t value_buf[2];
2807 if (!target_was_examined(target)) {
2808 LOG_ERROR("Target not examined yet");
2809 return ERROR_FAIL;
2810 }
2811
2812 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2813 address,
2814 value);
2815
2816 target_buffer_set_u16(target, value_buf, value);
2817 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2818 if (retval != ERROR_OK)
2819 LOG_DEBUG("failed: %i", retval);
2820
2821 return retval;
2822 }
2823
2824 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2825 {
2826 int retval;
2827 if (!target_was_examined(target)) {
2828 LOG_ERROR("Target not examined yet");
2829 return ERROR_FAIL;
2830 }
2831
2832 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2833 address, value);
2834
2835 retval = target_write_phys_memory(target, address, 1, 1, &value);
2836 if (retval != ERROR_OK)
2837 LOG_DEBUG("failed: %i", retval);
2838
2839 return retval;
2840 }
2841
2842 static int find_target(struct command_invocation *cmd, const char *name)
2843 {
2844 struct target *target = get_target(name);
2845 if (!target) {
2846 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2847 return ERROR_FAIL;
2848 }
2849 if (!target->tap->enabled) {
2850 command_print(cmd, "Target: TAP %s is disabled, "
2851 "can't be the current target\n",
2852 target->tap->dotted_name);
2853 return ERROR_FAIL;
2854 }
2855
2856 cmd->ctx->current_target = target;
2857 if (cmd->ctx->current_target_override)
2858 cmd->ctx->current_target_override = target;
2859
2860 return ERROR_OK;
2861 }
2862
2863
/* 'targets' command: with an argument, switch the current target; with no
 * argument (or if the switch failed) list all configured targets. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	struct target *target = all_targets;
	command_print(CMD, "    TargetName         Type       Endian TapName            State       ");
	command_print(CMD, "--  ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		/* '*' marks the session's current target */
		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2906
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* Latest sense readings, updated by sense_handler() and consulted by the
 * polling loop in handle_target(). */
static int power_dropout;
static int srst_asserted;

/* One-shot action flags: set by sense_handler() on an edge, consumed and
 * cleared by handle_target(), which runs the matching Tcl event proc. */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
2916
/* Sample the adapter's power/srst sense lines and latch edge events into
 * the run_* flags consumed by handle_target().  Power-dropout and srst-
 * deassert events are rate-limited to one per 2000 ms window. */
static int sense_handler(void)
{
	/* Previous readings, kept across calls for edge detection. */
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	static int64_t last_power;
	/* Debounce: ignore further dropouts within 2 s of the last report. */
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	static int64_t last_srst;
	/* Same 2 s debounce for srst deassertion events. */
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2968
/* process target state changes */
/* Periodic housekeeping callback: run sense-line event procs, then poll
 * every enabled, examined target for state changes, backing off
 * exponentially (up to 5000 ms) on targets whose poll keeps failing. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			if (target->backoff.times > 0) {
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target_set_examined(target);
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
						target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3081
/**
 * Implements the "reg" command:
 *  - no arguments: list every register of every cache of the current target;
 *  - one argument (ordinal number or name): display that register, reading
 *    it from the target when the cached value is invalid;
 *  - "reg <id> force": invalidate the cached value first, then read/display;
 *  - two arguments (<id> <value>): write the value to the register.
 */
COMMAND_HANDLER(handle_reg_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	struct reg *reg = NULL;

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		unsigned int count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* non-existent/hidden registers still consume an
				 * ordinal number (count) but are not displayed */
				if (reg->exist == false || reg->hidden)
					continue;
				/* only print cached values if they are valid */
				if (reg->valid) {
					char *value = buf_to_hex_str(reg->value,
							reg->size);
					command_print(CMD,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk the cache chain until the num-th register is found */
		struct reg_cache *cache = target->reg_cache;
		unsigned int count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);

		if (!reg)
			goto not_found;
	}

	assert(reg); /* give clang a hint that we *know* reg is != NULL here */

	if (!reg->exist)
		goto not_found;

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		/* "force" invalidates the cache so the value is re-read below */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0) {
			int retval = reg->type->get(reg);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not read register '%s'", reg->name);
				return retval;
			}
		}
		char *value = buf_to_hex_str(reg->value, reg->size);
		command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (!buf)
			return ERROR_FAIL;
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		int retval = reg->type->set(reg, buf);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not write to register '%s'", reg->name);
		} else {
			/* echo the value as held in the register cache after
			 * the write */
			char *value = buf_to_hex_str(reg->value, reg->size);
			command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
			free(value);
		}

		free(buf);

		return retval;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;

not_found:
	command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
	return ERROR_OK;
}
3212
3213 COMMAND_HANDLER(handle_poll_command)
3214 {
3215 int retval = ERROR_OK;
3216 struct target *target = get_current_target(CMD_CTX);
3217
3218 if (CMD_ARGC == 0) {
3219 command_print(CMD, "background polling: %s",
3220 jtag_poll_get_enabled() ? "on" : "off");
3221 command_print(CMD, "TAP: %s (%s)",
3222 target->tap->dotted_name,
3223 target->tap->enabled ? "enabled" : "disabled");
3224 if (!target->tap->enabled)
3225 return ERROR_OK;
3226 retval = target_poll(target);
3227 if (retval != ERROR_OK)
3228 return retval;
3229 retval = target_arch_state(target);
3230 if (retval != ERROR_OK)
3231 return retval;
3232 } else if (CMD_ARGC == 1) {
3233 bool enable;
3234 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3235 jtag_poll_set_enabled(enable);
3236 } else
3237 return ERROR_COMMAND_SYNTAX_ERROR;
3238
3239 return retval;
3240 }
3241
3242 COMMAND_HANDLER(handle_wait_halt_command)
3243 {
3244 if (CMD_ARGC > 1)
3245 return ERROR_COMMAND_SYNTAX_ERROR;
3246
3247 unsigned ms = DEFAULT_HALT_TIMEOUT;
3248 if (1 == CMD_ARGC) {
3249 int retval = parse_uint(CMD_ARGV[0], &ms);
3250 if (retval != ERROR_OK)
3251 return ERROR_COMMAND_SYNTAX_ERROR;
3252 }
3253
3254 struct target *target = get_current_target(CMD_CTX);
3255 return target_wait_state(target, TARGET_HALTED, ms);
3256 }
3257
3258 /* wait for target state to change. The trick here is to have a low
3259 * latency for short waits and not to suck up all the CPU time
3260 * on longer waits.
3261 *
3262 * After 500ms, keep_alive() is invoked
3263 */
3264 int target_wait_state(struct target *target, enum target_state state, int ms)
3265 {
3266 int retval;
3267 int64_t then = 0, cur;
3268 bool once = true;
3269
3270 for (;;) {
3271 retval = target_poll(target);
3272 if (retval != ERROR_OK)
3273 return retval;
3274 if (target->state == state)
3275 break;
3276 cur = timeval_ms();
3277 if (once) {
3278 once = false;
3279 then = timeval_ms();
3280 LOG_DEBUG("waiting for target %s...",
3281 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3282 }
3283
3284 if (cur-then > 500)
3285 keep_alive();
3286
3287 if ((cur-then) > ms) {
3288 LOG_ERROR("timed out while waiting for target %s",
3289 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3290 return ERROR_FAIL;
3291 }
3292 }
3293
3294 return ERROR_OK;
3295 }
3296
3297 COMMAND_HANDLER(handle_halt_command)
3298 {
3299 LOG_DEBUG("-");
3300
3301 struct target *target = get_current_target(CMD_CTX);
3302
3303 target->verbose_halt_msg = true;
3304
3305 int retval = target_halt(target);
3306 if (retval != ERROR_OK)
3307 return retval;
3308
3309 if (CMD_ARGC == 1) {
3310 unsigned wait_local;
3311 retval = parse_uint(CMD_ARGV[0], &wait_local);
3312 if (retval != ERROR_OK)
3313 return ERROR_COMMAND_SYNTAX_ERROR;
3314 if (!wait_local)
3315 return ERROR_OK;
3316 }
3317
3318 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3319 }
3320
3321 COMMAND_HANDLER(handle_soft_reset_halt_command)
3322 {
3323 struct target *target = get_current_target(CMD_CTX);
3324
3325 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3326
3327 target_soft_reset_halt(target);
3328
3329 return ERROR_OK;
3330 }
3331
3332 COMMAND_HANDLER(handle_reset_command)
3333 {
3334 if (CMD_ARGC > 1)
3335 return ERROR_COMMAND_SYNTAX_ERROR;
3336
3337 enum target_reset_mode reset_mode = RESET_RUN;
3338 if (CMD_ARGC == 1) {
3339 const struct jim_nvp *n;
3340 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3341 if ((!n->name) || (n->value == RESET_UNKNOWN))
3342 return ERROR_COMMAND_SYNTAX_ERROR;
3343 reset_mode = n->value;
3344 }
3345
3346 /* reset *all* targets */
3347 return target_process_reset(CMD, reset_mode);
3348 }
3349
3350
3351 COMMAND_HANDLER(handle_resume_command)
3352 {
3353 int current = 1;
3354 if (CMD_ARGC > 1)
3355 return ERROR_COMMAND_SYNTAX_ERROR;
3356
3357 struct target *target = get_current_target(CMD_CTX);
3358
3359 /* with no CMD_ARGV, resume from current pc, addr = 0,
3360 * with one arguments, addr = CMD_ARGV[0],
3361 * handle breakpoints, not debugging */
3362 target_addr_t addr = 0;
3363 if (CMD_ARGC == 1) {
3364 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3365 current = 0;
3366 }
3367
3368 return target_resume(target, current, addr, 1, 0);
3369 }
3370
3371 COMMAND_HANDLER(handle_step_command)
3372 {
3373 if (CMD_ARGC > 1)
3374 return ERROR_COMMAND_SYNTAX_ERROR;
3375
3376 LOG_DEBUG("-");
3377
3378 /* with no CMD_ARGV, step from current pc, addr = 0,
3379 * with one argument addr = CMD_ARGV[0],
3380 * handle breakpoints, debugging */
3381 target_addr_t addr = 0;
3382 int current_pc = 1;
3383 if (CMD_ARGC == 1) {
3384 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3385 current_pc = 0;
3386 }
3387
3388 struct target *target = get_current_target(CMD_CTX);
3389
3390 return target_step(target, current_pc, addr, 1);
3391 }
3392
/**
 * Pretty-print a block of memory for the md* commands.
 *
 * @param cmd     command invocation used for output
 * @param target  target whose byte order governs value decoding
 * @param address start address, used only for the printed line labels
 * @param size    element width in bytes: 1, 2, 4 or 8
 * @param count   number of elements in @a buffer
 * @param buffer  raw bytes previously read from the target
 */
void target_handle_md_output(struct command_invocation *cmd,
		struct target *target, target_addr_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	/* 32 bytes of data per printed line, regardless of element width */
	const unsigned line_bytecnt = 32;
	unsigned line_modulo = line_bytecnt / size;

	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	const char *value_fmt;
	switch (size) {
	case 8:
		value_fmt = "%16.16"PRIx64" ";
		break;
	case 4:
		value_fmt = "%8.8"PRIx64" ";
		break;
	case 2:
		value_fmt = "%4.4"PRIx64" ";
		break;
	case 1:
		value_fmt = "%2.2"PRIx64" ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		/* start of a line: print the address label */
		if (i % line_modulo == 0) {
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					TARGET_ADDR_FMT ": ",
					(address + (i * size)));
		}

		/* decode one element honoring the target's byte order */
		uint64_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		switch (size) {
		case 8:
			value = target_buffer_get_u64(target, value_ptr);
			break;
		case 4:
			value = target_buffer_get_u32(target, value_ptr);
			break;
		case 2:
			value = target_buffer_get_u16(target, value_ptr);
			break;
		case 1:
			value = *value_ptr;
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		/* flush at end of line or end of data */
		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			command_print(cmd, "%s", output);
			output_len = 0;
		}
	}
}
3456
/**
 * Implements md[dwhb] ['phys'] address [count]: read and display target
 * memory. The element width is encoded in the third letter of the
 * command name; a leading "phys" argument selects the physical-memory
 * reader instead of the (possibly MMU-translated) default.
 */
COMMAND_HANDLER(handle_md_command)
{
	if (CMD_ARGC < 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* third character of the command name selects the access width */
	unsigned size = 0;
	switch (CMD_NAME[2]) {
	case 'd':
		size = 8;
		break;
	case 'w':
		size = 4;
		break;
	case 'h':
		size = 2;
		break;
	case 'b':
		size = 1;
		break;
	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	/* optional "phys" prefix selects the physical-memory reader and
	 * shifts the remaining arguments down by one */
	bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
	int (*fn)(struct target *target,
			target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
	if (physical) {
		CMD_ARGC--;
		CMD_ARGV++;
		fn = target_read_phys_memory;
	} else
		fn = target_read_memory;
	if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
		return ERROR_COMMAND_SYNTAX_ERROR;

	target_addr_t address;
	COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);

	/* element count defaults to 1 */
	unsigned count = 1;
	if (CMD_ARGC == 2)
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);

	uint8_t *buffer = calloc(count, size);
	if (!buffer) {
		LOG_ERROR("Failed to allocate md read buffer");
		return ERROR_FAIL;
	}

	struct target *target = get_current_target(CMD_CTX);
	int retval = fn(target, address, size, count, buffer);
	if (retval == ERROR_OK)
		target_handle_md_output(CMD, target, address, size, count, buffer);

	free(buffer);

	return retval;
}
3514
/* Write-callback signature shared by target_write_memory() and
 * target_write_phys_memory(), so the mw/fill commands can select
 * virtual vs physical access at run time. */
typedef int (*target_write_fn)(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3517
3518 static int target_fill_mem(struct target *target,
3519 target_addr_t address,
3520 target_write_fn fn,
3521 unsigned data_size,
3522 /* value */
3523 uint64_t b,
3524 /* count */
3525 unsigned c)
3526 {
3527 /* We have to write in reasonably large chunks to be able
3528 * to fill large memory areas with any sane speed */
3529 const unsigned chunk_size = 16384;
3530 uint8_t *target_buf = malloc(chunk_size * data_size);
3531 if (!target_buf) {
3532 LOG_ERROR("Out of memory");
3533 return ERROR_FAIL;
3534 }
3535
3536 for (unsigned i = 0; i < chunk_size; i++) {
3537 switch (data_size) {
3538 case 8:
3539 target_buffer_set_u64(target, target_buf + i * data_size, b);
3540 break;
3541 case 4:
3542 target_buffer_set_u32(target, target_buf + i * data_size, b);
3543 break;
3544 case 2:
3545 target_buffer_set_u16(target, target_buf + i * data_size, b);
3546 break;
3547 case 1:
3548 target_buffer_set_u8(target, target_buf + i * data_size, b);
3549 break;
3550 default:
3551 exit(-1);
3552 }
3553 }
3554
3555 int retval = ERROR_OK;
3556
3557 for (unsigned x = 0; x < c; x += chunk_size) {
3558 unsigned current;
3559 current = c - x;
3560 if (current > chunk_size)
3561 current = chunk_size;
3562 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3563 if (retval != ERROR_OK)
3564 break;
3565 /* avoid GDB timeouts */
3566 keep_alive();
3567 }
3568 free(target_buf);
3569
3570 return retval;
3571 }
3572
3573
3574 COMMAND_HANDLER(handle_mw_command)
3575 {
3576 if (CMD_ARGC < 2)
3577 return ERROR_COMMAND_SYNTAX_ERROR;
3578 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3579 target_write_fn fn;
3580 if (physical) {
3581 CMD_ARGC--;
3582 CMD_ARGV++;
3583 fn = target_write_phys_memory;
3584 } else
3585 fn = target_write_memory;
3586 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3587 return ERROR_COMMAND_SYNTAX_ERROR;
3588
3589 target_addr_t address;
3590 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3591
3592 uint64_t value;
3593 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3594
3595 unsigned count = 1;
3596 if (CMD_ARGC == 3)
3597 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3598
3599 struct target *target = get_current_target(CMD_CTX);
3600 unsigned wordsize;
3601 switch (CMD_NAME[2]) {
3602 case 'd':
3603 wordsize = 8;
3604 break;
3605 case 'w':
3606 wordsize = 4;
3607 break;
3608 case 'h':
3609 wordsize = 2;
3610 break;
3611 case 'b':
3612 wordsize = 1;
3613 break;
3614 default:
3615 return ERROR_COMMAND_SYNTAX_ERROR;
3616 }
3617
3618 return target_fill_mem(target, address, fn, wordsize, value, count);
3619 }
3620
3621 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3622 target_addr_t *min_address, target_addr_t *max_address)
3623 {
3624 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3625 return ERROR_COMMAND_SYNTAX_ERROR;
3626
3627 /* a base address isn't always necessary,
3628 * default to 0x0 (i.e. don't relocate) */
3629 if (CMD_ARGC >= 2) {
3630 target_addr_t addr;
3631 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3632 image->base_address = addr;
3633 image->base_address_set = true;
3634 } else
3635 image->base_address_set = false;
3636
3637 image->start_address_set = false;
3638
3639 if (CMD_ARGC >= 4)
3640 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3641 if (CMD_ARGC == 5) {
3642 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3643 /* use size (given) to find max (required) */
3644 *max_address += *min_address;
3645 }
3646
3647 if (*min_address > *max_address)
3648 return ERROR_COMMAND_SYNTAX_ERROR;
3649
3650 return ERROR_OK;
3651 }
3652
/**
 * Implements "load_image filename [address [type [min_address [size]]]]":
 * write an image file into target memory. Sections falling outside the
 * [min_address, max_address) window are skipped; sections straddling a
 * boundary are clipped.
 */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* only write sections overlapping the [min, max) window */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3736
3737 COMMAND_HANDLER(handle_dump_image_command)
3738 {
3739 struct fileio *fileio;
3740 uint8_t *buffer;
3741 int retval, retvaltemp;
3742 target_addr_t address, size;
3743 struct duration bench;
3744 struct target *target = get_current_target(CMD_CTX);
3745
3746 if (CMD_ARGC != 3)
3747 return ERROR_COMMAND_SYNTAX_ERROR;
3748
3749 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3750 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3751
3752 uint32_t buf_size = (size > 4096) ? 4096 : size;
3753 buffer = malloc(buf_size);
3754 if (!buffer)
3755 return ERROR_FAIL;
3756
3757 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3758 if (retval != ERROR_OK) {
3759 free(buffer);
3760 return retval;
3761 }
3762
3763 duration_start(&bench);
3764
3765 while (size > 0) {
3766 size_t size_written;
3767 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3768 retval = target_read_buffer(target, address, this_run_size, buffer);
3769 if (retval != ERROR_OK)
3770 break;
3771
3772 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3773 if (retval != ERROR_OK)
3774 break;
3775
3776 size -= this_run_size;
3777 address += this_run_size;
3778 }
3779
3780 free(buffer);
3781
3782 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3783 size_t filesize;
3784 retval = fileio_size(fileio, &filesize);
3785 if (retval != ERROR_OK)
3786 return retval;
3787 command_print(CMD,
3788 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3789 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3790 }
3791
3792 retvaltemp = fileio_close(fileio);
3793 if (retvaltemp != ERROR_OK)
3794 return retvaltemp;
3795
3796 return retval;
3797 }
3798
/* How thoroughly the verify/test image commands compare against target
 * memory; see handle_verify_image_command_internal(). */
enum verify_mode {
	IMAGE_TEST = 0,		/* only print each section's address/length */
	IMAGE_VERIFY = 1,	/* CRC check, binary compare on mismatch */
	IMAGE_CHECKSUM_ONLY = 2	/* CRC check only, fail on first mismatch */
};
3804
3805 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3806 {
3807 uint8_t *buffer;
3808 size_t buf_cnt;
3809 uint32_t image_size;
3810 int retval;
3811 uint32_t checksum = 0;
3812 uint32_t mem_checksum = 0;
3813
3814 struct image image;
3815
3816 struct target *target = get_current_target(CMD_CTX);
3817
3818 if (CMD_ARGC < 1)
3819 return ERROR_COMMAND_SYNTAX_ERROR;
3820
3821 if (!target) {
3822 LOG_ERROR("no target selected");
3823 return ERROR_FAIL;
3824 }
3825
3826 struct duration bench;
3827 duration_start(&bench);
3828
3829 if (CMD_ARGC >= 2) {
3830 target_addr_t addr;
3831 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3832 image.base_address = addr;
3833 image.base_address_set = true;
3834 } else {
3835 image.base_address_set = false;
3836 image.base_address = 0x0;
3837 }
3838
3839 image.start_address_set = false;
3840
3841 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3842 if (retval != ERROR_OK)
3843 return retval;
3844
3845 image_size = 0x0;
3846 int diffs = 0;
3847 retval = ERROR_OK;
3848 for (unsigned int i = 0; i < image.num_sections; i++) {
3849 buffer = malloc(image.sections[i].size);
3850 if (!buffer) {
3851 command_print(CMD,
3852 "error allocating buffer for section (%" PRIu32 " bytes)",
3853 image.sections[i].size);
3854 break;
3855 }
3856 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3857 if (retval != ERROR_OK) {
3858 free(buffer);
3859 break;
3860 }
3861
3862 if (verify >= IMAGE_VERIFY) {
3863 /* calculate checksum of image */
3864 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3865 if (retval != ERROR_OK) {
3866 free(buffer);
3867 break;
3868 }
3869
3870 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3871 if (retval != ERROR_OK) {
3872 free(buffer);
3873 break;
3874 }
3875 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3876 LOG_ERROR("checksum mismatch");
3877 free(buffer);
3878 retval = ERROR_FAIL;
3879 goto done;
3880 }
3881 if (checksum != mem_checksum) {
3882 /* failed crc checksum, fall back to a binary compare */
3883 uint8_t *data;
3884
3885 if (diffs == 0)
3886 LOG_ERROR("checksum mismatch - attempting binary compare");
3887
3888 data = malloc(buf_cnt);
3889
3890 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3891 if (retval == ERROR_OK) {
3892 uint32_t t;
3893 for (t = 0; t < buf_cnt; t++) {
3894 if (data[t] != buffer[t]) {
3895 command_print(CMD,
3896 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3897 diffs,
3898 (unsigned)(t + image.sections[i].base_address),
3899 data[t],
3900 buffer[t]);
3901 if (diffs++ >= 127) {
3902 command_print(CMD, "More than 128 errors, the rest are not printed.");
3903 free(data);
3904 free(buffer);
3905 goto done;
3906 }
3907 }
3908 keep_alive();
3909 }
3910 }
3911 free(data);
3912 }
3913 } else {
3914 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3915 image.sections[i].base_address,
3916 buf_cnt);
3917 }
3918
3919 free(buffer);
3920 image_size += buf_cnt;
3921 }
3922 if (diffs > 0)
3923 command_print(CMD, "No more differences found.");
3924 done:
3925 if (diffs > 0)
3926 retval = ERROR_FAIL;
3927 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3928 command_print(CMD, "verified %" PRIu32 " bytes "
3929 "in %fs (%0.3f KiB/s)", image_size,
3930 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3931 }
3932
3933 image_close(&image);
3934
3935 return retval;
3936 }
3937
/* "verify_image_checksum": CRC comparison only, no byte-level fallback */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3942
/* "verify_image": CRC comparison, with byte-level compare on mismatch */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3947
/* "test_image": parse the image and print section info, no comparison */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3952
/* Print every breakpoint of the current target: software breakpoints
 * with their saved original instruction, and hardware breakpoints in
 * their plain, context (asid-only) or hybrid (address+asid) flavors. */
static int handle_bp_command_list(struct command_invocation *cmd)
{
	struct target *target = get_current_target(cmd->ctx);
	struct breakpoint *breakpoint = target->breakpoints;
	while (breakpoint) {
		if (breakpoint->type == BKPT_SOFT) {
			char *buf = buf_to_hex_str(breakpoint->orig_instr,
					breakpoint->length);
			command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, 0x%s",
					breakpoint->address,
					breakpoint->length,
					buf);
			free(buf);
		} else {
			/* address == 0 with asid != 0: context breakpoint;
			 * both non-zero: hybrid; otherwise plain IVA breakpoint */
			if ((breakpoint->address == 0) && (breakpoint->asid != 0))
				command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %u",
						breakpoint->asid,
						breakpoint->length, breakpoint->number);
			else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
				command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
						breakpoint->address,
						breakpoint->length, breakpoint->number);
				command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
						breakpoint->asid);
			} else
				command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
						breakpoint->address,
						breakpoint->length, breakpoint->number);
		}

		breakpoint = breakpoint->next;
	}
	return ERROR_OK;
}
3987
/* Add a breakpoint of the flavor implied by the argument combination:
 * plain (asid == 0), context (addr == 0), or hybrid (both non-zero).
 * Context and hybrid breakpoints require target-type support. */
static int handle_bp_command_set(struct command_invocation *cmd,
		target_addr_t addr, uint32_t asid, uint32_t length, int hw)
{
	struct target *target = get_current_target(cmd->ctx);
	int retval;

	if (asid == 0) {
		retval = breakpoint_add(target, addr, length, hw);
		/* error is always logged in breakpoint_add(), do not print it again */
		if (retval == ERROR_OK)
			command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);

	} else if (addr == 0) {
		if (!target->type->add_context_breakpoint) {
			LOG_ERROR("Context breakpoint not available");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		retval = context_breakpoint_add(target, asid, length, hw);
		/* error is always logged in context_breakpoint_add(), do not print it again */
		if (retval == ERROR_OK)
			command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);

	} else {
		if (!target->type->add_hybrid_breakpoint) {
			LOG_ERROR("Hybrid breakpoint not available");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
		/* error is always logged in hybrid_breakpoint_add(), do not print it again */
		if (retval == ERROR_OK)
			command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
	}
	return retval;
}
4022
/**
 * Implements the "bp" command:
 *   bp                      - list breakpoints
 *   bp addr len             - software breakpoint
 *   bp addr len 'hw'        - hardware breakpoint
 *   bp asid len 'hw_ctx'    - hardware context breakpoint
 *   bp addr asid len [...]  - hybrid hardware breakpoint
 */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
	case 0:
		return handle_bp_command_list(CMD);

	case 2:
		asid = 0;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	case 3:
		if (strcmp(CMD_ARGV[2], "hw") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			asid = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			addr = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		}
		/* three args without a recognized keyword are treated as the
		 * hybrid addr/asid/length form below */
		/* fallthrough */
	case 4:
		hw = BKPT_HARD;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
4066
4067 COMMAND_HANDLER(handle_rbp_command)
4068 {
4069 if (CMD_ARGC != 1)
4070 return ERROR_COMMAND_SYNTAX_ERROR;
4071
4072 struct target *target = get_current_target(CMD_CTX);
4073
4074 if (!strcmp(CMD_ARGV[0], "all")) {
4075 breakpoint_remove_all(target);
4076 } else {
4077 target_addr_t addr;
4078 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4079
4080 breakpoint_remove(target, addr);
4081 }
4082
4083 return ERROR_OK;
4084 }
4085
/**
 * Implements the "wp" command:
 *   wp                                  - list watchpoints
 *   wp addr len [r|w|a [value [mask]]]  - add a watchpoint
 */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC == 0) {
		/* no arguments: list the target's current watchpoints */
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx32
					", mask: 0x%8.8" PRIx32,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint32_t data_value = 0x0;
	uint32_t data_mask = 0xffffffff;

	/* cascading switch: each higher arity parses one more optional
	 * argument, then falls through to the mandatory addr/len pair */
	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
		/* fall through */
	case 3:
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
4153
4154 COMMAND_HANDLER(handle_rwp_command)
4155 {
4156 if (CMD_ARGC != 1)
4157 return ERROR_COMMAND_SYNTAX_ERROR;
4158
4159 target_addr_t addr;
4160 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4161
4162 struct target *target = get_current_target(CMD_CTX);
4163 watchpoint_remove(target, addr);
4164
4165 return ERROR_OK;
4166 }
4167
4168 /**
4169 * Translate a virtual address to a physical address.
4170 *
4171 * The low-level target implementation must have logged a detailed error
4172 * which is forwarded to telnet/GDB session.
4173 */
4174 COMMAND_HANDLER(handle_virt2phys_command)
4175 {
4176 if (CMD_ARGC != 1)
4177 return ERROR_COMMAND_SYNTAX_ERROR;
4178
4179 target_addr_t va;
4180 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4181 target_addr_t pa;
4182
4183 struct target *target = get_current_target(CMD_CTX);
4184 int retval = target->type->virt2phys(target, va, &pa);
4185 if (retval == ERROR_OK)
4186 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4187
4188 return retval;
4189 }
4190
/* fwrite() wrapper that logs a short write instead of failing silently */
static void write_data(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4197
/* Emit a 32-bit value to f in the target's byte order. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t raw[4];

	target_buffer_set_u32(target, raw, l);
	write_data(f, raw, sizeof(raw));
}
4205
/* Emit a NUL-terminated string to f, without the terminator. */
static void write_string(FILE *f, char *s)
{
	write_data(f, s, strlen(s));
}
4210
typedef unsigned char UNIT[2]; /* unit of profiling: each gmon histogram bucket covers sizeof(UNIT) bytes */
4212
4213 /* Dump a gmon.out histogram file. */
4214 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4215 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4216 {
4217 uint32_t i;
4218 FILE *f = fopen(filename, "w");
4219 if (!f)
4220 return;
4221 write_string(f, "gmon");
4222 write_long(f, 0x00000001, target); /* Version */
4223 write_long(f, 0, target); /* padding */
4224 write_long(f, 0, target); /* padding */
4225 write_long(f, 0, target); /* padding */
4226
4227 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4228 write_data(f, &zero, 1);
4229
4230 /* figure out bucket size */
4231 uint32_t min;
4232 uint32_t max;
4233 if (with_range) {
4234 min = start_address;
4235 max = end_address;
4236 } else {
4237 min = samples[0];
4238 max = samples[0];
4239 for (i = 0; i < sample_num; i++) {
4240 if (min > samples[i])
4241 min = samples[i];
4242 if (max < samples[i])
4243 max = samples[i];
4244 }
4245
4246 /* max should be (largest sample + 1)
4247 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4248 if (max < UINT32_MAX)
4249 max++;
4250
4251 /* gprof requires (max - min) >= 2 */
4252 while ((max - min) < 2) {
4253 if (max < UINT32_MAX)
4254 max++;
4255 else
4256 min--;
4257 }
4258 }
4259
4260 uint32_t address_space = max - min;
4261
4262 /* FIXME: What is the reasonable number of buckets?
4263 * The profiling result will be more accurate if there are enough buckets. */
4264 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4265 uint32_t num_buckets = address_space / sizeof(UNIT);
4266 if (num_buckets > max_buckets)
4267 num_buckets = max_buckets;
4268 int *buckets = malloc(sizeof(int) * num_buckets);
4269 if (!buckets) {
4270 fclose(f);
4271 return;
4272 }
4273 memset(buckets, 0, sizeof(int) * num_buckets);
4274 for (i = 0; i < sample_num; i++) {
4275 uint32_t address = samples[i];
4276
4277 if ((address < min) || (max <= address))
4278 continue;
4279
4280 long long a = address - min;
4281 long long b = num_buckets;
4282 long long c = address_space;
4283 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4284 buckets[index_t]++;
4285 }
4286
4287 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4288 write_long(f, min, target); /* low_pc */
4289 write_long(f, max, target); /* high_pc */
4290 write_long(f, num_buckets, target); /* # of buckets */
4291 float sample_rate = sample_num / (duration_ms / 1000.0);
4292 write_long(f, sample_rate, target);
4293 write_string(f, "seconds");
4294 for (i = 0; i < (15-strlen("seconds")); i++)
4295 write_data(f, &zero, 1);
4296 write_string(f, "s");
4297
4298 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4299
4300 char *data = malloc(2 * num_buckets);
4301 if (data) {
4302 for (i = 0; i < num_buckets; i++) {
4303 int val;
4304 val = buckets[i];
4305 if (val > 65535)
4306 val = 65535;
4307 data[i * 2] = val&0xff;
4308 data[i * 2 + 1] = (val >> 8) & 0xff;
4309 }
4310 free(buckets);
4311 write_data(f, data, num_buckets * 2);
4312 free(data);
4313 } else
4314 free(buckets);
4315
4316 fclose(f);
4317 }
4318
4319 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4320 * which will be used as a random sampling of PC */
4321 COMMAND_HANDLER(handle_profile_command)
4322 {
4323 struct target *target = get_current_target(CMD_CTX);
4324
4325 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4326 return ERROR_COMMAND_SYNTAX_ERROR;
4327
4328 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4329 uint32_t offset;
4330 uint32_t num_of_samples;
4331 int retval = ERROR_OK;
4332 bool halted_before_profiling = target->state == TARGET_HALTED;
4333
4334 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4335
4336 uint32_t start_address = 0;
4337 uint32_t end_address = 0;
4338 bool with_range = false;
4339 if (CMD_ARGC == 4) {
4340 with_range = true;
4341 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4342 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4343 if (start_address > end_address || (end_address - start_address) < 2) {
4344 command_print(CMD, "Error: end - start < 2");
4345 return ERROR_COMMAND_ARGUMENT_INVALID;
4346 }
4347 }
4348
4349 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4350 if (!samples) {
4351 LOG_ERROR("No memory to store samples.");
4352 return ERROR_FAIL;
4353 }
4354
4355 uint64_t timestart_ms = timeval_ms();
4356 /**
4357 * Some cores let us sample the PC without the
4358 * annoying halt/resume step; for example, ARMv7 PCSR.
4359 * Provide a way to use that more efficient mechanism.
4360 */
4361 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4362 &num_of_samples, offset);
4363 if (retval != ERROR_OK) {
4364 free(samples);
4365 return retval;
4366 }
4367 uint32_t duration_ms = timeval_ms() - timestart_ms;
4368
4369 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4370
4371 retval = target_poll(target);
4372 if (retval != ERROR_OK) {
4373 free(samples);
4374 return retval;
4375 }
4376
4377 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4378 /* The target was halted before we started and is running now. Halt it,
4379 * for consistency. */
4380 retval = target_halt(target);
4381 if (retval != ERROR_OK) {
4382 free(samples);
4383 return retval;
4384 }
4385 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4386 /* The target was running before we started and is halted now. Resume
4387 * it, for consistency. */
4388 retval = target_resume(target, 1, 0, 0, 0);
4389 if (retval != ERROR_OK) {
4390 free(samples);
4391 return retval;
4392 }
4393 }
4394
4395 retval = target_poll(target);
4396 if (retval != ERROR_OK) {
4397 free(samples);
4398 return retval;
4399 }
4400
4401 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4402 with_range, start_address, end_address, target, duration_ms);
4403 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4404
4405 free(samples);
4406 return retval;
4407 }
4408
4409 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4410 {
4411 char *namebuf;
4412 Jim_Obj *obj_name, *obj_val;
4413 int result;
4414
4415 namebuf = alloc_printf("%s(%d)", varname, idx);
4416 if (!namebuf)
4417 return JIM_ERR;
4418
4419 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4420 jim_wide wide_val = val;
4421 obj_val = Jim_NewWideObj(interp, wide_val);
4422 if (!obj_name || !obj_val) {
4423 free(namebuf);
4424 return JIM_ERR;
4425 }
4426
4427 Jim_IncrRefCount(obj_name);
4428 Jim_IncrRefCount(obj_val);
4429 result = Jim_SetVariable(interp, obj_name, obj_val);
4430 Jim_DecrRefCount(interp, obj_name);
4431 Jim_DecrRefCount(interp, obj_val);
4432 free(namebuf);
4433 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4434 return result;
4435 }
4436
4437 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4438 {
4439 int e;
4440
4441 LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");
4442
4443 /* argv[0] = name of array to receive the data
4444 * argv[1] = desired element width in bits
4445 * argv[2] = memory address
4446 * argv[3] = count of times to read
4447 * argv[4] = optional "phys"
4448 */
4449 if (argc < 4 || argc > 5) {
4450 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4451 return JIM_ERR;
4452 }
4453
4454 /* Arg 0: Name of the array variable */
4455 const char *varname = Jim_GetString(argv[0], NULL);
4456
4457 /* Arg 1: Bit width of one element */
4458 long l;
4459 e = Jim_GetLong(interp, argv[1], &l);
4460 if (e != JIM_OK)
4461 return e;
4462 const unsigned int width_bits = l;
4463
4464 if (width_bits != 8 &&
4465 width_bits != 16 &&
4466 width_bits != 32 &&
4467 width_bits != 64) {
4468 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4469 Jim_AppendStrings(interp, Jim_GetResult(interp),
4470 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4471 return JIM_ERR;
4472 }
4473 const unsigned int width = width_bits / 8;
4474
4475 /* Arg 2: Memory address */
4476 jim_wide wide_addr;
4477 e = Jim_GetWide(interp, argv[2], &wide_addr);
4478 if (e != JIM_OK)
4479 return e;
4480 target_addr_t addr = (target_addr_t)wide_addr;
4481
4482 /* Arg 3: Number of elements to read */
4483 e = Jim_GetLong(interp, argv[3], &l);
4484 if (e != JIM_OK)
4485 return e;
4486 size_t len = l;
4487
4488 /* Arg 4: phys */
4489 bool is_phys = false;
4490 if (argc > 4) {
4491 int str_len = 0;
4492 const char *phys = Jim_GetString(argv[4], &str_len);
4493 if (!strncmp(phys, "phys", str_len))
4494 is_phys = true;
4495 else
4496 return JIM_ERR;
4497 }
4498
4499 /* Argument checks */
4500 if (len == 0) {
4501 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4502 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
4503 return JIM_ERR;
4504 }
4505 if ((addr + (len * width)) < addr) {
4506 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4507 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4508 return JIM_ERR;
4509 }
4510 if (len > 65536) {
4511 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4512 Jim_AppendStrings(interp, Jim_GetResult(interp),
4513 "mem2array: too large read request, exceeds 64K items", NULL);
4514 return JIM_ERR;
4515 }
4516
4517 if ((width == 1) ||
4518 ((width == 2) && ((addr & 1) == 0)) ||
4519 ((width == 4) && ((addr & 3) == 0)) ||
4520 ((width == 8) && ((addr & 7) == 0))) {
4521 /* alignment correct */
4522 } else {
4523 char buf[100];
4524 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4525 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4526 addr,
4527 width);
4528 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4529 return JIM_ERR;
4530 }
4531
4532 /* Transfer loop */
4533
4534 /* index counter */
4535 size_t idx = 0;
4536
4537 const size_t buffersize = 4096;
4538 uint8_t *buffer = malloc(buffersize);
4539 if (!buffer)
4540 return JIM_ERR;
4541
4542 /* assume ok */
4543 e = JIM_OK;
4544 while (len) {
4545 /* Slurp... in buffer size chunks */
4546 const unsigned int max_chunk_len = buffersize / width;
4547 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4548
4549 int retval;
4550 if (is_phys)
4551 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4552 else
4553 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4554 if (retval != ERROR_OK) {
4555 /* BOO !*/
4556 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4557 addr,
4558 width,
4559 chunk_len);
4560 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4561 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4562 e = JIM_ERR;
4563 break;
4564 } else {
4565 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4566 uint64_t v = 0;
4567 switch (width) {
4568 case 8:
4569 v = target_buffer_get_u64(target, &buffer[i*width]);
4570 break;
4571 case 4:
4572 v = target_buffer_get_u32(target, &buffer[i*width]);
4573 break;
4574 case 2:
4575 v = target_buffer_get_u16(target, &buffer[i*width]);
4576 break;
4577 case 1:
4578 v = buffer[i] & 0x0ff;
4579 break;
4580 }
4581 new_u64_array_element(interp, varname, idx, v);
4582 }
4583 len -= chunk_len;
4584 addr += chunk_len * width;
4585 }
4586 }
4587
4588 free(buffer);
4589
4590 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4591
4592 return e;
4593 }
4594
4595 static int target_jim_read_memory(Jim_Interp *interp, int argc,
4596 Jim_Obj * const *argv)
4597 {
4598 /*
4599 * argv[1] = memory address
4600 * argv[2] = desired element width in bits
4601 * argv[3] = number of elements to read
4602 * argv[4] = optional "phys"
4603 */
4604
4605 if (argc < 4 || argc > 5) {
4606 Jim_WrongNumArgs(interp, 1, argv, "address width count ['phys']");
4607 return JIM_ERR;
4608 }
4609
4610 /* Arg 1: Memory address. */
4611 jim_wide wide_addr;
4612 int e;
4613 e = Jim_GetWide(interp, argv[1], &wide_addr);
4614
4615 if (e != JIM_OK)
4616 return e;
4617
4618 target_addr_t addr = (target_addr_t)wide_addr;
4619
4620 /* Arg 2: Bit width of one element. */
4621 long l;
4622 e = Jim_GetLong(interp, argv[2], &l);
4623
4624 if (e != JIM_OK)
4625 return e;
4626
4627 const unsigned int width_bits = l;
4628
4629 /* Arg 3: Number of elements to read. */
4630 e = Jim_GetLong(interp, argv[3], &l);
4631
4632 if (e != JIM_OK)
4633 return e;
4634
4635 size_t count = l;
4636
4637 /* Arg 4: Optional 'phys'. */
4638 bool is_phys = false;
4639
4640 if (argc > 4) {
4641 const char *phys = Jim_GetString(argv[4], NULL);
4642
4643 if (strcmp(phys, "phys")) {
4644 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4645 return JIM_ERR;
4646 }
4647
4648 is_phys = true;
4649 }
4650
4651 switch (width_bits) {
4652 case 8:
4653 case 16:
4654 case 32:
4655 case 64:
4656 break;
4657 default:
4658 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4659 return JIM_ERR;
4660 }
4661
4662 const unsigned int width = width_bits / 8;
4663
4664 if ((addr + (count * width)) < addr) {
4665 Jim_SetResultString(interp, "read_memory: addr + count wraps to zero", -1);
4666 return JIM_ERR;
4667 }
4668
4669 if (count > 65536) {
4670 Jim_SetResultString(interp, "read_memory: too large read request, exeeds 64K elements", -1);
4671 return JIM_ERR;
4672 }
4673
4674 struct command_context *cmd_ctx = current_command_context(interp);
4675 assert(cmd_ctx != NULL);
4676 struct target *target = get_current_target(cmd_ctx);
4677
4678 const size_t buffersize = 4096;
4679 uint8_t *buffer = malloc(buffersize);
4680
4681 if (!buffer) {
4682 LOG_ERROR("Failed to allocate memory");
4683 return JIM_ERR;
4684 }
4685
4686 Jim_Obj *result_list = Jim_NewListObj(interp, NULL, 0);
4687 Jim_IncrRefCount(result_list);
4688
4689 while (count > 0) {
4690 const unsigned int max_chunk_len = buffersize / width;
4691 const size_t chunk_len = MIN(count, max_chunk_len);
4692
4693 int retval;
4694
4695 if (is_phys)
4696 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4697 else
4698 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4699
4700 if (retval != ERROR_OK) {
4701 LOG_ERROR("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4702 addr, width_bits, chunk_len);
4703 Jim_SetResultString(interp, "read_memory: failed to read memory", -1);
4704 e = JIM_ERR;
4705 break;
4706 }
4707
4708 for (size_t i = 0; i < chunk_len ; i++) {
4709 uint64_t v = 0;
4710
4711 switch (width) {
4712 case 8:
4713 v = target_buffer_get_u64(target, &buffer[i * width]);
4714 break;
4715 case 4:
4716 v = target_buffer_get_u32(target, &buffer[i * width]);
4717 break;
4718 case 2:
4719 v = target_buffer_get_u16(target, &buffer[i * width]);
4720 break;
4721 case 1:
4722 v = buffer[i];
4723 break;
4724 }
4725
4726 char value_buf[19];
4727 snprintf(value_buf, sizeof(value_buf), "0x%" PRIx64, v);
4728
4729 Jim_ListAppendElement(interp, result_list,
4730 Jim_NewStringObj(interp, value_buf, -1));
4731 }
4732
4733 count -= chunk_len;
4734 addr += chunk_len * width;
4735 }
4736
4737 free(buffer);
4738
4739 if (e != JIM_OK) {
4740 Jim_DecrRefCount(interp, result_list);
4741 return e;
4742 }
4743
4744 Jim_SetResult(interp, result_list);
4745 Jim_DecrRefCount(interp, result_list);
4746
4747 return JIM_OK;
4748 }
4749
4750 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4751 {
4752 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4753 if (!namebuf)
4754 return JIM_ERR;
4755
4756 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4757 if (!obj_name) {
4758 free(namebuf);
4759 return JIM_ERR;
4760 }
4761
4762 Jim_IncrRefCount(obj_name);
4763 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4764 Jim_DecrRefCount(interp, obj_name);
4765 free(namebuf);
4766 if (!obj_val)
4767 return JIM_ERR;
4768
4769 jim_wide wide_val;
4770 int result = Jim_GetWide(interp, obj_val, &wide_val);
4771 *val = wide_val;
4772 return result;
4773 }
4774
4775 static int target_array2mem(Jim_Interp *interp, struct target *target,
4776 int argc, Jim_Obj *const *argv)
4777 {
4778 int e;
4779
4780 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4781
4782 /* argv[0] = name of array from which to read the data
4783 * argv[1] = desired element width in bits
4784 * argv[2] = memory address
4785 * argv[3] = number of elements to write
4786 * argv[4] = optional "phys"
4787 */
4788 if (argc < 4 || argc > 5) {
4789 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4790 return JIM_ERR;
4791 }
4792
4793 /* Arg 0: Name of the array variable */
4794 const char *varname = Jim_GetString(argv[0], NULL);
4795
4796 /* Arg 1: Bit width of one element */
4797 long l;
4798 e = Jim_GetLong(interp, argv[1], &l);
4799 if (e != JIM_OK)
4800 return e;
4801 const unsigned int width_bits = l;
4802
4803 if (width_bits != 8 &&
4804 width_bits != 16 &&
4805 width_bits != 32 &&
4806 width_bits != 64) {
4807 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4808 Jim_AppendStrings(interp, Jim_GetResult(interp),
4809 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4810 return JIM_ERR;
4811 }
4812 const unsigned int width = width_bits / 8;
4813
4814 /* Arg 2: Memory address */
4815 jim_wide wide_addr;
4816 e = Jim_GetWide(interp, argv[2], &wide_addr);
4817 if (e != JIM_OK)
4818 return e;
4819 target_addr_t addr = (target_addr_t)wide_addr;
4820
4821 /* Arg 3: Number of elements to write */
4822 e = Jim_GetLong(interp, argv[3], &l);
4823 if (e != JIM_OK)
4824 return e;
4825 size_t len = l;
4826
4827 /* Arg 4: Phys */
4828 bool is_phys = false;
4829 if (argc > 4) {
4830 int str_len = 0;
4831 const char *phys = Jim_GetString(argv[4], &str_len);
4832 if (!strncmp(phys, "phys", str_len))
4833 is_phys = true;
4834 else
4835 return JIM_ERR;
4836 }
4837
4838 /* Argument checks */
4839 if (len == 0) {
4840 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4841 Jim_AppendStrings(interp, Jim_GetResult(interp),
4842 "array2mem: zero width read?", NULL);
4843 return JIM_ERR;
4844 }
4845
4846 if ((addr + (len * width)) < addr) {
4847 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4848 Jim_AppendStrings(interp, Jim_GetResult(interp),
4849 "array2mem: addr + len - wraps to zero?", NULL);
4850 return JIM_ERR;
4851 }
4852
4853 if (len > 65536) {
4854 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4855 Jim_AppendStrings(interp, Jim_GetResult(interp),
4856 "array2mem: too large memory write request, exceeds 64K items", NULL);
4857 return JIM_ERR;
4858 }
4859
4860 if ((width == 1) ||
4861 ((width == 2) && ((addr & 1) == 0)) ||
4862 ((width == 4) && ((addr & 3) == 0)) ||
4863 ((width == 8) && ((addr & 7) == 0))) {
4864 /* alignment correct */
4865 } else {
4866 char buf[100];
4867 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4868 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4869 addr,
4870 width);
4871 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4872 return JIM_ERR;
4873 }
4874
4875 /* Transfer loop */
4876
4877 /* assume ok */
4878 e = JIM_OK;
4879
4880 const size_t buffersize = 4096;
4881 uint8_t *buffer = malloc(buffersize);
4882 if (!buffer)
4883 return JIM_ERR;
4884
4885 /* index counter */
4886 size_t idx = 0;
4887
4888 while (len) {
4889 /* Slurp... in buffer size chunks */
4890 const unsigned int max_chunk_len = buffersize / width;
4891
4892 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4893
4894 /* Fill the buffer */
4895 for (size_t i = 0; i < chunk_len; i++, idx++) {
4896 uint64_t v = 0;
4897 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4898 free(buffer);
4899 return JIM_ERR;
4900 }
4901 switch (width) {
4902 case 8:
4903 target_buffer_set_u64(target, &buffer[i * width], v);
4904 break;
4905 case 4:
4906 target_buffer_set_u32(target, &buffer[i * width], v);
4907 break;
4908 case 2:
4909 target_buffer_set_u16(target, &buffer[i * width], v);
4910 break;
4911 case 1:
4912 buffer[i] = v & 0x0ff;
4913 break;
4914 }
4915 }
4916 len -= chunk_len;
4917
4918 /* Write the buffer to memory */
4919 int retval;
4920 if (is_phys)
4921 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4922 else
4923 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4924 if (retval != ERROR_OK) {
4925 /* BOO !*/
4926 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4927 addr,
4928 width,
4929 chunk_len);
4930 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4931 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4932 e = JIM_ERR;
4933 break;
4934 }
4935 addr += chunk_len * width;
4936 }
4937
4938 free(buffer);
4939
4940 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4941
4942 return e;
4943 }
4944
4945 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4946 Jim_Obj * const *argv)
4947 {
4948 /*
4949 * argv[1] = memory address
4950 * argv[2] = desired element width in bits
4951 * argv[3] = list of data to write
4952 * argv[4] = optional "phys"
4953 */
4954
4955 if (argc < 4 || argc > 5) {
4956 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4957 return JIM_ERR;
4958 }
4959
4960 /* Arg 1: Memory address. */
4961 int e;
4962 jim_wide wide_addr;
4963 e = Jim_GetWide(interp, argv[1], &wide_addr);
4964
4965 if (e != JIM_OK)
4966 return e;
4967
4968 target_addr_t addr = (target_addr_t)wide_addr;
4969
4970 /* Arg 2: Bit width of one element. */
4971 long l;
4972 e = Jim_GetLong(interp, argv[2], &l);
4973
4974 if (e != JIM_OK)
4975 return e;
4976
4977 const unsigned int width_bits = l;
4978 size_t count = Jim_ListLength(interp, argv[3]);
4979
4980 /* Arg 4: Optional 'phys'. */
4981 bool is_phys = false;
4982
4983 if (argc > 4) {
4984 const char *phys = Jim_GetString(argv[4], NULL);
4985
4986 if (strcmp(phys, "phys")) {
4987 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4988 return JIM_ERR;
4989 }
4990
4991 is_phys = true;
4992 }
4993
4994 switch (width_bits) {
4995 case 8:
4996 case 16:
4997 case 32:
4998 case 64:
4999 break;
5000 default:
5001 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
5002 return JIM_ERR;
5003 }
5004
5005 const unsigned int width = width_bits / 8;
5006
5007 if ((addr + (count * width)) < addr) {
5008 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
5009 return JIM_ERR;
5010 }
5011
5012 if (count > 65536) {
5013 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
5014 return JIM_ERR;
5015 }
5016
5017 struct command_context *cmd_ctx = current_command_context(interp);
5018 assert(cmd_ctx != NULL);
5019 struct target *target = get_current_target(cmd_ctx);
5020
5021 const size_t buffersize = 4096;
5022 uint8_t *buffer = malloc(buffersize);
5023
5024 if (!buffer) {
5025 LOG_ERROR("Failed to allocate memory");
5026 return JIM_ERR;
5027 }
5028
5029 size_t j = 0;
5030
5031 while (count > 0) {
5032 const unsigned int max_chunk_len = buffersize / width;
5033 const size_t chunk_len = MIN(count, max_chunk_len);
5034
5035 for (size_t i = 0; i < chunk_len; i++, j++) {
5036 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
5037 jim_wide element_wide;
5038 Jim_GetWide(interp, tmp, &element_wide);
5039
5040 const uint64_t v = element_wide;
5041
5042 switch (width) {
5043 case 8:
5044 target_buffer_set_u64(target, &buffer[i * width], v);
5045 break;
5046 case 4:
5047 target_buffer_set_u32(target, &buffer[i * width], v);
5048 break;
5049 case 2:
5050 target_buffer_set_u16(target, &buffer[i * width], v);
5051 break;
5052 case 1:
5053 buffer[i] = v & 0x0ff;
5054 break;
5055 }
5056 }
5057
5058 count -= chunk_len;
5059
5060 int retval;
5061
5062 if (is_phys)
5063 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5064 else
5065 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5066
5067 if (retval != ERROR_OK) {
5068 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5069 addr, width_bits, chunk_len);
5070 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5071 e = JIM_ERR;
5072 break;
5073 }
5074
5075 addr += chunk_len * width;
5076 }
5077
5078 free(buffer);
5079
5080 return e;
5081 }
5082
/* FIX? should we propagate errors here rather than printing them
 * and continuing?
 */
/* Run every registered Tcl action for event @a e on @a target.
 * Handlers execute with the current target temporarily overridden to
 * @a target; script errors are reported but do not stop iteration,
 * except ERROR_COMMAND_CLOSE_CONNECTION which aborts immediately. */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	/* multiple actions may be registered for the same event; run each */
	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
					target->target_number,
					target_name(target),
					target_type_name(target),
					e,
					target_event_name(e),
					Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			cmd_ctx->current_target_override = saved_target_override;

			/* connection is gone: stop processing further handlers */
			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* a Tcl 'return' inside the body carries its own code */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
						target_event_name(e),
						target_name(target),
						Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
5131
/* Implements the Tcl command 'get_reg [-force] list': return a dict
 * mapping each requested register name to its value as a hex string.
 * With -force, each register is re-read from the target via
 * reg->type->get() instead of using the cached reg->value. */
static int target_jim_get_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	bool force = false;

	/* optional leading '-force'; shift it off so argv[1] is the list */
	if (argc == 3) {
		const char *option = Jim_GetString(argv[1], NULL);

		if (!strcmp(option, "-force")) {
			argc--;
			argv++;
			force = true;
		} else {
			Jim_SetResultFormatted(interp, "invalid option '%s'", option);
			return JIM_ERR;
		}
	}

	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
		return JIM_ERR;
	}

	const int length = Jim_ListLength(interp, argv[1]);

	Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);

	if (!result_dict)
		return JIM_ERR;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx != NULL);
	const struct target *target = get_current_target(cmd_ctx);

	/* NOTE(review): on the error returns below result_dict is left
	 * unreferenced for Jim's allocator to reclaim — confirm that matches
	 * the Jim object-lifetime rules used elsewhere in this file */
	for (int i = 0; i < length; i++) {
		Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);

		if (!elem)
			return JIM_ERR;

		const char *reg_name = Jim_String(elem);

		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		if (force) {
			/* refresh the cached value from the target */
			int retval = reg->type->get(reg);

			if (retval != ERROR_OK) {
				Jim_SetResultFormatted(interp, "failed to read register '%s'",
					reg_name);
				return JIM_ERR;
			}
		}

		char *reg_value = buf_to_hex_str(reg->value, reg->size);

		if (!reg_value) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		/* prefix with "0x" for the dict value */
		char *tmp = alloc_printf("0x%s", reg_value);

		free(reg_value);

		if (!tmp) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		Jim_DictAddElement(interp, result_dict, elem,
			Jim_NewStringObj(interp, tmp, -1));

		free(tmp);
	}

	Jim_SetResult(interp, result_dict);

	return JIM_OK;
}
5218
/* Implements the Tcl command 'set_reg dict': write each name/value pair
 * of the dict to the corresponding target register via reg->type->set(). */
static int target_jim_set_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "dict");
		return JIM_ERR;
	}

	/* Jim_DictPairs changed signature in Jim 0.80: it now returns the
	 * flattened key/value array directly instead of via out-parameter */
	int tmp;
#if JIM_VERSION >= 80
	Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);

	if (!dict)
		return JIM_ERR;
#else
	Jim_Obj **dict;
	int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);

	if (ret != JIM_OK)
		return ret;
#endif

	/* 'dict' holds length/2 pairs laid out as [name, value, name, value, ...] */
	const unsigned int length = tmp;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	const struct target *target = get_current_target(cmd_ctx);

	for (unsigned int i = 0; i < length; i += 2) {
		const char *reg_name = Jim_String(dict[i]);
		const char *reg_value = Jim_String(dict[i + 1]);
		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		/* reg->size is in bits; allocate enough whole bytes to hold it */
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));

		if (!buf) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		/* NOTE(review): str_to_buf() result is ignored — a malformed
		 * value string would silently write a partial buffer; confirm
		 * whether its return should be checked here */
		str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
		int retval = reg->type->set(reg, buf);
		free(buf);

		if (retval != ERROR_OK) {
			Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
				reg_value, reg_name);
			return JIM_ERR;
		}
	}

	return JIM_OK;
}
5277
5278 /**
5279 * Returns true only if the target has a handler for the specified event.
5280 */
5281 bool target_has_event_action(struct target *target, enum target_event event)
5282 {
5283 struct target_event_action *teap;
5284
5285 for (teap = target->event_action; teap; teap = teap->next) {
5286 if (teap->event == event)
5287 return true;
5288 }
5289 return false;
5290 }
5291
/* Keys accepted by the per-target 'configure'/'cget' sub-commands;
 * each enumerator corresponds to one "-option" entry in nvp_config_opts. */
enum target_cfg_param {
	TCFG_TYPE,
	TCFG_EVENT,
	TCFG_WORK_AREA_VIRT,
	TCFG_WORK_AREA_PHYS,
	TCFG_WORK_AREA_SIZE,
	TCFG_WORK_AREA_BACKUP,
	TCFG_ENDIAN,
	TCFG_COREID,
	TCFG_CHAIN_POSITION,
	TCFG_DBGBASE,
	TCFG_RTOS,
	TCFG_DEFER_EXAMINE,
	TCFG_GDB_PORT,
	TCFG_GDB_MAX_CONNECTIONS,
};
5308
/* Name/value table mapping the "-option" strings of 'configure'/'cget'
 * to enum target_cfg_param values; consumed via jim_getopt_nvp() in
 * target_configure(). Terminated by the NULL-name sentinel entry. */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type",             .value = TCFG_TYPE },
	{ .name = "-event",            .value = TCFG_EVENT },
	{ .name = "-work-area-virt",   .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys",   .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size",   .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian",           .value = TCFG_ENDIAN },
	{ .name = "-coreid",           .value = TCFG_COREID },
	{ .name = "-chain-position",   .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase",          .value = TCFG_DBGBASE },
	{ .name = "-rtos",             .value = TCFG_RTOS },
	{ .name = "-defer-examine",    .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port",         .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections",   .value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
5326
/**
 * Parse the option list of a target's 'configure' or 'cget' subcommand.
 *
 * @param goi    getopt state over the remaining Tcl arguments;
 *               goi->isconfigure selects set (configure) vs. get (cget) mode.
 * @param target the target being configured / queried.
 * @returns JIM_OK on success; JIM_ERR (or the getopt error) otherwise, with
 *          the Jim result holding either the queried value or an error message.
 *
 * NOTE: the 'no_params' label inside the TCFG_TYPE case is the shared
 * "cget takes no extra argument" check, reached by goto from most cases.
 */
static int target_configure(struct jim_getopt_info *goi, struct target *target)
{
	struct jim_nvp *n;
	Jim_Obj *o;
	jim_wide w;
	int e;

	/* parse config or cget options ... */
	while (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);
		/* jim_getopt_debug(goi); */

		if (target->type->target_jim_configure) {
			/* target defines a configure function */
			/* target gets first dibs on parameters */
			e = (*(target->type->target_jim_configure))(target, goi);
			if (e == JIM_OK) {
				/* more? */
				continue;
			}
			if (e == JIM_ERR) {
				/* An error */
				return e;
			}
			/* otherwise we 'continue' below */
		}
		e = jim_getopt_nvp(goi, nvp_config_opts, &n);
		if (e != JIM_OK) {
			jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
			return e;
		}
		switch (n->value) {
		case TCFG_TYPE:
			/* not settable */
			if (goi->isconfigure) {
				Jim_SetResultFormatted(goi->interp,
						"not settable: %s", n->name);
				return JIM_ERR;
			} else {
no_params:
				/* shared cget check: no trailing argument allowed */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}
			}
			Jim_SetResultString(goi->interp,
					target_type_name(target), -1);
			/* loop for more */
			break;
		case TCFG_EVENT:
			/* -event takes an event name and, for configure, a script body */
			if (goi->argc == 0) {
				Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
				return JIM_ERR;
			}

			e = jim_getopt_nvp(goi, nvp_target_event, &n);
			if (e != JIM_OK) {
				jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
				return e;
			}

			if (goi->isconfigure) {
				if (goi->argc != 1) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
					return JIM_ERR;
				}
			} else {
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
					return JIM_ERR;
				}
			}

			{
				struct target_event_action *teap;

				teap = target->event_action;
				/* replace existing? */
				while (teap) {
					if (teap->event == (enum target_event)n->value)
						break;
					teap = teap->next;
				}

				if (goi->isconfigure) {
					/* START_DEPRECATED_TPIU */
					if (n->value == TARGET_EVENT_TRACE_CONFIG)
						LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
					/* END_DEPRECATED_TPIU */

					bool replace = true;
					if (!teap) {
						/* create new */
						teap = calloc(1, sizeof(*teap));
						replace = false;
					}
					teap->event = n->value;
					teap->interp = goi->interp;
					jim_getopt_obj(goi, &o);
					/* drop any previous body before installing the new one */
					if (teap->body)
						Jim_DecrRefCount(teap->interp, teap->body);
					teap->body = Jim_DuplicateObj(goi->interp, o);
					/*
					 * FIXME:
					 * Tcl/TK - "tk events" have a nice feature.
					 * See the "BIND" command.
					 * We should support that here.
					 * You can specify %X and %Y in the event code.
					 * The idea is: %T - target name.
					 * The idea is: %N - target number
					 * The idea is: %E - event name.
					 */
					Jim_IncrRefCount(teap->body);

					if (!replace) {
						/* add to head of event list */
						teap->next = target->event_action;
						target->event_action = teap;
					}
					Jim_SetEmptyResult(goi->interp);
				} else {
					/* get: return the script body, or empty if unset */
					if (!teap)
						Jim_SetEmptyResult(goi->interp);
					else
						Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
				}
			}
			/* loop for more */
			break;

		case TCFG_WORK_AREA_VIRT:
			if (goi->isconfigure) {
				/* changing the work area invalidates all existing allocations */
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_virt = w;
				target->working_area_virt_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_PHYS:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_phys = w;
				target->working_area_phys_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_SIZE:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_size = w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_BACKUP:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* make this exactly 1 or 0 */
				target->backup_working_area = (!!w);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
			/* loop for more */
			break;


		case TCFG_ENDIAN:
			if (goi->isconfigure) {
				e = jim_getopt_nvp(goi, nvp_target_endian, &n);
				if (e != JIM_OK) {
					jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
					return e;
				}
				target->endianness = n->value;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			if (!n->name) {
				/* unknown/unset endianness: fall back to little-endian */
				target->endianness = TARGET_LITTLE_ENDIAN;
				n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			}
			Jim_SetResultString(goi->interp, n->name, -1);
			/* loop for more */
			break;

		case TCFG_COREID:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->coreid = (int32_t)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
			/* loop for more */
			break;

		case TCFG_CHAIN_POSITION:
			if (goi->isconfigure) {
				Jim_Obj *o_t;
				struct jtag_tap *tap;

				/* DAP-based targets take -dap, not -chain-position */
				if (target->has_dap) {
					Jim_SetResultString(goi->interp,
						"target requires -dap parameter instead of -chain-position!", -1);
					return JIM_ERR;
				}

				target_free_all_working_areas(target);
				e = jim_getopt_obj(goi, &o_t);
				if (e != JIM_OK)
					return e;
				tap = jtag_tap_by_jim_obj(goi->interp, o_t);
				if (!tap)
					return JIM_ERR;
				target->tap = tap;
				target->tap_configured = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
			/* loop for more */
			break;
		case TCFG_DBGBASE:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->dbgbase = (uint32_t)w;
				target->dbgbase_set = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
			/* loop for more */
			break;
		case TCFG_RTOS:
			/* RTOS */
			{
				int result = rtos_create(goi, target);
				if (result != JIM_OK)
					return result;
			}
			/* loop for more */
			break;

		case TCFG_DEFER_EXAMINE:
			/* DEFER_EXAMINE: flag only, no argument, not resettable here */
			target->defer_examine = true;
			/* loop for more */
			break;

		case TCFG_GDB_PORT:
			if (goi->isconfigure) {
				/* -gdb-port is only meaningful before the server starts */
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
					return JIM_ERR;
				}

				const char *s;
				e = jim_getopt_string(goi, &s, NULL);
				if (e != JIM_OK)
					return e;
				free(target->gdb_port_override);
				target->gdb_port_override = strdup(s);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
			/* loop for more */
			break;

		case TCFG_GDB_MAX_CONNECTIONS:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
					return JIM_ERR;
				}

				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* negative means "no limit" */
				target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
			break;
		}
	} /* while (goi->argc) */


	/* done - we return */
	return JIM_OK;
}
5663
5664 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5665 {
5666 struct command *c = jim_to_command(interp);
5667 struct jim_getopt_info goi;
5668
5669 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5670 goi.isconfigure = !strcmp(c->name, "configure");
5671 if (goi.argc < 1) {
5672 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5673 "missing: -option ...");
5674 return JIM_ERR;
5675 }
5676 struct command_context *cmd_ctx = current_command_context(interp);
5677 assert(cmd_ctx);
5678 struct target *target = get_current_target(cmd_ctx);
5679 return target_configure(&goi, target);
5680 }
5681
5682 static int jim_target_mem2array(Jim_Interp *interp,
5683 int argc, Jim_Obj *const *argv)
5684 {
5685 struct command_context *cmd_ctx = current_command_context(interp);
5686 assert(cmd_ctx);
5687 struct target *target = get_current_target(cmd_ctx);
5688 return target_mem2array(interp, target, argc - 1, argv + 1);
5689 }
5690
5691 static int jim_target_array2mem(Jim_Interp *interp,
5692 int argc, Jim_Obj *const *argv)
5693 {
5694 struct command_context *cmd_ctx = current_command_context(interp);
5695 assert(cmd_ctx);
5696 struct target *target = get_current_target(cmd_ctx);
5697 return target_array2mem(interp, target, argc - 1, argv + 1);
5698 }
5699
5700 static int jim_target_tap_disabled(Jim_Interp *interp)
5701 {
5702 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5703 return JIM_ERR;
5704 }
5705
5706 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5707 {
5708 bool allow_defer = false;
5709
5710 struct jim_getopt_info goi;
5711 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5712 if (goi.argc > 1) {
5713 const char *cmd_name = Jim_GetString(argv[0], NULL);
5714 Jim_SetResultFormatted(goi.interp,
5715 "usage: %s ['allow-defer']", cmd_name);
5716 return JIM_ERR;
5717 }
5718 if (goi.argc > 0 &&
5719 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5720 /* consume it */
5721 Jim_Obj *obj;
5722 int e = jim_getopt_obj(&goi, &obj);
5723 if (e != JIM_OK)
5724 return e;
5725 allow_defer = true;
5726 }
5727
5728 struct command_context *cmd_ctx = current_command_context(interp);
5729 assert(cmd_ctx);
5730 struct target *target = get_current_target(cmd_ctx);
5731 if (!target->tap->enabled)
5732 return jim_target_tap_disabled(interp);
5733
5734 if (allow_defer && target->defer_examine) {
5735 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5736 LOG_INFO("Use arp_examine command to examine it manually!");
5737 return JIM_OK;
5738 }
5739
5740 int e = target->type->examine(target);
5741 if (e != ERROR_OK) {
5742 target_reset_examined(target);
5743 return JIM_ERR;
5744 }
5745
5746 target_set_examined(target);
5747
5748 return JIM_OK;
5749 }
5750
5751 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5752 {
5753 struct command_context *cmd_ctx = current_command_context(interp);
5754 assert(cmd_ctx);
5755 struct target *target = get_current_target(cmd_ctx);
5756
5757 Jim_SetResultBool(interp, target_was_examined(target));
5758 return JIM_OK;
5759 }
5760
5761 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5762 {
5763 struct command_context *cmd_ctx = current_command_context(interp);
5764 assert(cmd_ctx);
5765 struct target *target = get_current_target(cmd_ctx);
5766
5767 Jim_SetResultBool(interp, target->defer_examine);
5768 return JIM_OK;
5769 }
5770
5771 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5772 {
5773 if (argc != 1) {
5774 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5775 return JIM_ERR;
5776 }
5777 struct command_context *cmd_ctx = current_command_context(interp);
5778 assert(cmd_ctx);
5779 struct target *target = get_current_target(cmd_ctx);
5780
5781 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5782 return JIM_ERR;
5783
5784 return JIM_OK;
5785 }
5786
5787 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5788 {
5789 if (argc != 1) {
5790 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5791 return JIM_ERR;
5792 }
5793 struct command_context *cmd_ctx = current_command_context(interp);
5794 assert(cmd_ctx);
5795 struct target *target = get_current_target(cmd_ctx);
5796 if (!target->tap->enabled)
5797 return jim_target_tap_disabled(interp);
5798
5799 int e;
5800 if (!(target_was_examined(target)))
5801 e = ERROR_TARGET_NOT_EXAMINED;
5802 else
5803 e = target->type->poll(target);
5804 if (e != ERROR_OK)
5805 return JIM_ERR;
5806 return JIM_OK;
5807 }
5808
5809 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5810 {
5811 struct jim_getopt_info goi;
5812 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5813
5814 if (goi.argc != 2) {
5815 Jim_WrongNumArgs(interp, 0, argv,
5816 "([tT]|[fF]|assert|deassert) BOOL");
5817 return JIM_ERR;
5818 }
5819
5820 struct jim_nvp *n;
5821 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5822 if (e != JIM_OK) {
5823 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5824 return e;
5825 }
5826 /* the halt or not param */
5827 jim_wide a;
5828 e = jim_getopt_wide(&goi, &a);
5829 if (e != JIM_OK)
5830 return e;
5831
5832 struct command_context *cmd_ctx = current_command_context(interp);
5833 assert(cmd_ctx);
5834 struct target *target = get_current_target(cmd_ctx);
5835 if (!target->tap->enabled)
5836 return jim_target_tap_disabled(interp);
5837
5838 if (!target->type->assert_reset || !target->type->deassert_reset) {
5839 Jim_SetResultFormatted(interp,
5840 "No target-specific reset for %s",
5841 target_name(target));
5842 return JIM_ERR;
5843 }
5844
5845 if (target->defer_examine)
5846 target_reset_examined(target);
5847
5848 /* determine if we should halt or not. */
5849 target->reset_halt = (a != 0);
5850 /* When this happens - all workareas are invalid. */
5851 target_free_all_working_areas_restore(target, 0);
5852
5853 /* do the assert */
5854 if (n->value == NVP_ASSERT)
5855 e = target->type->assert_reset(target);
5856 else
5857 e = target->type->deassert_reset(target);
5858 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5859 }
5860
5861 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5862 {
5863 if (argc != 1) {
5864 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5865 return JIM_ERR;
5866 }
5867 struct command_context *cmd_ctx = current_command_context(interp);
5868 assert(cmd_ctx);
5869 struct target *target = get_current_target(cmd_ctx);
5870 if (!target->tap->enabled)
5871 return jim_target_tap_disabled(interp);
5872 int e = target->type->halt(target);
5873 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5874 }
5875
5876 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5877 {
5878 struct jim_getopt_info goi;
5879 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5880
5881 /* params: <name> statename timeoutmsecs */
5882 if (goi.argc != 2) {
5883 const char *cmd_name = Jim_GetString(argv[0], NULL);
5884 Jim_SetResultFormatted(goi.interp,
5885 "%s <state_name> <timeout_in_msec>", cmd_name);
5886 return JIM_ERR;
5887 }
5888
5889 struct jim_nvp *n;
5890 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5891 if (e != JIM_OK) {
5892 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5893 return e;
5894 }
5895 jim_wide a;
5896 e = jim_getopt_wide(&goi, &a);
5897 if (e != JIM_OK)
5898 return e;
5899 struct command_context *cmd_ctx = current_command_context(interp);
5900 assert(cmd_ctx);
5901 struct target *target = get_current_target(cmd_ctx);
5902 if (!target->tap->enabled)
5903 return jim_target_tap_disabled(interp);
5904
5905 e = target_wait_state(target, n->value, a);
5906 if (e != ERROR_OK) {
5907 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5908 Jim_SetResultFormatted(goi.interp,
5909 "target: %s wait %s fails (%#s) %s",
5910 target_name(target), n->name,
5911 obj, target_strerror_safe(e));
5912 return JIM_ERR;
5913 }
5914 return JIM_OK;
5915 }
5916 /* List for human, Events defined for this target.
5917 * scripts/programs should use 'name cget -event NAME'
5918 */
5919 COMMAND_HANDLER(handle_target_event_list)
5920 {
5921 struct target *target = get_current_target(CMD_CTX);
5922 struct target_event_action *teap = target->event_action;
5923
5924 command_print(CMD, "Event actions for target (%d) %s\n",
5925 target->target_number,
5926 target_name(target));
5927 command_print(CMD, "%-25s | Body", "Event");
5928 command_print(CMD, "------------------------- | "
5929 "----------------------------------------");
5930 while (teap) {
5931 command_print(CMD, "%-25s | %s",
5932 target_event_name(teap->event),
5933 Jim_GetString(teap->body, NULL));
5934 teap = teap->next;
5935 }
5936 command_print(CMD, "***END***");
5937 return ERROR_OK;
5938 }
5939 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5940 {
5941 if (argc != 1) {
5942 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5943 return JIM_ERR;
5944 }
5945 struct command_context *cmd_ctx = current_command_context(interp);
5946 assert(cmd_ctx);
5947 struct target *target = get_current_target(cmd_ctx);
5948 Jim_SetResultString(interp, target_state_name(target), -1);
5949 return JIM_OK;
5950 }
5951 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5952 {
5953 struct jim_getopt_info goi;
5954 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5955 if (goi.argc != 1) {
5956 const char *cmd_name = Jim_GetString(argv[0], NULL);
5957 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5958 return JIM_ERR;
5959 }
5960 struct jim_nvp *n;
5961 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5962 if (e != JIM_OK) {
5963 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5964 return e;
5965 }
5966 struct command_context *cmd_ctx = current_command_context(interp);
5967 assert(cmd_ctx);
5968 struct target *target = get_current_target(cmd_ctx);
5969 target_handle_event(target, n->value);
5970 return JIM_OK;
5971 }
5972
5973 static const struct command_registration target_instance_command_handlers[] = {
5974 {
5975 .name = "configure",
5976 .mode = COMMAND_ANY,
5977 .jim_handler = jim_target_configure,
5978 .help = "configure a new target for use",
5979 .usage = "[target_attribute ...]",
5980 },
5981 {
5982 .name = "cget",
5983 .mode = COMMAND_ANY,
5984 .jim_handler = jim_target_configure,
5985 .help = "returns the specified target attribute",
5986 .usage = "target_attribute",
5987 },
5988 {
5989 .name = "mwd",
5990 .handler = handle_mw_command,
5991 .mode = COMMAND_EXEC,
5992 .help = "Write 64-bit word(s) to target memory",
5993 .usage = "address data [count]",
5994 },
5995 {
5996 .name = "mww",
5997 .handler = handle_mw_command,
5998 .mode = COMMAND_EXEC,
5999 .help = "Write 32-bit word(s) to target memory",
6000 .usage = "address data [count]",
6001 },
6002 {
6003 .name = "mwh",
6004 .handler = handle_mw_command,
6005 .mode = COMMAND_EXEC,
6006 .help = "Write 16-bit half-word(s) to target memory",
6007 .usage = "address data [count]",
6008 },
6009 {
6010 .name = "mwb",
6011 .handler = handle_mw_command,
6012 .mode = COMMAND_EXEC,
6013 .help = "Write byte(s) to target memory",
6014 .usage = "address data [count]",
6015 },
6016 {
6017 .name = "mdd",
6018 .handler = handle_md_command,
6019 .mode = COMMAND_EXEC,
6020 .help = "Display target memory as 64-bit words",
6021 .usage = "address [count]",
6022 },
6023 {
6024 .name = "mdw",
6025 .handler = handle_md_command,
6026 .mode = COMMAND_EXEC,
6027 .help = "Display target memory as 32-bit words",
6028 .usage = "address [count]",
6029 },
6030 {
6031 .name = "mdh",
6032 .handler = handle_md_command,
6033 .mode = COMMAND_EXEC,
6034 .help = "Display target memory as 16-bit half-words",
6035 .usage = "address [count]",
6036 },
6037 {
6038 .name = "mdb",
6039 .handler = handle_md_command,
6040 .mode = COMMAND_EXEC,
6041 .help = "Display target memory as 8-bit bytes",
6042 .usage = "address [count]",
6043 },
6044 {
6045 .name = "array2mem",
6046 .mode = COMMAND_EXEC,
6047 .jim_handler = jim_target_array2mem,
6048 .help = "Writes Tcl array of 8/16/32 bit numbers "
6049 "to target memory",
6050 .usage = "arrayname bitwidth address count",
6051 },
6052 {
6053 .name = "mem2array",
6054 .mode = COMMAND_EXEC,
6055 .jim_handler = jim_target_mem2array,
6056 .help = "Loads Tcl array of 8/16/32 bit numbers "
6057 "from target memory",
6058 .usage = "arrayname bitwidth address count",
6059 },
6060 {
6061 .name = "get_reg",
6062 .mode = COMMAND_EXEC,
6063 .jim_handler = target_jim_get_reg,
6064 .help = "Get register values from the target",
6065 .usage = "list",
6066 },
6067 {
6068 .name = "set_reg",
6069 .mode = COMMAND_EXEC,
6070 .jim_handler = target_jim_set_reg,
6071 .help = "Set target register values",
6072 .usage = "dict",
6073 },
6074 {
6075 .name = "read_memory",
6076 .mode = COMMAND_EXEC,
6077 .jim_handler = target_jim_read_memory,
6078 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
6079 .usage = "address width count ['phys']",
6080 },
6081 {
6082 .name = "write_memory",
6083 .mode = COMMAND_EXEC,
6084 .jim_handler = target_jim_write_memory,
6085 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
6086 .usage = "address width data ['phys']",
6087 },
6088 {
6089 .name = "eventlist",
6090 .handler = handle_target_event_list,
6091 .mode = COMMAND_EXEC,
6092 .help = "displays a table of events defined for this target",
6093 .usage = "",
6094 },
6095 {
6096 .name = "curstate",
6097 .mode = COMMAND_EXEC,
6098 .jim_handler = jim_target_current_state,
6099 .help = "displays the current state of this target",
6100 },
6101 {
6102 .name = "arp_examine",
6103 .mode = COMMAND_EXEC,
6104 .jim_handler = jim_target_examine,
6105 .help = "used internally for reset processing",
6106 .usage = "['allow-defer']",
6107 },
6108 {
6109 .name = "was_examined",
6110 .mode = COMMAND_EXEC,
6111 .jim_handler = jim_target_was_examined,
6112 .help = "used internally for reset processing",
6113 },
6114 {
6115 .name = "examine_deferred",
6116 .mode = COMMAND_EXEC,
6117 .jim_handler = jim_target_examine_deferred,
6118 .help = "used internally for reset processing",
6119 },
6120 {
6121 .name = "arp_halt_gdb",
6122 .mode = COMMAND_EXEC,
6123 .jim_handler = jim_target_halt_gdb,
6124 .help = "used internally for reset processing to halt GDB",
6125 },
6126 {
6127 .name = "arp_poll",
6128 .mode = COMMAND_EXEC,
6129 .jim_handler = jim_target_poll,
6130 .help = "used internally for reset processing",
6131 },
6132 {
6133 .name = "arp_reset",
6134 .mode = COMMAND_EXEC,
6135 .jim_handler = jim_target_reset,
6136 .help = "used internally for reset processing",
6137 },
6138 {
6139 .name = "arp_halt",
6140 .mode = COMMAND_EXEC,
6141 .jim_handler = jim_target_halt,
6142 .help = "used internally for reset processing",
6143 },
6144 {
6145 .name = "arp_waitstate",
6146 .mode = COMMAND_EXEC,
6147 .jim_handler = jim_target_wait_state,
6148 .help = "used internally for reset processing",
6149 },
6150 {
6151 .name = "invoke-event",
6152 .mode = COMMAND_EXEC,
6153 .jim_handler = jim_target_invoke_event,
6154 .help = "invoke handler for specified event",
6155 .usage = "event_name",
6156 },
6157 COMMAND_REGISTRATION_DONE
6158 };
6159
/**
 * Implements 'target create <name> <type> [options...]'.
 *
 * Allocates and initializes a new struct target, applies the remaining
 * arguments as 'configure' options, runs the type's target_create() hook,
 * and registers the per-instance command group named after the target.
 *
 * @param goi getopt state positioned at <name>.
 * @returns JIM_OK on success; JIM_ERR with a message in the Jim result
 *          otherwise.  On any failure everything allocated so far is
 *          released in reverse order of acquisition.
 */
static int target_create(struct jim_getopt_info *goi)
{
	Jim_Obj *new_cmd;
	Jim_Cmd *cmd;
	const char *cp;
	int e;
	int x;
	struct target *target;
	struct command_context *cmd_ctx;

	cmd_ctx = current_command_context(goi->interp);
	assert(cmd_ctx);

	if (goi->argc < 3) {
		Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
		return JIM_ERR;
	}

	/* COMMAND: the new target's name must not clash with an existing command */
	jim_getopt_obj(goi, &new_cmd);
	/* does this command exist? */
	cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
	if (cmd) {
		cp = Jim_GetString(new_cmd, NULL);
		Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
		return JIM_ERR;
	}

	/* TYPE: the transport may substitute its own target type string */
	e = jim_getopt_string(goi, &cp, NULL);
	if (e != JIM_OK)
		return e;
	struct transport *tr = get_current_transport();
	if (tr->override_target) {
		e = tr->override_target(&cp);
		if (e != ERROR_OK) {
			LOG_ERROR("The selected transport doesn't support this target");
			return JIM_ERR;
		}
		LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
	}
	/* now does target type exist */
	for (x = 0 ; target_types[x] ; x++) {
		if (strcmp(cp, target_types[x]->name) == 0) {
			/* found */
			break;
		}
	}
	if (!target_types[x]) {
		/* build a "try one of a, b or c" suggestion list */
		Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
		for (x = 0 ; target_types[x] ; x++) {
			if (target_types[x + 1]) {
				Jim_AppendStrings(goi->interp,
						Jim_GetResult(goi->interp),
						target_types[x]->name,
						", ", NULL);
			} else {
				Jim_AppendStrings(goi->interp,
						Jim_GetResult(goi->interp),
						" or ",
						target_types[x]->name, NULL);
			}
		}
		return JIM_ERR;
	}

	/* Create it */
	target = calloc(1, sizeof(struct target));
	if (!target) {
		LOG_ERROR("Out of memory");
		return JIM_ERR;
	}

	/* set empty smp cluster */
	target->smp_targets = &empty_smp_targets;

	/* set target number */
	target->target_number = new_target_number();

	/* allocate memory for each unique target type;
	 * a private copy so -event etc. can patch per-instance hooks */
	target->type = malloc(sizeof(struct target_type));
	if (!target->type) {
		LOG_ERROR("Out of memory");
		free(target);
		return JIM_ERR;
	}

	memcpy(target->type, target_types[x], sizeof(struct target_type));

	/* default to first core, override with -coreid */
	target->coreid = 0;

	target->working_area = 0x0;
	target->working_area_size = 0x0;
	target->working_areas = NULL;
	target->backup_working_area = 0;

	target->state = TARGET_UNKNOWN;
	target->debug_reason = DBG_REASON_UNDEFINED;
	target->reg_cache = NULL;
	target->breakpoints = NULL;
	target->watchpoints = NULL;
	target->next = NULL;
	target->arch_info = NULL;

	target->verbose_halt_msg = true;

	target->halt_issued = false;

	/* initialize trace information */
	target->trace_info = calloc(1, sizeof(struct trace));
	if (!target->trace_info) {
		LOG_ERROR("Out of memory");
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	target->dbgmsg = NULL;
	target->dbg_msg_enabled = 0;

	target->endianness = TARGET_ENDIAN_UNKNOWN;

	target->rtos = NULL;
	target->rtos_auto_detect = false;

	target->gdb_port_override = NULL;
	target->gdb_max_connections = 1;

	/* Do the rest as "configure" options */
	goi->isconfigure = 1;
	e = target_configure(goi, target);

	if (e == JIM_OK) {
		/* either a DAP or a TAP must have been specified */
		if (target->has_dap) {
			if (!target->dap_configured) {
				Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		} else {
			if (!target->tap_configured) {
				Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		}
		/* tap must be set after target was configured */
		if (!target->tap)
			e = JIM_ERR;
	}

	if (e != JIM_OK) {
		/* unwind everything allocated so far, newest first */
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return e;
	}

	if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
		/* default endian to little if not specified */
		target->endianness = TARGET_LITTLE_ENDIAN;
	}

	cp = Jim_GetString(new_cmd, NULL);
	target->cmd_name = strdup(cp);
	if (!target->cmd_name) {
		LOG_ERROR("Out of memory");
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* give the type-specific constructor a chance to reject/extend */
	if (target->type->target_create) {
		e = (*(target->type->target_create))(target, goi->interp);
		if (e != ERROR_OK) {
			LOG_DEBUG("target_create failed");
			free(target->cmd_name);
			rtos_destroy(target);
			free(target->gdb_port_override);
			free(target->trace_info);
			free(target->type);
			free(target);
			return JIM_ERR;
		}
	}

	/* create the target specific commands */
	if (target->type->commands) {
		e = register_commands(cmd_ctx, NULL, target->type->commands);
		if (e != ERROR_OK)
			LOG_ERROR("unable to register '%s' commands", cp);
	}

	/* now - create the new target name command */
	const struct command_registration target_subcommands[] = {
		{
			.chain = target_instance_command_handlers,
		},
		{
			.chain = target->type->commands,
		},
		COMMAND_REGISTRATION_DONE
	};
	const struct command_registration target_commands[] = {
		{
			.name = cp,
			.mode = COMMAND_ANY,
			.help = "target command group",
			.usage = "",
			.chain = target_subcommands,
		},
		COMMAND_REGISTRATION_DONE
	};
	e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
	if (e != ERROR_OK) {
		if (target->type->deinit_target)
			target->type->deinit_target(target);
		free(target->cmd_name);
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* append to end of list */
	append_to_list_all_targets(target);

	/* the freshly created target becomes the current one */
	cmd_ctx->current_target = target;
	return JIM_OK;
}
6396
6397 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6398 {
6399 if (argc != 1) {
6400 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6401 return JIM_ERR;
6402 }
6403 struct command_context *cmd_ctx = current_command_context(interp);
6404 assert(cmd_ctx);
6405
6406 struct target *target = get_current_target_or_null(cmd_ctx);
6407 if (target)
6408 Jim_SetResultString(interp, target_name(target), -1);
6409 return JIM_OK;
6410 }
6411
6412 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6413 {
6414 if (argc != 1) {
6415 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6416 return JIM_ERR;
6417 }
6418 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6419 for (unsigned x = 0; target_types[x]; x++) {
6420 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6421 Jim_NewStringObj(interp, target_types[x]->name, -1));
6422 }
6423 return JIM_OK;
6424 }
6425
6426 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6427 {
6428 if (argc != 1) {
6429 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6430 return JIM_ERR;
6431 }
6432 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6433 struct target *target = all_targets;
6434 while (target) {
6435 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6436 Jim_NewStringObj(interp, target_name(target), -1));
6437 target = target->next;
6438 }
6439 return JIM_OK;
6440 }
6441
6442 static struct target_list *
6443 __attribute__((warn_unused_result))
6444 create_target_list_node(Jim_Obj *const name) {
6445 int len;
6446 const char *targetname = Jim_GetString(name, &len);
6447 struct target *target = get_target(targetname);
6448 LOG_DEBUG("%s ", targetname);
6449 if (!target)
6450 return NULL;
6451
6452 struct target_list *new = malloc(sizeof(struct target_list));
6453 if (!new) {
6454 LOG_ERROR("Out of memory");
6455 return new;
6456 }
6457
6458 new->target = target;
6459 return new;
6460 }
6461
6462 static int get_target_with_common_rtos_type(struct list_head *lh, struct target **result)
6463 {
6464 struct target *target = NULL;
6465 struct target_list *curr;
6466 foreach_smp_target(curr, lh) {
6467 struct rtos *curr_rtos = curr->target->rtos;
6468 if (curr_rtos) {
6469 if (target && target->rtos && target->rtos->type != curr_rtos->type) {
6470 LOG_ERROR("Different rtos types in members of one smp target!");
6471 return JIM_ERR;
6472 }
6473 target = curr->target;
6474 }
6475 }
6476 *result = target;
6477 return JIM_OK;
6478 }
6479
6480 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6481 {
6482 static int smp_group = 1;
6483
6484 if (argc == 1) {
6485 LOG_DEBUG("Empty SMP target");
6486 return JIM_OK;
6487 }
6488 LOG_DEBUG("%d", argc);
6489 /* argv[1] = target to associate in smp
6490 * argv[2] = target to associate in smp
6491 * argv[3] ...
6492 */
6493
6494 struct list_head *lh = malloc(sizeof(*lh));
6495 if (!lh) {
6496 LOG_ERROR("Out of memory");
6497 return JIM_ERR;
6498 }
6499 INIT_LIST_HEAD(lh);
6500
6501 for (int i = 1; i < argc; i++) {
6502 struct target_list *new = create_target_list_node(argv[i]);
6503 if (new)
6504 list_add_tail(&new->lh, lh);
6505 }
6506 /* now parse the list of cpu and put the target in smp mode*/
6507 struct target_list *curr;
6508 foreach_smp_target(curr, lh) {
6509 struct target *target = curr->target;
6510 target->smp = smp_group;
6511 target->smp_targets = lh;
6512 }
6513 smp_group++;
6514
6515 struct target *rtos_target;
6516 int retval = get_target_with_common_rtos_type(lh, &rtos_target);
6517 if (retval == JIM_OK && rtos_target)
6518 retval = rtos_smp_init(rtos_target);
6519
6520 return retval;
6521 }
6522
6523
6524 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6525 {
6526 struct jim_getopt_info goi;
6527 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6528 if (goi.argc < 3) {
6529 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6530 "<name> <target_type> [<target_options> ...]");
6531 return JIM_ERR;
6532 }
6533 return target_create(&goi);
6534 }
6535
/* Subcommands of the config-mode 'target' command
 * ('target init', 'target create', 'target names', ...). */
static const struct command_registration target_subcommand_handlers[] = {
	{
		.name = "init",
		.mode = COMMAND_CONFIG,
		.handler = handle_target_init_command,
		.help = "initialize targets",
		.usage = "",
	},
	{
		.name = "create",
		.mode = COMMAND_CONFIG,
		.jim_handler = jim_target_create,
		.usage = "name type '-chain-position' name [options ...]",
		.help = "Creates and selects a new target",
	},
	{
		.name = "current",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_current,
		.help = "Returns the currently selected target",
	},
	{
		.name = "types",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_types,
		.help = "Returns the available target types as "
				"a list of strings",
	},
	{
		.name = "names",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_names,
		.help = "Returns the names of all targets as a list of strings",
	},
	{
		.name = "smp",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_smp,
		.usage = "targetname1 targetname2 ...",
		.help = "gather several target in a smp list"
	},

	COMMAND_REGISTRATION_DONE
};
6580
/* One image chunk cached in server memory by 'fast_load_image' and
 * later written to the target by 'fast_load'. */
struct fast_load {
	target_addr_t address;	/* target address the data belongs at */
	uint8_t *data;		/* heap-allocated copy of the section payload */
	int length;		/* number of bytes in data */

};
6587
/* Image cache built by 'fast_load_image': fastload points at an array
 * of fastload_num chunks, or is NULL when no image is cached. */
static int fastload_num;
static struct fast_load *fastload;
6590
6591 static void free_fastload(void)
6592 {
6593 if (fastload) {
6594 for (int i = 0; i < fastload_num; i++)
6595 free(fastload[i].data);
6596 free(fastload);
6597 fastload = NULL;
6598 }
6599 }
6600
/* 'fast_load_image' command: read an image file and cache its sections
 * (clipped to the requested [min_address, max_address] window) in server
 * memory; 'fast_load' later writes the cached data to the target.
 * Mainly used for profiling the target write path. */
COMMAND_HANDLER(handle_fast_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* -1 wraps to the highest address */

	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct duration bench;
	duration_start(&bench);

	/* Optional third argument selects the image type ('bin', 'ihex', ...). */
	retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
	if (retval != ERROR_OK)
		return retval;

	image_size = 0x0;
	retval = ERROR_OK;
	fastload_num = image.num_sections;
	fastload = malloc(sizeof(struct fast_load)*image.num_sections);
	if (!fastload) {
		command_print(CMD, "out of memory");
		image_close(&image);
		return ERROR_FAIL;
	}
	/* Zero the array so a partially-built cache can be freed safely. */
	memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD, "error allocating buffer for section (%d bytes)",
				(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* Keep only the portion of the section that overlaps the
		 * requested address window. */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {
			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			fastload[i].address = image.sections[i].base_address + offset;
			fastload[i].data = malloc(length);
			if (!fastload[i].data) {
				free(buffer);
				command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
					length);
				retval = ERROR_FAIL;
				break;
			}
			memcpy(fastload[i].data, buffer + offset, length);
			fastload[i].length = length;

			image_size += length;
			command_print(CMD, "%u bytes written at address 0x%8.8x",
				(unsigned int)length,
				((unsigned int)(image.sections[i].base_address + offset)));
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "Loaded %" PRIu32 " bytes "
			"in %fs (%0.3f KiB/s)", image_size,
			duration_elapsed(&bench), duration_kbps(&bench, image_size));

		command_print(CMD,
			"WARNING: image has not been loaded to target!"
			"You can issue a 'fast_load' to finish loading.");
	}

	image_close(&image);

	/* On any failure drop the partial cache so 'fast_load' cannot
	 * write stale or incomplete data. */
	if (retval != ERROR_OK)
		free_fastload();

	return retval;
}
6702
6703 COMMAND_HANDLER(handle_fast_load_command)
6704 {
6705 if (CMD_ARGC > 0)
6706 return ERROR_COMMAND_SYNTAX_ERROR;
6707 if (!fastload) {
6708 LOG_ERROR("No image in memory");
6709 return ERROR_FAIL;
6710 }
6711 int i;
6712 int64_t ms = timeval_ms();
6713 int size = 0;
6714 int retval = ERROR_OK;
6715 for (i = 0; i < fastload_num; i++) {
6716 struct target *target = get_current_target(CMD_CTX);
6717 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6718 (unsigned int)(fastload[i].address),
6719 (unsigned int)(fastload[i].length));
6720 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6721 if (retval != ERROR_OK)
6722 break;
6723 size += fastload[i].length;
6724 }
6725 if (retval == ERROR_OK) {
6726 int64_t after = timeval_ms();
6727 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6728 }
6729 return retval;
6730 }
6731
/* Top-level commands: 'targets' (select/list) and the config-mode
 * 'target' command with its subcommand chain. */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
6750
/* Register the top-level 'target' and 'targets' commands. */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6755
/* When true, nag after each reset about performance options ('reset_nag'). */
static bool target_reset_nag = true;
6757
/* Accessor for the 'reset_nag' setting. */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
6762
/* 'reset_nag' command: enable/disable (or query) the post-reset nag. */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
			&target_reset_nag, "Nag after each reset about options to improve "
			"performance");
}
6769
6770 COMMAND_HANDLER(handle_ps_command)
6771 {
6772 struct target *target = get_current_target(CMD_CTX);
6773 char *display;
6774 if (target->state != TARGET_HALTED) {
6775 LOG_INFO("target not halted !!");
6776 return ERROR_OK;
6777 }
6778
6779 if ((target->rtos) && (target->rtos->type)
6780 && (target->rtos->type->ps_command)) {
6781 display = target->rtos->type->ps_command(target);
6782 command_print(CMD, "%s", display);
6783 free(display);
6784 return ERROR_OK;
6785 } else {
6786 LOG_INFO("failed");
6787 return ERROR_TARGET_FAILURE;
6788 }
6789 }
6790
/* Print an optional label followed by 'size' bytes in hex, then end the
 * line. Used by the memory-access self test to dump compare failures. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
	if (text)
		command_print_sameline(cmd, "%s", text);

	int i = 0;
	while (i < size) {
		command_print_sameline(cmd, " %02x", buf[i]);
		i++;
	}
	command_print(cmd, " ");
}
6799
6800 COMMAND_HANDLER(handle_test_mem_access_command)
6801 {
6802 struct target *target = get_current_target(CMD_CTX);
6803 uint32_t test_size;
6804 int retval = ERROR_OK;
6805
6806 if (target->state != TARGET_HALTED) {
6807 LOG_INFO("target not halted !!");
6808 return ERROR_FAIL;
6809 }
6810
6811 if (CMD_ARGC != 1)
6812 return ERROR_COMMAND_SYNTAX_ERROR;
6813
6814 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6815
6816 /* Test reads */
6817 size_t num_bytes = test_size + 4;
6818
6819 struct working_area *wa = NULL;
6820 retval = target_alloc_working_area(target, num_bytes, &wa);
6821 if (retval != ERROR_OK) {
6822 LOG_ERROR("Not enough working area");
6823 return ERROR_FAIL;
6824 }
6825
6826 uint8_t *test_pattern = malloc(num_bytes);
6827
6828 for (size_t i = 0; i < num_bytes; i++)
6829 test_pattern[i] = rand();
6830
6831 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6832 if (retval != ERROR_OK) {
6833 LOG_ERROR("Test pattern write failed");
6834 goto out;
6835 }
6836
6837 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6838 for (int size = 1; size <= 4; size *= 2) {
6839 for (int offset = 0; offset < 4; offset++) {
6840 uint32_t count = test_size / size;
6841 size_t host_bufsiz = (count + 2) * size + host_offset;
6842 uint8_t *read_ref = malloc(host_bufsiz);
6843 uint8_t *read_buf = malloc(host_bufsiz);
6844
6845 for (size_t i = 0; i < host_bufsiz; i++) {
6846 read_ref[i] = rand();
6847 read_buf[i] = read_ref[i];
6848 }
6849 command_print_sameline(CMD,
6850 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6851 size, offset, host_offset ? "un" : "");
6852
6853 struct duration bench;
6854 duration_start(&bench);
6855
6856 retval = target_read_memory(target, wa->address + offset, size, count,
6857 read_buf + size + host_offset);
6858
6859 duration_measure(&bench);
6860
6861 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6862 command_print(CMD, "Unsupported alignment");
6863 goto next;
6864 } else if (retval != ERROR_OK) {
6865 command_print(CMD, "Memory read failed");
6866 goto next;
6867 }
6868
6869 /* replay on host */
6870 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6871
6872 /* check result */
6873 int result = memcmp(read_ref, read_buf, host_bufsiz);
6874 if (result == 0) {
6875 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6876 duration_elapsed(&bench),
6877 duration_kbps(&bench, count * size));
6878 } else {
6879 command_print(CMD, "Compare failed");
6880 binprint(CMD, "ref:", read_ref, host_bufsiz);
6881 binprint(CMD, "buf:", read_buf, host_bufsiz);
6882 }
6883 next:
6884 free(read_ref);
6885 free(read_buf);
6886 }
6887 }
6888 }
6889
6890 out:
6891 free(test_pattern);
6892
6893 target_free_working_area(target, wa);
6894
6895 /* Test writes */
6896 num_bytes = test_size + 4 + 4 + 4;
6897
6898 retval = target_alloc_working_area(target, num_bytes, &wa);
6899 if (retval != ERROR_OK) {
6900 LOG_ERROR("Not enough working area");
6901 return ERROR_FAIL;
6902 }
6903
6904 test_pattern = malloc(num_bytes);
6905
6906 for (size_t i = 0; i < num_bytes; i++)
6907 test_pattern[i] = rand();
6908
6909 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6910 for (int size = 1; size <= 4; size *= 2) {
6911 for (int offset = 0; offset < 4; offset++) {
6912 uint32_t count = test_size / size;
6913 size_t host_bufsiz = count * size + host_offset;
6914 uint8_t *read_ref = malloc(num_bytes);
6915 uint8_t *read_buf = malloc(num_bytes);
6916 uint8_t *write_buf = malloc(host_bufsiz);
6917
6918 for (size_t i = 0; i < host_bufsiz; i++)
6919 write_buf[i] = rand();
6920 command_print_sameline(CMD,
6921 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6922 size, offset, host_offset ? "un" : "");
6923
6924 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6925 if (retval != ERROR_OK) {
6926 command_print(CMD, "Test pattern write failed");
6927 goto nextw;
6928 }
6929
6930 /* replay on host */
6931 memcpy(read_ref, test_pattern, num_bytes);
6932 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6933
6934 struct duration bench;
6935 duration_start(&bench);
6936
6937 retval = target_write_memory(target, wa->address + size + offset, size, count,
6938 write_buf + host_offset);
6939
6940 duration_measure(&bench);
6941
6942 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6943 command_print(CMD, "Unsupported alignment");
6944 goto nextw;
6945 } else if (retval != ERROR_OK) {
6946 command_print(CMD, "Memory write failed");
6947 goto nextw;
6948 }
6949
6950 /* read back */
6951 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6952 if (retval != ERROR_OK) {
6953 command_print(CMD, "Test pattern write failed");
6954 goto nextw;
6955 }
6956
6957 /* check result */
6958 int result = memcmp(read_ref, read_buf, num_bytes);
6959 if (result == 0) {
6960 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6961 duration_elapsed(&bench),
6962 duration_kbps(&bench, count * size));
6963 } else {
6964 command_print(CMD, "Compare failed");
6965 binprint(CMD, "ref:", read_ref, num_bytes);
6966 binprint(CMD, "buf:", read_buf, num_bytes);
6967 }
6968 nextw:
6969 free(read_ref);
6970 free(read_buf);
6971 }
6972 }
6973 }
6974
6975 free(test_pattern);
6976
6977 target_free_working_area(target, wa);
6978 return retval;
6979 }
6980
/* Per-target run-time commands registered once targets exist:
 * execution control, memory/register access, image load/verify,
 * breakpoints/watchpoints and diagnostics. */
static const struct command_registration target_exec_command_handlers[] = {
	{
		.name = "fast_load_image",
		.handler = handle_fast_load_image_command,
		.mode = COMMAND_ANY,
		.help = "Load image into server memory for later use by "
			"fast_load; primarily for profiling",
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address [max_length]]",
	},
	{
		.name = "fast_load",
		.handler = handle_fast_load_command,
		.mode = COMMAND_EXEC,
		.help = "loads active fast load image to current target "
			"- mainly for profiling purposes",
		.usage = "",
	},
	{
		.name = "profile",
		.handler = handle_profile_command,
		.mode = COMMAND_EXEC,
		.usage = "seconds filename [start end]",
		.help = "profiling samples the CPU PC",
	},
	/** @todo don't register virt2phys() unless target supports it */
	{
		.name = "virt2phys",
		.handler = handle_virt2phys_command,
		.mode = COMMAND_ANY,
		.help = "translate a virtual address into a physical address",
		.usage = "virtual_address",
	},
	{
		.name = "reg",
		.handler = handle_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display (reread from target with \"force\") or set a register; "
			"with no arguments, displays all registers and their values",
		.usage = "[(register_number|register_name) [(value|'force')]]",
	},
	{
		.name = "poll",
		.handler = handle_poll_command,
		.mode = COMMAND_EXEC,
		.help = "poll target state; or reconfigure background polling",
		.usage = "['on'|'off']",
	},
	{
		.name = "wait_halt",
		.handler = handle_wait_halt_command,
		.mode = COMMAND_EXEC,
		.help = "wait up to the specified number of milliseconds "
			"(default 5000) for a previously requested halt",
		.usage = "[milliseconds]",
	},
	{
		.name = "halt",
		.handler = handle_halt_command,
		.mode = COMMAND_EXEC,
		.help = "request target to halt, then wait up to the specified "
			"number of milliseconds (default 5000) for it to complete",
		.usage = "[milliseconds]",
	},
	{
		.name = "resume",
		.handler = handle_resume_command,
		.mode = COMMAND_EXEC,
		.help = "resume target execution from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "reset",
		.handler = handle_reset_command,
		.mode = COMMAND_EXEC,
		.usage = "[run|halt|init]",
		.help = "Reset all targets into the specified mode. "
			"Default reset mode is run, if not given.",
	},
	{
		.name = "soft_reset_halt",
		.handler = handle_soft_reset_halt_command,
		.mode = COMMAND_EXEC,
		.usage = "",
		.help = "halt the target and do a soft reset",
	},
	{
		.name = "step",
		.handler = handle_step_command,
		.mode = COMMAND_EXEC,
		.help = "step one instruction from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory double-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory half-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory bytes",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory double-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory half-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory byte",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "bp",
		.handler = handle_bp_command,
		.mode = COMMAND_EXEC,
		.help = "list or set hardware or software breakpoint",
		.usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
	},
	{
		.name = "rbp",
		.handler = handle_rbp_command,
		.mode = COMMAND_EXEC,
		.help = "remove breakpoint",
		.usage = "'all' | address",
	},
	{
		.name = "wp",
		.handler = handle_wp_command,
		.mode = COMMAND_EXEC,
		.help = "list (no params) or create watchpoints",
		.usage = "[address length [('r'|'w'|'a') value [mask]]]",
	},
	{
		.name = "rwp",
		.handler = handle_rwp_command,
		.mode = COMMAND_EXEC,
		.help = "remove watchpoint",
		.usage = "address",
	},
	{
		.name = "load_image",
		.handler = handle_load_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address] [max_length]",
	},
	{
		.name = "dump_image",
		.handler = handle_dump_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address size",
	},
	{
		.name = "verify_image_checksum",
		.handler = handle_verify_image_checksum_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "verify_image",
		.handler = handle_verify_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "test_image",
		.handler = handle_test_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	{
		.name = "reset_nag",
		.handler = handle_target_reset_nag,
		.mode = COMMAND_ANY,
		.help = "Nag after each reset about options that could have been "
			"enabled to improve performance.",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "ps",
		.handler = handle_ps_command,
		.mode = COMMAND_EXEC,
		.help = "list all tasks",
		.usage = "",
	},
	{
		.name = "test_mem_access",
		.handler = handle_test_mem_access_command,
		.mode = COMMAND_EXEC,
		.help = "Test the target's memory access functions",
		.usage = "size",
	},

	COMMAND_REGISTRATION_DONE
};
7242 static int target_register_user_commands(struct command_context *cmd_ctx)
7243 {
7244 int retval = ERROR_OK;
7245 retval = target_request_register_commands(cmd_ctx);
7246 if (retval != ERROR_OK)
7247 return retval;
7248
7249 retval = trace_register_commands(cmd_ctx);
7250 if (retval != ERROR_OK)
7251 return retval;
7252
7253
7254 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7255 }

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)