target: fix assert in 'monitor profile' on constant PC
[openocd.git] / src / target / target.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2007-2010 Øyvind Harboe *
8 * oyvind.harboe@zylin.com *
9 * *
10 * Copyright (C) 2008, Duane Ellis *
11 * openocd@duaneeellis.com *
12 * *
13 * Copyright (C) 2008 by Spencer Oliver *
14 * spen@spen-soft.co.uk *
15 * *
16 * Copyright (C) 2008 by Rick Altherr *
17 * kc8apf@kc8apf.net> *
18 * *
19 * Copyright (C) 2011 by Broadcom Corporation *
20 * Evan Hunter - ehunter@broadcom.com *
21 * *
22 * Copyright (C) ST-Ericsson SA 2011 *
23 * michel.jaouen@stericsson.com : smp minimum support *
24 * *
25 * Copyright (C) 2011 Andreas Fritiofson *
26 * andreas.fritiofson@gmail.com *
27 ***************************************************************************/
28
29 #ifdef HAVE_CONFIG_H
30 #include "config.h"
31 #endif
32
33 #include <helper/align.h>
34 #include <helper/time_support.h>
35 #include <jtag/jtag.h>
36 #include <flash/nor/core.h>
37
38 #include "target.h"
39 #include "target_type.h"
40 #include "target_request.h"
41 #include "breakpoints.h"
42 #include "register.h"
43 #include "trace.h"
44 #include "image.h"
45 #include "rtos/rtos.h"
46 #include "transport/transport.h"
47 #include "arm_cti.h"
48 #include "smp.h"
49 #include "semihosting_common.h"
50
51 /* default halt wait timeout (ms) */
52 #define DEFAULT_HALT_TIMEOUT 5000
53
54 static int target_read_buffer_default(struct target *target, target_addr_t address,
55 uint32_t count, uint8_t *buffer);
56 static int target_write_buffer_default(struct target *target, target_addr_t address,
57 uint32_t count, const uint8_t *buffer);
58 static int target_array2mem(Jim_Interp *interp, struct target *target,
59 int argc, Jim_Obj * const *argv);
60 static int target_mem2array(Jim_Interp *interp, struct target *target,
61 int argc, Jim_Obj * const *argv);
62 static int target_register_user_commands(struct command_context *cmd_ctx);
63 static int target_get_gdb_fileio_info_default(struct target *target,
64 struct gdb_fileio_info *fileio_info);
65 static int target_gdb_fileio_end_default(struct target *target, int retcode,
66 int fileio_errno, bool ctrl_c);
67
68 /* targets */
69 extern struct target_type arm7tdmi_target;
70 extern struct target_type arm720t_target;
71 extern struct target_type arm9tdmi_target;
72 extern struct target_type arm920t_target;
73 extern struct target_type arm966e_target;
74 extern struct target_type arm946e_target;
75 extern struct target_type arm926ejs_target;
76 extern struct target_type fa526_target;
77 extern struct target_type feroceon_target;
78 extern struct target_type dragonite_target;
79 extern struct target_type xscale_target;
80 extern struct target_type xtensa_chip_target;
81 extern struct target_type cortexm_target;
82 extern struct target_type cortexa_target;
83 extern struct target_type aarch64_target;
84 extern struct target_type cortexr4_target;
85 extern struct target_type arm11_target;
86 extern struct target_type ls1_sap_target;
87 extern struct target_type mips_m4k_target;
88 extern struct target_type mips_mips64_target;
89 extern struct target_type avr_target;
90 extern struct target_type dsp563xx_target;
91 extern struct target_type dsp5680xx_target;
92 extern struct target_type testee_target;
93 extern struct target_type avr32_ap7k_target;
94 extern struct target_type hla_target;
95 extern struct target_type nds32_v2_target;
96 extern struct target_type nds32_v3_target;
97 extern struct target_type nds32_v3m_target;
98 extern struct target_type esp32_target;
99 extern struct target_type esp32s2_target;
100 extern struct target_type esp32s3_target;
101 extern struct target_type or1k_target;
102 extern struct target_type quark_x10xx_target;
103 extern struct target_type quark_d20xx_target;
104 extern struct target_type stm8_target;
105 extern struct target_type riscv_target;
106 extern struct target_type mem_ap_target;
107 extern struct target_type esirisc_target;
108 extern struct target_type arcv2_target;
109
/* Table of every target type compiled into this build of OpenOCD.
 * NULL-terminated so it can be scanned without a separate count. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&xtensa_chip_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&esp32_target,
	&esp32s2_target,
	&esp32s3_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&mips_mips64_target,
	NULL,
};

/* Head of the singly-linked list of all configured targets. */
struct target *all_targets;
/* Registered event and timer callbacks, shared by all targets. */
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
/* Absolute time (ms) at which the earliest pending timer callback fires. */
static int64_t target_timer_next_event_value;
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
/* Placeholder SMP list for targets created without an explicit SMP group. */
static LIST_HEAD(empty_smp_targets);
162
/* Accepted spellings for assert/deassert arguments in Tcl commands. */
static const struct jim_nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};

/* Printable names for ERROR_TARGET_* codes; used by target_strerror_safe(). */
static const struct jim_nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
187
188 static const char *target_strerror_safe(int err)
189 {
190 const struct jim_nvp *n;
191
192 n = jim_nvp_value2name_simple(nvp_error_target, err);
193 if (!n->name)
194 return "unknown";
195 else
196 return n->name;
197 }
198
/* Tcl names for TARGET_EVENT_* values, as used by 'targetname configure
 * -event <name> ...' and by target_event_name(). */
static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	/* Reset sequence events, in the order they occur. */
	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	/* User-defined semihosting commands, see semihosting_common.h. */
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X100, .name = "semihosting-user-cmd-0x100" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X101, .name = "semihosting-user-cmd-0x101" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X102, .name = "semihosting-user-cmd-0x102" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X103, .name = "semihosting-user-cmd-0x103" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X104, .name = "semihosting-user-cmd-0x104" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X105, .name = "semihosting-user-cmd-0x105" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X106, .name = "semihosting-user-cmd-0x106" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X107, .name = "semihosting-user-cmd-0x107" },

	{ .name = NULL, .value = -1 }
};
250
/* Printable names for the TARGET_* state enum; used by target_state_name(). */
static const struct jim_nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};

/* Printable names for DBG_REASON_* codes; used by debug_reason_name(). */
static const struct jim_nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};

/* Accepted spellings for target endianness configuration. */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};

/* Names of the reset modes accepted by the 'reset' command. */
static const struct jim_nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};
288
289 const char *debug_reason_name(struct target *t)
290 {
291 const char *cp;
292
293 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
294 t->debug_reason)->name;
295 if (!cp) {
296 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
297 cp = "(*BUG*unknown*BUG*)";
298 }
299 return cp;
300 }
301
302 const char *target_state_name(struct target *t)
303 {
304 const char *cp;
305 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
306 if (!cp) {
307 LOG_ERROR("Invalid target state: %d", (int)(t->state));
308 cp = "(*BUG*unknown*BUG*)";
309 }
310
311 if (!target_was_examined(t) && t->defer_examine)
312 cp = "examine deferred";
313
314 return cp;
315 }
316
317 const char *target_event_name(enum target_event event)
318 {
319 const char *cp;
320 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
321 if (!cp) {
322 LOG_ERROR("Invalid target event: %d", (int)(event));
323 cp = "(*BUG*unknown*BUG*)";
324 }
325 return cp;
326 }
327
328 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
329 {
330 const char *cp;
331 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
332 if (!cp) {
333 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
334 cp = "(*BUG*unknown*BUG*)";
335 }
336 return cp;
337 }
338
339 /* determine the number of the new target */
340 static int new_target_number(void)
341 {
342 struct target *t;
343 int x;
344
345 /* number is 0 based */
346 x = -1;
347 t = all_targets;
348 while (t) {
349 if (x < t->target_number)
350 x = t->target_number;
351 t = t->next;
352 }
353 return x + 1;
354 }
355
356 static void append_to_list_all_targets(struct target *target)
357 {
358 struct target **t = &all_targets;
359
360 while (*t)
361 t = &((*t)->next);
362 *t = target;
363 }
364
365 /* read a uint64_t from a buffer in target memory endianness */
366 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
367 {
368 if (target->endianness == TARGET_LITTLE_ENDIAN)
369 return le_to_h_u64(buffer);
370 else
371 return be_to_h_u64(buffer);
372 }
373
374 /* read a uint32_t from a buffer in target memory endianness */
375 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
376 {
377 if (target->endianness == TARGET_LITTLE_ENDIAN)
378 return le_to_h_u32(buffer);
379 else
380 return be_to_h_u32(buffer);
381 }
382
383 /* read a uint24_t from a buffer in target memory endianness */
384 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
385 {
386 if (target->endianness == TARGET_LITTLE_ENDIAN)
387 return le_to_h_u24(buffer);
388 else
389 return be_to_h_u24(buffer);
390 }
391
392 /* read a uint16_t from a buffer in target memory endianness */
393 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
394 {
395 if (target->endianness == TARGET_LITTLE_ENDIAN)
396 return le_to_h_u16(buffer);
397 else
398 return be_to_h_u16(buffer);
399 }
400
401 /* write a uint64_t to a buffer in target memory endianness */
402 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
403 {
404 if (target->endianness == TARGET_LITTLE_ENDIAN)
405 h_u64_to_le(buffer, value);
406 else
407 h_u64_to_be(buffer, value);
408 }
409
410 /* write a uint32_t to a buffer in target memory endianness */
411 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
412 {
413 if (target->endianness == TARGET_LITTLE_ENDIAN)
414 h_u32_to_le(buffer, value);
415 else
416 h_u32_to_be(buffer, value);
417 }
418
419 /* write a uint24_t to a buffer in target memory endianness */
420 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
421 {
422 if (target->endianness == TARGET_LITTLE_ENDIAN)
423 h_u24_to_le(buffer, value);
424 else
425 h_u24_to_be(buffer, value);
426 }
427
428 /* write a uint16_t to a buffer in target memory endianness */
429 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
430 {
431 if (target->endianness == TARGET_LITTLE_ENDIAN)
432 h_u16_to_le(buffer, value);
433 else
434 h_u16_to_be(buffer, value);
435 }
436
437 /* write a uint8_t to a buffer in target memory endianness */
438 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
439 {
440 *buffer = value;
441 }
442
443 /* write a uint64_t array to a buffer in target memory endianness */
444 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
445 {
446 uint32_t i;
447 for (i = 0; i < count; i++)
448 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
449 }
450
451 /* write a uint32_t array to a buffer in target memory endianness */
452 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
453 {
454 uint32_t i;
455 for (i = 0; i < count; i++)
456 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
457 }
458
459 /* write a uint16_t array to a buffer in target memory endianness */
460 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
461 {
462 uint32_t i;
463 for (i = 0; i < count; i++)
464 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
465 }
466
467 /* write a uint64_t array to a buffer in target memory endianness */
468 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
469 {
470 uint32_t i;
471 for (i = 0; i < count; i++)
472 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
473 }
474
475 /* write a uint32_t array to a buffer in target memory endianness */
476 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
477 {
478 uint32_t i;
479 for (i = 0; i < count; i++)
480 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
481 }
482
483 /* write a uint16_t array to a buffer in target memory endianness */
484 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
485 {
486 uint32_t i;
487 for (i = 0; i < count; i++)
488 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
489 }
490
491 /* return a pointer to a configured target; id is name or number */
492 struct target *get_target(const char *id)
493 {
494 struct target *target;
495
496 /* try as tcltarget name */
497 for (target = all_targets; target; target = target->next) {
498 if (!target_name(target))
499 continue;
500 if (strcmp(id, target_name(target)) == 0)
501 return target;
502 }
503
504 /* It's OK to remove this fallback sometime after August 2010 or so */
505
506 /* no match, try as number */
507 unsigned num;
508 if (parse_uint(id, &num) != ERROR_OK)
509 return NULL;
510
511 for (target = all_targets; target; target = target->next) {
512 if (target->target_number == (int)num) {
513 LOG_WARNING("use '%s' as target identifier, not '%u'",
514 target_name(target), num);
515 return target;
516 }
517 }
518
519 return NULL;
520 }
521
522 /* returns a pointer to the n-th configured target */
523 struct target *get_target_by_num(int num)
524 {
525 struct target *target = all_targets;
526
527 while (target) {
528 if (target->target_number == num)
529 return target;
530 target = target->next;
531 }
532
533 return NULL;
534 }
535
/* Return the current target of the command context; a NULL current
 * target is an internal bug and aborts the process. */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_current_target_or_null(cmd_ctx);

	if (!target) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return target;
}
547
548 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
549 {
550 return cmd_ctx->current_target_override
551 ? cmd_ctx->current_target_override
552 : cmd_ctx->current_target;
553 }
554
555 int target_poll(struct target *target)
556 {
557 int retval;
558
559 /* We can't poll until after examine */
560 if (!target_was_examined(target)) {
561 /* Fail silently lest we pollute the log */
562 return ERROR_FAIL;
563 }
564
565 retval = target->type->poll(target);
566 if (retval != ERROR_OK)
567 return retval;
568
569 if (target->halt_issued) {
570 if (target->state == TARGET_HALTED)
571 target->halt_issued = false;
572 else {
573 int64_t t = timeval_ms() - target->halt_issued_time;
574 if (t > DEFAULT_HALT_TIMEOUT) {
575 target->halt_issued = false;
576 LOG_INFO("Halt timed out, wake up GDB.");
577 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
578 }
579 }
580 }
581
582 return ERROR_OK;
583 }
584
585 int target_halt(struct target *target)
586 {
587 int retval;
588 /* We can't poll until after examine */
589 if (!target_was_examined(target)) {
590 LOG_ERROR("Target not examined yet");
591 return ERROR_FAIL;
592 }
593
594 retval = target->type->halt(target);
595 if (retval != ERROR_OK)
596 return retval;
597
598 target->halt_issued = true;
599 target->halt_issued_time = timeval_ms();
600
601 return ERROR_OK;
602 }
603
604 /**
605 * Make the target (re)start executing using its saved execution
606 * context (possibly with some modifications).
607 *
608 * @param target Which target should start executing.
609 * @param current True to use the target's saved program counter instead
610 * of the address parameter
611 * @param address Optionally used as the program counter.
612 * @param handle_breakpoints True iff breakpoints at the resumption PC
613 * should be skipped. (For example, maybe execution was stopped by
614 * such a breakpoint, in which case it would be counterproductive to
615 * let it re-trigger.
616 * @param debug_execution False if all working areas allocated by OpenOCD
617 * should be released and/or restored to their original contents.
618 * (This would for example be true to run some downloaded "helper"
619 * algorithm code, which resides in one such working buffer and uses
620 * another for data storage.)
621 *
622 * @todo Resolve the ambiguity about what the "debug_execution" flag
623 * signifies. For example, Target implementations don't agree on how
624 * it relates to invalidation of the register cache, or to whether
625 * breakpoints and watchpoints should be enabled. (It would seem wrong
626 * to enable breakpoints when running downloaded "helper" algorithms
627 * (debug_execution true), since the breakpoints would be set to match
628 * target firmware being debugged, not the helper algorithm.... and
629 * enabling them could cause such helpers to malfunction (for example,
630 * by overwriting data with a breakpoint instruction. On the other
631 * hand the infrastructure for running such helpers might use this
632 * procedure but rely on hardware breakpoint to detect termination.)
633 */
634 int target_resume(struct target *target, int current, target_addr_t address,
635 int handle_breakpoints, int debug_execution)
636 {
637 int retval;
638
639 /* We can't poll until after examine */
640 if (!target_was_examined(target)) {
641 LOG_ERROR("Target not examined yet");
642 return ERROR_FAIL;
643 }
644
645 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
646
647 /* note that resume *must* be asynchronous. The CPU can halt before
648 * we poll. The CPU can even halt at the current PC as a result of
649 * a software breakpoint being inserted by (a bug?) the application.
650 */
651 /*
652 * resume() triggers the event 'resumed'. The execution of TCL commands
653 * in the event handler causes the polling of targets. If the target has
654 * already halted for a breakpoint, polling will run the 'halted' event
655 * handler before the pending 'resumed' handler.
656 * Disable polling during resume() to guarantee the execution of handlers
657 * in the correct order.
658 */
659 bool save_poll_mask = jtag_poll_mask();
660 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
661 jtag_poll_unmask(save_poll_mask);
662
663 if (retval != ERROR_OK)
664 return retval;
665
666 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
667
668 return retval;
669 }
670
671 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
672 {
673 char buf[100];
674 int retval;
675 struct jim_nvp *n;
676 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
677 if (!n->name) {
678 LOG_ERROR("invalid reset mode");
679 return ERROR_FAIL;
680 }
681
682 struct target *target;
683 for (target = all_targets; target; target = target->next)
684 target_call_reset_callbacks(target, reset_mode);
685
686 /* disable polling during reset to make reset event scripts
687 * more predictable, i.e. dr/irscan & pathmove in events will
688 * not have JTAG operations injected into the middle of a sequence.
689 */
690 bool save_poll_mask = jtag_poll_mask();
691
692 sprintf(buf, "ocd_process_reset %s", n->name);
693 retval = Jim_Eval(cmd->ctx->interp, buf);
694
695 jtag_poll_unmask(save_poll_mask);
696
697 if (retval != JIM_OK) {
698 Jim_MakeErrorMessage(cmd->ctx->interp);
699 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
700 return ERROR_FAIL;
701 }
702
703 /* We want any events to be processed before the prompt */
704 retval = target_call_timer_callbacks_now();
705
706 for (target = all_targets; target; target = target->next) {
707 target->type->check_reset(target);
708 target->running_alg = false;
709 }
710
711 return retval;
712 }
713
714 static int identity_virt2phys(struct target *target,
715 target_addr_t virtual, target_addr_t *physical)
716 {
717 *physical = virtual;
718 return ERROR_OK;
719 }
720
721 static int no_mmu(struct target *target, int *enabled)
722 {
723 *enabled = 0;
724 return ERROR_OK;
725 }
726
727 /**
728 * Reset the @c examined flag for the given target.
729 * Pure paranoia -- targets are zeroed on allocation.
730 */
731 static inline void target_reset_examined(struct target *target)
732 {
733 target->examined = false;
734 }
735
736 static int default_examine(struct target *target)
737 {
738 target_set_examined(target);
739 return ERROR_OK;
740 }
741
742 /* no check by default */
743 static int default_check_reset(struct target *target)
744 {
745 return ERROR_OK;
746 }
747
748 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
749 * Keep in sync */
750 int target_examine_one(struct target *target)
751 {
752 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
753
754 int retval = target->type->examine(target);
755 if (retval != ERROR_OK) {
756 target_reset_examined(target);
757 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
758 return retval;
759 }
760
761 target_set_examined(target);
762 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
763
764 return ERROR_OK;
765 }
766
767 static int jtag_enable_callback(enum jtag_event event, void *priv)
768 {
769 struct target *target = priv;
770
771 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
772 return ERROR_OK;
773
774 jtag_unregister_event_callback(jtag_enable_callback, target);
775
776 return target_examine_one(target);
777 }
778
779 /* Targets that correctly implement init + examine, i.e.
780 * no communication with target during init:
781 *
782 * XScale
783 */
784 int target_examine(void)
785 {
786 int retval = ERROR_OK;
787 struct target *target;
788
789 for (target = all_targets; target; target = target->next) {
790 /* defer examination, but don't skip it */
791 if (!target->tap->enabled) {
792 jtag_register_event_callback(jtag_enable_callback,
793 target);
794 continue;
795 }
796
797 if (target->defer_examine)
798 continue;
799
800 int retval2 = target_examine_one(target);
801 if (retval2 != ERROR_OK) {
802 LOG_WARNING("target %s examination failed", target_name(target));
803 retval = retval2;
804 }
805 }
806 return retval;
807 }
808
809 const char *target_type_name(struct target *target)
810 {
811 return target->type->name;
812 }
813
814 static int target_soft_reset_halt(struct target *target)
815 {
816 if (!target_was_examined(target)) {
817 LOG_ERROR("Target not examined yet");
818 return ERROR_FAIL;
819 }
820 if (!target->type->soft_reset_halt) {
821 LOG_ERROR("Target %s does not support soft_reset_halt",
822 target_name(target));
823 return ERROR_FAIL;
824 }
825 return target->type->soft_reset_halt(target);
826 }
827
828 /**
829 * Downloads a target-specific native code algorithm to the target,
830 * and executes it. * Note that some targets may need to set up, enable,
831 * and tear down a breakpoint (hard or * soft) to detect algorithm
832 * termination, while others may support lower overhead schemes where
833 * soft breakpoints embedded in the algorithm automatically terminate the
834 * algorithm.
835 *
836 * @param target used to run the algorithm
837 * @param num_mem_params
838 * @param mem_params
839 * @param num_reg_params
840 * @param reg_param
841 * @param entry_point
842 * @param exit_point
843 * @param timeout_ms
844 * @param arch_info target-specific description of the algorithm.
845 */
846 int target_run_algorithm(struct target *target,
847 int num_mem_params, struct mem_param *mem_params,
848 int num_reg_params, struct reg_param *reg_param,
849 target_addr_t entry_point, target_addr_t exit_point,
850 int timeout_ms, void *arch_info)
851 {
852 int retval = ERROR_FAIL;
853
854 if (!target_was_examined(target)) {
855 LOG_ERROR("Target not examined yet");
856 goto done;
857 }
858 if (!target->type->run_algorithm) {
859 LOG_ERROR("Target type '%s' does not support %s",
860 target_type_name(target), __func__);
861 goto done;
862 }
863
864 target->running_alg = true;
865 retval = target->type->run_algorithm(target,
866 num_mem_params, mem_params,
867 num_reg_params, reg_param,
868 entry_point, exit_point, timeout_ms, arch_info);
869 target->running_alg = false;
870
871 done:
872 return retval;
873 }
874
875 /**
876 * Executes a target-specific native code algorithm and leaves it running.
877 *
878 * @param target used to run the algorithm
879 * @param num_mem_params
880 * @param mem_params
881 * @param num_reg_params
882 * @param reg_params
883 * @param entry_point
884 * @param exit_point
885 * @param arch_info target-specific description of the algorithm.
886 */
887 int target_start_algorithm(struct target *target,
888 int num_mem_params, struct mem_param *mem_params,
889 int num_reg_params, struct reg_param *reg_params,
890 target_addr_t entry_point, target_addr_t exit_point,
891 void *arch_info)
892 {
893 int retval = ERROR_FAIL;
894
895 if (!target_was_examined(target)) {
896 LOG_ERROR("Target not examined yet");
897 goto done;
898 }
899 if (!target->type->start_algorithm) {
900 LOG_ERROR("Target type '%s' does not support %s",
901 target_type_name(target), __func__);
902 goto done;
903 }
904 if (target->running_alg) {
905 LOG_ERROR("Target is already running an algorithm");
906 goto done;
907 }
908
909 target->running_alg = true;
910 retval = target->type->start_algorithm(target,
911 num_mem_params, mem_params,
912 num_reg_params, reg_params,
913 entry_point, exit_point, arch_info);
914
915 done:
916 return retval;
917 }
918
919 /**
920 * Waits for an algorithm started with target_start_algorithm() to complete.
921 *
922 * @param target used to run the algorithm
923 * @param num_mem_params
924 * @param mem_params
925 * @param num_reg_params
926 * @param reg_params
927 * @param exit_point
928 * @param timeout_ms
929 * @param arch_info target-specific description of the algorithm.
930 */
931 int target_wait_algorithm(struct target *target,
932 int num_mem_params, struct mem_param *mem_params,
933 int num_reg_params, struct reg_param *reg_params,
934 target_addr_t exit_point, int timeout_ms,
935 void *arch_info)
936 {
937 int retval = ERROR_FAIL;
938
939 if (!target->type->wait_algorithm) {
940 LOG_ERROR("Target type '%s' does not support %s",
941 target_type_name(target), __func__);
942 goto done;
943 }
944 if (!target->running_alg) {
945 LOG_ERROR("Target is not running an algorithm");
946 goto done;
947 }
948
949 retval = target->type->wait_algorithm(target,
950 num_mem_params, mem_params,
951 num_reg_params, reg_params,
952 exit_point, timeout_ms, arch_info);
953 if (retval != ERROR_TARGET_TIMEOUT)
954 target->running_alg = false;
955
956 done:
957 return retval;
958 }
959
/**
 * Streams data to a circular buffer on target intended for consumption by code
 * running asynchronously on target.
 *
 * This is intended for applications where target-specific native code runs
 * on the target, receives data from the circular buffer, does something with
 * it (most likely writing it to a flash memory), and advances the circular
 * buffer pointer.
 *
 * This assumes that the helper algorithm has already been loaded to the target,
 * but has not been started yet. Given memory and register parameters are passed
 * to the algorithm.
 *
 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
 * following format:
 *
 * [buffer_start + 0, buffer_start + 4):
 *     Write Pointer address (aka head). Written and updated by this
 *     routine when new data is written to the circular buffer.
 * [buffer_start + 4, buffer_start + 8):
 *     Read Pointer address (aka tail). Updated by code running on the
 *     target after it consumes data.
 * [buffer_start + 8, buffer_start + buffer_size):
 *     Circular buffer contents.
 *
 * See contrib/loaders/flash/stm32f1x.S for an example.
 *
 * @param target used to run the algorithm
 * @param buffer address on the host where data to be sent is located
 * @param count number of blocks to send
 * @param block_size size in bytes of each block; must be a power of two
 * @param num_mem_params count of memory-based params to pass to algorithm
 * @param mem_params memory-based params to pass to algorithm
 * @param num_reg_params count of register-based params to pass to algorithm
 * @param reg_params register-based params to pass to algorithm
 * @param buffer_start address on the target of the circular buffer structure
 * @param buffer_size size of the circular buffer structure
 * @param entry_point address on the target to execute to start the algorithm
 * @param exit_point address at which to set a breakpoint to catch the
 *     end of the algorithm; can be 0 if target triggers a breakpoint itself
 * @param arch_info target-specific description of the algorithm
 */

int target_run_flash_async_algorithm(struct target *target,
		const uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	/* Kept so progress (bytes already sent) can be logged below. */
	const uint8_t *buffer_orig = buffer;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	/* Both pointers start at the beginning of the (empty) fifo. */
	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(IS_PWR_OF_2(block_size));

	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target and let it idle while writing the first chunk */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash write algorithm");
		return retval;
	}

	while (count > 0) {

		retval = target_read_u32(target, rp_addr, &rp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get read pointer");
			break;
		}

		LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
			(size_t) (buffer - buffer_orig), count, wp, rp);

		/* A zero read pointer is the target's abort/error signal. */
		if (rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		/* Sanity-check rp before using it in pointer arithmetic below. */
		if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. Make sure to not fill it completely,
		 * because that would make wp == rp and that's the empty condition. */
		uint32_t thisrun_bytes;
		if (rp > wp)
			thisrun_bytes = rp - wp - block_size;
		else if (rp > fifo_start_addr)
			thisrun_bytes = fifo_end_addr - wp;
		else
			thisrun_bytes = fifo_end_addr - wp - block_size;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * programming. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(2);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a stellaris using the new ICDI interface */
			if (timeout++ >= 2500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to write */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Force end of large blocks to be word aligned */
		if (thisrun_bytes >= 16)
			thisrun_bytes -= (rp + thisrun_bytes) & 0x03;

		/* Write data to fifo */
		retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap write pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		wp += thisrun_bytes;
		if (wp >= fifo_end_addr)
			wp = fifo_start_addr;

		/* Store updated write pointer to target */
		retval = target_write_u32(target, wp_addr, wp);
		if (retval != ERROR_OK)
			break;

		/* Avoid GDB timeouts */
		keep_alive();
	}

	if (retval != ERROR_OK) {
		/* abort flash write algorithm on target: wp == 0 tells the loader to stop */
		target_write_u32(target, wp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash write algorithm");
		retval = retval2;
	}

	if (retval == ERROR_OK) {
		/* check if algorithm set rp = 0 after fifo writer loop finished */
		retval = target_read_u32(target, rp_addr, &rp);
		if (retval == ERROR_OK && rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
		}
	}

	return retval;
}
1155
1156 int target_run_read_async_algorithm(struct target *target,
1157 uint8_t *buffer, uint32_t count, int block_size,
1158 int num_mem_params, struct mem_param *mem_params,
1159 int num_reg_params, struct reg_param *reg_params,
1160 uint32_t buffer_start, uint32_t buffer_size,
1161 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1162 {
1163 int retval;
1164 int timeout = 0;
1165
1166 const uint8_t *buffer_orig = buffer;
1167
1168 /* Set up working area. First word is write pointer, second word is read pointer,
1169 * rest is fifo data area. */
1170 uint32_t wp_addr = buffer_start;
1171 uint32_t rp_addr = buffer_start + 4;
1172 uint32_t fifo_start_addr = buffer_start + 8;
1173 uint32_t fifo_end_addr = buffer_start + buffer_size;
1174
1175 uint32_t wp = fifo_start_addr;
1176 uint32_t rp = fifo_start_addr;
1177
1178 /* validate block_size is 2^n */
1179 assert(IS_PWR_OF_2(block_size));
1180
1181 retval = target_write_u32(target, wp_addr, wp);
1182 if (retval != ERROR_OK)
1183 return retval;
1184 retval = target_write_u32(target, rp_addr, rp);
1185 if (retval != ERROR_OK)
1186 return retval;
1187
1188 /* Start up algorithm on target */
1189 retval = target_start_algorithm(target, num_mem_params, mem_params,
1190 num_reg_params, reg_params,
1191 entry_point,
1192 exit_point,
1193 arch_info);
1194
1195 if (retval != ERROR_OK) {
1196 LOG_ERROR("error starting target flash read algorithm");
1197 return retval;
1198 }
1199
1200 while (count > 0) {
1201 retval = target_read_u32(target, wp_addr, &wp);
1202 if (retval != ERROR_OK) {
1203 LOG_ERROR("failed to get write pointer");
1204 break;
1205 }
1206
1207 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1208 (size_t)(buffer - buffer_orig), count, wp, rp);
1209
1210 if (wp == 0) {
1211 LOG_ERROR("flash read algorithm aborted by target");
1212 retval = ERROR_FLASH_OPERATION_FAILED;
1213 break;
1214 }
1215
1216 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1217 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1218 break;
1219 }
1220
1221 /* Count the number of bytes available in the fifo without
1222 * crossing the wrap around. */
1223 uint32_t thisrun_bytes;
1224 if (wp >= rp)
1225 thisrun_bytes = wp - rp;
1226 else
1227 thisrun_bytes = fifo_end_addr - rp;
1228
1229 if (thisrun_bytes == 0) {
1230 /* Throttle polling a bit if transfer is (much) faster than flash
1231 * reading. The exact delay shouldn't matter as long as it's
1232 * less than buffer size / flash speed. This is very unlikely to
1233 * run when using high latency connections such as USB. */
1234 alive_sleep(2);
1235
1236 /* to stop an infinite loop on some targets check and increment a timeout
1237 * this issue was observed on a stellaris using the new ICDI interface */
1238 if (timeout++ >= 2500) {
1239 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1240 return ERROR_FLASH_OPERATION_FAILED;
1241 }
1242 continue;
1243 }
1244
1245 /* Reset our timeout */
1246 timeout = 0;
1247
1248 /* Limit to the amount of data we actually want to read */
1249 if (thisrun_bytes > count * block_size)
1250 thisrun_bytes = count * block_size;
1251
1252 /* Force end of large blocks to be word aligned */
1253 if (thisrun_bytes >= 16)
1254 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1255
1256 /* Read data from fifo */
1257 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1258 if (retval != ERROR_OK)
1259 break;
1260
1261 /* Update counters and wrap write pointer */
1262 buffer += thisrun_bytes;
1263 count -= thisrun_bytes / block_size;
1264 rp += thisrun_bytes;
1265 if (rp >= fifo_end_addr)
1266 rp = fifo_start_addr;
1267
1268 /* Store updated write pointer to target */
1269 retval = target_write_u32(target, rp_addr, rp);
1270 if (retval != ERROR_OK)
1271 break;
1272
1273 /* Avoid GDB timeouts */
1274 keep_alive();
1275
1276 }
1277
1278 if (retval != ERROR_OK) {
1279 /* abort flash write algorithm on target */
1280 target_write_u32(target, rp_addr, 0);
1281 }
1282
1283 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1284 num_reg_params, reg_params,
1285 exit_point,
1286 10000,
1287 arch_info);
1288
1289 if (retval2 != ERROR_OK) {
1290 LOG_ERROR("error waiting for target flash write algorithm");
1291 retval = retval2;
1292 }
1293
1294 if (retval == ERROR_OK) {
1295 /* check if algorithm set wp = 0 after fifo writer loop finished */
1296 retval = target_read_u32(target, wp_addr, &wp);
1297 if (retval == ERROR_OK && wp == 0) {
1298 LOG_ERROR("flash read algorithm aborted by target");
1299 retval = ERROR_FLASH_OPERATION_FAILED;
1300 }
1301 }
1302
1303 return retval;
1304 }
1305
1306 int target_read_memory(struct target *target,
1307 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1308 {
1309 if (!target_was_examined(target)) {
1310 LOG_ERROR("Target not examined yet");
1311 return ERROR_FAIL;
1312 }
1313 if (!target->type->read_memory) {
1314 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1315 return ERROR_FAIL;
1316 }
1317 return target->type->read_memory(target, address, size, count, buffer);
1318 }
1319
1320 int target_read_phys_memory(struct target *target,
1321 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1322 {
1323 if (!target_was_examined(target)) {
1324 LOG_ERROR("Target not examined yet");
1325 return ERROR_FAIL;
1326 }
1327 if (!target->type->read_phys_memory) {
1328 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1329 return ERROR_FAIL;
1330 }
1331 return target->type->read_phys_memory(target, address, size, count, buffer);
1332 }
1333
1334 int target_write_memory(struct target *target,
1335 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1336 {
1337 if (!target_was_examined(target)) {
1338 LOG_ERROR("Target not examined yet");
1339 return ERROR_FAIL;
1340 }
1341 if (!target->type->write_memory) {
1342 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1343 return ERROR_FAIL;
1344 }
1345 return target->type->write_memory(target, address, size, count, buffer);
1346 }
1347
1348 int target_write_phys_memory(struct target *target,
1349 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1350 {
1351 if (!target_was_examined(target)) {
1352 LOG_ERROR("Target not examined yet");
1353 return ERROR_FAIL;
1354 }
1355 if (!target->type->write_phys_memory) {
1356 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1357 return ERROR_FAIL;
1358 }
1359 return target->type->write_phys_memory(target, address, size, count, buffer);
1360 }
1361
1362 int target_add_breakpoint(struct target *target,
1363 struct breakpoint *breakpoint)
1364 {
1365 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1366 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1367 return ERROR_TARGET_NOT_HALTED;
1368 }
1369 return target->type->add_breakpoint(target, breakpoint);
1370 }
1371
1372 int target_add_context_breakpoint(struct target *target,
1373 struct breakpoint *breakpoint)
1374 {
1375 if (target->state != TARGET_HALTED) {
1376 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1377 return ERROR_TARGET_NOT_HALTED;
1378 }
1379 return target->type->add_context_breakpoint(target, breakpoint);
1380 }
1381
1382 int target_add_hybrid_breakpoint(struct target *target,
1383 struct breakpoint *breakpoint)
1384 {
1385 if (target->state != TARGET_HALTED) {
1386 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1387 return ERROR_TARGET_NOT_HALTED;
1388 }
1389 return target->type->add_hybrid_breakpoint(target, breakpoint);
1390 }
1391
/* Remove a previously installed breakpoint; delegates to the target type.
 * Unlike target_add_breakpoint(), no halted-state check is done here. */
int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}
1397
1398 int target_add_watchpoint(struct target *target,
1399 struct watchpoint *watchpoint)
1400 {
1401 if (target->state != TARGET_HALTED) {
1402 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1403 return ERROR_TARGET_NOT_HALTED;
1404 }
1405 return target->type->add_watchpoint(target, watchpoint);
1406 }
/* Remove a previously installed watchpoint; delegates to the target type.
 * Unlike target_add_watchpoint(), no halted-state check is done here. */
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}
1412 int target_hit_watchpoint(struct target *target,
1413 struct watchpoint **hit_watchpoint)
1414 {
1415 if (target->state != TARGET_HALTED) {
1416 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1417 return ERROR_TARGET_NOT_HALTED;
1418 }
1419
1420 if (!target->type->hit_watchpoint) {
1421 /* For backward compatible, if hit_watchpoint is not implemented,
1422 * return ERROR_FAIL such that gdb_server will not take the nonsense
1423 * information. */
1424 return ERROR_FAIL;
1425 }
1426
1427 return target->type->hit_watchpoint(target, hit_watchpoint);
1428 }
1429
1430 const char *target_get_gdb_arch(struct target *target)
1431 {
1432 if (!target->type->get_gdb_arch)
1433 return NULL;
1434 return target->type->get_gdb_arch(target);
1435 }
1436
1437 int target_get_gdb_reg_list(struct target *target,
1438 struct reg **reg_list[], int *reg_list_size,
1439 enum target_register_class reg_class)
1440 {
1441 int result = ERROR_FAIL;
1442
1443 if (!target_was_examined(target)) {
1444 LOG_ERROR("Target not examined yet");
1445 goto done;
1446 }
1447
1448 result = target->type->get_gdb_reg_list(target, reg_list,
1449 reg_list_size, reg_class);
1450
1451 done:
1452 if (result != ERROR_OK) {
1453 *reg_list = NULL;
1454 *reg_list_size = 0;
1455 }
1456 return result;
1457 }
1458
1459 int target_get_gdb_reg_list_noread(struct target *target,
1460 struct reg **reg_list[], int *reg_list_size,
1461 enum target_register_class reg_class)
1462 {
1463 if (target->type->get_gdb_reg_list_noread &&
1464 target->type->get_gdb_reg_list_noread(target, reg_list,
1465 reg_list_size, reg_class) == ERROR_OK)
1466 return ERROR_OK;
1467 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1468 }
1469
1470 bool target_supports_gdb_connection(struct target *target)
1471 {
1472 /*
1473 * exclude all the targets that don't provide get_gdb_reg_list
1474 * or that have explicit gdb_max_connection == 0
1475 */
1476 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1477 }
1478
1479 int target_step(struct target *target,
1480 int current, target_addr_t address, int handle_breakpoints)
1481 {
1482 int retval;
1483
1484 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1485
1486 retval = target->type->step(target, current, address, handle_breakpoints);
1487 if (retval != ERROR_OK)
1488 return retval;
1489
1490 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1491
1492 return retval;
1493 }
1494
1495 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1496 {
1497 if (target->state != TARGET_HALTED) {
1498 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1499 return ERROR_TARGET_NOT_HALTED;
1500 }
1501 return target->type->get_gdb_fileio_info(target, fileio_info);
1502 }
1503
1504 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1505 {
1506 if (target->state != TARGET_HALTED) {
1507 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1508 return ERROR_TARGET_NOT_HALTED;
1509 }
1510 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1511 }
1512
1513 target_addr_t target_address_max(struct target *target)
1514 {
1515 unsigned bits = target_address_bits(target);
1516 if (sizeof(target_addr_t) * 8 == bits)
1517 return (target_addr_t) -1;
1518 else
1519 return (((target_addr_t) 1) << bits) - 1;
1520 }
1521
1522 unsigned target_address_bits(struct target *target)
1523 {
1524 if (target->type->address_bits)
1525 return target->type->address_bits(target);
1526 return 32;
1527 }
1528
1529 unsigned int target_data_bits(struct target *target)
1530 {
1531 if (target->type->data_bits)
1532 return target->type->data_bits(target);
1533 return 32;
1534 }
1535
/* Collect PC samples via the target type's profiling hook (a default
 * implementation is installed by target_init_one() when the type does
 * not provide one). */
static int target_profiling(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	return target->type->profiling(target, samples, max_num_samples,
			num_samples, seconds);
}
1542
1543 static int handle_target(void *priv);
1544
/* Prepare one target for use: reset the examined flag, fill in default
 * implementations for optional target_type hooks, sanity-check the MMU
 * hooks, and run the type's mandatory init_target() routine. */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (!type->examine)
		type->examine = default_examine;

	if (!type->check_reset)
		type->check_reset = default_check_reset;

	/* Every target type must provide init_target. */
	assert(type->init_target);

	int retval = type->init_target(cmd_ctx, target);
	if (retval != ERROR_OK) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (!type->virt2phys) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* Install generic fallbacks for the remaining optional hooks. */
	if (!target->type->read_buffer)
		target->type->read_buffer = target_read_buffer_default;

	if (!target->type->write_buffer)
		target->type->write_buffer = target_write_buffer_default;

	if (!target->type->get_gdb_fileio_info)
		target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;

	if (!target->type->gdb_fileio_end)
		target->type->gdb_fileio_end = target_gdb_fileio_end_default;

	if (!target->type->profiling)
		target->type->profiling = target_profiling_default;

	return ERROR_OK;
}
1604
1605 static int target_init(struct command_context *cmd_ctx)
1606 {
1607 struct target *target;
1608 int retval;
1609
1610 for (target = all_targets; target; target = target->next) {
1611 retval = target_init_one(cmd_ctx, target);
1612 if (retval != ERROR_OK)
1613 return retval;
1614 }
1615
1616 if (!all_targets)
1617 return ERROR_OK;
1618
1619 retval = target_register_user_commands(cmd_ctx);
1620 if (retval != ERROR_OK)
1621 return retval;
1622
1623 retval = target_register_timer_callback(&handle_target,
1624 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1625 if (retval != ERROR_OK)
1626 return retval;
1627
1628 return ERROR_OK;
1629 }
1630
1631 COMMAND_HANDLER(handle_target_init_command)
1632 {
1633 int retval;
1634
1635 if (CMD_ARGC != 0)
1636 return ERROR_COMMAND_SYNTAX_ERROR;
1637
1638 static bool target_initialized;
1639 if (target_initialized) {
1640 LOG_INFO("'target init' has already been called");
1641 return ERROR_OK;
1642 }
1643 target_initialized = true;
1644
1645 retval = command_run_line(CMD_CTX, "init_targets");
1646 if (retval != ERROR_OK)
1647 return retval;
1648
1649 retval = command_run_line(CMD_CTX, "init_target_events");
1650 if (retval != ERROR_OK)
1651 return retval;
1652
1653 retval = command_run_line(CMD_CTX, "init_board");
1654 if (retval != ERROR_OK)
1655 return retval;
1656
1657 LOG_DEBUG("Initializing targets...");
1658 return target_init(CMD_CTX);
1659 }
1660
1661 int target_register_event_callback(int (*callback)(struct target *target,
1662 enum target_event event, void *priv), void *priv)
1663 {
1664 struct target_event_callback **callbacks_p = &target_event_callbacks;
1665
1666 if (!callback)
1667 return ERROR_COMMAND_SYNTAX_ERROR;
1668
1669 if (*callbacks_p) {
1670 while ((*callbacks_p)->next)
1671 callbacks_p = &((*callbacks_p)->next);
1672 callbacks_p = &((*callbacks_p)->next);
1673 }
1674
1675 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1676 (*callbacks_p)->callback = callback;
1677 (*callbacks_p)->priv = priv;
1678 (*callbacks_p)->next = NULL;
1679
1680 return ERROR_OK;
1681 }
1682
1683 int target_register_reset_callback(int (*callback)(struct target *target,
1684 enum target_reset_mode reset_mode, void *priv), void *priv)
1685 {
1686 struct target_reset_callback *entry;
1687
1688 if (!callback)
1689 return ERROR_COMMAND_SYNTAX_ERROR;
1690
1691 entry = malloc(sizeof(struct target_reset_callback));
1692 if (!entry) {
1693 LOG_ERROR("error allocating buffer for reset callback entry");
1694 return ERROR_COMMAND_SYNTAX_ERROR;
1695 }
1696
1697 entry->callback = callback;
1698 entry->priv = priv;
1699 list_add(&entry->list, &target_reset_callback_list);
1700
1701
1702 return ERROR_OK;
1703 }
1704
1705 int target_register_trace_callback(int (*callback)(struct target *target,
1706 size_t len, uint8_t *data, void *priv), void *priv)
1707 {
1708 struct target_trace_callback *entry;
1709
1710 if (!callback)
1711 return ERROR_COMMAND_SYNTAX_ERROR;
1712
1713 entry = malloc(sizeof(struct target_trace_callback));
1714 if (!entry) {
1715 LOG_ERROR("error allocating buffer for trace callback entry");
1716 return ERROR_COMMAND_SYNTAX_ERROR;
1717 }
1718
1719 entry->callback = callback;
1720 entry->priv = priv;
1721 list_add(&entry->list, &target_trace_callback_list);
1722
1723
1724 return ERROR_OK;
1725 }
1726
1727 int target_register_timer_callback(int (*callback)(void *priv),
1728 unsigned int time_ms, enum target_timer_type type, void *priv)
1729 {
1730 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1731
1732 if (!callback)
1733 return ERROR_COMMAND_SYNTAX_ERROR;
1734
1735 if (*callbacks_p) {
1736 while ((*callbacks_p)->next)
1737 callbacks_p = &((*callbacks_p)->next);
1738 callbacks_p = &((*callbacks_p)->next);
1739 }
1740
1741 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1742 (*callbacks_p)->callback = callback;
1743 (*callbacks_p)->type = type;
1744 (*callbacks_p)->time_ms = time_ms;
1745 (*callbacks_p)->removed = false;
1746
1747 (*callbacks_p)->when = timeval_ms() + time_ms;
1748 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1749
1750 (*callbacks_p)->priv = priv;
1751 (*callbacks_p)->next = NULL;
1752
1753 return ERROR_OK;
1754 }
1755
1756 int target_unregister_event_callback(int (*callback)(struct target *target,
1757 enum target_event event, void *priv), void *priv)
1758 {
1759 struct target_event_callback **p = &target_event_callbacks;
1760 struct target_event_callback *c = target_event_callbacks;
1761
1762 if (!callback)
1763 return ERROR_COMMAND_SYNTAX_ERROR;
1764
1765 while (c) {
1766 struct target_event_callback *next = c->next;
1767 if ((c->callback == callback) && (c->priv == priv)) {
1768 *p = next;
1769 free(c);
1770 return ERROR_OK;
1771 } else
1772 p = &(c->next);
1773 c = next;
1774 }
1775
1776 return ERROR_OK;
1777 }
1778
1779 int target_unregister_reset_callback(int (*callback)(struct target *target,
1780 enum target_reset_mode reset_mode, void *priv), void *priv)
1781 {
1782 struct target_reset_callback *entry;
1783
1784 if (!callback)
1785 return ERROR_COMMAND_SYNTAX_ERROR;
1786
1787 list_for_each_entry(entry, &target_reset_callback_list, list) {
1788 if (entry->callback == callback && entry->priv == priv) {
1789 list_del(&entry->list);
1790 free(entry);
1791 break;
1792 }
1793 }
1794
1795 return ERROR_OK;
1796 }
1797
1798 int target_unregister_trace_callback(int (*callback)(struct target *target,
1799 size_t len, uint8_t *data, void *priv), void *priv)
1800 {
1801 struct target_trace_callback *entry;
1802
1803 if (!callback)
1804 return ERROR_COMMAND_SYNTAX_ERROR;
1805
1806 list_for_each_entry(entry, &target_trace_callback_list, list) {
1807 if (entry->callback == callback && entry->priv == priv) {
1808 list_del(&entry->list);
1809 free(entry);
1810 break;
1811 }
1812 }
1813
1814 return ERROR_OK;
1815 }
1816
1817 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1818 {
1819 if (!callback)
1820 return ERROR_COMMAND_SYNTAX_ERROR;
1821
1822 for (struct target_timer_callback *c = target_timer_callbacks;
1823 c; c = c->next) {
1824 if ((c->callback == callback) && (c->priv == priv)) {
1825 c->removed = true;
1826 return ERROR_OK;
1827 }
1828 }
1829
1830 return ERROR_FAIL;
1831 }
1832
/* Dispatch a target event: for HALTED, first deliver GDB_HALT; then run the
 * target's configured event handler, then every registered C callback. */
int target_call_event_callbacks(struct target *target, enum target_event event)
{
	struct target_event_callback *callback = target_event_callbacks;
	struct target_event_callback *next_callback;

	if (event == TARGET_EVENT_HALTED) {
		/* execute early halted first */
		target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
	}

	LOG_DEBUG("target event %i (%s) for core %s", event,
		target_event_name(event),
		target_name(target));

	target_handle_event(target, event);

	/* Snapshot ->next before invoking each callback, so a callback that
	 * unregisters itself does not break the traversal. */
	while (callback) {
		next_callback = callback->next;
		callback->callback(target, event, callback->priv);
		callback = next_callback;
	}

	return ERROR_OK;
}
1857
1858 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1859 {
1860 struct target_reset_callback *callback;
1861
1862 LOG_DEBUG("target reset %i (%s)", reset_mode,
1863 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1864
1865 list_for_each_entry(callback, &target_reset_callback_list, list)
1866 callback->callback(target, reset_mode, callback->priv);
1867
1868 return ERROR_OK;
1869 }
1870
1871 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1872 {
1873 struct target_trace_callback *callback;
1874
1875 list_for_each_entry(callback, &target_trace_callback_list, list)
1876 callback->callback(target, len, data, callback->priv);
1877
1878 return ERROR_OK;
1879 }
1880
/* Re-arm a periodic timer callback: next deadline is the current poll
 * time plus the configured period. */
static int target_timer_callback_periodic_restart(
		struct target_timer_callback *cb, int64_t *now)
{
	cb->when = *now + cb->time_ms;
	return ERROR_OK;
}
1887
1888 static int target_call_timer_callback(struct target_timer_callback *cb,
1889 int64_t *now)
1890 {
1891 cb->callback(cb->priv);
1892
1893 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1894 return target_timer_callback_periodic_restart(cb, now);
1895
1896 return target_unregister_timer_callback(cb->callback, cb->priv);
1897 }
1898
/* Poll the timer callback list: free entries flagged as removed, invoke
 * callbacks that are due, and recompute target_timer_next_event_value.
 *
 * @param checktime when non-zero, run only callbacks whose deadline has
 *     passed; when zero, additionally run every periodic callback now. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		if ((*callback)->removed) {
			/* Deferred removal (see target_unregister_timer_callback()):
			 * unlink and free without advancing, since *callback now
			 * refers to the following node. */
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* Track the earliest pending deadline for target_timer_next_event(). */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1946
/* Run all timer callbacks whose deadline has passed.
 * FIX: declare as (void) — an empty parameter list is an obsolescent
 * C feature and does not declare a prototype. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1951
/* invoke periodic callbacks immediately, regardless of their deadline.
 * FIX: declare as (void) — an empty parameter list is an obsolescent
 * C feature and does not declare a prototype. */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1957
/* Absolute time (in the timeval_ms() domain) at which the next registered
 * timer callback wants to run; maintained by the poll loop above. */
int64_t target_timer_next_event(void)
{
	return target_timer_next_event_value;
}
1962
1963 /* Prints the working area layout for debug purposes */
1964 static void print_wa_layout(struct target *target)
1965 {
1966 struct working_area *c = target->working_areas;
1967
1968 while (c) {
1969 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1970 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1971 c->address, c->address + c->size - 1, c->size);
1972 c = c->next;
1973 }
1974 }
1975
1976 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1977 static void target_split_working_area(struct working_area *area, uint32_t size)
1978 {
1979 assert(area->free); /* Shouldn't split an allocated area */
1980 assert(size <= area->size); /* Caller should guarantee this */
1981
1982 /* Split only if not already the right size */
1983 if (size < area->size) {
1984 struct working_area *new_wa = malloc(sizeof(*new_wa));
1985
1986 if (!new_wa)
1987 return;
1988
1989 new_wa->next = area->next;
1990 new_wa->size = area->size - size;
1991 new_wa->address = area->address + size;
1992 new_wa->backup = NULL;
1993 new_wa->user = NULL;
1994 new_wa->free = true;
1995
1996 area->next = new_wa;
1997 area->size = size;
1998
1999 /* If backup memory was allocated to this area, it has the wrong size
2000 * now so free it and it will be reallocated if/when needed */
2001 free(area->backup);
2002 area->backup = NULL;
2003 }
2004 }
2005
2006 /* Merge all adjacent free areas into one */
2007 static void target_merge_working_areas(struct target *target)
2008 {
2009 struct working_area *c = target->working_areas;
2010
2011 while (c && c->next) {
2012 assert(c->next->address == c->address + c->size); /* This is an invariant */
2013
2014 /* Find two adjacent free areas */
2015 if (c->free && c->next->free) {
2016 /* Merge the last into the first */
2017 c->size += c->next->size;
2018
2019 /* Remove the last */
2020 struct working_area *to_be_freed = c->next;
2021 c->next = c->next->next;
2022 free(to_be_freed->backup);
2023 free(to_be_freed);
2024
2025 /* If backup memory was allocated to the remaining area, it's has
2026 * the wrong size now */
2027 free(c->backup);
2028 c->backup = NULL;
2029 } else {
2030 c = c->next;
2031 }
2032 }
2033 }
2034
/* Try to allocate 'size' bytes (rounded up to a multiple of 4) from the
 * target's working area.  On success the area descriptor is stored in
 * *area and marked in use.  Returns ERROR_TARGET_RESOURCE_NOT_AVAILABLE
 * when no suitable free chunk exists; callers may retry with less. */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		/* Choose the physical or virtual base depending on the MMU
		 * state; the matching -work-area-* option must be configured. */
		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = ALIGN_DOWN(target->working_area_size, 4); /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* If malloc failed, new_wa is NULL and the search below falls
		 * through to ERROR_TARGET_RESOURCE_NOT_AVAILABLE. */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	size = ALIGN_UP(size, 4);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
		size, c->address);

	if (target->backup_working_area) {
		/* Save current memory contents so they can be restored on free */
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2128
2129 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2130 {
2131 int retval;
2132
2133 retval = target_alloc_working_area_try(target, size, area);
2134 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2135 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2136 return retval;
2137
2138 }
2139
2140 static int target_restore_working_area(struct target *target, struct working_area *area)
2141 {
2142 int retval = ERROR_OK;
2143
2144 if (target->backup_working_area && area->backup) {
2145 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2146 if (retval != ERROR_OK)
2147 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2148 area->size, area->address);
2149 }
2150
2151 return retval;
2152 }
2153
2154 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2155 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2156 {
2157 if (!area || area->free)
2158 return ERROR_OK;
2159
2160 int retval = ERROR_OK;
2161 if (restore) {
2162 retval = target_restore_working_area(target, area);
2163 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2164 if (retval != ERROR_OK)
2165 return retval;
2166 }
2167
2168 area->free = true;
2169
2170 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2171 area->size, area->address);
2172
2173 /* mark user pointer invalid */
2174 /* TODO: Is this really safe? It points to some previous caller's memory.
2175 * How could we know that the area pointer is still in that place and not
2176 * some other vital data? What's the purpose of this, anyway? */
2177 *area->user = NULL;
2178 area->user = NULL;
2179
2180 target_merge_working_areas(target);
2181
2182 print_wa_layout(target);
2183
2184 return retval;
2185 }
2186
/* Free a working area previously allocated with target_alloc_working_area(),
 * restoring the saved memory contents first (when backup is enabled). */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
2191
2192 /* free resources and restore memory, if restoring memory fails,
2193 * free up resources anyway
2194 */
2195 static void target_free_all_working_areas_restore(struct target *target, int restore)
2196 {
2197 struct working_area *c = target->working_areas;
2198
2199 LOG_DEBUG("freeing all working areas");
2200
2201 /* Loop through all areas, restoring the allocated ones and marking them as free */
2202 while (c) {
2203 if (!c->free) {
2204 if (restore)
2205 target_restore_working_area(target, c);
2206 c->free = true;
2207 *c->user = NULL; /* Same as above */
2208 c->user = NULL;
2209 }
2210 c = c->next;
2211 }
2212
2213 /* Run a merge pass to combine all areas into one */
2214 target_merge_working_areas(target);
2215
2216 print_wa_layout(target);
2217 }
2218
2219 void target_free_all_working_areas(struct target *target)
2220 {
2221 target_free_all_working_areas_restore(target, 1);
2222
2223 /* Now we have none or only one working area marked as free */
2224 if (target->working_areas) {
2225 /* Free the last one to allow on-the-fly moving and resizing */
2226 free(target->working_areas->backup);
2227 free(target->working_areas);
2228 target->working_areas = NULL;
2229 }
2230 }
2231
2232 /* Find the largest number of bytes that can be allocated */
2233 uint32_t target_get_working_area_avail(struct target *target)
2234 {
2235 struct working_area *c = target->working_areas;
2236 uint32_t max_size = 0;
2237
2238 if (!c)
2239 return ALIGN_DOWN(target->working_area_size, 4);
2240
2241 while (c) {
2242 if (c->free && max_size < c->size)
2243 max_size = c->size;
2244
2245 c = c->next;
2246 }
2247
2248 return max_size;
2249 }
2250
/* Free one target and everything it owns.  Teardown order matters: the
 * target type's deinit hook runs first, while the target is still fully
 * constructed. */
static void target_destroy(struct target *target)
{
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	if (target->semihosting)
		free(target->semihosting->basedir);
	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* Release the Tcl event handlers attached to this target */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head, *tmp;

		list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
			list_del(&head->lh);
			head->target->smp = 0;
			free(head);
		}
		/* only free the list when it isn't the shared empty_smp_targets */
		if (target->smp_targets != &empty_smp_targets)
			free(target->smp_targets);
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2295
2296 void target_quit(void)
2297 {
2298 struct target_event_callback *pe = target_event_callbacks;
2299 while (pe) {
2300 struct target_event_callback *t = pe->next;
2301 free(pe);
2302 pe = t;
2303 }
2304 target_event_callbacks = NULL;
2305
2306 struct target_timer_callback *pt = target_timer_callbacks;
2307 while (pt) {
2308 struct target_timer_callback *t = pt->next;
2309 free(pt);
2310 pt = t;
2311 }
2312 target_timer_callbacks = NULL;
2313
2314 for (struct target *target = all_targets; target;) {
2315 struct target *tmp;
2316
2317 tmp = target->next;
2318 target_destroy(target);
2319 target = tmp;
2320 }
2321
2322 all_targets = NULL;
2323 }
2324
2325 int target_arch_state(struct target *target)
2326 {
2327 int retval;
2328 if (!target) {
2329 LOG_WARNING("No target has been configured");
2330 return ERROR_OK;
2331 }
2332
2333 if (target->state != TARGET_HALTED)
2334 return ERROR_OK;
2335
2336 retval = target->type->arch_state(target);
2337 return retval;
2338 }
2339
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info)
{
	/* If the target does not support semihosting, it has no need to
	 * provide a .get_gdb_fileio_info callback.  Returning ERROR_FAIL
	 * makes gdb_server report a plain "Txx" (target halted) reply
	 * instead of a file-I/O request every time. */
	return ERROR_FAIL;
}
2349
/* Default no-op .gdb_fileio_end handler for targets without semihosting. */
static int target_gdb_fileio_end_default(struct target *target,
		int retcode, int fileio_errno, bool ctrl_c)
{
	return ERROR_OK;
}
2355
2356 int target_profiling_default(struct target *target, uint32_t *samples,
2357 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2358 {
2359 struct timeval timeout, now;
2360
2361 gettimeofday(&timeout, NULL);
2362 timeval_add_time(&timeout, seconds, 0);
2363
2364 LOG_INFO("Starting profiling. Halting and resuming the"
2365 " target as often as we can...");
2366
2367 uint32_t sample_count = 0;
2368 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2369 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2370
2371 int retval = ERROR_OK;
2372 for (;;) {
2373 target_poll(target);
2374 if (target->state == TARGET_HALTED) {
2375 uint32_t t = buf_get_u32(reg->value, 0, 32);
2376 samples[sample_count++] = t;
2377 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2378 retval = target_resume(target, 1, 0, 0, 0);
2379 target_poll(target);
2380 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2381 } else if (target->state == TARGET_RUNNING) {
2382 /* We want to quickly sample the PC. */
2383 retval = target_halt(target);
2384 } else {
2385 LOG_INFO("Target not halted or running");
2386 retval = ERROR_OK;
2387 break;
2388 }
2389
2390 if (retval != ERROR_OK)
2391 break;
2392
2393 gettimeofday(&now, NULL);
2394 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2395 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2396 break;
2397 }
2398 }
2399
2400 *num_samples = sample_count;
2401 return retval;
2402 }
2403
2404 /* Single aligned words are guaranteed to use 16 or 32 bit access
2405 * mode respectively, otherwise data is handled as quickly as
2406 * possible
2407 */
2408 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2409 {
2410 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2411 size, address);
2412
2413 if (!target_was_examined(target)) {
2414 LOG_ERROR("Target not examined yet");
2415 return ERROR_FAIL;
2416 }
2417
2418 if (size == 0)
2419 return ERROR_OK;
2420
2421 if ((address + size - 1) < address) {
2422 /* GDB can request this when e.g. PC is 0xfffffffc */
2423 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2424 address,
2425 size);
2426 return ERROR_FAIL;
2427 }
2428
2429 return target->type->write_buffer(target, address, size, buffer);
2430 }
2431
/* Generic fallback for target->type->write_buffer: writes 'count' bytes
 * using the widest accesses the target supports (target_data_bits()/8),
 * first advancing an unaligned start address with progressively wider
 * single writes, then streaming the bulk with the largest access size. */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* If the address is misaligned at this power of two, emit one
		 * access of this size to reach the next wider boundary. */
		if (address & size) {
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Largest multiple of 'size' still remaining */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2468
2469 /* Single aligned words are guaranteed to use 16 or 32 bit access
2470 * mode respectively, otherwise data is handled as quickly as
2471 * possible
2472 */
2473 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2474 {
2475 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2476 size, address);
2477
2478 if (!target_was_examined(target)) {
2479 LOG_ERROR("Target not examined yet");
2480 return ERROR_FAIL;
2481 }
2482
2483 if (size == 0)
2484 return ERROR_OK;
2485
2486 if ((address + size - 1) < address) {
2487 /* GDB can request this when e.g. PC is 0xfffffffc */
2488 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2489 address,
2490 size);
2491 return ERROR_FAIL;
2492 }
2493
2494 return target->type->read_buffer(target, address, size, buffer);
2495 }
2496
/* Generic fallback for target->type->read_buffer: mirror image of
 * target_write_buffer_default() — handle an unaligned head with
 * progressively wider single reads, then stream the bulk with the
 * widest access size the target supports. */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* One access of this size advances to the next wider boundary */
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Largest multiple of 'size' still remaining */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2532
/* Compute a CRC over 'size' bytes of target memory at 'address'.
 * Tries the target type's own checksum_memory implementation first and
 * falls back to reading the memory and checksumming on the host. */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->checksum_memory) {
		LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* Host-side fallback */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): get_u32 followed by set_u32 on the same target is
		 * a round trip that leaves the buffer unchanged — looks like a
		 * historical no-op; confirm before removing. */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2576
2577 int target_blank_check_memory(struct target *target,
2578 struct target_memory_check_block *blocks, int num_blocks,
2579 uint8_t erased_value)
2580 {
2581 if (!target_was_examined(target)) {
2582 LOG_ERROR("Target not examined yet");
2583 return ERROR_FAIL;
2584 }
2585
2586 if (!target->type->blank_check_memory)
2587 return ERROR_NOT_IMPLEMENTED;
2588
2589 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2590 }
2591
2592 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2593 {
2594 uint8_t value_buf[8];
2595 if (!target_was_examined(target)) {
2596 LOG_ERROR("Target not examined yet");
2597 return ERROR_FAIL;
2598 }
2599
2600 int retval = target_read_memory(target, address, 8, 1, value_buf);
2601
2602 if (retval == ERROR_OK) {
2603 *value = target_buffer_get_u64(target, value_buf);
2604 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2605 address,
2606 *value);
2607 } else {
2608 *value = 0x0;
2609 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2610 address);
2611 }
2612
2613 return retval;
2614 }
2615
2616 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2617 {
2618 uint8_t value_buf[4];
2619 if (!target_was_examined(target)) {
2620 LOG_ERROR("Target not examined yet");
2621 return ERROR_FAIL;
2622 }
2623
2624 int retval = target_read_memory(target, address, 4, 1, value_buf);
2625
2626 if (retval == ERROR_OK) {
2627 *value = target_buffer_get_u32(target, value_buf);
2628 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2629 address,
2630 *value);
2631 } else {
2632 *value = 0x0;
2633 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2634 address);
2635 }
2636
2637 return retval;
2638 }
2639
2640 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2641 {
2642 uint8_t value_buf[2];
2643 if (!target_was_examined(target)) {
2644 LOG_ERROR("Target not examined yet");
2645 return ERROR_FAIL;
2646 }
2647
2648 int retval = target_read_memory(target, address, 2, 1, value_buf);
2649
2650 if (retval == ERROR_OK) {
2651 *value = target_buffer_get_u16(target, value_buf);
2652 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2653 address,
2654 *value);
2655 } else {
2656 *value = 0x0;
2657 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2658 address);
2659 }
2660
2661 return retval;
2662 }
2663
2664 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2665 {
2666 if (!target_was_examined(target)) {
2667 LOG_ERROR("Target not examined yet");
2668 return ERROR_FAIL;
2669 }
2670
2671 int retval = target_read_memory(target, address, 1, 1, value);
2672
2673 if (retval == ERROR_OK) {
2674 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2675 address,
2676 *value);
2677 } else {
2678 *value = 0x0;
2679 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2680 address);
2681 }
2682
2683 return retval;
2684 }
2685
2686 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2687 {
2688 int retval;
2689 uint8_t value_buf[8];
2690 if (!target_was_examined(target)) {
2691 LOG_ERROR("Target not examined yet");
2692 return ERROR_FAIL;
2693 }
2694
2695 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2696 address,
2697 value);
2698
2699 target_buffer_set_u64(target, value_buf, value);
2700 retval = target_write_memory(target, address, 8, 1, value_buf);
2701 if (retval != ERROR_OK)
2702 LOG_DEBUG("failed: %i", retval);
2703
2704 return retval;
2705 }
2706
2707 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2708 {
2709 int retval;
2710 uint8_t value_buf[4];
2711 if (!target_was_examined(target)) {
2712 LOG_ERROR("Target not examined yet");
2713 return ERROR_FAIL;
2714 }
2715
2716 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2717 address,
2718 value);
2719
2720 target_buffer_set_u32(target, value_buf, value);
2721 retval = target_write_memory(target, address, 4, 1, value_buf);
2722 if (retval != ERROR_OK)
2723 LOG_DEBUG("failed: %i", retval);
2724
2725 return retval;
2726 }
2727
2728 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2729 {
2730 int retval;
2731 uint8_t value_buf[2];
2732 if (!target_was_examined(target)) {
2733 LOG_ERROR("Target not examined yet");
2734 return ERROR_FAIL;
2735 }
2736
2737 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2738 address,
2739 value);
2740
2741 target_buffer_set_u16(target, value_buf, value);
2742 retval = target_write_memory(target, address, 2, 1, value_buf);
2743 if (retval != ERROR_OK)
2744 LOG_DEBUG("failed: %i", retval);
2745
2746 return retval;
2747 }
2748
2749 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2750 {
2751 int retval;
2752 if (!target_was_examined(target)) {
2753 LOG_ERROR("Target not examined yet");
2754 return ERROR_FAIL;
2755 }
2756
2757 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2758 address, value);
2759
2760 retval = target_write_memory(target, address, 1, 1, &value);
2761 if (retval != ERROR_OK)
2762 LOG_DEBUG("failed: %i", retval);
2763
2764 return retval;
2765 }
2766
2767 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2768 {
2769 int retval;
2770 uint8_t value_buf[8];
2771 if (!target_was_examined(target)) {
2772 LOG_ERROR("Target not examined yet");
2773 return ERROR_FAIL;
2774 }
2775
2776 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2777 address,
2778 value);
2779
2780 target_buffer_set_u64(target, value_buf, value);
2781 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2782 if (retval != ERROR_OK)
2783 LOG_DEBUG("failed: %i", retval);
2784
2785 return retval;
2786 }
2787
2788 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2789 {
2790 int retval;
2791 uint8_t value_buf[4];
2792 if (!target_was_examined(target)) {
2793 LOG_ERROR("Target not examined yet");
2794 return ERROR_FAIL;
2795 }
2796
2797 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2798 address,
2799 value);
2800
2801 target_buffer_set_u32(target, value_buf, value);
2802 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2803 if (retval != ERROR_OK)
2804 LOG_DEBUG("failed: %i", retval);
2805
2806 return retval;
2807 }
2808
2809 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2810 {
2811 int retval;
2812 uint8_t value_buf[2];
2813 if (!target_was_examined(target)) {
2814 LOG_ERROR("Target not examined yet");
2815 return ERROR_FAIL;
2816 }
2817
2818 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2819 address,
2820 value);
2821
2822 target_buffer_set_u16(target, value_buf, value);
2823 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2824 if (retval != ERROR_OK)
2825 LOG_DEBUG("failed: %i", retval);
2826
2827 return retval;
2828 }
2829
2830 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2831 {
2832 int retval;
2833 if (!target_was_examined(target)) {
2834 LOG_ERROR("Target not examined yet");
2835 return ERROR_FAIL;
2836 }
2837
2838 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2839 address, value);
2840
2841 retval = target_write_phys_memory(target, address, 1, 1, &value);
2842 if (retval != ERROR_OK)
2843 LOG_DEBUG("failed: %i", retval);
2844
2845 return retval;
2846 }
2847
/* Make the target named 'name' the current target of the command
 * context.  Fails if the name is unknown or the target's TAP is
 * disabled. */
static int find_target(struct command_invocation *cmd, const char *name)
{
	struct target *target = get_target(name);
	if (!target) {
		command_print(cmd, "Target: %s is unknown, try one of:\n", name);
		return ERROR_FAIL;
	}
	if (!target->tap->enabled) {
		command_print(cmd, "Target: TAP %s is disabled, "
			"can't be the current target\n",
			target->tap->dotted_name);
		return ERROR_FAIL;
	}

	cmd->ctx->current_target = target;
	/* keep any active session-level override in sync as well */
	if (cmd->ctx->current_target_override)
		cmd->ctx->current_target_override = target;

	return ERROR_OK;
}
2868
2869
/* 'targets' command: with one argument, select the named target as the
 * current one; with no argument (or if selection fails) print a table
 * of all configured targets and their states. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	struct target *target = all_targets;
	command_print(CMD, " TargetName Type Endian TapName State ");
	command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		/* '*' marks the current target of this command context */
		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2912
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* Most recent sense-line readings, consulted by the polling loop below */
static int power_dropout;
static int srst_asserted;

/* Edge-event flags latched by sense_handler() and consumed (then
 * cleared) by handle_target() */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
2922
/* Sample the power-dropout and srst sense lines via the JTAG adapter and
 * latch edge events into the run_* flags above.  Repeated dropout and
 * srst-deassert events are rate-limited to one per 2 seconds. */
static int sense_handler(void)
{
	/* previous-poll values, needed for edge detection */
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	/* rising edge of power: was dropped out last poll, present now */
	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	static int64_t last_power;
	/* rate-limit: ignore dropouts within 2s of the last reported one */
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	/* falling edge of srst: asserted last poll, released now */
	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	static int64_t last_srst;
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2974
/* process target state changes */
/* Periodic handler: runs the power/srst sense machinery (dispatching the
 * corresponding Tcl procs) and polls every enabled, examined target for
 * state changes, applying exponential backoff after poll failures. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		/* backoff: skip 'times' polls after a failure before retrying */
		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			if (target->backoff.times > 0) {
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target_set_examined(target);
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
							target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3087
3088 COMMAND_HANDLER(handle_reg_command)
3089 {
3090 LOG_DEBUG("-");
3091
3092 struct target *target = get_current_target(CMD_CTX);
3093 struct reg *reg = NULL;
3094
3095 /* list all available registers for the current target */
3096 if (CMD_ARGC == 0) {
3097 struct reg_cache *cache = target->reg_cache;
3098
3099 unsigned int count = 0;
3100 while (cache) {
3101 unsigned i;
3102
3103 command_print(CMD, "===== %s", cache->name);
3104
3105 for (i = 0, reg = cache->reg_list;
3106 i < cache->num_regs;
3107 i++, reg++, count++) {
3108 if (reg->exist == false || reg->hidden)
3109 continue;
3110 /* only print cached values if they are valid */
3111 if (reg->valid) {
3112 char *value = buf_to_hex_str(reg->value,
3113 reg->size);
3114 command_print(CMD,
3115 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3116 count, reg->name,
3117 reg->size, value,
3118 reg->dirty
3119 ? " (dirty)"
3120 : "");
3121 free(value);
3122 } else {
3123 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3124 count, reg->name,
3125 reg->size);
3126 }
3127 }
3128 cache = cache->next;
3129 }
3130
3131 return ERROR_OK;
3132 }
3133
3134 /* access a single register by its ordinal number */
3135 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3136 unsigned num;
3137 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3138
3139 struct reg_cache *cache = target->reg_cache;
3140 unsigned int count = 0;
3141 while (cache) {
3142 unsigned i;
3143 for (i = 0; i < cache->num_regs; i++) {
3144 if (count++ == num) {
3145 reg = &cache->reg_list[i];
3146 break;
3147 }
3148 }
3149 if (reg)
3150 break;
3151 cache = cache->next;
3152 }
3153
3154 if (!reg) {
3155 command_print(CMD, "%i is out of bounds, the current target "
3156 "has only %i registers (0 - %i)", num, count, count - 1);
3157 return ERROR_OK;
3158 }
3159 } else {
3160 /* access a single register by its name */
3161 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3162
3163 if (!reg)
3164 goto not_found;
3165 }
3166
3167 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3168
3169 if (!reg->exist)
3170 goto not_found;
3171
3172 /* display a register */
3173 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3174 && (CMD_ARGV[1][0] <= '9')))) {
3175 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3176 reg->valid = 0;
3177
3178 if (reg->valid == 0) {
3179 int retval = reg->type->get(reg);
3180 if (retval != ERROR_OK) {
3181 LOG_ERROR("Could not read register '%s'", reg->name);
3182 return retval;
3183 }
3184 }
3185 char *value = buf_to_hex_str(reg->value, reg->size);
3186 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3187 free(value);
3188 return ERROR_OK;
3189 }
3190
3191 /* set register value */
3192 if (CMD_ARGC == 2) {
3193 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3194 if (!buf)
3195 return ERROR_FAIL;
3196 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3197
3198 int retval = reg->type->set(reg, buf);
3199 if (retval != ERROR_OK) {
3200 LOG_ERROR("Could not write to register '%s'", reg->name);
3201 } else {
3202 char *value = buf_to_hex_str(reg->value, reg->size);
3203 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3204 free(value);
3205 }
3206
3207 free(buf);
3208
3209 return retval;
3210 }
3211
3212 return ERROR_COMMAND_SYNTAX_ERROR;
3213
3214 not_found:
3215 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3216 return ERROR_OK;
3217 }
3218
3219 COMMAND_HANDLER(handle_poll_command)
3220 {
3221 int retval = ERROR_OK;
3222 struct target *target = get_current_target(CMD_CTX);
3223
3224 if (CMD_ARGC == 0) {
3225 command_print(CMD, "background polling: %s",
3226 jtag_poll_get_enabled() ? "on" : "off");
3227 command_print(CMD, "TAP: %s (%s)",
3228 target->tap->dotted_name,
3229 target->tap->enabled ? "enabled" : "disabled");
3230 if (!target->tap->enabled)
3231 return ERROR_OK;
3232 retval = target_poll(target);
3233 if (retval != ERROR_OK)
3234 return retval;
3235 retval = target_arch_state(target);
3236 if (retval != ERROR_OK)
3237 return retval;
3238 } else if (CMD_ARGC == 1) {
3239 bool enable;
3240 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3241 jtag_poll_set_enabled(enable);
3242 } else
3243 return ERROR_COMMAND_SYNTAX_ERROR;
3244
3245 return retval;
3246 }
3247
3248 COMMAND_HANDLER(handle_wait_halt_command)
3249 {
3250 if (CMD_ARGC > 1)
3251 return ERROR_COMMAND_SYNTAX_ERROR;
3252
3253 unsigned ms = DEFAULT_HALT_TIMEOUT;
3254 if (1 == CMD_ARGC) {
3255 int retval = parse_uint(CMD_ARGV[0], &ms);
3256 if (retval != ERROR_OK)
3257 return ERROR_COMMAND_SYNTAX_ERROR;
3258 }
3259
3260 struct target *target = get_current_target(CMD_CTX);
3261 return target_wait_state(target, TARGET_HALTED, ms);
3262 }
3263
3264 /* wait for target state to change. The trick here is to have a low
3265 * latency for short waits and not to suck up all the CPU time
3266 * on longer waits.
3267 *
3268 * After 500ms, keep_alive() is invoked
3269 */
3270 int target_wait_state(struct target *target, enum target_state state, int ms)
3271 {
3272 int retval;
3273 int64_t then = 0, cur;
3274 bool once = true;
3275
3276 for (;;) {
3277 retval = target_poll(target);
3278 if (retval != ERROR_OK)
3279 return retval;
3280 if (target->state == state)
3281 break;
3282 cur = timeval_ms();
3283 if (once) {
3284 once = false;
3285 then = timeval_ms();
3286 LOG_DEBUG("waiting for target %s...",
3287 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3288 }
3289
3290 if (cur-then > 500)
3291 keep_alive();
3292
3293 if ((cur-then) > ms) {
3294 LOG_ERROR("timed out while waiting for target %s",
3295 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3296 return ERROR_FAIL;
3297 }
3298 }
3299
3300 return ERROR_OK;
3301 }
3302
3303 COMMAND_HANDLER(handle_halt_command)
3304 {
3305 LOG_DEBUG("-");
3306
3307 struct target *target = get_current_target(CMD_CTX);
3308
3309 target->verbose_halt_msg = true;
3310
3311 int retval = target_halt(target);
3312 if (retval != ERROR_OK)
3313 return retval;
3314
3315 if (CMD_ARGC == 1) {
3316 unsigned wait_local;
3317 retval = parse_uint(CMD_ARGV[0], &wait_local);
3318 if (retval != ERROR_OK)
3319 return ERROR_COMMAND_SYNTAX_ERROR;
3320 if (!wait_local)
3321 return ERROR_OK;
3322 }
3323
3324 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3325 }
3326
3327 COMMAND_HANDLER(handle_soft_reset_halt_command)
3328 {
3329 struct target *target = get_current_target(CMD_CTX);
3330
3331 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3332
3333 target_soft_reset_halt(target);
3334
3335 return ERROR_OK;
3336 }
3337
3338 COMMAND_HANDLER(handle_reset_command)
3339 {
3340 if (CMD_ARGC > 1)
3341 return ERROR_COMMAND_SYNTAX_ERROR;
3342
3343 enum target_reset_mode reset_mode = RESET_RUN;
3344 if (CMD_ARGC == 1) {
3345 const struct jim_nvp *n;
3346 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3347 if ((!n->name) || (n->value == RESET_UNKNOWN))
3348 return ERROR_COMMAND_SYNTAX_ERROR;
3349 reset_mode = n->value;
3350 }
3351
3352 /* reset *all* targets */
3353 return target_process_reset(CMD, reset_mode);
3354 }
3355
3356
3357 COMMAND_HANDLER(handle_resume_command)
3358 {
3359 int current = 1;
3360 if (CMD_ARGC > 1)
3361 return ERROR_COMMAND_SYNTAX_ERROR;
3362
3363 struct target *target = get_current_target(CMD_CTX);
3364
3365 /* with no CMD_ARGV, resume from current pc, addr = 0,
3366 * with one arguments, addr = CMD_ARGV[0],
3367 * handle breakpoints, not debugging */
3368 target_addr_t addr = 0;
3369 if (CMD_ARGC == 1) {
3370 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3371 current = 0;
3372 }
3373
3374 return target_resume(target, current, addr, 1, 0);
3375 }
3376
3377 COMMAND_HANDLER(handle_step_command)
3378 {
3379 if (CMD_ARGC > 1)
3380 return ERROR_COMMAND_SYNTAX_ERROR;
3381
3382 LOG_DEBUG("-");
3383
3384 /* with no CMD_ARGV, step from current pc, addr = 0,
3385 * with one argument addr = CMD_ARGV[0],
3386 * handle breakpoints, debugging */
3387 target_addr_t addr = 0;
3388 int current_pc = 1;
3389 if (CMD_ARGC == 1) {
3390 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3391 current_pc = 0;
3392 }
3393
3394 struct target *target = get_current_target(CMD_CTX);
3395
3396 return target_step(target, current_pc, addr, 1);
3397 }
3398
/**
 * Pretty-print a block of memory read from a target as hex values,
 * 32 data bytes per output line, each line prefixed with its address.
 *
 * @param cmd     command invocation used for the output
 * @param target  target the data came from; used to decode multi-byte
 *                values with the target's endianness
 * @param address start address of the data (for the line prefix)
 * @param size    element width in bytes: 1, 2, 4 or 8 (anything else is rejected)
 * @param count   number of elements in @a buffer
 * @param buffer  raw bytes, @a count * @a size long, in target byte order
 */
void target_handle_md_output(struct command_invocation *cmd,
	struct target *target, target_addr_t address, unsigned size,
	unsigned count, const uint8_t *buffer)
{
	/* fixed line width: 32 data bytes, i.e. 32/size elements per line */
	const unsigned line_bytecnt = 32;
	unsigned line_modulo = line_bytecnt / size;

	/* one line of text is accumulated here before being printed */
	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	/* pick a zero-padded hex format matching the element width */
	const char *value_fmt;
	switch (size) {
	case 8:
		value_fmt = "%16.16"PRIx64" ";
		break;
	case 4:
		value_fmt = "%8.8"PRIx64" ";
		break;
	case 2:
		value_fmt = "%4.4"PRIx64" ";
		break;
	case 1:
		value_fmt = "%2.2"PRIx64" ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		/* at the start of each line, emit the address prefix */
		if (i % line_modulo == 0) {
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					TARGET_ADDR_FMT ": ",
					(address + (i * size)));
		}

		/* decode one element, honoring the target's endianness */
		uint64_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		switch (size) {
		case 8:
			value = target_buffer_get_u64(target, value_ptr);
			break;
		case 4:
			value = target_buffer_get_u32(target, value_ptr);
			break;
		case 2:
			value = target_buffer_get_u16(target, value_ptr);
			break;
		case 1:
			value = *value_ptr;
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		/* flush the accumulated line at end-of-line or end-of-data */
		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			command_print(cmd, "%s", output);
			output_len = 0;
		}
	}
}
3462
3463 COMMAND_HANDLER(handle_md_command)
3464 {
3465 if (CMD_ARGC < 1)
3466 return ERROR_COMMAND_SYNTAX_ERROR;
3467
3468 unsigned size = 0;
3469 switch (CMD_NAME[2]) {
3470 case 'd':
3471 size = 8;
3472 break;
3473 case 'w':
3474 size = 4;
3475 break;
3476 case 'h':
3477 size = 2;
3478 break;
3479 case 'b':
3480 size = 1;
3481 break;
3482 default:
3483 return ERROR_COMMAND_SYNTAX_ERROR;
3484 }
3485
3486 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3487 int (*fn)(struct target *target,
3488 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3489 if (physical) {
3490 CMD_ARGC--;
3491 CMD_ARGV++;
3492 fn = target_read_phys_memory;
3493 } else
3494 fn = target_read_memory;
3495 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3496 return ERROR_COMMAND_SYNTAX_ERROR;
3497
3498 target_addr_t address;
3499 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3500
3501 unsigned count = 1;
3502 if (CMD_ARGC == 2)
3503 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3504
3505 uint8_t *buffer = calloc(count, size);
3506 if (!buffer) {
3507 LOG_ERROR("Failed to allocate md read buffer");
3508 return ERROR_FAIL;
3509 }
3510
3511 struct target *target = get_current_target(CMD_CTX);
3512 int retval = fn(target, address, size, count, buffer);
3513 if (retval == ERROR_OK)
3514 target_handle_md_output(CMD, target, address, size, count, buffer);
3515
3516 free(buffer);
3517
3518 return retval;
3519 }
3520
3521 typedef int (*target_write_fn)(struct target *target,
3522 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3523
3524 static int target_fill_mem(struct target *target,
3525 target_addr_t address,
3526 target_write_fn fn,
3527 unsigned data_size,
3528 /* value */
3529 uint64_t b,
3530 /* count */
3531 unsigned c)
3532 {
3533 /* We have to write in reasonably large chunks to be able
3534 * to fill large memory areas with any sane speed */
3535 const unsigned chunk_size = 16384;
3536 uint8_t *target_buf = malloc(chunk_size * data_size);
3537 if (!target_buf) {
3538 LOG_ERROR("Out of memory");
3539 return ERROR_FAIL;
3540 }
3541
3542 for (unsigned i = 0; i < chunk_size; i++) {
3543 switch (data_size) {
3544 case 8:
3545 target_buffer_set_u64(target, target_buf + i * data_size, b);
3546 break;
3547 case 4:
3548 target_buffer_set_u32(target, target_buf + i * data_size, b);
3549 break;
3550 case 2:
3551 target_buffer_set_u16(target, target_buf + i * data_size, b);
3552 break;
3553 case 1:
3554 target_buffer_set_u8(target, target_buf + i * data_size, b);
3555 break;
3556 default:
3557 exit(-1);
3558 }
3559 }
3560
3561 int retval = ERROR_OK;
3562
3563 for (unsigned x = 0; x < c; x += chunk_size) {
3564 unsigned current;
3565 current = c - x;
3566 if (current > chunk_size)
3567 current = chunk_size;
3568 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3569 if (retval != ERROR_OK)
3570 break;
3571 /* avoid GDB timeouts */
3572 keep_alive();
3573 }
3574 free(target_buf);
3575
3576 return retval;
3577 }
3578
3579
3580 COMMAND_HANDLER(handle_mw_command)
3581 {
3582 if (CMD_ARGC < 2)
3583 return ERROR_COMMAND_SYNTAX_ERROR;
3584 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3585 target_write_fn fn;
3586 if (physical) {
3587 CMD_ARGC--;
3588 CMD_ARGV++;
3589 fn = target_write_phys_memory;
3590 } else
3591 fn = target_write_memory;
3592 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3593 return ERROR_COMMAND_SYNTAX_ERROR;
3594
3595 target_addr_t address;
3596 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3597
3598 uint64_t value;
3599 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3600
3601 unsigned count = 1;
3602 if (CMD_ARGC == 3)
3603 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3604
3605 struct target *target = get_current_target(CMD_CTX);
3606 unsigned wordsize;
3607 switch (CMD_NAME[2]) {
3608 case 'd':
3609 wordsize = 8;
3610 break;
3611 case 'w':
3612 wordsize = 4;
3613 break;
3614 case 'h':
3615 wordsize = 2;
3616 break;
3617 case 'b':
3618 wordsize = 1;
3619 break;
3620 default:
3621 return ERROR_COMMAND_SYNTAX_ERROR;
3622 }
3623
3624 return target_fill_mem(target, address, fn, wordsize, value, count);
3625 }
3626
/* Parse the shared arguments of the load_image family:
 *   <file> [base_address [type [min_address [size]]]]
 * On return, *min_address/*max_address bound the window of target
 * addresses that may be written (max is computed as min + size). */
static COMMAND_HELPER(parse_load_image_command, struct image *image,
		target_addr_t *min_address, target_addr_t *max_address)
{
	if (CMD_ARGC < 1 || CMD_ARGC > 5)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* a base address isn't always necessary,
	 * default to 0x0 (i.e. don't relocate) */
	if (CMD_ARGC >= 2) {
		target_addr_t addr;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
		image->base_address = addr;
		image->base_address_set = true;
	} else
		image->base_address_set = false;

	image->start_address_set = false;

	/* optional load window: min address and (relative) size */
	if (CMD_ARGC >= 4)
		COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
	if (CMD_ARGC == 5) {
		COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
		/* use size (given) to find max (required) */
		*max_address += *min_address;
	}

	if (*min_address > *max_address)
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
3658
/* load_image <file> [address [type [min_address [size]]]]
 * Open an image file and write each of its sections into target
 * memory, clipping every section to the [min_address, max_address)
 * window parsed by parse_load_image_command(). */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	/* time the download so we can report throughput at the end */
	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* keep only the part of the section that falls inside the
		 * [min_address, max_address) window */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	/* report size and throughput only on a fully successful download */
	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3742
3743 COMMAND_HANDLER(handle_dump_image_command)
3744 {
3745 struct fileio *fileio;
3746 uint8_t *buffer;
3747 int retval, retvaltemp;
3748 target_addr_t address, size;
3749 struct duration bench;
3750 struct target *target = get_current_target(CMD_CTX);
3751
3752 if (CMD_ARGC != 3)
3753 return ERROR_COMMAND_SYNTAX_ERROR;
3754
3755 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3756 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3757
3758 uint32_t buf_size = (size > 4096) ? 4096 : size;
3759 buffer = malloc(buf_size);
3760 if (!buffer)
3761 return ERROR_FAIL;
3762
3763 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3764 if (retval != ERROR_OK) {
3765 free(buffer);
3766 return retval;
3767 }
3768
3769 duration_start(&bench);
3770
3771 while (size > 0) {
3772 size_t size_written;
3773 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3774 retval = target_read_buffer(target, address, this_run_size, buffer);
3775 if (retval != ERROR_OK)
3776 break;
3777
3778 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3779 if (retval != ERROR_OK)
3780 break;
3781
3782 size -= this_run_size;
3783 address += this_run_size;
3784 }
3785
3786 free(buffer);
3787
3788 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3789 size_t filesize;
3790 retval = fileio_size(fileio, &filesize);
3791 if (retval != ERROR_OK)
3792 return retval;
3793 command_print(CMD,
3794 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3795 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3796 }
3797
3798 retvaltemp = fileio_close(fileio);
3799 if (retvaltemp != ERROR_OK)
3800 return retvaltemp;
3801
3802 return retval;
3803 }
3804
/* How thoroughly the verify_image family compares image and memory. */
enum verify_mode {
	IMAGE_TEST = 0,		/* just print section layout, no comparison */
	IMAGE_VERIFY = 1,	/* CRC check; on mismatch fall back to byte-wise diff */
	IMAGE_CHECKSUM_ONLY = 2	/* CRC check only; fail immediately on mismatch */
};
3810
3811 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3812 {
3813 uint8_t *buffer;
3814 size_t buf_cnt;
3815 uint32_t image_size;
3816 int retval;
3817 uint32_t checksum = 0;
3818 uint32_t mem_checksum = 0;
3819
3820 struct image image;
3821
3822 struct target *target = get_current_target(CMD_CTX);
3823
3824 if (CMD_ARGC < 1)
3825 return ERROR_COMMAND_SYNTAX_ERROR;
3826
3827 if (!target) {
3828 LOG_ERROR("no target selected");
3829 return ERROR_FAIL;
3830 }
3831
3832 struct duration bench;
3833 duration_start(&bench);
3834
3835 if (CMD_ARGC >= 2) {
3836 target_addr_t addr;
3837 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3838 image.base_address = addr;
3839 image.base_address_set = true;
3840 } else {
3841 image.base_address_set = false;
3842 image.base_address = 0x0;
3843 }
3844
3845 image.start_address_set = false;
3846
3847 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3848 if (retval != ERROR_OK)
3849 return retval;
3850
3851 image_size = 0x0;
3852 int diffs = 0;
3853 retval = ERROR_OK;
3854 for (unsigned int i = 0; i < image.num_sections; i++) {
3855 buffer = malloc(image.sections[i].size);
3856 if (!buffer) {
3857 command_print(CMD,
3858 "error allocating buffer for section (%" PRIu32 " bytes)",
3859 image.sections[i].size);
3860 break;
3861 }
3862 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3863 if (retval != ERROR_OK) {
3864 free(buffer);
3865 break;
3866 }
3867
3868 if (verify >= IMAGE_VERIFY) {
3869 /* calculate checksum of image */
3870 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3871 if (retval != ERROR_OK) {
3872 free(buffer);
3873 break;
3874 }
3875
3876 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3877 if (retval != ERROR_OK) {
3878 free(buffer);
3879 break;
3880 }
3881 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3882 LOG_ERROR("checksum mismatch");
3883 free(buffer);
3884 retval = ERROR_FAIL;
3885 goto done;
3886 }
3887 if (checksum != mem_checksum) {
3888 /* failed crc checksum, fall back to a binary compare */
3889 uint8_t *data;
3890
3891 if (diffs == 0)
3892 LOG_ERROR("checksum mismatch - attempting binary compare");
3893
3894 data = malloc(buf_cnt);
3895
3896 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3897 if (retval == ERROR_OK) {
3898 uint32_t t;
3899 for (t = 0; t < buf_cnt; t++) {
3900 if (data[t] != buffer[t]) {
3901 command_print(CMD,
3902 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3903 diffs,
3904 (unsigned)(t + image.sections[i].base_address),
3905 data[t],
3906 buffer[t]);
3907 if (diffs++ >= 127) {
3908 command_print(CMD, "More than 128 errors, the rest are not printed.");
3909 free(data);
3910 free(buffer);
3911 goto done;
3912 }
3913 }
3914 keep_alive();
3915 }
3916 }
3917 free(data);
3918 }
3919 } else {
3920 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3921 image.sections[i].base_address,
3922 buf_cnt);
3923 }
3924
3925 free(buffer);
3926 image_size += buf_cnt;
3927 }
3928 if (diffs > 0)
3929 command_print(CMD, "No more differences found.");
3930 done:
3931 if (diffs > 0)
3932 retval = ERROR_FAIL;
3933 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3934 command_print(CMD, "verified %" PRIu32 " bytes "
3935 "in %fs (%0.3f KiB/s)", image_size,
3936 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3937 }
3938
3939 image_close(&image);
3940
3941 return retval;
3942 }
3943
/* verify_image_checksum: CRC-only comparison, fails on first mismatch. */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3948
/* verify_image: CRC comparison with byte-wise diff fallback on mismatch. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3953
/* test_image: only print the image's section layout, no comparison. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3958
/* Print every breakpoint installed on the current target. */
static int handle_bp_command_list(struct command_invocation *cmd)
{
	struct target *target = get_current_target(cmd->ctx);
	struct breakpoint *breakpoint = target->breakpoints;
	/* walk the singly-linked breakpoint list */
	while (breakpoint) {
		if (breakpoint->type == BKPT_SOFT) {
			/* software breakpoints also show the saved original instruction */
			char *buf = buf_to_hex_str(breakpoint->orig_instr,
					breakpoint->length);
			command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, 0x%s",
					breakpoint->address,
					breakpoint->length,
					buf);
			free(buf);
		} else {
			/* hardware breakpoints: context (asid only), hybrid
			 * (address + asid) or plain address breakpoints */
			if ((breakpoint->address == 0) && (breakpoint->asid != 0))
				command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %u",
							breakpoint->asid,
							breakpoint->length, breakpoint->number);
			else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
				command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
							breakpoint->address,
							breakpoint->length, breakpoint->number);
				command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
							breakpoint->asid);
			} else
				command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
							breakpoint->address,
							breakpoint->length, breakpoint->number);
		}

		breakpoint = breakpoint->next;
	}
	return ERROR_OK;
}
3993
/* Install one breakpoint on the current target: plain address
 * (asid == 0), context (addr == 0), or hybrid (both non-zero).
 * hw selects BKPT_SOFT vs BKPT_HARD. */
static int handle_bp_command_set(struct command_invocation *cmd,
		target_addr_t addr, uint32_t asid, uint32_t length, int hw)
{
	struct target *target = get_current_target(cmd->ctx);
	int retval;

	if (asid == 0) {
		/* plain address breakpoint */
		retval = breakpoint_add(target, addr, length, hw);
		/* error is always logged in breakpoint_add(), do not print it again */
		if (retval == ERROR_OK)
			command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);

	} else if (addr == 0) {
		/* context breakpoint, keyed on the address-space id only */
		if (!target->type->add_context_breakpoint) {
			LOG_ERROR("Context breakpoint not available");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		retval = context_breakpoint_add(target, asid, length, hw);
		/* error is always logged in context_breakpoint_add(), do not print it again */
		if (retval == ERROR_OK)
			command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);

	} else {
		/* hybrid breakpoint: address and context must both match */
		if (!target->type->add_hybrid_breakpoint) {
			LOG_ERROR("Hybrid breakpoint not available");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
		/* error is always logged in hybrid_breakpoint_add(), do not print it again */
		if (retval == ERROR_OK)
			command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
	}
	return retval;
}
4028
/* bp [<address> [<asid>] <length> ['hw'|'hw_ctx']]
 * With no arguments, list breakpoints; otherwise parse the supported
 * argument combinations and delegate to handle_bp_command_set(). */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
	case 0:
		return handle_bp_command_list(CMD);

	case 2:
		/* <address> <length>: software breakpoint */
		asid = 0;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	case 3:
		/* <address> <length> 'hw' or <asid> <length> 'hw_ctx' */
		if (strcmp(CMD_ARGV[2], "hw") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			asid = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			addr = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		}
		/* fallthrough */
	case 4:
		/* <address> <asid> <length>: hybrid hardware breakpoint */
		hw = BKPT_HARD;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
4072
4073 COMMAND_HANDLER(handle_rbp_command)
4074 {
4075 if (CMD_ARGC != 1)
4076 return ERROR_COMMAND_SYNTAX_ERROR;
4077
4078 struct target *target = get_current_target(CMD_CTX);
4079
4080 if (!strcmp(CMD_ARGV[0], "all")) {
4081 breakpoint_remove_all(target);
4082 } else {
4083 target_addr_t addr;
4084 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4085
4086 breakpoint_remove(target, addr);
4087 }
4088
4089 return ERROR_OK;
4090 }
4091
/* wp [<address> <length> [r|w|a [value [mask]]]]
 * With no arguments, list watchpoints; otherwise add one. The
 * argument cases cascade via fallthrough so later (optional)
 * arguments are parsed first. */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC == 0) {
		/* list all watchpoints of the current target */
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx32
					", mask: 0x%8.8" PRIx32,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	/* defaults for the optional arguments */
	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint32_t data_value = 0x0;
	uint32_t data_mask = 0xffffffff;

	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
		/* fall through */
	case 3:
		/* access type: read, write, or any access */
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
4159
4160 COMMAND_HANDLER(handle_rwp_command)
4161 {
4162 if (CMD_ARGC != 1)
4163 return ERROR_COMMAND_SYNTAX_ERROR;
4164
4165 target_addr_t addr;
4166 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4167
4168 struct target *target = get_current_target(CMD_CTX);
4169 watchpoint_remove(target, addr);
4170
4171 return ERROR_OK;
4172 }
4173
4174 /**
4175 * Translate a virtual address to a physical address.
4176 *
4177 * The low-level target implementation must have logged a detailed error
4178 * which is forwarded to telnet/GDB session.
4179 */
4180 COMMAND_HANDLER(handle_virt2phys_command)
4181 {
4182 if (CMD_ARGC != 1)
4183 return ERROR_COMMAND_SYNTAX_ERROR;
4184
4185 target_addr_t va;
4186 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4187 target_addr_t pa;
4188
4189 struct target *target = get_current_target(CMD_CTX);
4190 int retval = target->type->virt2phys(target, va, &pa);
4191 if (retval == ERROR_OK)
4192 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4193
4194 return retval;
4195 }
4196
/* Best-effort write of len bytes to f; short writes are logged
 * (with errno) but not otherwise propagated. */
static void write_data(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4203
/* Emit a 32-bit value to f using the target's byte order. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t raw[4];

	target_buffer_set_u32(target, raw, l);
	write_data(f, raw, sizeof(raw));
}
4211
/* Emit the characters of s (without the trailing NUL) to f. */
static void write_string(FILE *f, char *s)
{
	size_t len = strlen(s);
	write_data(f, s, len);
}
4216
typedef unsigned char UNIT[2]; /* unit of profiling */

/**
 * Dump a gmon.out histogram file.
 *
 * @param samples       array of sampled PC values
 * @param sample_num    number of valid entries in @a samples
 * @param filename      output file name; silently returns if it cannot be opened
 * @param with_range    true when an explicit [start_address, end_address) was given
 * @param start_address histogram lower bound (only used when @a with_range)
 * @param end_address   histogram upper bound (only used when @a with_range)
 * @param target        used for the endianness of on-disk 32-bit fields
 * @param duration_ms   wall-clock duration of the sampling run
 */
static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
		uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
{
	uint32_t i;

	/* Without an explicit range, min/max are derived from samples[];
	 * with zero samples that would read uninitialized memory. */
	if (!with_range && sample_num == 0)
		return;

	FILE *f = fopen(filename, "w");
	if (!f)
		return;
	write_string(f, "gmon");
	write_long(f, 0x00000001, target); /* Version */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */

	uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
	write_data(f, &zero, 1);

	/* figure out bucket size */
	uint32_t min;
	uint32_t max;
	if (with_range) {
		min = start_address;
		max = end_address;
	} else {
		min = samples[0];
		max = samples[0];
		for (i = 0; i < sample_num; i++) {
			if (min > samples[i])
				min = samples[i];
			if (max < samples[i])
				max = samples[i];
		}

		/* max should be (largest sample + 1)
		 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
		if (max < UINT32_MAX)
			max++;

		/* gprof requires (max - min) >= 2; this also handles a constant PC,
		 * where otherwise min == max and the bucket math divides by zero */
		while ((max - min) < 2) {
			if (max < UINT32_MAX)
				max++;
			else
				min--;
		}
	}

	uint32_t address_space = max - min;

	/* FIXME: What is the reasonable number of buckets?
	 * The profiling result will be more accurate if there are enough buckets. */
	static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
	uint32_t num_buckets = address_space / sizeof(UNIT);
	if (num_buckets > max_buckets)
		num_buckets = max_buckets;
	/* calloc() allocates and zeroes the histogram in one step */
	int *buckets = calloc(num_buckets, sizeof(int));
	if (!buckets) {
		fclose(f);
		return;
	}
	for (i = 0; i < sample_num; i++) {
		uint32_t address = samples[i];

		if ((address < min) || (max <= address))
			continue;

		/* 64-bit intermediates: (address - min) * num_buckets can
		 * overflow 32 bits */
		long long a = address - min;
		long long b = num_buckets;
		long long c = address_space;
		int index_t = (a * b) / c;
		buckets[index_t]++;
	}

	/* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
	write_long(f, min, target); /* low_pc */
	write_long(f, max, target); /* high_pc */
	write_long(f, num_buckets, target); /* # of buckets */
	/* Guard duration_ms == 0: an infinite float converted to int inside
	 * write_long() would be undefined behavior. */
	float sample_rate = duration_ms ? sample_num / (duration_ms / 1000.0) : 0;
	write_long(f, sample_rate, target);
	write_string(f, "seconds");
	for (i = 0; i < (15 - strlen("seconds")); i++)
		write_data(f, &zero, 1);
	write_string(f, "s");

	/*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */

	char *data = malloc(2 * num_buckets);
	if (data) {
		/* Each bucket is emitted as a little-endian 16-bit count,
		 * saturated at 65535. */
		for (i = 0; i < num_buckets; i++) {
			int val = buckets[i];
			if (val > 65535)
				val = 65535;
			data[i * 2] = val & 0xff;
			data[i * 2 + 1] = (val >> 8) & 0xff;
		}
		write_data(f, data, num_buckets * 2);
		free(data);
	}
	free(buckets);

	fclose(f);
}
4324
4325 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4326 * which will be used as a random sampling of PC */
4327 COMMAND_HANDLER(handle_profile_command)
4328 {
4329 struct target *target = get_current_target(CMD_CTX);
4330
4331 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4332 return ERROR_COMMAND_SYNTAX_ERROR;
4333
4334 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4335 uint32_t offset;
4336 uint32_t num_of_samples;
4337 int retval = ERROR_OK;
4338 bool halted_before_profiling = target->state == TARGET_HALTED;
4339
4340 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4341
4342 uint32_t start_address = 0;
4343 uint32_t end_address = 0;
4344 bool with_range = false;
4345 if (CMD_ARGC == 4) {
4346 with_range = true;
4347 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4348 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4349 if (start_address > end_address || (end_address - start_address) < 2) {
4350 command_print(CMD, "Error: end - start < 2");
4351 return ERROR_COMMAND_ARGUMENT_INVALID;
4352 }
4353 }
4354
4355 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4356 if (!samples) {
4357 LOG_ERROR("No memory to store samples.");
4358 return ERROR_FAIL;
4359 }
4360
4361 uint64_t timestart_ms = timeval_ms();
4362 /**
4363 * Some cores let us sample the PC without the
4364 * annoying halt/resume step; for example, ARMv7 PCSR.
4365 * Provide a way to use that more efficient mechanism.
4366 */
4367 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4368 &num_of_samples, offset);
4369 if (retval != ERROR_OK) {
4370 free(samples);
4371 return retval;
4372 }
4373 uint32_t duration_ms = timeval_ms() - timestart_ms;
4374
4375 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4376
4377 retval = target_poll(target);
4378 if (retval != ERROR_OK) {
4379 free(samples);
4380 return retval;
4381 }
4382
4383 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4384 /* The target was halted before we started and is running now. Halt it,
4385 * for consistency. */
4386 retval = target_halt(target);
4387 if (retval != ERROR_OK) {
4388 free(samples);
4389 return retval;
4390 }
4391 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4392 /* The target was running before we started and is halted now. Resume
4393 * it, for consistency. */
4394 retval = target_resume(target, 1, 0, 0, 0);
4395 if (retval != ERROR_OK) {
4396 free(samples);
4397 return retval;
4398 }
4399 }
4400
4401 retval = target_poll(target);
4402 if (retval != ERROR_OK) {
4403 free(samples);
4404 return retval;
4405 }
4406
4407 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4408 with_range, start_address, end_address, target, duration_ms);
4409 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4410
4411 free(samples);
4412 return retval;
4413 }
4414
4415 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4416 {
4417 char *namebuf;
4418 Jim_Obj *obj_name, *obj_val;
4419 int result;
4420
4421 namebuf = alloc_printf("%s(%d)", varname, idx);
4422 if (!namebuf)
4423 return JIM_ERR;
4424
4425 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4426 jim_wide wide_val = val;
4427 obj_val = Jim_NewWideObj(interp, wide_val);
4428 if (!obj_name || !obj_val) {
4429 free(namebuf);
4430 return JIM_ERR;
4431 }
4432
4433 Jim_IncrRefCount(obj_name);
4434 Jim_IncrRefCount(obj_val);
4435 result = Jim_SetVariable(interp, obj_name, obj_val);
4436 Jim_DecrRefCount(interp, obj_name);
4437 Jim_DecrRefCount(interp, obj_val);
4438 free(namebuf);
4439 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4440 return result;
4441 }
4442
/**
 * Implements the deprecated Tcl command 'mem2array': reads target memory
 * into a Tcl array variable, one array element per memory element.
 *
 * Returns JIM_OK on success, otherwise JIM_ERR with an explanatory message
 * in the interpreter result.  On a mid-transfer read failure, elements read
 * so far have already been stored into the Tcl array.
 */
static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
{
	int e;

	LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");

	/* argv[0] = name of array to receive the data
	 * argv[1] = desired element width in bits
	 * argv[2] = memory address
	 * argv[3] = count of times to read
	 * argv[4] = optional "phys"
	 */
	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
		return JIM_ERR;
	}

	/* Arg 0: Name of the array variable */
	const char *varname = Jim_GetString(argv[0], NULL);

	/* Arg 1: Bit width of one element */
	long l;
	e = Jim_GetLong(interp, argv[1], &l);
	if (e != JIM_OK)
		return e;
	const unsigned int width_bits = l;

	if (width_bits != 8 &&
		width_bits != 16 &&
		width_bits != 32 &&
		width_bits != 64) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
		return JIM_ERR;
	}
	/* Element width in bytes from here on. */
	const unsigned int width = width_bits / 8;

	/* Arg 2: Memory address */
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[2], &wide_addr);
	if (e != JIM_OK)
		return e;
	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 3: Number of elements to read */
	e = Jim_GetLong(interp, argv[3], &l);
	if (e != JIM_OK)
		return e;
	size_t len = l;

	/* Arg 4: phys */
	bool is_phys = false;
	if (argc > 4) {
		int str_len = 0;
		const char *phys = Jim_GetString(argv[4], &str_len);
		/* NOTE(review): strncmp with str_len == 0 (empty string) matches
		 * trivially, so "" is accepted as "phys" — historical behavior. */
		if (!strncmp(phys, "phys", str_len))
			is_phys = true;
		else
			return JIM_ERR;
	}

	/* Argument checks */
	if (len == 0) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
		return JIM_ERR;
	}
	/* Reject transfers whose end address wraps past the address space. */
	if ((addr + (len * width)) < addr) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
		return JIM_ERR;
	}
	if (len > 65536) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"mem2array: too large read request, exceeds 64K items", NULL);
		return JIM_ERR;
	}

	/* The address must be naturally aligned for the element width. */
	if ((width == 1) ||
		((width == 2) && ((addr & 1) == 0)) ||
		((width == 4) && ((addr & 3) == 0)) ||
		((width == 8) && ((addr & 7) == 0))) {
		/* alignment correct */
	} else {
		char buf[100];
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
				addr,
				width);
		Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
		return JIM_ERR;
	}

	/* Transfer loop */

	/* index counter */
	size_t idx = 0;

	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);
	if (!buffer)
		return JIM_ERR;

	/* assume ok */
	e = JIM_OK;
	while (len) {
		/* Slurp... in buffer size chunks */
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */

		int retval;
		if (is_phys)
			retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_read_memory(target, addr, width, chunk_len, buffer);
		if (retval != ERROR_OK) {
			/* BOO !*/
			LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
					addr,
					width,
					chunk_len);
			Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
			Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
			e = JIM_ERR;
			break;
		} else {
			/* Convert each element from target byte order and store it
			 * into the Tcl array. */
			for (size_t i = 0; i < chunk_len ; i++, idx++) {
				uint64_t v = 0;
				switch (width) {
					case 8:
						v = target_buffer_get_u64(target, &buffer[i*width]);
						break;
					case 4:
						v = target_buffer_get_u32(target, &buffer[i*width]);
						break;
					case 2:
						v = target_buffer_get_u16(target, &buffer[i*width]);
						break;
					case 1:
						v = buffer[i] & 0x0ff;
						break;
				}
				new_u64_array_element(interp, varname, idx, v);
			}
			len -= chunk_len;
			addr += chunk_len * width;
		}
	}

	free(buffer);

	Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));

	return e;
}
4600
4601 static int target_jim_read_memory(Jim_Interp *interp, int argc,
4602 Jim_Obj * const *argv)
4603 {
4604 /*
4605 * argv[1] = memory address
4606 * argv[2] = desired element width in bits
4607 * argv[3] = number of elements to read
4608 * argv[4] = optional "phys"
4609 */
4610
4611 if (argc < 4 || argc > 5) {
4612 Jim_WrongNumArgs(interp, 1, argv, "address width count ['phys']");
4613 return JIM_ERR;
4614 }
4615
4616 /* Arg 1: Memory address. */
4617 jim_wide wide_addr;
4618 int e;
4619 e = Jim_GetWide(interp, argv[1], &wide_addr);
4620
4621 if (e != JIM_OK)
4622 return e;
4623
4624 target_addr_t addr = (target_addr_t)wide_addr;
4625
4626 /* Arg 2: Bit width of one element. */
4627 long l;
4628 e = Jim_GetLong(interp, argv[2], &l);
4629
4630 if (e != JIM_OK)
4631 return e;
4632
4633 const unsigned int width_bits = l;
4634
4635 /* Arg 3: Number of elements to read. */
4636 e = Jim_GetLong(interp, argv[3], &l);
4637
4638 if (e != JIM_OK)
4639 return e;
4640
4641 size_t count = l;
4642
4643 /* Arg 4: Optional 'phys'. */
4644 bool is_phys = false;
4645
4646 if (argc > 4) {
4647 const char *phys = Jim_GetString(argv[4], NULL);
4648
4649 if (strcmp(phys, "phys")) {
4650 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4651 return JIM_ERR;
4652 }
4653
4654 is_phys = true;
4655 }
4656
4657 switch (width_bits) {
4658 case 8:
4659 case 16:
4660 case 32:
4661 case 64:
4662 break;
4663 default:
4664 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4665 return JIM_ERR;
4666 }
4667
4668 const unsigned int width = width_bits / 8;
4669
4670 if ((addr + (count * width)) < addr) {
4671 Jim_SetResultString(interp, "read_memory: addr + count wraps to zero", -1);
4672 return JIM_ERR;
4673 }
4674
4675 if (count > 65536) {
4676 Jim_SetResultString(interp, "read_memory: too large read request, exeeds 64K elements", -1);
4677 return JIM_ERR;
4678 }
4679
4680 struct command_context *cmd_ctx = current_command_context(interp);
4681 assert(cmd_ctx != NULL);
4682 struct target *target = get_current_target(cmd_ctx);
4683
4684 const size_t buffersize = 4096;
4685 uint8_t *buffer = malloc(buffersize);
4686
4687 if (!buffer) {
4688 LOG_ERROR("Failed to allocate memory");
4689 return JIM_ERR;
4690 }
4691
4692 Jim_Obj *result_list = Jim_NewListObj(interp, NULL, 0);
4693 Jim_IncrRefCount(result_list);
4694
4695 while (count > 0) {
4696 const unsigned int max_chunk_len = buffersize / width;
4697 const size_t chunk_len = MIN(count, max_chunk_len);
4698
4699 int retval;
4700
4701 if (is_phys)
4702 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4703 else
4704 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4705
4706 if (retval != ERROR_OK) {
4707 LOG_ERROR("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4708 addr, width_bits, chunk_len);
4709 Jim_SetResultString(interp, "read_memory: failed to read memory", -1);
4710 e = JIM_ERR;
4711 break;
4712 }
4713
4714 for (size_t i = 0; i < chunk_len ; i++) {
4715 uint64_t v = 0;
4716
4717 switch (width) {
4718 case 8:
4719 v = target_buffer_get_u64(target, &buffer[i * width]);
4720 break;
4721 case 4:
4722 v = target_buffer_get_u32(target, &buffer[i * width]);
4723 break;
4724 case 2:
4725 v = target_buffer_get_u16(target, &buffer[i * width]);
4726 break;
4727 case 1:
4728 v = buffer[i];
4729 break;
4730 }
4731
4732 char value_buf[19];
4733 snprintf(value_buf, sizeof(value_buf), "0x%" PRIx64, v);
4734
4735 Jim_ListAppendElement(interp, result_list,
4736 Jim_NewStringObj(interp, value_buf, -1));
4737 }
4738
4739 count -= chunk_len;
4740 addr += chunk_len * width;
4741 }
4742
4743 free(buffer);
4744
4745 if (e != JIM_OK) {
4746 Jim_DecrRefCount(interp, result_list);
4747 return e;
4748 }
4749
4750 Jim_SetResult(interp, result_list);
4751 Jim_DecrRefCount(interp, result_list);
4752
4753 return JIM_OK;
4754 }
4755
4756 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4757 {
4758 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4759 if (!namebuf)
4760 return JIM_ERR;
4761
4762 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4763 if (!obj_name) {
4764 free(namebuf);
4765 return JIM_ERR;
4766 }
4767
4768 Jim_IncrRefCount(obj_name);
4769 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4770 Jim_DecrRefCount(interp, obj_name);
4771 free(namebuf);
4772 if (!obj_val)
4773 return JIM_ERR;
4774
4775 jim_wide wide_val;
4776 int result = Jim_GetWide(interp, obj_val, &wide_val);
4777 *val = wide_val;
4778 return result;
4779 }
4780
4781 static int target_array2mem(Jim_Interp *interp, struct target *target,
4782 int argc, Jim_Obj *const *argv)
4783 {
4784 int e;
4785
4786 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4787
4788 /* argv[0] = name of array from which to read the data
4789 * argv[1] = desired element width in bits
4790 * argv[2] = memory address
4791 * argv[3] = number of elements to write
4792 * argv[4] = optional "phys"
4793 */
4794 if (argc < 4 || argc > 5) {
4795 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4796 return JIM_ERR;
4797 }
4798
4799 /* Arg 0: Name of the array variable */
4800 const char *varname = Jim_GetString(argv[0], NULL);
4801
4802 /* Arg 1: Bit width of one element */
4803 long l;
4804 e = Jim_GetLong(interp, argv[1], &l);
4805 if (e != JIM_OK)
4806 return e;
4807 const unsigned int width_bits = l;
4808
4809 if (width_bits != 8 &&
4810 width_bits != 16 &&
4811 width_bits != 32 &&
4812 width_bits != 64) {
4813 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4814 Jim_AppendStrings(interp, Jim_GetResult(interp),
4815 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4816 return JIM_ERR;
4817 }
4818 const unsigned int width = width_bits / 8;
4819
4820 /* Arg 2: Memory address */
4821 jim_wide wide_addr;
4822 e = Jim_GetWide(interp, argv[2], &wide_addr);
4823 if (e != JIM_OK)
4824 return e;
4825 target_addr_t addr = (target_addr_t)wide_addr;
4826
4827 /* Arg 3: Number of elements to write */
4828 e = Jim_GetLong(interp, argv[3], &l);
4829 if (e != JIM_OK)
4830 return e;
4831 size_t len = l;
4832
4833 /* Arg 4: Phys */
4834 bool is_phys = false;
4835 if (argc > 4) {
4836 int str_len = 0;
4837 const char *phys = Jim_GetString(argv[4], &str_len);
4838 if (!strncmp(phys, "phys", str_len))
4839 is_phys = true;
4840 else
4841 return JIM_ERR;
4842 }
4843
4844 /* Argument checks */
4845 if (len == 0) {
4846 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4847 Jim_AppendStrings(interp, Jim_GetResult(interp),
4848 "array2mem: zero width read?", NULL);
4849 return JIM_ERR;
4850 }
4851
4852 if ((addr + (len * width)) < addr) {
4853 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4854 Jim_AppendStrings(interp, Jim_GetResult(interp),
4855 "array2mem: addr + len - wraps to zero?", NULL);
4856 return JIM_ERR;
4857 }
4858
4859 if (len > 65536) {
4860 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4861 Jim_AppendStrings(interp, Jim_GetResult(interp),
4862 "array2mem: too large memory write request, exceeds 64K items", NULL);
4863 return JIM_ERR;
4864 }
4865
4866 if ((width == 1) ||
4867 ((width == 2) && ((addr & 1) == 0)) ||
4868 ((width == 4) && ((addr & 3) == 0)) ||
4869 ((width == 8) && ((addr & 7) == 0))) {
4870 /* alignment correct */
4871 } else {
4872 char buf[100];
4873 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4874 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4875 addr,
4876 width);
4877 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4878 return JIM_ERR;
4879 }
4880
4881 /* Transfer loop */
4882
4883 /* assume ok */
4884 e = JIM_OK;
4885
4886 const size_t buffersize = 4096;
4887 uint8_t *buffer = malloc(buffersize);
4888 if (!buffer)
4889 return JIM_ERR;
4890
4891 /* index counter */
4892 size_t idx = 0;
4893
4894 while (len) {
4895 /* Slurp... in buffer size chunks */
4896 const unsigned int max_chunk_len = buffersize / width;
4897
4898 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4899
4900 /* Fill the buffer */
4901 for (size_t i = 0; i < chunk_len; i++, idx++) {
4902 uint64_t v = 0;
4903 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4904 free(buffer);
4905 return JIM_ERR;
4906 }
4907 switch (width) {
4908 case 8:
4909 target_buffer_set_u64(target, &buffer[i * width], v);
4910 break;
4911 case 4:
4912 target_buffer_set_u32(target, &buffer[i * width], v);
4913 break;
4914 case 2:
4915 target_buffer_set_u16(target, &buffer[i * width], v);
4916 break;
4917 case 1:
4918 buffer[i] = v & 0x0ff;
4919 break;
4920 }
4921 }
4922 len -= chunk_len;
4923
4924 /* Write the buffer to memory */
4925 int retval;
4926 if (is_phys)
4927 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4928 else
4929 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4930 if (retval != ERROR_OK) {
4931 /* BOO !*/
4932 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4933 addr,
4934 width,
4935 chunk_len);
4936 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4937 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4938 e = JIM_ERR;
4939 break;
4940 }
4941 addr += chunk_len * width;
4942 }
4943
4944 free(buffer);
4945
4946 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4947
4948 return e;
4949 }
4950
4951 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4952 Jim_Obj * const *argv)
4953 {
4954 /*
4955 * argv[1] = memory address
4956 * argv[2] = desired element width in bits
4957 * argv[3] = list of data to write
4958 * argv[4] = optional "phys"
4959 */
4960
4961 if (argc < 4 || argc > 5) {
4962 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4963 return JIM_ERR;
4964 }
4965
4966 /* Arg 1: Memory address. */
4967 int e;
4968 jim_wide wide_addr;
4969 e = Jim_GetWide(interp, argv[1], &wide_addr);
4970
4971 if (e != JIM_OK)
4972 return e;
4973
4974 target_addr_t addr = (target_addr_t)wide_addr;
4975
4976 /* Arg 2: Bit width of one element. */
4977 long l;
4978 e = Jim_GetLong(interp, argv[2], &l);
4979
4980 if (e != JIM_OK)
4981 return e;
4982
4983 const unsigned int width_bits = l;
4984 size_t count = Jim_ListLength(interp, argv[3]);
4985
4986 /* Arg 4: Optional 'phys'. */
4987 bool is_phys = false;
4988
4989 if (argc > 4) {
4990 const char *phys = Jim_GetString(argv[4], NULL);
4991
4992 if (strcmp(phys, "phys")) {
4993 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4994 return JIM_ERR;
4995 }
4996
4997 is_phys = true;
4998 }
4999
5000 switch (width_bits) {
5001 case 8:
5002 case 16:
5003 case 32:
5004 case 64:
5005 break;
5006 default:
5007 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
5008 return JIM_ERR;
5009 }
5010
5011 const unsigned int width = width_bits / 8;
5012
5013 if ((addr + (count * width)) < addr) {
5014 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
5015 return JIM_ERR;
5016 }
5017
5018 if (count > 65536) {
5019 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
5020 return JIM_ERR;
5021 }
5022
5023 struct command_context *cmd_ctx = current_command_context(interp);
5024 assert(cmd_ctx != NULL);
5025 struct target *target = get_current_target(cmd_ctx);
5026
5027 const size_t buffersize = 4096;
5028 uint8_t *buffer = malloc(buffersize);
5029
5030 if (!buffer) {
5031 LOG_ERROR("Failed to allocate memory");
5032 return JIM_ERR;
5033 }
5034
5035 size_t j = 0;
5036
5037 while (count > 0) {
5038 const unsigned int max_chunk_len = buffersize / width;
5039 const size_t chunk_len = MIN(count, max_chunk_len);
5040
5041 for (size_t i = 0; i < chunk_len; i++, j++) {
5042 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
5043 jim_wide element_wide;
5044 Jim_GetWide(interp, tmp, &element_wide);
5045
5046 const uint64_t v = element_wide;
5047
5048 switch (width) {
5049 case 8:
5050 target_buffer_set_u64(target, &buffer[i * width], v);
5051 break;
5052 case 4:
5053 target_buffer_set_u32(target, &buffer[i * width], v);
5054 break;
5055 case 2:
5056 target_buffer_set_u16(target, &buffer[i * width], v);
5057 break;
5058 case 1:
5059 buffer[i] = v & 0x0ff;
5060 break;
5061 }
5062 }
5063
5064 count -= chunk_len;
5065
5066 int retval;
5067
5068 if (is_phys)
5069 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5070 else
5071 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5072
5073 if (retval != ERROR_OK) {
5074 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5075 addr, width_bits, chunk_len);
5076 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5077 e = JIM_ERR;
5078 break;
5079 }
5080
5081 addr += chunk_len * width;
5082 }
5083
5084 free(buffer);
5085
5086 return e;
5087 }
5088
/* FIX? should we propagate errors here rather than printing them
 * and continuing?
 */
/**
 * Run every Tcl handler registered on @a target for event @a e.
 *
 * Each matching handler body is evaluated in its own interpreter with the
 * current target temporarily overridden to @a target.  Handler errors are
 * reported to the user and cleared; they do not stop later handlers, except
 * for ERROR_COMMAND_CLOSE_CONNECTION which aborts immediately.
 */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
					target->target_number,
					target_name(target),
					target_type_name(target),
					e,
					target_event_name(e),
					Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			cmd_ctx->current_target_override = saved_target_override;

			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* A 'return' inside the handler body carries the real
			 * completion code in the interpreter state. */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
						target_event_name(e),
						target_name(target),
						Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
5137
/**
 * Implements the Tcl command 'get_reg': returns a dict mapping each requested
 * register name to its value as a "0x..." hex string.
 *
 * With '-force', each register is re-read from the target via reg->type->get()
 * instead of using the cached value.
 *
 * Returns JIM_OK with the dict in the interpreter result, JIM_ERR otherwise.
 */
static int target_jim_get_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	bool force = false;

	if (argc == 3) {
		const char *option = Jim_GetString(argv[1], NULL);

		if (!strcmp(option, "-force")) {
			/* Consume the option so argv[1] is the register list below. */
			argc--;
			argv++;
			force = true;
		} else {
			Jim_SetResultFormatted(interp, "invalid option '%s'", option);
			return JIM_ERR;
		}
	}

	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
		return JIM_ERR;
	}

	const int length = Jim_ListLength(interp, argv[1]);

	/* NOTE(review): result_dict is never released on the error paths
	 * below; presumably reclaimed when the interpreter is freed — verify
	 * against Jim's object lifetime rules. */
	Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);

	if (!result_dict)
		return JIM_ERR;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx != NULL);
	const struct target *target = get_current_target(cmd_ctx);

	for (int i = 0; i < length; i++) {
		Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);

		if (!elem)
			return JIM_ERR;

		const char *reg_name = Jim_String(elem);

		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		if (force) {
			/* Refresh the cached value from the target. */
			int retval = reg->type->get(reg);

			if (retval != ERROR_OK) {
				Jim_SetResultFormatted(interp, "failed to read register '%s'",
					reg_name);
				return JIM_ERR;
			}
		}

		char *reg_value = buf_to_hex_str(reg->value, reg->size);

		if (!reg_value) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		char *tmp = alloc_printf("0x%s", reg_value);

		free(reg_value);

		if (!tmp) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		Jim_DictAddElement(interp, result_dict, elem,
			Jim_NewStringObj(interp, tmp, -1));

		free(tmp);
	}

	Jim_SetResult(interp, result_dict);

	return JIM_OK;
}
5224
/**
 * Implements the Tcl command 'set_reg': writes registers from a Tcl dict
 * mapping register names to values.
 *
 * Each value string is converted with str_to_buf() and written through
 * reg->type->set().  Returns JIM_OK when every register was set, JIM_ERR
 * on the first failure (earlier registers remain written).
 */
static int target_jim_set_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "dict");
		return JIM_ERR;
	}

	int tmp;
	/* Jim_DictPairs changed its signature in Jim 0.80. */
#if JIM_VERSION >= 80
	Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);

	if (!dict)
		return JIM_ERR;
#else
	Jim_Obj **dict;
	int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);

	if (ret != JIM_OK)
		return ret;
#endif

	/* dict[] holds alternating name/value pairs, 'length' entries total. */
	const unsigned int length = tmp;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	const struct target *target = get_current_target(cmd_ctx);

	for (unsigned int i = 0; i < length; i += 2) {
		const char *reg_name = Jim_String(dict[i]);
		const char *reg_value = Jim_String(dict[i + 1]);
		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));

		if (!buf) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		/* NOTE(review): str_to_buf() result is ignored; a malformed
		 * value string is not diagnosed here — verify intended. */
		str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
		int retval = reg->type->set(reg, buf);
		free(buf);

		if (retval != ERROR_OK) {
			Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
				reg_value, reg_name);
			return JIM_ERR;
		}
	}

	return JIM_OK;
}
5283
5284 /**
5285 * Returns true only if the target has a handler for the specified event.
5286 */
5287 bool target_has_event_action(struct target *target, enum target_event event)
5288 {
5289 struct target_event_action *teap;
5290
5291 for (teap = target->event_action; teap; teap = teap->next) {
5292 if (teap->event == event)
5293 return true;
5294 }
5295 return false;
5296 }
5297
/* Option keys accepted by 'target configure'/'cget'; each value indexes the
 * matching "-name" entry in nvp_config_opts[] below. */
enum target_cfg_param {
	TCFG_TYPE,
	TCFG_EVENT,
	TCFG_WORK_AREA_VIRT,
	TCFG_WORK_AREA_PHYS,
	TCFG_WORK_AREA_SIZE,
	TCFG_WORK_AREA_BACKUP,
	TCFG_ENDIAN,
	TCFG_COREID,
	TCFG_CHAIN_POSITION,
	TCFG_DBGBASE,
	TCFG_RTOS,
	TCFG_DEFER_EXAMINE,
	TCFG_GDB_PORT,
	TCFG_GDB_MAX_CONNECTIONS,
};
5314
/* Name/value table mapping 'target configure' option strings to the
 * enum target_cfg_param values; NULL-name entry terminates the list. */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type",             .value = TCFG_TYPE },
	{ .name = "-event",            .value = TCFG_EVENT },
	{ .name = "-work-area-virt",   .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys",   .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size",   .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian",           .value = TCFG_ENDIAN },
	{ .name = "-coreid",           .value = TCFG_COREID },
	{ .name = "-chain-position",   .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase",          .value = TCFG_DBGBASE },
	{ .name = "-rtos",             .value = TCFG_RTOS },
	{ .name = "-defer-examine",    .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port",         .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
5332
/**
 * Process the option list of a target's 'configure' or 'cget' subcommand.
 *
 * When goi->isconfigure is non-zero, each recognized -option consumes its
 * value and updates @a target; otherwise (cget) the current value is placed
 * in the Jim result.  The target type's own target_jim_configure hook, if
 * any, gets first chance at every option.
 *
 * @param goi option parser state (arguments remaining to consume).
 * @param target target being configured or queried.
 * @returns JIM_OK when all options were processed, JIM_ERR on any error.
 */
static int target_configure(struct jim_getopt_info *goi, struct target *target)
{
	struct jim_nvp *n;
	Jim_Obj *o;
	jim_wide w;
	int e;

	/* parse config or cget options ... */
	while (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);
		/* jim_getopt_debug(goi); */

		if (target->type->target_jim_configure) {
			/* target defines a configure function */
			/* target gets first dibs on parameters */
			e = (*(target->type->target_jim_configure))(target, goi);
			if (e == JIM_OK) {
				/* more? */
				continue;
			}
			if (e == JIM_ERR) {
				/* An error */
				return e;
			}
			/* otherwise we 'continue' below */
		}
		e = jim_getopt_nvp(goi, nvp_config_opts, &n);
		if (e != JIM_OK) {
			jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
			return e;
		}
		switch (n->value) {
		case TCFG_TYPE:
			/* not settable */
			if (goi->isconfigure) {
				Jim_SetResultFormatted(goi->interp,
						"not settable: %s", n->name);
				return JIM_ERR;
			} else {
				/* Shared "cget takes no extra parameters" check;
				 * several cases below jump here via 'goto no_params'. */
no_params:
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}
			}
			Jim_SetResultString(goi->interp,
					target_type_name(target), -1);
			/* loop for more */
			break;
		case TCFG_EVENT:
			if (goi->argc == 0) {
				Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
				return JIM_ERR;
			}

			e = jim_getopt_nvp(goi, nvp_target_event, &n);
			if (e != JIM_OK) {
				jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
				return e;
			}

			/* configure takes an event body; cget takes nothing more */
			if (goi->isconfigure) {
				if (goi->argc != 1) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
					return JIM_ERR;
				}
			} else {
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
					return JIM_ERR;
				}
			}

			{
				struct target_event_action *teap;

				teap = target->event_action;
				/* replace existing? */
				while (teap) {
					if (teap->event == (enum target_event)n->value)
						break;
					teap = teap->next;
				}

				if (goi->isconfigure) {
					/* START_DEPRECATED_TPIU */
					if (n->value == TARGET_EVENT_TRACE_CONFIG)
						LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
					/* END_DEPRECATED_TPIU */

					bool replace = true;
					if (!teap) {
						/* create new */
						teap = calloc(1, sizeof(*teap));
						replace = false;
					}
					teap->event = n->value;
					teap->interp = goi->interp;
					jim_getopt_obj(goi, &o);
					if (teap->body)
						Jim_DecrRefCount(teap->interp, teap->body);
					teap->body = Jim_DuplicateObj(goi->interp, o);
					/*
					 * FIXME:
					 * Tcl/TK - "tk events" have a nice feature.
					 * See the "BIND" command.
					 * We should support that here.
					 * You can specify %X and %Y in the event code.
					 * The idea is: %T - target name.
					 * The idea is: %N - target number
					 * The idea is: %E - event name.
					 */
					Jim_IncrRefCount(teap->body);

					if (!replace) {
						/* add to head of event list */
						teap->next = target->event_action;
						target->event_action = teap;
					}
					Jim_SetEmptyResult(goi->interp);
				} else {
					/* get */
					if (!teap)
						Jim_SetEmptyResult(goi->interp);
					else
						Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
				}
			}
			/* loop for more */
			break;

		case TCFG_WORK_AREA_VIRT:
			if (goi->isconfigure) {
				/* changing the work area invalidates existing allocations */
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_virt = w;
				target->working_area_virt_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_PHYS:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_phys = w;
				target->working_area_phys_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_SIZE:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_size = w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_BACKUP:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* make this exactly 1 or 0 */
				target->backup_working_area = (!!w);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
			/* loop for more e*/
			break;


		case TCFG_ENDIAN:
			if (goi->isconfigure) {
				e = jim_getopt_nvp(goi, nvp_target_endian, &n);
				if (e != JIM_OK) {
					jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
					return e;
				}
				target->endianness = n->value;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			/* normalize an unknown endianness to little-endian before reporting */
			n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			if (!n->name) {
				target->endianness = TARGET_LITTLE_ENDIAN;
				n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			}
			Jim_SetResultString(goi->interp, n->name, -1);
			/* loop for more */
			break;

		case TCFG_COREID:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->coreid = (int32_t)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
			/* loop for more */
			break;

		case TCFG_CHAIN_POSITION:
			if (goi->isconfigure) {
				Jim_Obj *o_t;
				struct jtag_tap *tap;

				/* -chain-position and -dap are mutually exclusive */
				if (target->has_dap) {
					Jim_SetResultString(goi->interp,
						"target requires -dap parameter instead of -chain-position!", -1);
					return JIM_ERR;
				}

				target_free_all_working_areas(target);
				e = jim_getopt_obj(goi, &o_t);
				if (e != JIM_OK)
					return e;
				tap = jtag_tap_by_jim_obj(goi->interp, o_t);
				if (!tap)
					return JIM_ERR;
				target->tap = tap;
				target->tap_configured = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
			/* loop for more e*/
			break;
		case TCFG_DBGBASE:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->dbgbase = (uint32_t)w;
				target->dbgbase_set = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
			/* loop for more */
			break;
		case TCFG_RTOS:
			/* RTOS */
			{
				int result = rtos_create(goi, target);
				if (result != JIM_OK)
					return result;
			}
			/* loop for more */
			break;

		case TCFG_DEFER_EXAMINE:
			/* DEFER_EXAMINE */
			target->defer_examine = true;
			/* loop for more */
			break;

		case TCFG_GDB_PORT:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
					return JIM_ERR;
				}

				const char *s;
				e = jim_getopt_string(goi, &s, NULL);
				if (e != JIM_OK)
					return e;
				/* replace any previously configured override */
				free(target->gdb_port_override);
				target->gdb_port_override = strdup(s);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
			/* loop for more */
			break;

		case TCFG_GDB_MAX_CONNECTIONS:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
					return JIM_ERR;
				}

				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* negative values mean "no connection limit" */
				target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
			break;
		}
	} /* while (goi->argc) */


	/* done - we return */
	return JIM_OK;
}
5669
5670 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5671 {
5672 struct command *c = jim_to_command(interp);
5673 struct jim_getopt_info goi;
5674
5675 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5676 goi.isconfigure = !strcmp(c->name, "configure");
5677 if (goi.argc < 1) {
5678 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5679 "missing: -option ...");
5680 return JIM_ERR;
5681 }
5682 struct command_context *cmd_ctx = current_command_context(interp);
5683 assert(cmd_ctx);
5684 struct target *target = get_current_target(cmd_ctx);
5685 return target_configure(&goi, target);
5686 }
5687
5688 static int jim_target_mem2array(Jim_Interp *interp,
5689 int argc, Jim_Obj *const *argv)
5690 {
5691 struct command_context *cmd_ctx = current_command_context(interp);
5692 assert(cmd_ctx);
5693 struct target *target = get_current_target(cmd_ctx);
5694 return target_mem2array(interp, target, argc - 1, argv + 1);
5695 }
5696
5697 static int jim_target_array2mem(Jim_Interp *interp,
5698 int argc, Jim_Obj *const *argv)
5699 {
5700 struct command_context *cmd_ctx = current_command_context(interp);
5701 assert(cmd_ctx);
5702 struct target *target = get_current_target(cmd_ctx);
5703 return target_array2mem(interp, target, argc - 1, argv + 1);
5704 }
5705
/* Common error path for subcommands that require an enabled TAP:
 * set a fixed message as the Jim result and fail. */
static int jim_target_tap_disabled(Jim_Interp *interp)
{
	Jim_SetResultFormatted(interp, "[TAP is disabled]");
	return JIM_ERR;
}
5711
5712 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5713 {
5714 bool allow_defer = false;
5715
5716 struct jim_getopt_info goi;
5717 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5718 if (goi.argc > 1) {
5719 const char *cmd_name = Jim_GetString(argv[0], NULL);
5720 Jim_SetResultFormatted(goi.interp,
5721 "usage: %s ['allow-defer']", cmd_name);
5722 return JIM_ERR;
5723 }
5724 if (goi.argc > 0 &&
5725 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5726 /* consume it */
5727 Jim_Obj *obj;
5728 int e = jim_getopt_obj(&goi, &obj);
5729 if (e != JIM_OK)
5730 return e;
5731 allow_defer = true;
5732 }
5733
5734 struct command_context *cmd_ctx = current_command_context(interp);
5735 assert(cmd_ctx);
5736 struct target *target = get_current_target(cmd_ctx);
5737 if (!target->tap->enabled)
5738 return jim_target_tap_disabled(interp);
5739
5740 if (allow_defer && target->defer_examine) {
5741 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5742 LOG_INFO("Use arp_examine command to examine it manually!");
5743 return JIM_OK;
5744 }
5745
5746 int e = target->type->examine(target);
5747 if (e != ERROR_OK) {
5748 target_reset_examined(target);
5749 return JIM_ERR;
5750 }
5751
5752 target_set_examined(target);
5753
5754 return JIM_OK;
5755 }
5756
5757 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5758 {
5759 struct command_context *cmd_ctx = current_command_context(interp);
5760 assert(cmd_ctx);
5761 struct target *target = get_current_target(cmd_ctx);
5762
5763 Jim_SetResultBool(interp, target_was_examined(target));
5764 return JIM_OK;
5765 }
5766
5767 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5768 {
5769 struct command_context *cmd_ctx = current_command_context(interp);
5770 assert(cmd_ctx);
5771 struct target *target = get_current_target(cmd_ctx);
5772
5773 Jim_SetResultBool(interp, target->defer_examine);
5774 return JIM_OK;
5775 }
5776
5777 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5778 {
5779 if (argc != 1) {
5780 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5781 return JIM_ERR;
5782 }
5783 struct command_context *cmd_ctx = current_command_context(interp);
5784 assert(cmd_ctx);
5785 struct target *target = get_current_target(cmd_ctx);
5786
5787 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5788 return JIM_ERR;
5789
5790 return JIM_OK;
5791 }
5792
5793 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5794 {
5795 if (argc != 1) {
5796 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5797 return JIM_ERR;
5798 }
5799 struct command_context *cmd_ctx = current_command_context(interp);
5800 assert(cmd_ctx);
5801 struct target *target = get_current_target(cmd_ctx);
5802 if (!target->tap->enabled)
5803 return jim_target_tap_disabled(interp);
5804
5805 int e;
5806 if (!(target_was_examined(target)))
5807 e = ERROR_TARGET_NOT_EXAMINED;
5808 else
5809 e = target->type->poll(target);
5810 if (e != ERROR_OK)
5811 return JIM_ERR;
5812 return JIM_OK;
5813 }
5814
/* 'arp_reset' handler: assert or deassert the reset line on the current
 * target, recording whether a halt was requested.
 * Usage: ([tT]|[fF]|assert|deassert) BOOL  -- the BOOL selects reset-halt. */
static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);

	if (goi.argc != 2) {
		Jim_WrongNumArgs(interp, 0, argv,
				"([tT]|[fF]|assert|deassert) BOOL");
		return JIM_ERR;
	}

	/* first parameter: assert vs. deassert */
	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_assert, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
		return e;
	}
	/* the halt or not param */
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	/* both hooks are required; a target with neither has no
	 * target-specific reset support */
	if (!target->type->assert_reset || !target->type->deassert_reset) {
		Jim_SetResultFormatted(interp,
				"No target-specific reset for %s",
				target_name(target));
		return JIM_ERR;
	}

	/* a defer-examine target must be re-examined after reset */
	if (target->defer_examine)
		target_reset_examined(target);

	/* determine if we should halt or not. */
	target->reset_halt = (a != 0);
	/* When this happens - all workareas are invalid. */
	target_free_all_working_areas_restore(target, 0);

	/* do the assert */
	if (n->value == NVP_ASSERT)
		e = target->type->assert_reset(target);
	else
		e = target->type->deassert_reset(target);
	return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
}
5866
5867 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5868 {
5869 if (argc != 1) {
5870 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5871 return JIM_ERR;
5872 }
5873 struct command_context *cmd_ctx = current_command_context(interp);
5874 assert(cmd_ctx);
5875 struct target *target = get_current_target(cmd_ctx);
5876 if (!target->tap->enabled)
5877 return jim_target_tap_disabled(interp);
5878 int e = target->type->halt(target);
5879 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5880 }
5881
5882 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5883 {
5884 struct jim_getopt_info goi;
5885 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5886
5887 /* params: <name> statename timeoutmsecs */
5888 if (goi.argc != 2) {
5889 const char *cmd_name = Jim_GetString(argv[0], NULL);
5890 Jim_SetResultFormatted(goi.interp,
5891 "%s <state_name> <timeout_in_msec>", cmd_name);
5892 return JIM_ERR;
5893 }
5894
5895 struct jim_nvp *n;
5896 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5897 if (e != JIM_OK) {
5898 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5899 return e;
5900 }
5901 jim_wide a;
5902 e = jim_getopt_wide(&goi, &a);
5903 if (e != JIM_OK)
5904 return e;
5905 struct command_context *cmd_ctx = current_command_context(interp);
5906 assert(cmd_ctx);
5907 struct target *target = get_current_target(cmd_ctx);
5908 if (!target->tap->enabled)
5909 return jim_target_tap_disabled(interp);
5910
5911 e = target_wait_state(target, n->value, a);
5912 if (e != ERROR_OK) {
5913 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5914 Jim_SetResultFormatted(goi.interp,
5915 "target: %s wait %s fails (%#s) %s",
5916 target_name(target), n->name,
5917 obj, target_strerror_safe(e));
5918 return JIM_ERR;
5919 }
5920 return JIM_OK;
5921 }
/* List, in human-readable form, the events defined for this target.
 * Scripts/programs should instead use 'name cget -event NAME'.
 */
5925 COMMAND_HANDLER(handle_target_event_list)
5926 {
5927 struct target *target = get_current_target(CMD_CTX);
5928 struct target_event_action *teap = target->event_action;
5929
5930 command_print(CMD, "Event actions for target (%d) %s\n",
5931 target->target_number,
5932 target_name(target));
5933 command_print(CMD, "%-25s | Body", "Event");
5934 command_print(CMD, "------------------------- | "
5935 "----------------------------------------");
5936 while (teap) {
5937 command_print(CMD, "%-25s | %s",
5938 target_event_name(teap->event),
5939 Jim_GetString(teap->body, NULL));
5940 teap = teap->next;
5941 }
5942 command_print(CMD, "***END***");
5943 return ERROR_OK;
5944 }
5945 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5946 {
5947 if (argc != 1) {
5948 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5949 return JIM_ERR;
5950 }
5951 struct command_context *cmd_ctx = current_command_context(interp);
5952 assert(cmd_ctx);
5953 struct target *target = get_current_target(cmd_ctx);
5954 Jim_SetResultString(interp, target_state_name(target), -1);
5955 return JIM_OK;
5956 }
5957 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5958 {
5959 struct jim_getopt_info goi;
5960 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5961 if (goi.argc != 1) {
5962 const char *cmd_name = Jim_GetString(argv[0], NULL);
5963 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5964 return JIM_ERR;
5965 }
5966 struct jim_nvp *n;
5967 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5968 if (e != JIM_OK) {
5969 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5970 return e;
5971 }
5972 struct command_context *cmd_ctx = current_command_context(interp);
5973 assert(cmd_ctx);
5974 struct target *target = get_current_target(cmd_ctx);
5975 target_handle_event(target, n->value);
5976 return JIM_OK;
5977 }
5978
5979 static const struct command_registration target_instance_command_handlers[] = {
5980 {
5981 .name = "configure",
5982 .mode = COMMAND_ANY,
5983 .jim_handler = jim_target_configure,
5984 .help = "configure a new target for use",
5985 .usage = "[target_attribute ...]",
5986 },
5987 {
5988 .name = "cget",
5989 .mode = COMMAND_ANY,
5990 .jim_handler = jim_target_configure,
5991 .help = "returns the specified target attribute",
5992 .usage = "target_attribute",
5993 },
5994 {
5995 .name = "mwd",
5996 .handler = handle_mw_command,
5997 .mode = COMMAND_EXEC,
5998 .help = "Write 64-bit word(s) to target memory",
5999 .usage = "address data [count]",
6000 },
6001 {
6002 .name = "mww",
6003 .handler = handle_mw_command,
6004 .mode = COMMAND_EXEC,
6005 .help = "Write 32-bit word(s) to target memory",
6006 .usage = "address data [count]",
6007 },
6008 {
6009 .name = "mwh",
6010 .handler = handle_mw_command,
6011 .mode = COMMAND_EXEC,
6012 .help = "Write 16-bit half-word(s) to target memory",
6013 .usage = "address data [count]",
6014 },
6015 {
6016 .name = "mwb",
6017 .handler = handle_mw_command,
6018 .mode = COMMAND_EXEC,
6019 .help = "Write byte(s) to target memory",
6020 .usage = "address data [count]",
6021 },
6022 {
6023 .name = "mdd",
6024 .handler = handle_md_command,
6025 .mode = COMMAND_EXEC,
6026 .help = "Display target memory as 64-bit words",
6027 .usage = "address [count]",
6028 },
6029 {
6030 .name = "mdw",
6031 .handler = handle_md_command,
6032 .mode = COMMAND_EXEC,
6033 .help = "Display target memory as 32-bit words",
6034 .usage = "address [count]",
6035 },
6036 {
6037 .name = "mdh",
6038 .handler = handle_md_command,
6039 .mode = COMMAND_EXEC,
6040 .help = "Display target memory as 16-bit half-words",
6041 .usage = "address [count]",
6042 },
6043 {
6044 .name = "mdb",
6045 .handler = handle_md_command,
6046 .mode = COMMAND_EXEC,
6047 .help = "Display target memory as 8-bit bytes",
6048 .usage = "address [count]",
6049 },
6050 {
6051 .name = "array2mem",
6052 .mode = COMMAND_EXEC,
6053 .jim_handler = jim_target_array2mem,
6054 .help = "Writes Tcl array of 8/16/32 bit numbers "
6055 "to target memory",
6056 .usage = "arrayname bitwidth address count",
6057 },
6058 {
6059 .name = "mem2array",
6060 .mode = COMMAND_EXEC,
6061 .jim_handler = jim_target_mem2array,
6062 .help = "Loads Tcl array of 8/16/32 bit numbers "
6063 "from target memory",
6064 .usage = "arrayname bitwidth address count",
6065 },
6066 {
6067 .name = "get_reg",
6068 .mode = COMMAND_EXEC,
6069 .jim_handler = target_jim_get_reg,
6070 .help = "Get register values from the target",
6071 .usage = "list",
6072 },
6073 {
6074 .name = "set_reg",
6075 .mode = COMMAND_EXEC,
6076 .jim_handler = target_jim_set_reg,
6077 .help = "Set target register values",
6078 .usage = "dict",
6079 },
6080 {
6081 .name = "read_memory",
6082 .mode = COMMAND_EXEC,
6083 .jim_handler = target_jim_read_memory,
6084 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
6085 .usage = "address width count ['phys']",
6086 },
6087 {
6088 .name = "write_memory",
6089 .mode = COMMAND_EXEC,
6090 .jim_handler = target_jim_write_memory,
6091 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
6092 .usage = "address width data ['phys']",
6093 },
6094 {
6095 .name = "eventlist",
6096 .handler = handle_target_event_list,
6097 .mode = COMMAND_EXEC,
6098 .help = "displays a table of events defined for this target",
6099 .usage = "",
6100 },
6101 {
6102 .name = "curstate",
6103 .mode = COMMAND_EXEC,
6104 .jim_handler = jim_target_current_state,
6105 .help = "displays the current state of this target",
6106 },
6107 {
6108 .name = "arp_examine",
6109 .mode = COMMAND_EXEC,
6110 .jim_handler = jim_target_examine,
6111 .help = "used internally for reset processing",
6112 .usage = "['allow-defer']",
6113 },
6114 {
6115 .name = "was_examined",
6116 .mode = COMMAND_EXEC,
6117 .jim_handler = jim_target_was_examined,
6118 .help = "used internally for reset processing",
6119 },
6120 {
6121 .name = "examine_deferred",
6122 .mode = COMMAND_EXEC,
6123 .jim_handler = jim_target_examine_deferred,
6124 .help = "used internally for reset processing",
6125 },
6126 {
6127 .name = "arp_halt_gdb",
6128 .mode = COMMAND_EXEC,
6129 .jim_handler = jim_target_halt_gdb,
6130 .help = "used internally for reset processing to halt GDB",
6131 },
6132 {
6133 .name = "arp_poll",
6134 .mode = COMMAND_EXEC,
6135 .jim_handler = jim_target_poll,
6136 .help = "used internally for reset processing",
6137 },
6138 {
6139 .name = "arp_reset",
6140 .mode = COMMAND_EXEC,
6141 .jim_handler = jim_target_reset,
6142 .help = "used internally for reset processing",
6143 },
6144 {
6145 .name = "arp_halt",
6146 .mode = COMMAND_EXEC,
6147 .jim_handler = jim_target_halt,
6148 .help = "used internally for reset processing",
6149 },
6150 {
6151 .name = "arp_waitstate",
6152 .mode = COMMAND_EXEC,
6153 .jim_handler = jim_target_wait_state,
6154 .help = "used internally for reset processing",
6155 },
6156 {
6157 .name = "invoke-event",
6158 .mode = COMMAND_EXEC,
6159 .jim_handler = jim_target_invoke_event,
6160 .help = "invoke handler for specified event",
6161 .usage = "event_name",
6162 },
6163 COMMAND_REGISTRATION_DONE
6164 };
6165
/**
 * Implement the 'target create ?name? ?type? ..options..' command.
 *
 * Allocates and initializes a new struct target of the requested type,
 * applies the remaining arguments as 'configure' options, runs the type's
 * target_create hook, registers the per-target command group, and appends
 * the target to the global list.  On any failure, everything allocated so
 * far is released and JIM_ERR is returned.
 */
static int target_create(struct jim_getopt_info *goi)
{
	Jim_Obj *new_cmd;
	Jim_Cmd *cmd;
	const char *cp;
	int e;
	int x;
	struct target *target;
	struct command_context *cmd_ctx;

	cmd_ctx = current_command_context(goi->interp);
	assert(cmd_ctx);

	if (goi->argc < 3) {
		Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
		return JIM_ERR;
	}

	/* COMMAND */
	jim_getopt_obj(goi, &new_cmd);
	/* does this command exist? */
	cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
	if (cmd) {
		cp = Jim_GetString(new_cmd, NULL);
		Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
		return JIM_ERR;
	}

	/* TYPE */
	e = jim_getopt_string(goi, &cp, NULL);
	if (e != JIM_OK)
		return e;
	/* the transport may substitute its own target type name */
	struct transport *tr = get_current_transport();
	if (tr->override_target) {
		e = tr->override_target(&cp);
		if (e != ERROR_OK) {
			LOG_ERROR("The selected transport doesn't support this target");
			return JIM_ERR;
		}
		LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
	}
	/* now does target type exist */
	for (x = 0 ; target_types[x] ; x++) {
		if (strcmp(cp, target_types[x]->name) == 0) {
			/* found */
			break;
		}
	}
	if (!target_types[x]) {
		/* build an "Unknown type, try A, B or C" style message */
		Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
		for (x = 0 ; target_types[x] ; x++) {
			if (target_types[x + 1]) {
				Jim_AppendStrings(goi->interp,
						Jim_GetResult(goi->interp),
						target_types[x]->name,
						", ", NULL);
			} else {
				Jim_AppendStrings(goi->interp,
						Jim_GetResult(goi->interp),
						" or ",
						target_types[x]->name, NULL);
			}
		}
		return JIM_ERR;
	}

	/* Create it */
	target = calloc(1, sizeof(struct target));
	if (!target) {
		LOG_ERROR("Out of memory");
		return JIM_ERR;
	}

	/* set empty smp cluster */
	target->smp_targets = &empty_smp_targets;

	/* set target number */
	target->target_number = new_target_number();

	/* allocate memory for each unique target type */
	target->type = malloc(sizeof(struct target_type));
	if (!target->type) {
		LOG_ERROR("Out of memory");
		free(target);
		return JIM_ERR;
	}

	/* copy the shared type descriptor so it can be patched per target */
	memcpy(target->type, target_types[x], sizeof(struct target_type));

	/* default to first core, override with -coreid */
	target->coreid = 0;

	target->working_area = 0x0;
	target->working_area_size = 0x0;
	target->working_areas = NULL;
	target->backup_working_area = 0;

	target->state = TARGET_UNKNOWN;
	target->debug_reason = DBG_REASON_UNDEFINED;
	target->reg_cache = NULL;
	target->breakpoints = NULL;
	target->watchpoints = NULL;
	target->next = NULL;
	target->arch_info = NULL;

	target->verbose_halt_msg = true;

	target->halt_issued = false;

	/* initialize trace information */
	target->trace_info = calloc(1, sizeof(struct trace));
	if (!target->trace_info) {
		LOG_ERROR("Out of memory");
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	target->dbgmsg = NULL;
	target->dbg_msg_enabled = 0;

	target->endianness = TARGET_ENDIAN_UNKNOWN;

	target->rtos = NULL;
	target->rtos_auto_detect = false;

	target->gdb_port_override = NULL;
	target->gdb_max_connections = 1;

	/* Do the rest as "configure" options */
	goi->isconfigure = 1;
	e = target_configure(goi, target);

	if (e == JIM_OK) {
		/* exactly one of -dap / -chain-position must have been given */
		if (target->has_dap) {
			if (!target->dap_configured) {
				Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		} else {
			if (!target->tap_configured) {
				Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		}
		/* tap must be set after target was configured */
		if (!target->tap)
			e = JIM_ERR;
	}

	if (e != JIM_OK) {
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return e;
	}

	if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
		/* default endian to little if not specified */
		target->endianness = TARGET_LITTLE_ENDIAN;
	}

	cp = Jim_GetString(new_cmd, NULL);
	target->cmd_name = strdup(cp);
	if (!target->cmd_name) {
		LOG_ERROR("Out of memory");
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	if (target->type->target_create) {
		e = (*(target->type->target_create))(target, goi->interp);
		if (e != ERROR_OK) {
			LOG_DEBUG("target_create failed");
			free(target->cmd_name);
			rtos_destroy(target);
			free(target->gdb_port_override);
			free(target->trace_info);
			free(target->type);
			free(target);
			return JIM_ERR;
		}
	}

	/* create the target specific commands */
	if (target->type->commands) {
		e = register_commands(cmd_ctx, NULL, target->type->commands);
		if (e != ERROR_OK)
			LOG_ERROR("unable to register '%s' commands", cp);
	}

	/* now - create the new target name command */
	const struct command_registration target_subcommands[] = {
		{
			.chain = target_instance_command_handlers,
		},
		{
			.chain = target->type->commands,
		},
		COMMAND_REGISTRATION_DONE
	};
	const struct command_registration target_commands[] = {
		{
			.name = cp,
			.mode = COMMAND_ANY,
			.help = "target command group",
			.usage = "",
			.chain = target_subcommands,
		},
		COMMAND_REGISTRATION_DONE
	};
	e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
	if (e != ERROR_OK) {
		if (target->type->deinit_target)
			target->type->deinit_target(target);
		free(target->cmd_name);
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* append to end of list */
	append_to_list_all_targets(target);

	/* the newly created target becomes the current target */
	cmd_ctx->current_target = target;
	return JIM_OK;
}
6402
6403 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6404 {
6405 if (argc != 1) {
6406 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6407 return JIM_ERR;
6408 }
6409 struct command_context *cmd_ctx = current_command_context(interp);
6410 assert(cmd_ctx);
6411
6412 struct target *target = get_current_target_or_null(cmd_ctx);
6413 if (target)
6414 Jim_SetResultString(interp, target_name(target), -1);
6415 return JIM_OK;
6416 }
6417
6418 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6419 {
6420 if (argc != 1) {
6421 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6422 return JIM_ERR;
6423 }
6424 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6425 for (unsigned x = 0; target_types[x]; x++) {
6426 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6427 Jim_NewStringObj(interp, target_types[x]->name, -1));
6428 }
6429 return JIM_OK;
6430 }
6431
6432 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6433 {
6434 if (argc != 1) {
6435 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6436 return JIM_ERR;
6437 }
6438 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6439 struct target *target = all_targets;
6440 while (target) {
6441 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6442 Jim_NewStringObj(interp, target_name(target), -1));
6443 target = target->next;
6444 }
6445 return JIM_OK;
6446 }
6447
6448 static struct target_list *
6449 __attribute__((warn_unused_result))
6450 create_target_list_node(Jim_Obj *const name) {
6451 int len;
6452 const char *targetname = Jim_GetString(name, &len);
6453 struct target *target = get_target(targetname);
6454 LOG_DEBUG("%s ", targetname);
6455 if (!target)
6456 return NULL;
6457
6458 struct target_list *new = malloc(sizeof(struct target_list));
6459 if (!new) {
6460 LOG_ERROR("Out of memory");
6461 return new;
6462 }
6463
6464 new->target = target;
6465 return new;
6466 }
6467
6468 static int get_target_with_common_rtos_type(struct list_head *lh, struct target **result)
6469 {
6470 struct target *target = NULL;
6471 struct target_list *curr;
6472 foreach_smp_target(curr, lh) {
6473 struct rtos *curr_rtos = curr->target->rtos;
6474 if (curr_rtos) {
6475 if (target && target->rtos && target->rtos->type != curr_rtos->type) {
6476 LOG_ERROR("Different rtos types in members of one smp target!");
6477 return JIM_ERR;
6478 }
6479 target = curr->target;
6480 }
6481 }
6482 *result = target;
6483 return JIM_OK;
6484 }
6485
6486 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6487 {
6488 static int smp_group = 1;
6489
6490 if (argc == 1) {
6491 LOG_DEBUG("Empty SMP target");
6492 return JIM_OK;
6493 }
6494 LOG_DEBUG("%d", argc);
6495 /* argv[1] = target to associate in smp
6496 * argv[2] = target to associate in smp
6497 * argv[3] ...
6498 */
6499
6500 struct list_head *lh = malloc(sizeof(*lh));
6501 if (!lh) {
6502 LOG_ERROR("Out of memory");
6503 return JIM_ERR;
6504 }
6505 INIT_LIST_HEAD(lh);
6506
6507 for (int i = 1; i < argc; i++) {
6508 struct target_list *new = create_target_list_node(argv[i]);
6509 if (new)
6510 list_add_tail(&new->lh, lh);
6511 }
6512 /* now parse the list of cpu and put the target in smp mode*/
6513 struct target_list *curr;
6514 foreach_smp_target(curr, lh) {
6515 struct target *target = curr->target;
6516 target->smp = smp_group;
6517 target->smp_targets = lh;
6518 }
6519 smp_group++;
6520
6521 struct target *rtos_target;
6522 int retval = get_target_with_common_rtos_type(lh, &rtos_target);
6523 if (retval == JIM_OK && rtos_target)
6524 retval = rtos_smp_init(rtos_target);
6525
6526 return retval;
6527 }
6528
6529
6530 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6531 {
6532 struct jim_getopt_info goi;
6533 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6534 if (goi.argc < 3) {
6535 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6536 "<name> <target_type> [<target_options> ...]");
6537 return JIM_ERR;
6538 }
6539 return target_create(&goi);
6540 }
6541
/* Subcommands of the configuration-stage "target" command
 * (registered via target_command_handlers below). */
static const struct command_registration target_subcommand_handlers[] = {
	{
		.name = "init",
		.mode = COMMAND_CONFIG,
		.handler = handle_target_init_command,
		.help = "initialize targets",
		.usage = "",
	},
	{
		.name = "create",
		.mode = COMMAND_CONFIG,
		.jim_handler = jim_target_create,
		.usage = "name type '-chain-position' name [options ...]",
		.help = "Creates and selects a new target",
	},
	{
		.name = "current",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_current,
		.help = "Returns the currently selected target",
	},
	{
		.name = "types",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_types,
		.help = "Returns the available target types as "
				"a list of strings",
	},
	{
		.name = "names",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_names,
		.help = "Returns the names of all targets as a list of strings",
	},
	{
		.name = "smp",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_smp,
		.usage = "targetname1 targetname2 ...",
		.help = "gather several target in a smp list"
	},

	COMMAND_REGISTRATION_DONE
};
6586
/* One staged section of an image loaded by 'fast_load_image'; the data is
 * kept in server memory until 'fast_load' pushes it to the target. */
struct fast_load {
	target_addr_t address;	/* target address the section is written to */
	uint8_t *data;		/* heap copy of the (possibly clipped) section */
	int length;		/* number of bytes in 'data' */

};

/* Staged image: 'fastload' is an array of 'fastload_num' sections, or
 * NULL when no image is staged. */
static int fastload_num;
static struct fast_load *fastload;
6596
6597 static void free_fastload(void)
6598 {
6599 if (fastload) {
6600 for (int i = 0; i < fastload_num; i++)
6601 free(fastload[i].data);
6602 free(fastload);
6603 fastload = NULL;
6604 }
6605 }
6606
/*
 * 'fast_load_image filename address [type [min_address [max_length]]]'
 *
 * Reads an image file and stages its sections (clipped to the
 * [min_address, max_address) window) in server memory as the module-level
 * 'fastload' array; nothing is written to the target here. A later
 * 'fast_load' pushes the staged data. On any failure the staged image is
 * discarded via free_fastload().
 */
COMMAND_HANDLER(handle_fast_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	/* default window covers the whole address space: -1 wraps to the
	 * maximum target_addr_t value */
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;

	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct duration bench;
	duration_start(&bench);

	/* optional third argument selects the image type (bin/ihex/elf/...) */
	retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
	if (retval != ERROR_OK)
		return retval;

	image_size = 0x0;
	retval = ERROR_OK;
	/* NOTE(review): fastload_num is set before the allocation below is
	 * checked; free_fastload() tolerates this because it tests the
	 * 'fastload' pointer first. */
	fastload_num = image.num_sections;
	fastload = malloc(sizeof(struct fast_load)*image.num_sections);
	if (!fastload) {
		command_print(CMD, "out of memory");
		image_close(&image);
		return ERROR_FAIL;
	}
	/* zero-fill so sections skipped by the window check keep
	 * data == NULL and length == 0 */
	memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD, "error allocating buffer for section (%d bytes)",
						  (int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* keep only sections overlapping [min_address, max_address) */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {
			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			fastload[i].address = image.sections[i].base_address + offset;
			fastload[i].data = malloc(length);
			if (!fastload[i].data) {
				free(buffer);
				command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
							  length);
				retval = ERROR_FAIL;
				break;
			}
			memcpy(fastload[i].data, buffer + offset, length);
			fastload[i].length = length;

			image_size += length;
			command_print(CMD, "%u bytes written at address 0x%8.8x",
						  (unsigned int)length,
						  ((unsigned int)(image.sections[i].base_address + offset)));
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "Loaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));

		command_print(CMD,
				"WARNING: image has not been loaded to target!"
				"You can issue a 'fast_load' to finish loading.");
	}

	image_close(&image);

	/* on any error drop the partially staged image */
	if (retval != ERROR_OK)
		free_fastload();

	return retval;
}
6708
6709 COMMAND_HANDLER(handle_fast_load_command)
6710 {
6711 if (CMD_ARGC > 0)
6712 return ERROR_COMMAND_SYNTAX_ERROR;
6713 if (!fastload) {
6714 LOG_ERROR("No image in memory");
6715 return ERROR_FAIL;
6716 }
6717 int i;
6718 int64_t ms = timeval_ms();
6719 int size = 0;
6720 int retval = ERROR_OK;
6721 for (i = 0; i < fastload_num; i++) {
6722 struct target *target = get_current_target(CMD_CTX);
6723 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6724 (unsigned int)(fastload[i].address),
6725 (unsigned int)(fastload[i].length));
6726 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6727 if (retval != ERROR_OK)
6728 break;
6729 size += fastload[i].length;
6730 }
6731 if (retval == ERROR_OK) {
6732 int64_t after = timeval_ms();
6733 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6734 }
6735 return retval;
6736 }
6737
/* Top-level commands available before 'init': the "targets" selector and
 * the configuration-stage "target" command with its subcommands. */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
6756
/* Register the configuration-stage target commands ("target", "targets")
 * with the command context; called during OpenOCD startup. */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6761
/* When true (the default), nag the user after each reset about settings
 * that could improve performance; toggled by the 'reset_nag' command. */
static bool target_reset_nag = true;

/* Accessor for the reset-nag flag, used by reset handling code. */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
6768
/* 'reset_nag ['enable'|'disable']' - display or set the reset-nag flag. */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
		&target_reset_nag, "Nag after each reset about options to improve "
		"performance");
}
6775
6776 COMMAND_HANDLER(handle_ps_command)
6777 {
6778 struct target *target = get_current_target(CMD_CTX);
6779 char *display;
6780 if (target->state != TARGET_HALTED) {
6781 LOG_INFO("target not halted !!");
6782 return ERROR_OK;
6783 }
6784
6785 if ((target->rtos) && (target->rtos->type)
6786 && (target->rtos->type->ps_command)) {
6787 display = target->rtos->type->ps_command(target);
6788 command_print(CMD, "%s", display);
6789 free(display);
6790 return ERROR_OK;
6791 } else {
6792 LOG_INFO("failed");
6793 return ERROR_TARGET_FAILURE;
6794 }
6795 }
6796
/* Print an optional label followed by 'size' bytes of 'buf' as space
 * separated hex pairs, terminating the output line. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
	if (text)
		command_print_sameline(cmd, "%s", text);

	int pos = 0;
	while (pos < size) {
		command_print_sameline(cmd, " %02x", buf[pos]);
		pos++;
	}
	command_print(cmd, " ");
}
6805
/*
 * 'test_mem_access <size>' - exercise and benchmark the target's memory
 * access functions. Phase 1 tests reads, phase 2 tests writes; each phase
 * sweeps access width (1/2/4), target address misalignment (0..3) and host
 * buffer misalignment (0/1), comparing against a host-side replay of the
 * expected result.
 *
 * NOTE(review): the malloc() calls for the pattern and host buffers are
 * unchecked; write_buf is never freed at the 'nextw' label (leaks once per
 * iteration); and an error that jumps to 'out:' falls through into the
 * write-test phase instead of returning — all preserved here as-is.
 */
COMMAND_HANDLER(handle_test_mem_access_command)
{
	struct target *target = get_current_target(CMD_CTX);
	uint32_t test_size;
	int retval = ERROR_OK;

	if (target->state != TARGET_HALTED) {
		LOG_INFO("target not halted !!");
		return ERROR_FAIL;
	}

	if (CMD_ARGC != 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);

	/* Test reads */
	/* extra 4 bytes leave headroom for the misaligned variants */
	size_t num_bytes = test_size + 4;

	struct working_area *wa = NULL;
	retval = target_alloc_working_area(target, num_bytes, &wa);
	if (retval != ERROR_OK) {
		LOG_ERROR("Not enough working area");
		return ERROR_FAIL;
	}

	uint8_t *test_pattern = malloc(num_bytes);

	for (size_t i = 0; i < num_bytes; i++)
		test_pattern[i] = rand();

	retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
	if (retval != ERROR_OK) {
		LOG_ERROR("Test pattern write failed");
		goto out;
	}

	for (int host_offset = 0; host_offset <= 1; host_offset++) {
		for (int size = 1; size <= 4; size *= 2) {
			for (int offset = 0; offset < 4; offset++) {
				uint32_t count = test_size / size;
				/* guard elements before/after the read window plus
				 * the host misalignment byte */
				size_t host_bufsiz = (count + 2) * size + host_offset;
				uint8_t *read_ref = malloc(host_bufsiz);
				uint8_t *read_buf = malloc(host_bufsiz);

				/* identical random fill so untouched guard bytes
				 * still compare equal afterwards */
				for (size_t i = 0; i < host_bufsiz; i++) {
					read_ref[i] = rand();
					read_buf[i] = read_ref[i];
				}
				command_print_sameline(CMD,
						"Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
						size, offset, host_offset ? "un" : "");

				struct duration bench;
				duration_start(&bench);

				retval = target_read_memory(target, wa->address + offset, size, count,
						read_buf + size + host_offset);

				duration_measure(&bench);

				if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
					command_print(CMD, "Unsupported alignment");
					goto next;
				} else if (retval != ERROR_OK) {
					command_print(CMD, "Memory read failed");
					goto next;
				}

				/* replay on host */
				memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);

				/* check result */
				int result = memcmp(read_ref, read_buf, host_bufsiz);
				if (result == 0) {
					command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
							duration_elapsed(&bench),
							duration_kbps(&bench, count * size));
				} else {
					command_print(CMD, "Compare failed");
					binprint(CMD, "ref:", read_ref, host_bufsiz);
					binprint(CMD, "buf:", read_buf, host_bufsiz);
				}
next:
				free(read_ref);
				free(read_buf);
			}
		}
	}

out:
	free(test_pattern);

	target_free_working_area(target, wa);

	/* Test writes */
	/* headroom for leading pad, write window misalignment, trailing pad */
	num_bytes = test_size + 4 + 4 + 4;

	retval = target_alloc_working_area(target, num_bytes, &wa);
	if (retval != ERROR_OK) {
		LOG_ERROR("Not enough working area");
		return ERROR_FAIL;
	}

	test_pattern = malloc(num_bytes);

	for (size_t i = 0; i < num_bytes; i++)
		test_pattern[i] = rand();

	for (int host_offset = 0; host_offset <= 1; host_offset++) {
		for (int size = 1; size <= 4; size *= 2) {
			for (int offset = 0; offset < 4; offset++) {
				uint32_t count = test_size / size;
				size_t host_bufsiz = count * size + host_offset;
				uint8_t *read_ref = malloc(num_bytes);
				uint8_t *read_buf = malloc(num_bytes);
				uint8_t *write_buf = malloc(host_bufsiz);

				for (size_t i = 0; i < host_bufsiz; i++)
					write_buf[i] = rand();
				command_print_sameline(CMD,
						"Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
						size, offset, host_offset ? "un" : "");

				/* re-seed the whole working area so stale data from
				 * the previous iteration cannot mask a short write */
				retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
				if (retval != ERROR_OK) {
					command_print(CMD, "Test pattern write failed");
					goto nextw;
				}

				/* replay on host */
				memcpy(read_ref, test_pattern, num_bytes);
				memcpy(read_ref + size + offset, write_buf + host_offset, count * size);

				struct duration bench;
				duration_start(&bench);

				retval = target_write_memory(target, wa->address + size + offset, size, count,
						write_buf + host_offset);

				duration_measure(&bench);

				if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
					command_print(CMD, "Unsupported alignment");
					goto nextw;
				} else if (retval != ERROR_OK) {
					command_print(CMD, "Memory write failed");
					goto nextw;
				}

				/* read back */
				retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
				if (retval != ERROR_OK) {
					command_print(CMD, "Test pattern write failed");
					goto nextw;
				}

				/* check result */
				int result = memcmp(read_ref, read_buf, num_bytes);
				if (result == 0) {
					command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
							duration_elapsed(&bench),
							duration_kbps(&bench, count * size));
				} else {
					command_print(CMD, "Compare failed");
					binprint(CMD, "ref:", read_ref, num_bytes);
					binprint(CMD, "buf:", read_buf, num_bytes);
				}
nextw:
				free(read_ref);
				free(read_buf);
			}
		}
	}

	free(test_pattern);

	target_free_working_area(target, wa);
	return retval;
}
6986
/* Run-time ("EXEC" mode) target commands: run control, memory and register
 * access, image load/verify, breakpoints/watchpoints and diagnostics.
 * Registered once targets exist, from target_register_user_commands(). */
static const struct command_registration target_exec_command_handlers[] = {
	{
		.name = "fast_load_image",
		.handler = handle_fast_load_image_command,
		.mode = COMMAND_ANY,
		.help = "Load image into server memory for later use by "
			"fast_load; primarily for profiling",
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address [max_length]]",
	},
	{
		.name = "fast_load",
		.handler = handle_fast_load_command,
		.mode = COMMAND_EXEC,
		.help = "loads active fast load image to current target "
			"- mainly for profiling purposes",
		.usage = "",
	},
	{
		.name = "profile",
		.handler = handle_profile_command,
		.mode = COMMAND_EXEC,
		.usage = "seconds filename [start end]",
		.help = "profiling samples the CPU PC",
	},
	/** @todo don't register virt2phys() unless target supports it */
	{
		.name = "virt2phys",
		.handler = handle_virt2phys_command,
		.mode = COMMAND_ANY,
		.help = "translate a virtual address into a physical address",
		.usage = "virtual_address",
	},
	{
		.name = "reg",
		.handler = handle_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display (reread from target with \"force\") or set a register; "
			"with no arguments, displays all registers and their values",
		.usage = "[(register_number|register_name) [(value|'force')]]",
	},
	{
		.name = "poll",
		.handler = handle_poll_command,
		.mode = COMMAND_EXEC,
		.help = "poll target state; or reconfigure background polling",
		.usage = "['on'|'off']",
	},
	{
		.name = "wait_halt",
		.handler = handle_wait_halt_command,
		.mode = COMMAND_EXEC,
		.help = "wait up to the specified number of milliseconds "
			"(default 5000) for a previously requested halt",
		.usage = "[milliseconds]",
	},
	{
		.name = "halt",
		.handler = handle_halt_command,
		.mode = COMMAND_EXEC,
		.help = "request target to halt, then wait up to the specified "
			"number of milliseconds (default 5000) for it to complete",
		.usage = "[milliseconds]",
	},
	{
		.name = "resume",
		.handler = handle_resume_command,
		.mode = COMMAND_EXEC,
		.help = "resume target execution from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "reset",
		.handler = handle_reset_command,
		.mode = COMMAND_EXEC,
		.usage = "[run|halt|init]",
		.help = "Reset all targets into the specified mode. "
			"Default reset mode is run, if not given.",
	},
	{
		.name = "soft_reset_halt",
		.handler = handle_soft_reset_halt_command,
		.mode = COMMAND_EXEC,
		.usage = "",
		.help = "halt the target and do a soft reset",
	},
	{
		.name = "step",
		.handler = handle_step_command,
		.mode = COMMAND_EXEC,
		.help = "step one instruction from current PC or address",
		.usage = "[address]",
	},
	/* memory display commands share one handler; the name selects width */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory double-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory half-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory bytes",
		.usage = "['phys'] address [count]",
	},
	/* memory write commands likewise share one handler */
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory double-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory half-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory byte",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "bp",
		.handler = handle_bp_command,
		.mode = COMMAND_EXEC,
		.help = "list or set hardware or software breakpoint",
		.usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
	},
	{
		.name = "rbp",
		.handler = handle_rbp_command,
		.mode = COMMAND_EXEC,
		.help = "remove breakpoint",
		.usage = "'all' | address",
	},
	{
		.name = "wp",
		.handler = handle_wp_command,
		.mode = COMMAND_EXEC,
		.help = "list (no params) or create watchpoints",
		.usage = "[address length [('r'|'w'|'a') value [mask]]]",
	},
	{
		.name = "rwp",
		.handler = handle_rwp_command,
		.mode = COMMAND_EXEC,
		.help = "remove watchpoint",
		.usage = "address",
	},
	{
		.name = "load_image",
		.handler = handle_load_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address] [max_length]",
	},
	{
		.name = "dump_image",
		.handler = handle_dump_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address size",
	},
	{
		.name = "verify_image_checksum",
		.handler = handle_verify_image_checksum_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "verify_image",
		.handler = handle_verify_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "test_image",
		.handler = handle_test_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	{
		.name = "reset_nag",
		.handler = handle_target_reset_nag,
		.mode = COMMAND_ANY,
		.help = "Nag after each reset about options that could have been "
			"enabled to improve performance.",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "ps",
		.handler = handle_ps_command,
		.mode = COMMAND_EXEC,
		.help = "list all tasks",
		.usage = "",
	},
	{
		.name = "test_mem_access",
		.handler = handle_test_mem_access_command,
		.mode = COMMAND_EXEC,
		.help = "Test the target's memory access functions",
		.usage = "size",
	},

	COMMAND_REGISTRATION_DONE
};
7248 static int target_register_user_commands(struct command_context *cmd_ctx)
7249 {
7250 int retval = ERROR_OK;
7251 retval = target_request_register_commands(cmd_ctx);
7252 if (retval != ERROR_OK)
7253 return retval;
7254
7255 retval = trace_register_commands(cmd_ctx);
7256 if (retval != ERROR_OK)
7257 return retval;
7258
7259
7260 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7261 }

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account and then change the URL to https://review.openocd.org/login/?link to reach this page again; this time it will link the new login method to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)