openocd: fix SPDX tag format for .c files
[openocd.git] / src / target / target.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2007-2010 Øyvind Harboe *
8 * oyvind.harboe@zylin.com *
9 * *
10 * Copyright (C) 2008, Duane Ellis *
11 * openocd@duaneeellis.com *
12 * *
13 * Copyright (C) 2008 by Spencer Oliver *
14 * spen@spen-soft.co.uk *
15 * *
16 * Copyright (C) 2008 by Rick Altherr *
17 * kc8apf@kc8apf.net> *
18 * *
19 * Copyright (C) 2011 by Broadcom Corporation *
20 * Evan Hunter - ehunter@broadcom.com *
21 * *
22 * Copyright (C) ST-Ericsson SA 2011 *
23 * michel.jaouen@stericsson.com : smp minimum support *
24 * *
25 * Copyright (C) 2011 Andreas Fritiofson *
26 * andreas.fritiofson@gmail.com *
27 ***************************************************************************/
28
29 #ifdef HAVE_CONFIG_H
30 #include "config.h"
31 #endif
32
33 #include <helper/align.h>
34 #include <helper/time_support.h>
35 #include <jtag/jtag.h>
36 #include <flash/nor/core.h>
37
38 #include "target.h"
39 #include "target_type.h"
40 #include "target_request.h"
41 #include "breakpoints.h"
42 #include "register.h"
43 #include "trace.h"
44 #include "image.h"
45 #include "rtos/rtos.h"
46 #include "transport/transport.h"
47 #include "arm_cti.h"
48 #include "smp.h"
49 #include "semihosting_common.h"
50
51 /* default halt wait timeout (ms) */
52 #define DEFAULT_HALT_TIMEOUT 5000
53
54 static int target_read_buffer_default(struct target *target, target_addr_t address,
55 uint32_t count, uint8_t *buffer);
56 static int target_write_buffer_default(struct target *target, target_addr_t address,
57 uint32_t count, const uint8_t *buffer);
58 static int target_array2mem(Jim_Interp *interp, struct target *target,
59 int argc, Jim_Obj * const *argv);
60 static int target_mem2array(Jim_Interp *interp, struct target *target,
61 int argc, Jim_Obj * const *argv);
62 static int target_register_user_commands(struct command_context *cmd_ctx);
63 static int target_get_gdb_fileio_info_default(struct target *target,
64 struct gdb_fileio_info *fileio_info);
65 static int target_gdb_fileio_end_default(struct target *target, int retcode,
66 int fileio_errno, bool ctrl_c);
67
68 /* targets */
69 extern struct target_type arm7tdmi_target;
70 extern struct target_type arm720t_target;
71 extern struct target_type arm9tdmi_target;
72 extern struct target_type arm920t_target;
73 extern struct target_type arm966e_target;
74 extern struct target_type arm946e_target;
75 extern struct target_type arm926ejs_target;
76 extern struct target_type fa526_target;
77 extern struct target_type feroceon_target;
78 extern struct target_type dragonite_target;
79 extern struct target_type xscale_target;
80 extern struct target_type xtensa_chip_target;
81 extern struct target_type cortexm_target;
82 extern struct target_type cortexa_target;
83 extern struct target_type aarch64_target;
84 extern struct target_type cortexr4_target;
85 extern struct target_type arm11_target;
86 extern struct target_type ls1_sap_target;
87 extern struct target_type mips_m4k_target;
88 extern struct target_type mips_mips64_target;
89 extern struct target_type avr_target;
90 extern struct target_type dsp563xx_target;
91 extern struct target_type dsp5680xx_target;
92 extern struct target_type testee_target;
93 extern struct target_type avr32_ap7k_target;
94 extern struct target_type hla_target;
95 extern struct target_type nds32_v2_target;
96 extern struct target_type nds32_v3_target;
97 extern struct target_type nds32_v3m_target;
98 extern struct target_type esp32_target;
99 extern struct target_type esp32s2_target;
100 extern struct target_type esp32s3_target;
101 extern struct target_type or1k_target;
102 extern struct target_type quark_x10xx_target;
103 extern struct target_type quark_d20xx_target;
104 extern struct target_type stm8_target;
105 extern struct target_type riscv_target;
106 extern struct target_type mem_ap_target;
107 extern struct target_type esirisc_target;
108 extern struct target_type arcv2_target;
109
/* Registry of every target driver linked into this build; the
 * NULL entry terminates the list. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&xtensa_chip_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&esp32_target,
	&esp32s2_target,
	&esp32s3_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&mips_mips64_target,
	NULL,
};
153
154 struct target *all_targets;
155 static struct target_event_callback *target_event_callbacks;
156 static struct target_timer_callback *target_timer_callbacks;
157 static int64_t target_timer_next_event_value;
158 static LIST_HEAD(target_reset_callback_list);
159 static LIST_HEAD(target_trace_callback_list);
160 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
161 static LIST_HEAD(empty_smp_targets);
162
/* Keyword spellings accepted for assert/deassert arguments,
 * including the T/F and t/f shorthands. */
static const struct jim_nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};
172
/* Maps ERROR_TARGET_* codes to short "err-*" strings; used by
 * target_strerror_safe() below. */
static const struct jim_nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
187
188 static const char *target_strerror_safe(int err)
189 {
190 const struct jim_nvp *n;
191
192 n = jim_nvp_value2name_simple(nvp_error_target, err);
193 if (!n->name)
194 return "unknown";
195 else
196 return n->name;
197 }
198
/* Symbolic names for TARGET_EVENT_* codes, used when dispatching and
 * reporting target events (see target_event_name()). */
static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X100, .name = "semihosting-user-cmd-0x100" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X101, .name = "semihosting-user-cmd-0x101" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X102, .name = "semihosting-user-cmd-0x102" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X103, .name = "semihosting-user-cmd-0x103" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X104, .name = "semihosting-user-cmd-0x104" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X105, .name = "semihosting-user-cmd-0x105" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X106, .name = "semihosting-user-cmd-0x106" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X107, .name = "semihosting-user-cmd-0x107" },

	{ .name = NULL, .value = -1 }
};
250
/* Printable names for the TARGET_* run-state values
 * (see target_state_name()). */
static const struct jim_nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};
259
/* Printable names for DBG_REASON_* codes (see debug_reason_name()). */
static const struct jim_nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
272
/* Endianness keywords accepted from configuration, with the
 * be/le shorthands. */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};
280
/* Reset-mode keywords (see target_reset_mode_name() and
 * target_process_reset()). */
static const struct jim_nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};
288
289 const char *debug_reason_name(struct target *t)
290 {
291 const char *cp;
292
293 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
294 t->debug_reason)->name;
295 if (!cp) {
296 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
297 cp = "(*BUG*unknown*BUG*)";
298 }
299 return cp;
300 }
301
302 const char *target_state_name(struct target *t)
303 {
304 const char *cp;
305 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
306 if (!cp) {
307 LOG_ERROR("Invalid target state: %d", (int)(t->state));
308 cp = "(*BUG*unknown*BUG*)";
309 }
310
311 if (!target_was_examined(t) && t->defer_examine)
312 cp = "examine deferred";
313
314 return cp;
315 }
316
317 const char *target_event_name(enum target_event event)
318 {
319 const char *cp;
320 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
321 if (!cp) {
322 LOG_ERROR("Invalid target event: %d", (int)(event));
323 cp = "(*BUG*unknown*BUG*)";
324 }
325 return cp;
326 }
327
328 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
329 {
330 const char *cp;
331 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
332 if (!cp) {
333 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
334 cp = "(*BUG*unknown*BUG*)";
335 }
336 return cp;
337 }
338
339 /* determine the number of the new target */
340 static int new_target_number(void)
341 {
342 struct target *t;
343 int x;
344
345 /* number is 0 based */
346 x = -1;
347 t = all_targets;
348 while (t) {
349 if (x < t->target_number)
350 x = t->target_number;
351 t = t->next;
352 }
353 return x + 1;
354 }
355
356 static void append_to_list_all_targets(struct target *target)
357 {
358 struct target **t = &all_targets;
359
360 while (*t)
361 t = &((*t)->next);
362 *t = target;
363 }
364
365 /* read a uint64_t from a buffer in target memory endianness */
366 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
367 {
368 if (target->endianness == TARGET_LITTLE_ENDIAN)
369 return le_to_h_u64(buffer);
370 else
371 return be_to_h_u64(buffer);
372 }
373
374 /* read a uint32_t from a buffer in target memory endianness */
375 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
376 {
377 if (target->endianness == TARGET_LITTLE_ENDIAN)
378 return le_to_h_u32(buffer);
379 else
380 return be_to_h_u32(buffer);
381 }
382
383 /* read a uint24_t from a buffer in target memory endianness */
384 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
385 {
386 if (target->endianness == TARGET_LITTLE_ENDIAN)
387 return le_to_h_u24(buffer);
388 else
389 return be_to_h_u24(buffer);
390 }
391
392 /* read a uint16_t from a buffer in target memory endianness */
393 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
394 {
395 if (target->endianness == TARGET_LITTLE_ENDIAN)
396 return le_to_h_u16(buffer);
397 else
398 return be_to_h_u16(buffer);
399 }
400
401 /* write a uint64_t to a buffer in target memory endianness */
402 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
403 {
404 if (target->endianness == TARGET_LITTLE_ENDIAN)
405 h_u64_to_le(buffer, value);
406 else
407 h_u64_to_be(buffer, value);
408 }
409
410 /* write a uint32_t to a buffer in target memory endianness */
411 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
412 {
413 if (target->endianness == TARGET_LITTLE_ENDIAN)
414 h_u32_to_le(buffer, value);
415 else
416 h_u32_to_be(buffer, value);
417 }
418
419 /* write a uint24_t to a buffer in target memory endianness */
420 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
421 {
422 if (target->endianness == TARGET_LITTLE_ENDIAN)
423 h_u24_to_le(buffer, value);
424 else
425 h_u24_to_be(buffer, value);
426 }
427
428 /* write a uint16_t to a buffer in target memory endianness */
429 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
430 {
431 if (target->endianness == TARGET_LITTLE_ENDIAN)
432 h_u16_to_le(buffer, value);
433 else
434 h_u16_to_be(buffer, value);
435 }
436
/* Store a single byte; endianness is irrelevant for one byte, so the
 * target parameter is intentionally unused. */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	*buffer = value;
}
442
/* Read @count uint64_t values from a target-endian buffer into @dstbuf.
 * (The original comment said "write"; this function reads.) */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u64(target, buffer + 8 * i);
}
450
/* Read @count uint32_t values from a target-endian buffer into @dstbuf. */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, buffer + 4 * i);
}
458
/* Read @count uint16_t values from a target-endian buffer into @dstbuf. */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, buffer + 2 * i);
}
466
/* Pack @count uint64_t values from @srcbuf into a target-endian buffer. */
void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
{
	for (uint32_t i = 0; i < count; i++)
		target_buffer_set_u64(target, buffer + 8 * i, srcbuf[i]);
}
474
/* Pack @count uint32_t values from @srcbuf into a target-endian buffer. */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
{
	for (uint32_t i = 0; i < count; i++)
		target_buffer_set_u32(target, buffer + 4 * i, srcbuf[i]);
}
482
/* Pack @count uint16_t values from @srcbuf into a target-endian buffer. */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
{
	for (uint32_t i = 0; i < count; i++)
		target_buffer_set_u16(target, buffer + 2 * i, srcbuf[i]);
}
490
491 /* return a pointer to a configured target; id is name or number */
492 struct target *get_target(const char *id)
493 {
494 struct target *target;
495
496 /* try as tcltarget name */
497 for (target = all_targets; target; target = target->next) {
498 if (!target_name(target))
499 continue;
500 if (strcmp(id, target_name(target)) == 0)
501 return target;
502 }
503
504 /* It's OK to remove this fallback sometime after August 2010 or so */
505
506 /* no match, try as number */
507 unsigned num;
508 if (parse_uint(id, &num) != ERROR_OK)
509 return NULL;
510
511 for (target = all_targets; target; target = target->next) {
512 if (target->target_number == (int)num) {
513 LOG_WARNING("use '%s' as target identifier, not '%u'",
514 target_name(target), num);
515 return target;
516 }
517 }
518
519 return NULL;
520 }
521
522 /* returns a pointer to the n-th configured target */
523 struct target *get_target_by_num(int num)
524 {
525 struct target *target = all_targets;
526
527 while (target) {
528 if (target->target_number == num)
529 return target;
530 target = target->next;
531 }
532
533 return NULL;
534 }
535
/* Return the current target for @cmd_ctx; a missing current target is
 * an internal invariant violation and aborts the process. */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_current_target_or_null(cmd_ctx);

	if (target)
		return target;

	LOG_ERROR("BUG: current_target out of bounds");
	exit(-1);
}
547
548 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
549 {
550 return cmd_ctx->current_target_override
551 ? cmd_ctx->current_target_override
552 : cmd_ctx->current_target;
553 }
554
555 int target_poll(struct target *target)
556 {
557 int retval;
558
559 /* We can't poll until after examine */
560 if (!target_was_examined(target)) {
561 /* Fail silently lest we pollute the log */
562 return ERROR_FAIL;
563 }
564
565 retval = target->type->poll(target);
566 if (retval != ERROR_OK)
567 return retval;
568
569 if (target->halt_issued) {
570 if (target->state == TARGET_HALTED)
571 target->halt_issued = false;
572 else {
573 int64_t t = timeval_ms() - target->halt_issued_time;
574 if (t > DEFAULT_HALT_TIMEOUT) {
575 target->halt_issued = false;
576 LOG_INFO("Halt timed out, wake up GDB.");
577 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
578 }
579 }
580 }
581
582 return ERROR_OK;
583 }
584
585 int target_halt(struct target *target)
586 {
587 int retval;
588 /* We can't poll until after examine */
589 if (!target_was_examined(target)) {
590 LOG_ERROR("Target not examined yet");
591 return ERROR_FAIL;
592 }
593
594 retval = target->type->halt(target);
595 if (retval != ERROR_OK)
596 return retval;
597
598 target->halt_issued = true;
599 target->halt_issued_time = timeval_ms();
600
601 return ERROR_OK;
602 }
603
/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 * of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 * should be skipped.  (For example, maybe execution was stopped by
 * such a breakpoint, in which case it would be counterproductive to
 * let it re-trigger.
 * @param debug_execution False if all working areas allocated by OpenOCD
 * should be released and/or restored to their original contents.
 * (This would for example be true to run some downloaded "helper"
 * algorithm code, which resides in one such working buffer and uses
 * another for data storage.)
 *
 * @return ERROR_FAIL if the target has not been examined, otherwise
 * the result of the target driver's resume() method.
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies.  For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled.  (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm.... and
 * enabling them could cause such helpers to malfunction (for example,
 * by overwriting data with a breakpoint instruction.  On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	/*
	 * resume() triggers the event 'resumed'. The execution of TCL commands
	 * in the event handler causes the polling of targets. If the target has
	 * already halted for a breakpoint, polling will run the 'halted' event
	 * handler before the pending 'resumed' handler.
	 * Disable polling during resume() to guarantee the execution of handlers
	 * in the correct order.
	 */
	bool save_poll_mask = jtag_poll_mask();
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	jtag_poll_unmask(save_poll_mask);

	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
670
671 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
672 {
673 char buf[100];
674 int retval;
675 struct jim_nvp *n;
676 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
677 if (!n->name) {
678 LOG_ERROR("invalid reset mode");
679 return ERROR_FAIL;
680 }
681
682 struct target *target;
683 for (target = all_targets; target; target = target->next)
684 target_call_reset_callbacks(target, reset_mode);
685
686 /* disable polling during reset to make reset event scripts
687 * more predictable, i.e. dr/irscan & pathmove in events will
688 * not have JTAG operations injected into the middle of a sequence.
689 */
690 bool save_poll_mask = jtag_poll_mask();
691
692 sprintf(buf, "ocd_process_reset %s", n->name);
693 retval = Jim_Eval(cmd->ctx->interp, buf);
694
695 jtag_poll_unmask(save_poll_mask);
696
697 if (retval != JIM_OK) {
698 Jim_MakeErrorMessage(cmd->ctx->interp);
699 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
700 return ERROR_FAIL;
701 }
702
703 /* We want any events to be processed before the prompt */
704 retval = target_call_timer_callbacks_now();
705
706 for (target = all_targets; target; target = target->next) {
707 target->type->check_reset(target);
708 target->running_alg = false;
709 }
710
711 return retval;
712 }
713
/* Default virt2phys for targets without address translation:
 * the physical address equals the virtual address. */
static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
720
/* Default mmu handler: reports the MMU as disabled. */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
726
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 * Also used by target_examine_one() when examination fails.
 */
static inline void target_reset_examined(struct target *target)
{
	target->examined = false;
}
735
/* Default examine handler: simply marks the target as examined. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
741
/* no check by default: always reports success */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
747
748 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
749 * Keep in sync */
750 int target_examine_one(struct target *target)
751 {
752 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
753
754 int retval = target->type->examine(target);
755 if (retval != ERROR_OK) {
756 target_reset_examined(target);
757 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
758 return retval;
759 }
760
761 target_set_examined(target);
762 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
763
764 return ERROR_OK;
765 }
766
767 static int jtag_enable_callback(enum jtag_event event, void *priv)
768 {
769 struct target *target = priv;
770
771 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
772 return ERROR_OK;
773
774 jtag_unregister_event_callback(jtag_enable_callback, target);
775
776 return target_examine_one(target);
777 }
778
779 /* Targets that correctly implement init + examine, i.e.
780 * no communication with target during init:
781 *
782 * XScale
783 */
784 int target_examine(void)
785 {
786 int retval = ERROR_OK;
787 struct target *target;
788
789 for (target = all_targets; target; target = target->next) {
790 /* defer examination, but don't skip it */
791 if (!target->tap->enabled) {
792 jtag_register_event_callback(jtag_enable_callback,
793 target);
794 continue;
795 }
796
797 if (target->defer_examine)
798 continue;
799
800 int retval2 = target_examine_one(target);
801 if (retval2 != ERROR_OK) {
802 LOG_WARNING("target %s examination failed", target_name(target));
803 retval = retval2;
804 }
805 }
806 return retval;
807 }
808
/* Return the name string of the target's driver (target->type->name). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
813
814 static int target_soft_reset_halt(struct target *target)
815 {
816 if (!target_was_examined(target)) {
817 LOG_ERROR("Target not examined yet");
818 return ERROR_FAIL;
819 }
820 if (!target->type->soft_reset_halt) {
821 LOG_ERROR("Target %s does not support soft_reset_halt",
822 target_name(target));
823 return ERROR_FAIL;
824 }
825 return target->type->soft_reset_halt(target);
826 }
827
/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param num_mem_params count of entries in @a mem_params
 * @param mem_params memory parameters passed to the algorithm
 * @param num_reg_params count of entries in @a reg_param
 * @param reg_param register parameters passed to the algorithm
 * @param entry_point address where execution starts
 * @param exit_point address where execution is expected to end
 * @param timeout_ms how long to wait for completion
 * @param arch_info target-specific description of the algorithm.
 * @return ERROR_FAIL if the target is unexamined or the driver lacks
 * run_algorithm, otherwise the driver's result.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		target_addr_t entry_point, target_addr_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	/* running_alg guards against overlapping algorithm runs. */
	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
874
/**
 * Executes a target-specific native code algorithm and leaves it running.
 *
 * @param target used to run the algorithm
 * @param num_mem_params count of entries in @a mem_params
 * @param mem_params memory parameters passed to the algorithm
 * @param num_reg_params count of entries in @a reg_params
 * @param reg_params register parameters passed to the algorithm
 * @param entry_point address where execution starts
 * @param exit_point address where execution is expected to end
 * @param arch_info target-specific description of the algorithm.
 * @return ERROR_FAIL if the target is unexamined, the driver lacks
 * start_algorithm, or an algorithm is already running; otherwise the
 * driver's result.  On success the target is left with running_alg set;
 * pair with target_wait_algorithm().
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t entry_point, target_addr_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}
918
/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 *
 * @param target used to run the algorithm
 * @param num_mem_params count of entries in @a mem_params
 * @param mem_params memory parameters read back from the algorithm
 * @param num_reg_params count of entries in @a reg_params
 * @param reg_params register parameters read back from the algorithm
 * @param exit_point address where execution is expected to end
 * @param timeout_ms how long to wait for completion
 * @param arch_info target-specific description of the algorithm.
 * @return ERROR_FAIL if the driver lacks wait_algorithm or no algorithm
 * is running; otherwise the driver's result.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	/* On timeout, keep running_alg set: the algorithm is still
	 * considered running and the caller may wait again. */
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}
959
/**
 * Streams data to a circular buffer on target intended for consumption by code
 * running asynchronously on target.
 *
 * This is intended for applications where target-specific native code runs
 * on the target, receives data from the circular buffer, does something with
 * it (most likely writing it to a flash memory), and advances the circular
 * buffer pointer.
 *
 * This assumes that the helper algorithm has already been loaded to the target,
 * but has not been started yet. Given memory and register parameters are passed
 * to the algorithm.
 *
 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
 * following format:
 *
 * [buffer_start + 0, buffer_start + 4):
 *	Write Pointer address (aka head). Written and updated by this
 *	routine when new data is written to the circular buffer.
 * [buffer_start + 4, buffer_start + 8):
 *	Read Pointer address (aka tail). Updated by code running on the
 *	target after it consumes data.
 * [buffer_start + 8, buffer_start + buffer_size):
 *	Circular buffer contents.
 *
 * See contrib/loaders/flash/stm32f1x.S for an example.
 *
 * @param target used to run the algorithm
 * @param buffer address on the host where data to be sent is located
 * @param count number of blocks to send
 * @param block_size size in bytes of each block
 * @param num_mem_params count of memory-based params to pass to algorithm
 * @param mem_params memory-based params to pass to algorithm
 * @param num_reg_params count of register-based params to pass to algorithm
 * @param reg_params memory-based params to pass to algorithm
 * @param buffer_start address on the target of the circular buffer structure
 * @param buffer_size size of the circular buffer structure
 * @param entry_point address on the target to execute to start the algorithm
 * @param exit_point address at which to set a breakpoint to catch the
 *	end of the algorithm; can be 0 if target triggers a breakpoint itself
 * @param arch_info
 */

int target_run_flash_async_algorithm(struct target *target,
		const uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	/* Original host-side start of the data; used only for progress logging. */
	const uint8_t *buffer_orig = buffer;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	/* wp == rp is the empty condition, so the fifo starts out empty. */
	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(IS_PWR_OF_2(block_size));

	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target and let it idle while writing the first chunk */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash write algorithm");
		return retval;
	}

	while (count > 0) {

		retval = target_read_u32(target, rp_addr, &rp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get read pointer");
			break;
		}

		LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
			(size_t) (buffer - buffer_orig), count, wp, rp);

		/* rp == 0 is the target-side abort signal (see the abort write below). */
		if (rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		/* The target must keep rp block-aligned and inside the fifo data area. */
		if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. Make sure to not fill it completely,
		 * because that would make wp == rp and that's the empty condition. */
		uint32_t thisrun_bytes;
		if (rp > wp)
			thisrun_bytes = rp - wp - block_size;
		else if (rp > fifo_start_addr)
			thisrun_bytes = fifo_end_addr - wp;
		else
			thisrun_bytes = fifo_end_addr - wp - block_size;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * programming. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(2);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a stellaris using the new ICDI interface */
			if (timeout++ >= 2500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to write */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Force end of large blocks to be word aligned */
		if (thisrun_bytes >= 16)
			thisrun_bytes -= (rp + thisrun_bytes) & 0x03;

		/* Write data to fifo */
		retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap write pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		wp += thisrun_bytes;
		if (wp >= fifo_end_addr)
			wp = fifo_start_addr;

		/* Store updated write pointer to target */
		retval = target_write_u32(target, wp_addr, wp);
		if (retval != ERROR_OK)
			break;

		/* Avoid GDB timeouts */
		keep_alive();
	}

	if (retval != ERROR_OK) {
		/* abort flash write algorithm on target */
		target_write_u32(target, wp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash write algorithm");
		retval = retval2;
	}

	if (retval == ERROR_OK) {
		/* check if algorithm set rp = 0 after fifo writer loop finished */
		retval = target_read_u32(target, rp_addr, &rp);
		if (retval == ERROR_OK && rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
		}
	}

	return retval;
}
1155
1156 int target_run_read_async_algorithm(struct target *target,
1157 uint8_t *buffer, uint32_t count, int block_size,
1158 int num_mem_params, struct mem_param *mem_params,
1159 int num_reg_params, struct reg_param *reg_params,
1160 uint32_t buffer_start, uint32_t buffer_size,
1161 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1162 {
1163 int retval;
1164 int timeout = 0;
1165
1166 const uint8_t *buffer_orig = buffer;
1167
1168 /* Set up working area. First word is write pointer, second word is read pointer,
1169 * rest is fifo data area. */
1170 uint32_t wp_addr = buffer_start;
1171 uint32_t rp_addr = buffer_start + 4;
1172 uint32_t fifo_start_addr = buffer_start + 8;
1173 uint32_t fifo_end_addr = buffer_start + buffer_size;
1174
1175 uint32_t wp = fifo_start_addr;
1176 uint32_t rp = fifo_start_addr;
1177
1178 /* validate block_size is 2^n */
1179 assert(IS_PWR_OF_2(block_size));
1180
1181 retval = target_write_u32(target, wp_addr, wp);
1182 if (retval != ERROR_OK)
1183 return retval;
1184 retval = target_write_u32(target, rp_addr, rp);
1185 if (retval != ERROR_OK)
1186 return retval;
1187
1188 /* Start up algorithm on target */
1189 retval = target_start_algorithm(target, num_mem_params, mem_params,
1190 num_reg_params, reg_params,
1191 entry_point,
1192 exit_point,
1193 arch_info);
1194
1195 if (retval != ERROR_OK) {
1196 LOG_ERROR("error starting target flash read algorithm");
1197 return retval;
1198 }
1199
1200 while (count > 0) {
1201 retval = target_read_u32(target, wp_addr, &wp);
1202 if (retval != ERROR_OK) {
1203 LOG_ERROR("failed to get write pointer");
1204 break;
1205 }
1206
1207 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1208 (size_t)(buffer - buffer_orig), count, wp, rp);
1209
1210 if (wp == 0) {
1211 LOG_ERROR("flash read algorithm aborted by target");
1212 retval = ERROR_FLASH_OPERATION_FAILED;
1213 break;
1214 }
1215
1216 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1217 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1218 break;
1219 }
1220
1221 /* Count the number of bytes available in the fifo without
1222 * crossing the wrap around. */
1223 uint32_t thisrun_bytes;
1224 if (wp >= rp)
1225 thisrun_bytes = wp - rp;
1226 else
1227 thisrun_bytes = fifo_end_addr - rp;
1228
1229 if (thisrun_bytes == 0) {
1230 /* Throttle polling a bit if transfer is (much) faster than flash
1231 * reading. The exact delay shouldn't matter as long as it's
1232 * less than buffer size / flash speed. This is very unlikely to
1233 * run when using high latency connections such as USB. */
1234 alive_sleep(2);
1235
1236 /* to stop an infinite loop on some targets check and increment a timeout
1237 * this issue was observed on a stellaris using the new ICDI interface */
1238 if (timeout++ >= 2500) {
1239 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1240 return ERROR_FLASH_OPERATION_FAILED;
1241 }
1242 continue;
1243 }
1244
1245 /* Reset our timeout */
1246 timeout = 0;
1247
1248 /* Limit to the amount of data we actually want to read */
1249 if (thisrun_bytes > count * block_size)
1250 thisrun_bytes = count * block_size;
1251
1252 /* Force end of large blocks to be word aligned */
1253 if (thisrun_bytes >= 16)
1254 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1255
1256 /* Read data from fifo */
1257 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1258 if (retval != ERROR_OK)
1259 break;
1260
1261 /* Update counters and wrap write pointer */
1262 buffer += thisrun_bytes;
1263 count -= thisrun_bytes / block_size;
1264 rp += thisrun_bytes;
1265 if (rp >= fifo_end_addr)
1266 rp = fifo_start_addr;
1267
1268 /* Store updated write pointer to target */
1269 retval = target_write_u32(target, rp_addr, rp);
1270 if (retval != ERROR_OK)
1271 break;
1272
1273 /* Avoid GDB timeouts */
1274 keep_alive();
1275
1276 }
1277
1278 if (retval != ERROR_OK) {
1279 /* abort flash write algorithm on target */
1280 target_write_u32(target, rp_addr, 0);
1281 }
1282
1283 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1284 num_reg_params, reg_params,
1285 exit_point,
1286 10000,
1287 arch_info);
1288
1289 if (retval2 != ERROR_OK) {
1290 LOG_ERROR("error waiting for target flash write algorithm");
1291 retval = retval2;
1292 }
1293
1294 if (retval == ERROR_OK) {
1295 /* check if algorithm set wp = 0 after fifo writer loop finished */
1296 retval = target_read_u32(target, wp_addr, &wp);
1297 if (retval == ERROR_OK && wp == 0) {
1298 LOG_ERROR("flash read algorithm aborted by target");
1299 retval = ERROR_FLASH_OPERATION_FAILED;
1300 }
1301 }
1302
1303 return retval;
1304 }
1305
1306 int target_read_memory(struct target *target,
1307 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1308 {
1309 if (!target_was_examined(target)) {
1310 LOG_ERROR("Target not examined yet");
1311 return ERROR_FAIL;
1312 }
1313 if (!target->type->read_memory) {
1314 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1315 return ERROR_FAIL;
1316 }
1317 return target->type->read_memory(target, address, size, count, buffer);
1318 }
1319
1320 int target_read_phys_memory(struct target *target,
1321 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1322 {
1323 if (!target_was_examined(target)) {
1324 LOG_ERROR("Target not examined yet");
1325 return ERROR_FAIL;
1326 }
1327 if (!target->type->read_phys_memory) {
1328 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1329 return ERROR_FAIL;
1330 }
1331 return target->type->read_phys_memory(target, address, size, count, buffer);
1332 }
1333
1334 int target_write_memory(struct target *target,
1335 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1336 {
1337 if (!target_was_examined(target)) {
1338 LOG_ERROR("Target not examined yet");
1339 return ERROR_FAIL;
1340 }
1341 if (!target->type->write_memory) {
1342 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1343 return ERROR_FAIL;
1344 }
1345 return target->type->write_memory(target, address, size, count, buffer);
1346 }
1347
1348 int target_write_phys_memory(struct target *target,
1349 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1350 {
1351 if (!target_was_examined(target)) {
1352 LOG_ERROR("Target not examined yet");
1353 return ERROR_FAIL;
1354 }
1355 if (!target->type->write_phys_memory) {
1356 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1357 return ERROR_FAIL;
1358 }
1359 return target->type->write_phys_memory(target, address, size, count, buffer);
1360 }
1361
1362 int target_add_breakpoint(struct target *target,
1363 struct breakpoint *breakpoint)
1364 {
1365 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1366 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1367 return ERROR_TARGET_NOT_HALTED;
1368 }
1369 return target->type->add_breakpoint(target, breakpoint);
1370 }
1371
/* Install a context breakpoint; requires a halted target and delegates to
 * the target type's add_context_breakpoint handler. */
int target_add_context_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_context_breakpoint(target, breakpoint);
}
1381
/* Install a hybrid breakpoint; requires a halted target and delegates to
 * the target type's add_hybrid_breakpoint handler. */
int target_add_hybrid_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_hybrid_breakpoint(target, breakpoint);
}
1391
/* Remove a previously installed breakpoint; delegates directly to the
 * target type (no halted-state check, unlike the add_* helpers). */
int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}
1397
/* Install a watchpoint; requires a halted target and delegates to the
 * target type's add_watchpoint handler. */
int target_add_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_watchpoint(target, watchpoint);
}
/* Remove a previously installed watchpoint; delegates directly to the
 * target type (no halted-state check, unlike target_add_watchpoint()). */
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}
1412 int target_hit_watchpoint(struct target *target,
1413 struct watchpoint **hit_watchpoint)
1414 {
1415 if (target->state != TARGET_HALTED) {
1416 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1417 return ERROR_TARGET_NOT_HALTED;
1418 }
1419
1420 if (!target->type->hit_watchpoint) {
1421 /* For backward compatible, if hit_watchpoint is not implemented,
1422 * return ERROR_FAIL such that gdb_server will not take the nonsense
1423 * information. */
1424 return ERROR_FAIL;
1425 }
1426
1427 return target->type->hit_watchpoint(target, hit_watchpoint);
1428 }
1429
/* Return the architecture name to report to GDB, or NULL when the target
 * type does not provide one. */
const char *target_get_gdb_arch(struct target *target)
{
	if (!target->type->get_gdb_arch)
		return NULL;
	return target->type->get_gdb_arch(target);
}
1436
1437 int target_get_gdb_reg_list(struct target *target,
1438 struct reg **reg_list[], int *reg_list_size,
1439 enum target_register_class reg_class)
1440 {
1441 int result = ERROR_FAIL;
1442
1443 if (!target_was_examined(target)) {
1444 LOG_ERROR("Target not examined yet");
1445 goto done;
1446 }
1447
1448 result = target->type->get_gdb_reg_list(target, reg_list,
1449 reg_list_size, reg_class);
1450
1451 done:
1452 if (result != ERROR_OK) {
1453 *reg_list = NULL;
1454 *reg_list_size = 0;
1455 }
1456 return result;
1457 }
1458
1459 int target_get_gdb_reg_list_noread(struct target *target,
1460 struct reg **reg_list[], int *reg_list_size,
1461 enum target_register_class reg_class)
1462 {
1463 if (target->type->get_gdb_reg_list_noread &&
1464 target->type->get_gdb_reg_list_noread(target, reg_list,
1465 reg_list_size, reg_class) == ERROR_OK)
1466 return ERROR_OK;
1467 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1468 }
1469
1470 bool target_supports_gdb_connection(struct target *target)
1471 {
1472 /*
1473 * exclude all the targets that don't provide get_gdb_reg_list
1474 * or that have explicit gdb_max_connection == 0
1475 */
1476 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1477 }
1478
1479 int target_step(struct target *target,
1480 int current, target_addr_t address, int handle_breakpoints)
1481 {
1482 int retval;
1483
1484 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1485
1486 retval = target->type->step(target, current, address, handle_breakpoints);
1487 if (retval != ERROR_OK)
1488 return retval;
1489
1490 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1491
1492 return retval;
1493 }
1494
1495 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1496 {
1497 if (target->state != TARGET_HALTED) {
1498 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1499 return ERROR_TARGET_NOT_HALTED;
1500 }
1501 return target->type->get_gdb_fileio_info(target, fileio_info);
1502 }
1503
1504 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1505 {
1506 if (target->state != TARGET_HALTED) {
1507 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1508 return ERROR_TARGET_NOT_HALTED;
1509 }
1510 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1511 }
1512
1513 target_addr_t target_address_max(struct target *target)
1514 {
1515 unsigned bits = target_address_bits(target);
1516 if (sizeof(target_addr_t) * 8 == bits)
1517 return (target_addr_t) -1;
1518 else
1519 return (((target_addr_t) 1) << bits) - 1;
1520 }
1521
/* Address width of the target in bits; defaults to 32 when the target
 * type does not report one. */
unsigned target_address_bits(struct target *target)
{
	if (target->type->address_bits)
		return target->type->address_bits(target);
	return 32;
}
1528
/* Data width of the target in bits; defaults to 32 when the target
 * type does not report one. */
unsigned int target_data_bits(struct target *target)
{
	if (target->type->data_bits)
		return target->type->data_bits(target);
	return 32;
}
1535
/* Collect up to max_num_samples profiling samples over `seconds` seconds
 * via the target type's profiling handler (target_init_one() installs a
 * default handler, so the pointer is always valid after init). */
static int target_profiling(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	return target->type->profiling(target, samples, max_num_samples,
			num_samples, seconds);
}
1542
1543 static int handle_target(void *priv);
1544
1545 static int target_init_one(struct command_context *cmd_ctx,
1546 struct target *target)
1547 {
1548 target_reset_examined(target);
1549
1550 struct target_type *type = target->type;
1551 if (!type->examine)
1552 type->examine = default_examine;
1553
1554 if (!type->check_reset)
1555 type->check_reset = default_check_reset;
1556
1557 assert(type->init_target);
1558
1559 int retval = type->init_target(cmd_ctx, target);
1560 if (retval != ERROR_OK) {
1561 LOG_ERROR("target '%s' init failed", target_name(target));
1562 return retval;
1563 }
1564
1565 /* Sanity-check MMU support ... stub in what we must, to help
1566 * implement it in stages, but warn if we need to do so.
1567 */
1568 if (type->mmu) {
1569 if (!type->virt2phys) {
1570 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1571 type->virt2phys = identity_virt2phys;
1572 }
1573 } else {
1574 /* Make sure no-MMU targets all behave the same: make no
1575 * distinction between physical and virtual addresses, and
1576 * ensure that virt2phys() is always an identity mapping.
1577 */
1578 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1579 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1580
1581 type->mmu = no_mmu;
1582 type->write_phys_memory = type->write_memory;
1583 type->read_phys_memory = type->read_memory;
1584 type->virt2phys = identity_virt2phys;
1585 }
1586
1587 if (!target->type->read_buffer)
1588 target->type->read_buffer = target_read_buffer_default;
1589
1590 if (!target->type->write_buffer)
1591 target->type->write_buffer = target_write_buffer_default;
1592
1593 if (!target->type->get_gdb_fileio_info)
1594 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1595
1596 if (!target->type->gdb_fileio_end)
1597 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1598
1599 if (!target->type->profiling)
1600 target->type->profiling = target_profiling_default;
1601
1602 return ERROR_OK;
1603 }
1604
1605 static int target_init(struct command_context *cmd_ctx)
1606 {
1607 struct target *target;
1608 int retval;
1609
1610 for (target = all_targets; target; target = target->next) {
1611 retval = target_init_one(cmd_ctx, target);
1612 if (retval != ERROR_OK)
1613 return retval;
1614 }
1615
1616 if (!all_targets)
1617 return ERROR_OK;
1618
1619 retval = target_register_user_commands(cmd_ctx);
1620 if (retval != ERROR_OK)
1621 return retval;
1622
1623 retval = target_register_timer_callback(&handle_target,
1624 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1625 if (retval != ERROR_OK)
1626 return retval;
1627
1628 return ERROR_OK;
1629 }
1630
1631 COMMAND_HANDLER(handle_target_init_command)
1632 {
1633 int retval;
1634
1635 if (CMD_ARGC != 0)
1636 return ERROR_COMMAND_SYNTAX_ERROR;
1637
1638 static bool target_initialized;
1639 if (target_initialized) {
1640 LOG_INFO("'target init' has already been called");
1641 return ERROR_OK;
1642 }
1643 target_initialized = true;
1644
1645 retval = command_run_line(CMD_CTX, "init_targets");
1646 if (retval != ERROR_OK)
1647 return retval;
1648
1649 retval = command_run_line(CMD_CTX, "init_target_events");
1650 if (retval != ERROR_OK)
1651 return retval;
1652
1653 retval = command_run_line(CMD_CTX, "init_board");
1654 if (retval != ERROR_OK)
1655 return retval;
1656
1657 LOG_DEBUG("Initializing targets...");
1658 return target_init(CMD_CTX);
1659 }
1660
1661 int target_register_event_callback(int (*callback)(struct target *target,
1662 enum target_event event, void *priv), void *priv)
1663 {
1664 struct target_event_callback **callbacks_p = &target_event_callbacks;
1665
1666 if (!callback)
1667 return ERROR_COMMAND_SYNTAX_ERROR;
1668
1669 if (*callbacks_p) {
1670 while ((*callbacks_p)->next)
1671 callbacks_p = &((*callbacks_p)->next);
1672 callbacks_p = &((*callbacks_p)->next);
1673 }
1674
1675 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1676 (*callbacks_p)->callback = callback;
1677 (*callbacks_p)->priv = priv;
1678 (*callbacks_p)->next = NULL;
1679
1680 return ERROR_OK;
1681 }
1682
1683 int target_register_reset_callback(int (*callback)(struct target *target,
1684 enum target_reset_mode reset_mode, void *priv), void *priv)
1685 {
1686 struct target_reset_callback *entry;
1687
1688 if (!callback)
1689 return ERROR_COMMAND_SYNTAX_ERROR;
1690
1691 entry = malloc(sizeof(struct target_reset_callback));
1692 if (!entry) {
1693 LOG_ERROR("error allocating buffer for reset callback entry");
1694 return ERROR_COMMAND_SYNTAX_ERROR;
1695 }
1696
1697 entry->callback = callback;
1698 entry->priv = priv;
1699 list_add(&entry->list, &target_reset_callback_list);
1700
1701
1702 return ERROR_OK;
1703 }
1704
1705 int target_register_trace_callback(int (*callback)(struct target *target,
1706 size_t len, uint8_t *data, void *priv), void *priv)
1707 {
1708 struct target_trace_callback *entry;
1709
1710 if (!callback)
1711 return ERROR_COMMAND_SYNTAX_ERROR;
1712
1713 entry = malloc(sizeof(struct target_trace_callback));
1714 if (!entry) {
1715 LOG_ERROR("error allocating buffer for trace callback entry");
1716 return ERROR_COMMAND_SYNTAX_ERROR;
1717 }
1718
1719 entry->callback = callback;
1720 entry->priv = priv;
1721 list_add(&entry->list, &target_trace_callback_list);
1722
1723
1724 return ERROR_OK;
1725 }
1726
1727 int target_register_timer_callback(int (*callback)(void *priv),
1728 unsigned int time_ms, enum target_timer_type type, void *priv)
1729 {
1730 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1731
1732 if (!callback)
1733 return ERROR_COMMAND_SYNTAX_ERROR;
1734
1735 if (*callbacks_p) {
1736 while ((*callbacks_p)->next)
1737 callbacks_p = &((*callbacks_p)->next);
1738 callbacks_p = &((*callbacks_p)->next);
1739 }
1740
1741 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1742 (*callbacks_p)->callback = callback;
1743 (*callbacks_p)->type = type;
1744 (*callbacks_p)->time_ms = time_ms;
1745 (*callbacks_p)->removed = false;
1746
1747 (*callbacks_p)->when = timeval_ms() + time_ms;
1748 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1749
1750 (*callbacks_p)->priv = priv;
1751 (*callbacks_p)->next = NULL;
1752
1753 return ERROR_OK;
1754 }
1755
1756 int target_unregister_event_callback(int (*callback)(struct target *target,
1757 enum target_event event, void *priv), void *priv)
1758 {
1759 struct target_event_callback **p = &target_event_callbacks;
1760 struct target_event_callback *c = target_event_callbacks;
1761
1762 if (!callback)
1763 return ERROR_COMMAND_SYNTAX_ERROR;
1764
1765 while (c) {
1766 struct target_event_callback *next = c->next;
1767 if ((c->callback == callback) && (c->priv == priv)) {
1768 *p = next;
1769 free(c);
1770 return ERROR_OK;
1771 } else
1772 p = &(c->next);
1773 c = next;
1774 }
1775
1776 return ERROR_OK;
1777 }
1778
1779 int target_unregister_reset_callback(int (*callback)(struct target *target,
1780 enum target_reset_mode reset_mode, void *priv), void *priv)
1781 {
1782 struct target_reset_callback *entry;
1783
1784 if (!callback)
1785 return ERROR_COMMAND_SYNTAX_ERROR;
1786
1787 list_for_each_entry(entry, &target_reset_callback_list, list) {
1788 if (entry->callback == callback && entry->priv == priv) {
1789 list_del(&entry->list);
1790 free(entry);
1791 break;
1792 }
1793 }
1794
1795 return ERROR_OK;
1796 }
1797
1798 int target_unregister_trace_callback(int (*callback)(struct target *target,
1799 size_t len, uint8_t *data, void *priv), void *priv)
1800 {
1801 struct target_trace_callback *entry;
1802
1803 if (!callback)
1804 return ERROR_COMMAND_SYNTAX_ERROR;
1805
1806 list_for_each_entry(entry, &target_trace_callback_list, list) {
1807 if (entry->callback == callback && entry->priv == priv) {
1808 list_del(&entry->list);
1809 free(entry);
1810 break;
1811 }
1812 }
1813
1814 return ERROR_OK;
1815 }
1816
1817 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1818 {
1819 if (!callback)
1820 return ERROR_COMMAND_SYNTAX_ERROR;
1821
1822 for (struct target_timer_callback *c = target_timer_callbacks;
1823 c; c = c->next) {
1824 if ((c->callback == callback) && (c->priv == priv)) {
1825 c->removed = true;
1826 return ERROR_OK;
1827 }
1828 }
1829
1830 return ERROR_FAIL;
1831 }
1832
1833 int target_call_event_callbacks(struct target *target, enum target_event event)
1834 {
1835 struct target_event_callback *callback = target_event_callbacks;
1836 struct target_event_callback *next_callback;
1837
1838 if (event == TARGET_EVENT_HALTED) {
1839 /* execute early halted first */
1840 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1841 }
1842
1843 LOG_DEBUG("target event %i (%s) for core %s", event,
1844 target_event_name(event),
1845 target_name(target));
1846
1847 target_handle_event(target, event);
1848
1849 while (callback) {
1850 next_callback = callback->next;
1851 callback->callback(target, event, callback->priv);
1852 callback = next_callback;
1853 }
1854
1855 return ERROR_OK;
1856 }
1857
/* Notify every registered reset callback that `target` saw a reset of
 * mode `reset_mode`. Always returns ERROR_OK. */
int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
{
	struct target_reset_callback *callback;

	LOG_DEBUG("target reset %i (%s)", reset_mode,
			jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);

	list_for_each_entry(callback, &target_reset_callback_list, list)
		callback->callback(target, reset_mode, callback->priv);

	return ERROR_OK;
}
1870
/* Hand `len` bytes of trace data from `target` to every registered trace
 * callback. Always returns ERROR_OK. */
int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
{
	struct target_trace_callback *callback;

	list_for_each_entry(callback, &target_trace_callback_list, list)
		callback->callback(target, len, data, callback->priv);

	return ERROR_OK;
}
1880
/* Re-arm a periodic timer callback: the next deadline is time_ms after
 * *now (not after the previous deadline, so missed ticks are not
 * replayed). */
static int target_timer_callback_periodic_restart(
		struct target_timer_callback *cb, int64_t *now)
{
	cb->when = *now + cb->time_ms;
	return ERROR_OK;
}
1887
1888 static int target_call_timer_callback(struct target_timer_callback *cb,
1889 int64_t *now)
1890 {
1891 cb->callback(cb->priv);
1892
1893 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1894 return target_timer_callback_periodic_restart(cb, now);
1895
1896 return target_unregister_timer_callback(cb->callback, cb->priv);
1897 }
1898
/* Walk the timer-callback list, freeing entries flagged as removed and
 * firing the rest. With checktime != 0, only callbacks whose deadline has
 * passed fire; with checktime == 0, every periodic callback fires
 * immediately. Also recomputes target_timer_next_event_value. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		/* Entries flagged by target_unregister_timer_callback() are
		 * unlinked here; the pointer-to-pointer walk lets us splice
		 * them out without tracking a separate "previous" node. */
		if ((*callback)->removed) {
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* Firing may have flagged the entry removed or re-armed it,
		 * so re-check before folding it into the next-event time. */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1946
/* Invoke timer callbacks whose deadline has passed.
 * (Use a (void) prototype; an empty parameter list is the obsolescent
 * unprototyped form.) */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1951
/* invoke periodic callbacks immediately */
/* (Use a (void) prototype; an empty parameter list is the obsolescent
 * unprototyped form.) */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1957
/* Absolute time (in the timeval_ms() domain) at which the earliest
 * registered timer callback is due, as computed by the last pass of
 * target_call_timer_callbacks_check_time(). */
int64_t target_timer_next_event(void)
{
	return target_timer_next_event_value;
}
1962
1963 /* Prints the working area layout for debug purposes */
1964 static void print_wa_layout(struct target *target)
1965 {
1966 struct working_area *c = target->working_areas;
1967
1968 while (c) {
1969 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1970 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1971 c->address, c->address + c->size - 1, c->size);
1972 c = c->next;
1973 }
1974 }
1975
1976 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1977 static void target_split_working_area(struct working_area *area, uint32_t size)
1978 {
1979 assert(area->free); /* Shouldn't split an allocated area */
1980 assert(size <= area->size); /* Caller should guarantee this */
1981
1982 /* Split only if not already the right size */
1983 if (size < area->size) {
1984 struct working_area *new_wa = malloc(sizeof(*new_wa));
1985
1986 if (!new_wa)
1987 return;
1988
1989 new_wa->next = area->next;
1990 new_wa->size = area->size - size;
1991 new_wa->address = area->address + size;
1992 new_wa->backup = NULL;
1993 new_wa->user = NULL;
1994 new_wa->free = true;
1995
1996 area->next = new_wa;
1997 area->size = size;
1998
1999 /* If backup memory was allocated to this area, it has the wrong size
2000 * now so free it and it will be reallocated if/when needed */
2001 free(area->backup);
2002 area->backup = NULL;
2003 }
2004 }
2005
2006 /* Merge all adjacent free areas into one */
2007 static void target_merge_working_areas(struct target *target)
2008 {
2009 struct working_area *c = target->working_areas;
2010
2011 while (c && c->next) {
2012 assert(c->next->address == c->address + c->size); /* This is an invariant */
2013
2014 /* Find two adjacent free areas */
2015 if (c->free && c->next->free) {
2016 /* Merge the last into the first */
2017 c->size += c->next->size;
2018
2019 /* Remove the last */
2020 struct working_area *to_be_freed = c->next;
2021 c->next = c->next->next;
2022 free(to_be_freed->backup);
2023 free(to_be_freed);
2024
2025 /* If backup memory was allocated to the remaining area, it's has
2026 * the wrong size now */
2027 free(c->backup);
2028 c->backup = NULL;
2029 } else {
2030 c = c->next;
2031 }
2032 }
2033 }
2034
/* Try to allocate 'size' bytes (rounded up to a multiple of 4) from the
 * target's working-area pool.  On the first call the pool is created from
 * the configured physical or virtual work-area base, chosen according to
 * the current MMU state.  On success *area receives the allocation and
 * ERROR_OK is returned; ERROR_TARGET_RESOURCE_NOT_AVAILABLE means no free
 * area was large enough.  Unlike target_alloc_working_area(), no warning
 * is logged on failure. */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = ALIGN_DOWN(target->working_area_size, 4); /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* NOTE(review): if malloc failed, new_wa is NULL here and the
		 * search below simply finds no free area, so the function
		 * returns ERROR_TARGET_RESOURCE_NOT_AVAILABLE. */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	size = ALIGN_UP(size, 4);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
		  size, c->address);

	if (target->backup_working_area) {
		/* Save the current memory contents so they can be put back
		 * when the area is freed. */
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2128
2129 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2130 {
2131 int retval;
2132
2133 retval = target_alloc_working_area_try(target, size, area);
2134 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2135 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2136 return retval;
2137
2138 }
2139
2140 static int target_restore_working_area(struct target *target, struct working_area *area)
2141 {
2142 int retval = ERROR_OK;
2143
2144 if (target->backup_working_area && area->backup) {
2145 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2146 if (retval != ERROR_OK)
2147 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2148 area->size, area->address);
2149 }
2150
2151 return retval;
2152 }
2153
/* Restore the area's backup memory, if any, and return the area to the allocation pool */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	/* Freeing NULL or an already-free area is a no-op */
	if (!area || area->free)
		return ERROR_OK;

	int retval = ERROR_OK;
	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
			area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	/* Merge the newly freed space with any free neighbours */
	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}
2186
/* Free a working area, restoring its backed-up memory contents first. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
2191
2192 /* free resources and restore memory, if restoring memory fails,
2193 * free up resources anyway
2194 */
2195 static void target_free_all_working_areas_restore(struct target *target, int restore)
2196 {
2197 struct working_area *c = target->working_areas;
2198
2199 LOG_DEBUG("freeing all working areas");
2200
2201 /* Loop through all areas, restoring the allocated ones and marking them as free */
2202 while (c) {
2203 if (!c->free) {
2204 if (restore)
2205 target_restore_working_area(target, c);
2206 c->free = true;
2207 *c->user = NULL; /* Same as above */
2208 c->user = NULL;
2209 }
2210 c = c->next;
2211 }
2212
2213 /* Run a merge pass to combine all areas into one */
2214 target_merge_working_areas(target);
2215
2216 print_wa_layout(target);
2217 }
2218
2219 void target_free_all_working_areas(struct target *target)
2220 {
2221 target_free_all_working_areas_restore(target, 1);
2222
2223 /* Now we have none or only one working area marked as free */
2224 if (target->working_areas) {
2225 /* Free the last one to allow on-the-fly moving and resizing */
2226 free(target->working_areas->backup);
2227 free(target->working_areas);
2228 target->working_areas = NULL;
2229 }
2230 }
2231
2232 /* Find the largest number of bytes that can be allocated */
2233 uint32_t target_get_working_area_avail(struct target *target)
2234 {
2235 struct working_area *c = target->working_areas;
2236 uint32_t max_size = 0;
2237
2238 if (!c)
2239 return ALIGN_DOWN(target->working_area_size, 4);
2240
2241 while (c) {
2242 if (c->free && max_size < c->size)
2243 max_size = c->size;
2244
2245 c = c->next;
2246 }
2247
2248 return max_size;
2249 }
2250
/* Tear down one target: run its type-specific deinit, free semihosting
 * state, unregister the JTAG enable callback, release Tcl event actions,
 * working areas, SMP list membership and RTOS support, then free the
 * target structure itself. */
static void target_destroy(struct target *target)
{
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	if (target->semihosting)
		free(target->semihosting->basedir);
	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* Free the chain of Tcl event handlers attached to this target */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head, *tmp;

		list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
			list_del(&head->lh);
			head->target->smp = 0;
			free(head);
		}
		/* The shared empty-list sentinel must not be freed */
		if (target->smp_targets != &empty_smp_targets)
			free(target->smp_targets);
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2295
2296 void target_quit(void)
2297 {
2298 struct target_event_callback *pe = target_event_callbacks;
2299 while (pe) {
2300 struct target_event_callback *t = pe->next;
2301 free(pe);
2302 pe = t;
2303 }
2304 target_event_callbacks = NULL;
2305
2306 struct target_timer_callback *pt = target_timer_callbacks;
2307 while (pt) {
2308 struct target_timer_callback *t = pt->next;
2309 free(pt);
2310 pt = t;
2311 }
2312 target_timer_callbacks = NULL;
2313
2314 for (struct target *target = all_targets; target;) {
2315 struct target *tmp;
2316
2317 tmp = target->next;
2318 target_destroy(target);
2319 target = tmp;
2320 }
2321
2322 all_targets = NULL;
2323 }
2324
2325 int target_arch_state(struct target *target)
2326 {
2327 int retval;
2328 if (!target) {
2329 LOG_WARNING("No target has been configured");
2330 return ERROR_OK;
2331 }
2332
2333 if (target->state != TARGET_HALTED)
2334 return ERROR_OK;
2335
2336 retval = target->type->arch_state(target);
2337 return retval;
2338 }
2339
/* Default .get_gdb_fileio_info handler for targets without semihosting. */
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info)
{
	/* If the target does not support semihosting functions, it has no
	 * need to provide a .get_gdb_fileio_info callback.  Returning
	 * ERROR_FAIL makes gdb_server report a plain "Txx" (target halted)
	 * reply every time. */
	return ERROR_FAIL;
}
2349
/* Default no-op .gdb_fileio_end handler for targets that need no action
 * when a GDB File-I/O request completes. */
static int target_gdb_fileio_end_default(struct target *target,
		int retcode, int fileio_errno, bool ctrl_c)
{
	return ERROR_OK;
}
2355
2356 int target_profiling_default(struct target *target, uint32_t *samples,
2357 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2358 {
2359 struct timeval timeout, now;
2360
2361 gettimeofday(&timeout, NULL);
2362 timeval_add_time(&timeout, seconds, 0);
2363
2364 LOG_INFO("Starting profiling. Halting and resuming the"
2365 " target as often as we can...");
2366
2367 uint32_t sample_count = 0;
2368 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2369 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2370
2371 int retval = ERROR_OK;
2372 for (;;) {
2373 target_poll(target);
2374 if (target->state == TARGET_HALTED) {
2375 uint32_t t = buf_get_u32(reg->value, 0, 32);
2376 samples[sample_count++] = t;
2377 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2378 retval = target_resume(target, 1, 0, 0, 0);
2379 target_poll(target);
2380 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2381 } else if (target->state == TARGET_RUNNING) {
2382 /* We want to quickly sample the PC. */
2383 retval = target_halt(target);
2384 } else {
2385 LOG_INFO("Target not halted or running");
2386 retval = ERROR_OK;
2387 break;
2388 }
2389
2390 if (retval != ERROR_OK)
2391 break;
2392
2393 gettimeofday(&now, NULL);
2394 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2395 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2396 break;
2397 }
2398 }
2399
2400 *num_samples = sample_count;
2401 return retval;
2402 }
2403
2404 /* Single aligned words are guaranteed to use 16 or 32 bit access
2405 * mode respectively, otherwise data is handled as quickly as
2406 * possible
2407 */
2408 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2409 {
2410 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2411 size, address);
2412
2413 if (!target_was_examined(target)) {
2414 LOG_ERROR("Target not examined yet");
2415 return ERROR_FAIL;
2416 }
2417
2418 if (size == 0)
2419 return ERROR_OK;
2420
2421 if ((address + size - 1) < address) {
2422 /* GDB can request this when e.g. PC is 0xfffffffc */
2423 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2424 address,
2425 size);
2426 return ERROR_FAIL;
2427 }
2428
2429 return target->type->write_buffer(target, address, size, buffer);
2430 }
2431
/* Default .write_buffer implementation: write 'count' bytes using the
 * widest accesses the target's data bus width and the address alignment
 * permit. */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		if (address & size) {
			/* One access of 'size' bytes clears this misalignment bit */
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Bytes still writable as whole 'size'-byte accesses */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2468
2469 /* Single aligned words are guaranteed to use 16 or 32 bit access
2470 * mode respectively, otherwise data is handled as quickly as
2471 * possible
2472 */
2473 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2474 {
2475 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2476 size, address);
2477
2478 if (!target_was_examined(target)) {
2479 LOG_ERROR("Target not examined yet");
2480 return ERROR_FAIL;
2481 }
2482
2483 if (size == 0)
2484 return ERROR_OK;
2485
2486 if ((address + size - 1) < address) {
2487 /* GDB can request this when e.g. PC is 0xfffffffc */
2488 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2489 address,
2490 size);
2491 return ERROR_FAIL;
2492 }
2493
2494 return target->type->read_buffer(target, address, size, buffer);
2495 }
2496
/* Default .read_buffer implementation: read 'count' bytes using the
 * widest accesses the target's data bus width and the address alignment
 * permit. */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		if (address & size) {
			/* One access of 'size' bytes clears this misalignment bit */
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Bytes still readable as whole 'size'-byte accesses */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2532
/* Compute a CRC over 'size' bytes of target memory at 'address', using
 * the target's on-target checksum routine and falling back to reading
 * the memory and checksumming it on the host if that fails. */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->checksum_memory) {
		LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* Fall back: read the memory and checksum it on the host */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): target_buffer_get_u32 immediately followed by
		 * target_buffer_set_u32 on the same target looks like a
		 * byte-for-byte no-op — confirm whether a host<->target swap
		 * was intended here. */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2576
2577 int target_blank_check_memory(struct target *target,
2578 struct target_memory_check_block *blocks, int num_blocks,
2579 uint8_t erased_value)
2580 {
2581 if (!target_was_examined(target)) {
2582 LOG_ERROR("Target not examined yet");
2583 return ERROR_FAIL;
2584 }
2585
2586 if (!target->type->blank_check_memory)
2587 return ERROR_NOT_IMPLEMENTED;
2588
2589 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2590 }
2591
2592 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2593 {
2594 uint8_t value_buf[8];
2595 if (!target_was_examined(target)) {
2596 LOG_ERROR("Target not examined yet");
2597 return ERROR_FAIL;
2598 }
2599
2600 int retval = target_read_memory(target, address, 8, 1, value_buf);
2601
2602 if (retval == ERROR_OK) {
2603 *value = target_buffer_get_u64(target, value_buf);
2604 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2605 address,
2606 *value);
2607 } else {
2608 *value = 0x0;
2609 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2610 address);
2611 }
2612
2613 return retval;
2614 }
2615
2616 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2617 {
2618 uint8_t value_buf[4];
2619 if (!target_was_examined(target)) {
2620 LOG_ERROR("Target not examined yet");
2621 return ERROR_FAIL;
2622 }
2623
2624 int retval = target_read_memory(target, address, 4, 1, value_buf);
2625
2626 if (retval == ERROR_OK) {
2627 *value = target_buffer_get_u32(target, value_buf);
2628 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2629 address,
2630 *value);
2631 } else {
2632 *value = 0x0;
2633 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2634 address);
2635 }
2636
2637 return retval;
2638 }
2639
2640 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2641 {
2642 uint8_t value_buf[2];
2643 if (!target_was_examined(target)) {
2644 LOG_ERROR("Target not examined yet");
2645 return ERROR_FAIL;
2646 }
2647
2648 int retval = target_read_memory(target, address, 2, 1, value_buf);
2649
2650 if (retval == ERROR_OK) {
2651 *value = target_buffer_get_u16(target, value_buf);
2652 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2653 address,
2654 *value);
2655 } else {
2656 *value = 0x0;
2657 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2658 address);
2659 }
2660
2661 return retval;
2662 }
2663
2664 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2665 {
2666 if (!target_was_examined(target)) {
2667 LOG_ERROR("Target not examined yet");
2668 return ERROR_FAIL;
2669 }
2670
2671 int retval = target_read_memory(target, address, 1, 1, value);
2672
2673 if (retval == ERROR_OK) {
2674 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2675 address,
2676 *value);
2677 } else {
2678 *value = 0x0;
2679 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2680 address);
2681 }
2682
2683 return retval;
2684 }
2685
2686 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2687 {
2688 int retval;
2689 uint8_t value_buf[8];
2690 if (!target_was_examined(target)) {
2691 LOG_ERROR("Target not examined yet");
2692 return ERROR_FAIL;
2693 }
2694
2695 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2696 address,
2697 value);
2698
2699 target_buffer_set_u64(target, value_buf, value);
2700 retval = target_write_memory(target, address, 8, 1, value_buf);
2701 if (retval != ERROR_OK)
2702 LOG_DEBUG("failed: %i", retval);
2703
2704 return retval;
2705 }
2706
2707 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2708 {
2709 int retval;
2710 uint8_t value_buf[4];
2711 if (!target_was_examined(target)) {
2712 LOG_ERROR("Target not examined yet");
2713 return ERROR_FAIL;
2714 }
2715
2716 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2717 address,
2718 value);
2719
2720 target_buffer_set_u32(target, value_buf, value);
2721 retval = target_write_memory(target, address, 4, 1, value_buf);
2722 if (retval != ERROR_OK)
2723 LOG_DEBUG("failed: %i", retval);
2724
2725 return retval;
2726 }
2727
2728 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2729 {
2730 int retval;
2731 uint8_t value_buf[2];
2732 if (!target_was_examined(target)) {
2733 LOG_ERROR("Target not examined yet");
2734 return ERROR_FAIL;
2735 }
2736
2737 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2738 address,
2739 value);
2740
2741 target_buffer_set_u16(target, value_buf, value);
2742 retval = target_write_memory(target, address, 2, 1, value_buf);
2743 if (retval != ERROR_OK)
2744 LOG_DEBUG("failed: %i", retval);
2745
2746 return retval;
2747 }
2748
2749 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2750 {
2751 int retval;
2752 if (!target_was_examined(target)) {
2753 LOG_ERROR("Target not examined yet");
2754 return ERROR_FAIL;
2755 }
2756
2757 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2758 address, value);
2759
2760 retval = target_write_memory(target, address, 1, 1, &value);
2761 if (retval != ERROR_OK)
2762 LOG_DEBUG("failed: %i", retval);
2763
2764 return retval;
2765 }
2766
2767 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2768 {
2769 int retval;
2770 uint8_t value_buf[8];
2771 if (!target_was_examined(target)) {
2772 LOG_ERROR("Target not examined yet");
2773 return ERROR_FAIL;
2774 }
2775
2776 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2777 address,
2778 value);
2779
2780 target_buffer_set_u64(target, value_buf, value);
2781 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2782 if (retval != ERROR_OK)
2783 LOG_DEBUG("failed: %i", retval);
2784
2785 return retval;
2786 }
2787
2788 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2789 {
2790 int retval;
2791 uint8_t value_buf[4];
2792 if (!target_was_examined(target)) {
2793 LOG_ERROR("Target not examined yet");
2794 return ERROR_FAIL;
2795 }
2796
2797 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2798 address,
2799 value);
2800
2801 target_buffer_set_u32(target, value_buf, value);
2802 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2803 if (retval != ERROR_OK)
2804 LOG_DEBUG("failed: %i", retval);
2805
2806 return retval;
2807 }
2808
2809 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2810 {
2811 int retval;
2812 uint8_t value_buf[2];
2813 if (!target_was_examined(target)) {
2814 LOG_ERROR("Target not examined yet");
2815 return ERROR_FAIL;
2816 }
2817
2818 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2819 address,
2820 value);
2821
2822 target_buffer_set_u16(target, value_buf, value);
2823 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2824 if (retval != ERROR_OK)
2825 LOG_DEBUG("failed: %i", retval);
2826
2827 return retval;
2828 }
2829
2830 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2831 {
2832 int retval;
2833 if (!target_was_examined(target)) {
2834 LOG_ERROR("Target not examined yet");
2835 return ERROR_FAIL;
2836 }
2837
2838 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2839 address, value);
2840
2841 retval = target_write_phys_memory(target, address, 1, 1, &value);
2842 if (retval != ERROR_OK)
2843 LOG_DEBUG("failed: %i", retval);
2844
2845 return retval;
2846 }
2847
2848 static int find_target(struct command_invocation *cmd, const char *name)
2849 {
2850 struct target *target = get_target(name);
2851 if (!target) {
2852 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2853 return ERROR_FAIL;
2854 }
2855 if (!target->tap->enabled) {
2856 command_print(cmd, "Target: TAP %s is disabled, "
2857 "can't be the current target\n",
2858 target->tap->dotted_name);
2859 return ERROR_FAIL;
2860 }
2861
2862 cmd->ctx->current_target = target;
2863 if (cmd->ctx->current_target_override)
2864 cmd->ctx->current_target_override = target;
2865
2866 return ERROR_OK;
2867 }
2868
2869
/* 'targets' command: with one argument, select that target as current;
 * with no argument (or when the lookup fails) print a table of all
 * configured targets, marking the current one with '*'. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	struct target *target = all_targets;
	command_print(CMD, " TargetName Type Endian TapName State ");
	command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2912
2913 /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
2914
2915 static int power_dropout;
2916 static int srst_asserted;
2917
2918 static int run_power_restore;
2919 static int run_power_dropout;
2920 static int run_srst_asserted;
2921 static int run_srst_deasserted;
2922
/* Sample the power-dropout and srst sense lines and latch edge events
 * into the run_* flags for handle_target() to act on.  Repeat triggers
 * within 2000 ms of the previous one are debounced. */
static int sense_handler(void)
{
	/* Previous samples, for edge detection */
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	/* Rising edge of power: dropout has just ended */
	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	/* Debounce: ignore a dropout within 2000 ms of the previous one */
	static int64_t last_power;
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	/* Falling edge of srst: it has just been released */
	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	/* Debounce srst deassertion the same way */
	static int64_t last_srst;
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2974
/* process target state changes */
/* Periodic handler: run srst/power event procs (non-recursively) and
 * poll every enabled, examined target, backing off exponentially on
 * poll failures and attempting re-examination. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			if (target->backoff.times > 0) {
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target_set_examined(target);
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
						 target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3087
/* 'reg' command:
 *   reg                           - list all registers of the current target
 *   reg (<number>|<name>)         - display one register (read it if not cached)
 *   reg (<number>|<name>) force   - discard the cached value and re-read it
 *   reg (<number>|<name>) <value> - write a new value to the register
 */
COMMAND_HANDLER(handle_reg_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	struct reg *reg = NULL;

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		/* 'count' is a running ordinal across all caches; it is the
		 * number users pass to access a register by index */
		unsigned int count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* skip registers absent on this part or marked hidden */
				if (reg->exist == false || reg->hidden)
					continue;
				/* only print cached values if they are valid */
				if (reg->valid) {
					char *value = buf_to_hex_str(reg->value,
							reg->size);
					command_print(CMD,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk every cache, counting registers until we reach #num */
		struct reg_cache *cache = target->reg_cache;
		unsigned int count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);

		if (!reg)
			goto not_found;
	}

	assert(reg); /* give clang a hint that we *know* reg is != NULL here */

	if (!reg->exist)
		goto not_found;

	/* display a register: either no second argument, or a second
	 * argument that does not start with a digit (e.g. "force") */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		/* re-read from the target when the cache is invalid */
		if (reg->valid == 0) {
			int retval = reg->type->get(reg);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not read register '%s'", reg->name);
				return retval;
			}
		}
		char *value = buf_to_hex_str(reg->value, reg->size);
		command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (!buf)
			return ERROR_FAIL;
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		int retval = reg->type->set(reg, buf);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not write to register '%s'", reg->name);
		} else {
			/* echo back the value now held in the cache */
			char *value = buf_to_hex_str(reg->value, reg->size);
			command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
			free(value);
		}

		free(buf);

		return retval;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;

not_found:
	command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
	return ERROR_OK;
}
3218
3219 COMMAND_HANDLER(handle_poll_command)
3220 {
3221 int retval = ERROR_OK;
3222 struct target *target = get_current_target(CMD_CTX);
3223
3224 if (CMD_ARGC == 0) {
3225 command_print(CMD, "background polling: %s",
3226 jtag_poll_get_enabled() ? "on" : "off");
3227 command_print(CMD, "TAP: %s (%s)",
3228 target->tap->dotted_name,
3229 target->tap->enabled ? "enabled" : "disabled");
3230 if (!target->tap->enabled)
3231 return ERROR_OK;
3232 retval = target_poll(target);
3233 if (retval != ERROR_OK)
3234 return retval;
3235 retval = target_arch_state(target);
3236 if (retval != ERROR_OK)
3237 return retval;
3238 } else if (CMD_ARGC == 1) {
3239 bool enable;
3240 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3241 jtag_poll_set_enabled(enable);
3242 } else
3243 return ERROR_COMMAND_SYNTAX_ERROR;
3244
3245 return retval;
3246 }
3247
3248 COMMAND_HANDLER(handle_wait_halt_command)
3249 {
3250 if (CMD_ARGC > 1)
3251 return ERROR_COMMAND_SYNTAX_ERROR;
3252
3253 unsigned ms = DEFAULT_HALT_TIMEOUT;
3254 if (1 == CMD_ARGC) {
3255 int retval = parse_uint(CMD_ARGV[0], &ms);
3256 if (retval != ERROR_OK)
3257 return ERROR_COMMAND_SYNTAX_ERROR;
3258 }
3259
3260 struct target *target = get_current_target(CMD_CTX);
3261 return target_wait_state(target, TARGET_HALTED, ms);
3262 }
3263
/* wait for target state to change. The trick here is to have a low
 * latency for short waits and not to suck up all the CPU time
 * on longer waits.
 *
 * After 500ms, keep_alive() is invoked
 *
 * @param target the target to poll
 * @param state  the state to wait for
 * @param ms     timeout in milliseconds
 * @returns ERROR_OK when the state was reached, the poll error if polling
 *          failed, or ERROR_FAIL on timeout
 */
int target_wait_state(struct target *target, enum target_state state, int ms)
{
	int retval;
	int64_t then = 0, cur;
	bool once = true;

	for (;;) {
		retval = target_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (target->state == state)
			break;
		cur = timeval_ms();
		if (once) {
			/* first mismatch: start the timeout clock */
			once = false;
			then = timeval_ms();
			LOG_DEBUG("waiting for target %s...",
				jim_nvp_value2name_simple(nvp_target_state, state)->name);
		}

		/* after 500ms keep the GDB connection alive on every iteration */
		if (cur-then > 500)
			keep_alive();

		if ((cur-then) > ms) {
			LOG_ERROR("timed out while waiting for target %s",
				jim_nvp_value2name_simple(nvp_target_state, state)->name);
			return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}
3302
3303 COMMAND_HANDLER(handle_halt_command)
3304 {
3305 LOG_DEBUG("-");
3306
3307 struct target *target = get_current_target(CMD_CTX);
3308
3309 target->verbose_halt_msg = true;
3310
3311 int retval = target_halt(target);
3312 if (retval != ERROR_OK)
3313 return retval;
3314
3315 if (CMD_ARGC == 1) {
3316 unsigned wait_local;
3317 retval = parse_uint(CMD_ARGV[0], &wait_local);
3318 if (retval != ERROR_OK)
3319 return ERROR_COMMAND_SYNTAX_ERROR;
3320 if (!wait_local)
3321 return ERROR_OK;
3322 }
3323
3324 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3325 }
3326
3327 COMMAND_HANDLER(handle_soft_reset_halt_command)
3328 {
3329 struct target *target = get_current_target(CMD_CTX);
3330
3331 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3332
3333 target_soft_reset_halt(target);
3334
3335 return ERROR_OK;
3336 }
3337
3338 COMMAND_HANDLER(handle_reset_command)
3339 {
3340 if (CMD_ARGC > 1)
3341 return ERROR_COMMAND_SYNTAX_ERROR;
3342
3343 enum target_reset_mode reset_mode = RESET_RUN;
3344 if (CMD_ARGC == 1) {
3345 const struct jim_nvp *n;
3346 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3347 if ((!n->name) || (n->value == RESET_UNKNOWN))
3348 return ERROR_COMMAND_SYNTAX_ERROR;
3349 reset_mode = n->value;
3350 }
3351
3352 /* reset *all* targets */
3353 return target_process_reset(CMD, reset_mode);
3354 }
3355
3356
3357 COMMAND_HANDLER(handle_resume_command)
3358 {
3359 int current = 1;
3360 if (CMD_ARGC > 1)
3361 return ERROR_COMMAND_SYNTAX_ERROR;
3362
3363 struct target *target = get_current_target(CMD_CTX);
3364
3365 /* with no CMD_ARGV, resume from current pc, addr = 0,
3366 * with one arguments, addr = CMD_ARGV[0],
3367 * handle breakpoints, not debugging */
3368 target_addr_t addr = 0;
3369 if (CMD_ARGC == 1) {
3370 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3371 current = 0;
3372 }
3373
3374 return target_resume(target, current, addr, 1, 0);
3375 }
3376
3377 COMMAND_HANDLER(handle_step_command)
3378 {
3379 if (CMD_ARGC > 1)
3380 return ERROR_COMMAND_SYNTAX_ERROR;
3381
3382 LOG_DEBUG("-");
3383
3384 /* with no CMD_ARGV, step from current pc, addr = 0,
3385 * with one argument addr = CMD_ARGV[0],
3386 * handle breakpoints, debugging */
3387 target_addr_t addr = 0;
3388 int current_pc = 1;
3389 if (CMD_ARGC == 1) {
3390 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3391 current_pc = 0;
3392 }
3393
3394 struct target *target = get_current_target(CMD_CTX);
3395
3396 return target_step(target, current_pc, addr, 1);
3397 }
3398
/* Pretty-print memory read by the md* commands: values of @a size bytes,
 * up to 32 bytes per output line, each line prefixed with the address of
 * its first value. Values are decoded in the target's byte order. */
void target_handle_md_output(struct command_invocation *cmd,
		struct target *target, target_addr_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	const unsigned line_bytecnt = 32;
	unsigned line_modulo = line_bytecnt / size;	/* values per output line */

	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	/* fixed-width hex format matching the access size */
	const char *value_fmt;
	switch (size) {
	case 8:
		value_fmt = "%16.16"PRIx64" ";
		break;
	case 4:
		value_fmt = "%8.8"PRIx64" ";
		break;
	case 2:
		value_fmt = "%4.4"PRIx64" ";
		break;
	case 1:
		value_fmt = "%2.2"PRIx64" ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		/* start a new line with the address of its first value */
		if (i % line_modulo == 0) {
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					TARGET_ADDR_FMT ": ",
					(address + (i * size)));
		}

		uint64_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		switch (size) {
		case 8:
			value = target_buffer_get_u64(target, value_ptr);
			break;
		case 4:
			value = target_buffer_get_u32(target, value_ptr);
			break;
		case 2:
			value = target_buffer_get_u16(target, value_ptr);
			break;
		case 1:
			value = *value_ptr;
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		/* flush at end of line or end of buffer */
		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			command_print(cmd, "%s", output);
			output_len = 0;
		}
	}
}
3462
3463 COMMAND_HANDLER(handle_md_command)
3464 {
3465 if (CMD_ARGC < 1)
3466 return ERROR_COMMAND_SYNTAX_ERROR;
3467
3468 unsigned size = 0;
3469 switch (CMD_NAME[2]) {
3470 case 'd':
3471 size = 8;
3472 break;
3473 case 'w':
3474 size = 4;
3475 break;
3476 case 'h':
3477 size = 2;
3478 break;
3479 case 'b':
3480 size = 1;
3481 break;
3482 default:
3483 return ERROR_COMMAND_SYNTAX_ERROR;
3484 }
3485
3486 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3487 int (*fn)(struct target *target,
3488 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3489 if (physical) {
3490 CMD_ARGC--;
3491 CMD_ARGV++;
3492 fn = target_read_phys_memory;
3493 } else
3494 fn = target_read_memory;
3495 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3496 return ERROR_COMMAND_SYNTAX_ERROR;
3497
3498 target_addr_t address;
3499 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3500
3501 unsigned count = 1;
3502 if (CMD_ARGC == 2)
3503 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3504
3505 uint8_t *buffer = calloc(count, size);
3506 if (!buffer) {
3507 LOG_ERROR("Failed to allocate md read buffer");
3508 return ERROR_FAIL;
3509 }
3510
3511 struct target *target = get_current_target(CMD_CTX);
3512 int retval = fn(target, address, size, count, buffer);
3513 if (retval == ERROR_OK)
3514 target_handle_md_output(CMD, target, address, size, count, buffer);
3515
3516 free(buffer);
3517
3518 return retval;
3519 }
3520
3521 typedef int (*target_write_fn)(struct target *target,
3522 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3523
3524 static int target_fill_mem(struct target *target,
3525 target_addr_t address,
3526 target_write_fn fn,
3527 unsigned data_size,
3528 /* value */
3529 uint64_t b,
3530 /* count */
3531 unsigned c)
3532 {
3533 /* We have to write in reasonably large chunks to be able
3534 * to fill large memory areas with any sane speed */
3535 const unsigned chunk_size = 16384;
3536 uint8_t *target_buf = malloc(chunk_size * data_size);
3537 if (!target_buf) {
3538 LOG_ERROR("Out of memory");
3539 return ERROR_FAIL;
3540 }
3541
3542 for (unsigned i = 0; i < chunk_size; i++) {
3543 switch (data_size) {
3544 case 8:
3545 target_buffer_set_u64(target, target_buf + i * data_size, b);
3546 break;
3547 case 4:
3548 target_buffer_set_u32(target, target_buf + i * data_size, b);
3549 break;
3550 case 2:
3551 target_buffer_set_u16(target, target_buf + i * data_size, b);
3552 break;
3553 case 1:
3554 target_buffer_set_u8(target, target_buf + i * data_size, b);
3555 break;
3556 default:
3557 exit(-1);
3558 }
3559 }
3560
3561 int retval = ERROR_OK;
3562
3563 for (unsigned x = 0; x < c; x += chunk_size) {
3564 unsigned current;
3565 current = c - x;
3566 if (current > chunk_size)
3567 current = chunk_size;
3568 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3569 if (retval != ERROR_OK)
3570 break;
3571 /* avoid GDB timeouts */
3572 keep_alive();
3573 }
3574 free(target_buf);
3575
3576 return retval;
3577 }
3578
3579
3580 COMMAND_HANDLER(handle_mw_command)
3581 {
3582 if (CMD_ARGC < 2)
3583 return ERROR_COMMAND_SYNTAX_ERROR;
3584 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3585 target_write_fn fn;
3586 if (physical) {
3587 CMD_ARGC--;
3588 CMD_ARGV++;
3589 fn = target_write_phys_memory;
3590 } else
3591 fn = target_write_memory;
3592 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3593 return ERROR_COMMAND_SYNTAX_ERROR;
3594
3595 target_addr_t address;
3596 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3597
3598 uint64_t value;
3599 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3600
3601 unsigned count = 1;
3602 if (CMD_ARGC == 3)
3603 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3604
3605 struct target *target = get_current_target(CMD_CTX);
3606 unsigned wordsize;
3607 switch (CMD_NAME[2]) {
3608 case 'd':
3609 wordsize = 8;
3610 break;
3611 case 'w':
3612 wordsize = 4;
3613 break;
3614 case 'h':
3615 wordsize = 2;
3616 break;
3617 case 'b':
3618 wordsize = 1;
3619 break;
3620 default:
3621 return ERROR_COMMAND_SYNTAX_ERROR;
3622 }
3623
3624 return target_fill_mem(target, address, fn, wordsize, value, count);
3625 }
3626
/* Parse the shared arguments of the load_image family:
 *   <file> [base_address [type [min_address [size]]]]
 * On success @a image is initialized and, when the optional arguments are
 * given, *min_address/*max_address bound the region that will be written
 * (the 5th argument is a size, converted here into an end address). */
static COMMAND_HELPER(parse_load_image_command, struct image *image,
		target_addr_t *min_address, target_addr_t *max_address)
{
	if (CMD_ARGC < 1 || CMD_ARGC > 5)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* a base address isn't always necessary,
	 * default to 0x0 (i.e. don't relocate) */
	if (CMD_ARGC >= 2) {
		target_addr_t addr;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
		image->base_address = addr;
		image->base_address_set = true;
	} else
		image->base_address_set = false;

	image->start_address_set = false;

	if (CMD_ARGC >= 4)
		COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
	if (CMD_ARGC == 5) {
		COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
		/* use size (given) to find max (required) */
		*max_address += *min_address;
	}

	if (*min_address > *max_address)
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
3658
/* 'load_image' command: write an image file into target memory, section by
 * section, optionally clipped to [min_address, max_address). */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* i.e. no upper bound by default */
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* only write the part of this section that overlaps the
		 * requested [min_address, max_address) window */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3742
3743 COMMAND_HANDLER(handle_dump_image_command)
3744 {
3745 struct fileio *fileio;
3746 uint8_t *buffer;
3747 int retval, retvaltemp;
3748 target_addr_t address, size;
3749 struct duration bench;
3750 struct target *target = get_current_target(CMD_CTX);
3751
3752 if (CMD_ARGC != 3)
3753 return ERROR_COMMAND_SYNTAX_ERROR;
3754
3755 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3756 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3757
3758 uint32_t buf_size = (size > 4096) ? 4096 : size;
3759 buffer = malloc(buf_size);
3760 if (!buffer)
3761 return ERROR_FAIL;
3762
3763 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3764 if (retval != ERROR_OK) {
3765 free(buffer);
3766 return retval;
3767 }
3768
3769 duration_start(&bench);
3770
3771 while (size > 0) {
3772 size_t size_written;
3773 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3774 retval = target_read_buffer(target, address, this_run_size, buffer);
3775 if (retval != ERROR_OK)
3776 break;
3777
3778 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3779 if (retval != ERROR_OK)
3780 break;
3781
3782 size -= this_run_size;
3783 address += this_run_size;
3784 }
3785
3786 free(buffer);
3787
3788 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3789 size_t filesize;
3790 retval = fileio_size(fileio, &filesize);
3791 if (retval != ERROR_OK)
3792 return retval;
3793 command_print(CMD,
3794 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3795 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3796 }
3797
3798 retvaltemp = fileio_close(fileio);
3799 if (retvaltemp != ERROR_OK)
3800 return retvaltemp;
3801
3802 return retval;
3803 }
3804
/* Behaviour selector for the verify_image/test_image command family. */
enum verify_mode {
	IMAGE_TEST = 0,		/* no compare: just print section address/length */
	IMAGE_VERIFY = 1,	/* CRC compare, byte-wise diff on mismatch */
	IMAGE_CHECKSUM_ONLY = 2	/* CRC compare only, fail fast on mismatch */
};
3810
3811 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3812 {
3813 uint8_t *buffer;
3814 size_t buf_cnt;
3815 uint32_t image_size;
3816 int retval;
3817 uint32_t checksum = 0;
3818 uint32_t mem_checksum = 0;
3819
3820 struct image image;
3821
3822 struct target *target = get_current_target(CMD_CTX);
3823
3824 if (CMD_ARGC < 1)
3825 return ERROR_COMMAND_SYNTAX_ERROR;
3826
3827 if (!target) {
3828 LOG_ERROR("no target selected");
3829 return ERROR_FAIL;
3830 }
3831
3832 struct duration bench;
3833 duration_start(&bench);
3834
3835 if (CMD_ARGC >= 2) {
3836 target_addr_t addr;
3837 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3838 image.base_address = addr;
3839 image.base_address_set = true;
3840 } else {
3841 image.base_address_set = false;
3842 image.base_address = 0x0;
3843 }
3844
3845 image.start_address_set = false;
3846
3847 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3848 if (retval != ERROR_OK)
3849 return retval;
3850
3851 image_size = 0x0;
3852 int diffs = 0;
3853 retval = ERROR_OK;
3854 for (unsigned int i = 0; i < image.num_sections; i++) {
3855 buffer = malloc(image.sections[i].size);
3856 if (!buffer) {
3857 command_print(CMD,
3858 "error allocating buffer for section (%" PRIu32 " bytes)",
3859 image.sections[i].size);
3860 break;
3861 }
3862 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3863 if (retval != ERROR_OK) {
3864 free(buffer);
3865 break;
3866 }
3867
3868 if (verify >= IMAGE_VERIFY) {
3869 /* calculate checksum of image */
3870 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3871 if (retval != ERROR_OK) {
3872 free(buffer);
3873 break;
3874 }
3875
3876 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3877 if (retval != ERROR_OK) {
3878 free(buffer);
3879 break;
3880 }
3881 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3882 LOG_ERROR("checksum mismatch");
3883 free(buffer);
3884 retval = ERROR_FAIL;
3885 goto done;
3886 }
3887 if (checksum != mem_checksum) {
3888 /* failed crc checksum, fall back to a binary compare */
3889 uint8_t *data;
3890
3891 if (diffs == 0)
3892 LOG_ERROR("checksum mismatch - attempting binary compare");
3893
3894 data = malloc(buf_cnt);
3895
3896 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3897 if (retval == ERROR_OK) {
3898 uint32_t t;
3899 for (t = 0; t < buf_cnt; t++) {
3900 if (data[t] != buffer[t]) {
3901 command_print(CMD,
3902 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3903 diffs,
3904 (unsigned)(t + image.sections[i].base_address),
3905 data[t],
3906 buffer[t]);
3907 if (diffs++ >= 127) {
3908 command_print(CMD, "More than 128 errors, the rest are not printed.");
3909 free(data);
3910 free(buffer);
3911 goto done;
3912 }
3913 }
3914 keep_alive();
3915 }
3916 }
3917 free(data);
3918 }
3919 } else {
3920 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3921 image.sections[i].base_address,
3922 buf_cnt);
3923 }
3924
3925 free(buffer);
3926 image_size += buf_cnt;
3927 }
3928 if (diffs > 0)
3929 command_print(CMD, "No more differences found.");
3930 done:
3931 if (diffs > 0)
3932 retval = ERROR_FAIL;
3933 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3934 command_print(CMD, "verified %" PRIu32 " bytes "
3935 "in %fs (%0.3f KiB/s)", image_size,
3936 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3937 }
3938
3939 image_close(&image);
3940
3941 return retval;
3942 }
3943
/* 'verify_image_checksum': CRC-only verification, fails fast on mismatch. */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3948
/* 'verify_image': CRC verification with byte-wise diff on mismatch. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3953
/* 'test_image': no compare, just list the image's section layout. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3958
3959 static int handle_bp_command_list(struct command_invocation *cmd)
3960 {
3961 struct target *target = get_current_target(cmd->ctx);
3962 struct breakpoint *breakpoint = target->breakpoints;
3963 while (breakpoint) {
3964 if (breakpoint->type == BKPT_SOFT) {
3965 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3966 breakpoint->length);
3967 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, 0x%s",
3968 breakpoint->address,
3969 breakpoint->length,
3970 buf);
3971 free(buf);
3972 } else {
3973 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3974 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %u",
3975 breakpoint->asid,
3976 breakpoint->length, breakpoint->number);
3977 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3978 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3979 breakpoint->address,
3980 breakpoint->length, breakpoint->number);
3981 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3982 breakpoint->asid);
3983 } else
3984 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3985 breakpoint->address,
3986 breakpoint->length, breakpoint->number);
3987 }
3988
3989 breakpoint = breakpoint->next;
3990 }
3991 return ERROR_OK;
3992 }
3993
3994 static int handle_bp_command_set(struct command_invocation *cmd,
3995 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3996 {
3997 struct target *target = get_current_target(cmd->ctx);
3998 int retval;
3999
4000 if (asid == 0) {
4001 retval = breakpoint_add(target, addr, length, hw);
4002 /* error is always logged in breakpoint_add(), do not print it again */
4003 if (retval == ERROR_OK)
4004 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
4005
4006 } else if (addr == 0) {
4007 if (!target->type->add_context_breakpoint) {
4008 LOG_ERROR("Context breakpoint not available");
4009 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4010 }
4011 retval = context_breakpoint_add(target, asid, length, hw);
4012 /* error is always logged in context_breakpoint_add(), do not print it again */
4013 if (retval == ERROR_OK)
4014 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4015
4016 } else {
4017 if (!target->type->add_hybrid_breakpoint) {
4018 LOG_ERROR("Hybrid breakpoint not available");
4019 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4020 }
4021 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4022 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4023 if (retval == ERROR_OK)
4024 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4025 }
4026 return retval;
4027 }
4028
/* 'bp' command:
 *   bp                               - list breakpoints
 *   bp <address> <length>            - set software breakpoint
 *   bp <address> <length> hw         - set hardware breakpoint
 *   bp <asid> <length> hw_ctx        - set hardware context breakpoint
 *   bp <address> <asid> <length> hw  - set hybrid breakpoint
 */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
	case 0:
		return handle_bp_command_list(CMD);

	case 2:
		/* <address> <length>: plain software breakpoint */
		asid = 0;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	case 3:
		if (strcmp(CMD_ARGV[2], "hw") == 0) {
			/* <address> <length> hw: hardware breakpoint */
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			asid = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
			/* <asid> <length> hw_ctx: context breakpoint */
			hw = BKPT_HARD;
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			addr = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		}
		/* fallthrough */
	case 4:
		/* <address> <asid> <length> hw: hybrid breakpoint */
		hw = BKPT_HARD;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
4072
4073 COMMAND_HANDLER(handle_rbp_command)
4074 {
4075 if (CMD_ARGC != 1)
4076 return ERROR_COMMAND_SYNTAX_ERROR;
4077
4078 struct target *target = get_current_target(CMD_CTX);
4079
4080 if (!strcmp(CMD_ARGV[0], "all")) {
4081 breakpoint_remove_all(target);
4082 } else {
4083 target_addr_t addr;
4084 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4085
4086 breakpoint_remove(target, addr);
4087 }
4088
4089 return ERROR_OK;
4090 }
4091
/* 'wp' command:
 *   wp                                           - list watchpoints
 *   wp <address> <length> [r|w|a [value [mask]]] - set a watchpoint
 */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	/* no arguments: print the current watchpoints */
	if (CMD_ARGC == 0) {
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx32
					", mask: 0x%8.8" PRIx32,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint32_t data_value = 0x0;
	uint32_t data_mask = 0xffffffff;

	/* parse from the most specific form downwards; each case deliberately
	 * falls through to pick up the remaining mandatory arguments */
	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
		/* fall through */
	case 3:
		/* access mode: read, write, or any access */
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
4159
4160 COMMAND_HANDLER(handle_rwp_command)
4161 {
4162 if (CMD_ARGC != 1)
4163 return ERROR_COMMAND_SYNTAX_ERROR;
4164
4165 target_addr_t addr;
4166 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4167
4168 struct target *target = get_current_target(CMD_CTX);
4169 watchpoint_remove(target, addr);
4170
4171 return ERROR_OK;
4172 }
4173
4174 /**
4175 * Translate a virtual address to a physical address.
4176 *
4177 * The low-level target implementation must have logged a detailed error
4178 * which is forwarded to telnet/GDB session.
4179 */
4180 COMMAND_HANDLER(handle_virt2phys_command)
4181 {
4182 if (CMD_ARGC != 1)
4183 return ERROR_COMMAND_SYNTAX_ERROR;
4184
4185 target_addr_t va;
4186 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4187 target_addr_t pa;
4188
4189 struct target *target = get_current_target(CMD_CTX);
4190 int retval = target->type->virt2phys(target, va, &pa);
4191 if (retval == ERROR_OK)
4192 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4193
4194 return retval;
4195 }
4196
/* Write @a len bytes to @a f; a short write is logged but not propagated. */
static void write_data(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4203
/* Emit a 32-bit value to @a f in the target's byte order. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t raw[4];

	target_buffer_set_u32(target, raw, l);
	write_data(f, raw, sizeof(raw));
}
4211
/* Emit a string to @a f without its NUL terminator. */
static void write_string(FILE *f, char *s)
{
	write_data(f, s, strlen(s));
}
4216
typedef unsigned char UNIT[2];	/* unit of profiling */

/**
 * Dump a gmon.out histogram file for post-processing with gprof.
 *
 * @param samples	PC samples collected by target_profiling()
 * @param sample_num	number of valid entries in @a samples
 * @param filename	output file; silently returns if it cannot be opened
 * @param with_range	true: histogram covers [start_address, end_address);
 *			false: range is derived from the samples themselves
 * @param target	used for its byte order when emitting 32-bit fields
 * @param duration_ms	wall-clock sampling time, used for the sample rate
 *
 * NOTE(review): multi-byte header fields are written in *target* byte order
 * via write_long(); confirm this matches what the gprof build consuming the
 * file expects in cross-endian setups.
 */
static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
		uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
{
	uint32_t i;
	FILE *f = fopen(filename, "w");
	if (!f)
		return;
	/* gmon.out magic cookie + header */
	write_string(f, "gmon");
	write_long(f, 0x00000001, target); /* Version */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */

	uint8_t zero = 0;  /* GMON_TAG_TIME_HIST */
	write_data(f, &zero, 1);

	/* figure out bucket size */
	uint32_t min;
	uint32_t max;
	if (with_range) {
		min = start_address;
		max = end_address;
	} else {
		/* derive the PC range from the samples themselves */
		min = samples[0];
		max = samples[0];
		for (i = 0; i < sample_num; i++) {
			if (min > samples[i])
				min = samples[i];
			if (max < samples[i])
				max = samples[i];
		}

		/* max should be (largest sample + 1)
		 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
		max++;
	}

	int address_space = max - min;
	assert(address_space >= 2);

	/* FIXME: What is the reasonable number of buckets?
	 * The profiling result will be more accurate if there are enough buckets. */
	static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
	uint32_t num_buckets = address_space / sizeof(UNIT);
	if (num_buckets > max_buckets)
		num_buckets = max_buckets;
	int *buckets = malloc(sizeof(int) * num_buckets);
	if (!buckets) {
		fclose(f);
		return;
	}
	memset(buckets, 0, sizeof(int) * num_buckets);
	for (i = 0; i < sample_num; i++) {
		uint32_t address = samples[i];

		/* samples outside the histogram range are simply dropped */
		if ((address < min) || (max <= address))
			continue;

		/* 64-bit intermediates so (offset * num_buckets) cannot overflow */
		long long a = address - min;
		long long b = num_buckets;
		long long c = address_space;
		int index_t = (a * b) / c; /* danger!!!! int32 overflows */
		buckets[index_t]++;
	}

	/* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
	write_long(f, min, target); /* low_pc */
	write_long(f, max, target); /* high_pc */
	write_long(f, num_buckets, target); /* # of buckets */
	/* sample rate in Hz; truncated to an integer by write_long's int parameter */
	float sample_rate = sample_num / (duration_ms / 1000.0);
	write_long(f, sample_rate, target);
	write_string(f, "seconds");
	/* dimension field is 15 bytes: pad "seconds" with NULs */
	for (i = 0; i < (15-strlen("seconds")); i++)
		write_data(f, &zero, 1);
	write_string(f, "s");

	/*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */

	char *data = malloc(2 * num_buckets);
	if (data) {
		/* each bucket is a 16-bit little-endian count, saturated at 65535 */
		for (i = 0; i < num_buckets; i++) {
			int val;
			val = buckets[i];
			if (val > 65535)
				val = 65535;
			data[i * 2] = val&0xff;
			data[i * 2 + 1] = (val >> 8) & 0xff;
		}
		free(buckets);
		write_data(f, data, num_buckets * 2);
		free(data);
	} else
		free(buckets);

	fclose(f);
}
4316
4317 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4318 * which will be used as a random sampling of PC */
4319 COMMAND_HANDLER(handle_profile_command)
4320 {
4321 struct target *target = get_current_target(CMD_CTX);
4322
4323 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4324 return ERROR_COMMAND_SYNTAX_ERROR;
4325
4326 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4327 uint32_t offset;
4328 uint32_t num_of_samples;
4329 int retval = ERROR_OK;
4330 bool halted_before_profiling = target->state == TARGET_HALTED;
4331
4332 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4333
4334 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4335 if (!samples) {
4336 LOG_ERROR("No memory to store samples.");
4337 return ERROR_FAIL;
4338 }
4339
4340 uint64_t timestart_ms = timeval_ms();
4341 /**
4342 * Some cores let us sample the PC without the
4343 * annoying halt/resume step; for example, ARMv7 PCSR.
4344 * Provide a way to use that more efficient mechanism.
4345 */
4346 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4347 &num_of_samples, offset);
4348 if (retval != ERROR_OK) {
4349 free(samples);
4350 return retval;
4351 }
4352 uint32_t duration_ms = timeval_ms() - timestart_ms;
4353
4354 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4355
4356 retval = target_poll(target);
4357 if (retval != ERROR_OK) {
4358 free(samples);
4359 return retval;
4360 }
4361
4362 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4363 /* The target was halted before we started and is running now. Halt it,
4364 * for consistency. */
4365 retval = target_halt(target);
4366 if (retval != ERROR_OK) {
4367 free(samples);
4368 return retval;
4369 }
4370 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4371 /* The target was running before we started and is halted now. Resume
4372 * it, for consistency. */
4373 retval = target_resume(target, 1, 0, 0, 0);
4374 if (retval != ERROR_OK) {
4375 free(samples);
4376 return retval;
4377 }
4378 }
4379
4380 retval = target_poll(target);
4381 if (retval != ERROR_OK) {
4382 free(samples);
4383 return retval;
4384 }
4385
4386 uint32_t start_address = 0;
4387 uint32_t end_address = 0;
4388 bool with_range = false;
4389 if (CMD_ARGC == 4) {
4390 with_range = true;
4391 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4392 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4393 }
4394
4395 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4396 with_range, start_address, end_address, target, duration_ms);
4397 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4398
4399 free(samples);
4400 return retval;
4401 }
4402
4403 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4404 {
4405 char *namebuf;
4406 Jim_Obj *obj_name, *obj_val;
4407 int result;
4408
4409 namebuf = alloc_printf("%s(%d)", varname, idx);
4410 if (!namebuf)
4411 return JIM_ERR;
4412
4413 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4414 jim_wide wide_val = val;
4415 obj_val = Jim_NewWideObj(interp, wide_val);
4416 if (!obj_name || !obj_val) {
4417 free(namebuf);
4418 return JIM_ERR;
4419 }
4420
4421 Jim_IncrRefCount(obj_name);
4422 Jim_IncrRefCount(obj_val);
4423 result = Jim_SetVariable(interp, obj_name, obj_val);
4424 Jim_DecrRefCount(interp, obj_name);
4425 Jim_DecrRefCount(interp, obj_val);
4426 free(namebuf);
4427 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4428 return result;
4429 }
4430
/*
 * DEPRECATED implementation of the Tcl 'mem2array' helper: reads target
 * memory in chunks and stores each element into the Tcl array variable
 * varname(0..nelems-1).  Superseded by the 'read_memory' command.
 *
 * Returns JIM_OK on success; on failure returns JIM_ERR with an error
 * message set as the interpreter result.
 */
static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
{
	int e;

	LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");

	/* argv[0] = name of array to receive the data
	 * argv[1] = desired element width in bits
	 * argv[2] = memory address
	 * argv[3] = count of times to read
	 * argv[4] = optional "phys"
	 */
	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
		return JIM_ERR;
	}

	/* Arg 0: Name of the array variable */
	const char *varname = Jim_GetString(argv[0], NULL);

	/* Arg 1: Bit width of one element */
	long l;
	e = Jim_GetLong(interp, argv[1], &l);
	if (e != JIM_OK)
		return e;
	const unsigned int width_bits = l;

	if (width_bits != 8 &&
			width_bits != 16 &&
			width_bits != 32 &&
			width_bits != 64) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
		return JIM_ERR;
	}
	/* element width in bytes from here on */
	const unsigned int width = width_bits / 8;

	/* Arg 2: Memory address */
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[2], &wide_addr);
	if (e != JIM_OK)
		return e;
	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 3: Number of elements to read */
	e = Jim_GetLong(interp, argv[3], &l);
	if (e != JIM_OK)
		return e;
	size_t len = l;

	/* Arg 4: phys */
	bool is_phys = false;
	if (argc > 4) {
		int str_len = 0;
		const char *phys = Jim_GetString(argv[4], &str_len);
		/* NOTE(review): strncmp with str_len == 0 matches, so an empty
		 * string argument is accepted as "phys" — confirm intended */
		if (!strncmp(phys, "phys", str_len))
			is_phys = true;
		else
			return JIM_ERR;
	}

	/* Argument checks */
	if (len == 0) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
		return JIM_ERR;
	}
	/* reject address-range wrap-around */
	if ((addr + (len * width)) < addr) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
		return JIM_ERR;
	}
	if (len > 65536) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"mem2array: too large read request, exceeds 64K items", NULL);
		return JIM_ERR;
	}

	/* accesses must be naturally aligned for the element width */
	if ((width == 1) ||
			((width == 2) && ((addr & 1) == 0)) ||
			((width == 4) && ((addr & 3) == 0)) ||
			((width == 8) && ((addr & 7) == 0))) {
		/* alignment correct */
	} else {
		char buf[100];
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
				addr,
				width);
		Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
		return JIM_ERR;
	}

	/* Transfer loop */

	/* index counter */
	size_t idx = 0;

	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);
	if (!buffer)
		return JIM_ERR;

	/* assume ok */
	e = JIM_OK;
	while (len) {
		/* Slurp... in buffer size chunks */
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */

		int retval;
		if (is_phys)
			retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_read_memory(target, addr, width, chunk_len, buffer);
		if (retval != ERROR_OK) {
			/* BOO !*/
			LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
					addr,
					width,
					chunk_len);
			Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
			Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
			e = JIM_ERR;
			break;
		} else {
			/* decode each element from the raw buffer in target byte
			 * order, then store it into varname(idx) */
			for (size_t i = 0; i < chunk_len ; i++, idx++) {
				uint64_t v = 0;
				switch (width) {
					case 8:
						v = target_buffer_get_u64(target, &buffer[i*width]);
						break;
					case 4:
						v = target_buffer_get_u32(target, &buffer[i*width]);
						break;
					case 2:
						v = target_buffer_get_u16(target, &buffer[i*width]);
						break;
					case 1:
						v = buffer[i] & 0x0ff;
						break;
				}
				new_u64_array_element(interp, varname, idx, v);
			}
			len -= chunk_len;
			addr += chunk_len * width;
		}
	}

	free(buffer);

	Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));

	return e;
}
4588
4589 static int target_jim_read_memory(Jim_Interp *interp, int argc,
4590 Jim_Obj * const *argv)
4591 {
4592 /*
4593 * argv[1] = memory address
4594 * argv[2] = desired element width in bits
4595 * argv[3] = number of elements to read
4596 * argv[4] = optional "phys"
4597 */
4598
4599 if (argc < 4 || argc > 5) {
4600 Jim_WrongNumArgs(interp, 1, argv, "address width count ['phys']");
4601 return JIM_ERR;
4602 }
4603
4604 /* Arg 1: Memory address. */
4605 jim_wide wide_addr;
4606 int e;
4607 e = Jim_GetWide(interp, argv[1], &wide_addr);
4608
4609 if (e != JIM_OK)
4610 return e;
4611
4612 target_addr_t addr = (target_addr_t)wide_addr;
4613
4614 /* Arg 2: Bit width of one element. */
4615 long l;
4616 e = Jim_GetLong(interp, argv[2], &l);
4617
4618 if (e != JIM_OK)
4619 return e;
4620
4621 const unsigned int width_bits = l;
4622
4623 /* Arg 3: Number of elements to read. */
4624 e = Jim_GetLong(interp, argv[3], &l);
4625
4626 if (e != JIM_OK)
4627 return e;
4628
4629 size_t count = l;
4630
4631 /* Arg 4: Optional 'phys'. */
4632 bool is_phys = false;
4633
4634 if (argc > 4) {
4635 const char *phys = Jim_GetString(argv[4], NULL);
4636
4637 if (strcmp(phys, "phys")) {
4638 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4639 return JIM_ERR;
4640 }
4641
4642 is_phys = true;
4643 }
4644
4645 switch (width_bits) {
4646 case 8:
4647 case 16:
4648 case 32:
4649 case 64:
4650 break;
4651 default:
4652 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4653 return JIM_ERR;
4654 }
4655
4656 const unsigned int width = width_bits / 8;
4657
4658 if ((addr + (count * width)) < addr) {
4659 Jim_SetResultString(interp, "read_memory: addr + count wraps to zero", -1);
4660 return JIM_ERR;
4661 }
4662
4663 if (count > 65536) {
4664 Jim_SetResultString(interp, "read_memory: too large read request, exeeds 64K elements", -1);
4665 return JIM_ERR;
4666 }
4667
4668 struct command_context *cmd_ctx = current_command_context(interp);
4669 assert(cmd_ctx != NULL);
4670 struct target *target = get_current_target(cmd_ctx);
4671
4672 const size_t buffersize = 4096;
4673 uint8_t *buffer = malloc(buffersize);
4674
4675 if (!buffer) {
4676 LOG_ERROR("Failed to allocate memory");
4677 return JIM_ERR;
4678 }
4679
4680 Jim_Obj *result_list = Jim_NewListObj(interp, NULL, 0);
4681 Jim_IncrRefCount(result_list);
4682
4683 while (count > 0) {
4684 const unsigned int max_chunk_len = buffersize / width;
4685 const size_t chunk_len = MIN(count, max_chunk_len);
4686
4687 int retval;
4688
4689 if (is_phys)
4690 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4691 else
4692 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4693
4694 if (retval != ERROR_OK) {
4695 LOG_ERROR("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4696 addr, width_bits, chunk_len);
4697 Jim_SetResultString(interp, "read_memory: failed to read memory", -1);
4698 e = JIM_ERR;
4699 break;
4700 }
4701
4702 for (size_t i = 0; i < chunk_len ; i++) {
4703 uint64_t v = 0;
4704
4705 switch (width) {
4706 case 8:
4707 v = target_buffer_get_u64(target, &buffer[i * width]);
4708 break;
4709 case 4:
4710 v = target_buffer_get_u32(target, &buffer[i * width]);
4711 break;
4712 case 2:
4713 v = target_buffer_get_u16(target, &buffer[i * width]);
4714 break;
4715 case 1:
4716 v = buffer[i];
4717 break;
4718 }
4719
4720 char value_buf[11];
4721 snprintf(value_buf, sizeof(value_buf), "0x%" PRIx64, v);
4722
4723 Jim_ListAppendElement(interp, result_list,
4724 Jim_NewStringObj(interp, value_buf, -1));
4725 }
4726
4727 count -= chunk_len;
4728 addr += chunk_len * width;
4729 }
4730
4731 free(buffer);
4732
4733 if (e != JIM_OK) {
4734 Jim_DecrRefCount(interp, result_list);
4735 return e;
4736 }
4737
4738 Jim_SetResult(interp, result_list);
4739 Jim_DecrRefCount(interp, result_list);
4740
4741 return JIM_OK;
4742 }
4743
4744 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4745 {
4746 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4747 if (!namebuf)
4748 return JIM_ERR;
4749
4750 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4751 if (!obj_name) {
4752 free(namebuf);
4753 return JIM_ERR;
4754 }
4755
4756 Jim_IncrRefCount(obj_name);
4757 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4758 Jim_DecrRefCount(interp, obj_name);
4759 free(namebuf);
4760 if (!obj_val)
4761 return JIM_ERR;
4762
4763 jim_wide wide_val;
4764 int result = Jim_GetWide(interp, obj_val, &wide_val);
4765 *val = wide_val;
4766 return result;
4767 }
4768
4769 static int target_array2mem(Jim_Interp *interp, struct target *target,
4770 int argc, Jim_Obj *const *argv)
4771 {
4772 int e;
4773
4774 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4775
4776 /* argv[0] = name of array from which to read the data
4777 * argv[1] = desired element width in bits
4778 * argv[2] = memory address
4779 * argv[3] = number of elements to write
4780 * argv[4] = optional "phys"
4781 */
4782 if (argc < 4 || argc > 5) {
4783 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4784 return JIM_ERR;
4785 }
4786
4787 /* Arg 0: Name of the array variable */
4788 const char *varname = Jim_GetString(argv[0], NULL);
4789
4790 /* Arg 1: Bit width of one element */
4791 long l;
4792 e = Jim_GetLong(interp, argv[1], &l);
4793 if (e != JIM_OK)
4794 return e;
4795 const unsigned int width_bits = l;
4796
4797 if (width_bits != 8 &&
4798 width_bits != 16 &&
4799 width_bits != 32 &&
4800 width_bits != 64) {
4801 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4802 Jim_AppendStrings(interp, Jim_GetResult(interp),
4803 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4804 return JIM_ERR;
4805 }
4806 const unsigned int width = width_bits / 8;
4807
4808 /* Arg 2: Memory address */
4809 jim_wide wide_addr;
4810 e = Jim_GetWide(interp, argv[2], &wide_addr);
4811 if (e != JIM_OK)
4812 return e;
4813 target_addr_t addr = (target_addr_t)wide_addr;
4814
4815 /* Arg 3: Number of elements to write */
4816 e = Jim_GetLong(interp, argv[3], &l);
4817 if (e != JIM_OK)
4818 return e;
4819 size_t len = l;
4820
4821 /* Arg 4: Phys */
4822 bool is_phys = false;
4823 if (argc > 4) {
4824 int str_len = 0;
4825 const char *phys = Jim_GetString(argv[4], &str_len);
4826 if (!strncmp(phys, "phys", str_len))
4827 is_phys = true;
4828 else
4829 return JIM_ERR;
4830 }
4831
4832 /* Argument checks */
4833 if (len == 0) {
4834 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4835 Jim_AppendStrings(interp, Jim_GetResult(interp),
4836 "array2mem: zero width read?", NULL);
4837 return JIM_ERR;
4838 }
4839
4840 if ((addr + (len * width)) < addr) {
4841 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4842 Jim_AppendStrings(interp, Jim_GetResult(interp),
4843 "array2mem: addr + len - wraps to zero?", NULL);
4844 return JIM_ERR;
4845 }
4846
4847 if (len > 65536) {
4848 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4849 Jim_AppendStrings(interp, Jim_GetResult(interp),
4850 "array2mem: too large memory write request, exceeds 64K items", NULL);
4851 return JIM_ERR;
4852 }
4853
4854 if ((width == 1) ||
4855 ((width == 2) && ((addr & 1) == 0)) ||
4856 ((width == 4) && ((addr & 3) == 0)) ||
4857 ((width == 8) && ((addr & 7) == 0))) {
4858 /* alignment correct */
4859 } else {
4860 char buf[100];
4861 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4862 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4863 addr,
4864 width);
4865 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4866 return JIM_ERR;
4867 }
4868
4869 /* Transfer loop */
4870
4871 /* assume ok */
4872 e = JIM_OK;
4873
4874 const size_t buffersize = 4096;
4875 uint8_t *buffer = malloc(buffersize);
4876 if (!buffer)
4877 return JIM_ERR;
4878
4879 /* index counter */
4880 size_t idx = 0;
4881
4882 while (len) {
4883 /* Slurp... in buffer size chunks */
4884 const unsigned int max_chunk_len = buffersize / width;
4885
4886 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4887
4888 /* Fill the buffer */
4889 for (size_t i = 0; i < chunk_len; i++, idx++) {
4890 uint64_t v = 0;
4891 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4892 free(buffer);
4893 return JIM_ERR;
4894 }
4895 switch (width) {
4896 case 8:
4897 target_buffer_set_u64(target, &buffer[i * width], v);
4898 break;
4899 case 4:
4900 target_buffer_set_u32(target, &buffer[i * width], v);
4901 break;
4902 case 2:
4903 target_buffer_set_u16(target, &buffer[i * width], v);
4904 break;
4905 case 1:
4906 buffer[i] = v & 0x0ff;
4907 break;
4908 }
4909 }
4910 len -= chunk_len;
4911
4912 /* Write the buffer to memory */
4913 int retval;
4914 if (is_phys)
4915 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4916 else
4917 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4918 if (retval != ERROR_OK) {
4919 /* BOO !*/
4920 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4921 addr,
4922 width,
4923 chunk_len);
4924 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4925 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4926 e = JIM_ERR;
4927 break;
4928 }
4929 addr += chunk_len * width;
4930 }
4931
4932 free(buffer);
4933
4934 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4935
4936 return e;
4937 }
4938
4939 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4940 Jim_Obj * const *argv)
4941 {
4942 /*
4943 * argv[1] = memory address
4944 * argv[2] = desired element width in bits
4945 * argv[3] = list of data to write
4946 * argv[4] = optional "phys"
4947 */
4948
4949 if (argc < 4 || argc > 5) {
4950 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4951 return JIM_ERR;
4952 }
4953
4954 /* Arg 1: Memory address. */
4955 int e;
4956 jim_wide wide_addr;
4957 e = Jim_GetWide(interp, argv[1], &wide_addr);
4958
4959 if (e != JIM_OK)
4960 return e;
4961
4962 target_addr_t addr = (target_addr_t)wide_addr;
4963
4964 /* Arg 2: Bit width of one element. */
4965 long l;
4966 e = Jim_GetLong(interp, argv[2], &l);
4967
4968 if (e != JIM_OK)
4969 return e;
4970
4971 const unsigned int width_bits = l;
4972 size_t count = Jim_ListLength(interp, argv[3]);
4973
4974 /* Arg 4: Optional 'phys'. */
4975 bool is_phys = false;
4976
4977 if (argc > 4) {
4978 const char *phys = Jim_GetString(argv[4], NULL);
4979
4980 if (strcmp(phys, "phys")) {
4981 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4982 return JIM_ERR;
4983 }
4984
4985 is_phys = true;
4986 }
4987
4988 switch (width_bits) {
4989 case 8:
4990 case 16:
4991 case 32:
4992 case 64:
4993 break;
4994 default:
4995 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4996 return JIM_ERR;
4997 }
4998
4999 const unsigned int width = width_bits / 8;
5000
5001 if ((addr + (count * width)) < addr) {
5002 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
5003 return JIM_ERR;
5004 }
5005
5006 if (count > 65536) {
5007 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
5008 return JIM_ERR;
5009 }
5010
5011 struct command_context *cmd_ctx = current_command_context(interp);
5012 assert(cmd_ctx != NULL);
5013 struct target *target = get_current_target(cmd_ctx);
5014
5015 const size_t buffersize = 4096;
5016 uint8_t *buffer = malloc(buffersize);
5017
5018 if (!buffer) {
5019 LOG_ERROR("Failed to allocate memory");
5020 return JIM_ERR;
5021 }
5022
5023 size_t j = 0;
5024
5025 while (count > 0) {
5026 const unsigned int max_chunk_len = buffersize / width;
5027 const size_t chunk_len = MIN(count, max_chunk_len);
5028
5029 for (size_t i = 0; i < chunk_len; i++, j++) {
5030 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
5031 jim_wide element_wide;
5032 Jim_GetWide(interp, tmp, &element_wide);
5033
5034 const uint64_t v = element_wide;
5035
5036 switch (width) {
5037 case 8:
5038 target_buffer_set_u64(target, &buffer[i * width], v);
5039 break;
5040 case 4:
5041 target_buffer_set_u32(target, &buffer[i * width], v);
5042 break;
5043 case 2:
5044 target_buffer_set_u16(target, &buffer[i * width], v);
5045 break;
5046 case 1:
5047 buffer[i] = v & 0x0ff;
5048 break;
5049 }
5050 }
5051
5052 count -= chunk_len;
5053
5054 int retval;
5055
5056 if (is_phys)
5057 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5058 else
5059 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5060
5061 if (retval != ERROR_OK) {
5062 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5063 addr, width_bits, chunk_len);
5064 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5065 e = JIM_ERR;
5066 break;
5067 }
5068
5069 addr += chunk_len * width;
5070 }
5071
5072 free(buffer);
5073
5074 return e;
5075 }
5076
/* FIX? should we propagate errors here rather than printing them
 * and continuing?
 */
/**
 * Run every Tcl event handler registered on @a target for event @a e.
 * Handler errors are reported to the user but do not stop processing,
 * except ERROR_COMMAND_CLOSE_CONNECTION which returns immediately.
 */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
				target->target_number,
				target_name(target),
				target_type_name(target),
				e,
				target_event_name(e),
				Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			cmd_ctx->current_target_override = saved_target_override;

			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* a 'return' inside the handler body is not an error */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
					target_event_name(e),
					target_name(target),
					Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
5125
/*
 * Implements the Tcl 'get_reg' command:
 *   get_reg [-force] list
 *
 * Returns a Tcl dict mapping each requested register name to its value
 * as a "0x..." hex string.  With '-force' the value is re-read from the
 * target instead of using the cached register content.
 *
 * NOTE(review): error paths return without releasing 'result_dict' or the
 * intermediate objects; presumably Jim reclaims zero-refcount objects when
 * the interpreter is cleaned up — verify there is no leak on repeated
 * failing calls.
 */
static int target_jim_get_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	bool force = false;

	if (argc == 3) {
		const char *option = Jim_GetString(argv[1], NULL);

		if (!strcmp(option, "-force")) {
			/* consume the option so argv[1] is the register list */
			argc--;
			argv++;
			force = true;
		} else {
			Jim_SetResultFormatted(interp, "invalid option '%s'", option);
			return JIM_ERR;
		}
	}

	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
		return JIM_ERR;
	}

	const int length = Jim_ListLength(interp, argv[1]);

	Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);

	if (!result_dict)
		return JIM_ERR;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx != NULL);
	const struct target *target = get_current_target(cmd_ctx);

	for (int i = 0; i < length; i++) {
		Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);

		if (!elem)
			return JIM_ERR;

		const char *reg_name = Jim_String(elem);

		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		if (force) {
			/* refresh the cached value from the target */
			int retval = reg->type->get(reg);

			if (retval != ERROR_OK) {
				Jim_SetResultFormatted(interp, "failed to read register '%s'",
					reg_name);
				return JIM_ERR;
			}
		}

		char *reg_value = buf_to_hex_str(reg->value, reg->size);

		if (!reg_value) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		/* prefix with "0x" for the dict value */
		char *tmp = alloc_printf("0x%s", reg_value);

		free(reg_value);

		if (!tmp) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		Jim_DictAddElement(interp, result_dict, elem,
			Jim_NewStringObj(interp, tmp, -1));

		free(tmp);
	}

	Jim_SetResult(interp, result_dict);

	return JIM_OK;
}
5212
/*
 * Implements the Tcl 'set_reg' command:
 *   set_reg dict
 *
 * Takes a Tcl dict of register-name -> value pairs and writes each value
 * to the corresponding register of the current target.
 */
static int target_jim_set_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "dict");
		return JIM_ERR;
	}

	int tmp;
#if JIM_VERSION >= 80
	/* Jim >= 0.80 returns the flattened key/value array directly */
	Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);

	if (!dict)
		return JIM_ERR;
#else
	/* older Jim returns the array via an out parameter */
	Jim_Obj **dict;
	int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);

	if (ret != JIM_OK)
		return ret;
#endif

	/* 'length' counts keys and values, so iterate in steps of two */
	const unsigned int length = tmp;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	const struct target *target = get_current_target(cmd_ctx);

	for (unsigned int i = 0; i < length; i += 2) {
		const char *reg_name = Jim_String(dict[i]);
		const char *reg_value = Jim_String(dict[i + 1]);
		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));

		if (!buf) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		/* NOTE(review): str_to_buf()'s return value is ignored; a
		 * malformed value string may write an unintended buffer —
		 * confirm whether parse errors should be reported here */
		str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
		int retval = reg->type->set(reg, buf);
		free(buf);

		if (retval != ERROR_OK) {
			Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
				reg_value, reg_name);
			return JIM_ERR;
		}
	}

	return JIM_OK;
}
5271
5272 /**
5273 * Returns true only if the target has a handler for the specified event.
5274 */
5275 bool target_has_event_action(struct target *target, enum target_event event)
5276 {
5277 struct target_event_action *teap;
5278
5279 for (teap = target->event_action; teap; teap = teap->next) {
5280 if (teap->event == event)
5281 return true;
5282 }
5283 return false;
5284 }
5285
/* Parameter keys understood by 'target configure' / '$target cget'.
 * Must stay in sync with nvp_config_opts[] below. */
enum target_cfg_param {
	TCFG_TYPE,			/* -type (read-only target type name) */
	TCFG_EVENT,			/* -event <name> [<body>] */
	TCFG_WORK_AREA_VIRT,		/* -work-area-virt */
	TCFG_WORK_AREA_PHYS,		/* -work-area-phys */
	TCFG_WORK_AREA_SIZE,		/* -work-area-size */
	TCFG_WORK_AREA_BACKUP,		/* -work-area-backup */
	TCFG_ENDIAN,			/* -endian */
	TCFG_COREID,			/* -coreid */
	TCFG_CHAIN_POSITION,		/* -chain-position */
	TCFG_DBGBASE,			/* -dbgbase */
	TCFG_RTOS,			/* -rtos */
	TCFG_DEFER_EXAMINE,		/* -defer-examine */
	TCFG_GDB_PORT,			/* -gdb-port */
	TCFG_GDB_MAX_CONNECTIONS,	/* -gdb-max-connections */
};
5302
/* Option-name to enum mapping used by target_configure();
 * terminated by the NULL-name sentinel entry. */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type",                 .value = TCFG_TYPE },
	{ .name = "-event",                .value = TCFG_EVENT },
	{ .name = "-work-area-virt",       .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys",       .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size",       .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup",     .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian",               .value = TCFG_ENDIAN },
	{ .name = "-coreid",               .value = TCFG_COREID },
	{ .name = "-chain-position",       .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase",              .value = TCFG_DBGBASE },
	{ .name = "-rtos",                 .value = TCFG_RTOS },
	{ .name = "-defer-examine",        .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port",             .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections",  .value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
5320
/**
 * Parse and apply "configure"/"cget" style options for @a target.
 *
 * Shared by the per-target "configure" and "cget" commands and by
 * "target create" (which sets goi->isconfigure before calling).  With
 * goi->isconfigure set, each -option consumes and stores its value;
 * otherwise the current value of the requested option is left as the
 * Jim result.  The target type's own target_jim_configure hook, if any,
 * gets first chance at every option.
 *
 * Returns JIM_OK on success or a Jim error code, with a message left in
 * the interpreter result on failure.
 */
static int target_configure(struct jim_getopt_info *goi, struct target *target)
{
	struct jim_nvp *n;
	Jim_Obj *o;
	jim_wide w;
	int e;

	/* parse config or cget options ... */
	while (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);
		/* jim_getopt_debug(goi); */

		if (target->type->target_jim_configure) {
			/* target defines a configure function */
			/* target gets first dibs on parameters */
			e = (*(target->type->target_jim_configure))(target, goi);
			if (e == JIM_OK) {
				/* more? */
				continue;
			}
			if (e == JIM_ERR) {
				/* An error */
				return e;
			}
			/* otherwise we 'continue' below */
		}
		e = jim_getopt_nvp(goi, nvp_config_opts, &n);
		if (e != JIM_OK) {
			jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
			return e;
		}
		switch (n->value) {
		case TCFG_TYPE:
			/* not settable */
			if (goi->isconfigure) {
				Jim_SetResultFormatted(goi->interp,
						"not settable: %s", n->name);
				return JIM_ERR;
			} else {
				/* shared "cget" exit used (via goto) by every
				 * option below: a plain get takes no value */
no_params:
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}
			}
			Jim_SetResultString(goi->interp,
					target_type_name(target), -1);
			/* loop for more */
			break;
		case TCFG_EVENT:
			if (goi->argc == 0) {
				Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
				return JIM_ERR;
			}

			e = jim_getopt_nvp(goi, nvp_target_event, &n);
			if (e != JIM_OK) {
				jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
				return e;
			}

			if (goi->isconfigure) {
				if (goi->argc != 1) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
					return JIM_ERR;
				}
			} else {
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
					return JIM_ERR;
				}
			}

			{
				struct target_event_action *teap;

				teap = target->event_action;
				/* replace existing? */
				while (teap) {
					if (teap->event == (enum target_event)n->value)
						break;
					teap = teap->next;
				}

				if (goi->isconfigure) {
					/* START_DEPRECATED_TPIU */
					if (n->value == TARGET_EVENT_TRACE_CONFIG)
						LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
					/* END_DEPRECATED_TPIU */

					bool replace = true;
					if (!teap) {
						/* create new */
						teap = calloc(1, sizeof(*teap));
						replace = false;
					}
					teap->event = n->value;
					teap->interp = goi->interp;
					jim_getopt_obj(goi, &o);
					/* drop the old body's reference before
					 * storing the new one */
					if (teap->body)
						Jim_DecrRefCount(teap->interp, teap->body);
					teap->body = Jim_DuplicateObj(goi->interp, o);
					/*
					 * FIXME:
					 * Tcl/TK - "tk events" have a nice feature.
					 * See the "BIND" command.
					 * We should support that here.
					 * You can specify %X and %Y in the event code.
					 * The idea is: %T - target name.
					 * The idea is: %N - target number
					 * The idea is: %E - event name.
					 */
					Jim_IncrRefCount(teap->body);

					if (!replace) {
						/* add to head of event list */
						teap->next = target->event_action;
						target->event_action = teap;
					}
					Jim_SetEmptyResult(goi->interp);
				} else {
					/* get */
					if (!teap)
						Jim_SetEmptyResult(goi->interp);
					else
						Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
				}
			}
			/* loop for more */
			break;

		case TCFG_WORK_AREA_VIRT:
			if (goi->isconfigure) {
				/* changing the layout invalidates anything allocated */
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_virt = w;
				target->working_area_virt_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_PHYS:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_phys = w;
				target->working_area_phys_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_SIZE:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_size = w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_BACKUP:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* make this exactly 1 or 0 */
				target->backup_working_area = (!!w);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
			/* loop for more e*/
			break;


		case TCFG_ENDIAN:
			if (goi->isconfigure) {
				e = jim_getopt_nvp(goi, nvp_target_endian, &n);
				if (e != JIM_OK) {
					jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
					return e;
				}
				target->endianness = n->value;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			/* normalize an unknown/unset endianness to little
			 * before reporting it back */
			n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			if (!n->name) {
				target->endianness = TARGET_LITTLE_ENDIAN;
				n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			}
			Jim_SetResultString(goi->interp, n->name, -1);
			/* loop for more */
			break;

		case TCFG_COREID:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->coreid = (int32_t)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
			/* loop for more */
			break;

		case TCFG_CHAIN_POSITION:
			if (goi->isconfigure) {
				Jim_Obj *o_t;
				struct jtag_tap *tap;

				/* DAP-based targets take -dap, not -chain-position */
				if (target->has_dap) {
					Jim_SetResultString(goi->interp,
						"target requires -dap parameter instead of -chain-position!", -1);
					return JIM_ERR;
				}

				target_free_all_working_areas(target);
				e = jim_getopt_obj(goi, &o_t);
				if (e != JIM_OK)
					return e;
				tap = jtag_tap_by_jim_obj(goi->interp, o_t);
				if (!tap)
					return JIM_ERR;
				target->tap = tap;
				target->tap_configured = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
			/* loop for more e*/
			break;
		case TCFG_DBGBASE:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->dbgbase = (uint32_t)w;
				target->dbgbase_set = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
			/* loop for more */
			break;
		case TCFG_RTOS:
			/* RTOS */
			{
				int result = rtos_create(goi, target);
				if (result != JIM_OK)
					return result;
			}
			/* loop for more */
			break;

		case TCFG_DEFER_EXAMINE:
			/* DEFER_EXAMINE */
			target->defer_examine = true;
			/* loop for more */
			break;

		case TCFG_GDB_PORT:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
					return JIM_ERR;
				}

				const char *s;
				e = jim_getopt_string(goi, &s, NULL);
				if (e != JIM_OK)
					return e;
				/* replace any previous override */
				free(target->gdb_port_override);
				target->gdb_port_override = strdup(s);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
			/* loop for more */
			break;

		case TCFG_GDB_MAX_CONNECTIONS:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
					return JIM_ERR;
				}

				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* negative values mean "no limit" */
				target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
			break;
		}
	} /* while (goi->argc) */


	/* done - we return */
	return JIM_OK;
}
5657
5658 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5659 {
5660 struct command *c = jim_to_command(interp);
5661 struct jim_getopt_info goi;
5662
5663 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5664 goi.isconfigure = !strcmp(c->name, "configure");
5665 if (goi.argc < 1) {
5666 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5667 "missing: -option ...");
5668 return JIM_ERR;
5669 }
5670 struct command_context *cmd_ctx = current_command_context(interp);
5671 assert(cmd_ctx);
5672 struct target *target = get_current_target(cmd_ctx);
5673 return target_configure(&goi, target);
5674 }
5675
5676 static int jim_target_mem2array(Jim_Interp *interp,
5677 int argc, Jim_Obj *const *argv)
5678 {
5679 struct command_context *cmd_ctx = current_command_context(interp);
5680 assert(cmd_ctx);
5681 struct target *target = get_current_target(cmd_ctx);
5682 return target_mem2array(interp, target, argc - 1, argv + 1);
5683 }
5684
5685 static int jim_target_array2mem(Jim_Interp *interp,
5686 int argc, Jim_Obj *const *argv)
5687 {
5688 struct command_context *cmd_ctx = current_command_context(interp);
5689 assert(cmd_ctx);
5690 struct target *target = get_current_target(cmd_ctx);
5691 return target_array2mem(interp, target, argc - 1, argv + 1);
5692 }
5693
/* Common error reply for commands issued while the target's TAP is
 * disabled: sets the Jim result and returns JIM_ERR. */
static int jim_target_tap_disabled(Jim_Interp *interp)
{
	Jim_SetResultFormatted(interp, "[TAP is disabled]");
	return JIM_ERR;
}
5699
/*
 * 'arp_examine' handler: run the target type's examine() hook.
 *
 * Optional argument "allow-defer": when given and the target was created
 * with -defer-examine, skip the examination and report success, leaving
 * it to be triggered manually later.
 */
static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	bool allow_defer = false;

	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
	if (goi.argc > 1) {
		const char *cmd_name = Jim_GetString(argv[0], NULL);
		Jim_SetResultFormatted(goi.interp,
				"usage: %s ['allow-defer']", cmd_name);
		return JIM_ERR;
	}
	/* NOTE: argv[1] is the raw first argument (goi was set up over
	 * argv + 1, so it is the same object goi would consume next) */
	if (goi.argc > 0 &&
	    strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
		/* consume it */
		Jim_Obj *obj;
		int e = jim_getopt_obj(&goi, &obj);
		if (e != JIM_OK)
			return e;
		allow_defer = true;
	}

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	if (allow_defer && target->defer_examine) {
		LOG_INFO("Deferring arp_examine of %s", target_name(target));
		LOG_INFO("Use arp_examine command to examine it manually!");
		return JIM_OK;
	}

	int e = target->type->examine(target);
	if (e != ERROR_OK) {
		/* make sure a failed examine leaves the target marked
		 * as not-examined */
		target_reset_examined(target);
		return JIM_ERR;
	}

	target_set_examined(target);

	return JIM_OK;
}
5744
5745 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5746 {
5747 struct command_context *cmd_ctx = current_command_context(interp);
5748 assert(cmd_ctx);
5749 struct target *target = get_current_target(cmd_ctx);
5750
5751 Jim_SetResultBool(interp, target_was_examined(target));
5752 return JIM_OK;
5753 }
5754
5755 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5756 {
5757 struct command_context *cmd_ctx = current_command_context(interp);
5758 assert(cmd_ctx);
5759 struct target *target = get_current_target(cmd_ctx);
5760
5761 Jim_SetResultBool(interp, target->defer_examine);
5762 return JIM_OK;
5763 }
5764
5765 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5766 {
5767 if (argc != 1) {
5768 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5769 return JIM_ERR;
5770 }
5771 struct command_context *cmd_ctx = current_command_context(interp);
5772 assert(cmd_ctx);
5773 struct target *target = get_current_target(cmd_ctx);
5774
5775 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5776 return JIM_ERR;
5777
5778 return JIM_OK;
5779 }
5780
5781 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5782 {
5783 if (argc != 1) {
5784 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5785 return JIM_ERR;
5786 }
5787 struct command_context *cmd_ctx = current_command_context(interp);
5788 assert(cmd_ctx);
5789 struct target *target = get_current_target(cmd_ctx);
5790 if (!target->tap->enabled)
5791 return jim_target_tap_disabled(interp);
5792
5793 int e;
5794 if (!(target_was_examined(target)))
5795 e = ERROR_TARGET_NOT_EXAMINED;
5796 else
5797 e = target->type->poll(target);
5798 if (e != ERROR_OK)
5799 return JIM_ERR;
5800 return JIM_OK;
5801 }
5802
/*
 * 'arp_reset' handler: assert or deassert reset on the current target.
 *
 * Arguments: (assert|deassert) halt_flag
 * The halt flag records whether the target should halt out of reset
 * (stored in target->reset_halt for the reset machinery).
 */
static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);

	if (goi.argc != 2) {
		Jim_WrongNumArgs(interp, 0, argv,
				"([tT]|[fF]|assert|deassert) BOOL");
		return JIM_ERR;
	}

	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_assert, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
		return e;
	}
	/* the halt or not param */
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	if (!target->type->assert_reset || !target->type->deassert_reset) {
		Jim_SetResultFormatted(interp,
				"No target-specific reset for %s",
				target_name(target));
		return JIM_ERR;
	}

	/* a defer-examine target must be re-examined after reset */
	if (target->defer_examine)
		target_reset_examined(target);

	/* determine if we should halt or not. */
	target->reset_halt = (a != 0);
	/* When this happens - all workareas are invalid. */
	target_free_all_working_areas_restore(target, 0);

	/* do the assert */
	if (n->value == NVP_ASSERT)
		e = target->type->assert_reset(target);
	else
		e = target->type->deassert_reset(target);
	return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
}
5854
5855 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5856 {
5857 if (argc != 1) {
5858 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5859 return JIM_ERR;
5860 }
5861 struct command_context *cmd_ctx = current_command_context(interp);
5862 assert(cmd_ctx);
5863 struct target *target = get_current_target(cmd_ctx);
5864 if (!target->tap->enabled)
5865 return jim_target_tap_disabled(interp);
5866 int e = target->type->halt(target);
5867 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5868 }
5869
/*
 * 'arp_waitstate' handler: block until the current target reaches the
 * named state or the timeout (in milliseconds) expires.
 *
 * Arguments: <state_name> <timeout_in_msec>
 */
static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);

	/* params: <name> statename timeoutmsecs */
	if (goi.argc != 2) {
		const char *cmd_name = Jim_GetString(argv[0], NULL);
		Jim_SetResultFormatted(goi.interp,
				"%s <state_name> <timeout_in_msec>", cmd_name);
		return JIM_ERR;
	}

	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
		return e;
	}
	/* timeout in milliseconds */
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	e = target_wait_state(target, n->value, a);
	if (e != ERROR_OK) {
		/* %#s is the Jim format extension that embeds a Jim_Obj */
		Jim_Obj *obj = Jim_NewIntObj(interp, e);
		Jim_SetResultFormatted(goi.interp,
				"target: %s wait %s fails (%#s) %s",
				target_name(target), n->name,
				obj, target_strerror_safe(e));
		return JIM_ERR;
	}
	return JIM_OK;
}
5910 /* List for human, Events defined for this target.
5911 * scripts/programs should use 'name cget -event NAME'
5912 */
5913 COMMAND_HANDLER(handle_target_event_list)
5914 {
5915 struct target *target = get_current_target(CMD_CTX);
5916 struct target_event_action *teap = target->event_action;
5917
5918 command_print(CMD, "Event actions for target (%d) %s\n",
5919 target->target_number,
5920 target_name(target));
5921 command_print(CMD, "%-25s | Body", "Event");
5922 command_print(CMD, "------------------------- | "
5923 "----------------------------------------");
5924 while (teap) {
5925 command_print(CMD, "%-25s | %s",
5926 target_event_name(teap->event),
5927 Jim_GetString(teap->body, NULL));
5928 teap = teap->next;
5929 }
5930 command_print(CMD, "***END***");
5931 return ERROR_OK;
5932 }
5933 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5934 {
5935 if (argc != 1) {
5936 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5937 return JIM_ERR;
5938 }
5939 struct command_context *cmd_ctx = current_command_context(interp);
5940 assert(cmd_ctx);
5941 struct target *target = get_current_target(cmd_ctx);
5942 Jim_SetResultString(interp, target_state_name(target), -1);
5943 return JIM_OK;
5944 }
5945 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5946 {
5947 struct jim_getopt_info goi;
5948 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5949 if (goi.argc != 1) {
5950 const char *cmd_name = Jim_GetString(argv[0], NULL);
5951 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5952 return JIM_ERR;
5953 }
5954 struct jim_nvp *n;
5955 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5956 if (e != JIM_OK) {
5957 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5958 return e;
5959 }
5960 struct command_context *cmd_ctx = current_command_context(interp);
5961 assert(cmd_ctx);
5962 struct target *target = get_current_target(cmd_ctx);
5963 target_handle_event(target, n->value);
5964 return JIM_OK;
5965 }
5966
/* Commands registered under each individual target's name (e.g.
 * "mytarget mww ..."); chained into the per-target command group built
 * by target_create(). */
static const struct command_registration target_instance_command_handlers[] = {
	{
		.name = "configure",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_configure,
		.help = "configure a new target for use",
		.usage = "[target_attribute ...]",
	},
	{
		.name = "cget",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_configure,
		.help = "returns the specified target attribute",
		.usage = "target_attribute",
	},
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 64-bit word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 32-bit word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 16-bit half-word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write byte(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 64-bit words",
		.usage = "address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 32-bit words",
		.usage = "address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 16-bit half-words",
		.usage = "address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 8-bit bytes",
		.usage = "address [count]",
	},
	{
		.name = "array2mem",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_array2mem,
		.help = "Writes Tcl array of 8/16/32 bit numbers "
			"to target memory",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "mem2array",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_mem2array,
		.help = "Loads Tcl array of 8/16/32 bit numbers "
			"from target memory",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	{
		.name = "eventlist",
		.handler = handle_target_event_list,
		.mode = COMMAND_EXEC,
		.help = "displays a table of events defined for this target",
		.usage = "",
	},
	{
		.name = "curstate",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_current_state,
		.help = "displays the current state of this target",
	},
	{
		.name = "arp_examine",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_examine,
		.help = "used internally for reset processing",
		.usage = "['allow-defer']",
	},
	{
		.name = "was_examined",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_was_examined,
		.help = "used internally for reset processing",
	},
	{
		.name = "examine_deferred",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_examine_deferred,
		.help = "used internally for reset processing",
	},
	{
		.name = "arp_halt_gdb",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_halt_gdb,
		.help = "used internally for reset processing to halt GDB",
	},
	{
		.name = "arp_poll",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_poll,
		.help = "used internally for reset processing",
	},
	{
		.name = "arp_reset",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_reset,
		.help = "used internally for reset processing",
	},
	{
		.name = "arp_halt",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_halt,
		.help = "used internally for reset processing",
	},
	{
		.name = "arp_waitstate",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_wait_state,
		.help = "used internally for reset processing",
	},
	{
		.name = "invoke-event",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_invoke_event,
		.help = "invoke handler for specified event",
		.usage = "event_name",
	},
	COMMAND_REGISTRATION_DONE
};
6153
/*
 * Implementation of 'target create <name> <type> [options...]'.
 *
 * Allocates and default-initializes a new struct target, resolves the
 * target type, applies the remaining arguments as "configure" options,
 * invokes the type's target_create hook, and registers the per-target
 * command group under <name>.  On success the new target becomes the
 * context's current target.  On any failure every resource acquired so
 * far is released by hand (note the repeated free chains below).
 */
static int target_create(struct jim_getopt_info *goi)
{
	Jim_Obj *new_cmd;
	Jim_Cmd *cmd;
	const char *cp;
	int e;
	int x;
	struct target *target;
	struct command_context *cmd_ctx;

	cmd_ctx = current_command_context(goi->interp);
	assert(cmd_ctx);

	if (goi->argc < 3) {
		Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
		return JIM_ERR;
	}

	/* COMMAND */
	jim_getopt_obj(goi, &new_cmd);
	/* does this command exist? */
	cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
	if (cmd) {
		cp = Jim_GetString(new_cmd, NULL);
		Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
		return JIM_ERR;
	}

	/* TYPE */
	e = jim_getopt_string(goi, &cp, NULL);
	if (e != JIM_OK)
		return e;
	/* the transport may substitute its own target type name */
	struct transport *tr = get_current_transport();
	if (tr->override_target) {
		e = tr->override_target(&cp);
		if (e != ERROR_OK) {
			LOG_ERROR("The selected transport doesn't support this target");
			return JIM_ERR;
		}
		LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
	}
	/* now does target type exist */
	for (x = 0 ; target_types[x] ; x++) {
		if (strcmp(cp, target_types[x]->name) == 0) {
			/* found */
			break;
		}
	}
	if (!target_types[x]) {
		/* build a "try one of a, b or c" message from the full
		 * type table */
		Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
		for (x = 0 ; target_types[x] ; x++) {
			if (target_types[x + 1]) {
				Jim_AppendStrings(goi->interp,
						Jim_GetResult(goi->interp),
						target_types[x]->name,
						", ", NULL);
			} else {
				Jim_AppendStrings(goi->interp,
						Jim_GetResult(goi->interp),
						" or ",
						target_types[x]->name, NULL);
			}
		}
		return JIM_ERR;
	}

	/* Create it */
	target = calloc(1, sizeof(struct target));
	if (!target) {
		LOG_ERROR("Out of memory");
		return JIM_ERR;
	}

	/* set empty smp cluster */
	target->smp_targets = &empty_smp_targets;

	/* set target number */
	target->target_number = new_target_number();

	/* allocate memory for each unique target type */
	target->type = malloc(sizeof(struct target_type));
	if (!target->type) {
		LOG_ERROR("Out of memory");
		free(target);
		return JIM_ERR;
	}

	memcpy(target->type, target_types[x], sizeof(struct target_type));

	/* default to first core, override with -coreid */
	target->coreid = 0;

	target->working_area = 0x0;
	target->working_area_size = 0x0;
	target->working_areas = NULL;
	target->backup_working_area = 0;

	target->state = TARGET_UNKNOWN;
	target->debug_reason = DBG_REASON_UNDEFINED;
	target->reg_cache = NULL;
	target->breakpoints = NULL;
	target->watchpoints = NULL;
	target->next = NULL;
	target->arch_info = NULL;

	target->verbose_halt_msg = true;

	target->halt_issued = false;

	/* initialize trace information */
	target->trace_info = calloc(1, sizeof(struct trace));
	if (!target->trace_info) {
		LOG_ERROR("Out of memory");
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	target->dbgmsg = NULL;
	target->dbg_msg_enabled = 0;

	target->endianness = TARGET_ENDIAN_UNKNOWN;

	target->rtos = NULL;
	target->rtos_auto_detect = false;

	target->gdb_port_override = NULL;
	target->gdb_max_connections = 1;

	/* Do the rest as "configure" options */
	goi->isconfigure = 1;
	e = target_configure(goi, target);

	if (e == JIM_OK) {
		/* exactly one of -dap / -chain-position must have been
		 * given, matching what the target type expects */
		if (target->has_dap) {
			if (!target->dap_configured) {
				Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		} else {
			if (!target->tap_configured) {
				Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		}
		/* tap must be set after target was configured */
		if (!target->tap)
			e = JIM_ERR;
	}

	if (e != JIM_OK) {
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return e;
	}

	if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
		/* default endian to little if not specified */
		target->endianness = TARGET_LITTLE_ENDIAN;
	}

	cp = Jim_GetString(new_cmd, NULL);
	target->cmd_name = strdup(cp);
	if (!target->cmd_name) {
		LOG_ERROR("Out of memory");
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	if (target->type->target_create) {
		e = (*(target->type->target_create))(target, goi->interp);
		if (e != ERROR_OK) {
			LOG_DEBUG("target_create failed");
			free(target->cmd_name);
			rtos_destroy(target);
			free(target->gdb_port_override);
			free(target->trace_info);
			free(target->type);
			free(target);
			return JIM_ERR;
		}
	}

	/* create the target specific commands */
	if (target->type->commands) {
		e = register_commands(cmd_ctx, NULL, target->type->commands);
		if (e != ERROR_OK)
			LOG_ERROR("unable to register '%s' commands", cp);
	}

	/* now - create the new target name command */
	const struct command_registration target_subcommands[] = {
		{
			.chain = target_instance_command_handlers,
		},
		{
			.chain = target->type->commands,
		},
		COMMAND_REGISTRATION_DONE
	};
	const struct command_registration target_commands[] = {
		{
			.name = cp,
			.mode = COMMAND_ANY,
			.help = "target command group",
			.usage = "",
			.chain = target_subcommands,
		},
		COMMAND_REGISTRATION_DONE
	};
	e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
	if (e != ERROR_OK) {
		if (target->type->deinit_target)
			target->type->deinit_target(target);
		free(target->cmd_name);
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* append to end of list */
	append_to_list_all_targets(target);

	cmd_ctx->current_target = target;
	return JIM_OK;
}
6390
6391 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6392 {
6393 if (argc != 1) {
6394 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6395 return JIM_ERR;
6396 }
6397 struct command_context *cmd_ctx = current_command_context(interp);
6398 assert(cmd_ctx);
6399
6400 struct target *target = get_current_target_or_null(cmd_ctx);
6401 if (target)
6402 Jim_SetResultString(interp, target_name(target), -1);
6403 return JIM_OK;
6404 }
6405
6406 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6407 {
6408 if (argc != 1) {
6409 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6410 return JIM_ERR;
6411 }
6412 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6413 for (unsigned x = 0; target_types[x]; x++) {
6414 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6415 Jim_NewStringObj(interp, target_types[x]->name, -1));
6416 }
6417 return JIM_OK;
6418 }
6419
6420 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6421 {
6422 if (argc != 1) {
6423 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6424 return JIM_ERR;
6425 }
6426 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6427 struct target *target = all_targets;
6428 while (target) {
6429 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6430 Jim_NewStringObj(interp, target_name(target), -1));
6431 target = target->next;
6432 }
6433 return JIM_OK;
6434 }
6435
6436 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6437 {
6438 int i;
6439 const char *targetname;
6440 int retval, len;
6441 static int smp_group = 1;
6442 struct target *target = NULL;
6443 struct target_list *head, *new;
6444
6445 retval = 0;
6446 LOG_DEBUG("%d", argc);
6447 /* argv[1] = target to associate in smp
6448 * argv[2] = target to associate in smp
6449 * argv[3] ...
6450 */
6451
6452 struct list_head *lh = malloc(sizeof(*lh));
6453 if (!lh) {
6454 LOG_ERROR("Out of memory");
6455 return JIM_ERR;
6456 }
6457 INIT_LIST_HEAD(lh);
6458
6459 for (i = 1; i < argc; i++) {
6460
6461 targetname = Jim_GetString(argv[i], &len);
6462 target = get_target(targetname);
6463 LOG_DEBUG("%s ", targetname);
6464 if (target) {
6465 new = malloc(sizeof(struct target_list));
6466 new->target = target;
6467 list_add_tail(&new->lh, lh);
6468 }
6469 }
6470 /* now parse the list of cpu and put the target in smp mode*/
6471 foreach_smp_target(head, lh) {
6472 target = head->target;
6473 target->smp = smp_group;
6474 target->smp_targets = lh;
6475 }
6476 smp_group++;
6477
6478 if (target && target->rtos)
6479 retval = rtos_smp_init(target);
6480
6481 return retval;
6482 }
6483
6484
6485 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6486 {
6487 struct jim_getopt_info goi;
6488 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6489 if (goi.argc < 3) {
6490 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6491 "<name> <target_type> [<target_options> ...]");
6492 return JIM_ERR;
6493 }
6494 return target_create(&goi);
6495 }
6496
/* Subcommands of the config-stage 'target' command:
 * init/create/current/types/names/smp. Chained from
 * target_command_handlers below. */
static const struct command_registration target_subcommand_handlers[] = {
	{
		.name = "init",
		.mode = COMMAND_CONFIG,
		.handler = handle_target_init_command,
		.help = "initialize targets",
		.usage = "",
	},
	{
		.name = "create",
		.mode = COMMAND_CONFIG,
		.jim_handler = jim_target_create,
		.usage = "name type '-chain-position' name [options ...]",
		.help = "Creates and selects a new target",
	},
	{
		.name = "current",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_current,
		.help = "Returns the currently selected target",
	},
	{
		.name = "types",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_types,
		.help = "Returns the available target types as "
				"a list of strings",
	},
	{
		.name = "names",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_names,
		.help = "Returns the names of all targets as a list of strings",
	},
	{
		.name = "smp",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_smp,
		.usage = "targetname1 targetname2 ...",
		.help = "gather several target in a smp list"
	},

	COMMAND_REGISTRATION_DONE
};
6541
/* One chunk of an image staged in server memory by 'fast_load_image',
 * to be written to the target later by 'fast_load'. */
struct fast_load {
	target_addr_t address;	/* target address the data will be written to */
	uint8_t *data;		/* heap-allocated copy of the (clipped) section data */
	int length;		/* number of bytes in 'data' */

};

/* Number of entries in the 'fastload' array below. */
static int fastload_num;
/* Staged image chunks; NULL when no image has been loaded. */
static struct fast_load *fastload;
6551
6552 static void free_fastload(void)
6553 {
6554 if (fastload) {
6555 for (int i = 0; i < fastload_num; i++)
6556 free(fastload[i].data);
6557 free(fastload);
6558 fastload = NULL;
6559 }
6560 }
6561
/* 'fast_load_image' command: reads an image file and stages its sections
 * (clipped to [min_address, max_address)) in server memory, so a later
 * 'fast_load' can write them to the target; primarily for profiling.
 * usage: fast_load_image filename address ['bin'|'ihex'|'elf'|'s19']
 *        [min_address [max_length]] */
COMMAND_HANDLER(handle_fast_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* wraps to the maximum address */

	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct duration bench;
	duration_start(&bench);

	/* optional third argument selects the image type explicitly */
	retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
	if (retval != ERROR_OK)
		return retval;

	image_size = 0x0;
	retval = ERROR_OK;
	fastload_num = image.num_sections;
	fastload = malloc(sizeof(struct fast_load)*image.num_sections);
	if (!fastload) {
		command_print(CMD, "out of memory");
		image_close(&image);
		return ERROR_FAIL;
	}
	/* zero-fill so free_fastload() is safe even if we bail out early */
	memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD, "error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* keep only sections overlapping [min_address, max_address) */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {
			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above max_address */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			fastload[i].address = image.sections[i].base_address + offset;
			fastload[i].data = malloc(length);
			if (!fastload[i].data) {
				free(buffer);
				command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
							  length);
				retval = ERROR_FAIL;
				break;
			}
			memcpy(fastload[i].data, buffer + offset, length);
			fastload[i].length = length;

			image_size += length;
			command_print(CMD, "%u bytes written at address 0x%8.8x",
						  (unsigned int)length,
						  ((unsigned int)(image.sections[i].base_address + offset)));
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "Loaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));

		command_print(CMD,
				"WARNING: image has not been loaded to target!"
				"You can issue a 'fast_load' to finish loading.");
	}

	image_close(&image);

	/* on any failure drop the partially-staged image */
	if (retval != ERROR_OK)
		free_fastload();

	return retval;
}
6663
6664 COMMAND_HANDLER(handle_fast_load_command)
6665 {
6666 if (CMD_ARGC > 0)
6667 return ERROR_COMMAND_SYNTAX_ERROR;
6668 if (!fastload) {
6669 LOG_ERROR("No image in memory");
6670 return ERROR_FAIL;
6671 }
6672 int i;
6673 int64_t ms = timeval_ms();
6674 int size = 0;
6675 int retval = ERROR_OK;
6676 for (i = 0; i < fastload_num; i++) {
6677 struct target *target = get_current_target(CMD_CTX);
6678 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6679 (unsigned int)(fastload[i].address),
6680 (unsigned int)(fastload[i].length));
6681 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6682 if (retval != ERROR_OK)
6683 break;
6684 size += fastload[i].length;
6685 }
6686 if (retval == ERROR_OK) {
6687 int64_t after = timeval_ms();
6688 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6689 }
6690 return retval;
6691 }
6692
/* Top-level 'targets' command plus the config-stage 'target' command
 * (whose subcommands are chained from target_subcommand_handlers). */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
6711
/* Register the top-level target commands ('target', 'targets') with the
 * given command context. Returns a standard OpenOCD error code. */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6716
/* When true (the default), nag the user after each reset about options
 * that could improve performance; toggled by the 'reset_nag' command. */
static bool target_reset_nag = true;

/* Accessor for the 'reset_nag' setting. */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
6723
/* 'reset_nag' command: enable, disable or (with no argument) display the
 * post-reset performance nag setting.
 * usage: reset_nag ['enable'|'disable'] */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
			&target_reset_nag, "Nag after each reset about options to improve "
			"performance");
}
6730
6731 COMMAND_HANDLER(handle_ps_command)
6732 {
6733 struct target *target = get_current_target(CMD_CTX);
6734 char *display;
6735 if (target->state != TARGET_HALTED) {
6736 LOG_INFO("target not halted !!");
6737 return ERROR_OK;
6738 }
6739
6740 if ((target->rtos) && (target->rtos->type)
6741 && (target->rtos->type->ps_command)) {
6742 display = target->rtos->type->ps_command(target);
6743 command_print(CMD, "%s", display);
6744 free(display);
6745 return ERROR_OK;
6746 } else {
6747 LOG_INFO("failed");
6748 return ERROR_TARGET_FAILURE;
6749 }
6750 }
6751
/* Print an optional label followed by 'size' bytes of 'buf' as
 * space-separated hex pairs, then terminate the line. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
	if (text)
		command_print_sameline(cmd, "%s", text);

	for (int idx = 0; idx < size; ++idx)
		command_print_sameline(cmd, " %02x", buf[idx]);

	command_print(cmd, " ");
}
6760
6761 COMMAND_HANDLER(handle_test_mem_access_command)
6762 {
6763 struct target *target = get_current_target(CMD_CTX);
6764 uint32_t test_size;
6765 int retval = ERROR_OK;
6766
6767 if (target->state != TARGET_HALTED) {
6768 LOG_INFO("target not halted !!");
6769 return ERROR_FAIL;
6770 }
6771
6772 if (CMD_ARGC != 1)
6773 return ERROR_COMMAND_SYNTAX_ERROR;
6774
6775 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6776
6777 /* Test reads */
6778 size_t num_bytes = test_size + 4;
6779
6780 struct working_area *wa = NULL;
6781 retval = target_alloc_working_area(target, num_bytes, &wa);
6782 if (retval != ERROR_OK) {
6783 LOG_ERROR("Not enough working area");
6784 return ERROR_FAIL;
6785 }
6786
6787 uint8_t *test_pattern = malloc(num_bytes);
6788
6789 for (size_t i = 0; i < num_bytes; i++)
6790 test_pattern[i] = rand();
6791
6792 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6793 if (retval != ERROR_OK) {
6794 LOG_ERROR("Test pattern write failed");
6795 goto out;
6796 }
6797
6798 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6799 for (int size = 1; size <= 4; size *= 2) {
6800 for (int offset = 0; offset < 4; offset++) {
6801 uint32_t count = test_size / size;
6802 size_t host_bufsiz = (count + 2) * size + host_offset;
6803 uint8_t *read_ref = malloc(host_bufsiz);
6804 uint8_t *read_buf = malloc(host_bufsiz);
6805
6806 for (size_t i = 0; i < host_bufsiz; i++) {
6807 read_ref[i] = rand();
6808 read_buf[i] = read_ref[i];
6809 }
6810 command_print_sameline(CMD,
6811 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6812 size, offset, host_offset ? "un" : "");
6813
6814 struct duration bench;
6815 duration_start(&bench);
6816
6817 retval = target_read_memory(target, wa->address + offset, size, count,
6818 read_buf + size + host_offset);
6819
6820 duration_measure(&bench);
6821
6822 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6823 command_print(CMD, "Unsupported alignment");
6824 goto next;
6825 } else if (retval != ERROR_OK) {
6826 command_print(CMD, "Memory read failed");
6827 goto next;
6828 }
6829
6830 /* replay on host */
6831 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6832
6833 /* check result */
6834 int result = memcmp(read_ref, read_buf, host_bufsiz);
6835 if (result == 0) {
6836 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6837 duration_elapsed(&bench),
6838 duration_kbps(&bench, count * size));
6839 } else {
6840 command_print(CMD, "Compare failed");
6841 binprint(CMD, "ref:", read_ref, host_bufsiz);
6842 binprint(CMD, "buf:", read_buf, host_bufsiz);
6843 }
6844 next:
6845 free(read_ref);
6846 free(read_buf);
6847 }
6848 }
6849 }
6850
6851 out:
6852 free(test_pattern);
6853
6854 target_free_working_area(target, wa);
6855
6856 /* Test writes */
6857 num_bytes = test_size + 4 + 4 + 4;
6858
6859 retval = target_alloc_working_area(target, num_bytes, &wa);
6860 if (retval != ERROR_OK) {
6861 LOG_ERROR("Not enough working area");
6862 return ERROR_FAIL;
6863 }
6864
6865 test_pattern = malloc(num_bytes);
6866
6867 for (size_t i = 0; i < num_bytes; i++)
6868 test_pattern[i] = rand();
6869
6870 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6871 for (int size = 1; size <= 4; size *= 2) {
6872 for (int offset = 0; offset < 4; offset++) {
6873 uint32_t count = test_size / size;
6874 size_t host_bufsiz = count * size + host_offset;
6875 uint8_t *read_ref = malloc(num_bytes);
6876 uint8_t *read_buf = malloc(num_bytes);
6877 uint8_t *write_buf = malloc(host_bufsiz);
6878
6879 for (size_t i = 0; i < host_bufsiz; i++)
6880 write_buf[i] = rand();
6881 command_print_sameline(CMD,
6882 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6883 size, offset, host_offset ? "un" : "");
6884
6885 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6886 if (retval != ERROR_OK) {
6887 command_print(CMD, "Test pattern write failed");
6888 goto nextw;
6889 }
6890
6891 /* replay on host */
6892 memcpy(read_ref, test_pattern, num_bytes);
6893 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6894
6895 struct duration bench;
6896 duration_start(&bench);
6897
6898 retval = target_write_memory(target, wa->address + size + offset, size, count,
6899 write_buf + host_offset);
6900
6901 duration_measure(&bench);
6902
6903 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6904 command_print(CMD, "Unsupported alignment");
6905 goto nextw;
6906 } else if (retval != ERROR_OK) {
6907 command_print(CMD, "Memory write failed");
6908 goto nextw;
6909 }
6910
6911 /* read back */
6912 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6913 if (retval != ERROR_OK) {
6914 command_print(CMD, "Test pattern write failed");
6915 goto nextw;
6916 }
6917
6918 /* check result */
6919 int result = memcmp(read_ref, read_buf, num_bytes);
6920 if (result == 0) {
6921 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6922 duration_elapsed(&bench),
6923 duration_kbps(&bench, count * size));
6924 } else {
6925 command_print(CMD, "Compare failed");
6926 binprint(CMD, "ref:", read_ref, num_bytes);
6927 binprint(CMD, "buf:", read_buf, num_bytes);
6928 }
6929 nextw:
6930 free(read_ref);
6931 free(read_buf);
6932 }
6933 }
6934 }
6935
6936 free(test_pattern);
6937
6938 target_free_working_area(target, wa);
6939 return retval;
6940 }
6941
/* Run-time (EXEC mode, plus a few ANY mode) target commands: image
 * load/verify, register access, run control, memory display/write,
 * breakpoints/watchpoints and diagnostics. Registered per context by
 * target_register_user_commands(). */
static const struct command_registration target_exec_command_handlers[] = {
	{
		.name = "fast_load_image",
		.handler = handle_fast_load_image_command,
		.mode = COMMAND_ANY,
		.help = "Load image into server memory for later use by "
			"fast_load; primarily for profiling",
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address [max_length]]",
	},
	{
		.name = "fast_load",
		.handler = handle_fast_load_command,
		.mode = COMMAND_EXEC,
		.help = "loads active fast load image to current target "
			"- mainly for profiling purposes",
		.usage = "",
	},
	{
		.name = "profile",
		.handler = handle_profile_command,
		.mode = COMMAND_EXEC,
		.usage = "seconds filename [start end]",
		.help = "profiling samples the CPU PC",
	},
	/** @todo don't register virt2phys() unless target supports it */
	{
		.name = "virt2phys",
		.handler = handle_virt2phys_command,
		.mode = COMMAND_ANY,
		.help = "translate a virtual address into a physical address",
		.usage = "virtual_address",
	},
	{
		.name = "reg",
		.handler = handle_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display (reread from target with \"force\") or set a register; "
			"with no arguments, displays all registers and their values",
		.usage = "[(register_number|register_name) [(value|'force')]]",
	},
	{
		.name = "poll",
		.handler = handle_poll_command,
		.mode = COMMAND_EXEC,
		.help = "poll target state; or reconfigure background polling",
		.usage = "['on'|'off']",
	},
	{
		.name = "wait_halt",
		.handler = handle_wait_halt_command,
		.mode = COMMAND_EXEC,
		.help = "wait up to the specified number of milliseconds "
			"(default 5000) for a previously requested halt",
		.usage = "[milliseconds]",
	},
	{
		.name = "halt",
		.handler = handle_halt_command,
		.mode = COMMAND_EXEC,
		.help = "request target to halt, then wait up to the specified "
			"number of milliseconds (default 5000) for it to complete",
		.usage = "[milliseconds]",
	},
	{
		.name = "resume",
		.handler = handle_resume_command,
		.mode = COMMAND_EXEC,
		.help = "resume target execution from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "reset",
		.handler = handle_reset_command,
		.mode = COMMAND_EXEC,
		.usage = "[run|halt|init]",
		.help = "Reset all targets into the specified mode. "
			"Default reset mode is run, if not given.",
	},
	{
		.name = "soft_reset_halt",
		.handler = handle_soft_reset_halt_command,
		.mode = COMMAND_EXEC,
		.usage = "",
		.help = "halt the target and do a soft reset",
	},
	{
		.name = "step",
		.handler = handle_step_command,
		.mode = COMMAND_EXEC,
		.help = "step one instruction from current PC or address",
		.usage = "[address]",
	},
	/* memory display commands: mdd/mdw/mdh/mdb share one handler that
	 * derives the access size from the command name */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory double-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory half-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory bytes",
		.usage = "['phys'] address [count]",
	},
	/* memory write commands: mwd/mww/mwh/mwb likewise share a handler */
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory double-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory half-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory byte",
		.usage = "['phys'] address value [count]",
	},
	/* breakpoint / watchpoint management */
	{
		.name = "bp",
		.handler = handle_bp_command,
		.mode = COMMAND_EXEC,
		.help = "list or set hardware or software breakpoint",
		.usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
	},
	{
		.name = "rbp",
		.handler = handle_rbp_command,
		.mode = COMMAND_EXEC,
		.help = "remove breakpoint",
		.usage = "'all' | address",
	},
	{
		.name = "wp",
		.handler = handle_wp_command,
		.mode = COMMAND_EXEC,
		.help = "list (no params) or create watchpoints",
		.usage = "[address length [('r'|'w'|'a') value [mask]]]",
	},
	{
		.name = "rwp",
		.handler = handle_rwp_command,
		.mode = COMMAND_EXEC,
		.help = "remove watchpoint",
		.usage = "address",
	},
	/* image load / dump / verify */
	{
		.name = "load_image",
		.handler = handle_load_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address] [max_length]",
	},
	{
		.name = "dump_image",
		.handler = handle_dump_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address size",
	},
	{
		.name = "verify_image_checksum",
		.handler = handle_verify_image_checksum_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "verify_image",
		.handler = handle_verify_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "test_image",
		.handler = handle_test_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	/* Tcl-friendly register and memory access (Jim handlers) */
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	{
		.name = "reset_nag",
		.handler = handle_target_reset_nag,
		.mode = COMMAND_ANY,
		.help = "Nag after each reset about options that could have been "
			"enabled to improve performance.",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "ps",
		.handler = handle_ps_command,
		.mode = COMMAND_EXEC,
		.help = "list all tasks",
		.usage = "",
	},
	{
		.name = "test_mem_access",
		.handler = handle_test_mem_access_command,
		.mode = COMMAND_EXEC,
		.help = "Test the target's memory access functions",
		.usage = "size",
	},

	COMMAND_REGISTRATION_DONE
};
7203 static int target_register_user_commands(struct command_context *cmd_ctx)
7204 {
7205 int retval = ERROR_OK;
7206 retval = target_request_register_commands(cmd_ctx);
7207 if (retval != ERROR_OK)
7208 return retval;
7209
7210 retval = trace_register_commands(cmd_ctx);
7211 if (retval != ERROR_OK)
7212 return retval;
7213
7214
7215 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7216 }

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will link the new login method to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)