semihosting: User defined operation, Tcl command exec on host
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/align.h>
45 #include <helper/time_support.h>
46 #include <jtag/jtag.h>
47 #include <flash/nor/core.h>
48
49 #include "target.h"
50 #include "target_type.h"
51 #include "target_request.h"
52 #include "breakpoints.h"
53 #include "register.h"
54 #include "trace.h"
55 #include "image.h"
56 #include "rtos/rtos.h"
57 #include "transport/transport.h"
58 #include "arm_cti.h"
59
/* default halt wait timeout (ms) */
#define DEFAULT_HALT_TIMEOUT 5000

/* Forward declarations for static helpers defined later in this file. */
static int target_read_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, uint8_t *buffer);
static int target_write_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, const uint8_t *buffer);
static int target_array2mem(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_mem2array(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_register_user_commands(struct command_context *cmd_ctx);
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info);
static int target_gdb_fileio_end_default(struct target *target, int retcode,
		int fileio_errno, bool ctrl_c);
76
/* targets: each target_type below is defined in its own driver file
 * and collected into the target_types[] table that follows. */
extern struct target_type arm7tdmi_target;
extern struct target_type arm720t_target;
extern struct target_type arm9tdmi_target;
extern struct target_type arm920t_target;
extern struct target_type arm966e_target;
extern struct target_type arm946e_target;
extern struct target_type arm926ejs_target;
extern struct target_type fa526_target;
extern struct target_type feroceon_target;
extern struct target_type dragonite_target;
extern struct target_type xscale_target;
extern struct target_type cortexm_target;
extern struct target_type cortexa_target;
extern struct target_type aarch64_target;
extern struct target_type cortexr4_target;
extern struct target_type arm11_target;
extern struct target_type ls1_sap_target;
extern struct target_type mips_m4k_target;
extern struct target_type mips_mips64_target;
extern struct target_type avr_target;
extern struct target_type dsp563xx_target;
extern struct target_type dsp5680xx_target;
extern struct target_type testee_target;
extern struct target_type avr32_ap7k_target;
extern struct target_type hla_target;
extern struct target_type nds32_v2_target;
extern struct target_type nds32_v3_target;
extern struct target_type nds32_v3m_target;
extern struct target_type or1k_target;
extern struct target_type quark_x10xx_target;
extern struct target_type quark_d20xx_target;
extern struct target_type stm8_target;
extern struct target_type riscv_target;
extern struct target_type mem_ap_target;
extern struct target_type esirisc_target;
extern struct target_type arcv2_target;

/* NULL-terminated table of every supported target type; scanned when a
 * target is created to match the configured type name. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&mips_mips64_target,
	NULL,
};
154
/* head of the singly linked list of all configured targets */
struct target *all_targets;
/* registered callback chains for target events and periodic timers */
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
/* NOTE(review): presumably the time (ms) of the next pending timer
 * callback -- confirm against the timer-callback code later in this file */
static int64_t target_timer_next_event_value;
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
162
/* Tcl spellings accepted for assert/deassert arguments */
static const struct jim_nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};
172
/* Human-readable names for ERROR_TARGET_* codes (see target_strerror_safe) */
static const struct jim_nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
187
188 static const char *target_strerror_safe(int err)
189 {
190 const struct jim_nvp *n;
191
192 n = jim_nvp_value2name_simple(nvp_error_target, err);
193 if (!n->name)
194 return "unknown";
195 else
196 return n->name;
197 }
198
/* Tcl event names for enum target_event (used by "$target configure -event ...") */
static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START,         .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE,    .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT,        .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST,   .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE,  .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT,          .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END,           .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	/* user-defined semihosting operations, dispatched to Tcl handlers */
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x100, .name = "semihosting-user-cmd-0x100" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x101, .name = "semihosting-user-cmd-0x101" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x102, .name = "semihosting-user-cmd-0x102" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x103, .name = "semihosting-user-cmd-0x103" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x104, .name = "semihosting-user-cmd-0x104" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x105, .name = "semihosting-user-cmd-0x105" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x106, .name = "semihosting-user-cmd-0x106" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x107, .name = "semihosting-user-cmd-0x107" },

	{ .name = NULL, .value = -1 }
};
250
/* printable names for enum target_state */
static const struct jim_nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted",  .value = TARGET_HALTED },
	{ .name = "reset",   .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};
259
/* printable names for enum target_debug_reason */
static const struct jim_nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request",             .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint",                .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint",                .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step",               .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted",         .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit",              .value = DBG_REASON_EXIT },
	{ .name = "exception-catch",           .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined",                 .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
272
/* accepted Tcl spellings for target endianness */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big",    .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be",     .value = TARGET_BIG_ENDIAN },
	{ .name = "le",     .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL,     .value = -1 },
};
280
/* printable names for enum target_reset_mode */
static const struct jim_nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run",     .value = RESET_RUN },
	{ .name = "halt",    .value = RESET_HALT },
	{ .name = "init",    .value = RESET_INIT },
	{ .name = NULL,      .value = -1 },
};
288
289 const char *debug_reason_name(struct target *t)
290 {
291 const char *cp;
292
293 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
294 t->debug_reason)->name;
295 if (!cp) {
296 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
297 cp = "(*BUG*unknown*BUG*)";
298 }
299 return cp;
300 }
301
302 const char *target_state_name(struct target *t)
303 {
304 const char *cp;
305 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
306 if (!cp) {
307 LOG_ERROR("Invalid target state: %d", (int)(t->state));
308 cp = "(*BUG*unknown*BUG*)";
309 }
310
311 if (!target_was_examined(t) && t->defer_examine)
312 cp = "examine deferred";
313
314 return cp;
315 }
316
317 const char *target_event_name(enum target_event event)
318 {
319 const char *cp;
320 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
321 if (!cp) {
322 LOG_ERROR("Invalid target event: %d", (int)(event));
323 cp = "(*BUG*unknown*BUG*)";
324 }
325 return cp;
326 }
327
328 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
329 {
330 const char *cp;
331 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
332 if (!cp) {
333 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
334 cp = "(*BUG*unknown*BUG*)";
335 }
336 return cp;
337 }
338
339 /* determine the number of the new target */
340 static int new_target_number(void)
341 {
342 struct target *t;
343 int x;
344
345 /* number is 0 based */
346 x = -1;
347 t = all_targets;
348 while (t) {
349 if (x < t->target_number)
350 x = t->target_number;
351 t = t->next;
352 }
353 return x + 1;
354 }
355
356 static void append_to_list_all_targets(struct target *target)
357 {
358 struct target **t = &all_targets;
359
360 while (*t)
361 t = &((*t)->next);
362 *t = target;
363 }
364
365 /* read a uint64_t from a buffer in target memory endianness */
366 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
367 {
368 if (target->endianness == TARGET_LITTLE_ENDIAN)
369 return le_to_h_u64(buffer);
370 else
371 return be_to_h_u64(buffer);
372 }
373
374 /* read a uint32_t from a buffer in target memory endianness */
375 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
376 {
377 if (target->endianness == TARGET_LITTLE_ENDIAN)
378 return le_to_h_u32(buffer);
379 else
380 return be_to_h_u32(buffer);
381 }
382
383 /* read a uint24_t from a buffer in target memory endianness */
384 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
385 {
386 if (target->endianness == TARGET_LITTLE_ENDIAN)
387 return le_to_h_u24(buffer);
388 else
389 return be_to_h_u24(buffer);
390 }
391
392 /* read a uint16_t from a buffer in target memory endianness */
393 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
394 {
395 if (target->endianness == TARGET_LITTLE_ENDIAN)
396 return le_to_h_u16(buffer);
397 else
398 return be_to_h_u16(buffer);
399 }
400
401 /* write a uint64_t to a buffer in target memory endianness */
402 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
403 {
404 if (target->endianness == TARGET_LITTLE_ENDIAN)
405 h_u64_to_le(buffer, value);
406 else
407 h_u64_to_be(buffer, value);
408 }
409
410 /* write a uint32_t to a buffer in target memory endianness */
411 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
412 {
413 if (target->endianness == TARGET_LITTLE_ENDIAN)
414 h_u32_to_le(buffer, value);
415 else
416 h_u32_to_be(buffer, value);
417 }
418
419 /* write a uint24_t to a buffer in target memory endianness */
420 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
421 {
422 if (target->endianness == TARGET_LITTLE_ENDIAN)
423 h_u24_to_le(buffer, value);
424 else
425 h_u24_to_be(buffer, value);
426 }
427
428 /* write a uint16_t to a buffer in target memory endianness */
429 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
430 {
431 if (target->endianness == TARGET_LITTLE_ENDIAN)
432 h_u16_to_le(buffer, value);
433 else
434 h_u16_to_be(buffer, value);
435 }
436
/* write a uint8_t to a buffer (single byte: endianness is irrelevant,
 * the target parameter exists only for API symmetry) */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	*buffer = value;
}
442
443 /* write a uint64_t array to a buffer in target memory endianness */
444 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
445 {
446 uint32_t i;
447 for (i = 0; i < count; i++)
448 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
449 }
450
451 /* write a uint32_t array to a buffer in target memory endianness */
452 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
453 {
454 uint32_t i;
455 for (i = 0; i < count; i++)
456 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
457 }
458
459 /* write a uint16_t array to a buffer in target memory endianness */
460 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
461 {
462 uint32_t i;
463 for (i = 0; i < count; i++)
464 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
465 }
466
467 /* write a uint64_t array to a buffer in target memory endianness */
468 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
469 {
470 uint32_t i;
471 for (i = 0; i < count; i++)
472 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
473 }
474
475 /* write a uint32_t array to a buffer in target memory endianness */
476 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
477 {
478 uint32_t i;
479 for (i = 0; i < count; i++)
480 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
481 }
482
483 /* write a uint16_t array to a buffer in target memory endianness */
484 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
485 {
486 uint32_t i;
487 for (i = 0; i < count; i++)
488 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
489 }
490
491 /* return a pointer to a configured target; id is name or number */
492 struct target *get_target(const char *id)
493 {
494 struct target *target;
495
496 /* try as tcltarget name */
497 for (target = all_targets; target; target = target->next) {
498 if (!target_name(target))
499 continue;
500 if (strcmp(id, target_name(target)) == 0)
501 return target;
502 }
503
504 /* It's OK to remove this fallback sometime after August 2010 or so */
505
506 /* no match, try as number */
507 unsigned num;
508 if (parse_uint(id, &num) != ERROR_OK)
509 return NULL;
510
511 for (target = all_targets; target; target = target->next) {
512 if (target->target_number == (int)num) {
513 LOG_WARNING("use '%s' as target identifier, not '%u'",
514 target_name(target), num);
515 return target;
516 }
517 }
518
519 return NULL;
520 }
521
522 /* returns a pointer to the n-th configured target */
523 struct target *get_target_by_num(int num)
524 {
525 struct target *target = all_targets;
526
527 while (target) {
528 if (target->target_number == num)
529 return target;
530 target = target->next;
531 }
532
533 return NULL;
534 }
535
/* Current target of the command context; aborts the process if none is
 * set, since that indicates an internal bookkeeping bug. */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_current_target_or_null(cmd_ctx);
	if (target)
		return target;

	LOG_ERROR("BUG: current_target out of bounds");
	exit(-1);
}
547
548 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
549 {
550 return cmd_ctx->current_target_override
551 ? cmd_ctx->current_target_override
552 : cmd_ctx->current_target;
553 }
554
/*
 * Poll the target's current state via its driver.
 * Additionally, if a halt was requested earlier (target_halt()) and the
 * target still has not halted after DEFAULT_HALT_TIMEOUT ms, give up on
 * the request and notify GDB through the gdb-halt event.
 */
int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->halt_issued) {
		if (target->state == TARGET_HALTED)
			target->halt_issued = false;
		else {
			/* elapsed time (ms) since the halt was requested */
			int64_t t = timeval_ms() - target->halt_issued_time;
			if (t > DEFAULT_HALT_TIMEOUT) {
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}
584
585 int target_halt(struct target *target)
586 {
587 int retval;
588 /* We can't poll until after examine */
589 if (!target_was_examined(target)) {
590 LOG_ERROR("Target not examined yet");
591 return ERROR_FAIL;
592 }
593
594 retval = target->type->halt(target);
595 if (retval != ERROR_OK)
596 return retval;
597
598 target->halt_issued = true;
599 target->halt_issued_time = timeval_ms();
600
601 return ERROR_OK;
602 }
603
/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 *	of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 *	should be skipped.  (For example, maybe execution was stopped by
 *	such a breakpoint, in which case it would be counterproductive to
 *	let it re-trigger.
 * @param debug_execution False if all working areas allocated by OpenOCD
 *	should be released and/or restored to their original contents.
 *	(This would for example be true to run some downloaded "helper"
 *	algorithm code, which resides in one such working buffer and uses
 *	another for data storage.)
 *
 * @returns ERROR_OK on success, ERROR_FAIL when the target has not been
 *	examined yet, or the error reported by the driver's resume method.
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies.  For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled.  (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm.... and
 * enabling them could cause such helpers to malfunction (for example,
 * by overwriting data with a breakpoint instruction.  On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	/*
	 * resume() triggers the event 'resumed'. The execution of TCL commands
	 * in the event handler causes the polling of targets. If the target has
	 * already halted for a breakpoint, polling will run the 'halted' event
	 * handler before the pending 'resumed' handler.
	 * Disable polling during resume() to guarantee the execution of handlers
	 * in the correct order.
	 */
	bool save_poll = jtag_poll_get_enabled();
	jtag_poll_set_enabled(false);
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	jtag_poll_set_enabled(save_poll);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
670
671 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
672 {
673 char buf[100];
674 int retval;
675 struct jim_nvp *n;
676 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
677 if (!n->name) {
678 LOG_ERROR("invalid reset mode");
679 return ERROR_FAIL;
680 }
681
682 struct target *target;
683 for (target = all_targets; target; target = target->next)
684 target_call_reset_callbacks(target, reset_mode);
685
686 /* disable polling during reset to make reset event scripts
687 * more predictable, i.e. dr/irscan & pathmove in events will
688 * not have JTAG operations injected into the middle of a sequence.
689 */
690 bool save_poll = jtag_poll_get_enabled();
691
692 jtag_poll_set_enabled(false);
693
694 sprintf(buf, "ocd_process_reset %s", n->name);
695 retval = Jim_Eval(cmd->ctx->interp, buf);
696
697 jtag_poll_set_enabled(save_poll);
698
699 if (retval != JIM_OK) {
700 Jim_MakeErrorMessage(cmd->ctx->interp);
701 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
702 return ERROR_FAIL;
703 }
704
705 /* We want any events to be processed before the prompt */
706 retval = target_call_timer_callbacks_now();
707
708 for (target = all_targets; target; target = target->next) {
709 target->type->check_reset(target);
710 target->running_alg = false;
711 }
712
713 return retval;
714 }
715
/* default virt2phys handler: identity mapping (no MMU translation) */
static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
722
/* default mmu handler for targets without an MMU: always disabled */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
728
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static inline void target_reset_examined(struct target *target)
{
	target->examined = false;
}
737
/* default examine handler: nothing to probe, just mark examined */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
743
/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
749
750 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
751 * Keep in sync */
752 int target_examine_one(struct target *target)
753 {
754 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
755
756 int retval = target->type->examine(target);
757 if (retval != ERROR_OK) {
758 target_reset_examined(target);
759 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
760 return retval;
761 }
762
763 target_set_examined(target);
764 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
765
766 return ERROR_OK;
767 }
768
769 static int jtag_enable_callback(enum jtag_event event, void *priv)
770 {
771 struct target *target = priv;
772
773 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
774 return ERROR_OK;
775
776 jtag_unregister_event_callback(jtag_enable_callback, target);
777
778 return target_examine_one(target);
779 }
780
/* Targets that correctly implement init + examine, i.e.
 * no communication with target during init:
 *
 * XScale
 */
/* Examine every configured target. Targets on disabled TAPs are examined
 * later via jtag_enable_callback(); deferred targets are skipped here.
 * Continues past failures and returns the last failing status. */
int target_examine(void)
{
	int retval = ERROR_OK;
	struct target *target;

	for (target = all_targets; target; target = target->next) {
		/* defer examination, but don't skip it */
		if (!target->tap->enabled) {
			jtag_register_event_callback(jtag_enable_callback,
					target);
			continue;
		}

		if (target->defer_examine)
			continue;

		int retval2 = target_examine_one(target);
		if (retval2 != ERROR_OK) {
			LOG_WARNING("target %s examination failed", target_name(target));
			retval = retval2;
		}
	}
	return retval;
}
810
/* name of the target's driver type (e.g. "cortex_m") */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
815
816 static int target_soft_reset_halt(struct target *target)
817 {
818 if (!target_was_examined(target)) {
819 LOG_ERROR("Target not examined yet");
820 return ERROR_FAIL;
821 }
822 if (!target->type->soft_reset_halt) {
823 LOG_ERROR("Target %s does not support soft_reset_halt",
824 target_name(target));
825 return ERROR_FAIL;
826 }
827 return target->type->soft_reset_halt(target);
828 }
829
/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  * Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or * soft) to detect algorithm
 * termination, while others may support  lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param num_mem_params
 * @param mem_params
 * @param num_reg_params
 * @param reg_param
 * @param entry_point
 * @param exit_point
 * @param timeout_ms
 * @param arch_info target-specific description of the algorithm.
 *
 * @returns ERROR_OK on success; ERROR_FAIL if the target is not examined
 * or its driver has no run_algorithm method; otherwise the driver result.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		target_addr_t entry_point, target_addr_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	/* running_alg guards against re-entrant algorithm execution */
	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
876
/**
 * Executes a target-specific native code algorithm and leaves it running.
 * Pair with target_wait_algorithm() to collect the result.
 *
 * @param target used to run the algorithm
 * @param num_mem_params
 * @param mem_params
 * @param num_reg_params
 * @param reg_params
 * @param entry_point
 * @param exit_point
 * @param arch_info target-specific description of the algorithm.
 *
 * @returns ERROR_OK on success; ERROR_FAIL if the target is not examined,
 * has no start_algorithm method, or is already running an algorithm.
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t entry_point, target_addr_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	/* running_alg stays set until target_wait_algorithm() completes */
	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}
920
/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 *
 * @param target used to run the algorithm
 * @param num_mem_params
 * @param mem_params
 * @param num_reg_params
 * @param reg_params
 * @param exit_point
 * @param timeout_ms
 * @param arch_info target-specific description of the algorithm.
 *
 * @returns the driver's wait result; ERROR_FAIL if the driver lacks a
 * wait_algorithm method or no algorithm is currently running.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	/* on timeout the algorithm is still running; keep running_alg set
	 * so the caller may wait again */
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}
961
962 /**
963 * Streams data to a circular buffer on target intended for consumption by code
964 * running asynchronously on target.
965 *
966 * This is intended for applications where target-specific native code runs
967 * on the target, receives data from the circular buffer, does something with
968 * it (most likely writing it to a flash memory), and advances the circular
969 * buffer pointer.
970 *
971 * This assumes that the helper algorithm has already been loaded to the target,
972 * but has not been started yet. Given memory and register parameters are passed
973 * to the algorithm.
974 *
975 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
976 * following format:
977 *
978 * [buffer_start + 0, buffer_start + 4):
979 * Write Pointer address (aka head). Written and updated by this
980 * routine when new data is written to the circular buffer.
981 * [buffer_start + 4, buffer_start + 8):
982 * Read Pointer address (aka tail). Updated by code running on the
983 * target after it consumes data.
984 * [buffer_start + 8, buffer_start + buffer_size):
985 * Circular buffer contents.
986 *
987 * See contrib/loaders/flash/stm32f1x.S for an example.
988 *
989 * @param target used to run the algorithm
990 * @param buffer address on the host where data to be sent is located
991 * @param count number of blocks to send
992 * @param block_size size in bytes of each block
993 * @param num_mem_params count of memory-based params to pass to algorithm
994 * @param mem_params memory-based params to pass to algorithm
995 * @param num_reg_params count of register-based params to pass to algorithm
 * @param reg_params register-based params to pass to algorithm
997 * @param buffer_start address on the target of the circular buffer structure
998 * @param buffer_size size of the circular buffer structure
999 * @param entry_point address on the target to execute to start the algorithm
1000 * @param exit_point address at which to set a breakpoint to catch the
1001 * end of the algorithm; can be 0 if target triggers a breakpoint itself
1002 * @param arch_info
1003 */
1004
1005 int target_run_flash_async_algorithm(struct target *target,
1006 const uint8_t *buffer, uint32_t count, int block_size,
1007 int num_mem_params, struct mem_param *mem_params,
1008 int num_reg_params, struct reg_param *reg_params,
1009 uint32_t buffer_start, uint32_t buffer_size,
1010 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1011 {
1012 int retval;
1013 int timeout = 0;
1014
1015 const uint8_t *buffer_orig = buffer;
1016
1017 /* Set up working area. First word is write pointer, second word is read pointer,
1018 * rest is fifo data area. */
1019 uint32_t wp_addr = buffer_start;
1020 uint32_t rp_addr = buffer_start + 4;
1021 uint32_t fifo_start_addr = buffer_start + 8;
1022 uint32_t fifo_end_addr = buffer_start + buffer_size;
1023
1024 uint32_t wp = fifo_start_addr;
1025 uint32_t rp = fifo_start_addr;
1026
1027 /* validate block_size is 2^n */
1028 assert(IS_PWR_OF_2(block_size));
1029
1030 retval = target_write_u32(target, wp_addr, wp);
1031 if (retval != ERROR_OK)
1032 return retval;
1033 retval = target_write_u32(target, rp_addr, rp);
1034 if (retval != ERROR_OK)
1035 return retval;
1036
1037 /* Start up algorithm on target and let it idle while writing the first chunk */
1038 retval = target_start_algorithm(target, num_mem_params, mem_params,
1039 num_reg_params, reg_params,
1040 entry_point,
1041 exit_point,
1042 arch_info);
1043
1044 if (retval != ERROR_OK) {
1045 LOG_ERROR("error starting target flash write algorithm");
1046 return retval;
1047 }
1048
1049 while (count > 0) {
1050
1051 retval = target_read_u32(target, rp_addr, &rp);
1052 if (retval != ERROR_OK) {
1053 LOG_ERROR("failed to get read pointer");
1054 break;
1055 }
1056
1057 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1058 (size_t) (buffer - buffer_orig), count, wp, rp);
1059
1060 if (rp == 0) {
1061 LOG_ERROR("flash write algorithm aborted by target");
1062 retval = ERROR_FLASH_OPERATION_FAILED;
1063 break;
1064 }
1065
1066 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1067 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1068 break;
1069 }
1070
1071 /* Count the number of bytes available in the fifo without
1072 * crossing the wrap around. Make sure to not fill it completely,
1073 * because that would make wp == rp and that's the empty condition. */
1074 uint32_t thisrun_bytes;
1075 if (rp > wp)
1076 thisrun_bytes = rp - wp - block_size;
1077 else if (rp > fifo_start_addr)
1078 thisrun_bytes = fifo_end_addr - wp;
1079 else
1080 thisrun_bytes = fifo_end_addr - wp - block_size;
1081
1082 if (thisrun_bytes == 0) {
1083 /* Throttle polling a bit if transfer is (much) faster than flash
1084 * programming. The exact delay shouldn't matter as long as it's
1085 * less than buffer size / flash speed. This is very unlikely to
1086 * run when using high latency connections such as USB. */
1087 alive_sleep(2);
1088
1089 /* to stop an infinite loop on some targets check and increment a timeout
1090 * this issue was observed on a stellaris using the new ICDI interface */
1091 if (timeout++ >= 2500) {
1092 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1093 return ERROR_FLASH_OPERATION_FAILED;
1094 }
1095 continue;
1096 }
1097
1098 /* reset our timeout */
1099 timeout = 0;
1100
1101 /* Limit to the amount of data we actually want to write */
1102 if (thisrun_bytes > count * block_size)
1103 thisrun_bytes = count * block_size;
1104
1105 /* Force end of large blocks to be word aligned */
1106 if (thisrun_bytes >= 16)
1107 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1108
1109 /* Write data to fifo */
1110 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1111 if (retval != ERROR_OK)
1112 break;
1113
1114 /* Update counters and wrap write pointer */
1115 buffer += thisrun_bytes;
1116 count -= thisrun_bytes / block_size;
1117 wp += thisrun_bytes;
1118 if (wp >= fifo_end_addr)
1119 wp = fifo_start_addr;
1120
1121 /* Store updated write pointer to target */
1122 retval = target_write_u32(target, wp_addr, wp);
1123 if (retval != ERROR_OK)
1124 break;
1125
1126 /* Avoid GDB timeouts */
1127 keep_alive();
1128 }
1129
1130 if (retval != ERROR_OK) {
1131 /* abort flash write algorithm on target */
1132 target_write_u32(target, wp_addr, 0);
1133 }
1134
1135 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1136 num_reg_params, reg_params,
1137 exit_point,
1138 10000,
1139 arch_info);
1140
1141 if (retval2 != ERROR_OK) {
1142 LOG_ERROR("error waiting for target flash write algorithm");
1143 retval = retval2;
1144 }
1145
1146 if (retval == ERROR_OK) {
1147 /* check if algorithm set rp = 0 after fifo writer loop finished */
1148 retval = target_read_u32(target, rp_addr, &rp);
1149 if (retval == ERROR_OK && rp == 0) {
1150 LOG_ERROR("flash write algorithm aborted by target");
1151 retval = ERROR_FLASH_OPERATION_FAILED;
1152 }
1153 }
1154
1155 return retval;
1156 }
1157
1158 int target_run_read_async_algorithm(struct target *target,
1159 uint8_t *buffer, uint32_t count, int block_size,
1160 int num_mem_params, struct mem_param *mem_params,
1161 int num_reg_params, struct reg_param *reg_params,
1162 uint32_t buffer_start, uint32_t buffer_size,
1163 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1164 {
1165 int retval;
1166 int timeout = 0;
1167
1168 const uint8_t *buffer_orig = buffer;
1169
1170 /* Set up working area. First word is write pointer, second word is read pointer,
1171 * rest is fifo data area. */
1172 uint32_t wp_addr = buffer_start;
1173 uint32_t rp_addr = buffer_start + 4;
1174 uint32_t fifo_start_addr = buffer_start + 8;
1175 uint32_t fifo_end_addr = buffer_start + buffer_size;
1176
1177 uint32_t wp = fifo_start_addr;
1178 uint32_t rp = fifo_start_addr;
1179
1180 /* validate block_size is 2^n */
1181 assert(IS_PWR_OF_2(block_size));
1182
1183 retval = target_write_u32(target, wp_addr, wp);
1184 if (retval != ERROR_OK)
1185 return retval;
1186 retval = target_write_u32(target, rp_addr, rp);
1187 if (retval != ERROR_OK)
1188 return retval;
1189
1190 /* Start up algorithm on target */
1191 retval = target_start_algorithm(target, num_mem_params, mem_params,
1192 num_reg_params, reg_params,
1193 entry_point,
1194 exit_point,
1195 arch_info);
1196
1197 if (retval != ERROR_OK) {
1198 LOG_ERROR("error starting target flash read algorithm");
1199 return retval;
1200 }
1201
1202 while (count > 0) {
1203 retval = target_read_u32(target, wp_addr, &wp);
1204 if (retval != ERROR_OK) {
1205 LOG_ERROR("failed to get write pointer");
1206 break;
1207 }
1208
1209 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1210 (size_t)(buffer - buffer_orig), count, wp, rp);
1211
1212 if (wp == 0) {
1213 LOG_ERROR("flash read algorithm aborted by target");
1214 retval = ERROR_FLASH_OPERATION_FAILED;
1215 break;
1216 }
1217
1218 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1219 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1220 break;
1221 }
1222
1223 /* Count the number of bytes available in the fifo without
1224 * crossing the wrap around. */
1225 uint32_t thisrun_bytes;
1226 if (wp >= rp)
1227 thisrun_bytes = wp - rp;
1228 else
1229 thisrun_bytes = fifo_end_addr - rp;
1230
1231 if (thisrun_bytes == 0) {
1232 /* Throttle polling a bit if transfer is (much) faster than flash
1233 * reading. The exact delay shouldn't matter as long as it's
1234 * less than buffer size / flash speed. This is very unlikely to
1235 * run when using high latency connections such as USB. */
1236 alive_sleep(2);
1237
1238 /* to stop an infinite loop on some targets check and increment a timeout
1239 * this issue was observed on a stellaris using the new ICDI interface */
1240 if (timeout++ >= 2500) {
1241 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1242 return ERROR_FLASH_OPERATION_FAILED;
1243 }
1244 continue;
1245 }
1246
1247 /* Reset our timeout */
1248 timeout = 0;
1249
1250 /* Limit to the amount of data we actually want to read */
1251 if (thisrun_bytes > count * block_size)
1252 thisrun_bytes = count * block_size;
1253
1254 /* Force end of large blocks to be word aligned */
1255 if (thisrun_bytes >= 16)
1256 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1257
1258 /* Read data from fifo */
1259 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1260 if (retval != ERROR_OK)
1261 break;
1262
1263 /* Update counters and wrap write pointer */
1264 buffer += thisrun_bytes;
1265 count -= thisrun_bytes / block_size;
1266 rp += thisrun_bytes;
1267 if (rp >= fifo_end_addr)
1268 rp = fifo_start_addr;
1269
1270 /* Store updated write pointer to target */
1271 retval = target_write_u32(target, rp_addr, rp);
1272 if (retval != ERROR_OK)
1273 break;
1274
1275 /* Avoid GDB timeouts */
1276 keep_alive();
1277
1278 }
1279
1280 if (retval != ERROR_OK) {
1281 /* abort flash write algorithm on target */
1282 target_write_u32(target, rp_addr, 0);
1283 }
1284
1285 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1286 num_reg_params, reg_params,
1287 exit_point,
1288 10000,
1289 arch_info);
1290
1291 if (retval2 != ERROR_OK) {
1292 LOG_ERROR("error waiting for target flash write algorithm");
1293 retval = retval2;
1294 }
1295
1296 if (retval == ERROR_OK) {
1297 /* check if algorithm set wp = 0 after fifo writer loop finished */
1298 retval = target_read_u32(target, wp_addr, &wp);
1299 if (retval == ERROR_OK && wp == 0) {
1300 LOG_ERROR("flash read algorithm aborted by target");
1301 retval = ERROR_FLASH_OPERATION_FAILED;
1302 }
1303 }
1304
1305 return retval;
1306 }
1307
1308 int target_read_memory(struct target *target,
1309 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1310 {
1311 if (!target_was_examined(target)) {
1312 LOG_ERROR("Target not examined yet");
1313 return ERROR_FAIL;
1314 }
1315 if (!target->type->read_memory) {
1316 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1317 return ERROR_FAIL;
1318 }
1319 return target->type->read_memory(target, address, size, count, buffer);
1320 }
1321
1322 int target_read_phys_memory(struct target *target,
1323 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1324 {
1325 if (!target_was_examined(target)) {
1326 LOG_ERROR("Target not examined yet");
1327 return ERROR_FAIL;
1328 }
1329 if (!target->type->read_phys_memory) {
1330 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1331 return ERROR_FAIL;
1332 }
1333 return target->type->read_phys_memory(target, address, size, count, buffer);
1334 }
1335
1336 int target_write_memory(struct target *target,
1337 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1338 {
1339 if (!target_was_examined(target)) {
1340 LOG_ERROR("Target not examined yet");
1341 return ERROR_FAIL;
1342 }
1343 if (!target->type->write_memory) {
1344 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1345 return ERROR_FAIL;
1346 }
1347 return target->type->write_memory(target, address, size, count, buffer);
1348 }
1349
1350 int target_write_phys_memory(struct target *target,
1351 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1352 {
1353 if (!target_was_examined(target)) {
1354 LOG_ERROR("Target not examined yet");
1355 return ERROR_FAIL;
1356 }
1357 if (!target->type->write_phys_memory) {
1358 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1359 return ERROR_FAIL;
1360 }
1361 return target->type->write_phys_memory(target, address, size, count, buffer);
1362 }
1363
1364 int target_add_breakpoint(struct target *target,
1365 struct breakpoint *breakpoint)
1366 {
1367 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1368 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1369 return ERROR_TARGET_NOT_HALTED;
1370 }
1371 return target->type->add_breakpoint(target, breakpoint);
1372 }
1373
1374 int target_add_context_breakpoint(struct target *target,
1375 struct breakpoint *breakpoint)
1376 {
1377 if (target->state != TARGET_HALTED) {
1378 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1379 return ERROR_TARGET_NOT_HALTED;
1380 }
1381 return target->type->add_context_breakpoint(target, breakpoint);
1382 }
1383
1384 int target_add_hybrid_breakpoint(struct target *target,
1385 struct breakpoint *breakpoint)
1386 {
1387 if (target->state != TARGET_HALTED) {
1388 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1389 return ERROR_TARGET_NOT_HALTED;
1390 }
1391 return target->type->add_hybrid_breakpoint(target, breakpoint);
1392 }
1393
/**
 * Remove a breakpoint previously installed through one of the
 * target_add_*_breakpoint() wrappers. Delegates directly to the target
 * type; no halted-state check is performed here.
 */
int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}
1399
1400 int target_add_watchpoint(struct target *target,
1401 struct watchpoint *watchpoint)
1402 {
1403 if (target->state != TARGET_HALTED) {
1404 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1405 return ERROR_TARGET_NOT_HALTED;
1406 }
1407 return target->type->add_watchpoint(target, watchpoint);
1408 }
/**
 * Remove a watchpoint previously installed via target_add_watchpoint().
 * Delegates directly to the target type; no halted-state check here.
 */
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}
1414 int target_hit_watchpoint(struct target *target,
1415 struct watchpoint **hit_watchpoint)
1416 {
1417 if (target->state != TARGET_HALTED) {
1418 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1419 return ERROR_TARGET_NOT_HALTED;
1420 }
1421
1422 if (!target->type->hit_watchpoint) {
1423 /* For backward compatible, if hit_watchpoint is not implemented,
1424 * return ERROR_FAIL such that gdb_server will not take the nonsense
1425 * information. */
1426 return ERROR_FAIL;
1427 }
1428
1429 return target->type->hit_watchpoint(target, hit_watchpoint);
1430 }
1431
1432 const char *target_get_gdb_arch(struct target *target)
1433 {
1434 if (!target->type->get_gdb_arch)
1435 return NULL;
1436 return target->type->get_gdb_arch(target);
1437 }
1438
1439 int target_get_gdb_reg_list(struct target *target,
1440 struct reg **reg_list[], int *reg_list_size,
1441 enum target_register_class reg_class)
1442 {
1443 int result = ERROR_FAIL;
1444
1445 if (!target_was_examined(target)) {
1446 LOG_ERROR("Target not examined yet");
1447 goto done;
1448 }
1449
1450 result = target->type->get_gdb_reg_list(target, reg_list,
1451 reg_list_size, reg_class);
1452
1453 done:
1454 if (result != ERROR_OK) {
1455 *reg_list = NULL;
1456 *reg_list_size = 0;
1457 }
1458 return result;
1459 }
1460
1461 int target_get_gdb_reg_list_noread(struct target *target,
1462 struct reg **reg_list[], int *reg_list_size,
1463 enum target_register_class reg_class)
1464 {
1465 if (target->type->get_gdb_reg_list_noread &&
1466 target->type->get_gdb_reg_list_noread(target, reg_list,
1467 reg_list_size, reg_class) == ERROR_OK)
1468 return ERROR_OK;
1469 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1470 }
1471
1472 bool target_supports_gdb_connection(struct target *target)
1473 {
1474 /*
1475 * exclude all the targets that don't provide get_gdb_reg_list
1476 * or that have explicit gdb_max_connection == 0
1477 */
1478 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1479 }
1480
1481 int target_step(struct target *target,
1482 int current, target_addr_t address, int handle_breakpoints)
1483 {
1484 int retval;
1485
1486 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1487
1488 retval = target->type->step(target, current, address, handle_breakpoints);
1489 if (retval != ERROR_OK)
1490 return retval;
1491
1492 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1493
1494 return retval;
1495 }
1496
1497 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1498 {
1499 if (target->state != TARGET_HALTED) {
1500 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1501 return ERROR_TARGET_NOT_HALTED;
1502 }
1503 return target->type->get_gdb_fileio_info(target, fileio_info);
1504 }
1505
1506 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1507 {
1508 if (target->state != TARGET_HALTED) {
1509 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1510 return ERROR_TARGET_NOT_HALTED;
1511 }
1512 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1513 }
1514
1515 target_addr_t target_address_max(struct target *target)
1516 {
1517 unsigned bits = target_address_bits(target);
1518 if (sizeof(target_addr_t) * 8 == bits)
1519 return (target_addr_t) -1;
1520 else
1521 return (((target_addr_t) 1) << bits) - 1;
1522 }
1523
1524 unsigned target_address_bits(struct target *target)
1525 {
1526 if (target->type->address_bits)
1527 return target->type->address_bits(target);
1528 return 32;
1529 }
1530
1531 unsigned int target_data_bits(struct target *target)
1532 {
1533 if (target->type->data_bits)
1534 return target->type->data_bits(target);
1535 return 32;
1536 }
1537
/* Collect up to max_num_samples PC samples over `seconds` seconds by
 * delegating to the target type's profiling hook (a default is stubbed
 * in by target_init_one() when the type provides none). */
static int target_profiling(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	return target->type->profiling(target, samples, max_num_samples,
			num_samples, seconds);
}
1544
1545 static int handle_target(void *priv);
1546
/**
 * Prepare one target for use: clear its examined flag, stub in defaults
 * for optional target_type hooks, run the type's init_target() routine,
 * and normalize the MMU-related hooks.
 *
 * @param cmd_ctx command context forwarded to init_target()
 * @param target the target being initialized
 * @returns ERROR_OK, or the error returned by init_target()
 */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (!type->examine)
		type->examine = default_examine;

	if (!type->check_reset)
		type->check_reset = default_check_reset;

	/* init_target is mandatory for every target type */
	assert(type->init_target);

	int retval = type->init_target(cmd_ctx, target);
	if (retval != ERROR_OK) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (!type->virt2phys) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* Fill in defaults for the remaining optional hooks. */
	if (!target->type->read_buffer)
		target->type->read_buffer = target_read_buffer_default;

	if (!target->type->write_buffer)
		target->type->write_buffer = target_write_buffer_default;

	if (!target->type->get_gdb_fileio_info)
		target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;

	if (!target->type->gdb_fileio_end)
		target->type->gdb_fileio_end = target_gdb_fileio_end_default;

	if (!target->type->profiling)
		target->type->profiling = target_profiling_default;

	return ERROR_OK;
}
1606
1607 static int target_init(struct command_context *cmd_ctx)
1608 {
1609 struct target *target;
1610 int retval;
1611
1612 for (target = all_targets; target; target = target->next) {
1613 retval = target_init_one(cmd_ctx, target);
1614 if (retval != ERROR_OK)
1615 return retval;
1616 }
1617
1618 if (!all_targets)
1619 return ERROR_OK;
1620
1621 retval = target_register_user_commands(cmd_ctx);
1622 if (retval != ERROR_OK)
1623 return retval;
1624
1625 retval = target_register_timer_callback(&handle_target,
1626 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1627 if (retval != ERROR_OK)
1628 return retval;
1629
1630 return ERROR_OK;
1631 }
1632
/**
 * 'target init' command: run the user-overridable init_targets,
 * init_target_events and init_board Tcl procs, then initialize all
 * configured targets. The sequence runs at most once per session.
 */
COMMAND_HANDLER(handle_target_init_command)
{
	int retval;

	if (CMD_ARGC != 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Guard against running the (non-idempotent) init sequence twice. */
	static bool target_initialized;
	if (target_initialized) {
		LOG_INFO("'target init' has already been called");
		return ERROR_OK;
	}
	target_initialized = true;

	retval = command_run_line(CMD_CTX, "init_targets");
	if (retval != ERROR_OK)
		return retval;

	retval = command_run_line(CMD_CTX, "init_target_events");
	if (retval != ERROR_OK)
		return retval;

	retval = command_run_line(CMD_CTX, "init_board");
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("Initializing targets...");
	return target_init(CMD_CTX);
}
1662
1663 int target_register_event_callback(int (*callback)(struct target *target,
1664 enum target_event event, void *priv), void *priv)
1665 {
1666 struct target_event_callback **callbacks_p = &target_event_callbacks;
1667
1668 if (!callback)
1669 return ERROR_COMMAND_SYNTAX_ERROR;
1670
1671 if (*callbacks_p) {
1672 while ((*callbacks_p)->next)
1673 callbacks_p = &((*callbacks_p)->next);
1674 callbacks_p = &((*callbacks_p)->next);
1675 }
1676
1677 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1678 (*callbacks_p)->callback = callback;
1679 (*callbacks_p)->priv = priv;
1680 (*callbacks_p)->next = NULL;
1681
1682 return ERROR_OK;
1683 }
1684
1685 int target_register_reset_callback(int (*callback)(struct target *target,
1686 enum target_reset_mode reset_mode, void *priv), void *priv)
1687 {
1688 struct target_reset_callback *entry;
1689
1690 if (!callback)
1691 return ERROR_COMMAND_SYNTAX_ERROR;
1692
1693 entry = malloc(sizeof(struct target_reset_callback));
1694 if (!entry) {
1695 LOG_ERROR("error allocating buffer for reset callback entry");
1696 return ERROR_COMMAND_SYNTAX_ERROR;
1697 }
1698
1699 entry->callback = callback;
1700 entry->priv = priv;
1701 list_add(&entry->list, &target_reset_callback_list);
1702
1703
1704 return ERROR_OK;
1705 }
1706
1707 int target_register_trace_callback(int (*callback)(struct target *target,
1708 size_t len, uint8_t *data, void *priv), void *priv)
1709 {
1710 struct target_trace_callback *entry;
1711
1712 if (!callback)
1713 return ERROR_COMMAND_SYNTAX_ERROR;
1714
1715 entry = malloc(sizeof(struct target_trace_callback));
1716 if (!entry) {
1717 LOG_ERROR("error allocating buffer for trace callback entry");
1718 return ERROR_COMMAND_SYNTAX_ERROR;
1719 }
1720
1721 entry->callback = callback;
1722 entry->priv = priv;
1723 list_add(&entry->list, &target_trace_callback_list);
1724
1725
1726 return ERROR_OK;
1727 }
1728
1729 int target_register_timer_callback(int (*callback)(void *priv),
1730 unsigned int time_ms, enum target_timer_type type, void *priv)
1731 {
1732 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1733
1734 if (!callback)
1735 return ERROR_COMMAND_SYNTAX_ERROR;
1736
1737 if (*callbacks_p) {
1738 while ((*callbacks_p)->next)
1739 callbacks_p = &((*callbacks_p)->next);
1740 callbacks_p = &((*callbacks_p)->next);
1741 }
1742
1743 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1744 (*callbacks_p)->callback = callback;
1745 (*callbacks_p)->type = type;
1746 (*callbacks_p)->time_ms = time_ms;
1747 (*callbacks_p)->removed = false;
1748
1749 (*callbacks_p)->when = timeval_ms() + time_ms;
1750 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1751
1752 (*callbacks_p)->priv = priv;
1753 (*callbacks_p)->next = NULL;
1754
1755 return ERROR_OK;
1756 }
1757
1758 int target_unregister_event_callback(int (*callback)(struct target *target,
1759 enum target_event event, void *priv), void *priv)
1760 {
1761 struct target_event_callback **p = &target_event_callbacks;
1762 struct target_event_callback *c = target_event_callbacks;
1763
1764 if (!callback)
1765 return ERROR_COMMAND_SYNTAX_ERROR;
1766
1767 while (c) {
1768 struct target_event_callback *next = c->next;
1769 if ((c->callback == callback) && (c->priv == priv)) {
1770 *p = next;
1771 free(c);
1772 return ERROR_OK;
1773 } else
1774 p = &(c->next);
1775 c = next;
1776 }
1777
1778 return ERROR_OK;
1779 }
1780
1781 int target_unregister_reset_callback(int (*callback)(struct target *target,
1782 enum target_reset_mode reset_mode, void *priv), void *priv)
1783 {
1784 struct target_reset_callback *entry;
1785
1786 if (!callback)
1787 return ERROR_COMMAND_SYNTAX_ERROR;
1788
1789 list_for_each_entry(entry, &target_reset_callback_list, list) {
1790 if (entry->callback == callback && entry->priv == priv) {
1791 list_del(&entry->list);
1792 free(entry);
1793 break;
1794 }
1795 }
1796
1797 return ERROR_OK;
1798 }
1799
1800 int target_unregister_trace_callback(int (*callback)(struct target *target,
1801 size_t len, uint8_t *data, void *priv), void *priv)
1802 {
1803 struct target_trace_callback *entry;
1804
1805 if (!callback)
1806 return ERROR_COMMAND_SYNTAX_ERROR;
1807
1808 list_for_each_entry(entry, &target_trace_callback_list, list) {
1809 if (entry->callback == callback && entry->priv == priv) {
1810 list_del(&entry->list);
1811 free(entry);
1812 break;
1813 }
1814 }
1815
1816 return ERROR_OK;
1817 }
1818
1819 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1820 {
1821 if (!callback)
1822 return ERROR_COMMAND_SYNTAX_ERROR;
1823
1824 for (struct target_timer_callback *c = target_timer_callbacks;
1825 c; c = c->next) {
1826 if ((c->callback == callback) && (c->priv == priv)) {
1827 c->removed = true;
1828 return ERROR_OK;
1829 }
1830 }
1831
1832 return ERROR_FAIL;
1833 }
1834
/**
 * Dispatch @a event for @a target: first the Tcl event handler
 * (target_handle_event), then every registered C callback in
 * registration order. TARGET_EVENT_HALTED additionally triggers a
 * recursive dispatch of TARGET_EVENT_GDB_HALT beforehand.
 */
int target_call_event_callbacks(struct target *target, enum target_event event)
{
	struct target_event_callback *callback = target_event_callbacks;
	struct target_event_callback *next_callback;

	if (event == TARGET_EVENT_HALTED) {
		/* execute early halted first */
		target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
	}

	LOG_DEBUG("target event %i (%s) for core %s", event,
			target_event_name(event),
			target_name(target));

	target_handle_event(target, event);

	/* next_callback is captured before each call so a callback may
	 * safely unregister itself while being invoked */
	while (callback) {
		next_callback = callback->next;
		callback->callback(target, event, callback->priv);
		callback = next_callback;
	}

	return ERROR_OK;
}
1859
1860 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1861 {
1862 struct target_reset_callback *callback;
1863
1864 LOG_DEBUG("target reset %i (%s)", reset_mode,
1865 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1866
1867 list_for_each_entry(callback, &target_reset_callback_list, list)
1868 callback->callback(target, reset_mode, callback->priv);
1869
1870 return ERROR_OK;
1871 }
1872
1873 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1874 {
1875 struct target_trace_callback *callback;
1876
1877 list_for_each_entry(callback, &target_trace_callback_list, list)
1878 callback->callback(target, len, data, callback->priv);
1879
1880 return ERROR_OK;
1881 }
1882
/* Reschedule a periodic timer callback: next firing is time_ms after
 * `now` (the dispatch time), not after the previous deadline. */
static int target_timer_callback_periodic_restart(
		struct target_timer_callback *cb, int64_t *now)
{
	cb->when = *now + cb->time_ms;
	return ERROR_OK;
}
1889
1890 static int target_call_timer_callback(struct target_timer_callback *cb,
1891 int64_t *now)
1892 {
1893 cb->callback(cb->priv);
1894
1895 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1896 return target_timer_callback_periodic_restart(cb, now);
1897
1898 return target_unregister_timer_callback(cb->callback, cb->priv);
1899 }
1900
/**
 * Walk the timer-callback list, freeing entries flagged as removed and
 * firing those that are due. With checktime == 0, periodic callbacks
 * fire unconditionally. Also recomputes target_timer_next_event_value,
 * the earliest upcoming deadline (capped at now + 1000 ms).
 * Re-entrant invocations (e.g. from within a callback) are no-ops.
 */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		/* Deferred deletion: unlink nodes flagged by
		 * target_unregister_timer_callback(). */
		if ((*callback)->removed) {
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* Track the soonest pending deadline among surviving nodes. */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1948
/* Fire only the timer callbacks whose deadline has passed. */
int target_call_timer_callbacks()
{
	return target_call_timer_callbacks_check_time(1);
}
1953
/* Invoke periodic callbacks immediately, regardless of their deadline. */
int target_call_timer_callbacks_now()
{
	return target_call_timer_callbacks_check_time(0);
}
1959
/* Earliest pending timer deadline, as computed by the last run of
 * target_call_timer_callbacks_check_time() (milliseconds timestamp). */
int64_t target_timer_next_event(void)
{
	return target_timer_next_event_value;
}
1964
1965 /* Prints the working area layout for debug purposes */
1966 static void print_wa_layout(struct target *target)
1967 {
1968 struct working_area *c = target->working_areas;
1969
1970 while (c) {
1971 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1972 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1973 c->address, c->address + c->size - 1, c->size);
1974 c = c->next;
1975 }
1976 }
1977
1978 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1979 static void target_split_working_area(struct working_area *area, uint32_t size)
1980 {
1981 assert(area->free); /* Shouldn't split an allocated area */
1982 assert(size <= area->size); /* Caller should guarantee this */
1983
1984 /* Split only if not already the right size */
1985 if (size < area->size) {
1986 struct working_area *new_wa = malloc(sizeof(*new_wa));
1987
1988 if (!new_wa)
1989 return;
1990
1991 new_wa->next = area->next;
1992 new_wa->size = area->size - size;
1993 new_wa->address = area->address + size;
1994 new_wa->backup = NULL;
1995 new_wa->user = NULL;
1996 new_wa->free = true;
1997
1998 area->next = new_wa;
1999 area->size = size;
2000
2001 /* If backup memory was allocated to this area, it has the wrong size
2002 * now so free it and it will be reallocated if/when needed */
2003 free(area->backup);
2004 area->backup = NULL;
2005 }
2006 }
2007
/* Merge all adjacent free areas into one */
static void target_merge_working_areas(struct target *target)
{
	struct working_area *c = target->working_areas;

	while (c && c->next) {
		/* Areas are contiguous in address order. */
		assert(c->next->address == c->address + c->size); /* This is an invariant */

		/* Find two adjacent free areas */
		if (c->free && c->next->free) {
			/* Merge the last into the first */
			c->size += c->next->size;

			/* Remove the last */
			struct working_area *to_be_freed = c->next;
			c->next = c->next->next;
			free(to_be_freed->backup);
			free(to_be_freed);

			/* If backup memory was allocated to the remaining area, it has
			 * the wrong size now */
			free(c->backup);
			c->backup = NULL;

			/* Deliberately do not advance: the merged area may also be
			 * adjacent to the following free area. */
		} else {
			c = c->next;
		}
	}
}
2036
/* Try to allocate 'size' bytes (rounded up to a multiple of 4) from the
 * target's working area, using a first-fit scan of the area list.
 *
 * On the first call the area list is lazily created, choosing the
 * physical or virtual base address depending on current MMU state.
 * If backup_working_area is set, the original target memory content is
 * saved so it can be restored when the area is freed.
 *
 * On success *area points at the allocated area and the caller's
 * pointer location is remembered (area->user) so it can be invalidated
 * on free. Returns ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no
 * sufficiently large free area exists (no warning is logged; see
 * target_alloc_working_area() for the logging variant). */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* On malloc failure the list stays NULL and the scan below
		 * reports ERROR_TARGET_RESOURCE_NOT_AVAILABLE. */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	if (size % 4)
		size = (size + 3) & (~3UL);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
		size, c->address);

	if (target->backup_working_area) {
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		/* Save the current target memory so it can be restored on free. */
		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2131
2132 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2133 {
2134 int retval;
2135
2136 retval = target_alloc_working_area_try(target, size, area);
2137 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2138 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2139 return retval;
2140
2141 }
2142
2143 static int target_restore_working_area(struct target *target, struct working_area *area)
2144 {
2145 int retval = ERROR_OK;
2146
2147 if (target->backup_working_area && area->backup) {
2148 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2149 if (retval != ERROR_OK)
2150 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2151 area->size, area->address);
2152 }
2153
2154 return retval;
2155 }
2156
/* Restore the area's backup memory, if any, and return the area to the allocation pool */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	/* Freeing NULL or an already-free area is a no-op. */
	if (!area || area->free)
		return ERROR_OK;

	int retval = ERROR_OK;
	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
		area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	/* Coalesce adjacent free areas so larger requests can succeed again. */
	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}
2189
/* Return an area to the pool, restoring its backed-up contents first. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	const int restore = 1;
	return target_free_working_area_restore(target, area, restore);
}
2194
2195 /* free resources and restore memory, if restoring memory fails,
2196 * free up resources anyway
2197 */
2198 static void target_free_all_working_areas_restore(struct target *target, int restore)
2199 {
2200 struct working_area *c = target->working_areas;
2201
2202 LOG_DEBUG("freeing all working areas");
2203
2204 /* Loop through all areas, restoring the allocated ones and marking them as free */
2205 while (c) {
2206 if (!c->free) {
2207 if (restore)
2208 target_restore_working_area(target, c);
2209 c->free = true;
2210 *c->user = NULL; /* Same as above */
2211 c->user = NULL;
2212 }
2213 c = c->next;
2214 }
2215
2216 /* Run a merge pass to combine all areas into one */
2217 target_merge_working_areas(target);
2218
2219 print_wa_layout(target);
2220 }
2221
2222 void target_free_all_working_areas(struct target *target)
2223 {
2224 target_free_all_working_areas_restore(target, 1);
2225
2226 /* Now we have none or only one working area marked as free */
2227 if (target->working_areas) {
2228 /* Free the last one to allow on-the-fly moving and resizing */
2229 free(target->working_areas->backup);
2230 free(target->working_areas);
2231 target->working_areas = NULL;
2232 }
2233 }
2234
2235 /* Find the largest number of bytes that can be allocated */
2236 uint32_t target_get_working_area_avail(struct target *target)
2237 {
2238 struct working_area *c = target->working_areas;
2239 uint32_t max_size = 0;
2240
2241 if (!c)
2242 return target->working_area_size;
2243
2244 while (c) {
2245 if (c->free && max_size < c->size)
2246 max_size = c->size;
2247
2248 c = c->next;
2249 }
2250
2251 return max_size;
2252 }
2253
/* Release every resource owned by a target. The target must not be
 * used afterwards. Teardown order: driver deinit first, then event
 * handlers, working areas, SMP list, RTOS state, and finally the
 * target's own allocations. */
static void target_destroy(struct target *target)
{
	/* Let the target driver tear down its private state first. */
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* Release the Tcl event handlers attached to this target. */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head = target->head;
		while (head) {
			struct target_list *pos = head->next;
			head->target->smp = 0;
			free(head);
			head = pos;
		}
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2294
2295 void target_quit(void)
2296 {
2297 struct target_event_callback *pe = target_event_callbacks;
2298 while (pe) {
2299 struct target_event_callback *t = pe->next;
2300 free(pe);
2301 pe = t;
2302 }
2303 target_event_callbacks = NULL;
2304
2305 struct target_timer_callback *pt = target_timer_callbacks;
2306 while (pt) {
2307 struct target_timer_callback *t = pt->next;
2308 free(pt);
2309 pt = t;
2310 }
2311 target_timer_callbacks = NULL;
2312
2313 for (struct target *target = all_targets; target;) {
2314 struct target *tmp;
2315
2316 tmp = target->next;
2317 target_destroy(target);
2318 target = tmp;
2319 }
2320
2321 all_targets = NULL;
2322 }
2323
2324 int target_arch_state(struct target *target)
2325 {
2326 int retval;
2327 if (!target) {
2328 LOG_WARNING("No target has been configured");
2329 return ERROR_OK;
2330 }
2331
2332 if (target->state != TARGET_HALTED)
2333 return ERROR_OK;
2334
2335 retval = target->type->arch_state(target);
2336 return retval;
2337 }
2338
/* Default .get_gdb_fileio_info implementation for targets without
 * semihosting support. */
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info)
{
	/* If target does not support semi-hosting function, target
	   has no need to provide .get_gdb_fileio_info callback.
	   It just return ERROR_FAIL and gdb_server will return "Txx"
	   as target halted every time. */
	return ERROR_FAIL;
}
2348
/* Default .gdb_fileio_end implementation: nothing to clean up, so
 * simply report success. */
static int target_gdb_fileio_end_default(struct target *target,
		int retcode, int fileio_errno, bool ctrl_c)
{
	return ERROR_OK;
}
2354
2355 int target_profiling_default(struct target *target, uint32_t *samples,
2356 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2357 {
2358 struct timeval timeout, now;
2359
2360 gettimeofday(&timeout, NULL);
2361 timeval_add_time(&timeout, seconds, 0);
2362
2363 LOG_INFO("Starting profiling. Halting and resuming the"
2364 " target as often as we can...");
2365
2366 uint32_t sample_count = 0;
2367 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2368 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2369
2370 int retval = ERROR_OK;
2371 for (;;) {
2372 target_poll(target);
2373 if (target->state == TARGET_HALTED) {
2374 uint32_t t = buf_get_u32(reg->value, 0, 32);
2375 samples[sample_count++] = t;
2376 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2377 retval = target_resume(target, 1, 0, 0, 0);
2378 target_poll(target);
2379 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2380 } else if (target->state == TARGET_RUNNING) {
2381 /* We want to quickly sample the PC. */
2382 retval = target_halt(target);
2383 } else {
2384 LOG_INFO("Target not halted or running");
2385 retval = ERROR_OK;
2386 break;
2387 }
2388
2389 if (retval != ERROR_OK)
2390 break;
2391
2392 gettimeofday(&now, NULL);
2393 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2394 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2395 break;
2396 }
2397 }
2398
2399 *num_samples = sample_count;
2400 return retval;
2401 }
2402
2403 /* Single aligned words are guaranteed to use 16 or 32 bit access
2404 * mode respectively, otherwise data is handled as quickly as
2405 * possible
2406 */
2407 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2408 {
2409 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2410 size, address);
2411
2412 if (!target_was_examined(target)) {
2413 LOG_ERROR("Target not examined yet");
2414 return ERROR_FAIL;
2415 }
2416
2417 if (size == 0)
2418 return ERROR_OK;
2419
2420 if ((address + size - 1) < address) {
2421 /* GDB can request this when e.g. PC is 0xfffffffc */
2422 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2423 address,
2424 size);
2425 return ERROR_FAIL;
2426 }
2427
2428 return target->type->write_buffer(target, address, size, buffer);
2429 }
2430
/* Generic write_buffer fallback: writes 'count' bytes using the widest
 * aligned accesses the target's data bus supports, handling a
 * misaligned head and an undersized tail with narrower accesses. */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		if (address & size) {
			/* Address is misaligned for the next-wider access:
			 * write one unit of the current width to fix that up. */
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Largest prefix of the remainder expressible in this width. */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2467
2468 /* Single aligned words are guaranteed to use 16 or 32 bit access
2469 * mode respectively, otherwise data is handled as quickly as
2470 * possible
2471 */
2472 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2473 {
2474 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2475 size, address);
2476
2477 if (!target_was_examined(target)) {
2478 LOG_ERROR("Target not examined yet");
2479 return ERROR_FAIL;
2480 }
2481
2482 if (size == 0)
2483 return ERROR_OK;
2484
2485 if ((address + size - 1) < address) {
2486 /* GDB can request this when e.g. PC is 0xfffffffc */
2487 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2488 address,
2489 size);
2490 return ERROR_FAIL;
2491 }
2492
2493 return target->type->read_buffer(target, address, size, buffer);
2494 }
2495
/* Generic read_buffer fallback: reads 'count' bytes using the widest
 * aligned accesses the target's data bus supports, handling a
 * misaligned head and an undersized tail with narrower accesses.
 * Mirror of target_write_buffer_default(). */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		if (address & size) {
			/* Address is misaligned for the next-wider access:
			 * read one unit of the current width to fix that up. */
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Largest prefix of the remainder expressible in this width. */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2531
/* Compute a checksum over 'size' bytes of target memory into *crc.
 * Tries the target's (possibly on-target, accelerated) checksum_memory
 * hook first; if that fails, falls back to reading the memory and
 * checksumming on the host. */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->checksum_memory) {
		LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* Fall back to host-side checksumming. */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): get_u32 immediately followed by set_u32 on the
		 * same target looks like an identity transform; presumably a
		 * byte-order fixup was intended — confirm before changing. */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2575
2576 int target_blank_check_memory(struct target *target,
2577 struct target_memory_check_block *blocks, int num_blocks,
2578 uint8_t erased_value)
2579 {
2580 if (!target_was_examined(target)) {
2581 LOG_ERROR("Target not examined yet");
2582 return ERROR_FAIL;
2583 }
2584
2585 if (!target->type->blank_check_memory)
2586 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2587
2588 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2589 }
2590
2591 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2592 {
2593 uint8_t value_buf[8];
2594 if (!target_was_examined(target)) {
2595 LOG_ERROR("Target not examined yet");
2596 return ERROR_FAIL;
2597 }
2598
2599 int retval = target_read_memory(target, address, 8, 1, value_buf);
2600
2601 if (retval == ERROR_OK) {
2602 *value = target_buffer_get_u64(target, value_buf);
2603 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2604 address,
2605 *value);
2606 } else {
2607 *value = 0x0;
2608 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2609 address);
2610 }
2611
2612 return retval;
2613 }
2614
2615 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2616 {
2617 uint8_t value_buf[4];
2618 if (!target_was_examined(target)) {
2619 LOG_ERROR("Target not examined yet");
2620 return ERROR_FAIL;
2621 }
2622
2623 int retval = target_read_memory(target, address, 4, 1, value_buf);
2624
2625 if (retval == ERROR_OK) {
2626 *value = target_buffer_get_u32(target, value_buf);
2627 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2628 address,
2629 *value);
2630 } else {
2631 *value = 0x0;
2632 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2633 address);
2634 }
2635
2636 return retval;
2637 }
2638
2639 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2640 {
2641 uint8_t value_buf[2];
2642 if (!target_was_examined(target)) {
2643 LOG_ERROR("Target not examined yet");
2644 return ERROR_FAIL;
2645 }
2646
2647 int retval = target_read_memory(target, address, 2, 1, value_buf);
2648
2649 if (retval == ERROR_OK) {
2650 *value = target_buffer_get_u16(target, value_buf);
2651 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2652 address,
2653 *value);
2654 } else {
2655 *value = 0x0;
2656 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2657 address);
2658 }
2659
2660 return retval;
2661 }
2662
2663 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2664 {
2665 if (!target_was_examined(target)) {
2666 LOG_ERROR("Target not examined yet");
2667 return ERROR_FAIL;
2668 }
2669
2670 int retval = target_read_memory(target, address, 1, 1, value);
2671
2672 if (retval == ERROR_OK) {
2673 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2674 address,
2675 *value);
2676 } else {
2677 *value = 0x0;
2678 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2679 address);
2680 }
2681
2682 return retval;
2683 }
2684
2685 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2686 {
2687 int retval;
2688 uint8_t value_buf[8];
2689 if (!target_was_examined(target)) {
2690 LOG_ERROR("Target not examined yet");
2691 return ERROR_FAIL;
2692 }
2693
2694 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2695 address,
2696 value);
2697
2698 target_buffer_set_u64(target, value_buf, value);
2699 retval = target_write_memory(target, address, 8, 1, value_buf);
2700 if (retval != ERROR_OK)
2701 LOG_DEBUG("failed: %i", retval);
2702
2703 return retval;
2704 }
2705
2706 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2707 {
2708 int retval;
2709 uint8_t value_buf[4];
2710 if (!target_was_examined(target)) {
2711 LOG_ERROR("Target not examined yet");
2712 return ERROR_FAIL;
2713 }
2714
2715 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2716 address,
2717 value);
2718
2719 target_buffer_set_u32(target, value_buf, value);
2720 retval = target_write_memory(target, address, 4, 1, value_buf);
2721 if (retval != ERROR_OK)
2722 LOG_DEBUG("failed: %i", retval);
2723
2724 return retval;
2725 }
2726
2727 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2728 {
2729 int retval;
2730 uint8_t value_buf[2];
2731 if (!target_was_examined(target)) {
2732 LOG_ERROR("Target not examined yet");
2733 return ERROR_FAIL;
2734 }
2735
2736 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2737 address,
2738 value);
2739
2740 target_buffer_set_u16(target, value_buf, value);
2741 retval = target_write_memory(target, address, 2, 1, value_buf);
2742 if (retval != ERROR_OK)
2743 LOG_DEBUG("failed: %i", retval);
2744
2745 return retval;
2746 }
2747
2748 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2749 {
2750 int retval;
2751 if (!target_was_examined(target)) {
2752 LOG_ERROR("Target not examined yet");
2753 return ERROR_FAIL;
2754 }
2755
2756 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2757 address, value);
2758
2759 retval = target_write_memory(target, address, 1, 1, &value);
2760 if (retval != ERROR_OK)
2761 LOG_DEBUG("failed: %i", retval);
2762
2763 return retval;
2764 }
2765
2766 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2767 {
2768 int retval;
2769 uint8_t value_buf[8];
2770 if (!target_was_examined(target)) {
2771 LOG_ERROR("Target not examined yet");
2772 return ERROR_FAIL;
2773 }
2774
2775 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2776 address,
2777 value);
2778
2779 target_buffer_set_u64(target, value_buf, value);
2780 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2781 if (retval != ERROR_OK)
2782 LOG_DEBUG("failed: %i", retval);
2783
2784 return retval;
2785 }
2786
2787 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2788 {
2789 int retval;
2790 uint8_t value_buf[4];
2791 if (!target_was_examined(target)) {
2792 LOG_ERROR("Target not examined yet");
2793 return ERROR_FAIL;
2794 }
2795
2796 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2797 address,
2798 value);
2799
2800 target_buffer_set_u32(target, value_buf, value);
2801 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2802 if (retval != ERROR_OK)
2803 LOG_DEBUG("failed: %i", retval);
2804
2805 return retval;
2806 }
2807
2808 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2809 {
2810 int retval;
2811 uint8_t value_buf[2];
2812 if (!target_was_examined(target)) {
2813 LOG_ERROR("Target not examined yet");
2814 return ERROR_FAIL;
2815 }
2816
2817 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2818 address,
2819 value);
2820
2821 target_buffer_set_u16(target, value_buf, value);
2822 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2823 if (retval != ERROR_OK)
2824 LOG_DEBUG("failed: %i", retval);
2825
2826 return retval;
2827 }
2828
2829 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2830 {
2831 int retval;
2832 if (!target_was_examined(target)) {
2833 LOG_ERROR("Target not examined yet");
2834 return ERROR_FAIL;
2835 }
2836
2837 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2838 address, value);
2839
2840 retval = target_write_phys_memory(target, address, 1, 1, &value);
2841 if (retval != ERROR_OK)
2842 LOG_DEBUG("failed: %i", retval);
2843
2844 return retval;
2845 }
2846
2847 static int find_target(struct command_invocation *cmd, const char *name)
2848 {
2849 struct target *target = get_target(name);
2850 if (!target) {
2851 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2852 return ERROR_FAIL;
2853 }
2854 if (!target->tap->enabled) {
2855 command_print(cmd, "Target: TAP %s is disabled, "
2856 "can't be the current target\n",
2857 target->tap->dotted_name);
2858 return ERROR_FAIL;
2859 }
2860
2861 cmd->ctx->current_target = target;
2862 if (cmd->ctx->current_target_override)
2863 cmd->ctx->current_target_override = target;
2864
2865 return ERROR_OK;
2866 }
2867
2868
/* 'targets' command: with one argument, select that target as current;
 * without an argument (or when selection fails), print a table of all
 * configured targets, marking the current one with '*'. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	struct target *target = all_targets;
	command_print(CMD, " TargetName Type Endian TapName State ");
	command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		/* '*' marks the context's current target. */
		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2911
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* Current sensed line state, refreshed by sense_handler(). */
static int power_dropout;
static int srst_asserted;

/* One-shot edge flags: set by sense_handler(), consumed and cleared by
 * handle_target(). */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
2921
/* Sample the adapter's power/srst sense inputs and derive one-shot
 * event flags (run_*) plus the current state (power_dropout,
 * srst_asserted). Edges are detected against the values of the
 * previous call; the power-dropout and srst-deassert events are
 * rate-limited to at most one per two seconds. */
static int sense_handler(void)
{
	/* State remembered from the previous invocation, for edge detection. */
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	static int64_t last_power;
	/* Rate-limit dropout events to one per 2000 ms. */
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	static int64_t last_srst;
	/* Rate-limit deassert events to one per 2000 ms. */
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2973
/* process target state changes
 *
 * Periodic event-loop callback: run the srst/power sense handler, fire
 * the corresponding Tcl procs for any detected events, then poll every
 * enabled and examined target. A target that fails to poll gets an
 * exponentially growing backoff (capped at 5000 ms of poll interval)
 * and a re-examination attempt. 'priv' is the Jim interpreter. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		/* Backoff: skip 'times' polls after a previous failure. */
		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			if (target->backoff.times > 0) {
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target_set_examined(target);
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
						target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3086
/* "reg" command:
 *   reg                  - list all registers of the current target
 *   reg <#|name>         - print one register (reads it if not cached)
 *   reg <#|name> force   - invalidate the cache and re-read from the target
 *   reg <#|name> <value> - write a new value to the register
 */
COMMAND_HANDLER(handle_reg_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	struct reg *reg = NULL;

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		/* count runs across all caches so the printed ordinal matches
		 * the number accepted by "reg <#>" below */
		unsigned int count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* non-existent and hidden registers still consume an ordinal */
				if (reg->exist == false || reg->hidden)
					continue;
				/* only print cached values if they are valid */
				if (reg->valid) {
					char *value = buf_to_hex_str(reg->value,
							reg->size);
					command_print(CMD,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk the chain of caches until the num'th register is found */
		struct reg_cache *cache = target->reg_cache;
		unsigned int count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);

		if (!reg)
			goto not_found;
	}

	assert(reg); /* give clang a hint that we *know* reg is != NULL here */

	if (!reg->exist)
		goto not_found;

	/* display a register: one arg, or two args where the second is not
	 * a number (i.e. the "force" keyword rather than a new value) */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		/* "force" drops the cached value so it is re-read below */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0) {
			int retval = reg->type->get(reg);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not read register '%s'", reg->name);
				return retval;
			}
		}
		char *value = buf_to_hex_str(reg->value, reg->size);
		command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (!buf)
			return ERROR_FAIL;
		/* NOTE(review): str_to_buf() result is not checked here, so a
		 * malformed value writes whatever was parsed — confirm intent */
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		int retval = reg->type->set(reg, buf);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not write to register '%s'", reg->name);
		} else {
			/* echo the value now held in the register cache */
			char *value = buf_to_hex_str(reg->value, reg->size);
			command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
			free(value);
		}

		free(buf);

		return retval;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;

not_found:
	command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
	return ERROR_OK;
}
3217
3218 COMMAND_HANDLER(handle_poll_command)
3219 {
3220 int retval = ERROR_OK;
3221 struct target *target = get_current_target(CMD_CTX);
3222
3223 if (CMD_ARGC == 0) {
3224 command_print(CMD, "background polling: %s",
3225 jtag_poll_get_enabled() ? "on" : "off");
3226 command_print(CMD, "TAP: %s (%s)",
3227 target->tap->dotted_name,
3228 target->tap->enabled ? "enabled" : "disabled");
3229 if (!target->tap->enabled)
3230 return ERROR_OK;
3231 retval = target_poll(target);
3232 if (retval != ERROR_OK)
3233 return retval;
3234 retval = target_arch_state(target);
3235 if (retval != ERROR_OK)
3236 return retval;
3237 } else if (CMD_ARGC == 1) {
3238 bool enable;
3239 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3240 jtag_poll_set_enabled(enable);
3241 } else
3242 return ERROR_COMMAND_SYNTAX_ERROR;
3243
3244 return retval;
3245 }
3246
3247 COMMAND_HANDLER(handle_wait_halt_command)
3248 {
3249 if (CMD_ARGC > 1)
3250 return ERROR_COMMAND_SYNTAX_ERROR;
3251
3252 unsigned ms = DEFAULT_HALT_TIMEOUT;
3253 if (1 == CMD_ARGC) {
3254 int retval = parse_uint(CMD_ARGV[0], &ms);
3255 if (retval != ERROR_OK)
3256 return ERROR_COMMAND_SYNTAX_ERROR;
3257 }
3258
3259 struct target *target = get_current_target(CMD_CTX);
3260 return target_wait_state(target, TARGET_HALTED, ms);
3261 }
3262
3263 /* wait for target state to change. The trick here is to have a low
3264 * latency for short waits and not to suck up all the CPU time
3265 * on longer waits.
3266 *
3267 * After 500ms, keep_alive() is invoked
3268 */
3269 int target_wait_state(struct target *target, enum target_state state, int ms)
3270 {
3271 int retval;
3272 int64_t then = 0, cur;
3273 bool once = true;
3274
3275 for (;;) {
3276 retval = target_poll(target);
3277 if (retval != ERROR_OK)
3278 return retval;
3279 if (target->state == state)
3280 break;
3281 cur = timeval_ms();
3282 if (once) {
3283 once = false;
3284 then = timeval_ms();
3285 LOG_DEBUG("waiting for target %s...",
3286 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3287 }
3288
3289 if (cur-then > 500)
3290 keep_alive();
3291
3292 if ((cur-then) > ms) {
3293 LOG_ERROR("timed out while waiting for target %s",
3294 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3295 return ERROR_FAIL;
3296 }
3297 }
3298
3299 return ERROR_OK;
3300 }
3301
3302 COMMAND_HANDLER(handle_halt_command)
3303 {
3304 LOG_DEBUG("-");
3305
3306 struct target *target = get_current_target(CMD_CTX);
3307
3308 target->verbose_halt_msg = true;
3309
3310 int retval = target_halt(target);
3311 if (retval != ERROR_OK)
3312 return retval;
3313
3314 if (CMD_ARGC == 1) {
3315 unsigned wait_local;
3316 retval = parse_uint(CMD_ARGV[0], &wait_local);
3317 if (retval != ERROR_OK)
3318 return ERROR_COMMAND_SYNTAX_ERROR;
3319 if (!wait_local)
3320 return ERROR_OK;
3321 }
3322
3323 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3324 }
3325
3326 COMMAND_HANDLER(handle_soft_reset_halt_command)
3327 {
3328 struct target *target = get_current_target(CMD_CTX);
3329
3330 LOG_USER("requesting target halt and executing a soft reset");
3331
3332 target_soft_reset_halt(target);
3333
3334 return ERROR_OK;
3335 }
3336
3337 COMMAND_HANDLER(handle_reset_command)
3338 {
3339 if (CMD_ARGC > 1)
3340 return ERROR_COMMAND_SYNTAX_ERROR;
3341
3342 enum target_reset_mode reset_mode = RESET_RUN;
3343 if (CMD_ARGC == 1) {
3344 const struct jim_nvp *n;
3345 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3346 if ((!n->name) || (n->value == RESET_UNKNOWN))
3347 return ERROR_COMMAND_SYNTAX_ERROR;
3348 reset_mode = n->value;
3349 }
3350
3351 /* reset *all* targets */
3352 return target_process_reset(CMD, reset_mode);
3353 }
3354
3355
3356 COMMAND_HANDLER(handle_resume_command)
3357 {
3358 int current = 1;
3359 if (CMD_ARGC > 1)
3360 return ERROR_COMMAND_SYNTAX_ERROR;
3361
3362 struct target *target = get_current_target(CMD_CTX);
3363
3364 /* with no CMD_ARGV, resume from current pc, addr = 0,
3365 * with one arguments, addr = CMD_ARGV[0],
3366 * handle breakpoints, not debugging */
3367 target_addr_t addr = 0;
3368 if (CMD_ARGC == 1) {
3369 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3370 current = 0;
3371 }
3372
3373 return target_resume(target, current, addr, 1, 0);
3374 }
3375
3376 COMMAND_HANDLER(handle_step_command)
3377 {
3378 if (CMD_ARGC > 1)
3379 return ERROR_COMMAND_SYNTAX_ERROR;
3380
3381 LOG_DEBUG("-");
3382
3383 /* with no CMD_ARGV, step from current pc, addr = 0,
3384 * with one argument addr = CMD_ARGV[0],
3385 * handle breakpoints, debugging */
3386 target_addr_t addr = 0;
3387 int current_pc = 1;
3388 if (CMD_ARGC == 1) {
3389 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3390 current_pc = 0;
3391 }
3392
3393 struct target *target = get_current_target(CMD_CTX);
3394
3395 return target_step(target, current_pc, addr, 1);
3396 }
3397
3398 void target_handle_md_output(struct command_invocation *cmd,
3399 struct target *target, target_addr_t address, unsigned size,
3400 unsigned count, const uint8_t *buffer)
3401 {
3402 const unsigned line_bytecnt = 32;
3403 unsigned line_modulo = line_bytecnt / size;
3404
3405 char output[line_bytecnt * 4 + 1];
3406 unsigned output_len = 0;
3407
3408 const char *value_fmt;
3409 switch (size) {
3410 case 8:
3411 value_fmt = "%16.16"PRIx64" ";
3412 break;
3413 case 4:
3414 value_fmt = "%8.8"PRIx64" ";
3415 break;
3416 case 2:
3417 value_fmt = "%4.4"PRIx64" ";
3418 break;
3419 case 1:
3420 value_fmt = "%2.2"PRIx64" ";
3421 break;
3422 default:
3423 /* "can't happen", caller checked */
3424 LOG_ERROR("invalid memory read size: %u", size);
3425 return;
3426 }
3427
3428 for (unsigned i = 0; i < count; i++) {
3429 if (i % line_modulo == 0) {
3430 output_len += snprintf(output + output_len,
3431 sizeof(output) - output_len,
3432 TARGET_ADDR_FMT ": ",
3433 (address + (i * size)));
3434 }
3435
3436 uint64_t value = 0;
3437 const uint8_t *value_ptr = buffer + i * size;
3438 switch (size) {
3439 case 8:
3440 value = target_buffer_get_u64(target, value_ptr);
3441 break;
3442 case 4:
3443 value = target_buffer_get_u32(target, value_ptr);
3444 break;
3445 case 2:
3446 value = target_buffer_get_u16(target, value_ptr);
3447 break;
3448 case 1:
3449 value = *value_ptr;
3450 }
3451 output_len += snprintf(output + output_len,
3452 sizeof(output) - output_len,
3453 value_fmt, value);
3454
3455 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3456 command_print(cmd, "%s", output);
3457 output_len = 0;
3458 }
3459 }
3460 }
3461
3462 COMMAND_HANDLER(handle_md_command)
3463 {
3464 if (CMD_ARGC < 1)
3465 return ERROR_COMMAND_SYNTAX_ERROR;
3466
3467 unsigned size = 0;
3468 switch (CMD_NAME[2]) {
3469 case 'd':
3470 size = 8;
3471 break;
3472 case 'w':
3473 size = 4;
3474 break;
3475 case 'h':
3476 size = 2;
3477 break;
3478 case 'b':
3479 size = 1;
3480 break;
3481 default:
3482 return ERROR_COMMAND_SYNTAX_ERROR;
3483 }
3484
3485 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3486 int (*fn)(struct target *target,
3487 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3488 if (physical) {
3489 CMD_ARGC--;
3490 CMD_ARGV++;
3491 fn = target_read_phys_memory;
3492 } else
3493 fn = target_read_memory;
3494 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3495 return ERROR_COMMAND_SYNTAX_ERROR;
3496
3497 target_addr_t address;
3498 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3499
3500 unsigned count = 1;
3501 if (CMD_ARGC == 2)
3502 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3503
3504 uint8_t *buffer = calloc(count, size);
3505 if (!buffer) {
3506 LOG_ERROR("Failed to allocate md read buffer");
3507 return ERROR_FAIL;
3508 }
3509
3510 struct target *target = get_current_target(CMD_CTX);
3511 int retval = fn(target, address, size, count, buffer);
3512 if (retval == ERROR_OK)
3513 target_handle_md_output(CMD, target, address, size, count, buffer);
3514
3515 free(buffer);
3516
3517 return retval;
3518 }
3519
/* Signature shared by target_write_memory() and target_write_phys_memory(),
 * letting fill/write helpers be parameterized over the address space. */
typedef int (*target_write_fn)(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3522
3523 static int target_fill_mem(struct target *target,
3524 target_addr_t address,
3525 target_write_fn fn,
3526 unsigned data_size,
3527 /* value */
3528 uint64_t b,
3529 /* count */
3530 unsigned c)
3531 {
3532 /* We have to write in reasonably large chunks to be able
3533 * to fill large memory areas with any sane speed */
3534 const unsigned chunk_size = 16384;
3535 uint8_t *target_buf = malloc(chunk_size * data_size);
3536 if (!target_buf) {
3537 LOG_ERROR("Out of memory");
3538 return ERROR_FAIL;
3539 }
3540
3541 for (unsigned i = 0; i < chunk_size; i++) {
3542 switch (data_size) {
3543 case 8:
3544 target_buffer_set_u64(target, target_buf + i * data_size, b);
3545 break;
3546 case 4:
3547 target_buffer_set_u32(target, target_buf + i * data_size, b);
3548 break;
3549 case 2:
3550 target_buffer_set_u16(target, target_buf + i * data_size, b);
3551 break;
3552 case 1:
3553 target_buffer_set_u8(target, target_buf + i * data_size, b);
3554 break;
3555 default:
3556 exit(-1);
3557 }
3558 }
3559
3560 int retval = ERROR_OK;
3561
3562 for (unsigned x = 0; x < c; x += chunk_size) {
3563 unsigned current;
3564 current = c - x;
3565 if (current > chunk_size)
3566 current = chunk_size;
3567 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3568 if (retval != ERROR_OK)
3569 break;
3570 /* avoid GDB timeouts */
3571 keep_alive();
3572 }
3573 free(target_buf);
3574
3575 return retval;
3576 }
3577
3578
3579 COMMAND_HANDLER(handle_mw_command)
3580 {
3581 if (CMD_ARGC < 2)
3582 return ERROR_COMMAND_SYNTAX_ERROR;
3583 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3584 target_write_fn fn;
3585 if (physical) {
3586 CMD_ARGC--;
3587 CMD_ARGV++;
3588 fn = target_write_phys_memory;
3589 } else
3590 fn = target_write_memory;
3591 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3592 return ERROR_COMMAND_SYNTAX_ERROR;
3593
3594 target_addr_t address;
3595 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3596
3597 uint64_t value;
3598 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3599
3600 unsigned count = 1;
3601 if (CMD_ARGC == 3)
3602 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3603
3604 struct target *target = get_current_target(CMD_CTX);
3605 unsigned wordsize;
3606 switch (CMD_NAME[2]) {
3607 case 'd':
3608 wordsize = 8;
3609 break;
3610 case 'w':
3611 wordsize = 4;
3612 break;
3613 case 'h':
3614 wordsize = 2;
3615 break;
3616 case 'b':
3617 wordsize = 1;
3618 break;
3619 default:
3620 return ERROR_COMMAND_SYNTAX_ERROR;
3621 }
3622
3623 return target_fill_mem(target, address, fn, wordsize, value, count);
3624 }
3625
3626 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3627 target_addr_t *min_address, target_addr_t *max_address)
3628 {
3629 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3630 return ERROR_COMMAND_SYNTAX_ERROR;
3631
3632 /* a base address isn't always necessary,
3633 * default to 0x0 (i.e. don't relocate) */
3634 if (CMD_ARGC >= 2) {
3635 target_addr_t addr;
3636 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3637 image->base_address = addr;
3638 image->base_address_set = true;
3639 } else
3640 image->base_address_set = false;
3641
3642 image->start_address_set = false;
3643
3644 if (CMD_ARGC >= 4)
3645 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3646 if (CMD_ARGC == 5) {
3647 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3648 /* use size (given) to find max (required) */
3649 *max_address += *min_address;
3650 }
3651
3652 if (*min_address > *max_address)
3653 return ERROR_COMMAND_SYNTAX_ERROR;
3654
3655 return ERROR_OK;
3656 }
3657
/* "load_image" filename [address [type [min_address [size]]]]:
 * download an image file into target memory, restricted to the
 * [min_address, max_address) window parsed by parse_load_image_command(). */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		/* offset/length describe the part of this section that falls
		 * inside the [min_address, max_address) window */
		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3741
3742 COMMAND_HANDLER(handle_dump_image_command)
3743 {
3744 struct fileio *fileio;
3745 uint8_t *buffer;
3746 int retval, retvaltemp;
3747 target_addr_t address, size;
3748 struct duration bench;
3749 struct target *target = get_current_target(CMD_CTX);
3750
3751 if (CMD_ARGC != 3)
3752 return ERROR_COMMAND_SYNTAX_ERROR;
3753
3754 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3755 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3756
3757 uint32_t buf_size = (size > 4096) ? 4096 : size;
3758 buffer = malloc(buf_size);
3759 if (!buffer)
3760 return ERROR_FAIL;
3761
3762 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3763 if (retval != ERROR_OK) {
3764 free(buffer);
3765 return retval;
3766 }
3767
3768 duration_start(&bench);
3769
3770 while (size > 0) {
3771 size_t size_written;
3772 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3773 retval = target_read_buffer(target, address, this_run_size, buffer);
3774 if (retval != ERROR_OK)
3775 break;
3776
3777 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3778 if (retval != ERROR_OK)
3779 break;
3780
3781 size -= this_run_size;
3782 address += this_run_size;
3783 }
3784
3785 free(buffer);
3786
3787 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3788 size_t filesize;
3789 retval = fileio_size(fileio, &filesize);
3790 if (retval != ERROR_OK)
3791 return retval;
3792 command_print(CMD,
3793 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3794 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3795 }
3796
3797 retvaltemp = fileio_close(fileio);
3798 if (retvaltemp != ERROR_OK)
3799 return retvaltemp;
3800
3801 return retval;
3802 }
3803
/* How thoroughly the {test,verify}_image commands compare an image
 * file against target memory. */
enum verify_mode {
	IMAGE_TEST = 0,		/* just report section addresses/lengths; no compare */
	IMAGE_VERIFY = 1,	/* CRC check; binary compare on mismatch */
	IMAGE_CHECKSUM_ONLY = 2	/* CRC check only; any mismatch is a hard error */
};
3809
3810 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3811 {
3812 uint8_t *buffer;
3813 size_t buf_cnt;
3814 uint32_t image_size;
3815 int retval;
3816 uint32_t checksum = 0;
3817 uint32_t mem_checksum = 0;
3818
3819 struct image image;
3820
3821 struct target *target = get_current_target(CMD_CTX);
3822
3823 if (CMD_ARGC < 1)
3824 return ERROR_COMMAND_SYNTAX_ERROR;
3825
3826 if (!target) {
3827 LOG_ERROR("no target selected");
3828 return ERROR_FAIL;
3829 }
3830
3831 struct duration bench;
3832 duration_start(&bench);
3833
3834 if (CMD_ARGC >= 2) {
3835 target_addr_t addr;
3836 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3837 image.base_address = addr;
3838 image.base_address_set = true;
3839 } else {
3840 image.base_address_set = false;
3841 image.base_address = 0x0;
3842 }
3843
3844 image.start_address_set = false;
3845
3846 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3847 if (retval != ERROR_OK)
3848 return retval;
3849
3850 image_size = 0x0;
3851 int diffs = 0;
3852 retval = ERROR_OK;
3853 for (unsigned int i = 0; i < image.num_sections; i++) {
3854 buffer = malloc(image.sections[i].size);
3855 if (!buffer) {
3856 command_print(CMD,
3857 "error allocating buffer for section (%" PRIu32 " bytes)",
3858 image.sections[i].size);
3859 break;
3860 }
3861 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3862 if (retval != ERROR_OK) {
3863 free(buffer);
3864 break;
3865 }
3866
3867 if (verify >= IMAGE_VERIFY) {
3868 /* calculate checksum of image */
3869 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3870 if (retval != ERROR_OK) {
3871 free(buffer);
3872 break;
3873 }
3874
3875 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3876 if (retval != ERROR_OK) {
3877 free(buffer);
3878 break;
3879 }
3880 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3881 LOG_ERROR("checksum mismatch");
3882 free(buffer);
3883 retval = ERROR_FAIL;
3884 goto done;
3885 }
3886 if (checksum != mem_checksum) {
3887 /* failed crc checksum, fall back to a binary compare */
3888 uint8_t *data;
3889
3890 if (diffs == 0)
3891 LOG_ERROR("checksum mismatch - attempting binary compare");
3892
3893 data = malloc(buf_cnt);
3894
3895 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3896 if (retval == ERROR_OK) {
3897 uint32_t t;
3898 for (t = 0; t < buf_cnt; t++) {
3899 if (data[t] != buffer[t]) {
3900 command_print(CMD,
3901 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3902 diffs,
3903 (unsigned)(t + image.sections[i].base_address),
3904 data[t],
3905 buffer[t]);
3906 if (diffs++ >= 127) {
3907 command_print(CMD, "More than 128 errors, the rest are not printed.");
3908 free(data);
3909 free(buffer);
3910 goto done;
3911 }
3912 }
3913 keep_alive();
3914 }
3915 }
3916 free(data);
3917 }
3918 } else {
3919 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3920 image.sections[i].base_address,
3921 buf_cnt);
3922 }
3923
3924 free(buffer);
3925 image_size += buf_cnt;
3926 }
3927 if (diffs > 0)
3928 command_print(CMD, "No more differences found.");
3929 done:
3930 if (diffs > 0)
3931 retval = ERROR_FAIL;
3932 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3933 command_print(CMD, "verified %" PRIu32 " bytes "
3934 "in %fs (%0.3f KiB/s)", image_size,
3935 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3936 }
3937
3938 image_close(&image);
3939
3940 return retval;
3941 }
3942
/* "verify_image_checksum": compare image to target memory by CRC only. */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3947
/* "verify_image": CRC-check each section, binary compare on mismatch. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3952
/* "test_image": parse the image and report section layout; no compare. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3957
3958 static int handle_bp_command_list(struct command_invocation *cmd)
3959 {
3960 struct target *target = get_current_target(cmd->ctx);
3961 struct breakpoint *breakpoint = target->breakpoints;
3962 while (breakpoint) {
3963 if (breakpoint->type == BKPT_SOFT) {
3964 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3965 breakpoint->length);
3966 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3967 breakpoint->address,
3968 breakpoint->length,
3969 breakpoint->set, buf);
3970 free(buf);
3971 } else {
3972 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3973 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3974 breakpoint->asid,
3975 breakpoint->length, breakpoint->set);
3976 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3977 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3978 breakpoint->address,
3979 breakpoint->length, breakpoint->set);
3980 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3981 breakpoint->asid);
3982 } else
3983 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3984 breakpoint->address,
3985 breakpoint->length, breakpoint->set);
3986 }
3987
3988 breakpoint = breakpoint->next;
3989 }
3990 return ERROR_OK;
3991 }
3992
3993 static int handle_bp_command_set(struct command_invocation *cmd,
3994 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3995 {
3996 struct target *target = get_current_target(cmd->ctx);
3997 int retval;
3998
3999 if (asid == 0) {
4000 retval = breakpoint_add(target, addr, length, hw);
4001 /* error is always logged in breakpoint_add(), do not print it again */
4002 if (retval == ERROR_OK)
4003 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
4004
4005 } else if (addr == 0) {
4006 if (!target->type->add_context_breakpoint) {
4007 LOG_ERROR("Context breakpoint not available");
4008 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4009 }
4010 retval = context_breakpoint_add(target, asid, length, hw);
4011 /* error is always logged in context_breakpoint_add(), do not print it again */
4012 if (retval == ERROR_OK)
4013 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4014
4015 } else {
4016 if (!target->type->add_hybrid_breakpoint) {
4017 LOG_ERROR("Hybrid breakpoint not available");
4018 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4019 }
4020 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4021 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4022 if (retval == ERROR_OK)
4023 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4024 }
4025 return retval;
4026 }
4027
/* "bp" command:
 *   bp                         - list breakpoints
 *   bp <addr> <len> [hw]       - software (default) or hardware breakpoint
 *   bp <asid> <len> hw_ctx     - context breakpoint (ASID match)
 *   bp <addr> <asid> <len>     - hybrid breakpoint (always hardware)
 */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
	case 0:
		return handle_bp_command_list(CMD);

	case 2:
		asid = 0;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	case 3:
		if (strcmp(CMD_ARGV[2], "hw") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			asid = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			addr = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		}
		/* three args with no keyword: treated as the hybrid form below */
		/* fallthrough */
	case 4:
		hw = BKPT_HARD;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
4071
4072 COMMAND_HANDLER(handle_rbp_command)
4073 {
4074 if (CMD_ARGC != 1)
4075 return ERROR_COMMAND_SYNTAX_ERROR;
4076
4077 struct target *target = get_current_target(CMD_CTX);
4078
4079 if (!strcmp(CMD_ARGV[0], "all")) {
4080 breakpoint_remove_all(target);
4081 } else {
4082 target_addr_t addr;
4083 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4084
4085 breakpoint_remove(target, addr);
4086 }
4087
4088 return ERROR_OK;
4089 }
4090
/* "wp" command:
 *   wp                                       - list watchpoints
 *   wp <addr> <len> [r|w|a [value [mask]]]   - set a watchpoint
 * The mode defaults to access ('a'); value/mask default to 0/0xffffffff.
 */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC == 0) {
		/* no arguments: list all watchpoints on the current target */
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx32
					", mask: 0x%8.8" PRIx32,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint32_t data_value = 0x0;
	uint32_t data_mask = 0xffffffff;

	/* each case picks up its own argument then falls through so the
	 * shorter forms share the common parsing below */
	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
		/* fall through */
	case 3:
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
4158
4159 COMMAND_HANDLER(handle_rwp_command)
4160 {
4161 if (CMD_ARGC != 1)
4162 return ERROR_COMMAND_SYNTAX_ERROR;
4163
4164 target_addr_t addr;
4165 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4166
4167 struct target *target = get_current_target(CMD_CTX);
4168 watchpoint_remove(target, addr);
4169
4170 return ERROR_OK;
4171 }
4172
4173 /**
4174 * Translate a virtual address to a physical address.
4175 *
4176 * The low-level target implementation must have logged a detailed error
4177 * which is forwarded to telnet/GDB session.
4178 */
4179 COMMAND_HANDLER(handle_virt2phys_command)
4180 {
4181 if (CMD_ARGC != 1)
4182 return ERROR_COMMAND_SYNTAX_ERROR;
4183
4184 target_addr_t va;
4185 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4186 target_addr_t pa;
4187
4188 struct target *target = get_current_target(CMD_CTX);
4189 int retval = target->type->virt2phys(target, va, &pa);
4190 if (retval == ERROR_OK)
4191 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4192
4193 return retval;
4194 }
4195
/* fwrite() wrapper: log (but do not propagate) short writes. */
static void write_data(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4202
/* Write a 32-bit value to @f in the target's byte order. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t raw[4];

	target_buffer_set_u32(target, raw, l);
	write_data(f, raw, sizeof(raw));
}
4210
/* Write a NUL-terminated string to @f (terminator not included). */
static void write_string(FILE *f, char *s)
{
	size_t len = strlen(s);
	write_data(f, s, len);
}
4215
typedef unsigned char UNIT[2]; /* unit of profiling */

/**
 * Dump a gmon.out histogram file readable by gprof.
 *
 * @param samples	PC sample values collected from the target
 * @param sample_num	number of valid entries in @a samples
 * @param filename	output file path
 * @param with_range	when true, bucket the histogram over
 *			[start_address, end_address) instead of the
 *			min/max of the samples
 * @param target	used only for its byte order when serializing
 * @param duration_ms	sampling wall time, used for the sample rate
 */
static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
		uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
{
	uint32_t i;

	/* Without an explicit range the histogram bounds are derived from
	 * the samples themselves; with zero samples that would read
	 * samples[0] uninitialized, so there is nothing useful to write. */
	if (sample_num == 0 && !with_range)
		return;

	FILE *f = fopen(filename, "w");
	if (!f)
		return;
	write_string(f, "gmon");
	write_long(f, 0x00000001, target); /* Version */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */

	uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
	write_data(f, &zero, 1);

	/* figure out bucket size */
	uint32_t min;
	uint32_t max;
	if (with_range) {
		min = start_address;
		max = end_address;
	} else {
		min = samples[0];
		max = samples[0];
		for (i = 0; i < sample_num; i++) {
			if (min > samples[i])
				min = samples[i];
			if (max < samples[i])
				max = samples[i];
		}

		/* max should be (largest sample + 1)
		 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
		max++;
	}

	int address_space = max - min;
	assert(address_space >= 2);
	if (address_space < 2) {
		/* Degenerate or inverted range (possible with bad
		 * user-supplied start/end); refuse to derive a bucket
		 * count from it instead of invoking undefined behavior
		 * in release builds where assert() is compiled out. */
		fclose(f);
		return;
	}

	/* FIXME: What is the reasonable number of buckets?
	 * The profiling result will be more accurate if there are enough buckets. */
	static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
	uint32_t num_buckets = address_space / sizeof(UNIT);
	if (num_buckets > max_buckets)
		num_buckets = max_buckets;
	/* calloc: zero-initialized and overflow-checked n*size */
	int *buckets = calloc(num_buckets, sizeof(int));
	if (!buckets) {
		fclose(f);
		return;
	}
	for (i = 0; i < sample_num; i++) {
		uint32_t address = samples[i];

		if ((address < min) || (max <= address))
			continue;

		/* 64-bit intermediates: (offset * num_buckets) can
		 * overflow a 32-bit int for large address spaces */
		long long a = address - min;
		long long b = num_buckets;
		long long c = address_space;
		int index_t = (a * b) / c;
		buckets[index_t]++;
	}

	/* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
	write_long(f, min, target); /* low_pc */
	write_long(f, max, target); /* high_pc */
	write_long(f, num_buckets, target); /* # of buckets */
	/* NOTE(review): duration_ms == 0 yields an infinite float rate;
	 * callers are expected to pass a measured, non-zero duration. */
	float sample_rate = sample_num / (duration_ms / 1000.0);
	write_long(f, sample_rate, target);
	write_string(f, "seconds");
	/* pad the dimension field to its fixed 15-byte width */
	for (i = 0; i < (15-strlen("seconds")); i++)
		write_data(f, &zero, 1);
	write_string(f, "s");

	/*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */

	char *data = malloc(2 * num_buckets);
	if (data) {
		for (i = 0; i < num_buckets; i++) {
			int val = buckets[i];
			if (val > 65535)
				val = 65535; /* histogram counters are 16 bit */
			data[i * 2] = val & 0xff;
			data[i * 2 + 1] = (val >> 8) & 0xff;
		}
		write_data(f, data, num_buckets * 2);
		free(data);
	}
	free(buckets);

	fclose(f);
}
4315
4316 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4317 * which will be used as a random sampling of PC */
4318 COMMAND_HANDLER(handle_profile_command)
4319 {
4320 struct target *target = get_current_target(CMD_CTX);
4321
4322 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4323 return ERROR_COMMAND_SYNTAX_ERROR;
4324
4325 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4326 uint32_t offset;
4327 uint32_t num_of_samples;
4328 int retval = ERROR_OK;
4329 bool halted_before_profiling = target->state == TARGET_HALTED;
4330
4331 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4332
4333 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4334 if (!samples) {
4335 LOG_ERROR("No memory to store samples.");
4336 return ERROR_FAIL;
4337 }
4338
4339 uint64_t timestart_ms = timeval_ms();
4340 /**
4341 * Some cores let us sample the PC without the
4342 * annoying halt/resume step; for example, ARMv7 PCSR.
4343 * Provide a way to use that more efficient mechanism.
4344 */
4345 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4346 &num_of_samples, offset);
4347 if (retval != ERROR_OK) {
4348 free(samples);
4349 return retval;
4350 }
4351 uint32_t duration_ms = timeval_ms() - timestart_ms;
4352
4353 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4354
4355 retval = target_poll(target);
4356 if (retval != ERROR_OK) {
4357 free(samples);
4358 return retval;
4359 }
4360
4361 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4362 /* The target was halted before we started and is running now. Halt it,
4363 * for consistency. */
4364 retval = target_halt(target);
4365 if (retval != ERROR_OK) {
4366 free(samples);
4367 return retval;
4368 }
4369 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4370 /* The target was running before we started and is halted now. Resume
4371 * it, for consistency. */
4372 retval = target_resume(target, 1, 0, 0, 0);
4373 if (retval != ERROR_OK) {
4374 free(samples);
4375 return retval;
4376 }
4377 }
4378
4379 retval = target_poll(target);
4380 if (retval != ERROR_OK) {
4381 free(samples);
4382 return retval;
4383 }
4384
4385 uint32_t start_address = 0;
4386 uint32_t end_address = 0;
4387 bool with_range = false;
4388 if (CMD_ARGC == 4) {
4389 with_range = true;
4390 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4391 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4392 }
4393
4394 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4395 with_range, start_address, end_address, target, duration_ms);
4396 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4397
4398 free(samples);
4399 return retval;
4400 }
4401
4402 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4403 {
4404 char *namebuf;
4405 Jim_Obj *obj_name, *obj_val;
4406 int result;
4407
4408 namebuf = alloc_printf("%s(%d)", varname, idx);
4409 if (!namebuf)
4410 return JIM_ERR;
4411
4412 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4413 jim_wide wide_val = val;
4414 obj_val = Jim_NewWideObj(interp, wide_val);
4415 if (!obj_name || !obj_val) {
4416 free(namebuf);
4417 return JIM_ERR;
4418 }
4419
4420 Jim_IncrRefCount(obj_name);
4421 Jim_IncrRefCount(obj_val);
4422 result = Jim_SetVariable(interp, obj_name, obj_val);
4423 Jim_DecrRefCount(interp, obj_name);
4424 Jim_DecrRefCount(interp, obj_val);
4425 free(namebuf);
4426 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4427 return result;
4428 }
4429
4430 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4431 {
4432 struct command_context *context;
4433 struct target *target;
4434
4435 context = current_command_context(interp);
4436 assert(context);
4437
4438 target = get_current_target(context);
4439 if (!target) {
4440 LOG_ERROR("mem2array: no current target");
4441 return JIM_ERR;
4442 }
4443
4444 return target_mem2array(interp, target, argc - 1, argv + 1);
4445 }
4446
4447 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4448 {
4449 int e;
4450
4451 /* argv[0] = name of array to receive the data
4452 * argv[1] = desired element width in bits
4453 * argv[2] = memory address
4454 * argv[3] = count of times to read
4455 * argv[4] = optional "phys"
4456 */
4457 if (argc < 4 || argc > 5) {
4458 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4459 return JIM_ERR;
4460 }
4461
4462 /* Arg 0: Name of the array variable */
4463 const char *varname = Jim_GetString(argv[0], NULL);
4464
4465 /* Arg 1: Bit width of one element */
4466 long l;
4467 e = Jim_GetLong(interp, argv[1], &l);
4468 if (e != JIM_OK)
4469 return e;
4470 const unsigned int width_bits = l;
4471
4472 if (width_bits != 8 &&
4473 width_bits != 16 &&
4474 width_bits != 32 &&
4475 width_bits != 64) {
4476 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4477 Jim_AppendStrings(interp, Jim_GetResult(interp),
4478 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4479 return JIM_ERR;
4480 }
4481 const unsigned int width = width_bits / 8;
4482
4483 /* Arg 2: Memory address */
4484 jim_wide wide_addr;
4485 e = Jim_GetWide(interp, argv[2], &wide_addr);
4486 if (e != JIM_OK)
4487 return e;
4488 target_addr_t addr = (target_addr_t)wide_addr;
4489
4490 /* Arg 3: Number of elements to read */
4491 e = Jim_GetLong(interp, argv[3], &l);
4492 if (e != JIM_OK)
4493 return e;
4494 size_t len = l;
4495
4496 /* Arg 4: phys */
4497 bool is_phys = false;
4498 if (argc > 4) {
4499 int str_len = 0;
4500 const char *phys = Jim_GetString(argv[4], &str_len);
4501 if (!strncmp(phys, "phys", str_len))
4502 is_phys = true;
4503 else
4504 return JIM_ERR;
4505 }
4506
4507 /* Argument checks */
4508 if (len == 0) {
4509 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4510 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
4511 return JIM_ERR;
4512 }
4513 if ((addr + (len * width)) < addr) {
4514 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4515 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4516 return JIM_ERR;
4517 }
4518 if (len > 65536) {
4519 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4520 Jim_AppendStrings(interp, Jim_GetResult(interp),
4521 "mem2array: too large read request, exceeds 64K items", NULL);
4522 return JIM_ERR;
4523 }
4524
4525 if ((width == 1) ||
4526 ((width == 2) && ((addr & 1) == 0)) ||
4527 ((width == 4) && ((addr & 3) == 0)) ||
4528 ((width == 8) && ((addr & 7) == 0))) {
4529 /* alignment correct */
4530 } else {
4531 char buf[100];
4532 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4533 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4534 addr,
4535 width);
4536 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4537 return JIM_ERR;
4538 }
4539
4540 /* Transfer loop */
4541
4542 /* index counter */
4543 size_t idx = 0;
4544
4545 const size_t buffersize = 4096;
4546 uint8_t *buffer = malloc(buffersize);
4547 if (!buffer)
4548 return JIM_ERR;
4549
4550 /* assume ok */
4551 e = JIM_OK;
4552 while (len) {
4553 /* Slurp... in buffer size chunks */
4554 const unsigned int max_chunk_len = buffersize / width;
4555 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4556
4557 int retval;
4558 if (is_phys)
4559 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4560 else
4561 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4562 if (retval != ERROR_OK) {
4563 /* BOO !*/
4564 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4565 addr,
4566 width,
4567 chunk_len);
4568 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4569 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4570 e = JIM_ERR;
4571 break;
4572 } else {
4573 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4574 uint64_t v = 0;
4575 switch (width) {
4576 case 8:
4577 v = target_buffer_get_u64(target, &buffer[i*width]);
4578 break;
4579 case 4:
4580 v = target_buffer_get_u32(target, &buffer[i*width]);
4581 break;
4582 case 2:
4583 v = target_buffer_get_u16(target, &buffer[i*width]);
4584 break;
4585 case 1:
4586 v = buffer[i] & 0x0ff;
4587 break;
4588 }
4589 new_u64_array_element(interp, varname, idx, v);
4590 }
4591 len -= chunk_len;
4592 addr += chunk_len * width;
4593 }
4594 }
4595
4596 free(buffer);
4597
4598 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4599
4600 return e;
4601 }
4602
4603 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4604 {
4605 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4606 if (!namebuf)
4607 return JIM_ERR;
4608
4609 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4610 if (!obj_name) {
4611 free(namebuf);
4612 return JIM_ERR;
4613 }
4614
4615 Jim_IncrRefCount(obj_name);
4616 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4617 Jim_DecrRefCount(interp, obj_name);
4618 free(namebuf);
4619 if (!obj_val)
4620 return JIM_ERR;
4621
4622 jim_wide wide_val;
4623 int result = Jim_GetWide(interp, obj_val, &wide_val);
4624 *val = wide_val;
4625 return result;
4626 }
4627
4628 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4629 {
4630 struct command_context *context;
4631 struct target *target;
4632
4633 context = current_command_context(interp);
4634 assert(context);
4635
4636 target = get_current_target(context);
4637 if (!target) {
4638 LOG_ERROR("array2mem: no current target");
4639 return JIM_ERR;
4640 }
4641
4642 return target_array2mem(interp, target, argc-1, argv + 1);
4643 }
4644
4645 static int target_array2mem(Jim_Interp *interp, struct target *target,
4646 int argc, Jim_Obj *const *argv)
4647 {
4648 int e;
4649
4650 /* argv[0] = name of array from which to read the data
4651 * argv[1] = desired element width in bits
4652 * argv[2] = memory address
4653 * argv[3] = number of elements to write
4654 * argv[4] = optional "phys"
4655 */
4656 if (argc < 4 || argc > 5) {
4657 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4658 return JIM_ERR;
4659 }
4660
4661 /* Arg 0: Name of the array variable */
4662 const char *varname = Jim_GetString(argv[0], NULL);
4663
4664 /* Arg 1: Bit width of one element */
4665 long l;
4666 e = Jim_GetLong(interp, argv[1], &l);
4667 if (e != JIM_OK)
4668 return e;
4669 const unsigned int width_bits = l;
4670
4671 if (width_bits != 8 &&
4672 width_bits != 16 &&
4673 width_bits != 32 &&
4674 width_bits != 64) {
4675 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4676 Jim_AppendStrings(interp, Jim_GetResult(interp),
4677 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4678 return JIM_ERR;
4679 }
4680 const unsigned int width = width_bits / 8;
4681
4682 /* Arg 2: Memory address */
4683 jim_wide wide_addr;
4684 e = Jim_GetWide(interp, argv[2], &wide_addr);
4685 if (e != JIM_OK)
4686 return e;
4687 target_addr_t addr = (target_addr_t)wide_addr;
4688
4689 /* Arg 3: Number of elements to write */
4690 e = Jim_GetLong(interp, argv[3], &l);
4691 if (e != JIM_OK)
4692 return e;
4693 size_t len = l;
4694
4695 /* Arg 4: Phys */
4696 bool is_phys = false;
4697 if (argc > 4) {
4698 int str_len = 0;
4699 const char *phys = Jim_GetString(argv[4], &str_len);
4700 if (!strncmp(phys, "phys", str_len))
4701 is_phys = true;
4702 else
4703 return JIM_ERR;
4704 }
4705
4706 /* Argument checks */
4707 if (len == 0) {
4708 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4709 Jim_AppendStrings(interp, Jim_GetResult(interp),
4710 "array2mem: zero width read?", NULL);
4711 return JIM_ERR;
4712 }
4713
4714 if ((addr + (len * width)) < addr) {
4715 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4716 Jim_AppendStrings(interp, Jim_GetResult(interp),
4717 "array2mem: addr + len - wraps to zero?", NULL);
4718 return JIM_ERR;
4719 }
4720
4721 if (len > 65536) {
4722 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4723 Jim_AppendStrings(interp, Jim_GetResult(interp),
4724 "array2mem: too large memory write request, exceeds 64K items", NULL);
4725 return JIM_ERR;
4726 }
4727
4728 if ((width == 1) ||
4729 ((width == 2) && ((addr & 1) == 0)) ||
4730 ((width == 4) && ((addr & 3) == 0)) ||
4731 ((width == 8) && ((addr & 7) == 0))) {
4732 /* alignment correct */
4733 } else {
4734 char buf[100];
4735 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4736 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4737 addr,
4738 width);
4739 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4740 return JIM_ERR;
4741 }
4742
4743 /* Transfer loop */
4744
4745 /* assume ok */
4746 e = JIM_OK;
4747
4748 const size_t buffersize = 4096;
4749 uint8_t *buffer = malloc(buffersize);
4750 if (!buffer)
4751 return JIM_ERR;
4752
4753 /* index counter */
4754 size_t idx = 0;
4755
4756 while (len) {
4757 /* Slurp... in buffer size chunks */
4758 const unsigned int max_chunk_len = buffersize / width;
4759
4760 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4761
4762 /* Fill the buffer */
4763 for (size_t i = 0; i < chunk_len; i++, idx++) {
4764 uint64_t v = 0;
4765 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4766 free(buffer);
4767 return JIM_ERR;
4768 }
4769 switch (width) {
4770 case 8:
4771 target_buffer_set_u64(target, &buffer[i * width], v);
4772 break;
4773 case 4:
4774 target_buffer_set_u32(target, &buffer[i * width], v);
4775 break;
4776 case 2:
4777 target_buffer_set_u16(target, &buffer[i * width], v);
4778 break;
4779 case 1:
4780 buffer[i] = v & 0x0ff;
4781 break;
4782 }
4783 }
4784 len -= chunk_len;
4785
4786 /* Write the buffer to memory */
4787 int retval;
4788 if (is_phys)
4789 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4790 else
4791 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4792 if (retval != ERROR_OK) {
4793 /* BOO !*/
4794 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4795 addr,
4796 width,
4797 chunk_len);
4798 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4799 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4800 e = JIM_ERR;
4801 break;
4802 }
4803 addr += chunk_len * width;
4804 }
4805
4806 free(buffer);
4807
4808 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4809
4810 return e;
4811 }
4812
/* FIX? should we propagate errors here rather than printing them
 * and continuing?
 */
/* Run every Tcl handler registered on this target for event 'e'.
 * Handlers execute with the command context's current target
 * temporarily overridden to 'target'. */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
					target->target_number,
					target_name(target),
					target_type_name(target),
					e,
					target_event_name(e),
					Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			/* restore even if the handler failed */
			cmd_ctx->current_target_override = saved_target_override;

			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* unwrap a Tcl 'return' into the handler's actual code */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
						target_event_name(e),
						target_name(target),
						Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
4861
4862 /**
4863 * Returns true only if the target has a handler for the specified event.
4864 */
4865 bool target_has_event_action(struct target *target, enum target_event event)
4866 {
4867 struct target_event_action *teap;
4868
4869 for (teap = target->event_action; teap; teap = teap->next) {
4870 if (teap->event == event)
4871 return true;
4872 }
4873 return false;
4874 }
4875
/* Identifiers for the '-option' parameters handled by target_configure();
 * the option-name spellings live in nvp_config_opts[]. */
enum target_cfg_param {
	TCFG_TYPE,			/* -type (read-only) */
	TCFG_EVENT,			/* -event */
	TCFG_WORK_AREA_VIRT,		/* -work-area-virt */
	TCFG_WORK_AREA_PHYS,		/* -work-area-phys */
	TCFG_WORK_AREA_SIZE,		/* -work-area-size */
	TCFG_WORK_AREA_BACKUP,		/* -work-area-backup */
	TCFG_ENDIAN,			/* -endian */
	TCFG_COREID,			/* -coreid */
	TCFG_CHAIN_POSITION,		/* -chain-position */
	TCFG_DBGBASE,			/* -dbgbase */
	TCFG_RTOS,			/* -rtos */
	TCFG_DEFER_EXAMINE,		/* -defer-examine */
	TCFG_GDB_PORT,			/* -gdb-port */
	TCFG_GDB_MAX_CONNECTIONS,	/* -gdb-max-connections */
};
4892
/* Maps '-option' names to TCFG_* values for target_configure();
 * terminated by a NULL-name sentinel entry. */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type", .value = TCFG_TYPE },
	{ .name = "-event", .value = TCFG_EVENT },
	{ .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian", .value = TCFG_ENDIAN },
	{ .name = "-coreid", .value = TCFG_COREID },
	{ .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase", .value = TCFG_DBGBASE },
	{ .name = "-rtos", .value = TCFG_RTOS },
	{ .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port", .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
4910
/* Parse and apply '-option ?value?' pairs for the per-target
 * 'configure' (goi->isconfigure set) and 'cget' (clear) commands.
 * Each loop iteration consumes exactly one option from goi; returns
 * JIM_OK when all arguments are consumed, or a Jim error code on the
 * first bad option/value. */
static int target_configure(struct jim_getopt_info *goi, struct target *target)
{
	struct jim_nvp *n;
	Jim_Obj *o;
	jim_wide w;
	int e;

	/* parse config or cget options ... */
	while (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);
		/* jim_getopt_debug(goi); */

		if (target->type->target_jim_configure) {
			/* target defines a configure function */
			/* target gets first dibs on parameters */
			e = (*(target->type->target_jim_configure))(target, goi);
			if (e == JIM_OK) {
				/* more? */
				continue;
			}
			if (e == JIM_ERR) {
				/* An error */
				return e;
			}
			/* otherwise we 'continue' below */
		}
		e = jim_getopt_nvp(goi, nvp_config_opts, &n);
		if (e != JIM_OK) {
			jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
			return e;
		}
		switch (n->value) {
		case TCFG_TYPE:
			/* not settable */
			if (goi->isconfigure) {
				Jim_SetResultFormatted(goi->interp,
						"not settable: %s", n->name);
				return JIM_ERR;
			} else {
				/* shared "cget takes no value" check; other
				 * cases jump here via 'goto no_params' */
no_params:
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}
			}
			Jim_SetResultString(goi->interp,
					target_type_name(target), -1);
			/* loop for more */
			break;
		case TCFG_EVENT:
			if (goi->argc == 0) {
				Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
				return JIM_ERR;
			}

			e = jim_getopt_nvp(goi, nvp_target_event, &n);
			if (e != JIM_OK) {
				jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
				return e;
			}

			if (goi->isconfigure) {
				if (goi->argc != 1) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
					return JIM_ERR;
				}
			} else {
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
					return JIM_ERR;
				}
			}

			{
				struct target_event_action *teap;

				teap = target->event_action;
				/* replace existing? */
				while (teap) {
					if (teap->event == (enum target_event)n->value)
						break;
					teap = teap->next;
				}

				if (goi->isconfigure) {
					/* START_DEPRECATED_TPIU */
					if (n->value == TARGET_EVENT_TRACE_CONFIG)
						LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
					/* END_DEPRECATED_TPIU */

					bool replace = true;
					if (!teap) {
						/* create new */
						teap = calloc(1, sizeof(*teap));
						replace = false;
					}
					teap->event = n->value;
					teap->interp = goi->interp;
					jim_getopt_obj(goi, &o);
					/* drop the old body before installing the new one */
					if (teap->body)
						Jim_DecrRefCount(teap->interp, teap->body);
					teap->body = Jim_DuplicateObj(goi->interp, o);
					/*
					 * FIXME:
					 * Tcl/TK - "tk events" have a nice feature.
					 * See the "BIND" command.
					 * We should support that here.
					 * You can specify %X and %Y in the event code.
					 * The idea is: %T - target name.
					 * The idea is: %N - target number
					 * The idea is: %E - event name.
					 */
					Jim_IncrRefCount(teap->body);

					if (!replace) {
						/* add to head of event list */
						teap->next = target->event_action;
						target->event_action = teap;
					}
					Jim_SetEmptyResult(goi->interp);
				} else {
					/* get */
					if (!teap)
						Jim_SetEmptyResult(goi->interp);
					else
						Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
				}
			}
			/* loop for more */
			break;

		case TCFG_WORK_AREA_VIRT:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_virt = w;
				target->working_area_virt_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_PHYS:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_phys = w;
				target->working_area_phys_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_SIZE:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_size = w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_BACKUP:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* make this exactly 1 or 0 */
				target->backup_working_area = (!!w);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
			/* loop for more e*/
			break;


		case TCFG_ENDIAN:
			if (goi->isconfigure) {
				e = jim_getopt_nvp(goi, nvp_target_endian, &n);
				if (e != JIM_OK) {
					jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
					return e;
				}
				target->endianness = n->value;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			if (!n->name) {
				/* unknown value stored: fall back to little endian */
				target->endianness = TARGET_LITTLE_ENDIAN;
				n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			}
			Jim_SetResultString(goi->interp, n->name, -1);
			/* loop for more */
			break;

		case TCFG_COREID:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->coreid = (int32_t)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
			/* loop for more */
			break;

		case TCFG_CHAIN_POSITION:
			if (goi->isconfigure) {
				Jim_Obj *o_t;
				struct jtag_tap *tap;

				if (target->has_dap) {
					Jim_SetResultString(goi->interp,
						"target requires -dap parameter instead of -chain-position!", -1);
					return JIM_ERR;
				}

				target_free_all_working_areas(target);
				e = jim_getopt_obj(goi, &o_t);
				if (e != JIM_OK)
					return e;
				tap = jtag_tap_by_jim_obj(goi->interp, o_t);
				if (!tap)
					return JIM_ERR;
				target->tap = tap;
				target->tap_configured = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
			/* loop for more e*/
			break;
		case TCFG_DBGBASE:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->dbgbase = (uint32_t)w;
				target->dbgbase_set = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
			/* loop for more */
			break;
		case TCFG_RTOS:
			/* RTOS */
			{
				int result = rtos_create(goi, target);
				if (result != JIM_OK)
					return result;
			}
			/* loop for more */
			break;

		case TCFG_DEFER_EXAMINE:
			/* DEFER_EXAMINE */
			target->defer_examine = true;
			/* loop for more */
			break;

		case TCFG_GDB_PORT:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
					return JIM_ERR;
				}

				const char *s;
				e = jim_getopt_string(goi, &s, NULL);
				if (e != JIM_OK)
					return e;
				/* replace any previous override */
				free(target->gdb_port_override);
				target->gdb_port_override = strdup(s);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
			/* loop for more */
			break;

		case TCFG_GDB_MAX_CONNECTIONS:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
					return JIM_ERR;
				}

				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* negative values mean "no limit" */
				target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
			break;
		}
	} /* while (goi->argc) */


	/* done - we return */
	return JIM_OK;
}
5247
5248 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5249 {
5250 struct command *c = jim_to_command(interp);
5251 struct jim_getopt_info goi;
5252
5253 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5254 goi.isconfigure = !strcmp(c->name, "configure");
5255 if (goi.argc < 1) {
5256 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5257 "missing: -option ...");
5258 return JIM_ERR;
5259 }
5260 struct command_context *cmd_ctx = current_command_context(interp);
5261 assert(cmd_ctx);
5262 struct target *target = get_current_target(cmd_ctx);
5263 return target_configure(&goi, target);
5264 }
5265
5266 static int jim_target_mem2array(Jim_Interp *interp,
5267 int argc, Jim_Obj *const *argv)
5268 {
5269 struct command_context *cmd_ctx = current_command_context(interp);
5270 assert(cmd_ctx);
5271 struct target *target = get_current_target(cmd_ctx);
5272 return target_mem2array(interp, target, argc - 1, argv + 1);
5273 }
5274
5275 static int jim_target_array2mem(Jim_Interp *interp,
5276 int argc, Jim_Obj *const *argv)
5277 {
5278 struct command_context *cmd_ctx = current_command_context(interp);
5279 assert(cmd_ctx);
5280 struct target *target = get_current_target(cmd_ctx);
5281 return target_array2mem(interp, target, argc - 1, argv + 1);
5282 }
5283
/* Helper: set the standard "TAP is disabled" error result and fail. */
static int jim_target_tap_disabled(Jim_Interp *interp)
{
	Jim_SetResultFormatted(interp, "[TAP is disabled]");
	return JIM_ERR;
}
5289
/* Per-target 'arp_examine' / 'examine' handler.  With the optional
 * 'allow-defer' argument, a target configured with -defer-examine is
 * left unexamined for a later manual arp_examine. */
static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	bool allow_defer = false;

	/* goi indexes past argv[0]; at most one extra argument is accepted */
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
	if (goi.argc > 1) {
		const char *cmd_name = Jim_GetString(argv[0], NULL);
		Jim_SetResultFormatted(goi.interp,
				"usage: %s ['allow-defer']", cmd_name);
		return JIM_ERR;
	}
	if (goi.argc > 0 &&
	    strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
		/* consume it */
		Jim_Obj *obj;
		int e = jim_getopt_obj(&goi, &obj);
		if (e != JIM_OK)
			return e;
		allow_defer = true;
	}

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	if (allow_defer && target->defer_examine) {
		LOG_INFO("Deferring arp_examine of %s", target_name(target));
		LOG_INFO("Use arp_examine command to examine it manually!");
		return JIM_OK;
	}

	int e = target->type->examine(target);
	if (e != ERROR_OK) {
		/* failed examine must not leave the target marked examined */
		target_reset_examined(target);
		return JIM_ERR;
	}

	target_set_examined(target);

	return JIM_OK;
}
5334
5335 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5336 {
5337 struct command_context *cmd_ctx = current_command_context(interp);
5338 assert(cmd_ctx);
5339 struct target *target = get_current_target(cmd_ctx);
5340
5341 Jim_SetResultBool(interp, target_was_examined(target));
5342 return JIM_OK;
5343 }
5344
5345 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5346 {
5347 struct command_context *cmd_ctx = current_command_context(interp);
5348 assert(cmd_ctx);
5349 struct target *target = get_current_target(cmd_ctx);
5350
5351 Jim_SetResultBool(interp, target->defer_examine);
5352 return JIM_OK;
5353 }
5354
5355 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5356 {
5357 if (argc != 1) {
5358 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5359 return JIM_ERR;
5360 }
5361 struct command_context *cmd_ctx = current_command_context(interp);
5362 assert(cmd_ctx);
5363 struct target *target = get_current_target(cmd_ctx);
5364
5365 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5366 return JIM_ERR;
5367
5368 return JIM_OK;
5369 }
5370
5371 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5372 {
5373 if (argc != 1) {
5374 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5375 return JIM_ERR;
5376 }
5377 struct command_context *cmd_ctx = current_command_context(interp);
5378 assert(cmd_ctx);
5379 struct target *target = get_current_target(cmd_ctx);
5380 if (!target->tap->enabled)
5381 return jim_target_tap_disabled(interp);
5382
5383 int e;
5384 if (!(target_was_examined(target)))
5385 e = ERROR_TARGET_NOT_EXAMINED;
5386 else
5387 e = target->type->poll(target);
5388 if (e != ERROR_OK)
5389 return JIM_ERR;
5390 return JIM_OK;
5391 }
5392
/* 'arp_reset' handler: assert or deassert reset on the current target.
 *
 * Usage: arp_reset assert|deassert BOOL
 * The boolean argument selects whether the target should halt after the
 * reset; it is stored in target->reset_halt for the reset hooks to use.
 */
static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);

	if (goi.argc != 2) {
		Jim_WrongNumArgs(interp, 0, argv,
				"([tT]|[fF]|assert|deassert) BOOL");
		return JIM_ERR;
	}

	/* assert/deassert selector, parsed against the nvp_assert table */
	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_assert, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
		return e;
	}
	/* the halt or not param */
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	if (!target->type->assert_reset || !target->type->deassert_reset) {
		Jim_SetResultFormatted(interp,
				"No target-specific reset for %s",
				target_name(target));
		return JIM_ERR;
	}

	/* a -defer-examine target must be re-examined after reset */
	if (target->defer_examine)
		target_reset_examined(target);

	/* determine if we should halt or not. */
	target->reset_halt = (a != 0);
	/* When this happens - all workareas are invalid. */
	target_free_all_working_areas_restore(target, 0);

	/* do the assert */
	if (n->value == NVP_ASSERT)
		e = target->type->assert_reset(target);
	else
		e = target->type->deassert_reset(target);
	return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
}
5444
5445 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5446 {
5447 if (argc != 1) {
5448 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5449 return JIM_ERR;
5450 }
5451 struct command_context *cmd_ctx = current_command_context(interp);
5452 assert(cmd_ctx);
5453 struct target *target = get_current_target(cmd_ctx);
5454 if (!target->tap->enabled)
5455 return jim_target_tap_disabled(interp);
5456 int e = target->type->halt(target);
5457 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5458 }
5459
/* 'arp_waitstate' handler: wait, with timeout, until the current target
 * reaches the named state.
 *
 * Usage: arp_waitstate <state_name> <timeout_in_msec>
 */
static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);

	/* params: <name> statename timeoutmsecs */
	if (goi.argc != 2) {
		const char *cmd_name = Jim_GetString(argv[0], NULL);
		Jim_SetResultFormatted(goi.interp,
				"%s <state_name> <timeout_in_msec>", cmd_name);
		return JIM_ERR;
	}

	/* map the state name onto its enum value */
	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
		return e;
	}
	/* timeout, in milliseconds */
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	e = target_wait_state(target, n->value, a);
	if (e != ERROR_OK) {
		/* "%#s" is Jim's conversion for a Jim_Obj argument */
		Jim_Obj *obj = Jim_NewIntObj(interp, e);
		Jim_SetResultFormatted(goi.interp,
				"target: %s wait %s fails (%#s) %s",
				target_name(target), n->name,
				obj, target_strerror_safe(e));
		return JIM_ERR;
	}
	return JIM_OK;
}
5500 /* List for human, Events defined for this target.
5501 * scripts/programs should use 'name cget -event NAME'
5502 */
5503 COMMAND_HANDLER(handle_target_event_list)
5504 {
5505 struct target *target = get_current_target(CMD_CTX);
5506 struct target_event_action *teap = target->event_action;
5507
5508 command_print(CMD, "Event actions for target (%d) %s\n",
5509 target->target_number,
5510 target_name(target));
5511 command_print(CMD, "%-25s | Body", "Event");
5512 command_print(CMD, "------------------------- | "
5513 "----------------------------------------");
5514 while (teap) {
5515 command_print(CMD, "%-25s | %s",
5516 target_event_name(teap->event),
5517 Jim_GetString(teap->body, NULL));
5518 teap = teap->next;
5519 }
5520 command_print(CMD, "***END***");
5521 return ERROR_OK;
5522 }
5523 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5524 {
5525 if (argc != 1) {
5526 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5527 return JIM_ERR;
5528 }
5529 struct command_context *cmd_ctx = current_command_context(interp);
5530 assert(cmd_ctx);
5531 struct target *target = get_current_target(cmd_ctx);
5532 Jim_SetResultString(interp, target_state_name(target), -1);
5533 return JIM_OK;
5534 }
5535 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5536 {
5537 struct jim_getopt_info goi;
5538 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5539 if (goi.argc != 1) {
5540 const char *cmd_name = Jim_GetString(argv[0], NULL);
5541 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5542 return JIM_ERR;
5543 }
5544 struct jim_nvp *n;
5545 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5546 if (e != JIM_OK) {
5547 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5548 return e;
5549 }
5550 struct command_context *cmd_ctx = current_command_context(interp);
5551 assert(cmd_ctx);
5552 struct target *target = get_current_target(cmd_ctx);
5553 target_handle_event(target, n->value);
5554 return JIM_OK;
5555 }
5556
5557 static const struct command_registration target_instance_command_handlers[] = {
5558 {
5559 .name = "configure",
5560 .mode = COMMAND_ANY,
5561 .jim_handler = jim_target_configure,
5562 .help = "configure a new target for use",
5563 .usage = "[target_attribute ...]",
5564 },
5565 {
5566 .name = "cget",
5567 .mode = COMMAND_ANY,
5568 .jim_handler = jim_target_configure,
5569 .help = "returns the specified target attribute",
5570 .usage = "target_attribute",
5571 },
5572 {
5573 .name = "mwd",
5574 .handler = handle_mw_command,
5575 .mode = COMMAND_EXEC,
5576 .help = "Write 64-bit word(s) to target memory",
5577 .usage = "address data [count]",
5578 },
5579 {
5580 .name = "mww",
5581 .handler = handle_mw_command,
5582 .mode = COMMAND_EXEC,
5583 .help = "Write 32-bit word(s) to target memory",
5584 .usage = "address data [count]",
5585 },
5586 {
5587 .name = "mwh",
5588 .handler = handle_mw_command,
5589 .mode = COMMAND_EXEC,
5590 .help = "Write 16-bit half-word(s) to target memory",
5591 .usage = "address data [count]",
5592 },
5593 {
5594 .name = "mwb",
5595 .handler = handle_mw_command,
5596 .mode = COMMAND_EXEC,
5597 .help = "Write byte(s) to target memory",
5598 .usage = "address data [count]",
5599 },
5600 {
5601 .name = "mdd",
5602 .handler = handle_md_command,
5603 .mode = COMMAND_EXEC,
5604 .help = "Display target memory as 64-bit words",
5605 .usage = "address [count]",
5606 },
5607 {
5608 .name = "mdw",
5609 .handler = handle_md_command,
5610 .mode = COMMAND_EXEC,
5611 .help = "Display target memory as 32-bit words",
5612 .usage = "address [count]",
5613 },
5614 {
5615 .name = "mdh",
5616 .handler = handle_md_command,
5617 .mode = COMMAND_EXEC,
5618 .help = "Display target memory as 16-bit half-words",
5619 .usage = "address [count]",
5620 },
5621 {
5622 .name = "mdb",
5623 .handler = handle_md_command,
5624 .mode = COMMAND_EXEC,
5625 .help = "Display target memory as 8-bit bytes",
5626 .usage = "address [count]",
5627 },
5628 {
5629 .name = "array2mem",
5630 .mode = COMMAND_EXEC,
5631 .jim_handler = jim_target_array2mem,
5632 .help = "Writes Tcl array of 8/16/32 bit numbers "
5633 "to target memory",
5634 .usage = "arrayname bitwidth address count",
5635 },
5636 {
5637 .name = "mem2array",
5638 .mode = COMMAND_EXEC,
5639 .jim_handler = jim_target_mem2array,
5640 .help = "Loads Tcl array of 8/16/32 bit numbers "
5641 "from target memory",
5642 .usage = "arrayname bitwidth address count",
5643 },
5644 {
5645 .name = "eventlist",
5646 .handler = handle_target_event_list,
5647 .mode = COMMAND_EXEC,
5648 .help = "displays a table of events defined for this target",
5649 .usage = "",
5650 },
5651 {
5652 .name = "curstate",
5653 .mode = COMMAND_EXEC,
5654 .jim_handler = jim_target_current_state,
5655 .help = "displays the current state of this target",
5656 },
5657 {
5658 .name = "arp_examine",
5659 .mode = COMMAND_EXEC,
5660 .jim_handler = jim_target_examine,
5661 .help = "used internally for reset processing",
5662 .usage = "['allow-defer']",
5663 },
5664 {
5665 .name = "was_examined",
5666 .mode = COMMAND_EXEC,
5667 .jim_handler = jim_target_was_examined,
5668 .help = "used internally for reset processing",
5669 },
5670 {
5671 .name = "examine_deferred",
5672 .mode = COMMAND_EXEC,
5673 .jim_handler = jim_target_examine_deferred,
5674 .help = "used internally for reset processing",
5675 },
5676 {
5677 .name = "arp_halt_gdb",
5678 .mode = COMMAND_EXEC,
5679 .jim_handler = jim_target_halt_gdb,
5680 .help = "used internally for reset processing to halt GDB",
5681 },
5682 {
5683 .name = "arp_poll",
5684 .mode = COMMAND_EXEC,
5685 .jim_handler = jim_target_poll,
5686 .help = "used internally for reset processing",
5687 },
5688 {
5689 .name = "arp_reset",
5690 .mode = COMMAND_EXEC,
5691 .jim_handler = jim_target_reset,
5692 .help = "used internally for reset processing",
5693 },
5694 {
5695 .name = "arp_halt",
5696 .mode = COMMAND_EXEC,
5697 .jim_handler = jim_target_halt,
5698 .help = "used internally for reset processing",
5699 },
5700 {
5701 .name = "arp_waitstate",
5702 .mode = COMMAND_EXEC,
5703 .jim_handler = jim_target_wait_state,
5704 .help = "used internally for reset processing",
5705 },
5706 {
5707 .name = "invoke-event",
5708 .mode = COMMAND_EXEC,
5709 .jim_handler = jim_target_invoke_event,
5710 .help = "invoke handler for specified event",
5711 .usage = "event_name",
5712 },
5713 COMMAND_REGISTRATION_DONE
5714 };
5715
/* Implements 'target create <name> <type> [options ...]'.
 *
 * Allocates and initializes a new struct target, applies the remaining
 * getopt arguments as "configure" options, runs the target type's
 * create hook, and registers the per-target command group named after
 * the target. On success the new target becomes the current target of
 * the command context. On any failure all partially allocated resources
 * are released and JIM_ERR (or a getopt error code) is returned.
 */
static int target_create(struct jim_getopt_info *goi)
{
	Jim_Obj *new_cmd;
	Jim_Cmd *cmd;
	const char *cp;
	int e;
	int x;
	struct target *target;
	struct command_context *cmd_ctx;

	cmd_ctx = current_command_context(goi->interp);
	assert(cmd_ctx);

	if (goi->argc < 3) {
		Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
		return JIM_ERR;
	}

	/* COMMAND */
	jim_getopt_obj(goi, &new_cmd);
	/* does this command exist? */
	cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
	if (cmd) {
		cp = Jim_GetString(new_cmd, NULL);
		Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
		return JIM_ERR;
	}

	/* TYPE */
	e = jim_getopt_string(goi, &cp, NULL);
	if (e != JIM_OK)
		return e;
	/* the active transport may substitute its own target type for 'cp' */
	struct transport *tr = get_current_transport();
	if (tr->override_target) {
		e = tr->override_target(&cp);
		if (e != ERROR_OK) {
			LOG_ERROR("The selected transport doesn't support this target");
			return JIM_ERR;
		}
		LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
	}
	/* now does target type exist */
	for (x = 0 ; target_types[x] ; x++) {
		if (strcmp(cp, target_types[x]->name) == 0) {
			/* found */
			break;
		}
	}
	if (!target_types[x]) {
		/* unknown type: build a "try one of a, b or c" message */
		Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
		for (x = 0 ; target_types[x] ; x++) {
			if (target_types[x + 1]) {
				Jim_AppendStrings(goi->interp,
						Jim_GetResult(goi->interp),
						target_types[x]->name,
						", ", NULL);
			} else {
				Jim_AppendStrings(goi->interp,
						Jim_GetResult(goi->interp),
						" or ",
						target_types[x]->name, NULL);
			}
		}
		return JIM_ERR;
	}

	/* Create it */
	target = calloc(1, sizeof(struct target));
	if (!target) {
		LOG_ERROR("Out of memory");
		return JIM_ERR;
	}

	/* set target number */
	target->target_number = new_target_number();

	/* allocate memory for each unique target type */
	target->type = malloc(sizeof(struct target_type));
	if (!target->type) {
		LOG_ERROR("Out of memory");
		free(target);
		return JIM_ERR;
	}

	/* private copy: per-instance code may patch individual hooks later */
	memcpy(target->type, target_types[x], sizeof(struct target_type));

	/* default to first core, override with -coreid */
	target->coreid = 0;

	target->working_area = 0x0;
	target->working_area_size = 0x0;
	target->working_areas = NULL;
	target->backup_working_area = 0;

	target->state = TARGET_UNKNOWN;
	target->debug_reason = DBG_REASON_UNDEFINED;
	target->reg_cache = NULL;
	target->breakpoints = NULL;
	target->watchpoints = NULL;
	target->next = NULL;
	target->arch_info = NULL;

	target->verbose_halt_msg = true;

	target->halt_issued = false;

	/* initialize trace information */
	target->trace_info = calloc(1, sizeof(struct trace));
	if (!target->trace_info) {
		LOG_ERROR("Out of memory");
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	target->dbgmsg = NULL;
	target->dbg_msg_enabled = 0;

	target->endianness = TARGET_ENDIAN_UNKNOWN;

	target->rtos = NULL;
	target->rtos_auto_detect = false;

	target->gdb_port_override = NULL;
	target->gdb_max_connections = 1;

	/* Do the rest as "configure" options */
	goi->isconfigure = 1;
	e = target_configure(goi, target);

	if (e == JIM_OK) {
		/* a DAP-based target requires -dap; others require -chain-position */
		if (target->has_dap) {
			if (!target->dap_configured) {
				Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		} else {
			if (!target->tap_configured) {
				Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		}
		/* tap must be set after target was configured */
		if (!target->tap)
			e = JIM_ERR;
	}

	if (e != JIM_OK) {
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return e;
	}

	if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
		/* default endian to little if not specified */
		target->endianness = TARGET_LITTLE_ENDIAN;
	}

	cp = Jim_GetString(new_cmd, NULL);
	target->cmd_name = strdup(cp);
	if (!target->cmd_name) {
		LOG_ERROR("Out of memory");
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* give the target type a chance to set up its private state */
	if (target->type->target_create) {
		e = (*(target->type->target_create))(target, goi->interp);
		if (e != ERROR_OK) {
			LOG_DEBUG("target_create failed");
			free(target->cmd_name);
			rtos_destroy(target);
			free(target->gdb_port_override);
			free(target->trace_info);
			free(target->type);
			free(target);
			return JIM_ERR;
		}
	}

	/* create the target specific commands */
	if (target->type->commands) {
		e = register_commands(cmd_ctx, NULL, target->type->commands);
		if (e != ERROR_OK)
			LOG_ERROR("unable to register '%s' commands", cp);
	}

	/* now - create the new target name command */
	const struct command_registration target_subcommands[] = {
		{
			.chain = target_instance_command_handlers,
		},
		{
			.chain = target->type->commands,
		},
		COMMAND_REGISTRATION_DONE
	};
	const struct command_registration target_commands[] = {
		{
			.name = cp,
			.mode = COMMAND_ANY,
			.help = "target command group",
			.usage = "",
			.chain = target_subcommands,
		},
		COMMAND_REGISTRATION_DONE
	};
	e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
	if (e != ERROR_OK) {
		if (target->type->deinit_target)
			target->type->deinit_target(target);
		free(target->cmd_name);
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* append to end of list */
	append_to_list_all_targets(target);

	/* the freshly created target becomes the current one */
	cmd_ctx->current_target = target;
	return JIM_OK;
}
5949
5950 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5951 {
5952 if (argc != 1) {
5953 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5954 return JIM_ERR;
5955 }
5956 struct command_context *cmd_ctx = current_command_context(interp);
5957 assert(cmd_ctx);
5958
5959 struct target *target = get_current_target_or_null(cmd_ctx);
5960 if (target)
5961 Jim_SetResultString(interp, target_name(target), -1);
5962 return JIM_OK;
5963 }
5964
5965 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5966 {
5967 if (argc != 1) {
5968 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5969 return JIM_ERR;
5970 }
5971 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5972 for (unsigned x = 0; target_types[x]; x++) {
5973 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5974 Jim_NewStringObj(interp, target_types[x]->name, -1));
5975 }
5976 return JIM_OK;
5977 }
5978
5979 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5980 {
5981 if (argc != 1) {
5982 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5983 return JIM_ERR;
5984 }
5985 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5986 struct target *target = all_targets;
5987 while (target) {
5988 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5989 Jim_NewStringObj(interp, target_name(target), -1));
5990 target = target->next;
5991 }
5992 return JIM_OK;
5993 }
5994
5995 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5996 {
5997 int i;
5998 const char *targetname;
5999 int retval, len;
6000 struct target *target = NULL;
6001 struct target_list *head, *curr, *new;
6002 curr = NULL;
6003 head = NULL;
6004
6005 retval = 0;
6006 LOG_DEBUG("%d", argc);
6007 /* argv[1] = target to associate in smp
6008 * argv[2] = target to associate in smp
6009 * argv[3] ...
6010 */
6011
6012 for (i = 1; i < argc; i++) {
6013
6014 targetname = Jim_GetString(argv[i], &len);
6015 target = get_target(targetname);
6016 LOG_DEBUG("%s ", targetname);
6017 if (target) {
6018 new = malloc(sizeof(struct target_list));
6019 new->target = target;
6020 new->next = NULL;
6021 if (!head) {
6022 head = new;
6023 curr = head;
6024 } else {
6025 curr->next = new;
6026 curr = new;
6027 }
6028 }
6029 }
6030 /* now parse the list of cpu and put the target in smp mode*/
6031 curr = head;
6032
6033 while (curr) {
6034 target = curr->target;
6035 target->smp = 1;
6036 target->head = head;
6037 curr = curr->next;
6038 }
6039
6040 if (target && target->rtos)
6041 retval = rtos_smp_init(head->target);
6042
6043 return retval;
6044 }
6045
6046
6047 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6048 {
6049 struct jim_getopt_info goi;
6050 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6051 if (goi.argc < 3) {
6052 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6053 "<name> <target_type> [<target_options> ...]");
6054 return JIM_ERR;
6055 }
6056 return target_create(&goi);
6057 }
6058
6059 static const struct command_registration target_subcommand_handlers[] = {
6060 {
6061 .name = "init",
6062 .mode = COMMAND_CONFIG,
6063 .handler = handle_target_init_command,
6064 .help = "initialize targets",
6065 .usage = "",
6066 },
6067 {
6068 .name = "create",
6069 .mode = COMMAND_CONFIG,
6070 .jim_handler = jim_target_create,
6071 .usage = "name type '-chain-position' name [options ...]",
6072 .help = "Creates and selects a new target",
6073 },
6074 {
6075 .name = "current",
6076 .mode = COMMAND_ANY,
6077 .jim_handler = jim_target_current,
6078 .help = "Returns the currently selected target",
6079 },
6080 {
6081 .name = "types",
6082 .mode = COMMAND_ANY,
6083 .jim_handler = jim_target_types,
6084 .help = "Returns the available target types as "
6085 "a list of strings",
6086 },
6087 {
6088 .name = "names",
6089 .mode = COMMAND_ANY,
6090 .jim_handler = jim_target_names,
6091 .help = "Returns the names of all targets as a list of strings",
6092 },
6093 {
6094 .name = "smp",
6095 .mode = COMMAND_ANY,
6096 .jim_handler = jim_target_smp,
6097 .usage = "targetname1 targetname2 ...",
6098 .help = "gather several target in a smp list"
6099 },
6100
6101 COMMAND_REGISTRATION_DONE
6102 };
6103
/* One contiguous chunk of an image staged in host memory by
 * 'fast_load_image', for later transfer to the target by 'fast_load'. */
struct fast_load {
	target_addr_t address;	/* destination address on the target */
	uint8_t *data;		/* host copy of the section data (malloc'd) */
	int length;		/* number of bytes in 'data' */

};

/* number of entries in 'fastload' below */
static int fastload_num;
/* staged image chunks, or NULL when no image has been staged */
static struct fast_load *fastload;
6113
6114 static void free_fastload(void)
6115 {
6116 if (fastload) {
6117 for (int i = 0; i < fastload_num; i++)
6118 free(fastload[i].data);
6119 free(fastload);
6120 fastload = NULL;
6121 }
6122 }
6123
6124 COMMAND_HANDLER(handle_fast_load_image_command)
6125 {
6126 uint8_t *buffer;
6127 size_t buf_cnt;
6128 uint32_t image_size;
6129 target_addr_t min_address = 0;
6130 target_addr_t max_address = -1;
6131
6132 struct image image;
6133
6134 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6135 &image, &min_address, &max_address);
6136 if (retval != ERROR_OK)
6137 return retval;
6138
6139 struct duration bench;
6140 duration_start(&bench);
6141
6142 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6143 if (retval != ERROR_OK)
6144 return retval;
6145
6146 image_size = 0x0;
6147 retval = ERROR_OK;
6148 fastload_num = image.num_sections;
6149 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6150 if (!fastload) {
6151 command_print(CMD, "out of memory");
6152 image_close(&image);
6153 return ERROR_FAIL;
6154 }
6155 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6156 for (unsigned int i = 0; i < image.num_sections; i++) {
6157 buffer = malloc(image.sections[i].size);
6158 if (!buffer) {
6159 command_print(CMD, "error allocating buffer for section (%d bytes)",
6160 (int)(image.sections[i].size));
6161 retval = ERROR_FAIL;
6162 break;
6163 }
6164
6165 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6166 if (retval != ERROR_OK) {
6167 free(buffer);
6168 break;
6169 }
6170
6171 uint32_t offset = 0;
6172 uint32_t length = buf_cnt;
6173
6174 /* DANGER!!! beware of unsigned comparison here!!! */
6175
6176 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6177 (image.sections[i].base_address < max_address)) {
6178 if (image.sections[i].base_address < min_address) {
6179 /* clip addresses below */
6180 offset += min_address-image.sections[i].base_address;
6181 length -= offset;
6182 }
6183
6184 if (image.sections[i].base_address + buf_cnt > max_address)
6185 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6186
6187 fastload[i].address = image.sections[i].base_address + offset;
6188 fastload[i].data = malloc(length);
6189 if (!fastload[i].data) {
6190 free(buffer);
6191 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6192 length);
6193 retval = ERROR_FAIL;
6194 break;
6195 }
6196 memcpy(fastload[i].data, buffer + offset, length);
6197 fastload[i].length = length;
6198
6199 image_size += length;
6200 command_print(CMD, "%u bytes written at address 0x%8.8x",
6201 (unsigned int)length,
6202 ((unsigned int)(image.sections[i].base_address + offset)));
6203 }
6204
6205 free(buffer);
6206 }
6207
6208 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6209 command_print(CMD, "Loaded %" PRIu32 " bytes "
6210 "in %fs (%0.3f KiB/s)", image_size,
6211 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6212
6213 command_print(CMD,
6214 "WARNING: image has not been loaded to target!"
6215 "You can issue a 'fast_load' to finish loading.");
6216 }
6217
6218 image_close(&image);
6219
6220 if (retval != ERROR_OK)
6221 free_fastload();
6222
6223 return retval;
6224 }
6225
6226 COMMAND_HANDLER(handle_fast_load_command)
6227 {
6228 if (CMD_ARGC > 0)
6229 return ERROR_COMMAND_SYNTAX_ERROR;
6230 if (!fastload) {
6231 LOG_ERROR("No image in memory");
6232 return ERROR_FAIL;
6233 }
6234 int i;
6235 int64_t ms = timeval_ms();
6236 int size = 0;
6237 int retval = ERROR_OK;
6238 for (i = 0; i < fastload_num; i++) {
6239 struct target *target = get_current_target(CMD_CTX);
6240 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6241 (unsigned int)(fastload[i].address),
6242 (unsigned int)(fastload[i].length));
6243 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6244 if (retval != ERROR_OK)
6245 break;
6246 size += fastload[i].length;
6247 }
6248 if (retval == ERROR_OK) {
6249 int64_t after = timeval_ms();
6250 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6251 }
6252 return retval;
6253 }
6254
/* Top-level commands installed by target_register_commands(): the global
 * 'targets' selector/lister and the 'target' configuration group. */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
6273
/* Register the top-level 'target' and 'targets' commands with the given
 * command context. Called once during OpenOCD startup. */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6278
/* When true, nag the user after each reset about settings that could
 * improve performance; toggled by the 'reset_nag' command below. */
static bool target_reset_nag = true;

/* Accessor for the reset-nag flag, used by reset handling code. */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
6285
/* 'reset_nag' command: parse an enable/disable boolean into the
 * target_reset_nag flag (or report its current value). */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
			&target_reset_nag, "Nag after each reset about options to improve "
			"performance");
}
6292
6293 COMMAND_HANDLER(handle_ps_command)
6294 {
6295 struct target *target = get_current_target(CMD_CTX);
6296 char *display;
6297 if (target->state != TARGET_HALTED) {
6298 LOG_INFO("target not halted !!");
6299 return ERROR_OK;
6300 }
6301
6302 if ((target->rtos) && (target->rtos->type)
6303 && (target->rtos->type->ps_command)) {
6304 display = target->rtos->type->ps_command(target);
6305 command_print(CMD, "%s", display);
6306 free(display);
6307 return ERROR_OK;
6308 } else {
6309 LOG_INFO("failed");
6310 return ERROR_TARGET_FAILURE;
6311 }
6312 }
6313
/* Print an optional label followed by 'size' bytes of 'buf' rendered as
 * space-separated hex pairs, terminated by a newline. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
	if (text)
		command_print_sameline(cmd, "%s", text);

	int i = 0;
	while (i < size) {
		command_print_sameline(cmd, " %02x", buf[i]);
		i++;
	}
	command_print(cmd, " ");
}
6322
6323 COMMAND_HANDLER(handle_test_mem_access_command)
6324 {
6325 struct target *target = get_current_target(CMD_CTX);
6326 uint32_t test_size;
6327 int retval = ERROR_OK;
6328
6329 if (target->state != TARGET_HALTED) {
6330 LOG_INFO("target not halted !!");
6331 return ERROR_FAIL;
6332 }
6333
6334 if (CMD_ARGC != 1)
6335 return ERROR_COMMAND_SYNTAX_ERROR;
6336
6337 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6338
6339 /* Test reads */
6340 size_t num_bytes = test_size + 4;
6341
6342 struct working_area *wa = NULL;
6343 retval = target_alloc_working_area(target, num_bytes, &wa);
6344 if (retval != ERROR_OK) {
6345 LOG_ERROR("Not enough working area");
6346 return ERROR_FAIL;
6347 }
6348
6349 uint8_t *test_pattern = malloc(num_bytes);
6350
6351 for (size_t i = 0; i < num_bytes; i++)
6352 test_pattern[i] = rand();
6353
6354 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6355 if (retval != ERROR_OK) {
6356 LOG_ERROR("Test pattern write failed");
6357 goto out;
6358 }
6359
6360 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6361 for (int size = 1; size <= 4; size *= 2) {
6362 for (int offset = 0; offset < 4; offset++) {
6363 uint32_t count = test_size / size;
6364 size_t host_bufsiz = (count + 2) * size + host_offset;
6365 uint8_t *read_ref = malloc(host_bufsiz);
6366 uint8_t *read_buf = malloc(host_bufsiz);
6367
6368 for (size_t i = 0; i < host_bufsiz; i++) {
6369 read_ref[i] = rand();
6370 read_buf[i] = read_ref[i];
6371 }
6372 command_print_sameline(CMD,
6373 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6374 size, offset, host_offset ? "un" : "");
6375
6376 struct duration bench;
6377 duration_start(&bench);
6378
6379 retval = target_read_memory(target, wa->address + offset, size, count,
6380 read_buf + size + host_offset);
6381
6382 duration_measure(&bench);
6383
6384 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6385 command_print(CMD, "Unsupported alignment");
6386 goto next;
6387 } else if (retval != ERROR_OK) {
6388 command_print(CMD, "Memory read failed");
6389 goto next;
6390 }
6391
6392 /* replay on host */
6393 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6394
6395 /* check result */
6396 int result = memcmp(read_ref, read_buf, host_bufsiz);
6397 if (result == 0) {
6398 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6399 duration_elapsed(&bench),
6400 duration_kbps(&bench, count * size));
6401 } else {
6402 command_print(CMD, "Compare failed");
6403 binprint(CMD, "ref:", read_ref, host_bufsiz);
6404 binprint(CMD, "buf:", read_buf, host_bufsiz);
6405 }
6406 next:
6407 free(read_ref);
6408 free(read_buf);
6409 }
6410 }
6411 }
6412
6413 out:
6414 free(test_pattern);
6415
6416 target_free_working_area(target, wa);
6417
6418 /* Test writes */
6419 num_bytes = test_size + 4 + 4 + 4;
6420
6421 retval = target_alloc_working_area(target, num_bytes, &wa);
6422 if (retval != ERROR_OK) {
6423 LOG_ERROR("Not enough working area");
6424 return ERROR_FAIL;
6425 }
6426
6427 test_pattern = malloc(num_bytes);
6428
6429 for (size_t i = 0; i < num_bytes; i++)
6430 test_pattern[i] = rand();
6431
6432 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6433 for (int size = 1; size <= 4; size *= 2) {
6434 for (int offset = 0; offset < 4; offset++) {
6435 uint32_t count = test_size / size;
6436 size_t host_bufsiz = count * size + host_offset;
6437 uint8_t *read_ref = malloc(num_bytes);
6438 uint8_t *read_buf = malloc(num_bytes);
6439 uint8_t *write_buf = malloc(host_bufsiz);
6440
6441 for (size_t i = 0; i < host_bufsiz; i++)
6442 write_buf[i] = rand();
6443 command_print_sameline(CMD,
6444 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6445 size, offset, host_offset ? "un" : "");
6446
6447 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6448 if (retval != ERROR_OK) {
6449 command_print(CMD, "Test pattern write failed");
6450 goto nextw;
6451 }
6452
6453 /* replay on host */
6454 memcpy(read_ref, test_pattern, num_bytes);
6455 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6456
6457 struct duration bench;
6458 duration_start(&bench);
6459
6460 retval = target_write_memory(target, wa->address + size + offset, size, count,
6461 write_buf + host_offset);
6462
6463 duration_measure(&bench);
6464
6465 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6466 command_print(CMD, "Unsupported alignment");
6467 goto nextw;
6468 } else if (retval != ERROR_OK) {
6469 command_print(CMD, "Memory write failed");
6470 goto nextw;
6471 }
6472
6473 /* read back */
6474 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6475 if (retval != ERROR_OK) {
6476 command_print(CMD, "Test pattern write failed");
6477 goto nextw;
6478 }
6479
6480 /* check result */
6481 int result = memcmp(read_ref, read_buf, num_bytes);
6482 if (result == 0) {
6483 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6484 duration_elapsed(&bench),
6485 duration_kbps(&bench, count * size));
6486 } else {
6487 command_print(CMD, "Compare failed");
6488 binprint(CMD, "ref:", read_ref, num_bytes);
6489 binprint(CMD, "buf:", read_buf, num_bytes);
6490 }
6491 nextw:
6492 free(read_ref);
6493 free(read_buf);
6494 }
6495 }
6496 }
6497
6498 free(test_pattern);
6499
6500 target_free_working_area(target, wa);
6501 return retval;
6502 }
6503
/*
 * Per-target user command table: run control, register and memory access,
 * breakpoints/watchpoints, image load/verify, and memory self-test helpers.
 * Registered into each command context by target_register_user_commands().
 */
static const struct command_registration target_exec_command_handlers[] = {
	{
		.name = "fast_load_image",
		.handler = handle_fast_load_image_command,
		.mode = COMMAND_ANY,
		.help = "Load image into server memory for later use by "
			"fast_load; primarily for profiling",
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address [max_length]]",
	},
	{
		.name = "fast_load",
		.handler = handle_fast_load_command,
		.mode = COMMAND_EXEC,
		.help = "loads active fast load image to current target "
			"- mainly for profiling purposes",
		.usage = "",
	},
	{
		.name = "profile",
		.handler = handle_profile_command,
		.mode = COMMAND_EXEC,
		.usage = "seconds filename [start end]",
		.help = "profiling samples the CPU PC",
	},
	/** @todo don't register virt2phys() unless target supports it */
	{
		.name = "virt2phys",
		.handler = handle_virt2phys_command,
		.mode = COMMAND_ANY,
		.help = "translate a virtual address into a physical address",
		.usage = "virtual_address",
	},
	/* Run control and register access. */
	{
		.name = "reg",
		.handler = handle_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display (reread from target with \"force\") or set a register; "
			"with no arguments, displays all registers and their values",
		.usage = "[(register_number|register_name) [(value|'force')]]",
	},
	{
		.name = "poll",
		.handler = handle_poll_command,
		.mode = COMMAND_EXEC,
		.help = "poll target state; or reconfigure background polling",
		.usage = "['on'|'off']",
	},
	{
		.name = "wait_halt",
		.handler = handle_wait_halt_command,
		.mode = COMMAND_EXEC,
		.help = "wait up to the specified number of milliseconds "
			"(default 5000) for a previously requested halt",
		.usage = "[milliseconds]",
	},
	{
		.name = "halt",
		.handler = handle_halt_command,
		.mode = COMMAND_EXEC,
		.help = "request target to halt, then wait up to the specified "
			"number of milliseconds (default 5000) for it to complete",
		.usage = "[milliseconds]",
	},
	{
		.name = "resume",
		.handler = handle_resume_command,
		.mode = COMMAND_EXEC,
		.help = "resume target execution from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "reset",
		.handler = handle_reset_command,
		.mode = COMMAND_EXEC,
		.usage = "[run|halt|init]",
		.help = "Reset all targets into the specified mode. "
			"Default reset mode is run, if not given.",
	},
	{
		.name = "soft_reset_halt",
		.handler = handle_soft_reset_halt_command,
		.mode = COMMAND_EXEC,
		.usage = "",
		.help = "halt the target and do a soft reset",
	},
	{
		.name = "step",
		.handler = handle_step_command,
		.mode = COMMAND_EXEC,
		.help = "step one instruction from current PC or address",
		.usage = "[address]",
	},
	/* Memory display: mdd/mdw/mdh/mdb share handle_md_command, which
	 * derives the access width from the command name. */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory double-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory half-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory bytes",
		.usage = "['phys'] address [count]",
	},
	/* Memory write: mwd/mww/mwh/mwb share handle_mw_command likewise. */
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory double-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory half-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory byte",
		.usage = "['phys'] address value [count]",
	},
	/* Breakpoints and watchpoints. */
	{
		.name = "bp",
		.handler = handle_bp_command,
		.mode = COMMAND_EXEC,
		.help = "list or set hardware or software breakpoint",
		.usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
	},
	{
		.name = "rbp",
		.handler = handle_rbp_command,
		.mode = COMMAND_EXEC,
		.help = "remove breakpoint",
		.usage = "'all' | address",
	},
	{
		.name = "wp",
		.handler = handle_wp_command,
		.mode = COMMAND_EXEC,
		.help = "list (no params) or create watchpoints",
		.usage = "[address length [('r'|'w'|'a') value [mask]]]",
	},
	{
		.name = "rwp",
		.handler = handle_rwp_command,
		.mode = COMMAND_EXEC,
		.help = "remove watchpoint",
		.usage = "address",
	},
	/* Image load/dump/verify. */
	{
		.name = "load_image",
		.handler = handle_load_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address] [max_length]",
	},
	{
		.name = "dump_image",
		.handler = handle_dump_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address size",
	},
	{
		.name = "verify_image_checksum",
		.handler = handle_verify_image_checksum_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "verify_image",
		.handler = handle_verify_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "test_image",
		.handler = handle_test_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	/* TCL array <-> target memory bridges (jim handlers, no CMD_* ctx). */
	{
		.name = "mem2array",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mem2array,
		.help = "read 8/16/32 bit memory and return as a TCL array "
			"for script processing",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "array2mem",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_array2mem,
		.help = "convert a TCL array to memory locations "
			"and write the 8/16/32 bit values",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "reset_nag",
		.handler = handle_target_reset_nag,
		.mode = COMMAND_ANY,
		.help = "Nag after each reset about options that could have been "
			"enabled to improve performance.",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "ps",
		.handler = handle_ps_command,
		.mode = COMMAND_EXEC,
		.help = "list all tasks",
		.usage = "",
	},
	{
		.name = "test_mem_access",
		.handler = handle_test_mem_access_command,
		.mode = COMMAND_EXEC,
		.help = "Test the target's memory access functions",
		.usage = "size",
	},

	COMMAND_REGISTRATION_DONE
};
6753 static int target_register_user_commands(struct command_context *cmd_ctx)
6754 {
6755 int retval = ERROR_OK;
6756 retval = target_request_register_commands(cmd_ctx);
6757 if (retval != ERROR_OK)
6758 return retval;
6759
6760 retval = trace_register_commands(cmd_ctx);
6761 if (retval != ERROR_OK)
6762 return retval;
6763
6764
6765 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6766 }

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)