target/smp: use a struct list_head to hold the smp targets
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/align.h>
45 #include <helper/time_support.h>
46 #include <jtag/jtag.h>
47 #include <flash/nor/core.h>
48
49 #include "target.h"
50 #include "target_type.h"
51 #include "target_request.h"
52 #include "breakpoints.h"
53 #include "register.h"
54 #include "trace.h"
55 #include "image.h"
56 #include "rtos/rtos.h"
57 #include "transport/transport.h"
58 #include "arm_cti.h"
59 #include "smp.h"
60
61 /* default halt wait timeout (ms) */
62 #define DEFAULT_HALT_TIMEOUT 5000
63
64 static int target_read_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, uint8_t *buffer);
66 static int target_write_buffer_default(struct target *target, target_addr_t address,
67 uint32_t count, const uint8_t *buffer);
68 static int target_array2mem(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_mem2array(Jim_Interp *interp, struct target *target,
71 int argc, Jim_Obj * const *argv);
72 static int target_register_user_commands(struct command_context *cmd_ctx);
73 static int target_get_gdb_fileio_info_default(struct target *target,
74 struct gdb_fileio_info *fileio_info);
75 static int target_gdb_fileio_end_default(struct target *target, int retcode,
76 int fileio_errno, bool ctrl_c);
77
78 /* targets */
79 extern struct target_type arm7tdmi_target;
80 extern struct target_type arm720t_target;
81 extern struct target_type arm9tdmi_target;
82 extern struct target_type arm920t_target;
83 extern struct target_type arm966e_target;
84 extern struct target_type arm946e_target;
85 extern struct target_type arm926ejs_target;
86 extern struct target_type fa526_target;
87 extern struct target_type feroceon_target;
88 extern struct target_type dragonite_target;
89 extern struct target_type xscale_target;
90 extern struct target_type cortexm_target;
91 extern struct target_type cortexa_target;
92 extern struct target_type aarch64_target;
93 extern struct target_type cortexr4_target;
94 extern struct target_type arm11_target;
95 extern struct target_type ls1_sap_target;
96 extern struct target_type mips_m4k_target;
97 extern struct target_type mips_mips64_target;
98 extern struct target_type avr_target;
99 extern struct target_type dsp563xx_target;
100 extern struct target_type dsp5680xx_target;
101 extern struct target_type testee_target;
102 extern struct target_type avr32_ap7k_target;
103 extern struct target_type hla_target;
104 extern struct target_type nds32_v2_target;
105 extern struct target_type nds32_v3_target;
106 extern struct target_type nds32_v3m_target;
107 extern struct target_type or1k_target;
108 extern struct target_type quark_x10xx_target;
109 extern struct target_type quark_d20xx_target;
110 extern struct target_type stm8_target;
111 extern struct target_type riscv_target;
112 extern struct target_type mem_ap_target;
113 extern struct target_type esirisc_target;
114 extern struct target_type arcv2_target;
115
116 static struct target_type *target_types[] = {
117 &arm7tdmi_target,
118 &arm9tdmi_target,
119 &arm920t_target,
120 &arm720t_target,
121 &arm966e_target,
122 &arm946e_target,
123 &arm926ejs_target,
124 &fa526_target,
125 &feroceon_target,
126 &dragonite_target,
127 &xscale_target,
128 &cortexm_target,
129 &cortexa_target,
130 &cortexr4_target,
131 &arm11_target,
132 &ls1_sap_target,
133 &mips_m4k_target,
134 &avr_target,
135 &dsp563xx_target,
136 &dsp5680xx_target,
137 &testee_target,
138 &avr32_ap7k_target,
139 &hla_target,
140 &nds32_v2_target,
141 &nds32_v3_target,
142 &nds32_v3m_target,
143 &or1k_target,
144 &quark_x10xx_target,
145 &quark_d20xx_target,
146 &stm8_target,
147 &riscv_target,
148 &mem_ap_target,
149 &esirisc_target,
150 &arcv2_target,
151 &aarch64_target,
152 &mips_mips64_target,
153 NULL,
154 };
155
156 struct target *all_targets;
157 static struct target_event_callback *target_event_callbacks;
158 static struct target_timer_callback *target_timer_callbacks;
159 static int64_t target_timer_next_event_value;
160 static LIST_HEAD(target_reset_callback_list);
161 static LIST_HEAD(target_trace_callback_list);
162 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
163 static LIST_HEAD(empty_smp_targets);
164
165 static const struct jim_nvp nvp_assert[] = {
166 { .name = "assert", NVP_ASSERT },
167 { .name = "deassert", NVP_DEASSERT },
168 { .name = "T", NVP_ASSERT },
169 { .name = "F", NVP_DEASSERT },
170 { .name = "t", NVP_ASSERT },
171 { .name = "f", NVP_DEASSERT },
172 { .name = NULL, .value = -1 }
173 };
174
175 static const struct jim_nvp nvp_error_target[] = {
176 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
177 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
178 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
179 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
180 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
181 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
182 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
183 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
184 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
185 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
186 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
187 { .value = -1, .name = NULL }
188 };
189
190 static const char *target_strerror_safe(int err)
191 {
192 const struct jim_nvp *n;
193
194 n = jim_nvp_value2name_simple(nvp_error_target, err);
195 if (!n->name)
196 return "unknown";
197 else
198 return n->name;
199 }
200
201 static const struct jim_nvp nvp_target_event[] = {
202
203 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
204 { .value = TARGET_EVENT_HALTED, .name = "halted" },
205 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
206 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
207 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
208 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
209 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
210
211 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
212 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
213
214 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
215 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
216 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
217 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
218 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
219 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
220 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
221 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
222
223 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
224 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
225 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
226
227 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
228 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
229
230 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
231 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
232
233 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
234 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
235
236 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
237 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
238
239 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
240
241 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x100, .name = "semihosting-user-cmd-0x100" },
242 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x101, .name = "semihosting-user-cmd-0x101" },
243 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x102, .name = "semihosting-user-cmd-0x102" },
244 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x103, .name = "semihosting-user-cmd-0x103" },
245 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x104, .name = "semihosting-user-cmd-0x104" },
246 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x105, .name = "semihosting-user-cmd-0x105" },
247 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x106, .name = "semihosting-user-cmd-0x106" },
248 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x107, .name = "semihosting-user-cmd-0x107" },
249
250 { .name = NULL, .value = -1 }
251 };
252
253 static const struct jim_nvp nvp_target_state[] = {
254 { .name = "unknown", .value = TARGET_UNKNOWN },
255 { .name = "running", .value = TARGET_RUNNING },
256 { .name = "halted", .value = TARGET_HALTED },
257 { .name = "reset", .value = TARGET_RESET },
258 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
259 { .name = NULL, .value = -1 },
260 };
261
262 static const struct jim_nvp nvp_target_debug_reason[] = {
263 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
264 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
265 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
266 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
267 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
268 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
269 { .name = "program-exit", .value = DBG_REASON_EXIT },
270 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
271 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
272 { .name = NULL, .value = -1 },
273 };
274
275 static const struct jim_nvp nvp_target_endian[] = {
276 { .name = "big", .value = TARGET_BIG_ENDIAN },
277 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
278 { .name = "be", .value = TARGET_BIG_ENDIAN },
279 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
280 { .name = NULL, .value = -1 },
281 };
282
283 static const struct jim_nvp nvp_reset_modes[] = {
284 { .name = "unknown", .value = RESET_UNKNOWN },
285 { .name = "run", .value = RESET_RUN },
286 { .name = "halt", .value = RESET_HALT },
287 { .name = "init", .value = RESET_INIT },
288 { .name = NULL, .value = -1 },
289 };
290
291 const char *debug_reason_name(struct target *t)
292 {
293 const char *cp;
294
295 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
296 t->debug_reason)->name;
297 if (!cp) {
298 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
299 cp = "(*BUG*unknown*BUG*)";
300 }
301 return cp;
302 }
303
304 const char *target_state_name(struct target *t)
305 {
306 const char *cp;
307 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
308 if (!cp) {
309 LOG_ERROR("Invalid target state: %d", (int)(t->state));
310 cp = "(*BUG*unknown*BUG*)";
311 }
312
313 if (!target_was_examined(t) && t->defer_examine)
314 cp = "examine deferred";
315
316 return cp;
317 }
318
319 const char *target_event_name(enum target_event event)
320 {
321 const char *cp;
322 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
323 if (!cp) {
324 LOG_ERROR("Invalid target event: %d", (int)(event));
325 cp = "(*BUG*unknown*BUG*)";
326 }
327 return cp;
328 }
329
330 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
331 {
332 const char *cp;
333 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
334 if (!cp) {
335 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
336 cp = "(*BUG*unknown*BUG*)";
337 }
338 return cp;
339 }
340
341 /* determine the number of the new target */
342 static int new_target_number(void)
343 {
344 struct target *t;
345 int x;
346
347 /* number is 0 based */
348 x = -1;
349 t = all_targets;
350 while (t) {
351 if (x < t->target_number)
352 x = t->target_number;
353 t = t->next;
354 }
355 return x + 1;
356 }
357
358 static void append_to_list_all_targets(struct target *target)
359 {
360 struct target **t = &all_targets;
361
362 while (*t)
363 t = &((*t)->next);
364 *t = target;
365 }
366
367 /* read a uint64_t from a buffer in target memory endianness */
368 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
369 {
370 if (target->endianness == TARGET_LITTLE_ENDIAN)
371 return le_to_h_u64(buffer);
372 else
373 return be_to_h_u64(buffer);
374 }
375
376 /* read a uint32_t from a buffer in target memory endianness */
377 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
378 {
379 if (target->endianness == TARGET_LITTLE_ENDIAN)
380 return le_to_h_u32(buffer);
381 else
382 return be_to_h_u32(buffer);
383 }
384
385 /* read a uint24_t from a buffer in target memory endianness */
386 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
387 {
388 if (target->endianness == TARGET_LITTLE_ENDIAN)
389 return le_to_h_u24(buffer);
390 else
391 return be_to_h_u24(buffer);
392 }
393
394 /* read a uint16_t from a buffer in target memory endianness */
395 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
396 {
397 if (target->endianness == TARGET_LITTLE_ENDIAN)
398 return le_to_h_u16(buffer);
399 else
400 return be_to_h_u16(buffer);
401 }
402
403 /* write a uint64_t to a buffer in target memory endianness */
404 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
405 {
406 if (target->endianness == TARGET_LITTLE_ENDIAN)
407 h_u64_to_le(buffer, value);
408 else
409 h_u64_to_be(buffer, value);
410 }
411
412 /* write a uint32_t to a buffer in target memory endianness */
413 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
414 {
415 if (target->endianness == TARGET_LITTLE_ENDIAN)
416 h_u32_to_le(buffer, value);
417 else
418 h_u32_to_be(buffer, value);
419 }
420
421 /* write a uint24_t to a buffer in target memory endianness */
422 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
423 {
424 if (target->endianness == TARGET_LITTLE_ENDIAN)
425 h_u24_to_le(buffer, value);
426 else
427 h_u24_to_be(buffer, value);
428 }
429
430 /* write a uint16_t to a buffer in target memory endianness */
431 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
432 {
433 if (target->endianness == TARGET_LITTLE_ENDIAN)
434 h_u16_to_le(buffer, value);
435 else
436 h_u16_to_be(buffer, value);
437 }
438
439 /* write a uint8_t to a buffer in target memory endianness */
440 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
441 {
442 *buffer = value;
443 }
444
445 /* read a uint64_t array from a buffer in target memory endianness */
446 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
447 {
448 uint32_t i;
449 for (i = 0; i < count; i++)
450 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
451 }
452
453 /* read a uint32_t array from a buffer in target memory endianness */
454 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
455 {
456 uint32_t i;
457 for (i = 0; i < count; i++)
458 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
459 }
460
461 /* read a uint16_t array from a buffer in target memory endianness */
462 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
463 {
464 uint32_t i;
465 for (i = 0; i < count; i++)
466 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
467 }
468
469 /* write a uint64_t array to a buffer in target memory endianness */
470 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
471 {
472 uint32_t i;
473 for (i = 0; i < count; i++)
474 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
475 }
476
477 /* write a uint32_t array to a buffer in target memory endianness */
478 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
479 {
480 uint32_t i;
481 for (i = 0; i < count; i++)
482 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
483 }
484
485 /* write a uint16_t array to a buffer in target memory endianness */
486 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
487 {
488 uint32_t i;
489 for (i = 0; i < count; i++)
490 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
491 }
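/*
 * Usage sketch (editorial illustration, not part of the original file): the
 * helpers above convert between host-order integers and raw byte buffers laid
 * out in the target's configured endianness. A caller typically packs values
 * before a memory write and unpacks them after a read:
 *
 *	uint8_t raw[4];
 *	target_buffer_set_u32(target, raw, 0x12345678);      // host -> target order
 *	uint32_t back = target_buffer_get_u32(target, raw);  // target order -> host
 *	// back == 0x12345678 regardless of target->endianness
 */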
492
493 /* return a pointer to a configured target; id is name or number */
494 struct target *get_target(const char *id)
495 {
496 struct target *target;
497
498 /* try as Tcl target name */
499 for (target = all_targets; target; target = target->next) {
500 if (!target_name(target))
501 continue;
502 if (strcmp(id, target_name(target)) == 0)
503 return target;
504 }
505
506 /* It's OK to remove this fallback sometime after August 2010 or so */
507
508 /* no match, try as number */
509 unsigned num;
510 if (parse_uint(id, &num) != ERROR_OK)
511 return NULL;
512
513 for (target = all_targets; target; target = target->next) {
514 if (target->target_number == (int)num) {
515 LOG_WARNING("use '%s' as target identifier, not '%u'",
516 target_name(target), num);
517 return target;
518 }
519 }
520
521 return NULL;
522 }
523
524 /* returns a pointer to the n-th configured target */
525 struct target *get_target_by_num(int num)
526 {
527 struct target *target = all_targets;
528
529 while (target) {
530 if (target->target_number == num)
531 return target;
532 target = target->next;
533 }
534
535 return NULL;
536 }
537
538 struct target *get_current_target(struct command_context *cmd_ctx)
539 {
540 struct target *target = get_current_target_or_null(cmd_ctx);
541
542 if (!target) {
543 LOG_ERROR("BUG: current_target out of bounds");
544 exit(-1);
545 }
546
547 return target;
548 }
549
550 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
551 {
552 return cmd_ctx->current_target_override
553 ? cmd_ctx->current_target_override
554 : cmd_ctx->current_target;
555 }
556
557 int target_poll(struct target *target)
558 {
559 int retval;
560
561 /* We can't poll until after examine */
562 if (!target_was_examined(target)) {
563 /* Fail silently lest we pollute the log */
564 return ERROR_FAIL;
565 }
566
567 retval = target->type->poll(target);
568 if (retval != ERROR_OK)
569 return retval;
570
571 if (target->halt_issued) {
572 if (target->state == TARGET_HALTED)
573 target->halt_issued = false;
574 else {
575 int64_t t = timeval_ms() - target->halt_issued_time;
576 if (t > DEFAULT_HALT_TIMEOUT) {
577 target->halt_issued = false;
578 LOG_INFO("Halt timed out, wake up GDB.");
579 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
580 }
581 }
582 }
583
584 return ERROR_OK;
585 }
586
587 int target_halt(struct target *target)
588 {
589 int retval;
590 /* We can't poll until after examine */
591 if (!target_was_examined(target)) {
592 LOG_ERROR("Target not examined yet");
593 return ERROR_FAIL;
594 }
595
596 retval = target->type->halt(target);
597 if (retval != ERROR_OK)
598 return retval;
599
600 target->halt_issued = true;
601 target->halt_issued_time = timeval_ms();
602
603 return ERROR_OK;
604 }
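/*
 * Illustrative pattern (assumption: typical caller behaviour, not code from
 * this file): target_halt() only issues the request; the caller keeps polling
 * until the target reports TARGET_HALTED, with the DEFAULT_HALT_TIMEOUT logic
 * in target_poll() waking GDB if the halt never completes.
 *
 *	if (target_halt(target) == ERROR_OK) {
 *		while (target->state != TARGET_HALTED) {
 *			if (target_poll(target) != ERROR_OK)
 *				break;          // poll failed, give up
 *			alive_sleep(10);        // avoid busy-waiting
 *		}
 *	}
 */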
605
606 /**
607 * Make the target (re)start executing using its saved execution
608 * context (possibly with some modifications).
609 *
610 * @param target Which target should start executing.
611 * @param current True to use the target's saved program counter instead
612 * of the address parameter
613 * @param address Optionally used as the program counter.
614 * @param handle_breakpoints True iff breakpoints at the resumption PC
615 * should be skipped. (For example, maybe execution was stopped by
616 * such a breakpoint, in which case it would be counterproductive to
617 * let it re-trigger.)
618 * @param debug_execution False if all working areas allocated by OpenOCD
619 * should be released and/or restored to their original contents.
620 * (This would for example be true to run some downloaded "helper"
621 * algorithm code, which resides in one such working buffer and uses
622 * another for data storage.)
623 *
624 * @todo Resolve the ambiguity about what the "debug_execution" flag
625 * signifies. For example, Target implementations don't agree on how
626 * it relates to invalidation of the register cache, or to whether
627 * breakpoints and watchpoints should be enabled. (It would seem wrong
628 * to enable breakpoints when running downloaded "helper" algorithms
629 * (debug_execution true), since the breakpoints would be set to match
630 * target firmware being debugged, not the helper algorithm.... and
631 * enabling them could cause such helpers to malfunction (for example,
632 * by overwriting data with a breakpoint instruction). On the other
633 * hand the infrastructure for running such helpers might use this
634 * procedure but rely on hardware breakpoint to detect termination.)
635 */
636 int target_resume(struct target *target, int current, target_addr_t address,
637 int handle_breakpoints, int debug_execution)
638 {
639 int retval;
640
641 /* We can't poll until after examine */
642 if (!target_was_examined(target)) {
643 LOG_ERROR("Target not examined yet");
644 return ERROR_FAIL;
645 }
646
647 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
648
649 /* note that resume *must* be asynchronous. The CPU can halt before
650 * we poll. The CPU can even halt at the current PC as a result of
651 * a software breakpoint inserted by the application (possibly a bug).
652 */
653 /*
654 * resume() triggers the event 'resumed'. The execution of TCL commands
655 * in the event handler causes the polling of targets. If the target has
656 * already halted for a breakpoint, polling will run the 'halted' event
657 * handler before the pending 'resumed' handler.
658 * Disable polling during resume() to guarantee the execution of handlers
659 * in the correct order.
660 */
661 bool save_poll = jtag_poll_get_enabled();
662 jtag_poll_set_enabled(false);
663 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
664 jtag_poll_set_enabled(save_poll);
665 if (retval != ERROR_OK)
666 return retval;
667
668 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
669
670 return retval;
671 }
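/*
 * Illustrative call patterns (hedged: the 0x08000000 address is a placeholder,
 * not taken from this file):
 *
 *	// resume from the saved program counter, honouring breakpoints
 *	target_resume(target, 1, 0, 0, 0);
 *
 *	// resume at an explicit address, skip a breakpoint at that address,
 *	// and keep OpenOCD working areas allocated (debug_execution != 0)
 *	target_resume(target, 0, 0x08000000, 1, 1);
 */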
672
673 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
674 {
675 char buf[100];
676 int retval;
677 struct jim_nvp *n;
678 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
679 if (!n->name) {
680 LOG_ERROR("invalid reset mode");
681 return ERROR_FAIL;
682 }
683
684 struct target *target;
685 for (target = all_targets; target; target = target->next)
686 target_call_reset_callbacks(target, reset_mode);
687
688 /* disable polling during reset to make reset event scripts
689 * more predictable, i.e. dr/irscan & pathmove in events will
690 * not have JTAG operations injected into the middle of a sequence.
691 */
692 bool save_poll = jtag_poll_get_enabled();
693
694 jtag_poll_set_enabled(false);
695
696 sprintf(buf, "ocd_process_reset %s", n->name);
697 retval = Jim_Eval(cmd->ctx->interp, buf);
698
699 jtag_poll_set_enabled(save_poll);
700
701 if (retval != JIM_OK) {
702 Jim_MakeErrorMessage(cmd->ctx->interp);
703 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
704 return ERROR_FAIL;
705 }
706
707 /* We want any events to be processed before the prompt */
708 retval = target_call_timer_callbacks_now();
709
710 for (target = all_targets; target; target = target->next) {
711 target->type->check_reset(target);
712 target->running_alg = false;
713 }
714
715 return retval;
716 }
717
718 static int identity_virt2phys(struct target *target,
719 target_addr_t virtual, target_addr_t *physical)
720 {
721 *physical = virtual;
722 return ERROR_OK;
723 }
724
725 static int no_mmu(struct target *target, int *enabled)
726 {
727 *enabled = 0;
728 return ERROR_OK;
729 }
730
731 /**
732 * Reset the @c examined flag for the given target.
733 * Pure paranoia -- targets are zeroed on allocation.
734 */
735 static inline void target_reset_examined(struct target *target)
736 {
737 target->examined = false;
738 }
739
740 static int default_examine(struct target *target)
741 {
742 target_set_examined(target);
743 return ERROR_OK;
744 }
745
746 /* no check by default */
747 static int default_check_reset(struct target *target)
748 {
749 return ERROR_OK;
750 }
751
752 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
753 * Keep in sync */
754 int target_examine_one(struct target *target)
755 {
756 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
757
758 int retval = target->type->examine(target);
759 if (retval != ERROR_OK) {
760 target_reset_examined(target);
761 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
762 return retval;
763 }
764
765 target_set_examined(target);
766 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
767
768 return ERROR_OK;
769 }
770
771 static int jtag_enable_callback(enum jtag_event event, void *priv)
772 {
773 struct target *target = priv;
774
775 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
776 return ERROR_OK;
777
778 jtag_unregister_event_callback(jtag_enable_callback, target);
779
780 return target_examine_one(target);
781 }
782
783 /* Targets that correctly implement init + examine, i.e.
784 * no communication with target during init:
785 *
786 * XScale
787 */
788 int target_examine(void)
789 {
790 int retval = ERROR_OK;
791 struct target *target;
792
793 for (target = all_targets; target; target = target->next) {
794 /* defer examination, but don't skip it */
795 if (!target->tap->enabled) {
796 jtag_register_event_callback(jtag_enable_callback,
797 target);
798 continue;
799 }
800
801 if (target->defer_examine)
802 continue;
803
804 int retval2 = target_examine_one(target);
805 if (retval2 != ERROR_OK) {
806 LOG_WARNING("target %s examination failed", target_name(target));
807 retval = retval2;
808 }
809 }
810 return retval;
811 }
812
813 const char *target_type_name(struct target *target)
814 {
815 return target->type->name;
816 }
817
818 static int target_soft_reset_halt(struct target *target)
819 {
820 if (!target_was_examined(target)) {
821 LOG_ERROR("Target not examined yet");
822 return ERROR_FAIL;
823 }
824 if (!target->type->soft_reset_halt) {
825 LOG_ERROR("Target %s does not support soft_reset_halt",
826 target_name(target));
827 return ERROR_FAIL;
828 }
829 return target->type->soft_reset_halt(target);
830 }
831
832 /**
833 * Downloads a target-specific native code algorithm to the target,
834 * and executes it. Note that some targets may need to set up, enable,
835 * and tear down a breakpoint (hard or soft) to detect algorithm
836 * termination, while others may support lower overhead schemes where
837 * soft breakpoints embedded in the algorithm automatically terminate the
838 * algorithm.
839 *
840 * @param target used to run the algorithm
841 * @param num_mem_params
842 * @param mem_params
843 * @param num_reg_params
844 * @param reg_param
845 * @param entry_point
846 * @param exit_point
847 * @param timeout_ms
848 * @param arch_info target-specific description of the algorithm.
849 */
850 int target_run_algorithm(struct target *target,
851 int num_mem_params, struct mem_param *mem_params,
852 int num_reg_params, struct reg_param *reg_param,
853 target_addr_t entry_point, target_addr_t exit_point,
854 int timeout_ms, void *arch_info)
855 {
856 int retval = ERROR_FAIL;
857
858 if (!target_was_examined(target)) {
859 LOG_ERROR("Target not examined yet");
860 goto done;
861 }
862 if (!target->type->run_algorithm) {
863 LOG_ERROR("Target type '%s' does not support %s",
864 target_type_name(target), __func__);
865 goto done;
866 }
867
868 target->running_alg = true;
869 retval = target->type->run_algorithm(target,
870 num_mem_params, mem_params,
871 num_reg_params, reg_param,
872 entry_point, exit_point, timeout_ms, arch_info);
873 target->running_alg = false;
874
875 done:
876 return retval;
877 }
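/*
 * Minimal caller sketch (hedged: the register name "r0", the entry/exit
 * addresses and the NULL arch_info are placeholders; real callers pass a
 * target-specific arch_info such as struct armv7m_algorithm). init_reg_param(),
 * buf_set_u32() and friends come from algorithm.h / binarybuffer.h.
 *
 *	struct reg_param reg_params[1];
 *	init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
 *	buf_set_u32(reg_params[0].value, 0, 32, 42);	// input argument in r0
 *	int retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
 *			0x20000000, 0x20000020, 1000, NULL);
 *	if (retval == ERROR_OK) {
 *		uint32_t result = buf_get_u32(reg_params[0].value, 0, 32);
 *		LOG_DEBUG("algorithm result 0x%08" PRIx32, result);
 *	}
 *	destroy_reg_param(&reg_params[0]);
 */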
878
879 /**
880 * Executes a target-specific native code algorithm and leaves it running.
881 *
882 * @param target used to run the algorithm
883 * @param num_mem_params
884 * @param mem_params
885 * @param num_reg_params
886 * @param reg_params
887 * @param entry_point
888 * @param exit_point
889 * @param arch_info target-specific description of the algorithm.
890 */
891 int target_start_algorithm(struct target *target,
892 int num_mem_params, struct mem_param *mem_params,
893 int num_reg_params, struct reg_param *reg_params,
894 target_addr_t entry_point, target_addr_t exit_point,
895 void *arch_info)
896 {
897 int retval = ERROR_FAIL;
898
899 if (!target_was_examined(target)) {
900 LOG_ERROR("Target not examined yet");
901 goto done;
902 }
903 if (!target->type->start_algorithm) {
904 LOG_ERROR("Target type '%s' does not support %s",
905 target_type_name(target), __func__);
906 goto done;
907 }
908 if (target->running_alg) {
909 LOG_ERROR("Target is already running an algorithm");
910 goto done;
911 }
912
913 target->running_alg = true;
914 retval = target->type->start_algorithm(target,
915 num_mem_params, mem_params,
916 num_reg_params, reg_params,
917 entry_point, exit_point, arch_info);
918
919 done:
920 return retval;
921 }
922
923 /**
924 * Waits for an algorithm started with target_start_algorithm() to complete.
925 *
926 * @param target used to run the algorithm
927 * @param num_mem_params
928 * @param mem_params
929 * @param num_reg_params
930 * @param reg_params
931 * @param exit_point
932 * @param timeout_ms
933 * @param arch_info target-specific description of the algorithm.
934 */
935 int target_wait_algorithm(struct target *target,
936 int num_mem_params, struct mem_param *mem_params,
937 int num_reg_params, struct reg_param *reg_params,
938 target_addr_t exit_point, int timeout_ms,
939 void *arch_info)
940 {
941 int retval = ERROR_FAIL;
942
943 if (!target->type->wait_algorithm) {
944 LOG_ERROR("Target type '%s' does not support %s",
945 target_type_name(target), __func__);
946 goto done;
947 }
948 if (!target->running_alg) {
949 LOG_ERROR("Target is not running an algorithm");
950 goto done;
951 }
952
953 retval = target->type->wait_algorithm(target,
954 num_mem_params, mem_params,
955 num_reg_params, reg_params,
956 exit_point, timeout_ms, arch_info);
957 if (retval != ERROR_TARGET_TIMEOUT)
958 target->running_alg = false;
959
960 done:
961 return retval;
962 }
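/*
 * Hedged sketch of the start/wait split (parameters are placeholders): the
 * host kicks off the algorithm, streams data to it while it runs (see the
 * async FIFO helpers below), then collects the result registers.
 *
 *	retval = target_start_algorithm(target, 0, NULL, num_reg_params, reg_params,
 *			entry_point, exit_point, arch_info);
 *	if (retval == ERROR_OK) {
 *		// ... feed the running algorithm, e.g. through a target-side FIFO ...
 *		retval = target_wait_algorithm(target, 0, NULL, num_reg_params, reg_params,
 *				exit_point, 10000, arch_info);
 *	}
 */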
963
964 /**
965 * Streams data to a circular buffer on target intended for consumption by code
966 * running asynchronously on target.
967 *
968 * This is intended for applications where target-specific native code runs
969 * on the target, receives data from the circular buffer, does something with
970 * it (most likely writing it to a flash memory), and advances the circular
971 * buffer pointer.
972 *
973 * This assumes that the helper algorithm has already been loaded to the target,
974 * but has not been started yet. Given memory and register parameters are passed
975 * to the algorithm.
976 *
977 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
978 * following format:
979 *
980 * [buffer_start + 0, buffer_start + 4):
981 * Write Pointer address (aka head). Written and updated by this
982 * routine when new data is written to the circular buffer.
983 * [buffer_start + 4, buffer_start + 8):
984 * Read Pointer address (aka tail). Updated by code running on the
985 * target after it consumes data.
986 * [buffer_start + 8, buffer_start + buffer_size):
987 * Circular buffer contents.
988 *
989 * See contrib/loaders/flash/stm32f1x.S for an example.
990 *
991 * @param target used to run the algorithm
992 * @param buffer address on the host where data to be sent is located
993 * @param count number of blocks to send
994 * @param block_size size in bytes of each block
995 * @param num_mem_params count of memory-based params to pass to algorithm
996 * @param mem_params memory-based params to pass to algorithm
997 * @param num_reg_params count of register-based params to pass to algorithm
998 * @param reg_params register-based params to pass to algorithm
999 * @param buffer_start address on the target of the circular buffer structure
1000 * @param buffer_size size of the circular buffer structure
1001 * @param entry_point address on the target to execute to start the algorithm
1002 * @param exit_point address at which to set a breakpoint to catch the
1003 * end of the algorithm; can be 0 if target triggers a breakpoint itself
1004 * @param arch_info
1005 */
1006
1007 int target_run_flash_async_algorithm(struct target *target,
1008 const uint8_t *buffer, uint32_t count, int block_size,
1009 int num_mem_params, struct mem_param *mem_params,
1010 int num_reg_params, struct reg_param *reg_params,
1011 uint32_t buffer_start, uint32_t buffer_size,
1012 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1013 {
1014 int retval;
1015 int timeout = 0;
1016
1017 const uint8_t *buffer_orig = buffer;
1018
1019 /* Set up working area. First word is write pointer, second word is read pointer,
1020 * rest is fifo data area. */
1021 uint32_t wp_addr = buffer_start;
1022 uint32_t rp_addr = buffer_start + 4;
1023 uint32_t fifo_start_addr = buffer_start + 8;
1024 uint32_t fifo_end_addr = buffer_start + buffer_size;
1025
1026 uint32_t wp = fifo_start_addr;
1027 uint32_t rp = fifo_start_addr;
1028
1029 /* validate block_size is 2^n */
1030 assert(IS_PWR_OF_2(block_size));
1031
1032 retval = target_write_u32(target, wp_addr, wp);
1033 if (retval != ERROR_OK)
1034 return retval;
1035 retval = target_write_u32(target, rp_addr, rp);
1036 if (retval != ERROR_OK)
1037 return retval;
1038
1039 /* Start up algorithm on target and let it idle while writing the first chunk */
1040 retval = target_start_algorithm(target, num_mem_params, mem_params,
1041 num_reg_params, reg_params,
1042 entry_point,
1043 exit_point,
1044 arch_info);
1045
1046 if (retval != ERROR_OK) {
1047 LOG_ERROR("error starting target flash write algorithm");
1048 return retval;
1049 }
1050
1051 while (count > 0) {
1052
1053 retval = target_read_u32(target, rp_addr, &rp);
1054 if (retval != ERROR_OK) {
1055 LOG_ERROR("failed to get read pointer");
1056 break;
1057 }
1058
1059 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1060 (size_t) (buffer - buffer_orig), count, wp, rp);
1061
1062 if (rp == 0) {
1063 LOG_ERROR("flash write algorithm aborted by target");
1064 retval = ERROR_FLASH_OPERATION_FAILED;
1065 break;
1066 }
1067
1068 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1069 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1070 break;
1071 }
1072
1073 /* Count the number of bytes available in the fifo without
1074 * crossing the wrap around. Make sure to not fill it completely,
1075 * because that would make wp == rp and that's the empty condition. */
1076 uint32_t thisrun_bytes;
1077 if (rp > wp)
1078 thisrun_bytes = rp - wp - block_size;
1079 else if (rp > fifo_start_addr)
1080 thisrun_bytes = fifo_end_addr - wp;
1081 else
1082 thisrun_bytes = fifo_end_addr - wp - block_size;
1083
1084 if (thisrun_bytes == 0) {
1085 /* Throttle polling a bit if transfer is (much) faster than flash
1086 * programming. The exact delay shouldn't matter as long as it's
1087 * less than buffer size / flash speed. This is very unlikely to
1088 * run when using high latency connections such as USB. */
1089 alive_sleep(2);
1090
1091 /* To stop an infinite loop on some targets, check and increment a timeout;
1092 * this issue was observed on a Stellaris using the new ICDI interface. */
1093 if (timeout++ >= 2500) {
1094 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1095 return ERROR_FLASH_OPERATION_FAILED;
1096 }
1097 continue;
1098 }
1099
1100 /* reset our timeout */
1101 timeout = 0;
1102
1103 /* Limit to the amount of data we actually want to write */
1104 if (thisrun_bytes > count * block_size)
1105 thisrun_bytes = count * block_size;
1106
1107 /* Force end of large blocks to be word aligned */
1108 if (thisrun_bytes >= 16)
1109 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1110
1111 /* Write data to fifo */
1112 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1113 if (retval != ERROR_OK)
1114 break;
1115
1116 /* Update counters and wrap write pointer */
1117 buffer += thisrun_bytes;
1118 count -= thisrun_bytes / block_size;
1119 wp += thisrun_bytes;
1120 if (wp >= fifo_end_addr)
1121 wp = fifo_start_addr;
1122
1123 /* Store updated write pointer to target */
1124 retval = target_write_u32(target, wp_addr, wp);
1125 if (retval != ERROR_OK)
1126 break;
1127
1128 /* Avoid GDB timeouts */
1129 keep_alive();
1130 }
1131
1132 if (retval != ERROR_OK) {
1133 /* abort flash write algorithm on target */
1134 target_write_u32(target, wp_addr, 0);
1135 }
1136
1137 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1138 num_reg_params, reg_params,
1139 exit_point,
1140 10000,
1141 arch_info);
1142
1143 if (retval2 != ERROR_OK) {
1144 LOG_ERROR("error waiting for target flash write algorithm");
1145 retval = retval2;
1146 }
1147
1148 if (retval == ERROR_OK) {
1149 /* check if algorithm set rp = 0 after fifo writer loop finished */
1150 retval = target_read_u32(target, rp_addr, &rp);
1151 if (retval == ERROR_OK && rp == 0) {
1152 LOG_ERROR("flash write algorithm aborted by target");
1153 retval = ERROR_FLASH_OPERATION_FAILED;
1154 }
1155 }
1156
1157 return retval;
1158 }
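/*
 * FIFO layout recap (editorial illustration, values are placeholders derived
 * from the code above): with buffer_start = 0x20001000 and buffer_size = 0x108,
 *
 *	wp_addr         = 0x20001000   // head, updated by OpenOCD after each write
 *	rp_addr         = 0x20001004   // tail, updated by the target-side loader
 *	fifo_start_addr = 0x20001008
 *	fifo_end_addr   = 0x20001108
 *
 * The target-side loader is expected to wait until rp != wp, consume
 * block_size-sized chunks starting at rp, advance rp (wrapping back to
 * fifo_start_addr), and store rp = 0 to report an error/abort, which the
 * loop above turns into ERROR_FLASH_OPERATION_FAILED.
 */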
1159
1160 int target_run_read_async_algorithm(struct target *target,
1161 uint8_t *buffer, uint32_t count, int block_size,
1162 int num_mem_params, struct mem_param *mem_params,
1163 int num_reg_params, struct reg_param *reg_params,
1164 uint32_t buffer_start, uint32_t buffer_size,
1165 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1166 {
1167 int retval;
1168 int timeout = 0;
1169
1170 const uint8_t *buffer_orig = buffer;
1171
1172 /* Set up working area. First word is write pointer, second word is read pointer,
1173 * rest is fifo data area. */
1174 uint32_t wp_addr = buffer_start;
1175 uint32_t rp_addr = buffer_start + 4;
1176 uint32_t fifo_start_addr = buffer_start + 8;
1177 uint32_t fifo_end_addr = buffer_start + buffer_size;
1178
1179 uint32_t wp = fifo_start_addr;
1180 uint32_t rp = fifo_start_addr;
1181
1182 /* validate block_size is 2^n */
1183 assert(IS_PWR_OF_2(block_size));
1184
1185 retval = target_write_u32(target, wp_addr, wp);
1186 if (retval != ERROR_OK)
1187 return retval;
1188 retval = target_write_u32(target, rp_addr, rp);
1189 if (retval != ERROR_OK)
1190 return retval;
1191
1192 /* Start up algorithm on target */
1193 retval = target_start_algorithm(target, num_mem_params, mem_params,
1194 num_reg_params, reg_params,
1195 entry_point,
1196 exit_point,
1197 arch_info);
1198
1199 if (retval != ERROR_OK) {
1200 LOG_ERROR("error starting target flash read algorithm");
1201 return retval;
1202 }
1203
1204 while (count > 0) {
1205 retval = target_read_u32(target, wp_addr, &wp);
1206 if (retval != ERROR_OK) {
1207 LOG_ERROR("failed to get write pointer");
1208 break;
1209 }
1210
1211 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1212 (size_t)(buffer - buffer_orig), count, wp, rp);
1213
1214 if (wp == 0) {
1215 LOG_ERROR("flash read algorithm aborted by target");
1216 retval = ERROR_FLASH_OPERATION_FAILED;
1217 break;
1218 }
1219
1220 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1221 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1222 break;
1223 }
1224
1225 /* Count the number of bytes available in the fifo without
1226 * crossing the wrap around. */
1227 uint32_t thisrun_bytes;
1228 if (wp >= rp)
1229 thisrun_bytes = wp - rp;
1230 else
1231 thisrun_bytes = fifo_end_addr - rp;
1232
1233 if (thisrun_bytes == 0) {
1234 /* Throttle polling a bit if transfer is (much) faster than flash
1235 * reading. The exact delay shouldn't matter as long as it's
1236 * less than buffer size / flash speed. This is very unlikely to
1237 * run when using high latency connections such as USB. */
1238 alive_sleep(2);
1239
1240 /* To stop an infinite loop on some targets, check and increment a timeout;
1241 * this issue was observed on a Stellaris using the new ICDI interface. */
1242 if (timeout++ >= 2500) {
1243 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1244 return ERROR_FLASH_OPERATION_FAILED;
1245 }
1246 continue;
1247 }
1248
1249 /* Reset our timeout */
1250 timeout = 0;
1251
1252 /* Limit to the amount of data we actually want to read */
1253 if (thisrun_bytes > count * block_size)
1254 thisrun_bytes = count * block_size;
1255
1256 /* Force end of large blocks to be word aligned */
1257 if (thisrun_bytes >= 16)
1258 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1259
1260 /* Read data from fifo */
1261 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1262 if (retval != ERROR_OK)
1263 break;
1264
1265 /* Update counters and wrap read pointer */
1266 buffer += thisrun_bytes;
1267 count -= thisrun_bytes / block_size;
1268 rp += thisrun_bytes;
1269 if (rp >= fifo_end_addr)
1270 rp = fifo_start_addr;
1271
1272 /* Store updated read pointer to target */
1273 retval = target_write_u32(target, rp_addr, rp);
1274 if (retval != ERROR_OK)
1275 break;
1276
1277 /* Avoid GDB timeouts */
1278 keep_alive();
1279
1280 }
1281
1282 if (retval != ERROR_OK) {
1283 /* abort flash read algorithm on target */
1284 target_write_u32(target, rp_addr, 0);
1285 }
1286
1287 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1288 num_reg_params, reg_params,
1289 exit_point,
1290 10000,
1291 arch_info);
1292
1293 if (retval2 != ERROR_OK) {
1294 LOG_ERROR("error waiting for target flash write algorithm");
1295 retval = retval2;
1296 }
1297
1298 if (retval == ERROR_OK) {
1299 /* check if algorithm set wp = 0 after fifo read loop finished */
1300 retval = target_read_u32(target, wp_addr, &wp);
1301 if (retval == ERROR_OK && wp == 0) {
1302 LOG_ERROR("flash read algorithm aborted by target");
1303 retval = ERROR_FLASH_OPERATION_FAILED;
1304 }
1305 }
1306
1307 return retval;
1308 }
1309
1310 int target_read_memory(struct target *target,
1311 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1312 {
1313 if (!target_was_examined(target)) {
1314 LOG_ERROR("Target not examined yet");
1315 return ERROR_FAIL;
1316 }
1317 if (!target->type->read_memory) {
1318 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1319 return ERROR_FAIL;
1320 }
1321 return target->type->read_memory(target, address, size, count, buffer);
1322 }
1323
1324 int target_read_phys_memory(struct target *target,
1325 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1326 {
1327 if (!target_was_examined(target)) {
1328 LOG_ERROR("Target not examined yet");
1329 return ERROR_FAIL;
1330 }
1331 if (!target->type->read_phys_memory) {
1332 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1333 return ERROR_FAIL;
1334 }
1335 return target->type->read_phys_memory(target, address, size, count, buffer);
1336 }
1337
1338 int target_write_memory(struct target *target,
1339 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1340 {
1341 if (!target_was_examined(target)) {
1342 LOG_ERROR("Target not examined yet");
1343 return ERROR_FAIL;
1344 }
1345 if (!target->type->write_memory) {
1346 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1347 return ERROR_FAIL;
1348 }
1349 return target->type->write_memory(target, address, size, count, buffer);
1350 }
1351
1352 int target_write_phys_memory(struct target *target,
1353 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1354 {
1355 if (!target_was_examined(target)) {
1356 LOG_ERROR("Target not examined yet");
1357 return ERROR_FAIL;
1358 }
1359 if (!target->type->write_phys_memory) {
1360 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1361 return ERROR_FAIL;
1362 }
1363 return target->type->write_phys_memory(target, address, size, count, buffer);
1364 }
1365
1366 int target_add_breakpoint(struct target *target,
1367 struct breakpoint *breakpoint)
1368 {
1369 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1370 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1371 return ERROR_TARGET_NOT_HALTED;
1372 }
1373 return target->type->add_breakpoint(target, breakpoint);
1374 }
1375
1376 int target_add_context_breakpoint(struct target *target,
1377 struct breakpoint *breakpoint)
1378 {
1379 if (target->state != TARGET_HALTED) {
1380 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1381 return ERROR_TARGET_NOT_HALTED;
1382 }
1383 return target->type->add_context_breakpoint(target, breakpoint);
1384 }
1385
1386 int target_add_hybrid_breakpoint(struct target *target,
1387 struct breakpoint *breakpoint)
1388 {
1389 if (target->state != TARGET_HALTED) {
1390 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1391 return ERROR_TARGET_NOT_HALTED;
1392 }
1393 return target->type->add_hybrid_breakpoint(target, breakpoint);
1394 }
1395
1396 int target_remove_breakpoint(struct target *target,
1397 struct breakpoint *breakpoint)
1398 {
1399 return target->type->remove_breakpoint(target, breakpoint);
1400 }
1401
1402 int target_add_watchpoint(struct target *target,
1403 struct watchpoint *watchpoint)
1404 {
1405 if (target->state != TARGET_HALTED) {
1406 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1407 return ERROR_TARGET_NOT_HALTED;
1408 }
1409 return target->type->add_watchpoint(target, watchpoint);
1410 }
1411 int target_remove_watchpoint(struct target *target,
1412 struct watchpoint *watchpoint)
1413 {
1414 return target->type->remove_watchpoint(target, watchpoint);
1415 }
1416 int target_hit_watchpoint(struct target *target,
1417 struct watchpoint **hit_watchpoint)
1418 {
1419 if (target->state != TARGET_HALTED) {
1420 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1421 return ERROR_TARGET_NOT_HALTED;
1422 }
1423
1424 if (!target->type->hit_watchpoint) {
1425 /* For backward compatibility, if hit_watchpoint is not implemented,
1426 * return ERROR_FAIL so that gdb_server will not use the bogus
1427 * information. */
1428 return ERROR_FAIL;
1429 }
1430
1431 return target->type->hit_watchpoint(target, hit_watchpoint);
1432 }
1433
1434 const char *target_get_gdb_arch(struct target *target)
1435 {
1436 if (!target->type->get_gdb_arch)
1437 return NULL;
1438 return target->type->get_gdb_arch(target);
1439 }
1440
1441 int target_get_gdb_reg_list(struct target *target,
1442 struct reg **reg_list[], int *reg_list_size,
1443 enum target_register_class reg_class)
1444 {
1445 int result = ERROR_FAIL;
1446
1447 if (!target_was_examined(target)) {
1448 LOG_ERROR("Target not examined yet");
1449 goto done;
1450 }
1451
1452 result = target->type->get_gdb_reg_list(target, reg_list,
1453 reg_list_size, reg_class);
1454
1455 done:
1456 if (result != ERROR_OK) {
1457 *reg_list = NULL;
1458 *reg_list_size = 0;
1459 }
1460 return result;
1461 }
1462
1463 int target_get_gdb_reg_list_noread(struct target *target,
1464 struct reg **reg_list[], int *reg_list_size,
1465 enum target_register_class reg_class)
1466 {
1467 if (target->type->get_gdb_reg_list_noread &&
1468 target->type->get_gdb_reg_list_noread(target, reg_list,
1469 reg_list_size, reg_class) == ERROR_OK)
1470 return ERROR_OK;
1471 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1472 }
1473
1474 bool target_supports_gdb_connection(struct target *target)
1475 {
1476 /*
1477 * exclude all the targets that don't provide get_gdb_reg_list
1478 * or that have explicit gdb_max_connection == 0
1479 */
1480 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1481 }
1482
1483 int target_step(struct target *target,
1484 int current, target_addr_t address, int handle_breakpoints)
1485 {
1486 int retval;
1487
1488 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1489
1490 retval = target->type->step(target, current, address, handle_breakpoints);
1491 if (retval != ERROR_OK)
1492 return retval;
1493
1494 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1495
1496 return retval;
1497 }
1498
1499 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1500 {
1501 if (target->state != TARGET_HALTED) {
1502 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1503 return ERROR_TARGET_NOT_HALTED;
1504 }
1505 return target->type->get_gdb_fileio_info(target, fileio_info);
1506 }
1507
1508 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1509 {
1510 if (target->state != TARGET_HALTED) {
1511 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1512 return ERROR_TARGET_NOT_HALTED;
1513 }
1514 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1515 }
1516
1517 target_addr_t target_address_max(struct target *target)
1518 {
1519 unsigned bits = target_address_bits(target);
1520 if (sizeof(target_addr_t) * 8 == bits)
1521 return (target_addr_t) -1;
1522 else
1523 return (((target_addr_t) 1) << bits) - 1;
1524 }
1525
1526 unsigned target_address_bits(struct target *target)
1527 {
1528 if (target->type->address_bits)
1529 return target->type->address_bits(target);
1530 return 32;
1531 }
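/*
 * Worked example (follows directly from the two helpers above): a target whose
 * address_bits() hook reports 32 gives target_address_max() == 0xffffffff via
 * the shift branch, while a target reporting 64 bits takes the
 * (target_addr_t)-1 branch, avoiding an undefined shift by the full width of
 * target_addr_t.
 */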
1532
1533 unsigned int target_data_bits(struct target *target)
1534 {
1535 if (target->type->data_bits)
1536 return target->type->data_bits(target);
1537 return 32;
1538 }
1539
1540 static int target_profiling(struct target *target, uint32_t *samples,
1541 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1542 {
1543 return target->type->profiling(target, samples, max_num_samples,
1544 num_samples, seconds);
1545 }
1546
1547 static int handle_target(void *priv);
1548
1549 static int target_init_one(struct command_context *cmd_ctx,
1550 struct target *target)
1551 {
1552 target_reset_examined(target);
1553
1554 struct target_type *type = target->type;
1555 if (!type->examine)
1556 type->examine = default_examine;
1557
1558 if (!type->check_reset)
1559 type->check_reset = default_check_reset;
1560
1561 assert(type->init_target);
1562
1563 int retval = type->init_target(cmd_ctx, target);
1564 if (retval != ERROR_OK) {
1565 LOG_ERROR("target '%s' init failed", target_name(target));
1566 return retval;
1567 }
1568
1569 /* Sanity-check MMU support ... stub in what we must, to help
1570 * implement it in stages, but warn if we need to do so.
1571 */
1572 if (type->mmu) {
1573 if (!type->virt2phys) {
1574 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1575 type->virt2phys = identity_virt2phys;
1576 }
1577 } else {
1578 /* Make sure no-MMU targets all behave the same: make no
1579 * distinction between physical and virtual addresses, and
1580 * ensure that virt2phys() is always an identity mapping.
1581 */
1582 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1583 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1584
1585 type->mmu = no_mmu;
1586 type->write_phys_memory = type->write_memory;
1587 type->read_phys_memory = type->read_memory;
1588 type->virt2phys = identity_virt2phys;
1589 }
1590
1591 if (!target->type->read_buffer)
1592 target->type->read_buffer = target_read_buffer_default;
1593
1594 if (!target->type->write_buffer)
1595 target->type->write_buffer = target_write_buffer_default;
1596
1597 if (!target->type->get_gdb_fileio_info)
1598 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1599
1600 if (!target->type->gdb_fileio_end)
1601 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1602
1603 if (!target->type->profiling)
1604 target->type->profiling = target_profiling_default;
1605
1606 return ERROR_OK;
1607 }
1608
1609 static int target_init(struct command_context *cmd_ctx)
1610 {
1611 struct target *target;
1612 int retval;
1613
1614 for (target = all_targets; target; target = target->next) {
1615 retval = target_init_one(cmd_ctx, target);
1616 if (retval != ERROR_OK)
1617 return retval;
1618 }
1619
1620 if (!all_targets)
1621 return ERROR_OK;
1622
1623 retval = target_register_user_commands(cmd_ctx);
1624 if (retval != ERROR_OK)
1625 return retval;
1626
1627 retval = target_register_timer_callback(&handle_target,
1628 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1629 if (retval != ERROR_OK)
1630 return retval;
1631
1632 return ERROR_OK;
1633 }
1634
1635 COMMAND_HANDLER(handle_target_init_command)
1636 {
1637 int retval;
1638
1639 if (CMD_ARGC != 0)
1640 return ERROR_COMMAND_SYNTAX_ERROR;
1641
1642 static bool target_initialized;
1643 if (target_initialized) {
1644 LOG_INFO("'target init' has already been called");
1645 return ERROR_OK;
1646 }
1647 target_initialized = true;
1648
1649 retval = command_run_line(CMD_CTX, "init_targets");
1650 if (retval != ERROR_OK)
1651 return retval;
1652
1653 retval = command_run_line(CMD_CTX, "init_target_events");
1654 if (retval != ERROR_OK)
1655 return retval;
1656
1657 retval = command_run_line(CMD_CTX, "init_board");
1658 if (retval != ERROR_OK)
1659 return retval;
1660
1661 LOG_DEBUG("Initializing targets...");
1662 return target_init(CMD_CTX);
1663 }
1664
1665 int target_register_event_callback(int (*callback)(struct target *target,
1666 enum target_event event, void *priv), void *priv)
1667 {
1668 struct target_event_callback **callbacks_p = &target_event_callbacks;
1669
1670 if (!callback)
1671 return ERROR_COMMAND_SYNTAX_ERROR;
1672
1673 if (*callbacks_p) {
1674 while ((*callbacks_p)->next)
1675 callbacks_p = &((*callbacks_p)->next);
1676 callbacks_p = &((*callbacks_p)->next);
1677 }
1678
1679 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1680 (*callbacks_p)->callback = callback;
1681 (*callbacks_p)->priv = priv;
1682 (*callbacks_p)->next = NULL;
1683
1684 return ERROR_OK;
1685 }
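/*
 * Minimal client sketch (hypothetical handler name; mirrors how the GDB server
 * and RTOS support register themselves):
 *
 *	static int my_event_handler(struct target *target,
 *			enum target_event event, void *priv)
 *	{
 *		if (event == TARGET_EVENT_HALTED)
 *			LOG_INFO("%s halted", target_name(target));
 *		return ERROR_OK;
 *	}
 *
 *	target_register_event_callback(my_event_handler, NULL);
 */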
1686
1687 int target_register_reset_callback(int (*callback)(struct target *target,
1688 enum target_reset_mode reset_mode, void *priv), void *priv)
1689 {
1690 struct target_reset_callback *entry;
1691
1692 if (!callback)
1693 return ERROR_COMMAND_SYNTAX_ERROR;
1694
1695 entry = malloc(sizeof(struct target_reset_callback));
1696 if (!entry) {
1697 LOG_ERROR("error allocating buffer for reset callback entry");
1698 return ERROR_COMMAND_SYNTAX_ERROR;
1699 }
1700
1701 entry->callback = callback;
1702 entry->priv = priv;
1703 list_add(&entry->list, &target_reset_callback_list);
1704
1705
1706 return ERROR_OK;
1707 }
1708
1709 int target_register_trace_callback(int (*callback)(struct target *target,
1710 size_t len, uint8_t *data, void *priv), void *priv)
1711 {
1712 struct target_trace_callback *entry;
1713
1714 if (!callback)
1715 return ERROR_COMMAND_SYNTAX_ERROR;
1716
1717 entry = malloc(sizeof(struct target_trace_callback));
1718 if (!entry) {
1719 LOG_ERROR("error allocating buffer for trace callback entry");
1720 return ERROR_COMMAND_SYNTAX_ERROR;
1721 }
1722
1723 entry->callback = callback;
1724 entry->priv = priv;
1725 list_add(&entry->list, &target_trace_callback_list);
1726
1727
1728 return ERROR_OK;
1729 }
1730
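/*
 * Illustrative usage sketch (assumption, not from the original sources):
 * a periodic poller would be registered as below; "my_tick" is a
 * hypothetical callback. Non-periodic timers are unregistered
 * automatically after they fire (see target_call_timer_callback below).
 *
 *   static int my_tick(void *priv)
 *   {
 *       LOG_DEBUG("periodic tick");
 *       return ERROR_OK;
 *   }
 *
 *   target_register_timer_callback(my_tick, 100,
 *           TARGET_TIMER_TYPE_PERIODIC, NULL);
 */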
1731 int target_register_timer_callback(int (*callback)(void *priv),
1732 unsigned int time_ms, enum target_timer_type type, void *priv)
1733 {
1734 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1735
1736 if (!callback)
1737 return ERROR_COMMAND_SYNTAX_ERROR;
1738
1739 if (*callbacks_p) {
1740 while ((*callbacks_p)->next)
1741 callbacks_p = &((*callbacks_p)->next);
1742 callbacks_p = &((*callbacks_p)->next);
1743 }
1744
1745 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1746 (*callbacks_p)->callback = callback;
1747 (*callbacks_p)->type = type;
1748 (*callbacks_p)->time_ms = time_ms;
1749 (*callbacks_p)->removed = false;
1750
1751 (*callbacks_p)->when = timeval_ms() + time_ms;
1752 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1753
1754 (*callbacks_p)->priv = priv;
1755 (*callbacks_p)->next = NULL;
1756
1757 return ERROR_OK;
1758 }
1759
1760 int target_unregister_event_callback(int (*callback)(struct target *target,
1761 enum target_event event, void *priv), void *priv)
1762 {
1763 struct target_event_callback **p = &target_event_callbacks;
1764 struct target_event_callback *c = target_event_callbacks;
1765
1766 if (!callback)
1767 return ERROR_COMMAND_SYNTAX_ERROR;
1768
1769 while (c) {
1770 struct target_event_callback *next = c->next;
1771 if ((c->callback == callback) && (c->priv == priv)) {
1772 *p = next;
1773 free(c);
1774 return ERROR_OK;
1775 } else
1776 p = &(c->next);
1777 c = next;
1778 }
1779
1780 return ERROR_OK;
1781 }
1782
1783 int target_unregister_reset_callback(int (*callback)(struct target *target,
1784 enum target_reset_mode reset_mode, void *priv), void *priv)
1785 {
1786 struct target_reset_callback *entry;
1787
1788 if (!callback)
1789 return ERROR_COMMAND_SYNTAX_ERROR;
1790
1791 list_for_each_entry(entry, &target_reset_callback_list, list) {
1792 if (entry->callback == callback && entry->priv == priv) {
1793 list_del(&entry->list);
1794 free(entry);
1795 break;
1796 }
1797 }
1798
1799 return ERROR_OK;
1800 }
1801
1802 int target_unregister_trace_callback(int (*callback)(struct target *target,
1803 size_t len, uint8_t *data, void *priv), void *priv)
1804 {
1805 struct target_trace_callback *entry;
1806
1807 if (!callback)
1808 return ERROR_COMMAND_SYNTAX_ERROR;
1809
1810 list_for_each_entry(entry, &target_trace_callback_list, list) {
1811 if (entry->callback == callback && entry->priv == priv) {
1812 list_del(&entry->list);
1813 free(entry);
1814 break;
1815 }
1816 }
1817
1818 return ERROR_OK;
1819 }
1820
1821 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1822 {
1823 if (!callback)
1824 return ERROR_COMMAND_SYNTAX_ERROR;
1825
1826 for (struct target_timer_callback *c = target_timer_callbacks;
1827 c; c = c->next) {
1828 if ((c->callback == callback) && (c->priv == priv)) {
1829 c->removed = true;
1830 return ERROR_OK;
1831 }
1832 }
1833
1834 return ERROR_FAIL;
1835 }
1836
1837 int target_call_event_callbacks(struct target *target, enum target_event event)
1838 {
1839 struct target_event_callback *callback = target_event_callbacks;
1840 struct target_event_callback *next_callback;
1841
1842 if (event == TARGET_EVENT_HALTED) {
1843 /* deliver the early TARGET_EVENT_GDB_HALT callbacks before the generic halted event */
1844 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1845 }
1846
1847 LOG_DEBUG("target event %i (%s) for core %s", event,
1848 target_event_name(event),
1849 target_name(target));
1850
1851 target_handle_event(target, event);
1852
1853 while (callback) {
1854 next_callback = callback->next;
1855 callback->callback(target, event, callback->priv);
1856 callback = next_callback;
1857 }
1858
1859 return ERROR_OK;
1860 }
1861
1862 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1863 {
1864 struct target_reset_callback *callback;
1865
1866 LOG_DEBUG("target reset %i (%s)", reset_mode,
1867 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1868
1869 list_for_each_entry(callback, &target_reset_callback_list, list)
1870 callback->callback(target, reset_mode, callback->priv);
1871
1872 return ERROR_OK;
1873 }
1874
1875 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1876 {
1877 struct target_trace_callback *callback;
1878
1879 list_for_each_entry(callback, &target_trace_callback_list, list)
1880 callback->callback(target, len, data, callback->priv);
1881
1882 return ERROR_OK;
1883 }
1884
1885 static int target_timer_callback_periodic_restart(
1886 struct target_timer_callback *cb, int64_t *now)
1887 {
1888 cb->when = *now + cb->time_ms;
1889 return ERROR_OK;
1890 }
1891
1892 static int target_call_timer_callback(struct target_timer_callback *cb,
1893 int64_t *now)
1894 {
1895 cb->callback(cb->priv);
1896
1897 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1898 return target_timer_callback_periodic_restart(cb, now);
1899
1900 return target_unregister_timer_callback(cb->callback, cb->priv);
1901 }
1902
1903 static int target_call_timer_callbacks_check_time(int checktime)
1904 {
1905 static bool callback_processing;
1906
1907 /* Do not allow nesting */
1908 if (callback_processing)
1909 return ERROR_OK;
1910
1911 callback_processing = true;
1912
1913 keep_alive();
1914
1915 int64_t now = timeval_ms();
1916
1917 /* Initialize to a default value that's well into the future.
1918 * The loop below will make it closer to now if there are
1919 * callbacks that want to be called sooner. */
1920 target_timer_next_event_value = now + 1000;
1921
1922 /* Store an address of the place containing a pointer to the
1923 * next item; initially, that's a standalone "root of the
1924 * list" variable. */
1925 struct target_timer_callback **callback = &target_timer_callbacks;
1926 while (callback && *callback) {
1927 if ((*callback)->removed) {
1928 struct target_timer_callback *p = *callback;
1929 *callback = (*callback)->next;
1930 free(p);
1931 continue;
1932 }
1933
1934 bool call_it = (*callback)->callback &&
1935 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1936 now >= (*callback)->when);
1937
1938 if (call_it)
1939 target_call_timer_callback(*callback, &now);
1940
1941 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1942 target_timer_next_event_value = (*callback)->when;
1943
1944 callback = &(*callback)->next;
1945 }
1946
1947 callback_processing = false;
1948 return ERROR_OK;
1949 }
1950
1951 int target_call_timer_callbacks()
1952 {
1953 return target_call_timer_callbacks_check_time(1);
1954 }
1955
1956 /* invoke periodic callbacks immediately */
1957 int target_call_timer_callbacks_now()
1958 {
1959 return target_call_timer_callbacks_check_time(0);
1960 }
1961
1962 int64_t target_timer_next_event(void)
1963 {
1964 return target_timer_next_event_value;
1965 }
1966
1967 /* Prints the working area layout for debug purposes */
1968 static void print_wa_layout(struct target *target)
1969 {
1970 struct working_area *c = target->working_areas;
1971
1972 while (c) {
1973 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1974 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1975 c->address, c->address + c->size - 1, c->size);
1976 c = c->next;
1977 }
1978 }
1979
1980 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1981 static void target_split_working_area(struct working_area *area, uint32_t size)
1982 {
1983 assert(area->free); /* Shouldn't split an allocated area */
1984 assert(size <= area->size); /* Caller should guarantee this */
1985
1986 /* Split only if not already the right size */
1987 if (size < area->size) {
1988 struct working_area *new_wa = malloc(sizeof(*new_wa));
1989
1990 if (!new_wa)
1991 return;
1992
1993 new_wa->next = area->next;
1994 new_wa->size = area->size - size;
1995 new_wa->address = area->address + size;
1996 new_wa->backup = NULL;
1997 new_wa->user = NULL;
1998 new_wa->free = true;
1999
2000 area->next = new_wa;
2001 area->size = size;
2002
2003 /* If backup memory was allocated to this area, it has the wrong size
2004 * now so free it and it will be reallocated if/when needed */
2005 free(area->backup);
2006 area->backup = NULL;
2007 }
2008 }
2009
2010 /* Merge all adjacent free areas into one */
2011 static void target_merge_working_areas(struct target *target)
2012 {
2013 struct working_area *c = target->working_areas;
2014
2015 while (c && c->next) {
2016 assert(c->next->address == c->address + c->size); /* This is an invariant */
2017
2018 /* Find two adjacent free areas */
2019 if (c->free && c->next->free) {
2020 /* Merge the last into the first */
2021 c->size += c->next->size;
2022
2023 /* Remove the last */
2024 struct working_area *to_be_freed = c->next;
2025 c->next = c->next->next;
2026 free(to_be_freed->backup);
2027 free(to_be_freed);
2028
2029 /* If backup memory was allocated to the remaining area, it has
2030 * the wrong size now */
2031 free(c->backup);
2032 c->backup = NULL;
2033 } else {
2034 c = c->next;
2035 }
2036 }
2037 }
2038
2039 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2040 {
2041 /* Reevaluate working area address based on MMU state */
2042 if (!target->working_areas) {
2043 int retval;
2044 int enabled;
2045
2046 retval = target->type->mmu(target, &enabled);
2047 if (retval != ERROR_OK)
2048 return retval;
2049
2050 if (!enabled) {
2051 if (target->working_area_phys_spec) {
2052 LOG_DEBUG("MMU disabled, using physical "
2053 "address for working memory " TARGET_ADDR_FMT,
2054 target->working_area_phys);
2055 target->working_area = target->working_area_phys;
2056 } else {
2057 LOG_ERROR("No working memory available. "
2058 "Specify -work-area-phys to target.");
2059 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2060 }
2061 } else {
2062 if (target->working_area_virt_spec) {
2063 LOG_DEBUG("MMU enabled, using virtual "
2064 "address for working memory " TARGET_ADDR_FMT,
2065 target->working_area_virt);
2066 target->working_area = target->working_area_virt;
2067 } else {
2068 LOG_ERROR("No working memory available. "
2069 "Specify -work-area-virt to target.");
2070 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2071 }
2072 }
2073
2074 /* Set up initial working area on first call */
2075 struct working_area *new_wa = malloc(sizeof(*new_wa));
2076 if (new_wa) {
2077 new_wa->next = NULL;
2078 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2079 new_wa->address = target->working_area;
2080 new_wa->backup = NULL;
2081 new_wa->user = NULL;
2082 new_wa->free = true;
2083 }
2084
2085 target->working_areas = new_wa;
2086 }
2087
2088 /* only allocate multiples of 4 bytes */
2089 if (size % 4)
2090 size = (size + 3) & (~3UL);
2091
2092 struct working_area *c = target->working_areas;
2093
2094 /* Find the first large enough working area */
2095 while (c) {
2096 if (c->free && c->size >= size)
2097 break;
2098 c = c->next;
2099 }
2100
2101 if (!c)
2102 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2103
2104 /* Split the working area into the requested size */
2105 target_split_working_area(c, size);
2106
2107 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2108 size, c->address);
2109
2110 if (target->backup_working_area) {
2111 if (!c->backup) {
2112 c->backup = malloc(c->size);
2113 if (!c->backup)
2114 return ERROR_FAIL;
2115 }
2116
2117 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2118 if (retval != ERROR_OK)
2119 return retval;
2120 }
2121
2122 /* mark as used, and return the new (reused) area */
2123 c->free = false;
2124 *area = c;
2125
2126 /* user pointer */
2127 c->user = area;
2128
2129 print_wa_layout(target);
2130
2131 return ERROR_OK;
2132 }
2133
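/*
 * Illustrative usage sketch (assumption): callers that stage code or data
 * on the target normally pair the allocation with target_free_working_area();
 * "wa" is a hypothetical local variable.
 *
 *   struct working_area *wa = NULL;
 *   int retval = target_alloc_working_area(target, 1024, &wa);
 *   if (retval != ERROR_OK)
 *       return retval;
 *   ... use wa->address / wa->size ...
 *   target_free_working_area(target, wa);
 */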
2134 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2135 {
2136 int retval;
2137
2138 retval = target_alloc_working_area_try(target, size, area);
2139 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2140 LOG_WARNING("not enough working area available (requested %"PRIu32")", size);
2141 return retval;
2142
2143 }
2144
2145 static int target_restore_working_area(struct target *target, struct working_area *area)
2146 {
2147 int retval = ERROR_OK;
2148
2149 if (target->backup_working_area && area->backup) {
2150 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2151 if (retval != ERROR_OK)
2152 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2153 area->size, area->address);
2154 }
2155
2156 return retval;
2157 }
2158
2159 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2160 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2161 {
2162 if (!area || area->free)
2163 return ERROR_OK;
2164
2165 int retval = ERROR_OK;
2166 if (restore) {
2167 retval = target_restore_working_area(target, area);
2168 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2169 if (retval != ERROR_OK)
2170 return retval;
2171 }
2172
2173 area->free = true;
2174
2175 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2176 area->size, area->address);
2177
2178 /* mark user pointer invalid */
2179 /* TODO: Is this really safe? It points to some previous caller's memory.
2180 * How could we know that the area pointer is still in that place and not
2181 * some other vital data? What's the purpose of this, anyway? */
2182 *area->user = NULL;
2183 area->user = NULL;
2184
2185 target_merge_working_areas(target);
2186
2187 print_wa_layout(target);
2188
2189 return retval;
2190 }
2191
2192 int target_free_working_area(struct target *target, struct working_area *area)
2193 {
2194 return target_free_working_area_restore(target, area, 1);
2195 }
2196
2197 /* free resources and restore memory, if restoring memory fails,
2198 * free up resources anyway
2199 */
2200 static void target_free_all_working_areas_restore(struct target *target, int restore)
2201 {
2202 struct working_area *c = target->working_areas;
2203
2204 LOG_DEBUG("freeing all working areas");
2205
2206 /* Loop through all areas, restoring the allocated ones and marking them as free */
2207 while (c) {
2208 if (!c->free) {
2209 if (restore)
2210 target_restore_working_area(target, c);
2211 c->free = true;
2212 *c->user = NULL; /* Same as above */
2213 c->user = NULL;
2214 }
2215 c = c->next;
2216 }
2217
2218 /* Run a merge pass to combine all areas into one */
2219 target_merge_working_areas(target);
2220
2221 print_wa_layout(target);
2222 }
2223
2224 void target_free_all_working_areas(struct target *target)
2225 {
2226 target_free_all_working_areas_restore(target, 1);
2227
2228 /* Now we have none or only one working area marked as free */
2229 if (target->working_areas) {
2230 /* Free the last one to allow on-the-fly moving and resizing */
2231 free(target->working_areas->backup);
2232 free(target->working_areas);
2233 target->working_areas = NULL;
2234 }
2235 }
2236
2237 /* Find the largest number of bytes that can be allocated */
2238 uint32_t target_get_working_area_avail(struct target *target)
2239 {
2240 struct working_area *c = target->working_areas;
2241 uint32_t max_size = 0;
2242
2243 if (!c)
2244 return target->working_area_size;
2245
2246 while (c) {
2247 if (c->free && max_size < c->size)
2248 max_size = c->size;
2249
2250 c = c->next;
2251 }
2252
2253 return max_size;
2254 }
2255
2256 static void target_destroy(struct target *target)
2257 {
2258 if (target->type->deinit_target)
2259 target->type->deinit_target(target);
2260
2261 free(target->semihosting);
2262
2263 jtag_unregister_event_callback(jtag_enable_callback, target);
2264
2265 struct target_event_action *teap = target->event_action;
2266 while (teap) {
2267 struct target_event_action *next = teap->next;
2268 Jim_DecrRefCount(teap->interp, teap->body);
2269 free(teap);
2270 teap = next;
2271 }
2272
2273 target_free_all_working_areas(target);
2274
2275 /* release the targets SMP list */
2276 if (target->smp) {
2277 struct target_list *head, *tmp;
2278
2279 list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
2280 list_del(&head->lh);
2281 head->target->smp = 0;
2282 free(head);
2283 }
2284 if (target->smp_targets != &empty_smp_targets)
2285 free(target->smp_targets);
2286 target->smp = 0;
2287 }
2288
2289 rtos_destroy(target);
2290
2291 free(target->gdb_port_override);
2292 free(target->type);
2293 free(target->trace_info);
2294 free(target->fileio_info);
2295 free(target->cmd_name);
2296 free(target);
2297 }
2298
2299 void target_quit(void)
2300 {
2301 struct target_event_callback *pe = target_event_callbacks;
2302 while (pe) {
2303 struct target_event_callback *t = pe->next;
2304 free(pe);
2305 pe = t;
2306 }
2307 target_event_callbacks = NULL;
2308
2309 struct target_timer_callback *pt = target_timer_callbacks;
2310 while (pt) {
2311 struct target_timer_callback *t = pt->next;
2312 free(pt);
2313 pt = t;
2314 }
2315 target_timer_callbacks = NULL;
2316
2317 for (struct target *target = all_targets; target;) {
2318 struct target *tmp;
2319
2320 tmp = target->next;
2321 target_destroy(target);
2322 target = tmp;
2323 }
2324
2325 all_targets = NULL;
2326 }
2327
2328 int target_arch_state(struct target *target)
2329 {
2330 int retval;
2331 if (!target) {
2332 LOG_WARNING("No target has been configured");
2333 return ERROR_OK;
2334 }
2335
2336 if (target->state != TARGET_HALTED)
2337 return ERROR_OK;
2338
2339 retval = target->type->arch_state(target);
2340 return retval;
2341 }
2342
2343 static int target_get_gdb_fileio_info_default(struct target *target,
2344 struct gdb_fileio_info *fileio_info)
2345 {
2346 /* If the target does not support semihosting, it has no need to
2347 provide a .get_gdb_fileio_info callback. This default simply returns
2348 ERROR_FAIL, so gdb_server will report "Txx" (target halted)
2349 every time. */
2350 return ERROR_FAIL;
2351 }
2352
2353 static int target_gdb_fileio_end_default(struct target *target,
2354 int retcode, int fileio_errno, bool ctrl_c)
2355 {
2356 return ERROR_OK;
2357 }
2358
2359 int target_profiling_default(struct target *target, uint32_t *samples,
2360 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2361 {
2362 struct timeval timeout, now;
2363
2364 gettimeofday(&timeout, NULL);
2365 timeval_add_time(&timeout, seconds, 0);
2366
2367 LOG_INFO("Starting profiling. Halting and resuming the"
2368 " target as often as we can...");
2369
2370 uint32_t sample_count = 0;
2371 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2372 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2373
2374 int retval = ERROR_OK;
2375 for (;;) {
2376 target_poll(target);
2377 if (target->state == TARGET_HALTED) {
2378 uint32_t t = buf_get_u32(reg->value, 0, 32);
2379 samples[sample_count++] = t;
2380 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2381 retval = target_resume(target, 1, 0, 0, 0);
2382 target_poll(target);
2383 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2384 } else if (target->state == TARGET_RUNNING) {
2385 /* We want to quickly sample the PC. */
2386 retval = target_halt(target);
2387 } else {
2388 LOG_INFO("Target is neither halted nor running");
2389 retval = ERROR_OK;
2390 break;
2391 }
2392
2393 if (retval != ERROR_OK)
2394 break;
2395
2396 gettimeofday(&now, NULL);
2397 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2398 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2399 break;
2400 }
2401 }
2402
2403 *num_samples = sample_count;
2404 return retval;
2405 }
2406
2407 /* Single aligned half-words and words are guaranteed to use 16 or
2408 * 32 bit access mode respectively; any other request is transferred
2409 * as quickly as possible
2410 */
2411 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2412 {
2413 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2414 size, address);
2415
2416 if (!target_was_examined(target)) {
2417 LOG_ERROR("Target not examined yet");
2418 return ERROR_FAIL;
2419 }
2420
2421 if (size == 0)
2422 return ERROR_OK;
2423
2424 if ((address + size - 1) < address) {
2425 /* GDB can request this when e.g. PC is 0xfffffffc */
2426 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2427 address,
2428 size);
2429 return ERROR_FAIL;
2430 }
2431
2432 return target->type->write_buffer(target, address, size, buffer);
2433 }
2434
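/*
 * Worked example of the alignment strategy below (informational comment):
 * writing 11 bytes starting at 0x1001 on a target with a 32-bit data bus
 * issues a 1-byte access at 0x1001 and a 2-byte access at 0x1002 to reach
 * 4-byte alignment, then a single 4-byte-wide transfer of two words
 * covering 0x1004..0x100b.
 */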
2435 static int target_write_buffer_default(struct target *target,
2436 target_addr_t address, uint32_t count, const uint8_t *buffer)
2437 {
2438 uint32_t size;
2439 unsigned int data_bytes = target_data_bits(target) / 8;
2440
2441 /* Issue progressively larger accesses until the address is aligned to the
2442 * maximum access size; the loop condition ensures the next pass still has data left. */
2443 for (size = 1;
2444 size < data_bytes && count >= size * 2 + (address & size);
2445 size *= 2) {
2446 if (address & size) {
2447 int retval = target_write_memory(target, address, size, 1, buffer);
2448 if (retval != ERROR_OK)
2449 return retval;
2450 address += size;
2451 count -= size;
2452 buffer += size;
2453 }
2454 }
2455
2456 /* Write the data with as large access size as possible. */
2457 for (; size > 0; size /= 2) {
2458 uint32_t aligned = count - count % size;
2459 if (aligned > 0) {
2460 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2461 if (retval != ERROR_OK)
2462 return retval;
2463 address += aligned;
2464 count -= aligned;
2465 buffer += aligned;
2466 }
2467 }
2468
2469 return ERROR_OK;
2470 }
2471
2472 /* Single aligned half-words and words are guaranteed to use 16 or
2473 * 32 bit access mode respectively; any other request is transferred
2474 * as quickly as possible
2475 */
2476 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2477 {
2478 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2479 size, address);
2480
2481 if (!target_was_examined(target)) {
2482 LOG_ERROR("Target not examined yet");
2483 return ERROR_FAIL;
2484 }
2485
2486 if (size == 0)
2487 return ERROR_OK;
2488
2489 if ((address + size - 1) < address) {
2490 /* GDB can request this when e.g. PC is 0xfffffffc */
2491 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2492 address,
2493 size);
2494 return ERROR_FAIL;
2495 }
2496
2497 return target->type->read_buffer(target, address, size, buffer);
2498 }
2499
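/* The read path below mirrors target_write_buffer_default(): small accesses
 * bring the address up to bus-width alignment, the bulk is transferred with
 * the widest possible accesses, and any remaining tail uses narrower ones. */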
2500 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2501 {
2502 uint32_t size;
2503 unsigned int data_bytes = target_data_bits(target) / 8;
2504
2505 /* Issue progressively larger accesses until the address is aligned to the
2506 * maximum access size; the loop condition ensures the next pass still has data left. */
2507 for (size = 1;
2508 size < data_bytes && count >= size * 2 + (address & size);
2509 size *= 2) {
2510 if (address & size) {
2511 int retval = target_read_memory(target, address, size, 1, buffer);
2512 if (retval != ERROR_OK)
2513 return retval;
2514 address += size;
2515 count -= size;
2516 buffer += size;
2517 }
2518 }
2519
2520 /* Read the data with as large access size as possible. */
2521 for (; size > 0; size /= 2) {
2522 uint32_t aligned = count - count % size;
2523 if (aligned > 0) {
2524 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2525 if (retval != ERROR_OK)
2526 return retval;
2527 address += aligned;
2528 count -= aligned;
2529 buffer += aligned;
2530 }
2531 }
2532
2533 return ERROR_OK;
2534 }
2535
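/* Compute a CRC over target memory. If the target's fast checksum_memory()
 * handler fails, fall back to reading the whole region into a host buffer
 * and computing the checksum with image_calculate_checksum(). */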
2536 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2537 {
2538 uint8_t *buffer;
2539 int retval;
2540 uint32_t i;
2541 uint32_t checksum = 0;
2542 if (!target_was_examined(target)) {
2543 LOG_ERROR("Target not examined yet");
2544 return ERROR_FAIL;
2545 }
2546 if (!target->type->checksum_memory) {
2547 LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
2548 return ERROR_FAIL;
2549 }
2550
2551 retval = target->type->checksum_memory(target, address, size, &checksum);
2552 if (retval != ERROR_OK) {
2553 buffer = malloc(size);
2554 if (!buffer) {
2555 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2556 return ERROR_COMMAND_SYNTAX_ERROR;
2557 }
2558 retval = target_read_buffer(target, address, size, buffer);
2559 if (retval != ERROR_OK) {
2560 free(buffer);
2561 return retval;
2562 }
2563
2564 /* convert to target endianness */
2565 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2566 uint32_t target_data;
2567 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2568 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2569 }
2570
2571 retval = image_calculate_checksum(buffer, size, &checksum);
2572 free(buffer);
2573 }
2574
2575 *crc = checksum;
2576
2577 return retval;
2578 }
2579
2580 int target_blank_check_memory(struct target *target,
2581 struct target_memory_check_block *blocks, int num_blocks,
2582 uint8_t erased_value)
2583 {
2584 if (!target_was_examined(target)) {
2585 LOG_ERROR("Target not examined yet");
2586 return ERROR_FAIL;
2587 }
2588
2589 if (!target->type->blank_check_memory)
2590 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2591
2592 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2593 }
2594
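/*
 * Convenience accessors for single 8/16/32/64-bit values follow. They handle
 * target endianness via the target_buffer_get/set helpers. Illustrative
 * sketch (assumption; 0x40000000 is an arbitrary example address, not a
 * real register):
 *
 *   uint32_t id;
 *   if (target_read_u32(target, 0x40000000, &id) == ERROR_OK)
 *       target_write_u32(target, 0x40000000, id | 1);
 */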
2595 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2596 {
2597 uint8_t value_buf[8];
2598 if (!target_was_examined(target)) {
2599 LOG_ERROR("Target not examined yet");
2600 return ERROR_FAIL;
2601 }
2602
2603 int retval = target_read_memory(target, address, 8, 1, value_buf);
2604
2605 if (retval == ERROR_OK) {
2606 *value = target_buffer_get_u64(target, value_buf);
2607 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2608 address,
2609 *value);
2610 } else {
2611 *value = 0x0;
2612 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2613 address);
2614 }
2615
2616 return retval;
2617 }
2618
2619 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2620 {
2621 uint8_t value_buf[4];
2622 if (!target_was_examined(target)) {
2623 LOG_ERROR("Target not examined yet");
2624 return ERROR_FAIL;
2625 }
2626
2627 int retval = target_read_memory(target, address, 4, 1, value_buf);
2628
2629 if (retval == ERROR_OK) {
2630 *value = target_buffer_get_u32(target, value_buf);
2631 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2632 address,
2633 *value);
2634 } else {
2635 *value = 0x0;
2636 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2637 address);
2638 }
2639
2640 return retval;
2641 }
2642
2643 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2644 {
2645 uint8_t value_buf[2];
2646 if (!target_was_examined(target)) {
2647 LOG_ERROR("Target not examined yet");
2648 return ERROR_FAIL;
2649 }
2650
2651 int retval = target_read_memory(target, address, 2, 1, value_buf);
2652
2653 if (retval == ERROR_OK) {
2654 *value = target_buffer_get_u16(target, value_buf);
2655 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2656 address,
2657 *value);
2658 } else {
2659 *value = 0x0;
2660 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2661 address);
2662 }
2663
2664 return retval;
2665 }
2666
2667 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2668 {
2669 if (!target_was_examined(target)) {
2670 LOG_ERROR("Target not examined yet");
2671 return ERROR_FAIL;
2672 }
2673
2674 int retval = target_read_memory(target, address, 1, 1, value);
2675
2676 if (retval == ERROR_OK) {
2677 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2678 address,
2679 *value);
2680 } else {
2681 *value = 0x0;
2682 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2683 address);
2684 }
2685
2686 return retval;
2687 }
2688
2689 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2690 {
2691 int retval;
2692 uint8_t value_buf[8];
2693 if (!target_was_examined(target)) {
2694 LOG_ERROR("Target not examined yet");
2695 return ERROR_FAIL;
2696 }
2697
2698 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2699 address,
2700 value);
2701
2702 target_buffer_set_u64(target, value_buf, value);
2703 retval = target_write_memory(target, address, 8, 1, value_buf);
2704 if (retval != ERROR_OK)
2705 LOG_DEBUG("failed: %i", retval);
2706
2707 return retval;
2708 }
2709
2710 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2711 {
2712 int retval;
2713 uint8_t value_buf[4];
2714 if (!target_was_examined(target)) {
2715 LOG_ERROR("Target not examined yet");
2716 return ERROR_FAIL;
2717 }
2718
2719 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2720 address,
2721 value);
2722
2723 target_buffer_set_u32(target, value_buf, value);
2724 retval = target_write_memory(target, address, 4, 1, value_buf);
2725 if (retval != ERROR_OK)
2726 LOG_DEBUG("failed: %i", retval);
2727
2728 return retval;
2729 }
2730
2731 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2732 {
2733 int retval;
2734 uint8_t value_buf[2];
2735 if (!target_was_examined(target)) {
2736 LOG_ERROR("Target not examined yet");
2737 return ERROR_FAIL;
2738 }
2739
2740 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2741 address,
2742 value);
2743
2744 target_buffer_set_u16(target, value_buf, value);
2745 retval = target_write_memory(target, address, 2, 1, value_buf);
2746 if (retval != ERROR_OK)
2747 LOG_DEBUG("failed: %i", retval);
2748
2749 return retval;
2750 }
2751
2752 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2753 {
2754 int retval;
2755 if (!target_was_examined(target)) {
2756 LOG_ERROR("Target not examined yet");
2757 return ERROR_FAIL;
2758 }
2759
2760 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2761 address, value);
2762
2763 retval = target_write_memory(target, address, 1, 1, &value);
2764 if (retval != ERROR_OK)
2765 LOG_DEBUG("failed: %i", retval);
2766
2767 return retval;
2768 }
2769
2770 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2771 {
2772 int retval;
2773 uint8_t value_buf[8];
2774 if (!target_was_examined(target)) {
2775 LOG_ERROR("Target not examined yet");
2776 return ERROR_FAIL;
2777 }
2778
2779 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2780 address,
2781 value);
2782
2783 target_buffer_set_u64(target, value_buf, value);
2784 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2785 if (retval != ERROR_OK)
2786 LOG_DEBUG("failed: %i", retval);
2787
2788 return retval;
2789 }
2790
2791 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2792 {
2793 int retval;
2794 uint8_t value_buf[4];
2795 if (!target_was_examined(target)) {
2796 LOG_ERROR("Target not examined yet");
2797 return ERROR_FAIL;
2798 }
2799
2800 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2801 address,
2802 value);
2803
2804 target_buffer_set_u32(target, value_buf, value);
2805 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2806 if (retval != ERROR_OK)
2807 LOG_DEBUG("failed: %i", retval);
2808
2809 return retval;
2810 }
2811
2812 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2813 {
2814 int retval;
2815 uint8_t value_buf[2];
2816 if (!target_was_examined(target)) {
2817 LOG_ERROR("Target not examined yet");
2818 return ERROR_FAIL;
2819 }
2820
2821 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2822 address,
2823 value);
2824
2825 target_buffer_set_u16(target, value_buf, value);
2826 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2827 if (retval != ERROR_OK)
2828 LOG_DEBUG("failed: %i", retval);
2829
2830 return retval;
2831 }
2832
2833 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2834 {
2835 int retval;
2836 if (!target_was_examined(target)) {
2837 LOG_ERROR("Target not examined yet");
2838 return ERROR_FAIL;
2839 }
2840
2841 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2842 address, value);
2843
2844 retval = target_write_phys_memory(target, address, 1, 1, &value);
2845 if (retval != ERROR_OK)
2846 LOG_DEBUG("failed: %i", retval);
2847
2848 return retval;
2849 }
2850
2851 static int find_target(struct command_invocation *cmd, const char *name)
2852 {
2853 struct target *target = get_target(name);
2854 if (!target) {
2855 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2856 return ERROR_FAIL;
2857 }
2858 if (!target->tap->enabled) {
2859 command_print(cmd, "Target: TAP %s is disabled, "
2860 "can't be the current target\n",
2861 target->tap->dotted_name);
2862 return ERROR_FAIL;
2863 }
2864
2865 cmd->ctx->current_target = target;
2866 if (cmd->ctx->current_target_override)
2867 cmd->ctx->current_target_override = target;
2868
2869 return ERROR_OK;
2870 }
2871
2872
2873 COMMAND_HANDLER(handle_targets_command)
2874 {
2875 int retval = ERROR_OK;
2876 if (CMD_ARGC == 1) {
2877 retval = find_target(CMD, CMD_ARGV[0]);
2878 if (retval == ERROR_OK) {
2879 /* we're done! */
2880 return retval;
2881 }
2882 }
2883
2884 struct target *target = all_targets;
2885 command_print(CMD, " TargetName Type Endian TapName State ");
2886 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2887 while (target) {
2888 const char *state;
2889 char marker = ' ';
2890
2891 if (target->tap->enabled)
2892 state = target_state_name(target);
2893 else
2894 state = "tap-disabled";
2895
2896 if (CMD_CTX->current_target == target)
2897 marker = '*';
2898
2899 /* keep columns lined up to match the headers above */
2900 command_print(CMD,
2901 "%2d%c %-18s %-10s %-6s %-18s %s",
2902 target->target_number,
2903 marker,
2904 target_name(target),
2905 target_type_name(target),
2906 jim_nvp_value2name_simple(nvp_target_endian,
2907 target->endianness)->name,
2908 target->tap->dotted_name,
2909 state);
2910 target = target->next;
2911 }
2912
2913 return retval;
2914 }
2915
2916 /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
2917
2918 static int power_dropout;
2919 static int srst_asserted;
2920
2921 static int run_power_restore;
2922 static int run_power_dropout;
2923 static int run_srst_asserted;
2924 static int run_srst_deasserted;
2925
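/* Sample the power-dropout and SRST lines and latch edge events into the
 * run_* flags above. The static last_power/last_srst timestamps implement
 * a 2 second debounce so a bouncing line does not retrigger the handlers. */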
2926 static int sense_handler(void)
2927 {
2928 static int prev_srst_asserted;
2929 static int prev_power_dropout;
2930
2931 int retval = jtag_power_dropout(&power_dropout);
2932 if (retval != ERROR_OK)
2933 return retval;
2934
2935 int power_restored;
2936 power_restored = prev_power_dropout && !power_dropout;
2937 if (power_restored)
2938 run_power_restore = 1;
2939
2940 int64_t current = timeval_ms();
2941 static int64_t last_power;
2942 bool wait_more = last_power + 2000 > current;
2943 if (power_dropout && !wait_more) {
2944 run_power_dropout = 1;
2945 last_power = current;
2946 }
2947
2948 retval = jtag_srst_asserted(&srst_asserted);
2949 if (retval != ERROR_OK)
2950 return retval;
2951
2952 int srst_deasserted;
2953 srst_deasserted = prev_srst_asserted && !srst_asserted;
2954
2955 static int64_t last_srst;
2956 wait_more = last_srst + 2000 > current;
2957 if (srst_deasserted && !wait_more) {
2958 run_srst_deasserted = 1;
2959 last_srst = current;
2960 }
2961
2962 if (!prev_srst_asserted && srst_asserted)
2963 run_srst_asserted = 1;
2964
2965 prev_srst_asserted = srst_asserted;
2966 prev_power_dropout = power_dropout;
2967
2968 if (srst_deasserted || power_restored) {
2969 /* Other than logging the event we can't do anything here.
2970 * Issuing a reset is a particularly bad idea as we might
2971 * be inside a reset already.
2972 */
2973 }
2974
2975 return ERROR_OK;
2976 }
2977
2978 /* process target state changes */
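/* Targets that fail to poll are backed off: backoff.times grows roughly
 * geometrically (1, 3, 7, 15, ...) and that many polling rounds are skipped
 * before the target is re-examined and polled again, until
 * backoff.times * polling_interval reaches the 5000ms cap checked below. */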
2979 static int handle_target(void *priv)
2980 {
2981 Jim_Interp *interp = (Jim_Interp *)priv;
2982 int retval = ERROR_OK;
2983
2984 if (!is_jtag_poll_safe()) {
2985 /* polling is disabled currently */
2986 return ERROR_OK;
2987 }
2988
2989 /* we do not want to recurse here... */
2990 static int recursive;
2991 if (!recursive) {
2992 recursive = 1;
2993 sense_handler();
2994 /* danger! running these procedures can trigger srst assertions and power dropouts.
2995 * We need to avoid an infinite loop/recursion here and we do that by
2996 * clearing the flags after running these events.
2997 */
2998 int did_something = 0;
2999 if (run_srst_asserted) {
3000 LOG_INFO("srst asserted detected, running srst_asserted proc.");
3001 Jim_Eval(interp, "srst_asserted");
3002 did_something = 1;
3003 }
3004 if (run_srst_deasserted) {
3005 Jim_Eval(interp, "srst_deasserted");
3006 did_something = 1;
3007 }
3008 if (run_power_dropout) {
3009 LOG_INFO("Power dropout detected, running power_dropout proc.");
3010 Jim_Eval(interp, "power_dropout");
3011 did_something = 1;
3012 }
3013 if (run_power_restore) {
3014 Jim_Eval(interp, "power_restore");
3015 did_something = 1;
3016 }
3017
3018 if (did_something) {
3019 /* clear detect flags */
3020 sense_handler();
3021 }
3022
3023 /* clear action flags */
3024
3025 run_srst_asserted = 0;
3026 run_srst_deasserted = 0;
3027 run_power_restore = 0;
3028 run_power_dropout = 0;
3029
3030 recursive = 0;
3031 }
3032
3033 /* Poll targets for state changes unless that's globally disabled.
3034 * Skip targets that are currently disabled.
3035 */
3036 for (struct target *target = all_targets;
3037 is_jtag_poll_safe() && target;
3038 target = target->next) {
3039
3040 if (!target_was_examined(target))
3041 continue;
3042
3043 if (!target->tap->enabled)
3044 continue;
3045
3046 if (target->backoff.times > target->backoff.count) {
3047 /* do not poll this time as we failed previously */
3048 target->backoff.count++;
3049 continue;
3050 }
3051 target->backoff.count = 0;
3052
3053 /* only poll target if we've got power and srst isn't asserted */
3054 if (!power_dropout && !srst_asserted) {
3055 /* polling may fail silently until the target has been examined */
3056 retval = target_poll(target);
3057 if (retval != ERROR_OK) {
3058 /* Poll failed: back off, growing the gap between polls (default 100ms interval) up to roughly 5000ms */
3059 if (target->backoff.times * polling_interval < 5000) {
3060 target->backoff.times *= 2;
3061 target->backoff.times++;
3062 }
3063
3064 /* Tell GDB to halt the debugger. This allows the user to
3065 * run monitor commands to handle the situation.
3066 */
3067 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3068 }
3069 if (target->backoff.times > 0) {
3070 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3071 target_reset_examined(target);
3072 retval = target_examine_one(target);
3073 /* Target examination could have failed due to unstable connection,
3074 * but we set the examined flag anyway to repoll it later */
3075 if (retval != ERROR_OK) {
3076 target_set_examined(target);
3077 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3078 target->backoff.times * polling_interval);
3079 return retval;
3080 }
3081 }
3082
3083 /* Since we succeeded, we reset backoff count */
3084 target->backoff.times = 0;
3085 }
3086 }
3087
3088 return retval;
3089 }
3090
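/* Usage summary, derived from the argument handling below:
 *   reg                    list all registers of the current target
 *   reg <num|name>         display one register, reading it if not cached
 *   reg <num|name> force   force a fresh read before displaying
 *   reg <num|name> <value> set the register (the value must start with a digit)
 */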
3091 COMMAND_HANDLER(handle_reg_command)
3092 {
3093 LOG_DEBUG("-");
3094
3095 struct target *target = get_current_target(CMD_CTX);
3096 struct reg *reg = NULL;
3097
3098 /* list all available registers for the current target */
3099 if (CMD_ARGC == 0) {
3100 struct reg_cache *cache = target->reg_cache;
3101
3102 unsigned int count = 0;
3103 while (cache) {
3104 unsigned i;
3105
3106 command_print(CMD, "===== %s", cache->name);
3107
3108 for (i = 0, reg = cache->reg_list;
3109 i < cache->num_regs;
3110 i++, reg++, count++) {
3111 if (!reg->exist || reg->hidden)
3112 continue;
3113 /* only print cached values if they are valid */
3114 if (reg->valid) {
3115 char *value = buf_to_hex_str(reg->value,
3116 reg->size);
3117 command_print(CMD,
3118 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3119 count, reg->name,
3120 reg->size, value,
3121 reg->dirty
3122 ? " (dirty)"
3123 : "");
3124 free(value);
3125 } else {
3126 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3127 count, reg->name,
3128 reg->size);
3129 }
3130 }
3131 cache = cache->next;
3132 }
3133
3134 return ERROR_OK;
3135 }
3136
3137 /* access a single register by its ordinal number */
3138 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3139 unsigned num;
3140 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3141
3142 struct reg_cache *cache = target->reg_cache;
3143 unsigned int count = 0;
3144 while (cache) {
3145 unsigned i;
3146 for (i = 0; i < cache->num_regs; i++) {
3147 if (count++ == num) {
3148 reg = &cache->reg_list[i];
3149 break;
3150 }
3151 }
3152 if (reg)
3153 break;
3154 cache = cache->next;
3155 }
3156
3157 if (!reg) {
3158 command_print(CMD, "%i is out of bounds, the current target "
3159 "has only %i registers (0 - %i)", num, count, count - 1);
3160 return ERROR_OK;
3161 }
3162 } else {
3163 /* access a single register by its name */
3164 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3165
3166 if (!reg)
3167 goto not_found;
3168 }
3169
3170 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3171
3172 if (!reg->exist)
3173 goto not_found;
3174
3175 /* display a register */
3176 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3177 && (CMD_ARGV[1][0] <= '9')))) {
3178 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3179 reg->valid = 0;
3180
3181 if (reg->valid == 0) {
3182 int retval = reg->type->get(reg);
3183 if (retval != ERROR_OK) {
3184 LOG_ERROR("Could not read register '%s'", reg->name);
3185 return retval;
3186 }
3187 }
3188 char *value = buf_to_hex_str(reg->value, reg->size);
3189 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3190 free(value);
3191 return ERROR_OK;
3192 }
3193
3194 /* set register value */
3195 if (CMD_ARGC == 2) {
3196 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3197 if (!buf)
3198 return ERROR_FAIL;
3199 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3200
3201 int retval = reg->type->set(reg, buf);
3202 if (retval != ERROR_OK) {
3203 LOG_ERROR("Could not write to register '%s'", reg->name);
3204 } else {
3205 char *value = buf_to_hex_str(reg->value, reg->size);
3206 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3207 free(value);
3208 }
3209
3210 free(buf);
3211
3212 return retval;
3213 }
3214
3215 return ERROR_COMMAND_SYNTAX_ERROR;
3216
3217 not_found:
3218 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3219 return ERROR_OK;
3220 }
3221
3222 COMMAND_HANDLER(handle_poll_command)
3223 {
3224 int retval = ERROR_OK;
3225 struct target *target = get_current_target(CMD_CTX);
3226
3227 if (CMD_ARGC == 0) {
3228 command_print(CMD, "background polling: %s",
3229 jtag_poll_get_enabled() ? "on" : "off");
3230 command_print(CMD, "TAP: %s (%s)",
3231 target->tap->dotted_name,
3232 target->tap->enabled ? "enabled" : "disabled");
3233 if (!target->tap->enabled)
3234 return ERROR_OK;
3235 retval = target_poll(target);
3236 if (retval != ERROR_OK)
3237 return retval;
3238 retval = target_arch_state(target);
3239 if (retval != ERROR_OK)
3240 return retval;
3241 } else if (CMD_ARGC == 1) {
3242 bool enable;
3243 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3244 jtag_poll_set_enabled(enable);
3245 } else
3246 return ERROR_COMMAND_SYNTAX_ERROR;
3247
3248 return retval;
3249 }
3250
3251 COMMAND_HANDLER(handle_wait_halt_command)
3252 {
3253 if (CMD_ARGC > 1)
3254 return ERROR_COMMAND_SYNTAX_ERROR;
3255
3256 unsigned ms = DEFAULT_HALT_TIMEOUT;
3257 if (CMD_ARGC == 1) {
3258 int retval = parse_uint(CMD_ARGV[0], &ms);
3259 if (retval != ERROR_OK)
3260 return ERROR_COMMAND_SYNTAX_ERROR;
3261 }
3262
3263 struct target *target = get_current_target(CMD_CTX);
3264 return target_wait_state(target, TARGET_HALTED, ms);
3265 }
3266
3267 /* wait for target state to change. The trick here is to have a low
3268 * latency for short waits and not to suck up all the CPU time
3269 * on longer waits.
3270 *
3271 * After 500ms, keep_alive() is invoked
3272 */
3273 int target_wait_state(struct target *target, enum target_state state, int ms)
3274 {
3275 int retval;
3276 int64_t then = 0, cur;
3277 bool once = true;
3278
3279 for (;;) {
3280 retval = target_poll(target);
3281 if (retval != ERROR_OK)
3282 return retval;
3283 if (target->state == state)
3284 break;
3285 cur = timeval_ms();
3286 if (once) {
3287 once = false;
3288 then = timeval_ms();
3289 LOG_DEBUG("waiting for target %s...",
3290 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3291 }
3292
3293 if (cur-then > 500)
3294 keep_alive();
3295
3296 if ((cur-then) > ms) {
3297 LOG_ERROR("timed out while waiting for target %s",
3298 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3299 return ERROR_FAIL;
3300 }
3301 }
3302
3303 return ERROR_OK;
3304 }
3305
3306 COMMAND_HANDLER(handle_halt_command)
3307 {
3308 LOG_DEBUG("-");
3309
3310 struct target *target = get_current_target(CMD_CTX);
3311
3312 target->verbose_halt_msg = true;
3313
3314 int retval = target_halt(target);
3315 if (retval != ERROR_OK)
3316 return retval;
3317
3318 if (CMD_ARGC == 1) {
3319 unsigned wait_local;
3320 retval = parse_uint(CMD_ARGV[0], &wait_local);
3321 if (retval != ERROR_OK)
3322 return ERROR_COMMAND_SYNTAX_ERROR;
3323 if (!wait_local)
3324 return ERROR_OK;
3325 }
3326
3327 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3328 }
3329
3330 COMMAND_HANDLER(handle_soft_reset_halt_command)
3331 {
3332 struct target *target = get_current_target(CMD_CTX);
3333
3334 LOG_USER("requesting target halt and executing a soft reset");
3335
3336 target_soft_reset_halt(target);
3337
3338 return ERROR_OK;
3339 }
3340
3341 COMMAND_HANDLER(handle_reset_command)
3342 {
3343 if (CMD_ARGC > 1)
3344 return ERROR_COMMAND_SYNTAX_ERROR;
3345
3346 enum target_reset_mode reset_mode = RESET_RUN;
3347 if (CMD_ARGC == 1) {
3348 const struct jim_nvp *n;
3349 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3350 if ((!n->name) || (n->value == RESET_UNKNOWN))
3351 return ERROR_COMMAND_SYNTAX_ERROR;
3352 reset_mode = n->value;
3353 }
3354
3355 /* reset *all* targets */
3356 return target_process_reset(CMD, reset_mode);
3357 }
3358
3359
3360 COMMAND_HANDLER(handle_resume_command)
3361 {
3362 int current = 1;
3363 if (CMD_ARGC > 1)
3364 return ERROR_COMMAND_SYNTAX_ERROR;
3365
3366 struct target *target = get_current_target(CMD_CTX);
3367
3368 /* with no CMD_ARGV, resume from current pc, addr = 0,
3369 * with one arguments, addr = CMD_ARGV[0],
3370 * handle breakpoints, not debugging */
3371 target_addr_t addr = 0;
3372 if (CMD_ARGC == 1) {
3373 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3374 current = 0;
3375 }
3376
3377 return target_resume(target, current, addr, 1, 0);
3378 }
3379
3380 COMMAND_HANDLER(handle_step_command)
3381 {
3382 if (CMD_ARGC > 1)
3383 return ERROR_COMMAND_SYNTAX_ERROR;
3384
3385 LOG_DEBUG("-");
3386
3387 /* with no CMD_ARGV, step from current pc, addr = 0,
3388 * with one argument addr = CMD_ARGV[0],
3389 * handle breakpoints, debugging */
3390 target_addr_t addr = 0;
3391 int current_pc = 1;
3392 if (CMD_ARGC == 1) {
3393 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3394 current_pc = 0;
3395 }
3396
3397 struct target *target = get_current_target(CMD_CTX);
3398
3399 return target_step(target, current_pc, addr, 1);
3400 }
3401
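/* Pretty-print memory read by the md* commands: each output line starts with
 * the address and is followed by up to (32 / size) values, e.g. eight words
 * per line for "mdw". */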
3402 void target_handle_md_output(struct command_invocation *cmd,
3403 struct target *target, target_addr_t address, unsigned size,
3404 unsigned count, const uint8_t *buffer)
3405 {
3406 const unsigned line_bytecnt = 32;
3407 unsigned line_modulo = line_bytecnt / size;
3408
3409 char output[line_bytecnt * 4 + 1];
3410 unsigned output_len = 0;
3411
3412 const char *value_fmt;
3413 switch (size) {
3414 case 8:
3415 value_fmt = "%16.16"PRIx64" ";
3416 break;
3417 case 4:
3418 value_fmt = "%8.8"PRIx64" ";
3419 break;
3420 case 2:
3421 value_fmt = "%4.4"PRIx64" ";
3422 break;
3423 case 1:
3424 value_fmt = "%2.2"PRIx64" ";
3425 break;
3426 default:
3427 /* "can't happen", caller checked */
3428 LOG_ERROR("invalid memory read size: %u", size);
3429 return;
3430 }
3431
3432 for (unsigned i = 0; i < count; i++) {
3433 if (i % line_modulo == 0) {
3434 output_len += snprintf(output + output_len,
3435 sizeof(output) - output_len,
3436 TARGET_ADDR_FMT ": ",
3437 (address + (i * size)));
3438 }
3439
3440 uint64_t value = 0;
3441 const uint8_t *value_ptr = buffer + i * size;
3442 switch (size) {
3443 case 8:
3444 value = target_buffer_get_u64(target, value_ptr);
3445 break;
3446 case 4:
3447 value = target_buffer_get_u32(target, value_ptr);
3448 break;
3449 case 2:
3450 value = target_buffer_get_u16(target, value_ptr);
3451 break;
3452 case 1:
3453 value = *value_ptr;
3454 }
3455 output_len += snprintf(output + output_len,
3456 sizeof(output) - output_len,
3457 value_fmt, value);
3458
3459 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3460 command_print(cmd, "%s", output);
3461 output_len = 0;
3462 }
3463 }
3464 }
3465
3466 COMMAND_HANDLER(handle_md_command)
3467 {
3468 if (CMD_ARGC < 1)
3469 return ERROR_COMMAND_SYNTAX_ERROR;
3470
3471 unsigned size = 0;
3472 switch (CMD_NAME[2]) {
3473 case 'd':
3474 size = 8;
3475 break;
3476 case 'w':
3477 size = 4;
3478 break;
3479 case 'h':
3480 size = 2;
3481 break;
3482 case 'b':
3483 size = 1;
3484 break;
3485 default:
3486 return ERROR_COMMAND_SYNTAX_ERROR;
3487 }
3488
3489 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3490 int (*fn)(struct target *target,
3491 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3492 if (physical) {
3493 CMD_ARGC--;
3494 CMD_ARGV++;
3495 fn = target_read_phys_memory;
3496 } else
3497 fn = target_read_memory;
3498 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3499 return ERROR_COMMAND_SYNTAX_ERROR;
3500
3501 target_addr_t address;
3502 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3503
3504 unsigned count = 1;
3505 if (CMD_ARGC == 2)
3506 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3507
3508 uint8_t *buffer = calloc(count, size);
3509 if (!buffer) {
3510 LOG_ERROR("Failed to allocate md read buffer");
3511 return ERROR_FAIL;
3512 }
3513
3514 struct target *target = get_current_target(CMD_CTX);
3515 int retval = fn(target, address, size, count, buffer);
3516 if (retval == ERROR_OK)
3517 target_handle_md_output(CMD, target, address, size, count, buffer);
3518
3519 free(buffer);
3520
3521 return retval;
3522 }
3523
3524 typedef int (*target_write_fn)(struct target *target,
3525 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3526
3527 static int target_fill_mem(struct target *target,
3528 target_addr_t address,
3529 target_write_fn fn,
3530 unsigned data_size,
3531 /* value */
3532 uint64_t b,
3533 /* count */
3534 unsigned c)
3535 {
3536 /* We have to write in reasonably large chunks to be able
3537 * to fill large memory areas with any sane speed */
3538 const unsigned chunk_size = 16384;
3539 uint8_t *target_buf = malloc(chunk_size * data_size);
3540 if (!target_buf) {
3541 LOG_ERROR("Out of memory");
3542 return ERROR_FAIL;
3543 }
3544
3545 for (unsigned i = 0; i < chunk_size; i++) {
3546 switch (data_size) {
3547 case 8:
3548 target_buffer_set_u64(target, target_buf + i * data_size, b);
3549 break;
3550 case 4:
3551 target_buffer_set_u32(target, target_buf + i * data_size, b);
3552 break;
3553 case 2:
3554 target_buffer_set_u16(target, target_buf + i * data_size, b);
3555 break;
3556 case 1:
3557 target_buffer_set_u8(target, target_buf + i * data_size, b);
3558 break;
3559 default:
3560 exit(-1);
3561 }
3562 }
3563
3564 int retval = ERROR_OK;
3565
3566 for (unsigned x = 0; x < c; x += chunk_size) {
3567 unsigned current;
3568 current = c - x;
3569 if (current > chunk_size)
3570 current = chunk_size;
3571 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3572 if (retval != ERROR_OK)
3573 break;
3574 /* avoid GDB timeouts */
3575 keep_alive();
3576 }
3577 free(target_buf);
3578
3579 return retval;
3580 }
3581
3582
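/* Illustrative sketch (assumption; address and value are arbitrary examples):
 * "mww 0x20000000 0xdeadbeef 0x100" fills 256 words with the given value,
 * and the "phys" prefix bypasses the MMU via target_write_phys_memory(). */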
3583 COMMAND_HANDLER(handle_mw_command)
3584 {
3585 if (CMD_ARGC < 2)
3586 return ERROR_COMMAND_SYNTAX_ERROR;
3587 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3588 target_write_fn fn;
3589 if (physical) {
3590 CMD_ARGC--;
3591 CMD_ARGV++;
3592 fn = target_write_phys_memory;
3593 } else
3594 fn = target_write_memory;
3595 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3596 return ERROR_COMMAND_SYNTAX_ERROR;
3597
3598 target_addr_t address;
3599 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3600
3601 uint64_t value;
3602 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3603
3604 unsigned count = 1;
3605 if (CMD_ARGC == 3)
3606 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3607
3608 struct target *target = get_current_target(CMD_CTX);
3609 unsigned wordsize;
3610 switch (CMD_NAME[2]) {
3611 case 'd':
3612 wordsize = 8;
3613 break;
3614 case 'w':
3615 wordsize = 4;
3616 break;
3617 case 'h':
3618 wordsize = 2;
3619 break;
3620 case 'b':
3621 wordsize = 1;
3622 break;
3623 default:
3624 return ERROR_COMMAND_SYNTAX_ERROR;
3625 }
3626
3627 return target_fill_mem(target, address, fn, wordsize, value, count);
3628 }
3629
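/* Argument parsing for the load_image command:
 *   <file> [base_address [type [min_address [length]]]]
 * min_address and length bound the address range that
 * handle_load_image_command() will actually write. */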
3630 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3631 target_addr_t *min_address, target_addr_t *max_address)
3632 {
3633 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3634 return ERROR_COMMAND_SYNTAX_ERROR;
3635
3636 /* a base address isn't always necessary,
3637 * default to 0x0 (i.e. don't relocate) */
3638 if (CMD_ARGC >= 2) {
3639 target_addr_t addr;
3640 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3641 image->base_address = addr;
3642 image->base_address_set = true;
3643 } else
3644 image->base_address_set = false;
3645
3646 image->start_address_set = false;
3647
3648 if (CMD_ARGC >= 4)
3649 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3650 if (CMD_ARGC == 5) {
3651 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3652 /* use size (given) to find max (required) */
3653 *max_address += *min_address;
3654 }
3655
3656 if (*min_address > *max_address)
3657 return ERROR_COMMAND_SYNTAX_ERROR;
3658
3659 return ERROR_OK;
3660 }
3661
3662 COMMAND_HANDLER(handle_load_image_command)
3663 {
3664 uint8_t *buffer;
3665 size_t buf_cnt;
3666 uint32_t image_size;
3667 target_addr_t min_address = 0;
3668 target_addr_t max_address = -1;
3669 struct image image;
3670
3671 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
3672 &image, &min_address, &max_address);
3673 if (retval != ERROR_OK)
3674 return retval;
3675
3676 struct target *target = get_current_target(CMD_CTX);
3677
3678 struct duration bench;
3679 duration_start(&bench);
3680
3681 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3682 return ERROR_FAIL;
3683
3684 image_size = 0x0;
3685 retval = ERROR_OK;
3686 for (unsigned int i = 0; i < image.num_sections; i++) {
3687 buffer = malloc(image.sections[i].size);
3688 if (!buffer) {
3689 command_print(CMD,
3690 "error allocating buffer for section (%d bytes)",
3691 (int)(image.sections[i].size));
3692 retval = ERROR_FAIL;
3693 break;
3694 }
3695
3696 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3697 if (retval != ERROR_OK) {
3698 free(buffer);
3699 break;
3700 }
3701
3702 uint32_t offset = 0;
3703 uint32_t length = buf_cnt;
3704
3705 /* DANGER!!! beware of unsigned comparison here!!! */
3706
3707 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3708 (image.sections[i].base_address < max_address)) {
3709
3710 if (image.sections[i].base_address < min_address) {
3711 /* clip addresses below */
3712 offset += min_address-image.sections[i].base_address;
3713 length -= offset;
3714 }
3715
3716 if (image.sections[i].base_address + buf_cnt > max_address)
3717 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3718
3719 retval = target_write_buffer(target,
3720 image.sections[i].base_address + offset, length, buffer + offset);
3721 if (retval != ERROR_OK) {
3722 free(buffer);
3723 break;
3724 }
3725 image_size += length;
3726 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3727 (unsigned int)length,
3728 image.sections[i].base_address + offset);
3729 }
3730
3731 free(buffer);
3732 }
3733
3734 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3735 command_print(CMD, "downloaded %" PRIu32 " bytes "
3736 "in %fs (%0.3f KiB/s)", image_size,
3737 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3738 }
3739
3740 image_close(&image);
3741
3742 return retval;
3743
3744 }
3745
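/* Illustrative invocation (file name and range are hypothetical), matching the
 * three arguments parsed below:
 *   dump_image ram.bin 0x20000000 0x1000
 * reads 0x1000 bytes starting at 0x20000000 and writes them to ram.bin. */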
3746 COMMAND_HANDLER(handle_dump_image_command)
3747 {
3748 struct fileio *fileio;
3749 uint8_t *buffer;
3750 int retval, retvaltemp;
3751 target_addr_t address, size;
3752 struct duration bench;
3753 struct target *target = get_current_target(CMD_CTX);
3754
3755 if (CMD_ARGC != 3)
3756 return ERROR_COMMAND_SYNTAX_ERROR;
3757
3758 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3759 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3760
3761 uint32_t buf_size = (size > 4096) ? 4096 : size;
3762 buffer = malloc(buf_size);
3763 if (!buffer)
3764 return ERROR_FAIL;
3765
3766 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3767 if (retval != ERROR_OK) {
3768 free(buffer);
3769 return retval;
3770 }
3771
3772 duration_start(&bench);
3773
3774 while (size > 0) {
3775 size_t size_written;
3776 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3777 retval = target_read_buffer(target, address, this_run_size, buffer);
3778 if (retval != ERROR_OK)
3779 break;
3780
3781 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3782 if (retval != ERROR_OK)
3783 break;
3784
3785 size -= this_run_size;
3786 address += this_run_size;
3787 }
3788
3789 free(buffer);
3790
3791 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3792 size_t filesize;
3793 retval = fileio_size(fileio, &filesize);
3794 if (retval != ERROR_OK)
3795 return retval;
3796 command_print(CMD,
3797 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3798 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3799 }
3800
3801 retvaltemp = fileio_close(fileio);
3802 if (retvaltemp != ERROR_OK)
3803 return retvaltemp;
3804
3805 return retval;
3806 }
3807
3808 enum verify_mode {
3809 IMAGE_TEST = 0,
3810 IMAGE_VERIFY = 1,
3811 IMAGE_CHECKSUM_ONLY = 2
3812 };
3813
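/* Summary of the behaviour implemented below: IMAGE_TEST only prints each
 * section's address and length without comparing; IMAGE_VERIFY compares a CRC
 * per section and falls back to a byte-by-byte compare on mismatch;
 * IMAGE_CHECKSUM_ONLY fails immediately on the first CRC mismatch. */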
3814 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3815 {
3816 uint8_t *buffer;
3817 size_t buf_cnt;
3818 uint32_t image_size;
3819 int retval;
3820 uint32_t checksum = 0;
3821 uint32_t mem_checksum = 0;
3822
3823 struct image image;
3824
3825 struct target *target = get_current_target(CMD_CTX);
3826
3827 if (CMD_ARGC < 1)
3828 return ERROR_COMMAND_SYNTAX_ERROR;
3829
3830 if (!target) {
3831 LOG_ERROR("no target selected");
3832 return ERROR_FAIL;
3833 }
3834
3835 struct duration bench;
3836 duration_start(&bench);
3837
3838 if (CMD_ARGC >= 2) {
3839 target_addr_t addr;
3840 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3841 image.base_address = addr;
3842 image.base_address_set = true;
3843 } else {
3844 image.base_address_set = false;
3845 image.base_address = 0x0;
3846 }
3847
3848 image.start_address_set = false;
3849
3850 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3851 if (retval != ERROR_OK)
3852 return retval;
3853
3854 image_size = 0x0;
3855 int diffs = 0;
3856 retval = ERROR_OK;
3857 for (unsigned int i = 0; i < image.num_sections; i++) {
3858 buffer = malloc(image.sections[i].size);
3859 if (!buffer) {
3860 command_print(CMD,
3861 "error allocating buffer for section (%" PRIu32 " bytes)",
3862 image.sections[i].size);
retval = ERROR_FAIL;
3863 break;
3864 }
3865 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3866 if (retval != ERROR_OK) {
3867 free(buffer);
3868 break;
3869 }
3870
3871 if (verify >= IMAGE_VERIFY) {
3872 /* calculate checksum of image */
3873 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3874 if (retval != ERROR_OK) {
3875 free(buffer);
3876 break;
3877 }
3878
3879 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3880 if (retval != ERROR_OK) {
3881 free(buffer);
3882 break;
3883 }
3884 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3885 LOG_ERROR("checksum mismatch");
3886 free(buffer);
3887 retval = ERROR_FAIL;
3888 goto done;
3889 }
3890 if (checksum != mem_checksum) {
3891 /* failed crc checksum, fall back to a binary compare */
3892 uint8_t *data;
3893
3894 if (diffs == 0)
3895 LOG_ERROR("checksum mismatch - attempting binary compare");
3896
3897 data = malloc(buf_cnt);
if (!data) {
free(buffer);
retval = ERROR_FAIL;
break;
}
3898
3899 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3900 if (retval == ERROR_OK) {
3901 uint32_t t;
3902 for (t = 0; t < buf_cnt; t++) {
3903 if (data[t] != buffer[t]) {
3904 command_print(CMD,
3905 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3906 diffs,
3907 (unsigned)(t + image.sections[i].base_address),
3908 data[t],
3909 buffer[t]);
3910 if (diffs++ >= 127) {
3911 command_print(CMD, "More than 128 errors, the rest are not printed.");
3912 free(data);
3913 free(buffer);
3914 goto done;
3915 }
3916 }
3917 keep_alive();
3918 }
3919 }
3920 free(data);
3921 }
3922 } else {
3923 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3924 image.sections[i].base_address,
3925 buf_cnt);
3926 }
3927
3928 free(buffer);
3929 image_size += buf_cnt;
3930 }
3931 if (diffs > 0)
3932 command_print(CMD, "No more differences found.");
3933 done:
3934 if (diffs > 0)
3935 retval = ERROR_FAIL;
3936 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3937 command_print(CMD, "verified %" PRIu32 " bytes "
3938 "in %fs (%0.3f KiB/s)", image_size,
3939 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3940 }
3941
3942 image_close(&image);
3943
3944 return retval;
3945 }
3946
3947 COMMAND_HANDLER(handle_verify_image_checksum_command)
3948 {
3949 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3950 }
3951
3952 COMMAND_HANDLER(handle_verify_image_command)
3953 {
3954 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3955 }
3956
3957 COMMAND_HANDLER(handle_test_image_command)
3958 {
3959 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3960 }
3961
3962 static int handle_bp_command_list(struct command_invocation *cmd)
3963 {
3964 struct target *target = get_current_target(cmd->ctx);
3965 struct breakpoint *breakpoint = target->breakpoints;
3966 while (breakpoint) {
3967 if (breakpoint->type == BKPT_SOFT) {
3968 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3969 breakpoint->length);
3970 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3971 breakpoint->address,
3972 breakpoint->length,
3973 breakpoint->set, buf);
3974 free(buf);
3975 } else {
3976 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3977 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3978 breakpoint->asid,
3979 breakpoint->length, breakpoint->set);
3980 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3981 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3982 breakpoint->address,
3983 breakpoint->length, breakpoint->set);
3984 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3985 breakpoint->asid);
3986 } else
3987 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3988 breakpoint->address,
3989 breakpoint->length, breakpoint->set);
3990 }
3991
3992 breakpoint = breakpoint->next;
3993 }
3994 return ERROR_OK;
3995 }
3996
3997 static int handle_bp_command_set(struct command_invocation *cmd,
3998 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3999 {
4000 struct target *target = get_current_target(cmd->ctx);
4001 int retval;
4002
4003 if (asid == 0) {
4004 retval = breakpoint_add(target, addr, length, hw);
4005 /* error is always logged in breakpoint_add(), do not print it again */
4006 if (retval == ERROR_OK)
4007 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
4008
4009 } else if (addr == 0) {
4010 if (!target->type->add_context_breakpoint) {
4011 LOG_ERROR("Context breakpoint not available");
4012 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4013 }
4014 retval = context_breakpoint_add(target, asid, length, hw);
4015 /* error is always logged in context_breakpoint_add(), do not print it again */
4016 if (retval == ERROR_OK)
4017 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4018
4019 } else {
4020 if (!target->type->add_hybrid_breakpoint) {
4021 LOG_ERROR("Hybrid breakpoint not available");
4022 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4023 }
4024 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4025 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4026 if (retval == ERROR_OK)
4027 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4028 }
4029 return retval;
4030 }
4031
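/* Illustrative invocations (addresses and ASID values are hypothetical),
 * matching the argument forms handled below:
 *   bp                        - list breakpoints
 *   bp 0x08000100 2           - software breakpoint
 *   bp 0x08000100 2 hw        - hardware breakpoint
 *   bp 0x22 4 hw_ctx          - context (ASID) hardware breakpoint
 *   bp 0x08000100 0x22 4      - hybrid (address + ASID) hardware breakpoint */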
4032 COMMAND_HANDLER(handle_bp_command)
4033 {
4034 target_addr_t addr;
4035 uint32_t asid;
4036 uint32_t length;
4037 int hw = BKPT_SOFT;
4038
4039 switch (CMD_ARGC) {
4040 case 0:
4041 return handle_bp_command_list(CMD);
4042
4043 case 2:
4044 asid = 0;
4045 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4046 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4047 return handle_bp_command_set(CMD, addr, asid, length, hw);
4048
4049 case 3:
4050 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4051 hw = BKPT_HARD;
4052 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4053 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4054 asid = 0;
4055 return handle_bp_command_set(CMD, addr, asid, length, hw);
4056 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4057 hw = BKPT_HARD;
4058 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4059 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4060 addr = 0;
4061 return handle_bp_command_set(CMD, addr, asid, length, hw);
4062 }
4063 /* fallthrough */
4064 case 4:
4065 hw = BKPT_HARD;
4066 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4067 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4068 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4069 return handle_bp_command_set(CMD, addr, asid, length, hw);
4070
4071 default:
4072 return ERROR_COMMAND_SYNTAX_ERROR;
4073 }
4074 }
4075
4076 COMMAND_HANDLER(handle_rbp_command)
4077 {
4078 if (CMD_ARGC != 1)
4079 return ERROR_COMMAND_SYNTAX_ERROR;
4080
4081 struct target *target = get_current_target(CMD_CTX);
4082
4083 if (!strcmp(CMD_ARGV[0], "all")) {
4084 breakpoint_remove_all(target);
4085 } else {
4086 target_addr_t addr;
4087 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4088
4089 breakpoint_remove(target, addr);
4090 }
4091
4092 return ERROR_OK;
4093 }
4094
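/* Illustrative invocations (addresses and data values are hypothetical),
 * matching the argument forms handled below:
 *   wp                           - list watchpoints
 *   wp 0x20000000 4              - access watchpoint, 4 bytes
 *   wp 0x20000000 4 w            - write watchpoint
 *   wp 0x20000000 4 r 0x55 0xff  - read watchpoint with data value and mask */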
4095 COMMAND_HANDLER(handle_wp_command)
4096 {
4097 struct target *target = get_current_target(CMD_CTX);
4098
4099 if (CMD_ARGC == 0) {
4100 struct watchpoint *watchpoint = target->watchpoints;
4101
4102 while (watchpoint) {
4103 command_print(CMD, "address: " TARGET_ADDR_FMT
4104 ", len: 0x%8.8" PRIx32
4105 ", r/w/a: %i, value: 0x%8.8" PRIx32
4106 ", mask: 0x%8.8" PRIx32,
4107 watchpoint->address,
4108 watchpoint->length,
4109 (int)watchpoint->rw,
4110 watchpoint->value,
4111 watchpoint->mask);
4112 watchpoint = watchpoint->next;
4113 }
4114 return ERROR_OK;
4115 }
4116
4117 enum watchpoint_rw type = WPT_ACCESS;
4118 target_addr_t addr = 0;
4119 uint32_t length = 0;
4120 uint32_t data_value = 0x0;
4121 uint32_t data_mask = 0xffffffff;
4122
4123 switch (CMD_ARGC) {
4124 case 5:
4125 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4126 /* fall through */
4127 case 4:
4128 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4129 /* fall through */
4130 case 3:
4131 switch (CMD_ARGV[2][0]) {
4132 case 'r':
4133 type = WPT_READ;
4134 break;
4135 case 'w':
4136 type = WPT_WRITE;
4137 break;
4138 case 'a':
4139 type = WPT_ACCESS;
4140 break;
4141 default:
4142 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4143 return ERROR_COMMAND_SYNTAX_ERROR;
4144 }
4145 /* fall through */
4146 case 2:
4147 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4148 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4149 break;
4150
4151 default:
4152 return ERROR_COMMAND_SYNTAX_ERROR;
4153 }
4154
4155 int retval = watchpoint_add(target, addr, length, type,
4156 data_value, data_mask);
4157 if (retval != ERROR_OK)
4158 LOG_ERROR("Failure setting watchpoints");
4159
4160 return retval;
4161 }
4162
4163 COMMAND_HANDLER(handle_rwp_command)
4164 {
4165 if (CMD_ARGC != 1)
4166 return ERROR_COMMAND_SYNTAX_ERROR;
4167
4168 target_addr_t addr;
4169 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4170
4171 struct target *target = get_current_target(CMD_CTX);
4172 watchpoint_remove(target, addr);
4173
4174 return ERROR_OK;
4175 }
4176
4177 /**
4178 * Translate a virtual address to a physical address.
4179 *
4180 * The low-level target implementation must have logged a detailed error
4181 * which is forwarded to telnet/GDB session.
4182 */
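/* Illustrative invocation (address hypothetical): "virt2phys 0xc0001000"
 * prints the corresponding physical address for the current target. */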
4183 COMMAND_HANDLER(handle_virt2phys_command)
4184 {
4185 if (CMD_ARGC != 1)
4186 return ERROR_COMMAND_SYNTAX_ERROR;
4187
4188 target_addr_t va;
4189 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4190 target_addr_t pa;
4191
4192 struct target *target = get_current_target(CMD_CTX);
4193 int retval = target->type->virt2phys(target, va, &pa);
4194 if (retval == ERROR_OK)
4195 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4196
4197 return retval;
4198 }
4199
4200 static void write_data(FILE *f, const void *data, size_t len)
4201 {
4202 size_t written = fwrite(data, 1, len, f);
4203 if (written != len)
4204 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4205 }
4206
4207 static void write_long(FILE *f, int l, struct target *target)
4208 {
4209 uint8_t val[4];
4210
4211 target_buffer_set_u32(target, val, l);
4212 write_data(f, val, 4);
4213 }
4214
4215 static void write_string(FILE *f, char *s)
4216 {
4217 write_data(f, s, strlen(s));
4218 }
4219
4220 typedef unsigned char UNIT[2]; /* unit of profiling */
4221
4222 /* Dump a gmon.out histogram file. */
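/* The output written below consists of the "gmon" magic and version header,
 * one GMON_TAG_TIME_HIST record (low_pc, high_pc, bucket count, sample rate
 * and the "seconds" dimension label) and one 16-bit, saturating counter per
 * bucket. */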
4223 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4224 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4225 {
4226 uint32_t i;
4227 FILE *f = fopen(filename, "w");
4228 if (!f)
4229 return;
4230 write_string(f, "gmon");
4231 write_long(f, 0x00000001, target); /* Version */
4232 write_long(f, 0, target); /* padding */
4233 write_long(f, 0, target); /* padding */
4234 write_long(f, 0, target); /* padding */
4235
4236 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4237 write_data(f, &zero, 1);
4238
4239 /* figure out bucket size */
4240 uint32_t min;
4241 uint32_t max;
4242 if (with_range) {
4243 min = start_address;
4244 max = end_address;
4245 } else {
4246 min = samples[0];
4247 max = samples[0];
4248 for (i = 0; i < sample_num; i++) {
4249 if (min > samples[i])
4250 min = samples[i];
4251 if (max < samples[i])
4252 max = samples[i];
4253 }
4254
4255 /* max should be (largest sample + 1)
4256 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4257 max++;
4258 }
4259
4260 int address_space = max - min;
4261 assert(address_space >= 2);
4262
4263 /* FIXME: What is the reasonable number of buckets?
4264 * The profiling result will be more accurate if there are enough buckets. */
4265 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4266 uint32_t num_buckets = address_space / sizeof(UNIT);
4267 if (num_buckets > max_buckets)
4268 num_buckets = max_buckets;
4269 int *buckets = malloc(sizeof(int) * num_buckets);
4270 if (!buckets) {
4271 fclose(f);
4272 return;
4273 }
4274 memset(buckets, 0, sizeof(int) * num_buckets);
4275 for (i = 0; i < sample_num; i++) {
4276 uint32_t address = samples[i];
4277
4278 if ((address < min) || (max <= address))
4279 continue;
4280
4281 long long a = address - min;
4282 long long b = num_buckets;
4283 long long c = address_space;
4284 int index_t = (a * b) / c; /* use 64-bit intermediates: a * b would overflow int32 */
4285 buckets[index_t]++;
4286 }
4287
4288 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4289 write_long(f, min, target); /* low_pc */
4290 write_long(f, max, target); /* high_pc */
4291 write_long(f, num_buckets, target); /* # of buckets */
4292 float sample_rate = sample_num / (duration_ms / 1000.0);
4293 write_long(f, sample_rate, target);
4294 write_string(f, "seconds");
4295 for (i = 0; i < (15-strlen("seconds")); i++)
4296 write_data(f, &zero, 1);
4297 write_string(f, "s");
4298
4299 /* append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4300
4301 char *data = malloc(2 * num_buckets);
4302 if (data) {
4303 for (i = 0; i < num_buckets; i++) {
4304 int val;
4305 val = buckets[i];
4306 if (val > 65535)
4307 val = 65535;
4308 data[i * 2] = val&0xff;
4309 data[i * 2 + 1] = (val >> 8) & 0xff;
4310 }
4311 free(buckets);
4312 write_data(f, data, num_buckets * 2);
4313 free(data);
4314 } else
4315 free(buckets);
4316
4317 fclose(f);
4318 }
4319
4320 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4321 * which will be used as a random sampling of PC */
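/* Illustrative invocation (file name hypothetical), matching the argument
 * handling below: "profile 30 gmon.out" or "profile 30 gmon.out 0x08000000
 * 0x08020000". The first argument is passed to target_profiling() as 'offset',
 * the second names the gmon.out file to write, and the optional address pair
 * restricts the histogram range. */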
4322 COMMAND_HANDLER(handle_profile_command)
4323 {
4324 struct target *target = get_current_target(CMD_CTX);
4325
4326 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4327 return ERROR_COMMAND_SYNTAX_ERROR;
4328
4329 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4330 uint32_t offset;
4331 uint32_t num_of_samples;
4332 int retval = ERROR_OK;
4333 bool halted_before_profiling = target->state == TARGET_HALTED;
4334
4335 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4336
4337 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4338 if (!samples) {
4339 LOG_ERROR("No memory to store samples.");
4340 return ERROR_FAIL;
4341 }
4342
4343 uint64_t timestart_ms = timeval_ms();
4344 /**
4345 * Some cores let us sample the PC without the
4346 * annoying halt/resume step; for example, ARMv7 PCSR.
4347 * Provide a way to use that more efficient mechanism.
4348 */
4349 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4350 &num_of_samples, offset);
4351 if (retval != ERROR_OK) {
4352 free(samples);
4353 return retval;
4354 }
4355 uint32_t duration_ms = timeval_ms() - timestart_ms;
4356
4357 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4358
4359 retval = target_poll(target);
4360 if (retval != ERROR_OK) {
4361 free(samples);
4362 return retval;
4363 }
4364
4365 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4366 /* The target was halted before we started and is running now. Halt it,
4367 * for consistency. */
4368 retval = target_halt(target);
4369 if (retval != ERROR_OK) {
4370 free(samples);
4371 return retval;
4372 }
4373 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4374 /* The target was running before we started and is halted now. Resume
4375 * it, for consistency. */
4376 retval = target_resume(target, 1, 0, 0, 0);
4377 if (retval != ERROR_OK) {
4378 free(samples);
4379 return retval;
4380 }
4381 }
4382
4383 retval = target_poll(target);
4384 if (retval != ERROR_OK) {
4385 free(samples);
4386 return retval;
4387 }
4388
4389 uint32_t start_address = 0;
4390 uint32_t end_address = 0;
4391 bool with_range = false;
4392 if (CMD_ARGC == 4) {
4393 with_range = true;
4394 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4395 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4396 }
4397
4398 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4399 with_range, start_address, end_address, target, duration_ms);
4400 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4401
4402 free(samples);
4403 return retval;
4404 }
4405
4406 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4407 {
4408 char *namebuf;
4409 Jim_Obj *obj_name, *obj_val;
4410 int result;
4411
4412 namebuf = alloc_printf("%s(%d)", varname, idx);
4413 if (!namebuf)
4414 return JIM_ERR;
4415
4416 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4417 jim_wide wide_val = val;
4418 obj_val = Jim_NewWideObj(interp, wide_val);
4419 if (!obj_name || !obj_val) {
4420 free(namebuf);
4421 return JIM_ERR;
4422 }
4423
4424 Jim_IncrRefCount(obj_name);
4425 Jim_IncrRefCount(obj_val);
4426 result = Jim_SetVariable(interp, obj_name, obj_val);
4427 Jim_DecrRefCount(interp, obj_name);
4428 Jim_DecrRefCount(interp, obj_val);
4429 free(namebuf);
4430 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4431 return result;
4432 }
4433
4434 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4435 {
4436 struct command_context *context;
4437 struct target *target;
4438
4439 context = current_command_context(interp);
4440 assert(context);
4441
4442 target = get_current_target(context);
4443 if (!target) {
4444 LOG_ERROR("mem2array: no current target");
4445 return JIM_ERR;
4446 }
4447
4448 return target_mem2array(interp, target, argc - 1, argv + 1);
4449 }
4450
4451 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4452 {
4453 int e;
4454
4455 /* argv[0] = name of array to receive the data
4456 * argv[1] = desired element width in bits
4457 * argv[2] = memory address
4458 * argv[3] = count of times to read
4459 * argv[4] = optional "phys"
4460 */
4461 if (argc < 4 || argc > 5) {
4462 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4463 return JIM_ERR;
4464 }
4465
4466 /* Arg 0: Name of the array variable */
4467 const char *varname = Jim_GetString(argv[0], NULL);
4468
4469 /* Arg 1: Bit width of one element */
4470 long l;
4471 e = Jim_GetLong(interp, argv[1], &l);
4472 if (e != JIM_OK)
4473 return e;
4474 const unsigned int width_bits = l;
4475
4476 if (width_bits != 8 &&
4477 width_bits != 16 &&
4478 width_bits != 32 &&
4479 width_bits != 64) {
4480 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4481 Jim_AppendStrings(interp, Jim_GetResult(interp),
4482 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4483 return JIM_ERR;
4484 }
4485 const unsigned int width = width_bits / 8;
4486
4487 /* Arg 2: Memory address */
4488 jim_wide wide_addr;
4489 e = Jim_GetWide(interp, argv[2], &wide_addr);
4490 if (e != JIM_OK)
4491 return e;
4492 target_addr_t addr = (target_addr_t)wide_addr;
4493
4494 /* Arg 3: Number of elements to read */
4495 e = Jim_GetLong(interp, argv[3], &l);
4496 if (e != JIM_OK)
4497 return e;
4498 size_t len = l;
4499
4500 /* Arg 4: phys */
4501 bool is_phys = false;
4502 if (argc > 4) {
4503 int str_len = 0;
4504 const char *phys = Jim_GetString(argv[4], &str_len);
4505 if (!strncmp(phys, "phys", str_len))
4506 is_phys = true;
4507 else
4508 return JIM_ERR;
4509 }
4510
4511 /* Argument checks */
4512 if (len == 0) {
4513 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4514 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
4515 return JIM_ERR;
4516 }
4517 if ((addr + (len * width)) < addr) {
4518 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4519 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4520 return JIM_ERR;
4521 }
4522 if (len > 65536) {
4523 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4524 Jim_AppendStrings(interp, Jim_GetResult(interp),
4525 "mem2array: too large read request, exceeds 64K items", NULL);
4526 return JIM_ERR;
4527 }
4528
4529 if ((width == 1) ||
4530 ((width == 2) && ((addr & 1) == 0)) ||
4531 ((width == 4) && ((addr & 3) == 0)) ||
4532 ((width == 8) && ((addr & 7) == 0))) {
4533 /* alignment correct */
4534 } else {
4535 char buf[100];
4536 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4537 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4538 addr,
4539 width);
4540 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4541 return JIM_ERR;
4542 }
4543
4544 /* Transfer loop */
4545
4546 /* index counter */
4547 size_t idx = 0;
4548
4549 const size_t buffersize = 4096;
4550 uint8_t *buffer = malloc(buffersize);
4551 if (!buffer)
4552 return JIM_ERR;
4553
4554 /* assume ok */
4555 e = JIM_OK;
4556 while (len) {
4557 /* Slurp... in buffer size chunks */
4558 const unsigned int max_chunk_len = buffersize / width;
4559 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4560
4561 int retval;
4562 if (is_phys)
4563 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4564 else
4565 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4566 if (retval != ERROR_OK) {
4567 /* BOO !*/
4568 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4569 addr,
4570 width,
4571 chunk_len);
4572 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4573 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4574 e = JIM_ERR;
4575 break;
4576 } else {
4577 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4578 uint64_t v = 0;
4579 switch (width) {
4580 case 8:
4581 v = target_buffer_get_u64(target, &buffer[i*width]);
4582 break;
4583 case 4:
4584 v = target_buffer_get_u32(target, &buffer[i*width]);
4585 break;
4586 case 2:
4587 v = target_buffer_get_u16(target, &buffer[i*width]);
4588 break;
4589 case 1:
4590 v = buffer[i] & 0x0ff;
4591 break;
4592 }
4593 new_u64_array_element(interp, varname, idx, v);
4594 }
4595 len -= chunk_len;
4596 addr += chunk_len * width;
4597 }
4598 }
4599
4600 free(buffer);
4601
4602 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4603
4604 return e;
4605 }
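
/* Illustrative Tcl use of the verb implemented above (variable name and
 * address are hypothetical): "mem2array readings 32 0x20000000 16" fills
 * readings(0)..readings(15) with 16 32-bit words read from 0x20000000;
 * appending "phys" forces a physical-address read. */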
4606
4607 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4608 {
4609 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4610 if (!namebuf)
4611 return JIM_ERR;
4612
4613 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4614 if (!obj_name) {
4615 free(namebuf);
4616 return JIM_ERR;
4617 }
4618
4619 Jim_IncrRefCount(obj_name);
4620 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4621 Jim_DecrRefCount(interp, obj_name);
4622 free(namebuf);
4623 if (!obj_val)
4624 return JIM_ERR;
4625
4626 jim_wide wide_val;
4627 int result = Jim_GetWide(interp, obj_val, &wide_val);
4628 *val = wide_val;
4629 return result;
4630 }
4631
4632 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4633 {
4634 struct command_context *context;
4635 struct target *target;
4636
4637 context = current_command_context(interp);
4638 assert(context);
4639
4640 target = get_current_target(context);
4641 if (!target) {
4642 LOG_ERROR("array2mem: no current target");
4643 return JIM_ERR;
4644 }
4645
4646 return target_array2mem(interp, target, argc-1, argv + 1);
4647 }
4648
4649 static int target_array2mem(Jim_Interp *interp, struct target *target,
4650 int argc, Jim_Obj *const *argv)
4651 {
4652 int e;
4653
4654 /* argv[0] = name of array from which to read the data
4655 * argv[1] = desired element width in bits
4656 * argv[2] = memory address
4657 * argv[3] = number of elements to write
4658 * argv[4] = optional "phys"
4659 */
4660 if (argc < 4 || argc > 5) {
4661 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4662 return JIM_ERR;
4663 }
4664
4665 /* Arg 0: Name of the array variable */
4666 const char *varname = Jim_GetString(argv[0], NULL);
4667
4668 /* Arg 1: Bit width of one element */
4669 long l;
4670 e = Jim_GetLong(interp, argv[1], &l);
4671 if (e != JIM_OK)
4672 return e;
4673 const unsigned int width_bits = l;
4674
4675 if (width_bits != 8 &&
4676 width_bits != 16 &&
4677 width_bits != 32 &&
4678 width_bits != 64) {
4679 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4680 Jim_AppendStrings(interp, Jim_GetResult(interp),
4681 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4682 return JIM_ERR;
4683 }
4684 const unsigned int width = width_bits / 8;
4685
4686 /* Arg 2: Memory address */
4687 jim_wide wide_addr;
4688 e = Jim_GetWide(interp, argv[2], &wide_addr);
4689 if (e != JIM_OK)
4690 return e;
4691 target_addr_t addr = (target_addr_t)wide_addr;
4692
4693 /* Arg 3: Number of elements to write */
4694 e = Jim_GetLong(interp, argv[3], &l);
4695 if (e != JIM_OK)
4696 return e;
4697 size_t len = l;
4698
4699 /* Arg 4: Phys */
4700 bool is_phys = false;
4701 if (argc > 4) {
4702 int str_len = 0;
4703 const char *phys = Jim_GetString(argv[4], &str_len);
4704 if (!strncmp(phys, "phys", str_len))
4705 is_phys = true;
4706 else
4707 return JIM_ERR;
4708 }
4709
4710 /* Argument checks */
4711 if (len == 0) {
4712 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4713 Jim_AppendStrings(interp, Jim_GetResult(interp),
4714 "array2mem: zero width read?", NULL);
4715 return JIM_ERR;
4716 }
4717
4718 if ((addr + (len * width)) < addr) {
4719 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4720 Jim_AppendStrings(interp, Jim_GetResult(interp),
4721 "array2mem: addr + len - wraps to zero?", NULL);
4722 return JIM_ERR;
4723 }
4724
4725 if (len > 65536) {
4726 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4727 Jim_AppendStrings(interp, Jim_GetResult(interp),
4728 "array2mem: too large memory write request, exceeds 64K items", NULL);
4729 return JIM_ERR;
4730 }
4731
4732 if ((width == 1) ||
4733 ((width == 2) && ((addr & 1) == 0)) ||
4734 ((width == 4) && ((addr & 3) == 0)) ||
4735 ((width == 8) && ((addr & 7) == 0))) {
4736 /* alignment correct */
4737 } else {
4738 char buf[100];
4739 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4740 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte writes",
4741 addr,
4742 width);
4743 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4744 return JIM_ERR;
4745 }
4746
4747 /* Transfer loop */
4748
4749 /* assume ok */
4750 e = JIM_OK;
4751
4752 const size_t buffersize = 4096;
4753 uint8_t *buffer = malloc(buffersize);
4754 if (!buffer)
4755 return JIM_ERR;
4756
4757 /* index counter */
4758 size_t idx = 0;
4759
4760 while (len) {
4761 /* Slurp... in buffer size chunks */
4762 const unsigned int max_chunk_len = buffersize / width;
4763
4764 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4765
4766 /* Fill the buffer */
4767 for (size_t i = 0; i < chunk_len; i++, idx++) {
4768 uint64_t v = 0;
4769 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4770 free(buffer);
4771 return JIM_ERR;
4772 }
4773 switch (width) {
4774 case 8:
4775 target_buffer_set_u64(target, &buffer[i * width], v);
4776 break;
4777 case 4:
4778 target_buffer_set_u32(target, &buffer[i * width], v);
4779 break;
4780 case 2:
4781 target_buffer_set_u16(target, &buffer[i * width], v);
4782 break;
4783 case 1:
4784 buffer[i] = v & 0x0ff;
4785 break;
4786 }
4787 }
4788 len -= chunk_len;
4789
4790 /* Write the buffer to memory */
4791 int retval;
4792 if (is_phys)
4793 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4794 else
4795 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4796 if (retval != ERROR_OK) {
4797 /* BOO !*/
4798 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4799 addr,
4800 width,
4801 chunk_len);
4802 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4803 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4804 e = JIM_ERR;
4805 break;
4806 }
4807 addr += chunk_len * width;
4808 }
4809
4810 free(buffer);
4811
4812 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4813
4814 return e;
4815 }
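
/* Illustrative Tcl use of the verb implemented above (variable name and
 * address are hypothetical): after setting data(0)..data(15),
 * "array2mem data 32 0x20000000 16" writes those 16 32-bit words to
 * 0x20000000; appending "phys" forces a physical-address write. */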
4816
4817 /* FIX? should we propagate errors here rather than printing them
4818 * and continuing?
4819 */
4820 void target_handle_event(struct target *target, enum target_event e)
4821 {
4822 struct target_event_action *teap;
4823 int retval;
4824
4825 for (teap = target->event_action; teap; teap = teap->next) {
4826 if (teap->event == e) {
4827 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
4828 target->target_number,
4829 target_name(target),
4830 target_type_name(target),
4831 e,
4832 target_event_name(e),
4833 Jim_GetString(teap->body, NULL));
4834
4835 /* Override the current target with the target the event
4836 * is issued from (many scripts need it).
4837 * Restore the previous override as soon
4838 * as the handler processing is done */
4839 struct command_context *cmd_ctx = current_command_context(teap->interp);
4840 struct target *saved_target_override = cmd_ctx->current_target_override;
4841 cmd_ctx->current_target_override = target;
4842
4843 retval = Jim_EvalObj(teap->interp, teap->body);
4844
4845 cmd_ctx->current_target_override = saved_target_override;
4846
4847 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
4848 return;
4849
4850 if (retval == JIM_RETURN)
4851 retval = teap->interp->returnCode;
4852
4853 if (retval != JIM_OK) {
4854 Jim_MakeErrorMessage(teap->interp);
4855 LOG_USER("Error executing event %s on target %s:\n%s",
4856 target_event_name(e),
4857 target_name(target),
4858 Jim_GetString(Jim_GetResult(teap->interp), NULL));
4859 /* clean both error code and stacktrace before return */
4860 Jim_Eval(teap->interp, "error \"\" \"\"");
4861 }
4862 }
4863 }
4864 }
4865
4866 /**
4867 * Returns true only if the target has a handler for the specified event.
4868 */
4869 bool target_has_event_action(struct target *target, enum target_event event)
4870 {
4871 struct target_event_action *teap;
4872
4873 for (teap = target->event_action; teap; teap = teap->next) {
4874 if (teap->event == event)
4875 return true;
4876 }
4877 return false;
4878 }
4879
4880 enum target_cfg_param {
4881 TCFG_TYPE,
4882 TCFG_EVENT,
4883 TCFG_WORK_AREA_VIRT,
4884 TCFG_WORK_AREA_PHYS,
4885 TCFG_WORK_AREA_SIZE,
4886 TCFG_WORK_AREA_BACKUP,
4887 TCFG_ENDIAN,
4888 TCFG_COREID,
4889 TCFG_CHAIN_POSITION,
4890 TCFG_DBGBASE,
4891 TCFG_RTOS,
4892 TCFG_DEFER_EXAMINE,
4893 TCFG_GDB_PORT,
4894 TCFG_GDB_MAX_CONNECTIONS,
4895 };
4896
4897 static struct jim_nvp nvp_config_opts[] = {
4898 { .name = "-type", .value = TCFG_TYPE },
4899 { .name = "-event", .value = TCFG_EVENT },
4900 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
4901 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
4902 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
4903 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
4904 { .name = "-endian", .value = TCFG_ENDIAN },
4905 { .name = "-coreid", .value = TCFG_COREID },
4906 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
4907 { .name = "-dbgbase", .value = TCFG_DBGBASE },
4908 { .name = "-rtos", .value = TCFG_RTOS },
4909 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
4910 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
4911 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
4912 { .name = NULL, .value = -1 }
4913 };
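
/* Illustrative uses of the options above (names and values are hypothetical;
 * $_TARGETNAME stands for the target's command name):
 *   $_TARGETNAME configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   $_TARGETNAME configure -event reset-init { halt }
 *   $_TARGETNAME cget -endian
 */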
4914
4915 static int target_configure(struct jim_getopt_info *goi, struct target *target)
4916 {
4917 struct jim_nvp *n;
4918 Jim_Obj *o;
4919 jim_wide w;
4920 int e;
4921
4922 /* parse config or cget options ... */
4923 while (goi->argc > 0) {
4924 Jim_SetEmptyResult(goi->interp);
4925 /* jim_getopt_debug(goi); */
4926
4927 if (target->type->target_jim_configure) {
4928 /* target defines a configure function */
4929 /* target gets first dibs on parameters */
4930 e = (*(target->type->target_jim_configure))(target, goi);
4931 if (e == JIM_OK) {
4932 /* more? */
4933 continue;
4934 }
4935 if (e == JIM_ERR) {
4936 /* An error */
4937 return e;
4938 }
4939 /* otherwise we 'continue' below */
4940 }
4941 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
4942 if (e != JIM_OK) {
4943 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
4944 return e;
4945 }
4946 switch (n->value) {
4947 case TCFG_TYPE:
4948 /* not settable */
4949 if (goi->isconfigure) {
4950 Jim_SetResultFormatted(goi->interp,
4951 "not settable: %s", n->name);
4952 return JIM_ERR;
4953 } else {
4954 no_params:
4955 if (goi->argc != 0) {
4956 Jim_WrongNumArgs(goi->interp,
4957 goi->argc, goi->argv,
4958 "NO PARAMS");
4959 return JIM_ERR;
4960 }
4961 }
4962 Jim_SetResultString(goi->interp,
4963 target_type_name(target), -1);
4964 /* loop for more */
4965 break;
4966 case TCFG_EVENT:
4967 if (goi->argc == 0) {
4968 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
4969 return JIM_ERR;
4970 }
4971
4972 e = jim_getopt_nvp(goi, nvp_target_event, &n);
4973 if (e != JIM_OK) {
4974 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
4975 return e;
4976 }
4977
4978 if (goi->isconfigure) {
4979 if (goi->argc != 1) {
4980 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
4981 return JIM_ERR;
4982 }
4983 } else {
4984 if (goi->argc != 0) {
4985 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
4986 return JIM_ERR;
4987 }
4988 }
4989
4990 {
4991 struct target_event_action *teap;
4992
4993 teap = target->event_action;
4994 /* replace existing? */
4995 while (teap) {
4996 if (teap->event == (enum target_event)n->value)
4997 break;
4998 teap = teap->next;
4999 }
5000
5001 if (goi->isconfigure) {
5002 /* START_DEPRECATED_TPIU */
5003 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5004 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5005 /* END_DEPRECATED_TPIU */
5006
5007 bool replace = true;
5008 if (!teap) {
5009 /* create new */
5010 teap = calloc(1, sizeof(*teap));
5011 replace = false;
5012 }
5013 teap->event = n->value;
5014 teap->interp = goi->interp;
5015 jim_getopt_obj(goi, &o);
5016 if (teap->body)
5017 Jim_DecrRefCount(teap->interp, teap->body);
5018 teap->body = Jim_DuplicateObj(goi->interp, o);
5019 /*
5020 * FIXME:
5021 * Tcl/TK - "tk events" have a nice feature.
5022 * See the "BIND" command.
5023 * We should support that here.
5024 * You can specify %X and %Y in the event code.
5025 * The idea is: %T - target name.
5026 * The idea is: %N - target number
5027 * The idea is: %E - event name.
5028 */
5029 Jim_IncrRefCount(teap->body);
5030
5031 if (!replace) {
5032 /* add to head of event list */
5033 teap->next = target->event_action;
5034 target->event_action = teap;
5035 }
5036 Jim_SetEmptyResult(goi->interp);
5037 } else {
5038 /* get */
5039 if (!teap)
5040 Jim_SetEmptyResult(goi->interp);
5041 else
5042 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5043 }
5044 }
5045 /* loop for more */
5046 break;
5047
5048 case TCFG_WORK_AREA_VIRT:
5049 if (goi->isconfigure) {
5050 target_free_all_working_areas(target);
5051 e = jim_getopt_wide(goi, &w);
5052 if (e != JIM_OK)
5053 return e;
5054 target->working_area_virt = w;
5055 target->working_area_virt_spec = true;
5056 } else {
5057 if (goi->argc != 0)
5058 goto no_params;
5059 }
5060 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5061 /* loop for more */
5062 break;
5063
5064 case TCFG_WORK_AREA_PHYS:
5065 if (goi->isconfigure) {
5066 target_free_all_working_areas(target);
5067 e = jim_getopt_wide(goi, &w);
5068 if (e != JIM_OK)
5069 return e;
5070 target->working_area_phys = w;
5071 target->working_area_phys_spec = true;
5072 } else {
5073 if (goi->argc != 0)
5074 goto no_params;
5075 }
5076 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5077 /* loop for more */
5078 break;
5079
5080 case TCFG_WORK_AREA_SIZE:
5081 if (goi->isconfigure) {
5082 target_free_all_working_areas(target);
5083 e = jim_getopt_wide(goi, &w);
5084 if (e != JIM_OK)
5085 return e;
5086 target->working_area_size = w;
5087 } else {
5088 if (goi->argc != 0)
5089 goto no_params;
5090 }
5091 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5092 /* loop for more */
5093 break;
5094
5095 case TCFG_WORK_AREA_BACKUP:
5096 if (goi->isconfigure) {
5097 target_free_all_working_areas(target);
5098 e = jim_getopt_wide(goi, &w);
5099 if (e != JIM_OK)
5100 return e;
5101 /* make this exactly 1 or 0 */
5102 target->backup_working_area = (!!w);
5103 } else {
5104 if (goi->argc != 0)
5105 goto no_params;
5106 }
5107 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5108 /* loop for more */
5109 break;
5110
5111
5112 case TCFG_ENDIAN:
5113 if (goi->isconfigure) {
5114 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5115 if (e != JIM_OK) {
5116 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5117 return e;
5118 }
5119 target->endianness = n->value;
5120 } else {
5121 if (goi->argc != 0)
5122 goto no_params;
5123 }
5124 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5125 if (!n->name) {
5126 target->endianness = TARGET_LITTLE_ENDIAN;
5127 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5128 }
5129 Jim_SetResultString(goi->interp, n->name, -1);
5130 /* loop for more */
5131 break;
5132
5133 case TCFG_COREID:
5134 if (goi->isconfigure) {
5135 e = jim_getopt_wide(goi, &w);
5136 if (e != JIM_OK)
5137 return e;
5138 target->coreid = (int32_t)w;
5139 } else {
5140 if (goi->argc != 0)
5141 goto no_params;
5142 }
5143 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5144 /* loop for more */
5145 break;
5146
5147 case TCFG_CHAIN_POSITION:
5148 if (goi->isconfigure) {
5149 Jim_Obj *o_t;
5150 struct jtag_tap *tap;
5151
5152 if (target->has_dap) {
5153 Jim_SetResultString(goi->interp,
5154 "target requires -dap parameter instead of -chain-position!", -1);
5155 return JIM_ERR;
5156 }
5157
5158 target_free_all_working_areas(target);
5159 e = jim_getopt_obj(goi, &o_t);
5160 if (e != JIM_OK)
5161 return e;
5162 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5163 if (!tap)
5164 return JIM_ERR;
5165 target->tap = tap;
5166 target->tap_configured = true;
5167 } else {
5168 if (goi->argc != 0)
5169 goto no_params;
5170 }
5171 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5172 /* loop for more */
5173 break;
5174 case TCFG_DBGBASE:
5175 if (goi->isconfigure) {
5176 e = jim_getopt_wide(goi, &w);
5177 if (e != JIM_OK)
5178 return e;
5179 target->dbgbase = (uint32_t)w;
5180 target->dbgbase_set = true;
5181 } else {
5182 if (goi->argc != 0)
5183 goto no_params;
5184 }
5185 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5186 /* loop for more */
5187 break;
5188 case TCFG_RTOS:
5189 /* RTOS */
5190 {
5191 int result = rtos_create(goi, target);
5192 if (result != JIM_OK)
5193 return result;
5194 }
5195 /* loop for more */
5196 break;
5197
5198 case TCFG_DEFER_EXAMINE:
5199 /* DEFER_EXAMINE */
5200 target->defer_examine = true;
5201 /* loop for more */
5202 break;
5203
5204 case TCFG_GDB_PORT:
5205 if (goi->isconfigure) {
5206 struct command_context *cmd_ctx = current_command_context(goi->interp);
5207 if (cmd_ctx->mode != COMMAND_CONFIG) {
5208 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5209 return JIM_ERR;
5210 }
5211
5212 const char *s;
5213 e = jim_getopt_string(goi, &s, NULL);
5214 if (e != JIM_OK)
5215 return e;
5216 free(target->gdb_port_override);
5217 target->gdb_port_override = strdup(s);
5218 } else {
5219 if (goi->argc != 0)
5220 goto no_params;
5221 }
5222 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5223 /* loop for more */
5224 break;
5225
5226 case TCFG_GDB_MAX_CONNECTIONS:
5227 if (goi->isconfigure) {
5228 struct command_context *cmd_ctx = current_command_context(goi->interp);
5229 if (cmd_ctx->mode != COMMAND_CONFIG) {
5230 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5231 return JIM_ERR;
5232 }
5233
5234 e = jim_getopt_wide(goi, &w);
5235 if (e != JIM_OK)
5236 return e;
5237 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5238 } else {
5239 if (goi->argc != 0)
5240 goto no_params;
5241 }
5242 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5243 break;
5244 }
5245 } /* while (goi->argc) */
5246
5247
5248 /* done - we return */
5249 return JIM_OK;
5250 }
5251
5252 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5253 {
5254 struct command *c = jim_to_command(interp);
5255 struct jim_getopt_info goi;
5256
5257 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5258 goi.isconfigure = !strcmp(c->name, "configure");
5259 if (goi.argc < 1) {
5260 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5261 "missing: -option ...");
5262 return JIM_ERR;
5263 }
5264 struct command_context *cmd_ctx = current_command_context(interp);
5265 assert(cmd_ctx);
5266 struct target *target = get_current_target(cmd_ctx);
5267 return target_configure(&goi, target);
5268 }
5269
5270 static int jim_target_mem2array(Jim_Interp *interp,
5271 int argc, Jim_Obj *const *argv)
5272 {
5273 struct command_context *cmd_ctx = current_command_context(interp);
5274 assert(cmd_ctx);
5275 struct target *target = get_current_target(cmd_ctx);
5276 return target_mem2array(interp, target, argc - 1, argv + 1);
5277 }
5278
5279 static int jim_target_array2mem(Jim_Interp *interp,
5280 int argc, Jim_Obj *const *argv)
5281 {
5282 struct command_context *cmd_ctx = current_command_context(interp);
5283 assert(cmd_ctx);
5284 struct target *target = get_current_target(cmd_ctx);
5285 return target_array2mem(interp, target, argc - 1, argv + 1);
5286 }
5287
5288 static int jim_target_tap_disabled(Jim_Interp *interp)
5289 {
5290 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5291 return JIM_ERR;
5292 }
5293
5294 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5295 {
5296 bool allow_defer = false;
5297
5298 struct jim_getopt_info goi;
5299 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5300 if (goi.argc > 1) {
5301 const char *cmd_name = Jim_GetString(argv[0], NULL);
5302 Jim_SetResultFormatted(goi.interp,
5303 "usage: %s ['allow-defer']", cmd_name);
5304 return JIM_ERR;
5305 }
5306 if (goi.argc > 0 &&
5307 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5308 /* consume it */
5309 Jim_Obj *obj;
5310 int e = jim_getopt_obj(&goi, &obj);
5311 if (e != JIM_OK)
5312 return e;
5313 allow_defer = true;
5314 }
5315
5316 struct command_context *cmd_ctx = current_command_context(interp);
5317 assert(cmd_ctx);
5318 struct target *target = get_current_target(cmd_ctx);
5319 if (!target->tap->enabled)
5320 return jim_target_tap_disabled(interp);
5321
5322 if (allow_defer && target->defer_examine) {
5323 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5324 LOG_INFO("Use arp_examine command to examine it manually!");
5325 return JIM_OK;
5326 }
5327
5328 int e = target->type->examine(target);
5329 if (e != ERROR_OK) {
5330 target_reset_examined(target);
5331 return JIM_ERR;
5332 }
5333
5334 target_set_examined(target);
5335
5336 return JIM_OK;
5337 }
5338
5339 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5340 {
5341 struct command_context *cmd_ctx = current_command_context(interp);
5342 assert(cmd_ctx);
5343 struct target *target = get_current_target(cmd_ctx);
5344
5345 Jim_SetResultBool(interp, target_was_examined(target));
5346 return JIM_OK;
5347 }
5348
5349 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5350 {
5351 struct command_context *cmd_ctx = current_command_context(interp);
5352 assert(cmd_ctx);
5353 struct target *target = get_current_target(cmd_ctx);
5354
5355 Jim_SetResultBool(interp, target->defer_examine);
5356 return JIM_OK;
5357 }
5358
5359 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5360 {
5361 if (argc != 1) {
5362 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5363 return JIM_ERR;
5364 }
5365 struct command_context *cmd_ctx = current_command_context(interp);
5366 assert(cmd_ctx);
5367 struct target *target = get_current_target(cmd_ctx);
5368
5369 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5370 return JIM_ERR;
5371
5372 return JIM_OK;
5373 }
5374
5375 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5376 {
5377 if (argc != 1) {
5378 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5379 return JIM_ERR;
5380 }
5381 struct command_context *cmd_ctx = current_command_context(interp);
5382 assert(cmd_ctx);
5383 struct target *target = get_current_target(cmd_ctx);
5384 if (!target->tap->enabled)
5385 return jim_target_tap_disabled(interp);
5386
5387 int e;
5388 if (!(target_was_examined(target)))
5389 e = ERROR_TARGET_NOT_EXAMINED;
5390 else
5391 e = target->type->poll(target);
5392 if (e != ERROR_OK)
5393 return JIM_ERR;
5394 return JIM_OK;
5395 }
5396
5397 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5398 {
5399 struct jim_getopt_info goi;
5400 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5401
5402 if (goi.argc != 2) {
5403 Jim_WrongNumArgs(interp, 0, argv,
5404 "([tT]|[fF]|assert|deassert) BOOL");
5405 return JIM_ERR;
5406 }
5407
5408 struct jim_nvp *n;
5409 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5410 if (e != JIM_OK) {
5411 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5412 return e;
5413 }
5414 /* the halt or not param */
5415 jim_wide a;
5416 e = jim_getopt_wide(&goi, &a);
5417 if (e != JIM_OK)
5418 return e;
5419
5420 struct command_context *cmd_ctx = current_command_context(interp);
5421 assert(cmd_ctx);
5422 struct target *target = get_current_target(cmd_ctx);
5423 if (!target->tap->enabled)
5424 return jim_target_tap_disabled(interp);
5425
5426 if (!target->type->assert_reset || !target->type->deassert_reset) {
5427 Jim_SetResultFormatted(interp,
5428 "No target-specific reset for %s",
5429 target_name(target));
5430 return JIM_ERR;
5431 }
5432
5433 if (target->defer_examine)
5434 target_reset_examined(target);
5435
5436 /* determine if we should halt or not. */
5437 target->reset_halt = (a != 0);
5438 /* When this happens - all workareas are invalid. */
5439 target_free_all_working_areas_restore(target, 0);
5440
5441 /* do the assert */
5442 if (n->value == NVP_ASSERT)
5443 e = target->type->assert_reset(target);
5444 else
5445 e = target->type->deassert_reset(target);
5446 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5447 }
5448
5449 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5450 {
5451 if (argc != 1) {
5452 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5453 return JIM_ERR;
5454 }
5455 struct command_context *cmd_ctx = current_command_context(interp);
5456 assert(cmd_ctx);
5457 struct target *target = get_current_target(cmd_ctx);
5458 if (!target->tap->enabled)
5459 return jim_target_tap_disabled(interp);
5460 int e = target->type->halt(target);
5461 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5462 }
5463
5464 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5465 {
5466 struct jim_getopt_info goi;
5467 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5468
5469 /* params: <name> statename timeoutmsecs */
5470 if (goi.argc != 2) {
5471 const char *cmd_name = Jim_GetString(argv[0], NULL);
5472 Jim_SetResultFormatted(goi.interp,
5473 "%s <state_name> <timeout_in_msec>", cmd_name);
5474 return JIM_ERR;
5475 }
5476
5477 struct jim_nvp *n;
5478 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5479 if (e != JIM_OK) {
5480 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5481 return e;
5482 }
5483 jim_wide a;
5484 e = jim_getopt_wide(&goi, &a);
5485 if (e != JIM_OK)
5486 return e;
5487 struct command_context *cmd_ctx = current_command_context(interp);
5488 assert(cmd_ctx);
5489 struct target *target = get_current_target(cmd_ctx);
5490 if (!target->tap->enabled)
5491 return jim_target_tap_disabled(interp);
5492
5493 e = target_wait_state(target, n->value, a);
5494 if (e != ERROR_OK) {
5495 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5496 Jim_SetResultFormatted(goi.interp,
5497 "target: %s wait %s fails (%#s) %s",
5498 target_name(target), n->name,
5499 obj, target_strerror_safe(e));
5500 return JIM_ERR;
5501 }
5502 return JIM_OK;
5503 }
5504 /* List, for humans, the events defined for this target.
5505 * Scripts/programs should use 'name cget -event NAME' instead.
5506 */
5507 COMMAND_HANDLER(handle_target_event_list)
5508 {
5509 struct target *target = get_current_target(CMD_CTX);
5510 struct target_event_action *teap = target->event_action;
5511
5512 command_print(CMD, "Event actions for target (%d) %s\n",
5513 target->target_number,
5514 target_name(target));
5515 command_print(CMD, "%-25s | Body", "Event");
5516 command_print(CMD, "------------------------- | "
5517 "----------------------------------------");
5518 while (teap) {
5519 command_print(CMD, "%-25s | %s",
5520 target_event_name(teap->event),
5521 Jim_GetString(teap->body, NULL));
5522 teap = teap->next;
5523 }
5524 command_print(CMD, "***END***");
5525 return ERROR_OK;
5526 }
5527 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5528 {
5529 if (argc != 1) {
5530 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5531 return JIM_ERR;
5532 }
5533 struct command_context *cmd_ctx = current_command_context(interp);
5534 assert(cmd_ctx);
5535 struct target *target = get_current_target(cmd_ctx);
5536 Jim_SetResultString(interp, target_state_name(target), -1);
5537 return JIM_OK;
5538 }
5539 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5540 {
5541 struct jim_getopt_info goi;
5542 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5543 if (goi.argc != 1) {
5544 const char *cmd_name = Jim_GetString(argv[0], NULL);
5545 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5546 return JIM_ERR;
5547 }
5548 struct jim_nvp *n;
5549 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5550 if (e != JIM_OK) {
5551 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5552 return e;
5553 }
5554 struct command_context *cmd_ctx = current_command_context(interp);
5555 assert(cmd_ctx);
5556 struct target *target = get_current_target(cmd_ctx);
5557 target_handle_event(target, n->value);
5558 return JIM_OK;
5559 }
5560
5561 static const struct command_registration target_instance_command_handlers[] = {
5562 {
5563 .name = "configure",
5564 .mode = COMMAND_ANY,
5565 .jim_handler = jim_target_configure,
5566 .help = "configure a new target for use",
5567 .usage = "[target_attribute ...]",
5568 },
5569 {
5570 .name = "cget",
5571 .mode = COMMAND_ANY,
5572 .jim_handler = jim_target_configure,
5573 .help = "returns the specified target attribute",
5574 .usage = "target_attribute",
5575 },
5576 {
5577 .name = "mwd",
5578 .handler = handle_mw_command,
5579 .mode = COMMAND_EXEC,
5580 .help = "Write 64-bit word(s) to target memory",
5581 .usage = "address data [count]",
5582 },
5583 {
5584 .name = "mww",
5585 .handler = handle_mw_command,
5586 .mode = COMMAND_EXEC,
5587 .help = "Write 32-bit word(s) to target memory",
5588 .usage = "address data [count]",
5589 },
5590 {
5591 .name = "mwh",
5592 .handler = handle_mw_command,
5593 .mode = COMMAND_EXEC,
5594 .help = "Write 16-bit half-word(s) to target memory",
5595 .usage = "address data [count]",
5596 },
5597 {
5598 .name = "mwb",
5599 .handler = handle_mw_command,
5600 .mode = COMMAND_EXEC,
5601 .help = "Write byte(s) to target memory",
5602 .usage = "address data [count]",
5603 },
5604 {
5605 .name = "mdd",
5606 .handler = handle_md_command,
5607 .mode = COMMAND_EXEC,
5608 .help = "Display target memory as 64-bit words",
5609 .usage = "address [count]",
5610 },
5611 {
5612 .name = "mdw",
5613 .handler = handle_md_command,
5614 .mode = COMMAND_EXEC,
5615 .help = "Display target memory as 32-bit words",
5616 .usage = "address [count]",
5617 },
5618 {
5619 .name = "mdh",
5620 .handler = handle_md_command,
5621 .mode = COMMAND_EXEC,
5622 .help = "Display target memory as 16-bit half-words",
5623 .usage = "address [count]",
5624 },
5625 {
5626 .name = "mdb",
5627 .handler = handle_md_command,
5628 .mode = COMMAND_EXEC,
5629 .help = "Display target memory as 8-bit bytes",
5630 .usage = "address [count]",
5631 },
5632 {
5633 .name = "array2mem",
5634 .mode = COMMAND_EXEC,
5635 .jim_handler = jim_target_array2mem,
5636 .help = "Writes Tcl array of 8/16/32/64 bit numbers "
5637 "to target memory",
5638 .usage = "arrayname bitwidth address count",
5639 },
5640 {
5641 .name = "mem2array",
5642 .mode = COMMAND_EXEC,
5643 .jim_handler = jim_target_mem2array,
5644 .help = "Loads Tcl array of 8/16/32/64 bit numbers "
5645 "from target memory",
5646 .usage = "arrayname bitwidth address count",
5647 },
5648 {
5649 .name = "eventlist",
5650 .handler = handle_target_event_list,
5651 .mode = COMMAND_EXEC,
5652 .help = "displays a table of events defined for this target",
5653 .usage = "",
5654 },
5655 {
5656 .name = "curstate",
5657 .mode = COMMAND_EXEC,
5658 .jim_handler = jim_target_current_state,
5659 .help = "displays the current state of this target",
5660 },
5661 {
5662 .name = "arp_examine",
5663 .mode = COMMAND_EXEC,
5664 .jim_handler = jim_target_examine,
5665 .help = "used internally for reset processing",
5666 .usage = "['allow-defer']",
5667 },
5668 {
5669 .name = "was_examined",
5670 .mode = COMMAND_EXEC,
5671 .jim_handler = jim_target_was_examined,
5672 .help = "used internally for reset processing",
5673 },
5674 {
5675 .name = "examine_deferred",
5676 .mode = COMMAND_EXEC,
5677 .jim_handler = jim_target_examine_deferred,
5678 .help = "used internally for reset processing",
5679 },
5680 {
5681 .name = "arp_halt_gdb",
5682 .mode = COMMAND_EXEC,
5683 .jim_handler = jim_target_halt_gdb,
5684 .help = "used internally for reset processing to halt GDB",
5685 },
5686 {
5687 .name = "arp_poll",
5688 .mode = COMMAND_EXEC,
5689 .jim_handler = jim_target_poll,
5690 .help = "used internally for reset processing",
5691 },
5692 {
5693 .name = "arp_reset",
5694 .mode = COMMAND_EXEC,
5695 .jim_handler = jim_target_reset,
5696 .help = "used internally for reset processing",
5697 },
5698 {
5699 .name = "arp_halt",
5700 .mode = COMMAND_EXEC,
5701 .jim_handler = jim_target_halt,
5702 .help = "used internally for reset processing",
5703 },
5704 {
5705 .name = "arp_waitstate",
5706 .mode = COMMAND_EXEC,
5707 .jim_handler = jim_target_wait_state,
5708 .help = "used internally for reset processing",
5709 },
5710 {
5711 .name = "invoke-event",
5712 .mode = COMMAND_EXEC,
5713 .jim_handler = jim_target_invoke_event,
5714 .help = "invoke handler for specified event",
5715 .usage = "event_name",
5716 },
5717 COMMAND_REGISTRATION_DONE
5718 };
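/* Illustrative examples of the per-target instance commands registered above;
 * the target name, addresses and values are placeholders:
 *
 *   mychip.cpu mww 0x20000000 0xdeadbeef     ;# write one 32-bit word
 *   mychip.cpu mdw 0x20000000 4              ;# display four 32-bit words
 *   mychip.cpu cget -work-area-size          ;# read back a configure option
 *   mychip.cpu eventlist                     ;# show the events defined for it
 */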
5719
5720 static int target_create(struct jim_getopt_info *goi)
5721 {
5722 Jim_Obj *new_cmd;
5723 Jim_Cmd *cmd;
5724 const char *cp;
5725 int e;
5726 int x;
5727 struct target *target;
5728 struct command_context *cmd_ctx;
5729
5730 cmd_ctx = current_command_context(goi->interp);
5731 assert(cmd_ctx);
5732
5733 if (goi->argc < 3) {
5734 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5735 return JIM_ERR;
5736 }
5737
5738 /* COMMAND */
5739 jim_getopt_obj(goi, &new_cmd);
5740 /* does this command exist? */
5741 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
5742 if (cmd) {
5743 cp = Jim_GetString(new_cmd, NULL);
5744 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5745 return JIM_ERR;
5746 }
5747
5748 /* TYPE */
5749 e = jim_getopt_string(goi, &cp, NULL);
5750 if (e != JIM_OK)
5751 return e;
5752 struct transport *tr = get_current_transport();
5753 if (tr->override_target) {
5754 e = tr->override_target(&cp);
5755 if (e != ERROR_OK) {
5756 LOG_ERROR("The selected transport doesn't support this target");
5757 return JIM_ERR;
5758 }
5759 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5760 }
5761 /* now check whether the requested target type exists */
5762 for (x = 0 ; target_types[x] ; x++) {
5763 if (strcmp(cp, target_types[x]->name) == 0) {
5764 /* found */
5765 break;
5766 }
5767 }
5768 if (!target_types[x]) {
5769 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5770 for (x = 0 ; target_types[x] ; x++) {
5771 if (target_types[x + 1]) {
5772 Jim_AppendStrings(goi->interp,
5773 Jim_GetResult(goi->interp),
5774 target_types[x]->name,
5775 ", ", NULL);
5776 } else {
5777 Jim_AppendStrings(goi->interp,
5778 Jim_GetResult(goi->interp),
5779 " or ",
5780 target_types[x]->name, NULL);
5781 }
5782 }
5783 return JIM_ERR;
5784 }
5785
5786 /* Create it */
5787 target = calloc(1, sizeof(struct target));
5788 if (!target) {
5789 LOG_ERROR("Out of memory");
5790 return JIM_ERR;
5791 }
5792
5793 /* set empty smp cluster */
5794 target->smp_targets = &empty_smp_targets;
5795
5796 /* set target number */
5797 target->target_number = new_target_number();
5798
5799 /* allocate memory for each unique target type */
5800 target->type = malloc(sizeof(struct target_type));
5801 if (!target->type) {
5802 LOG_ERROR("Out of memory");
5803 free(target);
5804 return JIM_ERR;
5805 }
5806
5807 memcpy(target->type, target_types[x], sizeof(struct target_type));
5808
5809 /* default to first core, override with -coreid */
5810 target->coreid = 0;
5811
5812 target->working_area = 0x0;
5813 target->working_area_size = 0x0;
5814 target->working_areas = NULL;
5815 target->backup_working_area = 0;
5816
5817 target->state = TARGET_UNKNOWN;
5818 target->debug_reason = DBG_REASON_UNDEFINED;
5819 target->reg_cache = NULL;
5820 target->breakpoints = NULL;
5821 target->watchpoints = NULL;
5822 target->next = NULL;
5823 target->arch_info = NULL;
5824
5825 target->verbose_halt_msg = true;
5826
5827 target->halt_issued = false;
5828
5829 /* initialize trace information */
5830 target->trace_info = calloc(1, sizeof(struct trace));
5831 if (!target->trace_info) {
5832 LOG_ERROR("Out of memory");
5833 free(target->type);
5834 free(target);
5835 return JIM_ERR;
5836 }
5837
5838 target->dbgmsg = NULL;
5839 target->dbg_msg_enabled = 0;
5840
5841 target->endianness = TARGET_ENDIAN_UNKNOWN;
5842
5843 target->rtos = NULL;
5844 target->rtos_auto_detect = false;
5845
5846 target->gdb_port_override = NULL;
5847 target->gdb_max_connections = 1;
5848
5849 /* Do the rest as "configure" options */
5850 goi->isconfigure = 1;
5851 e = target_configure(goi, target);
5852
5853 if (e == JIM_OK) {
5854 if (target->has_dap) {
5855 if (!target->dap_configured) {
5856 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
5857 e = JIM_ERR;
5858 }
5859 } else {
5860 if (!target->tap_configured) {
5861 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
5862 e = JIM_ERR;
5863 }
5864 }
5865 /* tap must be set after target was configured */
5866 if (!target->tap)
5867 e = JIM_ERR;
5868 }
5869
5870 if (e != JIM_OK) {
5871 rtos_destroy(target);
5872 free(target->gdb_port_override);
5873 free(target->trace_info);
5874 free(target->type);
5875 free(target);
5876 return e;
5877 }
5878
5879 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5880 /* default endian to little if not specified */
5881 target->endianness = TARGET_LITTLE_ENDIAN;
5882 }
5883
5884 cp = Jim_GetString(new_cmd, NULL);
5885 target->cmd_name = strdup(cp);
5886 if (!target->cmd_name) {
5887 LOG_ERROR("Out of memory");
5888 rtos_destroy(target);
5889 free(target->gdb_port_override);
5890 free(target->trace_info);
5891 free(target->type);
5892 free(target);
5893 return JIM_ERR;
5894 }
5895
5896 if (target->type->target_create) {
5897 e = (*(target->type->target_create))(target, goi->interp);
5898 if (e != ERROR_OK) {
5899 LOG_DEBUG("target_create failed");
5900 free(target->cmd_name);
5901 rtos_destroy(target);
5902 free(target->gdb_port_override);
5903 free(target->trace_info);
5904 free(target->type);
5905 free(target);
5906 return JIM_ERR;
5907 }
5908 }
5909
5910 /* create the target specific commands */
5911 if (target->type->commands) {
5912 e = register_commands(cmd_ctx, NULL, target->type->commands);
5913 if (e != ERROR_OK)
5914 LOG_ERROR("unable to register '%s' commands", cp);
5915 }
5916
5917 /* now - create the new target name command */
5918 const struct command_registration target_subcommands[] = {
5919 {
5920 .chain = target_instance_command_handlers,
5921 },
5922 {
5923 .chain = target->type->commands,
5924 },
5925 COMMAND_REGISTRATION_DONE
5926 };
5927 const struct command_registration target_commands[] = {
5928 {
5929 .name = cp,
5930 .mode = COMMAND_ANY,
5931 .help = "target command group",
5932 .usage = "",
5933 .chain = target_subcommands,
5934 },
5935 COMMAND_REGISTRATION_DONE
5936 };
5937 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
5938 if (e != ERROR_OK) {
5939 if (target->type->deinit_target)
5940 target->type->deinit_target(target);
5941 free(target->cmd_name);
5942 rtos_destroy(target);
5943 free(target->gdb_port_override);
5944 free(target->trace_info);
5945 free(target->type);
5946 free(target);
5947 return JIM_ERR;
5948 }
5949
5950 /* append to end of list */
5951 append_to_list_all_targets(target);
5952
5953 cmd_ctx->current_target = target;
5954 return JIM_OK;
5955 }
5956
5957 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5958 {
5959 if (argc != 1) {
5960 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5961 return JIM_ERR;
5962 }
5963 struct command_context *cmd_ctx = current_command_context(interp);
5964 assert(cmd_ctx);
5965
5966 struct target *target = get_current_target_or_null(cmd_ctx);
5967 if (target)
5968 Jim_SetResultString(interp, target_name(target), -1);
5969 return JIM_OK;
5970 }
5971
5972 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5973 {
5974 if (argc != 1) {
5975 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5976 return JIM_ERR;
5977 }
5978 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5979 for (unsigned x = 0; target_types[x]; x++) {
5980 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5981 Jim_NewStringObj(interp, target_types[x]->name, -1));
5982 }
5983 return JIM_OK;
5984 }
5985
5986 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5987 {
5988 if (argc != 1) {
5989 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5990 return JIM_ERR;
5991 }
5992 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5993 struct target *target = all_targets;
5994 while (target) {
5995 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5996 Jim_NewStringObj(interp, target_name(target), -1));
5997 target = target->next;
5998 }
5999 return JIM_OK;
6000 }
6001
6002 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6003 {
6004 int i;
6005 const char *targetname;
6006 int retval, len;
6007 struct target *target = NULL;
6008 struct target_list *head, *new;
6009
6010 retval = 0;
6011 LOG_DEBUG("%d", argc);
6012 /* argv[1] = target to associate in smp
6013 * argv[2] = target to associate in smp
6014 * argv[3] ...
6015 */
6016
6017 struct list_head *lh = malloc(sizeof(*lh));
6018 if (!lh) {
6019 LOG_ERROR("Out of memory");
6020 return JIM_ERR;
6021 }
6022 INIT_LIST_HEAD(lh);
6023
6024 for (i = 1; i < argc; i++) {
6025
6026 targetname = Jim_GetString(argv[i], &len);
6027 target = get_target(targetname);
6028 LOG_DEBUG("%s ", targetname);
6029 if (target) {
6030 new = malloc(sizeof(struct target_list));
6031 new->target = target;
6032 list_add_tail(&new->lh, lh);
6033 }
6034 }
6035 /* now walk the list of CPUs and put each target into SMP mode */
6036 foreach_smp_target(head, lh) {
6037 target = head->target;
6038 target->smp = 1;
6039 target->smp_targets = lh;
6040 }
6041
6042 if (target && target->rtos)
6043 retval = rtos_smp_init(target);
6044
6045 return retval;
6046 }
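/* Illustrative sketch of building an SMP cluster from a board config; the
 * names and options are placeholders. Each listed core ends up with smp = 1
 * and with smp_targets pointing at the same freshly allocated list_head:
 *
 *   target create mychip.cpu0 cortex_a -dap mychip.dap -coreid 0
 *   target create mychip.cpu1 cortex_a -dap mychip.dap -coreid 1
 *   target smp mychip.cpu0 mychip.cpu1
 */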
6047
6048
6049 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6050 {
6051 struct jim_getopt_info goi;
6052 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6053 if (goi.argc < 3) {
6054 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6055 "<name> <target_type> [<target_options> ...]");
6056 return JIM_ERR;
6057 }
6058 return target_create(&goi);
6059 }
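/* Illustrative sketch of the matching Tcl invocations; all names and numbers
 * are placeholders. As enforced in target_create(), -chain-position is needed
 * for plain JTAG targets and -dap for DAP-based targets:
 *
 *   jtag newtap mychip cpu -irlen 4
 *   target create mychip.cpu arm926ejs -chain-position mychip.cpu
 *
 * or, for a DAP-based core:
 *
 *   dap create mychip.dap -chain-position mychip.cpu
 *   target create mychip.cpu cortex_m -dap mychip.dap
 */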
6060
6061 static const struct command_registration target_subcommand_handlers[] = {
6062 {
6063 .name = "init",
6064 .mode = COMMAND_CONFIG,
6065 .handler = handle_target_init_command,
6066 .help = "initialize targets",
6067 .usage = "",
6068 },
6069 {
6070 .name = "create",
6071 .mode = COMMAND_CONFIG,
6072 .jim_handler = jim_target_create,
6073 .usage = "name type '-chain-position' name [options ...]",
6074 .help = "Creates and selects a new target",
6075 },
6076 {
6077 .name = "current",
6078 .mode = COMMAND_ANY,
6079 .jim_handler = jim_target_current,
6080 .help = "Returns the currently selected target",
6081 },
6082 {
6083 .name = "types",
6084 .mode = COMMAND_ANY,
6085 .jim_handler = jim_target_types,
6086 .help = "Returns the available target types as "
6087 "a list of strings",
6088 },
6089 {
6090 .name = "names",
6091 .mode = COMMAND_ANY,
6092 .jim_handler = jim_target_names,
6093 .help = "Returns the names of all targets as a list of strings",
6094 },
6095 {
6096 .name = "smp",
6097 .mode = COMMAND_ANY,
6098 .jim_handler = jim_target_smp,
6099 .usage = "targetname1 targetname2 ...",
6100 .help = "gather several targets into an SMP list"
6101 },
6102
6103 COMMAND_REGISTRATION_DONE
6104 };
6105
6106 struct fast_load {
6107 target_addr_t address;
6108 uint8_t *data;
6109 int length;
6110
6111 };
6112
6113 static int fastload_num;
6114 static struct fast_load *fastload;
6115
6116 static void free_fastload(void)
6117 {
6118 if (fastload) {
6119 for (int i = 0; i < fastload_num; i++)
6120 free(fastload[i].data);
6121 free(fastload);
6122 fastload = NULL;
6123 }
6124 }
6125
6126 COMMAND_HANDLER(handle_fast_load_image_command)
6127 {
6128 uint8_t *buffer;
6129 size_t buf_cnt;
6130 uint32_t image_size;
6131 target_addr_t min_address = 0;
6132 target_addr_t max_address = -1;
6133
6134 struct image image;
6135
6136 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6137 &image, &min_address, &max_address);
6138 if (retval != ERROR_OK)
6139 return retval;
6140
6141 struct duration bench;
6142 duration_start(&bench);
6143
6144 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6145 if (retval != ERROR_OK)
6146 return retval;
6147
6148 image_size = 0x0;
6149 retval = ERROR_OK;
6150 fastload_num = image.num_sections;
6151 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6152 if (!fastload) {
6153 command_print(CMD, "out of memory");
6154 image_close(&image);
6155 return ERROR_FAIL;
6156 }
6157 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6158 for (unsigned int i = 0; i < image.num_sections; i++) {
6159 buffer = malloc(image.sections[i].size);
6160 if (!buffer) {
6161 command_print(CMD, "error allocating buffer for section (%d bytes)",
6162 (int)(image.sections[i].size));
6163 retval = ERROR_FAIL;
6164 break;
6165 }
6166
6167 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6168 if (retval != ERROR_OK) {
6169 free(buffer);
6170 break;
6171 }
6172
6173 uint32_t offset = 0;
6174 uint32_t length = buf_cnt;
6175
6176 /* DANGER!!! beware of unsigned comparison here!!! */
6177
6178 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6179 (image.sections[i].base_address < max_address)) {
6180 if (image.sections[i].base_address < min_address) {
6181 /* clip addresses below */
6182 offset += min_address-image.sections[i].base_address;
6183 length -= offset;
6184 }
6185
6186 if (image.sections[i].base_address + buf_cnt > max_address)
6187 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6188
6189 fastload[i].address = image.sections[i].base_address + offset;
6190 fastload[i].data = malloc(length);
6191 if (!fastload[i].data) {
6192 free(buffer);
6193 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6194 length);
6195 retval = ERROR_FAIL;
6196 break;
6197 }
6198 memcpy(fastload[i].data, buffer + offset, length);
6199 fastload[i].length = length;
6200
6201 image_size += length;
6202 command_print(CMD, "%u bytes written at address 0x%8.8x",
6203 (unsigned int)length,
6204 ((unsigned int)(image.sections[i].base_address + offset)));
6205 }
6206
6207 free(buffer);
6208 }
6209
6210 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6211 command_print(CMD, "Loaded %" PRIu32 " bytes "
6212 "in %fs (%0.3f KiB/s)", image_size,
6213 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6214
6215 command_print(CMD,
6216 "WARNING: image has not been loaded to target!"
6217 "You can issue a 'fast_load' to finish loading.");
6218 }
6219
6220 image_close(&image);
6221
6222 if (retval != ERROR_OK)
6223 free_fastload();
6224
6225 return retval;
6226 }
6227
6228 COMMAND_HANDLER(handle_fast_load_command)
6229 {
6230 if (CMD_ARGC > 0)
6231 return ERROR_COMMAND_SYNTAX_ERROR;
6232 if (!fastload) {
6233 LOG_ERROR("No image in memory");
6234 return ERROR_FAIL;
6235 }
6236 int i;
6237 int64_t ms = timeval_ms();
6238 int size = 0;
6239 int retval = ERROR_OK;
6240 for (i = 0; i < fastload_num; i++) {
6241 struct target *target = get_current_target(CMD_CTX);
6242 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6243 (unsigned int)(fastload[i].address),
6244 (unsigned int)(fastload[i].length));
6245 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6246 if (retval != ERROR_OK)
6247 break;
6248 size += fastload[i].length;
6249 }
6250 if (retval == ERROR_OK) {
6251 int64_t after = timeval_ms();
6252 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6253 }
6254 return retval;
6255 }
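/* Illustrative pairing of the two commands above; the file name and address
 * are placeholders. The image is parsed into host RAM once, then written to
 * the target later, e.g. while profiling resets:
 *
 *   fast_load_image firmware.bin 0x08000000 bin
 *   reset halt
 *   fast_load
 */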
6256
6257 static const struct command_registration target_command_handlers[] = {
6258 {
6259 .name = "targets",
6260 .handler = handle_targets_command,
6261 .mode = COMMAND_ANY,
6262 .help = "change current default target (one parameter) "
6263 "or prints table of all targets (no parameters)",
6264 .usage = "[target]",
6265 },
6266 {
6267 .name = "target",
6268 .mode = COMMAND_CONFIG,
6269 .help = "configure target",
6270 .chain = target_subcommand_handlers,
6271 .usage = "",
6272 },
6273 COMMAND_REGISTRATION_DONE
6274 };
6275
6276 int target_register_commands(struct command_context *cmd_ctx)
6277 {
6278 return register_commands(cmd_ctx, NULL, target_command_handlers);
6279 }
6280
6281 static bool target_reset_nag = true;
6282
6283 bool get_target_reset_nag(void)
6284 {
6285 return target_reset_nag;
6286 }
6287
6288 COMMAND_HANDLER(handle_target_reset_nag)
6289 {
6290 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6291 &target_reset_nag, "Nag after each reset about options to improve "
6292 "performance");
6293 }
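/* Illustrative usage; typically placed in a user script to silence the hint:
 *
 *   reset_nag disable
 */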
6294
6295 COMMAND_HANDLER(handle_ps_command)
6296 {
6297 struct target *target = get_current_target(CMD_CTX);
6298 char *display;
6299 if (target->state != TARGET_HALTED) {
6300 LOG_INFO("target not halted");
6301 return ERROR_OK;
6302 }
6303
6304 if ((target->rtos) && (target->rtos->type)
6305 && (target->rtos->type->ps_command)) {
6306 display = target->rtos->type->ps_command(target);
6307 command_print(CMD, "%s", display);
6308 free(display);
6309 return ERROR_OK;
6310 } else {
6311 LOG_INFO("no RTOS present, or the RTOS does not support the 'ps' command");
6312 return ERROR_TARGET_FAILURE;
6313 }
6314 }
6315
6316 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6317 {
6318 if (text)
6319 command_print_sameline(cmd, "%s", text);
6320 for (int i = 0; i < size; i++)
6321 command_print_sameline(cmd, " %02x", buf[i]);
6322 command_print(cmd, " ");
6323 }
6324
6325 COMMAND_HANDLER(handle_test_mem_access_command)
6326 {
6327 struct target *target = get_current_target(CMD_CTX);
6328 uint32_t test_size;
6329 int retval = ERROR_OK;
6330
6331 if (target->state != TARGET_HALTED) {
6332 LOG_INFO("target not halted");
6333 return ERROR_FAIL;
6334 }
6335
6336 if (CMD_ARGC != 1)
6337 return ERROR_COMMAND_SYNTAX_ERROR;
6338
6339 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6340
6341 /* Test reads */
6342 size_t num_bytes = test_size + 4;
6343
6344 struct working_area *wa = NULL;
6345 retval = target_alloc_working_area(target, num_bytes, &wa);
6346 if (retval != ERROR_OK) {
6347 LOG_ERROR("Not enough working area");
6348 return ERROR_FAIL;
6349 }
6350
6351 uint8_t *test_pattern = malloc(num_bytes);
6352
6353 for (size_t i = 0; i < num_bytes; i++)
6354 test_pattern[i] = rand();
6355
6356 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6357 if (retval != ERROR_OK) {
6358 LOG_ERROR("Test pattern write failed");
6359 goto out;
6360 }
6361
6362 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6363 for (int size = 1; size <= 4; size *= 2) {
6364 for (int offset = 0; offset < 4; offset++) {
6365 uint32_t count = test_size / size;
6366 size_t host_bufsiz = (count + 2) * size + host_offset;
6367 uint8_t *read_ref = malloc(host_bufsiz);
6368 uint8_t *read_buf = malloc(host_bufsiz);
6369
6370 for (size_t i = 0; i < host_bufsiz; i++) {
6371 read_ref[i] = rand();
6372 read_buf[i] = read_ref[i];
6373 }
6374 command_print_sameline(CMD,
6375 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6376 size, offset, host_offset ? "un" : "");
6377
6378 struct duration bench;
6379 duration_start(&bench);
6380
6381 retval = target_read_memory(target, wa->address + offset, size, count,
6382 read_buf + size + host_offset);
6383
6384 duration_measure(&bench);
6385
6386 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6387 command_print(CMD, "Unsupported alignment");
6388 goto next;
6389 } else if (retval != ERROR_OK) {
6390 command_print(CMD, "Memory read failed");
6391 goto next;
6392 }
6393
6394 /* replay on host */
6395 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6396
6397 /* check result */
6398 int result = memcmp(read_ref, read_buf, host_bufsiz);
6399 if (result == 0) {
6400 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6401 duration_elapsed(&bench),
6402 duration_kbps(&bench, count * size));
6403 } else {
6404 command_print(CMD, "Compare failed");
6405 binprint(CMD, "ref:", read_ref, host_bufsiz);
6406 binprint(CMD, "buf:", read_buf, host_bufsiz);
6407 }
6408 next:
6409 free(read_ref);
6410 free(read_buf);
6411 }
6412 }
6413 }
6414
6415 out:
6416 free(test_pattern);
6417
6418 target_free_working_area(target, wa);
6419
6420 /* Test writes */
6421 num_bytes = test_size + 4 + 4 + 4;
6422
6423 retval = target_alloc_working_area(target, num_bytes, &wa);
6424 if (retval != ERROR_OK) {
6425 LOG_ERROR("Not enough working area");
6426 return ERROR_FAIL;
6427 }
6428
6429 test_pattern = malloc(num_bytes);
6430
6431 for (size_t i = 0; i < num_bytes; i++)
6432 test_pattern[i] = rand();
6433
6434 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6435 for (int size = 1; size <= 4; size *= 2) {
6436 for (int offset = 0; offset < 4; offset++) {
6437 uint32_t count = test_size / size;
6438 size_t host_bufsiz = count * size + host_offset;
6439 uint8_t *read_ref = malloc(num_bytes);
6440 uint8_t *read_buf = malloc(num_bytes);
6441 uint8_t *write_buf = malloc(host_bufsiz);
6442
6443 for (size_t i = 0; i < host_bufsiz; i++)
6444 write_buf[i] = rand();
6445 command_print_sameline(CMD,
6446 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6447 size, offset, host_offset ? "un" : "");
6448
6449 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6450 if (retval != ERROR_OK) {
6451 command_print(CMD, "Test pattern write failed");
6452 goto nextw;
6453 }
6454
6455 /* replay on host */
6456 memcpy(read_ref, test_pattern, num_bytes);
6457 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6458
6459 struct duration bench;
6460 duration_start(&bench);
6461
6462 retval = target_write_memory(target, wa->address + size + offset, size, count,
6463 write_buf + host_offset);
6464
6465 duration_measure(&bench);
6466
6467 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6468 command_print(CMD, "Unsupported alignment");
6469 goto nextw;
6470 } else if (retval != ERROR_OK) {
6471 command_print(CMD, "Memory write failed");
6472 goto nextw;
6473 }
6474
6475 /* read back */
6476 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6477 if (retval != ERROR_OK) {
6478 command_print(CMD, "Test pattern write failed");
6479 goto nextw;
6480 }
6481
6482 /* check result */
6483 int result = memcmp(read_ref, read_buf, num_bytes);
6484 if (result == 0) {
6485 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6486 duration_elapsed(&bench),
6487 duration_kbps(&bench, count * size));
6488 } else {
6489 command_print(CMD, "Compare failed");
6490 binprint(CMD, "ref:", read_ref, num_bytes);
6491 binprint(CMD, "buf:", read_buf, num_bytes);
6492 }
6493 nextw:
6494 free(read_ref);
6495 free(read_buf);
6496 }
6497 }
6498 }
6499
6500 free(test_pattern);
6501
6502 target_free_working_area(target, wa);
6503 return retval;
6504 }
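/* Illustrative invocation of the self-test above; the size is a placeholder.
 * The target must be halted and must have a working area large enough for the
 * requested size plus a few bytes of alignment padding:
 *
 *   test_mem_access 0x400
 */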
6505
6506 static const struct command_registration target_exec_command_handlers[] = {
6507 {
6508 .name = "fast_load_image",
6509 .handler = handle_fast_load_image_command,
6510 .mode = COMMAND_ANY,
6511 .help = "Load image into server memory for later use by "
6512 "fast_load; primarily for profiling",
6513 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6514 "[min_address [max_length]]",
6515 },
6516 {
6517 .name = "fast_load",
6518 .handler = handle_fast_load_command,
6519 .mode = COMMAND_EXEC,
6520 .help = "loads active fast load image to current target "
6521 "- mainly for profiling purposes",
6522 .usage = "",
6523 },
6524 {
6525 .name = "profile",
6526 .handler = handle_profile_command,
6527 .mode = COMMAND_EXEC,
6528 .usage = "seconds filename [start end]",
6529 .help = "profiling samples the CPU PC",
6530 },
6531 /** @todo don't register virt2phys() unless target supports it */
6532 {
6533 .name = "virt2phys",
6534 .handler = handle_virt2phys_command,
6535 .mode = COMMAND_ANY,
6536 .help = "translate a virtual address into a physical address",
6537 .usage = "virtual_address",
6538 },
6539 {
6540 .name = "reg",
6541 .handler = handle_reg_command,
6542 .mode = COMMAND_EXEC,
6543 .help = "display (reread from target with \"force\") or set a register; "
6544 "with no arguments, displays all registers and their values",
6545 .usage = "[(register_number|register_name) [(value|'force')]]",
6546 },
6547 {
6548 .name = "poll",
6549 .handler = handle_poll_command,
6550 .mode = COMMAND_EXEC,
6551 .help = "poll target state; or reconfigure background polling",
6552 .usage = "['on'|'off']",
6553 },
6554 {
6555 .name = "wait_halt",
6556 .handler = handle_wait_halt_command,
6557 .mode = COMMAND_EXEC,
6558 .help = "wait up to the specified number of milliseconds "
6559 "(default 5000) for a previously requested halt",
6560 .usage = "[milliseconds]",
6561 },
6562 {
6563 .name = "halt",
6564 .handler = handle_halt_command,
6565 .mode = COMMAND_EXEC,
6566 .help = "request target to halt, then wait up to the specified "
6567 "number of milliseconds (default 5000) for it to complete",
6568 .usage = "[milliseconds]",
6569 },
6570 {
6571 .name = "resume",
6572 .handler = handle_resume_command,
6573 .mode = COMMAND_EXEC,
6574 .help = "resume target execution from current PC or address",
6575 .usage = "[address]",
6576 },
6577 {
6578 .name = "reset",
6579 .handler = handle_reset_command,
6580 .mode = COMMAND_EXEC,
6581 .usage = "[run|halt|init]",
6582 .help = "Reset all targets into the specified mode. "
6583 "Default reset mode is run, if not given.",
6584 },
6585 {
6586 .name = "soft_reset_halt",
6587 .handler = handle_soft_reset_halt_command,
6588 .mode = COMMAND_EXEC,
6589 .usage = "",
6590 .help = "halt the target and do a soft reset",
6591 },
6592 {
6593 .name = "step",
6594 .handler = handle_step_command,
6595 .mode = COMMAND_EXEC,
6596 .help = "step one instruction from current PC or address",
6597 .usage = "[address]",
6598 },
6599 {
6600 .name = "mdd",
6601 .handler = handle_md_command,
6602 .mode = COMMAND_EXEC,
6603 .help = "display memory double-words",
6604 .usage = "['phys'] address [count]",
6605 },
6606 {
6607 .name = "mdw",
6608 .handler = handle_md_command,
6609 .mode = COMMAND_EXEC,
6610 .help = "display memory words",
6611 .usage = "['phys'] address [count]",
6612 },
6613 {
6614 .name = "mdh",
6615 .handler = handle_md_command,
6616 .mode = COMMAND_EXEC,
6617 .help = "display memory half-words",
6618 .usage = "['phys'] address [count]",
6619 },
6620 {
6621 .name = "mdb",
6622 .handler = handle_md_command,
6623 .mode = COMMAND_EXEC,
6624 .help = "display memory bytes",
6625 .usage = "['phys'] address [count]",
6626 },
6627 {
6628 .name = "mwd",
6629 .handler = handle_mw_command,
6630 .mode = COMMAND_EXEC,
6631 .help = "write memory double-word",
6632 .usage = "['phys'] address value [count]",
6633 },
6634 {
6635 .name = "mww",
6636 .handler = handle_mw_command,
6637 .mode = COMMAND_EXEC,
6638 .help = "write memory word",
6639 .usage = "['phys'] address value [count]",
6640 },
6641 {
6642 .name = "mwh",
6643 .handler = handle_mw_command,
6644 .mode = COMMAND_EXEC,
6645 .help = "write memory half-word",
6646 .usage = "['phys'] address value [count]",
6647 },
6648 {
6649 .name = "mwb",
6650 .handler = handle_mw_command,
6651 .mode = COMMAND_EXEC,
6652 .help = "write memory byte",
6653 .usage = "['phys'] address value [count]",
6654 },
6655 {
6656 .name = "bp",
6657 .handler = handle_bp_command,
6658 .mode = COMMAND_EXEC,
6659 .help = "list or set hardware or software breakpoint",
6660 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
6661 },
6662 {
6663 .name = "rbp",
6664 .handler = handle_rbp_command,
6665 .mode = COMMAND_EXEC,
6666 .help = "remove breakpoint",
6667 .usage = "'all' | address",
6668 },
6669 {
6670 .name = "wp",
6671 .handler = handle_wp_command,
6672 .mode = COMMAND_EXEC,
6673 .help = "list (no params) or create watchpoints",
6674 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6675 },
6676 {
6677 .name = "rwp",
6678 .handler = handle_rwp_command,
6679 .mode = COMMAND_EXEC,
6680 .help = "remove watchpoint",
6681 .usage = "address",
6682 },
6683 {
6684 .name = "load_image",
6685 .handler = handle_load_image_command,
6686 .mode = COMMAND_EXEC,
6687 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6688 "[min_address] [max_length]",
6689 },
6690 {
6691 .name = "dump_image",
6692 .handler = handle_dump_image_command,
6693 .mode = COMMAND_EXEC,
6694 .usage = "filename address size",
6695 },
6696 {
6697 .name = "verify_image_checksum",
6698 .handler = handle_verify_image_checksum_command,
6699 .mode = COMMAND_EXEC,
6700 .usage = "filename [offset [type]]",
6701 },
6702 {
6703 .name = "verify_image",
6704 .handler = handle_verify_image_command,
6705 .mode = COMMAND_EXEC,
6706 .usage = "filename [offset [type]]",
6707 },
6708 {
6709 .name = "test_image",
6710 .handler = handle_test_image_command,
6711 .mode = COMMAND_EXEC,
6712 .usage = "filename [offset [type]]",
6713 },
6714 {
6715 .name = "mem2array",
6716 .mode = COMMAND_EXEC,
6717 .jim_handler = jim_mem2array,
6718 .help = "read 8/16/32 bit memory and return as a TCL array "
6719 "for script processing",
6720 .usage = "arrayname bitwidth address count",
6721 },
6722 {
6723 .name = "array2mem",
6724 .mode = COMMAND_EXEC,
6725 .jim_handler = jim_array2mem,
6726 .help = "convert a TCL array to memory locations "
6727 "and write the 8/16/32 bit values",
6728 .usage = "arrayname bitwidth address count",
6729 },
6730 {
6731 .name = "reset_nag",
6732 .handler = handle_target_reset_nag,
6733 .mode = COMMAND_ANY,
6734 .help = "Nag after each reset about options that could have been "
6735 "enabled to improve performance.",
6736 .usage = "['enable'|'disable']",
6737 },
6738 {
6739 .name = "ps",
6740 .handler = handle_ps_command,
6741 .mode = COMMAND_EXEC,
6742 .help = "list all tasks",
6743 .usage = "",
6744 },
6745 {
6746 .name = "test_mem_access",
6747 .handler = handle_test_mem_access_command,
6748 .mode = COMMAND_EXEC,
6749 .help = "Test the target's memory access functions",
6750 .usage = "size",
6751 },
6752
6753 COMMAND_REGISTRATION_DONE
6754 };
6755 static int target_register_user_commands(struct command_context *cmd_ctx)
6756 {
6757 int retval = ERROR_OK;
6758 retval = target_request_register_commands(cmd_ctx);
6759 if (retval != ERROR_OK)
6760 return retval;
6761
6762 retval = trace_register_commands(cmd_ctx);
6763 if (retval != ERROR_OK)
6764 return retval;
6765
6766
6767 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6768 }
