1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/time_support.h>
45 #include <jtag/jtag.h>
46 #include <flash/nor/core.h>
47
48 #include "target.h"
49 #include "target_type.h"
50 #include "target_request.h"
51 #include "breakpoints.h"
52 #include "register.h"
53 #include "trace.h"
54 #include "image.h"
55 #include "rtos/rtos.h"
56 #include "transport/transport.h"
57 #include "arm_cti.h"
58
59 /* default halt wait timeout (ms) */
60 #define DEFAULT_HALT_TIMEOUT 5000
61
62 static int target_read_buffer_default(struct target *target, target_addr_t address,
63 uint32_t count, uint8_t *buffer);
64 static int target_write_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, const uint8_t *buffer);
66 static int target_array2mem(Jim_Interp *interp, struct target *target,
67 int argc, Jim_Obj * const *argv);
68 static int target_mem2array(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_register_user_commands(struct command_context *cmd_ctx);
71 static int target_get_gdb_fileio_info_default(struct target *target,
72 struct gdb_fileio_info *fileio_info);
73 static int target_gdb_fileio_end_default(struct target *target, int retcode,
74 int fileio_errno, bool ctrl_c);
75 static int target_profiling_default(struct target *target, uint32_t *samples,
76 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds);
77
78 /* targets */
79 extern struct target_type arm7tdmi_target;
80 extern struct target_type arm720t_target;
81 extern struct target_type arm9tdmi_target;
82 extern struct target_type arm920t_target;
83 extern struct target_type arm966e_target;
84 extern struct target_type arm946e_target;
85 extern struct target_type arm926ejs_target;
86 extern struct target_type fa526_target;
87 extern struct target_type feroceon_target;
88 extern struct target_type dragonite_target;
89 extern struct target_type xscale_target;
90 extern struct target_type cortexm_target;
91 extern struct target_type cortexa_target;
92 extern struct target_type aarch64_target;
93 extern struct target_type cortexr4_target;
94 extern struct target_type arm11_target;
95 extern struct target_type ls1_sap_target;
96 extern struct target_type mips_m4k_target;
97 extern struct target_type avr_target;
98 extern struct target_type dsp563xx_target;
99 extern struct target_type dsp5680xx_target;
100 extern struct target_type testee_target;
101 extern struct target_type avr32_ap7k_target;
102 extern struct target_type hla_target;
103 extern struct target_type nds32_v2_target;
104 extern struct target_type nds32_v3_target;
105 extern struct target_type nds32_v3m_target;
106 extern struct target_type or1k_target;
107 extern struct target_type quark_x10xx_target;
108 extern struct target_type quark_d20xx_target;
109 extern struct target_type stm8_target;
110 extern struct target_type riscv_target;
111
112 static struct target_type *target_types[] = {
113 &arm7tdmi_target,
114 &arm9tdmi_target,
115 &arm920t_target,
116 &arm720t_target,
117 &arm966e_target,
118 &arm946e_target,
119 &arm926ejs_target,
120 &fa526_target,
121 &feroceon_target,
122 &dragonite_target,
123 &xscale_target,
124 &cortexm_target,
125 &cortexa_target,
126 &cortexr4_target,
127 &arm11_target,
128 &ls1_sap_target,
129 &mips_m4k_target,
130 &avr_target,
131 &dsp563xx_target,
132 &dsp5680xx_target,
133 &testee_target,
134 &avr32_ap7k_target,
135 &hla_target,
136 &nds32_v2_target,
137 &nds32_v3_target,
138 &nds32_v3m_target,
139 &or1k_target,
140 &quark_x10xx_target,
141 &quark_d20xx_target,
142 &stm8_target,
143 &riscv_target,
144 #if BUILD_TARGET64
145 &aarch64_target,
146 #endif
147 NULL,
148 };
149
150 struct target *all_targets;
151 static struct target_event_callback *target_event_callbacks;
152 static struct target_timer_callback *target_timer_callbacks;
153 LIST_HEAD(target_reset_callback_list);
154 LIST_HEAD(target_trace_callback_list);
155 static const int polling_interval = 100;
156
157 static const Jim_Nvp nvp_assert[] = {
158 { .name = "assert", NVP_ASSERT },
159 { .name = "deassert", NVP_DEASSERT },
160 { .name = "T", NVP_ASSERT },
161 { .name = "F", NVP_DEASSERT },
162 { .name = "t", NVP_ASSERT },
163 { .name = "f", NVP_DEASSERT },
164 { .name = NULL, .value = -1 }
165 };
166
167 static const Jim_Nvp nvp_error_target[] = {
168 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
169 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
170 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
171 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
172 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
173 { .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
174 { .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
175 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
176 { .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
177 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
178 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
179 { .value = -1, .name = NULL }
180 };
181
182 static const char *target_strerror_safe(int err)
183 {
184 const Jim_Nvp *n;
185
186 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
187 if (n->name == NULL)
188 return "unknown";
189 else
190 return n->name;
191 }
192
193 static const Jim_Nvp nvp_target_event[] = {
194
195 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
196 { .value = TARGET_EVENT_HALTED, .name = "halted" },
197 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
198 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
199 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
200
201 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
202 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
203
204 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
205 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
206 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
207 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
208 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
209 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
210 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
211 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
212
213 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
214 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
215
216 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
217 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
218
219 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
220 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
221
222 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
223 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },
224
225 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
226 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },
227
228 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
229
230 { .name = NULL, .value = -1 }
231 };
232
233 static const Jim_Nvp nvp_target_state[] = {
234 { .name = "unknown", .value = TARGET_UNKNOWN },
235 { .name = "running", .value = TARGET_RUNNING },
236 { .name = "halted", .value = TARGET_HALTED },
237 { .name = "reset", .value = TARGET_RESET },
238 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
239 { .name = NULL, .value = -1 },
240 };
241
242 static const Jim_Nvp nvp_target_debug_reason[] = {
243 { .name = "debug-request" , .value = DBG_REASON_DBGRQ },
244 { .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT },
245 { .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT },
246 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
247 { .name = "single-step" , .value = DBG_REASON_SINGLESTEP },
248 { .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED },
249 { .name = "program-exit" , .value = DBG_REASON_EXIT },
250 { .name = "undefined" , .value = DBG_REASON_UNDEFINED },
251 { .name = NULL, .value = -1 },
252 };
253
254 static const Jim_Nvp nvp_target_endian[] = {
255 { .name = "big", .value = TARGET_BIG_ENDIAN },
256 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
257 { .name = "be", .value = TARGET_BIG_ENDIAN },
258 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
259 { .name = NULL, .value = -1 },
260 };
261
262 static const Jim_Nvp nvp_reset_modes[] = {
263 { .name = "unknown", .value = RESET_UNKNOWN },
264 { .name = "run" , .value = RESET_RUN },
265 { .name = "halt" , .value = RESET_HALT },
266 { .name = "init" , .value = RESET_INIT },
267 { .name = NULL , .value = -1 },
268 };
269
270 const char *debug_reason_name(struct target *t)
271 {
272 const char *cp;
273
274 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
275 t->debug_reason)->name;
276 if (!cp) {
277 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
278 cp = "(*BUG*unknown*BUG*)";
279 }
280 return cp;
281 }
282
283 const char *target_state_name(struct target *t)
284 {
285 const char *cp;
286 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
287 if (!cp) {
288 LOG_ERROR("Invalid target state: %d", (int)(t->state));
289 cp = "(*BUG*unknown*BUG*)";
290 }
291
292 if (!target_was_examined(t) && t->defer_examine)
293 cp = "examine deferred";
294
295 return cp;
296 }
297
298 const char *target_event_name(enum target_event event)
299 {
300 const char *cp;
301 cp = Jim_Nvp_value2name_simple(nvp_target_event, event)->name;
302 if (!cp) {
303 LOG_ERROR("Invalid target event: %d", (int)(event));
304 cp = "(*BUG*unknown*BUG*)";
305 }
306 return cp;
307 }
308
309 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
310 {
311 const char *cp;
312 cp = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
313 if (!cp) {
314 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
315 cp = "(*BUG*unknown*BUG*)";
316 }
317 return cp;
318 }
319
320 /* determine the number of the new target */
321 static int new_target_number(void)
322 {
323 struct target *t;
324 int x;
325
326 /* number is 0 based */
327 x = -1;
328 t = all_targets;
329 while (t) {
330 if (x < t->target_number)
331 x = t->target_number;
332 t = t->next;
333 }
334 return x + 1;
335 }
336
337 /* read a uint64_t from a buffer in target memory endianness */
338 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
339 {
340 if (target->endianness == TARGET_LITTLE_ENDIAN)
341 return le_to_h_u64(buffer);
342 else
343 return be_to_h_u64(buffer);
344 }
345
346 /* read a uint32_t from a buffer in target memory endianness */
347 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
348 {
349 if (target->endianness == TARGET_LITTLE_ENDIAN)
350 return le_to_h_u32(buffer);
351 else
352 return be_to_h_u32(buffer);
353 }
354
355 /* read a uint24_t from a buffer in target memory endianness */
356 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
357 {
358 if (target->endianness == TARGET_LITTLE_ENDIAN)
359 return le_to_h_u24(buffer);
360 else
361 return be_to_h_u24(buffer);
362 }
363
364 /* read a uint16_t from a buffer in target memory endianness */
365 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
366 {
367 if (target->endianness == TARGET_LITTLE_ENDIAN)
368 return le_to_h_u16(buffer);
369 else
370 return be_to_h_u16(buffer);
371 }
372
373 /* read a uint8_t from a buffer in target memory endianness */
374 static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
375 {
376 return *buffer & 0x0ff;
377 }
378
379 /* write a uint64_t to a buffer in target memory endianness */
380 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
381 {
382 if (target->endianness == TARGET_LITTLE_ENDIAN)
383 h_u64_to_le(buffer, value);
384 else
385 h_u64_to_be(buffer, value);
386 }
387
388 /* write a uint32_t to a buffer in target memory endianness */
389 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
390 {
391 if (target->endianness == TARGET_LITTLE_ENDIAN)
392 h_u32_to_le(buffer, value);
393 else
394 h_u32_to_be(buffer, value);
395 }
396
397 /* write a uint24_t to a buffer in target memory endianness */
398 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
399 {
400 if (target->endianness == TARGET_LITTLE_ENDIAN)
401 h_u24_to_le(buffer, value);
402 else
403 h_u24_to_be(buffer, value);
404 }
405
406 /* write a uint16_t to a buffer in target memory endianness */
407 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
408 {
409 if (target->endianness == TARGET_LITTLE_ENDIAN)
410 h_u16_to_le(buffer, value);
411 else
412 h_u16_to_be(buffer, value);
413 }
414
415 /* write a uint8_t to a buffer in target memory endianness */
416 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
417 {
418 *buffer = value;
419 }
420
421 /* read a uint64_t array from a buffer in target memory endianness */
422 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
423 {
424 uint32_t i;
425 for (i = 0; i < count; i++)
426 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
427 }
428
429 /* read a uint32_t array from a buffer in target memory endianness */
430 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
431 {
432 uint32_t i;
433 for (i = 0; i < count; i++)
434 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
435 }
436
437 /* read a uint16_t array from a buffer in target memory endianness */
438 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
439 {
440 uint32_t i;
441 for (i = 0; i < count; i++)
442 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
443 }
444
445 /* write a uint64_t array to a buffer in target memory endianness */
446 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
447 {
448 uint32_t i;
449 for (i = 0; i < count; i++)
450 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
451 }
452
453 /* write a uint32_t array to a buffer in target memory endianness */
454 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
455 {
456 uint32_t i;
457 for (i = 0; i < count; i++)
458 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
459 }
460
461 /* write a uint16_t array to a buffer in target memory endianness */
462 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
463 {
464 uint32_t i;
465 for (i = 0; i < count; i++)
466 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
467 }
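
/* Illustrative sketch (not part of the build): how the helpers above are
 * typically used to round-trip a value through a raw byte buffer in the
 * target's memory endianness.  Assumes "target" is a configured target. */
#if 0
static void example_endian_roundtrip(struct target *target)
{
	uint8_t raw[4];

	/* host value -> bytes in target byte order */
	target_buffer_set_u32(target, raw, 0x12345678);

	/* bytes in target byte order -> host value */
	uint32_t value = target_buffer_get_u32(target, raw);
	assert(value == 0x12345678);
}
#endif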
468
469 /* return a pointer to a configured target; id is name or number */
470 struct target *get_target(const char *id)
471 {
472 struct target *target;
473
474 /* try as Tcl target name */
475 for (target = all_targets; target; target = target->next) {
476 if (target_name(target) == NULL)
477 continue;
478 if (strcmp(id, target_name(target)) == 0)
479 return target;
480 }
481
482 /* It's OK to remove this fallback sometime after August 2010 or so */
483
484 /* no match, try as number */
485 unsigned num;
486 if (parse_uint(id, &num) != ERROR_OK)
487 return NULL;
488
489 for (target = all_targets; target; target = target->next) {
490 if (target->target_number == (int)num) {
491 LOG_WARNING("use '%s' as target identifier, not '%u'",
492 target_name(target), num);
493 return target;
494 }
495 }
496
497 return NULL;
498 }
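
/* Illustrative sketch (not part of the build): looking up a target by the
 * name given to "target create".  The name "example.cpu" is hypothetical;
 * numeric identifiers are only kept as a deprecated fallback. */
#if 0
static struct target *example_lookup(void)
{
	struct target *t = get_target("example.cpu");
	if (t == NULL)
		LOG_ERROR("target 'example.cpu' not found");
	return t;
}
#endif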
499
500 /* returns a pointer to the n-th configured target */
501 struct target *get_target_by_num(int num)
502 {
503 struct target *target = all_targets;
504
505 while (target) {
506 if (target->target_number == num)
507 return target;
508 target = target->next;
509 }
510
511 return NULL;
512 }
513
514 struct target *get_current_target(struct command_context *cmd_ctx)
515 {
516 struct target *target = cmd_ctx->current_target_override
517 ? cmd_ctx->current_target_override
518 : cmd_ctx->current_target;
519
520 if (target == NULL) {
521 LOG_ERROR("BUG: current_target out of bounds");
522 exit(-1);
523 }
524
525 return target;
526 }
527
528 int target_poll(struct target *target)
529 {
530 int retval;
531
532 /* We can't poll until after examine */
533 if (!target_was_examined(target)) {
534 /* Fail silently lest we pollute the log */
535 return ERROR_FAIL;
536 }
537
538 retval = target->type->poll(target);
539 if (retval != ERROR_OK)
540 return retval;
541
542 if (target->halt_issued) {
543 if (target->state == TARGET_HALTED)
544 target->halt_issued = false;
545 else {
546 int64_t t = timeval_ms() - target->halt_issued_time;
547 if (t > DEFAULT_HALT_TIMEOUT) {
548 target->halt_issued = false;
549 LOG_INFO("Halt timed out, wake up GDB.");
550 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
551 }
552 }
553 }
554
555 return ERROR_OK;
556 }
557
558 int target_halt(struct target *target)
559 {
560 int retval;
561 /* We can't poll until after examine */
562 if (!target_was_examined(target)) {
563 LOG_ERROR("Target not examined yet");
564 return ERROR_FAIL;
565 }
566
567 retval = target->type->halt(target);
568 if (retval != ERROR_OK)
569 return retval;
570
571 target->halt_issued = true;
572 target->halt_issued_time = timeval_ms();
573
574 return ERROR_OK;
575 }
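
/* Illustrative sketch (not part of the build): request a halt with
 * target_halt() and poll until the target reports TARGET_HALTED or a
 * caller-chosen deadline expires. */
#if 0
static int example_halt_and_wait(struct target *target, int timeout_ms)
{
	int retval = target_halt(target);
	if (retval != ERROR_OK)
		return retval;

	int64_t deadline = timeval_ms() + timeout_ms;
	while (target->state != TARGET_HALTED) {
		retval = target_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > deadline)
			return ERROR_TARGET_TIMEOUT;
		alive_sleep(10);
	}
	return ERROR_OK;
}
#endif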
576
577 /**
578 * Make the target (re)start executing using its saved execution
579 * context (possibly with some modifications).
580 *
581 * @param target Which target should start executing.
582 * @param current True to use the target's saved program counter instead
583 * of the address parameter
584 * @param address Optionally used as the program counter.
585 * @param handle_breakpoints True iff breakpoints at the resumption PC
586 * should be skipped. (For example, maybe execution was stopped by
587 * such a breakpoint, in which case it would be counterproductive to
588 * let it re-trigger.)
589 * @param debug_execution False if all working areas allocated by OpenOCD
590 * should be released and/or restored to their original contents.
591 * (This would for example be true to run some downloaded "helper"
592 * algorithm code, which resides in one such working buffer and uses
593 * another for data storage.)
594 *
595 * @todo Resolve the ambiguity about what the "debug_execution" flag
596 * signifies. For example, Target implementations don't agree on how
597 * it relates to invalidation of the register cache, or to whether
598 * breakpoints and watchpoints should be enabled. (It would seem wrong
599 * to enable breakpoints when running downloaded "helper" algorithms
600 * (debug_execution true), since the breakpoints would be set to match
601 * target firmware being debugged, not the helper algorithm.... and
602 * enabling them could cause such helpers to malfunction (for example,
603 * by overwriting data with a breakpoint instruction). On the other
604 * hand the infrastructure for running such helpers might use this
605 * procedure but rely on hardware breakpoint to detect termination.)
606 */
607 int target_resume(struct target *target, int current, target_addr_t address,
608 int handle_breakpoints, int debug_execution)
609 {
610 int retval;
611
612 /* We can't poll until after examine */
613 if (!target_was_examined(target)) {
614 LOG_ERROR("Target not examined yet");
615 return ERROR_FAIL;
616 }
617
618 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
619
620 /* note that resume *must* be asynchronous. The CPU can halt before
621 * we poll. The CPU can even halt at the current PC as a result of
622 * a software breakpoint being inserted by (a bug?) the application.
623 */
624 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
625 if (retval != ERROR_OK)
626 return retval;
627
628 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
629
630 return retval;
631 }
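
/* Illustrative sketch (not part of the build): the two common ways to call
 * target_resume().  The example_* wrappers and the explicit entry address
 * are hypothetical. */
#if 0
static int example_resume_from_pc(struct target *target)
{
	/* current=1: use the saved PC; handle_breakpoints=1: step over a
	 * breakpoint planted at that PC; debug_execution=0: normal resume */
	return target_resume(target, 1, 0, 1, 0);
}

static int example_run_helper(struct target *target, target_addr_t entry)
{
	/* current=0: start at "entry"; debug_execution=1: treat this as a
	 * debug helper run rather than resuming the debugged program */
	return target_resume(target, 0, entry, 0, 1);
}
#endif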
632
633 static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
634 {
635 char buf[100];
636 int retval;
637 Jim_Nvp *n;
638 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
639 if (n->name == NULL) {
640 LOG_ERROR("invalid reset mode");
641 return ERROR_FAIL;
642 }
643
644 struct target *target;
645 for (target = all_targets; target; target = target->next)
646 target_call_reset_callbacks(target, reset_mode);
647
648 /* disable polling during reset to make reset event scripts
649 * more predictable, i.e. dr/irscan & pathmove in events will
650 * not have JTAG operations injected into the middle of a sequence.
651 */
652 bool save_poll = jtag_poll_get_enabled();
653
654 jtag_poll_set_enabled(false);
655
656 sprintf(buf, "ocd_process_reset %s", n->name);
657 retval = Jim_Eval(cmd_ctx->interp, buf);
658
659 jtag_poll_set_enabled(save_poll);
660
661 if (retval != JIM_OK) {
662 Jim_MakeErrorMessage(cmd_ctx->interp);
663 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
664 return ERROR_FAIL;
665 }
666
667 /* We want any events to be processed before the prompt */
668 retval = target_call_timer_callbacks_now();
669
670 for (target = all_targets; target; target = target->next) {
671 target->type->check_reset(target);
672 target->running_alg = false;
673 }
674
675 return retval;
676 }
677
678 static int identity_virt2phys(struct target *target,
679 target_addr_t virtual, target_addr_t *physical)
680 {
681 *physical = virtual;
682 return ERROR_OK;
683 }
684
685 static int no_mmu(struct target *target, int *enabled)
686 {
687 *enabled = 0;
688 return ERROR_OK;
689 }
690
691 static int default_examine(struct target *target)
692 {
693 target_set_examined(target);
694 return ERROR_OK;
695 }
696
697 /* no check by default */
698 static int default_check_reset(struct target *target)
699 {
700 return ERROR_OK;
701 }
702
703 int target_examine_one(struct target *target)
704 {
705 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
706
707 int retval = target->type->examine(target);
708 if (retval != ERROR_OK)
709 return retval;
710
711 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
712
713 return ERROR_OK;
714 }
715
716 static int jtag_enable_callback(enum jtag_event event, void *priv)
717 {
718 struct target *target = priv;
719
720 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
721 return ERROR_OK;
722
723 jtag_unregister_event_callback(jtag_enable_callback, target);
724
725 return target_examine_one(target);
726 }
727
728 /* Targets that correctly implement init + examine, i.e.
729 * no communication with target during init:
730 *
731 * XScale
732 */
733 int target_examine(void)
734 {
735 int retval = ERROR_OK;
736 struct target *target;
737
738 for (target = all_targets; target; target = target->next) {
739 /* defer examination, but don't skip it */
740 if (!target->tap->enabled) {
741 jtag_register_event_callback(jtag_enable_callback,
742 target);
743 continue;
744 }
745
746 if (target->defer_examine)
747 continue;
748
749 retval = target_examine_one(target);
750 if (retval != ERROR_OK)
751 return retval;
752 }
753 return retval;
754 }
755
756 const char *target_type_name(struct target *target)
757 {
758 return target->type->name;
759 }
760
761 static int target_soft_reset_halt(struct target *target)
762 {
763 if (!target_was_examined(target)) {
764 LOG_ERROR("Target not examined yet");
765 return ERROR_FAIL;
766 }
767 if (!target->type->soft_reset_halt) {
768 LOG_ERROR("Target %s does not support soft_reset_halt",
769 target_name(target));
770 return ERROR_FAIL;
771 }
772 return target->type->soft_reset_halt(target);
773 }
774
775 /**
776 * Downloads a target-specific native code algorithm to the target,
777 * and executes it.  Note that some targets may need to set up, enable,
778 * and tear down a breakpoint (hard or soft) to detect algorithm
779 * termination, while others may support lower overhead schemes where
780 * soft breakpoints embedded in the algorithm automatically terminate the
781 * algorithm.
782 *
783 * @param target used to run the algorithm
784 * @param arch_info target-specific description of the algorithm.
785 */
786 int target_run_algorithm(struct target *target,
787 int num_mem_params, struct mem_param *mem_params,
788 int num_reg_params, struct reg_param *reg_param,
789 uint32_t entry_point, uint32_t exit_point,
790 int timeout_ms, void *arch_info)
791 {
792 int retval = ERROR_FAIL;
793
794 if (!target_was_examined(target)) {
795 LOG_ERROR("Target not examined yet");
796 goto done;
797 }
798 if (!target->type->run_algorithm) {
799 LOG_ERROR("Target type '%s' does not support %s",
800 target_type_name(target), __func__);
801 goto done;
802 }
803
804 target->running_alg = true;
805 retval = target->type->run_algorithm(target,
806 num_mem_params, mem_params,
807 num_reg_params, reg_param,
808 entry_point, exit_point, timeout_ms, arch_info);
809 target->running_alg = false;
810
811 done:
812 return retval;
813 }
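
/* Illustrative sketch (not part of the build): running a small downloaded
 * routine with one register parameter.  The register name "r0", the entry
 * and exit addresses, and the arch_info descriptor are target-specific and
 * hypothetical here (see target/algorithm.h for the parameter helpers). */
#if 0
static int example_run_algorithm(struct target *target, uint32_t entry_addr,
		uint32_t exit_addr, void *arch_info)
{
	struct reg_param reg_params[1];

	init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
	buf_set_u32(reg_params[0].value, 0, 32, 42);	/* input value in r0 */

	int retval = target_run_algorithm(target, 0, NULL,
			1, reg_params, entry_addr, exit_addr, 1000, arch_info);

	if (retval == ERROR_OK) {
		uint32_t result = buf_get_u32(reg_params[0].value, 0, 32);
		LOG_DEBUG("algorithm returned %" PRIu32, result);
	}

	destroy_reg_param(&reg_params[0]);
	return retval;
}
#endif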
814
815 /**
816 * Executes a target-specific native code algorithm and leaves it running.
817 *
818 * @param target used to run the algorithm
819 * @param arch_info target-specific description of the algorithm.
820 */
821 int target_start_algorithm(struct target *target,
822 int num_mem_params, struct mem_param *mem_params,
823 int num_reg_params, struct reg_param *reg_params,
824 uint32_t entry_point, uint32_t exit_point,
825 void *arch_info)
826 {
827 int retval = ERROR_FAIL;
828
829 if (!target_was_examined(target)) {
830 LOG_ERROR("Target not examined yet");
831 goto done;
832 }
833 if (!target->type->start_algorithm) {
834 LOG_ERROR("Target type '%s' does not support %s",
835 target_type_name(target), __func__);
836 goto done;
837 }
838 if (target->running_alg) {
839 LOG_ERROR("Target is already running an algorithm");
840 goto done;
841 }
842
843 target->running_alg = true;
844 retval = target->type->start_algorithm(target,
845 num_mem_params, mem_params,
846 num_reg_params, reg_params,
847 entry_point, exit_point, arch_info);
848
849 done:
850 return retval;
851 }
852
853 /**
854 * Waits for an algorithm started with target_start_algorithm() to complete.
855 *
856 * @param target used to run the algorithm
857 * @param arch_info target-specific description of the algorithm.
858 */
859 int target_wait_algorithm(struct target *target,
860 int num_mem_params, struct mem_param *mem_params,
861 int num_reg_params, struct reg_param *reg_params,
862 uint32_t exit_point, int timeout_ms,
863 void *arch_info)
864 {
865 int retval = ERROR_FAIL;
866
867 if (!target->type->wait_algorithm) {
868 LOG_ERROR("Target type '%s' does not support %s",
869 target_type_name(target), __func__);
870 goto done;
871 }
872 if (!target->running_alg) {
873 LOG_ERROR("Target is not running an algorithm");
874 goto done;
875 }
876
877 retval = target->type->wait_algorithm(target,
878 num_mem_params, mem_params,
879 num_reg_params, reg_params,
880 exit_point, timeout_ms, arch_info);
881 if (retval != ERROR_TARGET_TIMEOUT)
882 target->running_alg = false;
883
884 done:
885 return retval;
886 }
887
888 /**
889 * Streams data to a circular buffer on target intended for consumption by code
890 * running asynchronously on target.
891 *
892 * This is intended for applications where target-specific native code runs
893 * on the target, receives data from the circular buffer, does something with
894 * it (most likely writing it to a flash memory), and advances the circular
895 * buffer pointer.
896 *
897 * This assumes that the helper algorithm has already been loaded to the target,
898 * but has not been started yet. The given memory and register parameters are
899 * passed to the algorithm.
900 *
901 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
902 * following format:
903 *
904 * [buffer_start + 0, buffer_start + 4):
905 * Write Pointer address (aka head). Written and updated by this
906 * routine when new data is written to the circular buffer.
907 * [buffer_start + 4, buffer_start + 8):
908 * Read Pointer address (aka tail). Updated by code running on the
909 * target after it consumes data.
910 * [buffer_start + 8, buffer_start + buffer_size):
911 * Circular buffer contents.
912 *
913 * See contrib/loaders/flash/stm32f1x.S for an example.
914 *
915 * @param target used to run the algorithm
916 * @param buffer address on the host where data to be sent is located
917 * @param count number of blocks to send
918 * @param block_size size in bytes of each block
919 * @param num_mem_params count of memory-based params to pass to algorithm
920 * @param mem_params memory-based params to pass to algorithm
921 * @param num_reg_params count of register-based params to pass to algorithm
922 * @param reg_params register-based params to pass to algorithm
923 * @param buffer_start address on the target of the circular buffer structure
924 * @param buffer_size size of the circular buffer structure
925 * @param entry_point address on the target to execute to start the algorithm
926 * @param exit_point address at which to set a breakpoint to catch the
927 * end of the algorithm; can be 0 if target triggers a breakpoint itself
928 */
929
930 int target_run_flash_async_algorithm(struct target *target,
931 const uint8_t *buffer, uint32_t count, int block_size,
932 int num_mem_params, struct mem_param *mem_params,
933 int num_reg_params, struct reg_param *reg_params,
934 uint32_t buffer_start, uint32_t buffer_size,
935 uint32_t entry_point, uint32_t exit_point, void *arch_info)
936 {
937 int retval;
938 int timeout = 0;
939
940 const uint8_t *buffer_orig = buffer;
941
942 /* Set up working area. First word is write pointer, second word is read pointer,
943 * rest is fifo data area. */
944 uint32_t wp_addr = buffer_start;
945 uint32_t rp_addr = buffer_start + 4;
946 uint32_t fifo_start_addr = buffer_start + 8;
947 uint32_t fifo_end_addr = buffer_start + buffer_size;
948
949 uint32_t wp = fifo_start_addr;
950 uint32_t rp = fifo_start_addr;
951
952 /* validate block_size is 2^n */
953 assert(!block_size || !(block_size & (block_size - 1)));
954
955 retval = target_write_u32(target, wp_addr, wp);
956 if (retval != ERROR_OK)
957 return retval;
958 retval = target_write_u32(target, rp_addr, rp);
959 if (retval != ERROR_OK)
960 return retval;
961
962 /* Start up algorithm on target and let it idle while writing the first chunk */
963 retval = target_start_algorithm(target, num_mem_params, mem_params,
964 num_reg_params, reg_params,
965 entry_point,
966 exit_point,
967 arch_info);
968
969 if (retval != ERROR_OK) {
970 LOG_ERROR("error starting target flash write algorithm");
971 return retval;
972 }
973
974 while (count > 0) {
975
976 retval = target_read_u32(target, rp_addr, &rp);
977 if (retval != ERROR_OK) {
978 LOG_ERROR("failed to get read pointer");
979 break;
980 }
981
982 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
983 (size_t) (buffer - buffer_orig), count, wp, rp);
984
985 if (rp == 0) {
986 LOG_ERROR("flash write algorithm aborted by target");
987 retval = ERROR_FLASH_OPERATION_FAILED;
988 break;
989 }
990
991 if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
992 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
993 break;
994 }
995
996 /* Count the number of bytes available in the fifo without
997 * crossing the wrap around. Make sure to not fill it completely,
998 * because that would make wp == rp and that's the empty condition. */
999 uint32_t thisrun_bytes;
1000 if (rp > wp)
1001 thisrun_bytes = rp - wp - block_size;
1002 else if (rp > fifo_start_addr)
1003 thisrun_bytes = fifo_end_addr - wp;
1004 else
1005 thisrun_bytes = fifo_end_addr - wp - block_size;
1006
1007 if (thisrun_bytes == 0) {
1008 /* Throttle polling a bit if transfer is (much) faster than flash
1009 * programming. The exact delay shouldn't matter as long as it's
1010 * less than buffer size / flash speed. This is very unlikely to
1011 * run when using high latency connections such as USB. */
1012 alive_sleep(10);
1013
1014 /* To stop an infinite loop on some targets, check and increment a timeout;
1015 * this issue was observed on a Stellaris using the new ICDI interface. */
1016 if (timeout++ >= 500) {
1017 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1018 return ERROR_FLASH_OPERATION_FAILED;
1019 }
1020 continue;
1021 }
1022
1023 /* reset our timeout */
1024 timeout = 0;
1025
1026 /* Limit to the amount of data we actually want to write */
1027 if (thisrun_bytes > count * block_size)
1028 thisrun_bytes = count * block_size;
1029
1030 /* Write data to fifo */
1031 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1032 if (retval != ERROR_OK)
1033 break;
1034
1035 /* Update counters and wrap write pointer */
1036 buffer += thisrun_bytes;
1037 count -= thisrun_bytes / block_size;
1038 wp += thisrun_bytes;
1039 if (wp >= fifo_end_addr)
1040 wp = fifo_start_addr;
1041
1042 /* Store updated write pointer to target */
1043 retval = target_write_u32(target, wp_addr, wp);
1044 if (retval != ERROR_OK)
1045 break;
1046 }
1047
1048 if (retval != ERROR_OK) {
1049 /* abort flash write algorithm on target */
1050 target_write_u32(target, wp_addr, 0);
1051 }
1052
1053 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1054 num_reg_params, reg_params,
1055 exit_point,
1056 10000,
1057 arch_info);
1058
1059 if (retval2 != ERROR_OK) {
1060 LOG_ERROR("error waiting for target flash write algorithm");
1061 retval = retval2;
1062 }
1063
1064 if (retval == ERROR_OK) {
1065 /* check if algorithm set rp = 0 after fifo writer loop finished */
1066 retval = target_read_u32(target, rp_addr, &rp);
1067 if (retval == ERROR_OK && rp == 0) {
1068 LOG_ERROR("flash write algorithm aborted by target");
1069 retval = ERROR_FLASH_OPERATION_FAILED;
1070 }
1071 }
1072
1073 return retval;
1074 }
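
/* Illustrative sketch (not part of the build): the consumer loop that the
 * target-resident algorithm is expected to run against the circular buffer
 * laid out above.  BLOCK_SIZE and flash_write_block() are hypothetical; wp
 * and rp hold target addresses inside [fifo_start, fifo_end). */
#if 0
void example_fifo_consumer(volatile uint32_t *wp_slot, volatile uint32_t *rp_slot,
		uint32_t fifo_start, uint32_t fifo_end)
{
	uint32_t rp = fifo_start;

	for (;;) {
		uint32_t wp = *wp_slot;
		if (wp == 0)
			return;			/* host aborted the transfer */
		if (rp == wp)
			continue;		/* FIFO empty: wait for the host */

		if (flash_write_block((const uint8_t *)rp, BLOCK_SIZE) != 0) {
			*rp_slot = 0;		/* rp == 0 reports an error to the host */
			return;
		}

		rp += BLOCK_SIZE;
		if (rp >= fifo_end)
			rp = fifo_start;	/* wrap around */
		*rp_slot = rp;			/* publish progress to the host */
	}
}
#endif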
1075
1076 int target_read_memory(struct target *target,
1077 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1078 {
1079 if (!target_was_examined(target)) {
1080 LOG_ERROR("Target not examined yet");
1081 return ERROR_FAIL;
1082 }
1083 if (!target->type->read_memory) {
1084 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1085 return ERROR_FAIL;
1086 }
1087 return target->type->read_memory(target, address, size, count, buffer);
1088 }
1089
1090 int target_read_phys_memory(struct target *target,
1091 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1092 {
1093 if (!target_was_examined(target)) {
1094 LOG_ERROR("Target not examined yet");
1095 return ERROR_FAIL;
1096 }
1097 if (!target->type->read_phys_memory) {
1098 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1099 return ERROR_FAIL;
1100 }
1101 return target->type->read_phys_memory(target, address, size, count, buffer);
1102 }
1103
1104 int target_write_memory(struct target *target,
1105 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1106 {
1107 if (!target_was_examined(target)) {
1108 LOG_ERROR("Target not examined yet");
1109 return ERROR_FAIL;
1110 }
1111 if (!target->type->write_memory) {
1112 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1113 return ERROR_FAIL;
1114 }
1115 return target->type->write_memory(target, address, size, count, buffer);
1116 }
1117
1118 int target_write_phys_memory(struct target *target,
1119 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1120 {
1121 if (!target_was_examined(target)) {
1122 LOG_ERROR("Target not examined yet");
1123 return ERROR_FAIL;
1124 }
1125 if (!target->type->write_phys_memory) {
1126 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1127 return ERROR_FAIL;
1128 }
1129 return target->type->write_phys_memory(target, address, size, count, buffer);
1130 }
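
/* Illustrative sketch (not part of the build): reading a block of words and
 * converting them to host byte order with the buffer helpers defined earlier.
 * The address 0x20000000 is hypothetical. */
#if 0
static int example_read_words(struct target *target, uint32_t *out)
{
	uint8_t raw[16 * 4];

	int retval = target_read_memory(target, 0x20000000, 4, 16, raw);
	if (retval != ERROR_OK)
		return retval;

	target_buffer_get_u32_array(target, raw, 16, out);
	return ERROR_OK;
}
#endif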
1131
1132 int target_add_breakpoint(struct target *target,
1133 struct breakpoint *breakpoint)
1134 {
1135 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1136 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1137 return ERROR_TARGET_NOT_HALTED;
1138 }
1139 return target->type->add_breakpoint(target, breakpoint);
1140 }
1141
1142 int target_add_context_breakpoint(struct target *target,
1143 struct breakpoint *breakpoint)
1144 {
1145 if (target->state != TARGET_HALTED) {
1146 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1147 return ERROR_TARGET_NOT_HALTED;
1148 }
1149 return target->type->add_context_breakpoint(target, breakpoint);
1150 }
1151
1152 int target_add_hybrid_breakpoint(struct target *target,
1153 struct breakpoint *breakpoint)
1154 {
1155 if (target->state != TARGET_HALTED) {
1156 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1157 return ERROR_TARGET_NOT_HALTED;
1158 }
1159 return target->type->add_hybrid_breakpoint(target, breakpoint);
1160 }
1161
1162 int target_remove_breakpoint(struct target *target,
1163 struct breakpoint *breakpoint)
1164 {
1165 return target->type->remove_breakpoint(target, breakpoint);
1166 }
1167
1168 int target_add_watchpoint(struct target *target,
1169 struct watchpoint *watchpoint)
1170 {
1171 if (target->state != TARGET_HALTED) {
1172 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1173 return ERROR_TARGET_NOT_HALTED;
1174 }
1175 return target->type->add_watchpoint(target, watchpoint);
1176 }
1177 int target_remove_watchpoint(struct target *target,
1178 struct watchpoint *watchpoint)
1179 {
1180 return target->type->remove_watchpoint(target, watchpoint);
1181 }
1182 int target_hit_watchpoint(struct target *target,
1183 struct watchpoint **hit_watchpoint)
1184 {
1185 if (target->state != TARGET_HALTED) {
1186 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1187 return ERROR_TARGET_NOT_HALTED;
1188 }
1189
1190 if (target->type->hit_watchpoint == NULL) {
1191 /* For backward compatibility, if hit_watchpoint is not implemented,
1192 * return ERROR_FAIL so that gdb_server will not report nonsense
1193 * information. */
1194 return ERROR_FAIL;
1195 }
1196
1197 return target->type->hit_watchpoint(target, hit_watchpoint);
1198 }
1199
1200 int target_get_gdb_reg_list(struct target *target,
1201 struct reg **reg_list[], int *reg_list_size,
1202 enum target_register_class reg_class)
1203 {
1204 return target->type->get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1205 }
1206 int target_step(struct target *target,
1207 int current, target_addr_t address, int handle_breakpoints)
1208 {
1209 return target->type->step(target, current, address, handle_breakpoints);
1210 }
1211
1212 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1213 {
1214 if (target->state != TARGET_HALTED) {
1215 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1216 return ERROR_TARGET_NOT_HALTED;
1217 }
1218 return target->type->get_gdb_fileio_info(target, fileio_info);
1219 }
1220
1221 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1222 {
1223 if (target->state != TARGET_HALTED) {
1224 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1225 return ERROR_TARGET_NOT_HALTED;
1226 }
1227 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1228 }
1229
1230 int target_profiling(struct target *target, uint32_t *samples,
1231 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1232 {
1233 if (target->state != TARGET_HALTED) {
1234 LOG_WARNING("target %s is not halted (profiling)", target->cmd_name);
1235 return ERROR_TARGET_NOT_HALTED;
1236 }
1237 return target->type->profiling(target, samples, max_num_samples,
1238 num_samples, seconds);
1239 }
1240
1241 /**
1242 * Reset the @c examined flag for the given target.
1243 * Pure paranoia -- targets are zeroed on allocation.
1244 */
1245 static void target_reset_examined(struct target *target)
1246 {
1247 target->examined = false;
1248 }
1249
1250 static int handle_target(void *priv);
1251
1252 static int target_init_one(struct command_context *cmd_ctx,
1253 struct target *target)
1254 {
1255 target_reset_examined(target);
1256
1257 struct target_type *type = target->type;
1258 if (type->examine == NULL)
1259 type->examine = default_examine;
1260
1261 if (type->check_reset == NULL)
1262 type->check_reset = default_check_reset;
1263
1264 assert(type->init_target != NULL);
1265
1266 int retval = type->init_target(cmd_ctx, target);
1267 if (ERROR_OK != retval) {
1268 LOG_ERROR("target '%s' init failed", target_name(target));
1269 return retval;
1270 }
1271
1272 /* Sanity-check MMU support ... stub in what we must, to help
1273 * implement it in stages, but warn if we need to do so.
1274 */
1275 if (type->mmu) {
1276 if (type->virt2phys == NULL) {
1277 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1278 type->virt2phys = identity_virt2phys;
1279 }
1280 } else {
1281 /* Make sure no-MMU targets all behave the same: make no
1282 * distinction between physical and virtual addresses, and
1283 * ensure that virt2phys() is always an identity mapping.
1284 */
1285 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1286 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1287
1288 type->mmu = no_mmu;
1289 type->write_phys_memory = type->write_memory;
1290 type->read_phys_memory = type->read_memory;
1291 type->virt2phys = identity_virt2phys;
1292 }
1293
1294 if (target->type->read_buffer == NULL)
1295 target->type->read_buffer = target_read_buffer_default;
1296
1297 if (target->type->write_buffer == NULL)
1298 target->type->write_buffer = target_write_buffer_default;
1299
1300 if (target->type->get_gdb_fileio_info == NULL)
1301 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1302
1303 if (target->type->gdb_fileio_end == NULL)
1304 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1305
1306 if (target->type->profiling == NULL)
1307 target->type->profiling = target_profiling_default;
1308
1309 return ERROR_OK;
1310 }
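
/* Illustrative sketch (not part of the build): a minimal struct target_type.
 * The example_* hooks are hypothetical; every hook left NULL here (examine,
 * check_reset, read/write_buffer, gdb fileio, profiling, and the MMU trio)
 * is filled in with a default by target_init_one() above. */
#if 0
static struct target_type example_target = {
	.name = "example",
	.init_target = example_init_target,
	.poll = example_poll,
	.halt = example_halt,
	.resume = example_resume,
	.step = example_step,
	.read_memory = example_read_memory,
	.write_memory = example_write_memory,
	.get_gdb_reg_list = example_get_gdb_reg_list,
};
#endif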
1311
1312 static int target_init(struct command_context *cmd_ctx)
1313 {
1314 struct target *target;
1315 int retval;
1316
1317 for (target = all_targets; target; target = target->next) {
1318 retval = target_init_one(cmd_ctx, target);
1319 if (ERROR_OK != retval)
1320 return retval;
1321 }
1322
1323 if (!all_targets)
1324 return ERROR_OK;
1325
1326 retval = target_register_user_commands(cmd_ctx);
1327 if (ERROR_OK != retval)
1328 return retval;
1329
1330 retval = target_register_timer_callback(&handle_target,
1331 polling_interval, 1, cmd_ctx->interp);
1332 if (ERROR_OK != retval)
1333 return retval;
1334
1335 return ERROR_OK;
1336 }
1337
1338 COMMAND_HANDLER(handle_target_init_command)
1339 {
1340 int retval;
1341
1342 if (CMD_ARGC != 0)
1343 return ERROR_COMMAND_SYNTAX_ERROR;
1344
1345 static bool target_initialized;
1346 if (target_initialized) {
1347 LOG_INFO("'target init' has already been called");
1348 return ERROR_OK;
1349 }
1350 target_initialized = true;
1351
1352 retval = command_run_line(CMD_CTX, "init_targets");
1353 if (ERROR_OK != retval)
1354 return retval;
1355
1356 retval = command_run_line(CMD_CTX, "init_target_events");
1357 if (ERROR_OK != retval)
1358 return retval;
1359
1360 retval = command_run_line(CMD_CTX, "init_board");
1361 if (ERROR_OK != retval)
1362 return retval;
1363
1364 LOG_DEBUG("Initializing targets...");
1365 return target_init(CMD_CTX);
1366 }
1367
1368 int target_register_event_callback(int (*callback)(struct target *target,
1369 enum target_event event, void *priv), void *priv)
1370 {
1371 struct target_event_callback **callbacks_p = &target_event_callbacks;
1372
1373 if (callback == NULL)
1374 return ERROR_COMMAND_SYNTAX_ERROR;
1375
1376 if (*callbacks_p) {
1377 while ((*callbacks_p)->next)
1378 callbacks_p = &((*callbacks_p)->next);
1379 callbacks_p = &((*callbacks_p)->next);
1380 }
1381
1382 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1383 (*callbacks_p)->callback = callback;
1384 (*callbacks_p)->priv = priv;
1385 (*callbacks_p)->next = NULL;
1386
1387 return ERROR_OK;
1388 }
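
/* Illustrative sketch (not part of the build): a hypothetical event callback
 * that logs every halt, registered with the function above. */
#if 0
static int example_halt_logger(struct target *target,
		enum target_event event, void *priv)
{
	if (event == TARGET_EVENT_HALTED)
		LOG_INFO("target %s halted", target_name(target));
	return ERROR_OK;
}

/* during setup: target_register_event_callback(example_halt_logger, NULL); */
#endif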
1389
1390 int target_register_reset_callback(int (*callback)(struct target *target,
1391 enum target_reset_mode reset_mode, void *priv), void *priv)
1392 {
1393 struct target_reset_callback *entry;
1394
1395 if (callback == NULL)
1396 return ERROR_COMMAND_SYNTAX_ERROR;
1397
1398 entry = malloc(sizeof(struct target_reset_callback));
1399 if (entry == NULL) {
1400 LOG_ERROR("error allocating buffer for reset callback entry");
1401 return ERROR_COMMAND_SYNTAX_ERROR;
1402 }
1403
1404 entry->callback = callback;
1405 entry->priv = priv;
1406 list_add(&entry->list, &target_reset_callback_list);
1407
1408
1409 return ERROR_OK;
1410 }
1411
1412 int target_register_trace_callback(int (*callback)(struct target *target,
1413 size_t len, uint8_t *data, void *priv), void *priv)
1414 {
1415 struct target_trace_callback *entry;
1416
1417 if (callback == NULL)
1418 return ERROR_COMMAND_SYNTAX_ERROR;
1419
1420 entry = malloc(sizeof(struct target_trace_callback));
1421 if (entry == NULL) {
1422 LOG_ERROR("error allocating buffer for trace callback entry");
1423 return ERROR_COMMAND_SYNTAX_ERROR;
1424 }
1425
1426 entry->callback = callback;
1427 entry->priv = priv;
1428 list_add(&entry->list, &target_trace_callback_list);
1429
1430
1431 return ERROR_OK;
1432 }
1433
1434 int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
1435 {
1436 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1437
1438 if (callback == NULL)
1439 return ERROR_COMMAND_SYNTAX_ERROR;
1440
1441 if (*callbacks_p) {
1442 while ((*callbacks_p)->next)
1443 callbacks_p = &((*callbacks_p)->next);
1444 callbacks_p = &((*callbacks_p)->next);
1445 }
1446
1447 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1448 (*callbacks_p)->callback = callback;
1449 (*callbacks_p)->periodic = periodic;
1450 (*callbacks_p)->time_ms = time_ms;
1451 (*callbacks_p)->removed = false;
1452
1453 gettimeofday(&(*callbacks_p)->when, NULL);
1454 timeval_add_time(&(*callbacks_p)->when, 0, time_ms * 1000);
1455
1456 (*callbacks_p)->priv = priv;
1457 (*callbacks_p)->next = NULL;
1458
1459 return ERROR_OK;
1460 }
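
/* Illustrative sketch (not part of the build): a hypothetical periodic timer
 * callback fired roughly every second by the mechanism above. */
#if 0
static int example_tick(void *priv)
{
	LOG_DEBUG("periodic housekeeping");
	return ERROR_OK;
}

/* during setup: target_register_timer_callback(example_tick, 1000, 1, NULL); */
#endif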
1461
1462 int target_unregister_event_callback(int (*callback)(struct target *target,
1463 enum target_event event, void *priv), void *priv)
1464 {
1465 struct target_event_callback **p = &target_event_callbacks;
1466 struct target_event_callback *c = target_event_callbacks;
1467
1468 if (callback == NULL)
1469 return ERROR_COMMAND_SYNTAX_ERROR;
1470
1471 while (c) {
1472 struct target_event_callback *next = c->next;
1473 if ((c->callback == callback) && (c->priv == priv)) {
1474 *p = next;
1475 free(c);
1476 return ERROR_OK;
1477 } else
1478 p = &(c->next);
1479 c = next;
1480 }
1481
1482 return ERROR_OK;
1483 }
1484
1485 int target_unregister_reset_callback(int (*callback)(struct target *target,
1486 enum target_reset_mode reset_mode, void *priv), void *priv)
1487 {
1488 struct target_reset_callback *entry;
1489
1490 if (callback == NULL)
1491 return ERROR_COMMAND_SYNTAX_ERROR;
1492
1493 list_for_each_entry(entry, &target_reset_callback_list, list) {
1494 if (entry->callback == callback && entry->priv == priv) {
1495 list_del(&entry->list);
1496 free(entry);
1497 break;
1498 }
1499 }
1500
1501 return ERROR_OK;
1502 }
1503
1504 int target_unregister_trace_callback(int (*callback)(struct target *target,
1505 size_t len, uint8_t *data, void *priv), void *priv)
1506 {
1507 struct target_trace_callback *entry;
1508
1509 if (callback == NULL)
1510 return ERROR_COMMAND_SYNTAX_ERROR;
1511
1512 list_for_each_entry(entry, &target_trace_callback_list, list) {
1513 if (entry->callback == callback && entry->priv == priv) {
1514 list_del(&entry->list);
1515 free(entry);
1516 break;
1517 }
1518 }
1519
1520 return ERROR_OK;
1521 }
1522
1523 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1524 {
1525 if (callback == NULL)
1526 return ERROR_COMMAND_SYNTAX_ERROR;
1527
1528 for (struct target_timer_callback *c = target_timer_callbacks;
1529 c; c = c->next) {
1530 if ((c->callback == callback) && (c->priv == priv)) {
1531 c->removed = true;
1532 return ERROR_OK;
1533 }
1534 }
1535
1536 return ERROR_FAIL;
1537 }
1538
1539 int target_call_event_callbacks(struct target *target, enum target_event event)
1540 {
1541 struct target_event_callback *callback = target_event_callbacks;
1542 struct target_event_callback *next_callback;
1543
1544 if (event == TARGET_EVENT_HALTED) {
1545 /* execute early halted first */
1546 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1547 }
1548
1549 LOG_DEBUG("target event %i (%s)", event,
1550 Jim_Nvp_value2name_simple(nvp_target_event, event)->name);
1551
1552 target_handle_event(target, event);
1553
1554 while (callback) {
1555 next_callback = callback->next;
1556 callback->callback(target, event, callback->priv);
1557 callback = next_callback;
1558 }
1559
1560 return ERROR_OK;
1561 }
1562
1563 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1564 {
1565 struct target_reset_callback *callback;
1566
1567 LOG_DEBUG("target reset %i (%s)", reset_mode,
1568 Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1569
1570 list_for_each_entry(callback, &target_reset_callback_list, list)
1571 callback->callback(target, reset_mode, callback->priv);
1572
1573 return ERROR_OK;
1574 }
1575
1576 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1577 {
1578 struct target_trace_callback *callback;
1579
1580 list_for_each_entry(callback, &target_trace_callback_list, list)
1581 callback->callback(target, len, data, callback->priv);
1582
1583 return ERROR_OK;
1584 }
1585
1586 static int target_timer_callback_periodic_restart(
1587 struct target_timer_callback *cb, struct timeval *now)
1588 {
1589 cb->when = *now;
1590 timeval_add_time(&cb->when, 0, cb->time_ms * 1000L);
1591 return ERROR_OK;
1592 }
1593
1594 static int target_call_timer_callback(struct target_timer_callback *cb,
1595 struct timeval *now)
1596 {
1597 cb->callback(cb->priv);
1598
1599 if (cb->periodic)
1600 return target_timer_callback_periodic_restart(cb, now);
1601
1602 return target_unregister_timer_callback(cb->callback, cb->priv);
1603 }
1604
1605 static int target_call_timer_callbacks_check_time(int checktime)
1606 {
1607 static bool callback_processing;
1608
1609 /* Do not allow nesting */
1610 if (callback_processing)
1611 return ERROR_OK;
1612
1613 callback_processing = true;
1614
1615 keep_alive();
1616
1617 struct timeval now;
1618 gettimeofday(&now, NULL);
1619
1620 /* Store an address of the place containing a pointer to the
1621 * next item; initially, that's a standalone "root of the
1622 * list" variable. */
1623 struct target_timer_callback **callback = &target_timer_callbacks;
1624 while (*callback) {
1625 if ((*callback)->removed) {
1626 struct target_timer_callback *p = *callback;
1627 *callback = (*callback)->next;
1628 free(p);
1629 continue;
1630 }
1631
1632 bool call_it = (*callback)->callback &&
1633 ((!checktime && (*callback)->periodic) ||
1634 timeval_compare(&now, &(*callback)->when) >= 0);
1635
1636 if (call_it)
1637 target_call_timer_callback(*callback, &now);
1638
1639 callback = &(*callback)->next;
1640 }
1641
1642 callback_processing = false;
1643 return ERROR_OK;
1644 }
1645
1646 int target_call_timer_callbacks(void)
1647 {
1648 return target_call_timer_callbacks_check_time(1);
1649 }
1650
1651 /* invoke periodic callbacks immediately */
1652 int target_call_timer_callbacks_now(void)
1653 {
1654 return target_call_timer_callbacks_check_time(0);
1655 }
1656
1657 /* Prints the working area layout for debug purposes */
1658 static void print_wa_layout(struct target *target)
1659 {
1660 struct working_area *c = target->working_areas;
1661
1662 while (c) {
1663 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1664 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1665 c->address, c->address + c->size - 1, c->size);
1666 c = c->next;
1667 }
1668 }
1669
1670 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1671 static void target_split_working_area(struct working_area *area, uint32_t size)
1672 {
1673 assert(area->free); /* Shouldn't split an allocated area */
1674 assert(size <= area->size); /* Caller should guarantee this */
1675
1676 /* Split only if not already the right size */
1677 if (size < area->size) {
1678 struct working_area *new_wa = malloc(sizeof(*new_wa));
1679
1680 if (new_wa == NULL)
1681 return;
1682
1683 new_wa->next = area->next;
1684 new_wa->size = area->size - size;
1685 new_wa->address = area->address + size;
1686 new_wa->backup = NULL;
1687 new_wa->user = NULL;
1688 new_wa->free = true;
1689
1690 area->next = new_wa;
1691 area->size = size;
1692
1693 /* If backup memory was allocated to this area, it has the wrong size
1694 * now so free it and it will be reallocated if/when needed */
1695 if (area->backup) {
1696 free(area->backup);
1697 area->backup = NULL;
1698 }
1699 }
1700 }
1701
1702 /* Merge all adjacent free areas into one */
1703 static void target_merge_working_areas(struct target *target)
1704 {
1705 struct working_area *c = target->working_areas;
1706
1707 while (c && c->next) {
1708 assert(c->next->address == c->address + c->size); /* This is an invariant */
1709
1710 /* Find two adjacent free areas */
1711 if (c->free && c->next->free) {
1712 /* Merge the last into the first */
1713 c->size += c->next->size;
1714
1715 /* Remove the last */
1716 struct working_area *to_be_freed = c->next;
1717 c->next = c->next->next;
1718 if (to_be_freed->backup)
1719 free(to_be_freed->backup);
1720 free(to_be_freed);
1721
1722 /* If backup memory was allocated to the remaining area, it has
1723 * the wrong size now */
1724 if (c->backup) {
1725 free(c->backup);
1726 c->backup = NULL;
1727 }
1728 } else {
1729 c = c->next;
1730 }
1731 }
1732 }
1733
1734 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1735 {
1736 /* Reevaluate working area address based on MMU state */
1737 if (target->working_areas == NULL) {
1738 int retval;
1739 int enabled;
1740
1741 retval = target->type->mmu(target, &enabled);
1742 if (retval != ERROR_OK)
1743 return retval;
1744
1745 if (!enabled) {
1746 if (target->working_area_phys_spec) {
1747 LOG_DEBUG("MMU disabled, using physical "
1748 "address for working memory " TARGET_ADDR_FMT,
1749 target->working_area_phys);
1750 target->working_area = target->working_area_phys;
1751 } else {
1752 LOG_ERROR("No working memory available. "
1753 "Specify -work-area-phys to target.");
1754 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1755 }
1756 } else {
1757 if (target->working_area_virt_spec) {
1758 LOG_DEBUG("MMU enabled, using virtual "
1759 "address for working memory " TARGET_ADDR_FMT,
1760 target->working_area_virt);
1761 target->working_area = target->working_area_virt;
1762 } else {
1763 LOG_ERROR("No working memory available. "
1764 "Specify -work-area-virt to target.");
1765 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1766 }
1767 }
1768
1769 /* Set up initial working area on first call */
1770 struct working_area *new_wa = malloc(sizeof(*new_wa));
1771 if (new_wa) {
1772 new_wa->next = NULL;
1773 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
1774 new_wa->address = target->working_area;
1775 new_wa->backup = NULL;
1776 new_wa->user = NULL;
1777 new_wa->free = true;
1778 }
1779
1780 target->working_areas = new_wa;
1781 }
1782
1783 /* only allocate in multiples of 4 bytes */
1784 if (size % 4)
1785 size = (size + 3) & (~3UL);
1786
1787 struct working_area *c = target->working_areas;
1788
1789 /* Find the first large enough working area */
1790 while (c) {
1791 if (c->free && c->size >= size)
1792 break;
1793 c = c->next;
1794 }
1795
1796 if (c == NULL)
1797 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1798
1799 /* Split the working area into the requested size */
1800 target_split_working_area(c, size);
1801
1802 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
1803 size, c->address);
1804
1805 if (target->backup_working_area) {
1806 if (c->backup == NULL) {
1807 c->backup = malloc(c->size);
1808 if (c->backup == NULL)
1809 return ERROR_FAIL;
1810 }
1811
1812 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
1813 if (retval != ERROR_OK)
1814 return retval;
1815 }
1816
1817 /* mark as used, and return the new (reused) area */
1818 c->free = false;
1819 *area = c;
1820
1821 /* user pointer */
1822 c->user = area;
1823
1824 print_wa_layout(target);
1825
1826 return ERROR_OK;
1827 }
1828
1829 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1830 {
1831 int retval;
1832
1833 retval = target_alloc_working_area_try(target, size, area);
1834 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1835 LOG_WARNING("not enough working area available (requested %" PRIu32 ")", size);
1836 return retval;
1837
1838 }
1839
1840 static int target_restore_working_area(struct target *target, struct working_area *area)
1841 {
1842 int retval = ERROR_OK;
1843
1844 if (target->backup_working_area && area->backup != NULL) {
1845 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
1846 if (retval != ERROR_OK)
1847 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1848 area->size, area->address);
1849 }
1850
1851 return retval;
1852 }
1853
1854 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
1855 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1856 {
1857 int retval = ERROR_OK;
1858
1859 if (area->free)
1860 return retval;
1861
1862 if (restore) {
1863 retval = target_restore_working_area(target, area);
1864 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
1865 if (retval != ERROR_OK)
1866 return retval;
1867 }
1868
1869 area->free = true;
1870
1871 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1872 area->size, area->address);
1873
1874 /* mark user pointer invalid */
1875 /* TODO: Is this really safe? It points to some previous caller's memory.
1876 * How could we know that the area pointer is still in that place and not
1877 * some other vital data? What's the purpose of this, anyway? */
1878 *area->user = NULL;
1879 area->user = NULL;
1880
1881 target_merge_working_areas(target);
1882
1883 print_wa_layout(target);
1884
1885 return retval;
1886 }
1887
1888 int target_free_working_area(struct target *target, struct working_area *area)
1889 {
1890 return target_free_working_area_restore(target, area, 1);
1891 }
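
/* Typical use of the working area allocator (a sketch only; the 256-byte
 * size and the surrounding error handling are illustrative):
 *
 *   struct working_area *wa = NULL;
 *   int retval = target_alloc_working_area(target, 256, &wa);
 *   if (retval != ERROR_OK)
 *       return retval;
 *   ... stage data or run an algorithm at wa->address ...
 *   target_free_working_area(target, wa);
 */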
1892
1893 static void target_destroy(struct target *target)
1894 {
1895 if (target->type->deinit_target)
1896 target->type->deinit_target(target);
1897
1898 if (target->semihosting)
1899 free(target->semihosting);
1900
1901 jtag_unregister_event_callback(jtag_enable_callback, target);
1902
1903 struct target_event_action *teap = target->event_action;
1904 while (teap) {
1905 struct target_event_action *next = teap->next;
1906 Jim_DecrRefCount(teap->interp, teap->body);
1907 free(teap);
1908 teap = next;
1909 }
1910
1911 target_free_all_working_areas(target);
1912 /* Now we have none or only one working area marked as free */
1913 if (target->working_areas) {
1914 free(target->working_areas->backup);
1915 free(target->working_areas);
1916 }
1917
1918 /* release the target's SMP list */
1919 if (target->smp) {
1920 struct target_list *head = target->head;
1921 while (head != NULL) {
1922 struct target_list *pos = head->next;
1923 head->target->smp = 0;
1924 free(head);
1925 head = pos;
1926 }
1927 target->smp = 0;
1928 }
1929
1930 free(target->type);
1931 free(target->trace_info);
1932 free(target->fileio_info);
1933 free(target->cmd_name);
1934 free(target);
1935 }
1936
1937 void target_quit(void)
1938 {
1939 struct target_event_callback *pe = target_event_callbacks;
1940 while (pe) {
1941 struct target_event_callback *t = pe->next;
1942 free(pe);
1943 pe = t;
1944 }
1945 target_event_callbacks = NULL;
1946
1947 struct target_timer_callback *pt = target_timer_callbacks;
1948 while (pt) {
1949 struct target_timer_callback *t = pt->next;
1950 free(pt);
1951 pt = t;
1952 }
1953 target_timer_callbacks = NULL;
1954
1955 for (struct target *target = all_targets; target;) {
1956 struct target *tmp;
1957
1958 tmp = target->next;
1959 target_destroy(target);
1960 target = tmp;
1961 }
1962
1963 all_targets = NULL;
1964 }
1965
1966 /* Free resources and restore memory. If restoring memory fails,
1967 * free up the resources anyway.
1968 */
1969 static void target_free_all_working_areas_restore(struct target *target, int restore)
1970 {
1971 struct working_area *c = target->working_areas;
1972
1973 LOG_DEBUG("freeing all working areas");
1974
1975 /* Loop through all areas, restoring the allocated ones and marking them as free */
1976 while (c) {
1977 if (!c->free) {
1978 if (restore)
1979 target_restore_working_area(target, c);
1980 c->free = true;
1981 *c->user = NULL; /* Same as above */
1982 c->user = NULL;
1983 }
1984 c = c->next;
1985 }
1986
1987 /* Run a merge pass to combine all areas into one */
1988 target_merge_working_areas(target);
1989
1990 print_wa_layout(target);
1991 }
1992
1993 void target_free_all_working_areas(struct target *target)
1994 {
1995 target_free_all_working_areas_restore(target, 1);
1996 }
1997
1998 /* Find the largest number of bytes that can be allocated */
1999 uint32_t target_get_working_area_avail(struct target *target)
2000 {
2001 struct working_area *c = target->working_areas;
2002 uint32_t max_size = 0;
2003
2004 if (c == NULL)
2005 return target->working_area_size;
2006
2007 while (c) {
2008 if (c->free && max_size < c->size)
2009 max_size = c->size;
2010
2011 c = c->next;
2012 }
2013
2014 return max_size;
2015 }
2016
2017 int target_arch_state(struct target *target)
2018 {
2019 int retval;
2020 if (target == NULL) {
2021 LOG_WARNING("No target has been configured");
2022 return ERROR_OK;
2023 }
2024
2025 if (target->state != TARGET_HALTED)
2026 return ERROR_OK;
2027
2028 retval = target->type->arch_state(target);
2029 return retval;
2030 }
2031
2032 static int target_get_gdb_fileio_info_default(struct target *target,
2033 struct gdb_fileio_info *fileio_info)
2034 {
2035 /* If the target does not support semihosting, it has no need to
2036 provide a .get_gdb_fileio_info callback. This default just returns
2037 ERROR_FAIL, and gdb_server will then report "Txx" (target halted)
2038 every time. */
2039 return ERROR_FAIL;
2040 }
2041
2042 static int target_gdb_fileio_end_default(struct target *target,
2043 int retcode, int fileio_errno, bool ctrl_c)
2044 {
2045 return ERROR_OK;
2046 }
2047
2048 static int target_profiling_default(struct target *target, uint32_t *samples,
2049 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2050 {
2051 struct timeval timeout, now;
2052
2053 gettimeofday(&timeout, NULL);
2054 timeval_add_time(&timeout, seconds, 0);
2055
2056 LOG_INFO("Starting profiling. Halting and resuming the"
2057 " target as often as we can...");
2058
2059 uint32_t sample_count = 0;
2060 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2061 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
2062
2063 int retval = ERROR_OK;
2064 for (;;) {
2065 target_poll(target);
2066 if (target->state == TARGET_HALTED) {
2067 uint32_t t = buf_get_u32(reg->value, 0, 32);
2068 samples[sample_count++] = t;
2069 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2070 retval = target_resume(target, 1, 0, 0, 0);
2071 target_poll(target);
2072 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2073 } else if (target->state == TARGET_RUNNING) {
2074 /* We want to quickly sample the PC. */
2075 retval = target_halt(target);
2076 } else {
2077 LOG_INFO("Target not halted or running");
2078 retval = ERROR_OK;
2079 break;
2080 }
2081
2082 if (retval != ERROR_OK)
2083 break;
2084
2085 gettimeofday(&now, NULL);
2086 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2087 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2088 break;
2089 }
2090 }
2091
2092 *num_samples = sample_count;
2093 return retval;
2094 }
2095
2096 /* A single aligned halfword or word is guaranteed to use a 16 or 32 bit
2097 * access respectively; otherwise the data is transferred as quickly as
2098 * possible.
2099 */
2100 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2101 {
2102 LOG_DEBUG("writing buffer of %" PRIu32 " bytes at " TARGET_ADDR_FMT,
2103 size, address);
2104
2105 if (!target_was_examined(target)) {
2106 LOG_ERROR("Target not examined yet");
2107 return ERROR_FAIL;
2108 }
2109
2110 if (size == 0)
2111 return ERROR_OK;
2112
2113 if ((address + size - 1) < address) {
2114 /* GDB can request this when e.g. PC is 0xfffffffc */
2115 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2116 address,
2117 size);
2118 return ERROR_FAIL;
2119 }
2120
2121 return target->type->write_buffer(target, address, size, buffer);
2122 }
2123
2124 static int target_write_buffer_default(struct target *target,
2125 target_addr_t address, uint32_t count, const uint8_t *buffer)
2126 {
2127 uint32_t size;
2128
2129 /* Align the address up to a maximum of 4 bytes. The loop condition makes
2130 * sure the next pass still has enough data left for the access size it uses. */
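/* For example (a sketch): address = 0x1003 and count = 9 results in one
 * 1-byte write at 0x1003 followed by two 4-byte writes starting at 0x1004,
 * covering all 9 bytes with the widest accesses possible. */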
2131 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2132 if (address & size) {
2133 int retval = target_write_memory(target, address, size, 1, buffer);
2134 if (retval != ERROR_OK)
2135 return retval;
2136 address += size;
2137 count -= size;
2138 buffer += size;
2139 }
2140 }
2141
2142 /* Write the data with as large access size as possible. */
2143 for (; size > 0; size /= 2) {
2144 uint32_t aligned = count - count % size;
2145 if (aligned > 0) {
2146 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2147 if (retval != ERROR_OK)
2148 return retval;
2149 address += aligned;
2150 count -= aligned;
2151 buffer += aligned;
2152 }
2153 }
2154
2155 return ERROR_OK;
2156 }
2157
2158 /* A single aligned halfword or word is guaranteed to use a 16 or 32 bit
2159 * access respectively; otherwise the data is transferred as quickly as
2160 * possible.
2161 */
2162 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2163 {
2164 LOG_DEBUG("reading buffer of %" PRIu32 " bytes at " TARGET_ADDR_FMT,
2165 size, address);
2166
2167 if (!target_was_examined(target)) {
2168 LOG_ERROR("Target not examined yet");
2169 return ERROR_FAIL;
2170 }
2171
2172 if (size == 0)
2173 return ERROR_OK;
2174
2175 if ((address + size - 1) < address) {
2176 /* GDB can request this when e.g. PC is 0xfffffffc */
2177 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2178 address,
2179 size);
2180 return ERROR_FAIL;
2181 }
2182
2183 return target->type->read_buffer(target, address, size, buffer);
2184 }
2185
2186 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2187 {
2188 uint32_t size;
2189
2190 /* Align the address up to a maximum of 4 bytes. The loop condition makes
2191 * sure the next pass still has enough data left for the access size it uses. */
2192 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2193 if (address & size) {
2194 int retval = target_read_memory(target, address, size, 1, buffer);
2195 if (retval != ERROR_OK)
2196 return retval;
2197 address += size;
2198 count -= size;
2199 buffer += size;
2200 }
2201 }
2202
2203 /* Read the data with as large access size as possible. */
2204 for (; size > 0; size /= 2) {
2205 uint32_t aligned = count - count % size;
2206 if (aligned > 0) {
2207 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2208 if (retval != ERROR_OK)
2209 return retval;
2210 address += aligned;
2211 count -= aligned;
2212 buffer += aligned;
2213 }
2214 }
2215
2216 return ERROR_OK;
2217 }
2218
2219 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t* crc)
2220 {
2221 uint8_t *buffer;
2222 int retval;
2223 uint32_t i;
2224 uint32_t checksum = 0;
2225 if (!target_was_examined(target)) {
2226 LOG_ERROR("Target not examined yet");
2227 return ERROR_FAIL;
2228 }
2229
2230 retval = target->type->checksum_memory(target, address, size, &checksum);
2231 if (retval != ERROR_OK) {
2232 buffer = malloc(size);
2233 if (buffer == NULL) {
2234 LOG_ERROR("error allocating buffer for section (%" PRId32 " bytes)", size);
2235 return ERROR_COMMAND_SYNTAX_ERROR;
2236 }
2237 retval = target_read_buffer(target, address, size, buffer);
2238 if (retval != ERROR_OK) {
2239 free(buffer);
2240 return retval;
2241 }
2242
2243 /* convert to target endianness */
2244 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2245 uint32_t target_data;
2246 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2247 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2248 }
2249
2250 retval = image_calculate_checksum(buffer, size, &checksum);
2251 free(buffer);
2252 }
2253
2254 *crc = checksum;
2255
2256 return retval;
2257 }
2258
2259 int target_blank_check_memory(struct target *target,
2260 struct target_memory_check_block *blocks, int num_blocks,
2261 uint8_t erased_value)
2262 {
2263 if (!target_was_examined(target)) {
2264 LOG_ERROR("Target not examined yet");
2265 return ERROR_FAIL;
2266 }
2267
2268 if (target->type->blank_check_memory == NULL)
2269 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2270
2271 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2272 }
2273
2274 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2275 {
2276 uint8_t value_buf[8];
2277 if (!target_was_examined(target)) {
2278 LOG_ERROR("Target not examined yet");
2279 return ERROR_FAIL;
2280 }
2281
2282 int retval = target_read_memory(target, address, 8, 1, value_buf);
2283
2284 if (retval == ERROR_OK) {
2285 *value = target_buffer_get_u64(target, value_buf);
2286 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2287 address,
2288 *value);
2289 } else {
2290 *value = 0x0;
2291 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2292 address);
2293 }
2294
2295 return retval;
2296 }
2297
2298 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2299 {
2300 uint8_t value_buf[4];
2301 if (!target_was_examined(target)) {
2302 LOG_ERROR("Target not examined yet");
2303 return ERROR_FAIL;
2304 }
2305
2306 int retval = target_read_memory(target, address, 4, 1, value_buf);
2307
2308 if (retval == ERROR_OK) {
2309 *value = target_buffer_get_u32(target, value_buf);
2310 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2311 address,
2312 *value);
2313 } else {
2314 *value = 0x0;
2315 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2316 address);
2317 }
2318
2319 return retval;
2320 }
2321
2322 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2323 {
2324 uint8_t value_buf[2];
2325 if (!target_was_examined(target)) {
2326 LOG_ERROR("Target not examined yet");
2327 return ERROR_FAIL;
2328 }
2329
2330 int retval = target_read_memory(target, address, 2, 1, value_buf);
2331
2332 if (retval == ERROR_OK) {
2333 *value = target_buffer_get_u16(target, value_buf);
2334 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2335 address,
2336 *value);
2337 } else {
2338 *value = 0x0;
2339 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2340 address);
2341 }
2342
2343 return retval;
2344 }
2345
2346 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2347 {
2348 if (!target_was_examined(target)) {
2349 LOG_ERROR("Target not examined yet");
2350 return ERROR_FAIL;
2351 }
2352
2353 int retval = target_read_memory(target, address, 1, 1, value);
2354
2355 if (retval == ERROR_OK) {
2356 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2357 address,
2358 *value);
2359 } else {
2360 *value = 0x0;
2361 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2362 address);
2363 }
2364
2365 return retval;
2366 }
2367
2368 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2369 {
2370 int retval;
2371 uint8_t value_buf[8];
2372 if (!target_was_examined(target)) {
2373 LOG_ERROR("Target not examined yet");
2374 return ERROR_FAIL;
2375 }
2376
2377 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2378 address,
2379 value);
2380
2381 target_buffer_set_u64(target, value_buf, value);
2382 retval = target_write_memory(target, address, 8, 1, value_buf);
2383 if (retval != ERROR_OK)
2384 LOG_DEBUG("failed: %i", retval);
2385
2386 return retval;
2387 }
2388
2389 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2390 {
2391 int retval;
2392 uint8_t value_buf[4];
2393 if (!target_was_examined(target)) {
2394 LOG_ERROR("Target not examined yet");
2395 return ERROR_FAIL;
2396 }
2397
2398 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2399 address,
2400 value);
2401
2402 target_buffer_set_u32(target, value_buf, value);
2403 retval = target_write_memory(target, address, 4, 1, value_buf);
2404 if (retval != ERROR_OK)
2405 LOG_DEBUG("failed: %i", retval);
2406
2407 return retval;
2408 }
2409
2410 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2411 {
2412 int retval;
2413 uint8_t value_buf[2];
2414 if (!target_was_examined(target)) {
2415 LOG_ERROR("Target not examined yet");
2416 return ERROR_FAIL;
2417 }
2418
2419 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2420 address,
2421 value);
2422
2423 target_buffer_set_u16(target, value_buf, value);
2424 retval = target_write_memory(target, address, 2, 1, value_buf);
2425 if (retval != ERROR_OK)
2426 LOG_DEBUG("failed: %i", retval);
2427
2428 return retval;
2429 }
2430
2431 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2432 {
2433 int retval;
2434 if (!target_was_examined(target)) {
2435 LOG_ERROR("Target not examined yet");
2436 return ERROR_FAIL;
2437 }
2438
2439 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2440 address, value);
2441
2442 retval = target_write_memory(target, address, 1, 1, &value);
2443 if (retval != ERROR_OK)
2444 LOG_DEBUG("failed: %i", retval);
2445
2446 return retval;
2447 }
2448
2449 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2450 {
2451 int retval;
2452 uint8_t value_buf[8];
2453 if (!target_was_examined(target)) {
2454 LOG_ERROR("Target not examined yet");
2455 return ERROR_FAIL;
2456 }
2457
2458 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2459 address,
2460 value);
2461
2462 target_buffer_set_u64(target, value_buf, value);
2463 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2464 if (retval != ERROR_OK)
2465 LOG_DEBUG("failed: %i", retval);
2466
2467 return retval;
2468 }
2469
2470 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2471 {
2472 int retval;
2473 uint8_t value_buf[4];
2474 if (!target_was_examined(target)) {
2475 LOG_ERROR("Target not examined yet");
2476 return ERROR_FAIL;
2477 }
2478
2479 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2480 address,
2481 value);
2482
2483 target_buffer_set_u32(target, value_buf, value);
2484 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2485 if (retval != ERROR_OK)
2486 LOG_DEBUG("failed: %i", retval);
2487
2488 return retval;
2489 }
2490
2491 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2492 {
2493 int retval;
2494 uint8_t value_buf[2];
2495 if (!target_was_examined(target)) {
2496 LOG_ERROR("Target not examined yet");
2497 return ERROR_FAIL;
2498 }
2499
2500 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2501 address,
2502 value);
2503
2504 target_buffer_set_u16(target, value_buf, value);
2505 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2506 if (retval != ERROR_OK)
2507 LOG_DEBUG("failed: %i", retval);
2508
2509 return retval;
2510 }
2511
2512 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2513 {
2514 int retval;
2515 if (!target_was_examined(target)) {
2516 LOG_ERROR("Target not examined yet");
2517 return ERROR_FAIL;
2518 }
2519
2520 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2521 address, value);
2522
2523 retval = target_write_phys_memory(target, address, 1, 1, &value);
2524 if (retval != ERROR_OK)
2525 LOG_DEBUG("failed: %i", retval);
2526
2527 return retval;
2528 }
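
/* Typical use of the single-value accessors above (a sketch; the address is
 * a made-up example, not a real peripheral register):
 *
 *   uint32_t val;
 *   int retval = target_read_u32(target, 0x40001000, &val);
 *   if (retval == ERROR_OK)
 *       retval = target_write_u32(target, 0x40001000, val | 0x1);
 */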
2529
2530 static int find_target(struct command_context *cmd_ctx, const char *name)
2531 {
2532 struct target *target = get_target(name);
2533 if (target == NULL) {
2534 LOG_ERROR("Target: %s is unknown, try one of:\n", name);
2535 return ERROR_FAIL;
2536 }
2537 if (!target->tap->enabled) {
2538 LOG_USER("Target: TAP %s is disabled, "
2539 "can't be the current target\n",
2540 target->tap->dotted_name);
2541 return ERROR_FAIL;
2542 }
2543
2544 cmd_ctx->current_target = target;
2545 if (cmd_ctx->current_target_override)
2546 cmd_ctx->current_target_override = target;
2547
2548 return ERROR_OK;
2549 }
2550
2551
2552 COMMAND_HANDLER(handle_targets_command)
2553 {
2554 int retval = ERROR_OK;
2555 if (CMD_ARGC == 1) {
2556 retval = find_target(CMD_CTX, CMD_ARGV[0]);
2557 if (retval == ERROR_OK) {
2558 /* we're done! */
2559 return retval;
2560 }
2561 }
2562
2563 struct target *target = all_targets;
2564 command_print(CMD_CTX, " TargetName Type Endian TapName State ");
2565 command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
2566 while (target) {
2567 const char *state;
2568 char marker = ' ';
2569
2570 if (target->tap->enabled)
2571 state = target_state_name(target);
2572 else
2573 state = "tap-disabled";
2574
2575 if (CMD_CTX->current_target == target)
2576 marker = '*';
2577
2578 /* keep columns lined up to match the headers above */
2579 command_print(CMD_CTX,
2580 "%2d%c %-18s %-10s %-6s %-18s %s",
2581 target->target_number,
2582 marker,
2583 target_name(target),
2584 target_type_name(target),
2585 Jim_Nvp_value2name_simple(nvp_target_endian,
2586 target->endianness)->name,
2587 target->tap->dotted_name,
2588 state);
2589 target = target->next;
2590 }
2591
2592 return retval;
2593 }
2594
2595 /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
2596
2597 static int powerDropout;
2598 static int srstAsserted;
2599
2600 static int runPowerRestore;
2601 static int runPowerDropout;
2602 static int runSrstAsserted;
2603 static int runSrstDeasserted;
2604
2605 static int sense_handler(void)
2606 {
2607 static int prevSrstAsserted;
2608 static int prevPowerdropout;
2609
2610 int retval = jtag_power_dropout(&powerDropout);
2611 if (retval != ERROR_OK)
2612 return retval;
2613
2614 int powerRestored;
2615 powerRestored = prevPowerdropout && !powerDropout;
2616 if (powerRestored)
2617 runPowerRestore = 1;
2618
2619 int64_t current = timeval_ms();
2620 static int64_t lastPower;
2621 bool waitMore = lastPower + 2000 > current;
2622 if (powerDropout && !waitMore) {
2623 runPowerDropout = 1;
2624 lastPower = current;
2625 }
2626
2627 retval = jtag_srst_asserted(&srstAsserted);
2628 if (retval != ERROR_OK)
2629 return retval;
2630
2631 int srstDeasserted;
2632 srstDeasserted = prevSrstAsserted && !srstAsserted;
2633
2634 static int64_t lastSrst;
2635 waitMore = lastSrst + 2000 > current;
2636 if (srstDeasserted && !waitMore) {
2637 runSrstDeasserted = 1;
2638 lastSrst = current;
2639 }
2640
2641 if (!prevSrstAsserted && srstAsserted)
2642 runSrstAsserted = 1;
2643
2644 prevSrstAsserted = srstAsserted;
2645 prevPowerdropout = powerDropout;
2646
2647 if (srstDeasserted || powerRestored) {
2648 /* Other than logging the event we can't do anything here.
2649 * Issuing a reset is a particularly bad idea as we might
2650 * be inside a reset already.
2651 */
2652 }
2653
2654 return ERROR_OK;
2655 }
2656
2657 /* process target state changes */
2658 static int handle_target(void *priv)
2659 {
2660 Jim_Interp *interp = (Jim_Interp *)priv;
2661 int retval = ERROR_OK;
2662
2663 if (!is_jtag_poll_safe()) {
2664 /* polling is disabled currently */
2665 return ERROR_OK;
2666 }
2667
2668 /* we do not want to recurse here... */
2669 static int recursive;
2670 if (!recursive) {
2671 recursive = 1;
2672 sense_handler();
2673 /* danger! running these procedures can trigger srst assertions and power dropouts.
2674 * We need to avoid an infinite loop/recursion here and we do that by
2675 * clearing the flags after running these events.
2676 */
2677 int did_something = 0;
2678 if (runSrstAsserted) {
2679 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2680 Jim_Eval(interp, "srst_asserted");
2681 did_something = 1;
2682 }
2683 if (runSrstDeasserted) {
2684 Jim_Eval(interp, "srst_deasserted");
2685 did_something = 1;
2686 }
2687 if (runPowerDropout) {
2688 LOG_INFO("Power dropout detected, running power_dropout proc.");
2689 Jim_Eval(interp, "power_dropout");
2690 did_something = 1;
2691 }
2692 if (runPowerRestore) {
2693 Jim_Eval(interp, "power_restore");
2694 did_something = 1;
2695 }
2696
2697 if (did_something) {
2698 /* clear detect flags */
2699 sense_handler();
2700 }
2701
2702 /* clear action flags */
2703
2704 runSrstAsserted = 0;
2705 runSrstDeasserted = 0;
2706 runPowerRestore = 0;
2707 runPowerDropout = 0;
2708
2709 recursive = 0;
2710 }
2711
2712 /* Poll targets for state changes unless that's globally disabled.
2713 * Skip targets that are currently disabled.
2714 */
2715 for (struct target *target = all_targets;
2716 is_jtag_poll_safe() && target;
2717 target = target->next) {
2718
2719 if (!target_was_examined(target))
2720 continue;
2721
2722 if (!target->tap->enabled)
2723 continue;
2724
2725 if (target->backoff.times > target->backoff.count) {
2726 /* do not poll this time as we failed previously */
2727 target->backoff.count++;
2728 continue;
2729 }
2730 target->backoff.count = 0;
2731
2732 /* only poll target if we've got power and srst isn't asserted */
2733 if (!powerDropout && !srstAsserted) {
2734 /* polling may fail silently until the target has been examined */
2735 retval = target_poll(target);
2736 if (retval != ERROR_OK) {
2737 /* 100ms polling interval. Increase interval between polling up to 5000ms */
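/* backoff.times roughly doubles after each failed poll (1, 3, 7, 15, ...
 * skipped poll periods), so with the 100ms interval mentioned above the
 * retry delay keeps growing until it reaches roughly 5 seconds. */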
2738 if (target->backoff.times * polling_interval < 5000) {
2739 target->backoff.times *= 2;
2740 target->backoff.times++;
2741 }
2742
2743 /* Tell GDB to halt the debugger. This allows the user to
2744 * run monitor commands to handle the situation.
2745 */
2746 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2747 }
2748 if (target->backoff.times > 0) {
2749 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
2750 target_reset_examined(target);
2751 retval = target_examine_one(target);
2752 /* Target examination could have failed due to unstable connection,
2753 * but we set the examined flag anyway to repoll it later */
2754 if (retval != ERROR_OK) {
2755 target->examined = true;
2756 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
2757 target->backoff.times * polling_interval);
2758 return retval;
2759 }
2760 }
2761
2762 /* Since we succeeded, we reset backoff count */
2763 target->backoff.times = 0;
2764 }
2765 }
2766
2767 return retval;
2768 }
2769
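/* Example "reg" invocations (a sketch; register names depend on the target):
 *   reg              list all registers and any cached values
 *   reg pc           read and display the program counter
 *   reg pc 0x1000    write 0x1000 to the program counter
 *   reg 0 force      re-read register number 0 from the target
 */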
2770 COMMAND_HANDLER(handle_reg_command)
2771 {
2772 struct target *target;
2773 struct reg *reg = NULL;
2774 unsigned count = 0;
2775 char *value;
2776
2777 LOG_DEBUG("-");
2778
2779 target = get_current_target(CMD_CTX);
2780
2781 /* list all available registers for the current target */
2782 if (CMD_ARGC == 0) {
2783 struct reg_cache *cache = target->reg_cache;
2784
2785 count = 0;
2786 while (cache) {
2787 unsigned i;
2788
2789 command_print(CMD_CTX, "===== %s", cache->name);
2790
2791 for (i = 0, reg = cache->reg_list;
2792 i < cache->num_regs;
2793 i++, reg++, count++) {
2794 /* only print cached values if they are valid */
2795 if (reg->valid) {
2796 value = buf_to_str(reg->value,
2797 reg->size, 16);
2798 command_print(CMD_CTX,
2799 "(%i) %s (/%" PRIu32 "): 0x%s%s",
2800 count, reg->name,
2801 reg->size, value,
2802 reg->dirty
2803 ? " (dirty)"
2804 : "");
2805 free(value);
2806 } else {
2807 command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
2808 count, reg->name,
2809 reg->size) ;
2810 }
2811 }
2812 cache = cache->next;
2813 }
2814
2815 return ERROR_OK;
2816 }
2817
2818 /* access a single register by its ordinal number */
2819 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
2820 unsigned num;
2821 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
2822
2823 struct reg_cache *cache = target->reg_cache;
2824 count = 0;
2825 while (cache) {
2826 unsigned i;
2827 for (i = 0; i < cache->num_regs; i++) {
2828 if (count++ == num) {
2829 reg = &cache->reg_list[i];
2830 break;
2831 }
2832 }
2833 if (reg)
2834 break;
2835 cache = cache->next;
2836 }
2837
2838 if (!reg) {
2839 command_print(CMD_CTX, "%i is out of bounds, the current target "
2840 "has only %i registers (0 - %i)", num, count, count - 1);
2841 return ERROR_OK;
2842 }
2843 } else {
2844 /* access a single register by its name */
2845 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
2846
2847 if (!reg) {
2848 command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
2849 return ERROR_OK;
2850 }
2851 }
2852
2853 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
2854
2855 /* display a register */
2856 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
2857 && (CMD_ARGV[1][0] <= '9')))) {
2858 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
2859 reg->valid = 0;
2860
2861 if (reg->valid == 0)
2862 reg->type->get(reg);
2863 value = buf_to_str(reg->value, reg->size, 16);
2864 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2865 free(value);
2866 return ERROR_OK;
2867 }
2868
2869 /* set register value */
2870 if (CMD_ARGC == 2) {
2871 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
2872 if (buf == NULL)
2873 return ERROR_FAIL;
2874 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
2875
2876 reg->type->set(reg, buf);
2877
2878 value = buf_to_str(reg->value, reg->size, 16);
2879 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2880 free(value);
2881
2882 free(buf);
2883
2884 return ERROR_OK;
2885 }
2886
2887 return ERROR_COMMAND_SYNTAX_ERROR;
2888 }
2889
2890 COMMAND_HANDLER(handle_poll_command)
2891 {
2892 int retval = ERROR_OK;
2893 struct target *target = get_current_target(CMD_CTX);
2894
2895 if (CMD_ARGC == 0) {
2896 command_print(CMD_CTX, "background polling: %s",
2897 jtag_poll_get_enabled() ? "on" : "off");
2898 command_print(CMD_CTX, "TAP: %s (%s)",
2899 target->tap->dotted_name,
2900 target->tap->enabled ? "enabled" : "disabled");
2901 if (!target->tap->enabled)
2902 return ERROR_OK;
2903 retval = target_poll(target);
2904 if (retval != ERROR_OK)
2905 return retval;
2906 retval = target_arch_state(target);
2907 if (retval != ERROR_OK)
2908 return retval;
2909 } else if (CMD_ARGC == 1) {
2910 bool enable;
2911 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2912 jtag_poll_set_enabled(enable);
2913 } else
2914 return ERROR_COMMAND_SYNTAX_ERROR;
2915
2916 return retval;
2917 }
2918
2919 COMMAND_HANDLER(handle_wait_halt_command)
2920 {
2921 if (CMD_ARGC > 1)
2922 return ERROR_COMMAND_SYNTAX_ERROR;
2923
2924 unsigned ms = DEFAULT_HALT_TIMEOUT;
2925 if (1 == CMD_ARGC) {
2926 int retval = parse_uint(CMD_ARGV[0], &ms);
2927 if (ERROR_OK != retval)
2928 return ERROR_COMMAND_SYNTAX_ERROR;
2929 }
2930
2931 struct target *target = get_current_target(CMD_CTX);
2932 return target_wait_state(target, TARGET_HALTED, ms);
2933 }
2934
2935 /* wait for target state to change. The trick here is to have a low
2936 * latency for short waits and not to suck up all the CPU time
2937 * on longer waits.
2938 *
2939 * After 500ms, keep_alive() is invoked
2940 */
2941 int target_wait_state(struct target *target, enum target_state state, int ms)
2942 {
2943 int retval;
2944 int64_t then = 0, cur;
2945 bool once = true;
2946
2947 for (;;) {
2948 retval = target_poll(target);
2949 if (retval != ERROR_OK)
2950 return retval;
2951 if (target->state == state)
2952 break;
2953 cur = timeval_ms();
2954 if (once) {
2955 once = false;
2956 then = timeval_ms();
2957 LOG_DEBUG("waiting for target %s...",
2958 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2959 }
2960
2961 if (cur-then > 500)
2962 keep_alive();
2963
2964 if ((cur-then) > ms) {
2965 LOG_ERROR("timed out while waiting for target %s",
2966 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2967 return ERROR_FAIL;
2968 }
2969 }
2970
2971 return ERROR_OK;
2972 }
2973
2974 COMMAND_HANDLER(handle_halt_command)
2975 {
2976 LOG_DEBUG("-");
2977
2978 struct target *target = get_current_target(CMD_CTX);
2979
2980 target->verbose_halt_msg = true;
2981
2982 int retval = target_halt(target);
2983 if (ERROR_OK != retval)
2984 return retval;
2985
2986 if (CMD_ARGC == 1) {
2987 unsigned wait_local;
2988 retval = parse_uint(CMD_ARGV[0], &wait_local);
2989 if (ERROR_OK != retval)
2990 return ERROR_COMMAND_SYNTAX_ERROR;
2991 if (!wait_local)
2992 return ERROR_OK;
2993 }
2994
2995 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2996 }
2997
2998 COMMAND_HANDLER(handle_soft_reset_halt_command)
2999 {
3000 struct target *target = get_current_target(CMD_CTX);
3001
3002 LOG_USER("requesting target halt and executing a soft reset");
3003
3004 target_soft_reset_halt(target);
3005
3006 return ERROR_OK;
3007 }
3008
3009 COMMAND_HANDLER(handle_reset_command)
3010 {
3011 if (CMD_ARGC > 1)
3012 return ERROR_COMMAND_SYNTAX_ERROR;
3013
3014 enum target_reset_mode reset_mode = RESET_RUN;
3015 if (CMD_ARGC == 1) {
3016 const Jim_Nvp *n;
3017 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3018 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
3019 return ERROR_COMMAND_SYNTAX_ERROR;
3020 reset_mode = n->value;
3021 }
3022
3023 /* reset *all* targets */
3024 return target_process_reset(CMD_CTX, reset_mode);
3025 }
3026
3027
3028 COMMAND_HANDLER(handle_resume_command)
3029 {
3030 int current = 1;
3031 if (CMD_ARGC > 1)
3032 return ERROR_COMMAND_SYNTAX_ERROR;
3033
3034 struct target *target = get_current_target(CMD_CTX);
3035
3036 /* with no CMD_ARGV, resume from current pc, addr = 0,
3037 * with one arguments, addr = CMD_ARGV[0],
3038 * handle breakpoints, not debugging */
3039 target_addr_t addr = 0;
3040 if (CMD_ARGC == 1) {
3041 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3042 current = 0;
3043 }
3044
3045 return target_resume(target, current, addr, 1, 0);
3046 }
3047
3048 COMMAND_HANDLER(handle_step_command)
3049 {
3050 if (CMD_ARGC > 1)
3051 return ERROR_COMMAND_SYNTAX_ERROR;
3052
3053 LOG_DEBUG("-");
3054
3055 /* with no CMD_ARGV, step from current pc, addr = 0,
3056 * with one argument addr = CMD_ARGV[0],
3057 * handle breakpoints, debugging */
3058 target_addr_t addr = 0;
3059 int current_pc = 1;
3060 if (CMD_ARGC == 1) {
3061 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3062 current_pc = 0;
3063 }
3064
3065 struct target *target = get_current_target(CMD_CTX);
3066
3067 return target->type->step(target, current_pc, addr, 1);
3068 }
3069
3070 static void handle_md_output(struct command_context *cmd_ctx,
3071 struct target *target, target_addr_t address, unsigned size,
3072 unsigned count, const uint8_t *buffer)
3073 {
3074 const unsigned line_bytecnt = 32;
3075 unsigned line_modulo = line_bytecnt / size;
3076
3077 char output[line_bytecnt * 4 + 1];
3078 unsigned output_len = 0;
3079
3080 const char *value_fmt;
3081 switch (size) {
3082 case 8:
3083 value_fmt = "%16.16"PRIx64" ";
3084 break;
3085 case 4:
3086 value_fmt = "%8.8"PRIx64" ";
3087 break;
3088 case 2:
3089 value_fmt = "%4.4"PRIx64" ";
3090 break;
3091 case 1:
3092 value_fmt = "%2.2"PRIx64" ";
3093 break;
3094 default:
3095 /* "can't happen", caller checked */
3096 LOG_ERROR("invalid memory read size: %u", size);
3097 return;
3098 }
3099
3100 for (unsigned i = 0; i < count; i++) {
3101 if (i % line_modulo == 0) {
3102 output_len += snprintf(output + output_len,
3103 sizeof(output) - output_len,
3104 TARGET_ADDR_FMT ": ",
3105 (address + (i * size)));
3106 }
3107
3108 uint64_t value = 0;
3109 const uint8_t *value_ptr = buffer + i * size;
3110 switch (size) {
3111 case 8:
3112 value = target_buffer_get_u64(target, value_ptr);
3113 break;
3114 case 4:
3115 value = target_buffer_get_u32(target, value_ptr);
3116 break;
3117 case 2:
3118 value = target_buffer_get_u16(target, value_ptr);
3119 break;
3120 case 1:
3121 value = *value_ptr;
3122 }
3123 output_len += snprintf(output + output_len,
3124 sizeof(output) - output_len,
3125 value_fmt, value);
3126
3127 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3128 command_print(cmd_ctx, "%s", output);
3129 output_len = 0;
3130 }
3131 }
3132 }
3133
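/* Example invocations (a sketch; addresses are made up):
 *   mdw 0x20000000 4        read four 32-bit words
 *   mdh 0x20000000          read one 16-bit halfword
 *   mdw phys 0x20000000 4   bypass the MMU and read physical memory
 */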
3134 COMMAND_HANDLER(handle_md_command)
3135 {
3136 if (CMD_ARGC < 1)
3137 return ERROR_COMMAND_SYNTAX_ERROR;
3138
3139 unsigned size = 0;
3140 switch (CMD_NAME[2]) {
3141 case 'd':
3142 size = 8;
3143 break;
3144 case 'w':
3145 size = 4;
3146 break;
3147 case 'h':
3148 size = 2;
3149 break;
3150 case 'b':
3151 size = 1;
3152 break;
3153 default:
3154 return ERROR_COMMAND_SYNTAX_ERROR;
3155 }
3156
3157 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3158 int (*fn)(struct target *target,
3159 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3160 if (physical) {
3161 CMD_ARGC--;
3162 CMD_ARGV++;
3163 fn = target_read_phys_memory;
3164 } else
3165 fn = target_read_memory;
3166 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3167 return ERROR_COMMAND_SYNTAX_ERROR;
3168
3169 target_addr_t address;
3170 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3171
3172 unsigned count = 1;
3173 if (CMD_ARGC == 2)
3174 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3175
3176 uint8_t *buffer = calloc(count, size);
3177 if (buffer == NULL) {
3178 LOG_ERROR("Failed to allocate md read buffer");
3179 return ERROR_FAIL;
3180 }
3181
3182 struct target *target = get_current_target(CMD_CTX);
3183 int retval = fn(target, address, size, count, buffer);
3184 if (ERROR_OK == retval)
3185 handle_md_output(CMD_CTX, target, address, size, count, buffer);
3186
3187 free(buffer);
3188
3189 return retval;
3190 }
3191
3192 typedef int (*target_write_fn)(struct target *target,
3193 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3194
3195 static int target_fill_mem(struct target *target,
3196 target_addr_t address,
3197 target_write_fn fn,
3198 unsigned data_size,
3199 /* value */
3200 uint64_t b,
3201 /* count */
3202 unsigned c)
3203 {
3204 /* We have to write in reasonably large chunks to be able
3205 * to fill large memory areas with any sane speed */
3206 const unsigned chunk_size = 16384;
3207 uint8_t *target_buf = malloc(chunk_size * data_size);
3208 if (target_buf == NULL) {
3209 LOG_ERROR("Out of memory");
3210 return ERROR_FAIL;
3211 }
3212
3213 for (unsigned i = 0; i < chunk_size; i++) {
3214 switch (data_size) {
3215 case 8:
3216 target_buffer_set_u64(target, target_buf + i * data_size, b);
3217 break;
3218 case 4:
3219 target_buffer_set_u32(target, target_buf + i * data_size, b);
3220 break;
3221 case 2:
3222 target_buffer_set_u16(target, target_buf + i * data_size, b);
3223 break;
3224 case 1:
3225 target_buffer_set_u8(target, target_buf + i * data_size, b);
3226 break;
3227 default:
3228 exit(-1);
3229 }
3230 }
3231
3232 int retval = ERROR_OK;
3233
3234 for (unsigned x = 0; x < c; x += chunk_size) {
3235 unsigned current;
3236 current = c - x;
3237 if (current > chunk_size)
3238 current = chunk_size;
3239 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3240 if (retval != ERROR_OK)
3241 break;
3242 /* avoid GDB timeouts */
3243 keep_alive();
3244 }
3245 free(target_buf);
3246
3247 return retval;
3248 }
3249
3250
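/* Example invocations (a sketch; addresses and values are made up):
 *   mww 0x20000000 0xdeadbeef       write one 32-bit word
 *   mww 0x20000000 0x0 16           fill 16 words with zero
 *   mwb phys 0x20000000 0xff        write one byte to physical memory
 */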
3251 COMMAND_HANDLER(handle_mw_command)
3252 {
3253 if (CMD_ARGC < 2)
3254 return ERROR_COMMAND_SYNTAX_ERROR;
3255 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3256 target_write_fn fn;
3257 if (physical) {
3258 CMD_ARGC--;
3259 CMD_ARGV++;
3260 fn = target_write_phys_memory;
3261 } else
3262 fn = target_write_memory;
3263 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3264 return ERROR_COMMAND_SYNTAX_ERROR;
3265
3266 target_addr_t address;
3267 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3268
3269 target_addr_t value;
3270 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], value);
3271
3272 unsigned count = 1;
3273 if (CMD_ARGC == 3)
3274 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3275
3276 struct target *target = get_current_target(CMD_CTX);
3277 unsigned wordsize;
3278 switch (CMD_NAME[2]) {
3279 case 'd':
3280 wordsize = 8;
3281 break;
3282 case 'w':
3283 wordsize = 4;
3284 break;
3285 case 'h':
3286 wordsize = 2;
3287 break;
3288 case 'b':
3289 wordsize = 1;
3290 break;
3291 default:
3292 return ERROR_COMMAND_SYNTAX_ERROR;
3293 }
3294
3295 return target_fill_mem(target, address, fn, wordsize, value, count);
3296 }
3297
3298 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
3299 target_addr_t *min_address, target_addr_t *max_address)
3300 {
3301 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3302 return ERROR_COMMAND_SYNTAX_ERROR;
3303
3304 /* a base address isn't always necessary,
3305 * default to 0x0 (i.e. don't relocate) */
3306 if (CMD_ARGC >= 2) {
3307 target_addr_t addr;
3308 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3309 image->base_address = addr;
3310 image->base_address_set = 1;
3311 } else
3312 image->base_address_set = 0;
3313
3314 image->start_address_set = 0;
3315
3316 if (CMD_ARGC >= 4)
3317 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3318 if (CMD_ARGC == 5) {
3319 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3320 /* use size (given) to find max (required) */
3321 *max_address += *min_address;
3322 }
3323
3324 if (*min_address > *max_address)
3325 return ERROR_COMMAND_SYNTAX_ERROR;
3326
3327 return ERROR_OK;
3328 }
3329
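/* Example invocations (a sketch; file names and addresses are made up):
 *   load_image firmware.bin 0x08000000 bin
 *   load_image app.elf 0 elf 0x20000000 0x10000
 * The optional last two arguments give a minimum address and a size that
 * restrict which part of the image is actually written.
 */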
3330 COMMAND_HANDLER(handle_load_image_command)
3331 {
3332 uint8_t *buffer;
3333 size_t buf_cnt;
3334 uint32_t image_size;
3335 target_addr_t min_address = 0;
3336 target_addr_t max_address = -1;
3337 int i;
3338 struct image image;
3339
3340 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
3341 &image, &min_address, &max_address);
3342 if (ERROR_OK != retval)
3343 return retval;
3344
3345 struct target *target = get_current_target(CMD_CTX);
3346
3347 struct duration bench;
3348 duration_start(&bench);
3349
3350 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3351 return ERROR_FAIL;
3352
3353 image_size = 0x0;
3354 retval = ERROR_OK;
3355 for (i = 0; i < image.num_sections; i++) {
3356 buffer = malloc(image.sections[i].size);
3357 if (buffer == NULL) {
3358 command_print(CMD_CTX,
3359 "error allocating buffer for section (%d bytes)",
3360 (int)(image.sections[i].size));
3361 retval = ERROR_FAIL;
3362 break;
3363 }
3364
3365 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3366 if (retval != ERROR_OK) {
3367 free(buffer);
3368 break;
3369 }
3370
3371 uint32_t offset = 0;
3372 uint32_t length = buf_cnt;
3373
3374 /* DANGER!!! beware of unsigned comparison here!!! */
3375
3376 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3377 (image.sections[i].base_address < max_address)) {
3378
3379 if (image.sections[i].base_address < min_address) {
3380 /* clip addresses below */
3381 offset += min_address-image.sections[i].base_address;
3382 length -= offset;
3383 }
3384
3385 if (image.sections[i].base_address + buf_cnt > max_address)
3386 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3387
3388 retval = target_write_buffer(target,
3389 image.sections[i].base_address + offset, length, buffer + offset);
3390 if (retval != ERROR_OK) {
3391 free(buffer);
3392 break;
3393 }
3394 image_size += length;
3395 command_print(CMD_CTX, "%u bytes written at address " TARGET_ADDR_FMT "",
3396 (unsigned int)length,
3397 image.sections[i].base_address + offset);
3398 }
3399
3400 free(buffer);
3401 }
3402
3403 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3404 command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
3405 "in %fs (%0.3f KiB/s)", image_size,
3406 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3407 }
3408
3409 image_close(&image);
3410
3411 return retval;
3412
3413 }
3414
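/* Example (a sketch): dump 4 KiB of target memory starting at 0x20000000:
 *   dump_image ram.bin 0x20000000 0x1000
 */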
3415 COMMAND_HANDLER(handle_dump_image_command)
3416 {
3417 struct fileio *fileio;
3418 uint8_t *buffer;
3419 int retval, retvaltemp;
3420 target_addr_t address, size;
3421 struct duration bench;
3422 struct target *target = get_current_target(CMD_CTX);
3423
3424 if (CMD_ARGC != 3)
3425 return ERROR_COMMAND_SYNTAX_ERROR;
3426
3427 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3428 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3429
3430 uint32_t buf_size = (size > 4096) ? 4096 : size;
3431 buffer = malloc(buf_size);
3432 if (!buffer)
3433 return ERROR_FAIL;
3434
3435 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3436 if (retval != ERROR_OK) {
3437 free(buffer);
3438 return retval;
3439 }
3440
3441 duration_start(&bench);
3442
3443 while (size > 0) {
3444 size_t size_written;
3445 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3446 retval = target_read_buffer(target, address, this_run_size, buffer);
3447 if (retval != ERROR_OK)
3448 break;
3449
3450 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3451 if (retval != ERROR_OK)
3452 break;
3453
3454 size -= this_run_size;
3455 address += this_run_size;
3456 }
3457
3458 free(buffer);
3459
3460 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3461 size_t filesize;
3462 retval = fileio_size(fileio, &filesize);
3463 if (retval != ERROR_OK)
3464 return retval;
3465 command_print(CMD_CTX,
3466 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3467 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3468 }
3469
3470 retvaltemp = fileio_close(fileio);
3471 if (retvaltemp != ERROR_OK)
3472 return retvaltemp;
3473
3474 return retval;
3475 }
3476
3477 enum verify_mode {
3478 IMAGE_TEST = 0,
3479 IMAGE_VERIFY = 1,
3480 IMAGE_CHECKSUM_ONLY = 2
3481 };
3482
3483 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3484 {
3485 uint8_t *buffer;
3486 size_t buf_cnt;
3487 uint32_t image_size;
3488 int i;
3489 int retval;
3490 uint32_t checksum = 0;
3491 uint32_t mem_checksum = 0;
3492
3493 struct image image;
3494
3495 struct target *target = get_current_target(CMD_CTX);
3496
3497 if (CMD_ARGC < 1)
3498 return ERROR_COMMAND_SYNTAX_ERROR;
3499
3500 if (!target) {
3501 LOG_ERROR("no target selected");
3502 return ERROR_FAIL;
3503 }
3504
3505 struct duration bench;
3506 duration_start(&bench);
3507
3508 if (CMD_ARGC >= 2) {
3509 target_addr_t addr;
3510 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3511 image.base_address = addr;
3512 image.base_address_set = 1;
3513 } else {
3514 image.base_address_set = 0;
3515 image.base_address = 0x0;
3516 }
3517
3518 image.start_address_set = 0;
3519
3520 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3521 if (retval != ERROR_OK)
3522 return retval;
3523
3524 image_size = 0x0;
3525 int diffs = 0;
3526 retval = ERROR_OK;
3527 for (i = 0; i < image.num_sections; i++) {
3528 buffer = malloc(image.sections[i].size);
3529 if (buffer == NULL) {
3530 command_print(CMD_CTX,
3531 "error allocating buffer for section (%d bytes)",
3532 (int)(image.sections[i].size));
3533 break;
3534 }
3535 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3536 if (retval != ERROR_OK) {
3537 free(buffer);
3538 break;
3539 }
3540
3541 if (verify >= IMAGE_VERIFY) {
3542 /* calculate checksum of image */
3543 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3544 if (retval != ERROR_OK) {
3545 free(buffer);
3546 break;
3547 }
3548
3549 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3550 if (retval != ERROR_OK) {
3551 free(buffer);
3552 break;
3553 }
3554 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3555 LOG_ERROR("checksum mismatch");
3556 free(buffer);
3557 retval = ERROR_FAIL;
3558 goto done;
3559 }
3560 if (checksum != mem_checksum) {
3561 /* failed crc checksum, fall back to a binary compare */
3562 uint8_t *data;
3563
3564 if (diffs == 0)
3565 LOG_ERROR("checksum mismatch - attempting binary compare");
3566
3567 data = malloc(buf_cnt);
3568
3569 /* Can we use 32bit word accesses? */
3570 int size = 1;
3571 int count = buf_cnt;
3572 if ((count % 4) == 0) {
3573 size *= 4;
3574 count /= 4;
3575 }
3576 retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
3577 if (retval == ERROR_OK) {
3578 uint32_t t;
3579 for (t = 0; t < buf_cnt; t++) {
3580 if (data[t] != buffer[t]) {
3581 command_print(CMD_CTX,
3582 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3583 diffs,
3584 (unsigned)(t + image.sections[i].base_address),
3585 data[t],
3586 buffer[t]);
3587 if (diffs++ >= 127) {
3588 command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
3589 free(data);
3590 free(buffer);
3591 goto done;
3592 }
3593 }
3594 keep_alive();
3595 }
3596 }
3597 free(data);
3598 }
3599 } else {
3600 command_print(CMD_CTX, "address " TARGET_ADDR_FMT " length 0x%08zx",
3601 image.sections[i].base_address,
3602 buf_cnt);
3603 }
3604
3605 free(buffer);
3606 image_size += buf_cnt;
3607 }
3608 if (diffs > 0)
3609 command_print(CMD_CTX, "No more differences found.");
3610 done:
3611 if (diffs > 0)
3612 retval = ERROR_FAIL;
3613 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3614 command_print(CMD_CTX, "verified %" PRIu32 " bytes "
3615 "in %fs (%0.3f KiB/s)", image_size,
3616 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3617 }
3618
3619 image_close(&image);
3620
3621 return retval;
3622 }
3623
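/* Example (a sketch): verify target memory against an image file:
 *   verify_image firmware.bin 0x08000000 bin
 * verify_image_checksum stops at the CRC comparison, while test_image only
 * parses the image and prints the section addresses without touching the
 * target.
 */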
3624 COMMAND_HANDLER(handle_verify_image_checksum_command)
3625 {
3626 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3627 }
3628
3629 COMMAND_HANDLER(handle_verify_image_command)
3630 {
3631 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3632 }
3633
3634 COMMAND_HANDLER(handle_test_image_command)
3635 {
3636 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3637 }
3638
3639 static int handle_bp_command_list(struct command_context *cmd_ctx)
3640 {
3641 struct target *target = get_current_target(cmd_ctx);
3642 struct breakpoint *breakpoint = target->breakpoints;
3643 while (breakpoint) {
3644 if (breakpoint->type == BKPT_SOFT) {
3645 char *buf = buf_to_str(breakpoint->orig_instr,
3646 breakpoint->length, 16);
3647 command_print(cmd_ctx, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3648 breakpoint->address,
3649 breakpoint->length,
3650 breakpoint->set, buf);
3651 free(buf);
3652 } else {
3653 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3654 command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3655 breakpoint->asid,
3656 breakpoint->length, breakpoint->set);
3657 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3658 command_print(cmd_ctx, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3659 breakpoint->address,
3660 breakpoint->length, breakpoint->set);
3661 command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3662 breakpoint->asid);
3663 } else
3664 command_print(cmd_ctx, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3665 breakpoint->address,
3666 breakpoint->length, breakpoint->set);
3667 }
3668
3669 breakpoint = breakpoint->next;
3670 }
3671 return ERROR_OK;
3672 }
3673
3674 static int handle_bp_command_set(struct command_context *cmd_ctx,
3675 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3676 {
3677 struct target *target = get_current_target(cmd_ctx);
3678 int retval;
3679
3680 if (asid == 0) {
3681 retval = breakpoint_add(target, addr, length, hw);
3682 if (ERROR_OK == retval)
3683 command_print(cmd_ctx, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3684 else {
3685 LOG_ERROR("Failure setting breakpoint, the same address (IVA) is already used");
3686 return retval;
3687 }
3688 } else if (addr == 0) {
3689 if (target->type->add_context_breakpoint == NULL) {
3690 LOG_WARNING("Context breakpoint not available");
3691 return ERROR_OK;
3692 }
3693 retval = context_breakpoint_add(target, asid, length, hw);
3694 if (ERROR_OK == retval)
3695 command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3696 else {
3697 LOG_ERROR("Failure setting breakpoint, the same address (CONTEXTID) is already used");
3698 return retval;
3699 }
3700 } else {
3701 if (target->type->add_hybrid_breakpoint == NULL) {
3702 LOG_WARNING("Hybrid breakpoint not available");
3703 return ERROR_OK;
3704 }
3705 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3706 if (ERROR_OK == retval)
3707 command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3708 else {
3709 LOG_ERROR("Failure setting breakpoint, the same address is already used");
3710 return retval;
3711 }
3712 }
3713 return ERROR_OK;
3714 }
3715
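/* Example invocations (a sketch; addresses and ASIDs are made up):
 *   bp 0x08000100 2            software breakpoint, 2 bytes long
 *   bp 0x08000100 2 hw         hardware breakpoint
 *   bp 0x10 4 hw_ctx           context breakpoint on ASID 0x10
 *   bp 0x08000100 0x10 2 hw    hybrid breakpoint (address plus ASID)
 */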
3716 COMMAND_HANDLER(handle_bp_command)
3717 {
3718 target_addr_t addr;
3719 uint32_t asid;
3720 uint32_t length;
3721 int hw = BKPT_SOFT;
3722
3723 switch (CMD_ARGC) {
3724 case 0:
3725 return handle_bp_command_list(CMD_CTX);
3726
3727 case 2:
3728 asid = 0;
3729 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3730 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3731 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3732
3733 case 3:
3734 if (strcmp(CMD_ARGV[2], "hw") == 0) {
3735 hw = BKPT_HARD;
3736 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3737 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3738 asid = 0;
3739 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3740 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
3741 hw = BKPT_HARD;
3742 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
3743 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3744 addr = 0;
3745 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3746 }
3747 /* fallthrough */
3748 case 4:
3749 hw = BKPT_HARD;
3750 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3751 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
3752 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
3753 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3754
3755 default:
3756 return ERROR_COMMAND_SYNTAX_ERROR;
3757 }
3758 }
3759
3760 COMMAND_HANDLER(handle_rbp_command)
3761 {
3762 if (CMD_ARGC != 1)
3763 return ERROR_COMMAND_SYNTAX_ERROR;
3764
3765 target_addr_t addr;
3766 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3767
3768 struct target *target = get_current_target(CMD_CTX);
3769 breakpoint_remove(target, addr);
3770
3771 return ERROR_OK;
3772 }
3773
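/* Illustrative 'wp' invocations matching the argument parsing below
 * (address, length, value and mask are made-up example values):
 *
 *   wp                              ;# list current watchpoints
 *   wp 0x20000100 4                 ;# access watchpoint, 4 bytes
 *   wp 0x20000100 4 w               ;# write watchpoint
 *   wp 0x20000100 4 a 0x55 0xff     ;# access watchpoint with data value/mask
 */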
3774 COMMAND_HANDLER(handle_wp_command)
3775 {
3776 struct target *target = get_current_target(CMD_CTX);
3777
3778 if (CMD_ARGC == 0) {
3779 struct watchpoint *watchpoint = target->watchpoints;
3780
3781 while (watchpoint) {
3782 command_print(CMD_CTX, "address: " TARGET_ADDR_FMT
3783 ", len: 0x%8.8" PRIx32
3784 ", r/w/a: %i, value: 0x%8.8" PRIx32
3785 ", mask: 0x%8.8" PRIx32,
3786 watchpoint->address,
3787 watchpoint->length,
3788 (int)watchpoint->rw,
3789 watchpoint->value,
3790 watchpoint->mask);
3791 watchpoint = watchpoint->next;
3792 }
3793 return ERROR_OK;
3794 }
3795
3796 enum watchpoint_rw type = WPT_ACCESS;
3797 uint32_t addr = 0;
3798 uint32_t length = 0;
3799 uint32_t data_value = 0x0;
3800 uint32_t data_mask = 0xffffffff;
3801
3802 switch (CMD_ARGC) {
3803 case 5:
3804 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
3805 /* fall through */
3806 case 4:
3807 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
3808 /* fall through */
3809 case 3:
3810 switch (CMD_ARGV[2][0]) {
3811 case 'r':
3812 type = WPT_READ;
3813 break;
3814 case 'w':
3815 type = WPT_WRITE;
3816 break;
3817 case 'a':
3818 type = WPT_ACCESS;
3819 break;
3820 default:
3821 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
3822 return ERROR_COMMAND_SYNTAX_ERROR;
3823 }
3824 /* fall through */
3825 case 2:
3826 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3827 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3828 break;
3829
3830 default:
3831 return ERROR_COMMAND_SYNTAX_ERROR;
3832 }
3833
3834 int retval = watchpoint_add(target, addr, length, type,
3835 data_value, data_mask);
3836 if (ERROR_OK != retval)
3837 		LOG_ERROR("Failure setting watchpoint");

3838
3839 return retval;
3840 }
3841
3842 COMMAND_HANDLER(handle_rwp_command)
3843 {
3844 if (CMD_ARGC != 1)
3845 return ERROR_COMMAND_SYNTAX_ERROR;
3846
3847 uint32_t addr;
3848 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3849
3850 struct target *target = get_current_target(CMD_CTX);
3851 watchpoint_remove(target, addr);
3852
3853 return ERROR_OK;
3854 }
3855
3856 /**
3857 * Translate a virtual address to a physical address.
3858 *
3859 * The low-level target implementation must have logged a detailed error
3860 * which is forwarded to telnet/GDB session.
3861 */
3862 COMMAND_HANDLER(handle_virt2phys_command)
3863 {
3864 if (CMD_ARGC != 1)
3865 return ERROR_COMMAND_SYNTAX_ERROR;
3866
3867 target_addr_t va;
3868 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
3869 target_addr_t pa;
3870
3871 struct target *target = get_current_target(CMD_CTX);
3872 int retval = target->type->virt2phys(target, va, &pa);
3873 if (retval == ERROR_OK)
3874 command_print(CMD_CTX, "Physical address " TARGET_ADDR_FMT "", pa);
3875
3876 return retval;
3877 }
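/* Example (address is illustrative): "virt2phys 0xc0100000" prints the
 * physical address backing that virtual address, provided the target type
 * implements virt2phys(). */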
3878
3879 static void writeData(FILE *f, const void *data, size_t len)
3880 {
3881 size_t written = fwrite(data, 1, len, f);
3882 if (written != len)
3883 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
3884 }
3885
3886 static void writeLong(FILE *f, int l, struct target *target)
3887 {
3888 uint8_t val[4];
3889
3890 target_buffer_set_u32(target, val, l);
3891 writeData(f, val, 4);
3892 }
3893
3894 static void writeString(FILE *f, char *s)
3895 {
3896 writeData(f, s, strlen(s));
3897 }
3898
3899 typedef unsigned char UNIT[2]; /* unit of profiling */
3900
3901 /* Dump a gmon.out histogram file. */
3902 static void write_gmon(uint32_t *samples, uint32_t sampleNum, const char *filename, bool with_range,
3903 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
3904 {
3905 uint32_t i;
3906 FILE *f = fopen(filename, "w");
3907 if (f == NULL)
3908 return;
3909 writeString(f, "gmon");
3910 writeLong(f, 0x00000001, target); /* Version */
3911 writeLong(f, 0, target); /* padding */
3912 writeLong(f, 0, target); /* padding */
3913 writeLong(f, 0, target); /* padding */
3914
3915 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
3916 writeData(f, &zero, 1);
3917
3918 /* figure out bucket size */
3919 uint32_t min;
3920 uint32_t max;
3921 if (with_range) {
3922 min = start_address;
3923 max = end_address;
3924 } else {
3925 min = samples[0];
3926 max = samples[0];
3927 for (i = 0; i < sampleNum; i++) {
3928 if (min > samples[i])
3929 min = samples[i];
3930 if (max < samples[i])
3931 max = samples[i];
3932 }
3933
3934 /* max should be (largest sample + 1)
3935 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
3936 max++;
3937 }
3938
3939 int addressSpace = max - min;
3940 assert(addressSpace >= 2);
3941
3942 /* FIXME: What is the reasonable number of buckets?
3943 * The profiling result will be more accurate if there are enough buckets. */
3944 static const uint32_t maxBuckets = 128 * 1024; /* maximum buckets. */
3945 uint32_t numBuckets = addressSpace / sizeof(UNIT);
3946 if (numBuckets > maxBuckets)
3947 numBuckets = maxBuckets;
3948 int *buckets = malloc(sizeof(int) * numBuckets);
3949 if (buckets == NULL) {
3950 fclose(f);
3951 return;
3952 }
3953 memset(buckets, 0, sizeof(int) * numBuckets);
3954 for (i = 0; i < sampleNum; i++) {
3955 uint32_t address = samples[i];
3956
3957 if ((address < min) || (max <= address))
3958 continue;
3959
3960 long long a = address - min;
3961 long long b = numBuckets;
3962 long long c = addressSpace;
3963 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
3964 buckets[index_t]++;
3965 }
3966
3967 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
3968 writeLong(f, min, target); /* low_pc */
3969 writeLong(f, max, target); /* high_pc */
3970 writeLong(f, numBuckets, target); /* # of buckets */
3971 float sample_rate = sampleNum / (duration_ms / 1000.0);
3972 writeLong(f, sample_rate, target);
3973 writeString(f, "seconds");
3974 for (i = 0; i < (15-strlen("seconds")); i++)
3975 writeData(f, &zero, 1);
3976 writeString(f, "s");
3977
3978 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
3979
3980 char *data = malloc(2 * numBuckets);
3981 if (data != NULL) {
3982 for (i = 0; i < numBuckets; i++) {
3983 int val;
3984 val = buckets[i];
3985 if (val > 65535)
3986 val = 65535;
3987 data[i * 2] = val&0xff;
3988 data[i * 2 + 1] = (val >> 8) & 0xff;
3989 }
3990 free(buckets);
3991 writeData(f, data, numBuckets * 2);
3992 free(data);
3993 } else
3994 free(buckets);
3995
3996 fclose(f);
3997 }
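/* Rough sketch of the gmon.out stream produced by write_gmon() above; the
 * authoritative layout lives in binutils (gprof), this is only a reading aid.
 * Multi-byte header fields go through writeLong(), i.e. target byte order:
 *
 *   char     cookie[4];      "gmon"
 *   uint32_t version;        1
 *   uint32_t spare[3];       zero padding
 *   uint8_t  tag;            0 = GMON_TAG_TIME_HIST
 *   uint32_t low_pc;         lowest sampled address (or start_address)
 *   uint32_t high_pc;        highest sampled address + 1 (or end_address)
 *   uint32_t hist_size;      number of buckets
 *   uint32_t prof_rate;      samples per second
 *   char     dimen[15];      "seconds", zero padded
 *   char     dimen_abbrev;   's'
 *   uint16_t counts[];       one counter per bucket, low byte first,
 *                            clamped to 65535
 */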
3998
3999 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4000 * which will be used as a random sampling of PC */
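/* Example invocations at the OpenOCD prompt (duration, file name and address
 * range are illustrative):
 *
 *   profile 30 gmon.out                          ;# sample for ~30 seconds
 *   profile 30 gmon.out 0x08000000 0x08020000    ;# restrict histogram range
 *
 * The resulting file can then be fed to gprof together with the ELF image.
 */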
4001 COMMAND_HANDLER(handle_profile_command)
4002 {
4003 struct target *target = get_current_target(CMD_CTX);
4004
4005 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4006 return ERROR_COMMAND_SYNTAX_ERROR;
4007
4008 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4009 uint32_t offset;
4010 uint32_t num_of_samples;
4011 int retval = ERROR_OK;
4012
4013 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4014
4015 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4016 if (samples == NULL) {
4017 LOG_ERROR("No memory to store samples.");
4018 return ERROR_FAIL;
4019 }
4020
4021 uint64_t timestart_ms = timeval_ms();
4022 /**
4023 * Some cores let us sample the PC without the
4024 * annoying halt/resume step; for example, ARMv7 PCSR.
4025 * Provide a way to use that more efficient mechanism.
4026 */
4027 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4028 &num_of_samples, offset);
4029 if (retval != ERROR_OK) {
4030 free(samples);
4031 return retval;
4032 }
4033 uint32_t duration_ms = timeval_ms() - timestart_ms;
4034
4035 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4036
4037 retval = target_poll(target);
4038 if (retval != ERROR_OK) {
4039 free(samples);
4040 return retval;
4041 }
4042 if (target->state == TARGET_RUNNING) {
4043 retval = target_halt(target);
4044 if (retval != ERROR_OK) {
4045 free(samples);
4046 return retval;
4047 }
4048 }
4049
4050 retval = target_poll(target);
4051 if (retval != ERROR_OK) {
4052 free(samples);
4053 return retval;
4054 }
4055
4056 uint32_t start_address = 0;
4057 uint32_t end_address = 0;
4058 bool with_range = false;
4059 if (CMD_ARGC == 4) {
4060 with_range = true;
4061 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4062 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4063 }
4064
4065 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4066 with_range, start_address, end_address, target, duration_ms);
4067 command_print(CMD_CTX, "Wrote %s", CMD_ARGV[1]);
4068
4069 free(samples);
4070 return retval;
4071 }
4072
4073 static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val)
4074 {
4075 char *namebuf;
4076 Jim_Obj *nameObjPtr, *valObjPtr;
4077 int result;
4078
4079 namebuf = alloc_printf("%s(%d)", varname, idx);
4080 if (!namebuf)
4081 return JIM_ERR;
4082
4083 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4084 valObjPtr = Jim_NewIntObj(interp, val);
4085 if (!nameObjPtr || !valObjPtr) {
4086 free(namebuf);
4087 return JIM_ERR;
4088 }
4089
4090 Jim_IncrRefCount(nameObjPtr);
4091 Jim_IncrRefCount(valObjPtr);
4092 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
4093 Jim_DecrRefCount(interp, nameObjPtr);
4094 Jim_DecrRefCount(interp, valObjPtr);
4095 free(namebuf);
4096 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4097 return result;
4098 }
4099
4100 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4101 {
4102 struct command_context *context;
4103 struct target *target;
4104
4105 context = current_command_context(interp);
4106 assert(context != NULL);
4107
4108 target = get_current_target(context);
4109 if (target == NULL) {
4110 LOG_ERROR("mem2array: no current target");
4111 return JIM_ERR;
4112 }
4113
4114 return target_mem2array(interp, target, argc - 1, argv + 1);
4115 }
4116
4117 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4118 {
4119 long l;
4120 uint32_t width;
4121 int len;
4122 uint32_t addr;
4123 uint32_t count;
4124 uint32_t v;
4125 const char *varname;
4126 const char *phys;
4127 bool is_phys;
4128 int n, e, retval;
4129 uint32_t i;
4130
4131 /* argv[1] = name of array to receive the data
4132 * argv[2] = desired width
4133 * argv[3] = memory address
4134 * argv[4] = count of times to read
4135 */
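	/* Example from a Tcl script (array name, width, address and count are
	 * illustrative): "mem2array readings 32 0x40012000 8" fills readings(0)
	 * through readings(7) with eight 32-bit words read from 0x40012000;
	 * append "phys" to bypass the MMU via target_read_phys_memory(). */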
4136
4137 if (argc < 4 || argc > 5) {
4138 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4139 return JIM_ERR;
4140 }
4141 varname = Jim_GetString(argv[0], &len);
4142 	/* given "foo" get space for worst case "foo(%d)" .. add 20 */
4143
4144 e = Jim_GetLong(interp, argv[1], &l);
4145 width = l;
4146 if (e != JIM_OK)
4147 return e;
4148
4149 e = Jim_GetLong(interp, argv[2], &l);
4150 addr = l;
4151 if (e != JIM_OK)
4152 return e;
4153 e = Jim_GetLong(interp, argv[3], &l);
4154 len = l;
4155 if (e != JIM_OK)
4156 return e;
4157 is_phys = false;
4158 if (argc > 4) {
4159 phys = Jim_GetString(argv[4], &n);
4160 if (!strncmp(phys, "phys", n))
4161 is_phys = true;
4162 else
4163 return JIM_ERR;
4164 }
4165 switch (width) {
4166 case 8:
4167 width = 1;
4168 break;
4169 case 16:
4170 width = 2;
4171 break;
4172 case 32:
4173 width = 4;
4174 break;
4175 default:
4176 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4177 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
4178 return JIM_ERR;
4179 }
4180 if (len == 0) {
4181 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4182 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
4183 return JIM_ERR;
4184 }
4185 if ((addr + (len * width)) < addr) {
4186 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4187 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4188 return JIM_ERR;
4189 }
4190 /* absurd transfer size? */
4191 if (len > 65536) {
4192 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4193 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
4194 return JIM_ERR;
4195 }
4196
4197 if ((width == 1) ||
4198 ((width == 2) && ((addr & 1) == 0)) ||
4199 ((width == 4) && ((addr & 3) == 0))) {
4200 /* all is well */
4201 } else {
4202 char buf[100];
4203 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4204 sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRId32 " byte reads",
4205 addr,
4206 width);
4207 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4208 return JIM_ERR;
4209 }
4210
4211 /* Transfer loop */
4212
4213 /* index counter */
4214 n = 0;
4215
4216 size_t buffersize = 4096;
4217 uint8_t *buffer = malloc(buffersize);
4218 if (buffer == NULL)
4219 return JIM_ERR;
4220
4221 /* assume ok */
4222 e = JIM_OK;
4223 while (len) {
4224 /* Slurp... in buffer size chunks */
4225
4226 count = len; /* in objects.. */
4227 if (count > (buffersize / width))
4228 count = (buffersize / width);
4229
4230 if (is_phys)
4231 retval = target_read_phys_memory(target, addr, width, count, buffer);
4232 else
4233 retval = target_read_memory(target, addr, width, count, buffer);
4234 if (retval != ERROR_OK) {
4235 /* BOO !*/
4236 LOG_ERROR("mem2array: Read @ 0x%08" PRIx32 ", w=%" PRId32 ", cnt=%" PRId32 ", failed",
4237 addr,
4238 width,
4239 count);
4240 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4241 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4242 e = JIM_ERR;
4243 break;
4244 } else {
4245 v = 0; /* shut up gcc */
4246 for (i = 0; i < count ; i++, n++) {
4247 switch (width) {
4248 case 4:
4249 v = target_buffer_get_u32(target, &buffer[i*width]);
4250 break;
4251 case 2:
4252 v = target_buffer_get_u16(target, &buffer[i*width]);
4253 break;
4254 case 1:
4255 v = buffer[i] & 0x0ff;
4256 break;
4257 }
4258 new_int_array_element(interp, varname, n, v);
4259 }
4260 len -= count;
4261 addr += count * width;
4262 }
4263 }
4264
4265 free(buffer);
4266
4267 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4268
4269 return e;
4270 }
4271
4272 static int get_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t *val)
4273 {
4274 char *namebuf;
4275 Jim_Obj *nameObjPtr, *valObjPtr;
4276 int result;
4277 long l;
4278
4279 namebuf = alloc_printf("%s(%d)", varname, idx);
4280 if (!namebuf)
4281 return JIM_ERR;
4282
4283 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4284 if (!nameObjPtr) {
4285 free(namebuf);
4286 return JIM_ERR;
4287 }
4288
4289 Jim_IncrRefCount(nameObjPtr);
4290 valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG);
4291 Jim_DecrRefCount(interp, nameObjPtr);
4292 free(namebuf);
4293 if (valObjPtr == NULL)
4294 return JIM_ERR;
4295
4296 result = Jim_GetLong(interp, valObjPtr, &l);
4297 /* printf("%s(%d) => 0%08x\n", varname, idx, val); */
4298 *val = l;
4299 return result;
4300 }
4301
4302 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4303 {
4304 struct command_context *context;
4305 struct target *target;
4306
4307 context = current_command_context(interp);
4308 assert(context != NULL);
4309
4310 target = get_current_target(context);
4311 if (target == NULL) {
4312 LOG_ERROR("array2mem: no current target");
4313 return JIM_ERR;
4314 }
4315
4316 return target_array2mem(interp, target, argc-1, argv + 1);
4317 }
4318
4319 static int target_array2mem(Jim_Interp *interp, struct target *target,
4320 int argc, Jim_Obj *const *argv)
4321 {
4322 long l;
4323 uint32_t width;
4324 int len;
4325 uint32_t addr;
4326 uint32_t count;
4327 uint32_t v;
4328 const char *varname;
4329 const char *phys;
4330 bool is_phys;
4331 int n, e, retval;
4332 uint32_t i;
4333
4334 /* argv[1] = name of array to get the data
4335 * argv[2] = desired width
4336 * argv[3] = memory address
4337 * argv[4] = count to write
4338 */
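	/* Example from a Tcl script (names and values are illustrative):
	 * "array2mem patch 32 0x20000100 4" writes patch(0) through patch(3) as
	 * four 32-bit words starting at 0x20000100; append "phys" to use
	 * target_write_phys_memory() instead. */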
4339 if (argc < 4 || argc > 5) {
4340 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4341 return JIM_ERR;
4342 }
4343 varname = Jim_GetString(argv[0], &len);
4344 	/* given "foo" get space for worst case "foo(%d)" .. add 20 */
4345
4346 e = Jim_GetLong(interp, argv[1], &l);
4347 width = l;
4348 if (e != JIM_OK)
4349 return e;
4350
4351 e = Jim_GetLong(interp, argv[2], &l);
4352 addr = l;
4353 if (e != JIM_OK)
4354 return e;
4355 e = Jim_GetLong(interp, argv[3], &l);
4356 len = l;
4357 if (e != JIM_OK)
4358 return e;
4359 is_phys = false;
4360 if (argc > 4) {
4361 phys = Jim_GetString(argv[4], &n);
4362 if (!strncmp(phys, "phys", n))
4363 is_phys = true;
4364 else
4365 return JIM_ERR;
4366 }
4367 switch (width) {
4368 case 8:
4369 width = 1;
4370 break;
4371 case 16:
4372 width = 2;
4373 break;
4374 case 32:
4375 width = 4;
4376 break;
4377 default:
4378 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4379 Jim_AppendStrings(interp, Jim_GetResult(interp),
4380 "Invalid width param, must be 8/16/32", NULL);
4381 return JIM_ERR;
4382 }
4383 if (len == 0) {
4384 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4385 Jim_AppendStrings(interp, Jim_GetResult(interp),
4386 			"array2mem: zero width write?", NULL);
4387 return JIM_ERR;
4388 }
4389 if ((addr + (len * width)) < addr) {
4390 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4391 Jim_AppendStrings(interp, Jim_GetResult(interp),
4392 "array2mem: addr + len - wraps to zero?", NULL);
4393 return JIM_ERR;
4394 }
4395 /* absurd transfer size? */
4396 if (len > 65536) {
4397 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4398 Jim_AppendStrings(interp, Jim_GetResult(interp),
4399 "array2mem: absurd > 64K item request", NULL);
4400 return JIM_ERR;
4401 }
4402
4403 if ((width == 1) ||
4404 ((width == 2) && ((addr & 1) == 0)) ||
4405 ((width == 4) && ((addr & 3) == 0))) {
4406 /* all is well */
4407 } else {
4408 char buf[100];
4409 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4410 		sprintf(buf, "array2mem address: 0x%08" PRIx32 " is not aligned for %" PRId32 " byte writes",
4411 addr,
4412 width);
4413 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4414 return JIM_ERR;
4415 }
4416
4417 /* Transfer loop */
4418
4419 /* index counter */
4420 n = 0;
4421 /* assume ok */
4422 e = JIM_OK;
4423
4424 size_t buffersize = 4096;
4425 uint8_t *buffer = malloc(buffersize);
4426 if (buffer == NULL)
4427 return JIM_ERR;
4428
4429 while (len) {
4430 /* Slurp... in buffer size chunks */
4431
4432 count = len; /* in objects.. */
4433 if (count > (buffersize / width))
4434 count = (buffersize / width);
4435
4436 v = 0; /* shut up gcc */
4437 for (i = 0; i < count; i++, n++) {
4438 get_int_array_element(interp, varname, n, &v);
4439 switch (width) {
4440 case 4:
4441 target_buffer_set_u32(target, &buffer[i * width], v);
4442 break;
4443 case 2:
4444 target_buffer_set_u16(target, &buffer[i * width], v);
4445 break;
4446 case 1:
4447 buffer[i] = v & 0x0ff;
4448 break;
4449 }
4450 }
4451 len -= count;
4452
4453 if (is_phys)
4454 retval = target_write_phys_memory(target, addr, width, count, buffer);
4455 else
4456 retval = target_write_memory(target, addr, width, count, buffer);
4457 if (retval != ERROR_OK) {
4458 /* BOO !*/
4459 LOG_ERROR("array2mem: Write @ 0x%08" PRIx32 ", w=%" PRId32 ", cnt=%" PRId32 ", failed",
4460 addr,
4461 width,
4462 count);
4463 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4464 			Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4465 e = JIM_ERR;
4466 break;
4467 }
4468 addr += count * width;
4469 }
4470
4471 free(buffer);
4472
4473 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4474
4475 return e;
4476 }
4477
4478 /* FIX? should we propagate errors here rather than printing them
4479 * and continuing?
4480 */
4481 void target_handle_event(struct target *target, enum target_event e)
4482 {
4483 struct target_event_action *teap;
4484
4485 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4486 if (teap->event == e) {
4487 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
4488 target->target_number,
4489 target_name(target),
4490 target_type_name(target),
4491 e,
4492 Jim_Nvp_value2name_simple(nvp_target_event, e)->name,
4493 Jim_GetString(teap->body, NULL));
4494
4495 /* Override current target by the target an event
4496 			 * is issued from (a lot of scripts need it).
4497 * Return back to previous override as soon
4498 * as the handler processing is done */
4499 struct command_context *cmd_ctx = current_command_context(teap->interp);
4500 struct target *saved_target_override = cmd_ctx->current_target_override;
4501 cmd_ctx->current_target_override = target;
4502
4503 if (Jim_EvalObj(teap->interp, teap->body) != JIM_OK) {
4504 Jim_MakeErrorMessage(teap->interp);
4505 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(teap->interp), NULL));
4506 }
4507
4508 cmd_ctx->current_target_override = saved_target_override;
4509 }
4510 }
4511 }
4512
4513 /**
4514 * Returns true only if the target has a handler for the specified event.
4515 */
4516 bool target_has_event_action(struct target *target, enum target_event event)
4517 {
4518 struct target_event_action *teap;
4519
4520 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4521 if (teap->event == event)
4522 return true;
4523 }
4524 return false;
4525 }
4526
4527 enum target_cfg_param {
4528 TCFG_TYPE,
4529 TCFG_EVENT,
4530 TCFG_WORK_AREA_VIRT,
4531 TCFG_WORK_AREA_PHYS,
4532 TCFG_WORK_AREA_SIZE,
4533 TCFG_WORK_AREA_BACKUP,
4534 TCFG_ENDIAN,
4535 TCFG_COREID,
4536 TCFG_CHAIN_POSITION,
4537 TCFG_DBGBASE,
4538 TCFG_RTOS,
4539 TCFG_DEFER_EXAMINE,
4540 };
4541
4542 static Jim_Nvp nvp_config_opts[] = {
4543 { .name = "-type", .value = TCFG_TYPE },
4544 { .name = "-event", .value = TCFG_EVENT },
4545 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
4546 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
4547 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
4548 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
4549 { .name = "-endian" , .value = TCFG_ENDIAN },
4550 { .name = "-coreid", .value = TCFG_COREID },
4551 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
4552 { .name = "-dbgbase", .value = TCFG_DBGBASE },
4553 { .name = "-rtos", .value = TCFG_RTOS },
4554 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
4555 { .name = NULL, .value = -1 }
4556 };
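/* Typical 'configure'/'cget' use from a target config script; option values
 * below are illustrative, not defaults:
 *
 *   $_TARGETNAME configure -work-area-phys 0x20000000 \
 *                          -work-area-size 0x4000 -work-area-backup 0
 *   $_TARGETNAME configure -event reset-init { echo "reset-init handler" }
 *   $_TARGETNAME cget -endian      ;# query an attribute instead of setting it
 */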
4557
4558 static int target_configure(Jim_GetOptInfo *goi, struct target *target)
4559 {
4560 Jim_Nvp *n;
4561 Jim_Obj *o;
4562 jim_wide w;
4563 int e;
4564
4565 /* parse config or cget options ... */
4566 while (goi->argc > 0) {
4567 Jim_SetEmptyResult(goi->interp);
4568 /* Jim_GetOpt_Debug(goi); */
4569
4570 if (target->type->target_jim_configure) {
4571 /* target defines a configure function */
4572 /* target gets first dibs on parameters */
4573 e = (*(target->type->target_jim_configure))(target, goi);
4574 if (e == JIM_OK) {
4575 /* more? */
4576 continue;
4577 }
4578 if (e == JIM_ERR) {
4579 /* An error */
4580 return e;
4581 }
4582 /* otherwise we 'continue' below */
4583 }
4584 e = Jim_GetOpt_Nvp(goi, nvp_config_opts, &n);
4585 if (e != JIM_OK) {
4586 Jim_GetOpt_NvpUnknown(goi, nvp_config_opts, 0);
4587 return e;
4588 }
4589 switch (n->value) {
4590 case TCFG_TYPE:
4591 			/* not settable */
4592 if (goi->isconfigure) {
4593 Jim_SetResultFormatted(goi->interp,
4594 "not settable: %s", n->name);
4595 return JIM_ERR;
4596 } else {
4597 no_params:
4598 if (goi->argc != 0) {
4599 Jim_WrongNumArgs(goi->interp,
4600 goi->argc, goi->argv,
4601 "NO PARAMS");
4602 return JIM_ERR;
4603 }
4604 }
4605 Jim_SetResultString(goi->interp,
4606 target_type_name(target), -1);
4607 /* loop for more */
4608 break;
4609 case TCFG_EVENT:
4610 if (goi->argc == 0) {
4611 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
4612 return JIM_ERR;
4613 }
4614
4615 e = Jim_GetOpt_Nvp(goi, nvp_target_event, &n);
4616 if (e != JIM_OK) {
4617 Jim_GetOpt_NvpUnknown(goi, nvp_target_event, 1);
4618 return e;
4619 }
4620
4621 if (goi->isconfigure) {
4622 if (goi->argc != 1) {
4623 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
4624 return JIM_ERR;
4625 }
4626 } else {
4627 if (goi->argc != 0) {
4628 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
4629 return JIM_ERR;
4630 }
4631 }
4632
4633 {
4634 struct target_event_action *teap;
4635
4636 teap = target->event_action;
4637 /* replace existing? */
4638 while (teap) {
4639 if (teap->event == (enum target_event)n->value)
4640 break;
4641 teap = teap->next;
4642 }
4643
4644 if (goi->isconfigure) {
4645 bool replace = true;
4646 if (teap == NULL) {
4647 /* create new */
4648 teap = calloc(1, sizeof(*teap));
4649 replace = false;
4650 }
4651 teap->event = n->value;
4652 teap->interp = goi->interp;
4653 Jim_GetOpt_Obj(goi, &o);
4654 if (teap->body)
4655 Jim_DecrRefCount(teap->interp, teap->body);
4656 teap->body = Jim_DuplicateObj(goi->interp, o);
4657 /*
4658 * FIXME:
4659 * Tcl/TK - "tk events" have a nice feature.
4660 * See the "BIND" command.
4661 * We should support that here.
4662 * You can specify %X and %Y in the event code.
4663 * The idea is: %T - target name.
4664 * The idea is: %N - target number
4665 * The idea is: %E - event name.
4666 */
4667 Jim_IncrRefCount(teap->body);
4668
4669 if (!replace) {
4670 /* add to head of event list */
4671 teap->next = target->event_action;
4672 target->event_action = teap;
4673 }
4674 Jim_SetEmptyResult(goi->interp);
4675 } else {
4676 /* get */
4677 if (teap == NULL)
4678 Jim_SetEmptyResult(goi->interp);
4679 else
4680 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
4681 }
4682 }
4683 /* loop for more */
4684 break;
4685
4686 case TCFG_WORK_AREA_VIRT:
4687 if (goi->isconfigure) {
4688 target_free_all_working_areas(target);
4689 e = Jim_GetOpt_Wide(goi, &w);
4690 if (e != JIM_OK)
4691 return e;
4692 target->working_area_virt = w;
4693 target->working_area_virt_spec = true;
4694 } else {
4695 if (goi->argc != 0)
4696 goto no_params;
4697 }
4698 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
4699 /* loop for more */
4700 break;
4701
4702 case TCFG_WORK_AREA_PHYS:
4703 if (goi->isconfigure) {
4704 target_free_all_working_areas(target);
4705 e = Jim_GetOpt_Wide(goi, &w);
4706 if (e != JIM_OK)
4707 return e;
4708 target->working_area_phys = w;
4709 target->working_area_phys_spec = true;
4710 } else {
4711 if (goi->argc != 0)
4712 goto no_params;
4713 }
4714 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
4715 /* loop for more */
4716 break;
4717
4718 case TCFG_WORK_AREA_SIZE:
4719 if (goi->isconfigure) {
4720 target_free_all_working_areas(target);
4721 e = Jim_GetOpt_Wide(goi, &w);
4722 if (e != JIM_OK)
4723 return e;
4724 target->working_area_size = w;
4725 } else {
4726 if (goi->argc != 0)
4727 goto no_params;
4728 }
4729 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
4730 /* loop for more */
4731 break;
4732
4733 case TCFG_WORK_AREA_BACKUP:
4734 if (goi->isconfigure) {
4735 target_free_all_working_areas(target);
4736 e = Jim_GetOpt_Wide(goi, &w);
4737 if (e != JIM_OK)
4738 return e;
4739 /* make this exactly 1 or 0 */
4740 target->backup_working_area = (!!w);
4741 } else {
4742 if (goi->argc != 0)
4743 goto no_params;
4744 }
4745 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
4746 			/* loop for more */
4747 break;
4748
4749
4750 case TCFG_ENDIAN:
4751 if (goi->isconfigure) {
4752 e = Jim_GetOpt_Nvp(goi, nvp_target_endian, &n);
4753 if (e != JIM_OK) {
4754 Jim_GetOpt_NvpUnknown(goi, nvp_target_endian, 1);
4755 return e;
4756 }
4757 target->endianness = n->value;
4758 } else {
4759 if (goi->argc != 0)
4760 goto no_params;
4761 }
4762 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4763 if (n->name == NULL) {
4764 target->endianness = TARGET_LITTLE_ENDIAN;
4765 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4766 }
4767 Jim_SetResultString(goi->interp, n->name, -1);
4768 /* loop for more */
4769 break;
4770
4771 case TCFG_COREID:
4772 if (goi->isconfigure) {
4773 e = Jim_GetOpt_Wide(goi, &w);
4774 if (e != JIM_OK)
4775 return e;
4776 target->coreid = (int32_t)w;
4777 } else {
4778 if (goi->argc != 0)
4779 goto no_params;
4780 }
4781 			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
4782 /* loop for more */
4783 break;
4784
4785 case TCFG_CHAIN_POSITION:
4786 if (goi->isconfigure) {
4787 Jim_Obj *o_t;
4788 struct jtag_tap *tap;
4789
4790 if (target->has_dap) {
4791 Jim_SetResultString(goi->interp,
4792 "target requires -dap parameter instead of -chain-position!", -1);
4793 return JIM_ERR;
4794 }
4795
4796 target_free_all_working_areas(target);
4797 e = Jim_GetOpt_Obj(goi, &o_t);
4798 if (e != JIM_OK)
4799 return e;
4800 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
4801 if (tap == NULL)
4802 return JIM_ERR;
4803 target->tap = tap;
4804 target->tap_configured = true;
4805 } else {
4806 if (goi->argc != 0)
4807 goto no_params;
4808 }
4809 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
4810 			/* loop for more */
4811 break;
4812 case TCFG_DBGBASE:
4813 if (goi->isconfigure) {
4814 e = Jim_GetOpt_Wide(goi, &w);
4815 if (e != JIM_OK)
4816 return e;
4817 target->dbgbase = (uint32_t)w;
4818 target->dbgbase_set = true;
4819 } else {
4820 if (goi->argc != 0)
4821 goto no_params;
4822 }
4823 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
4824 /* loop for more */
4825 break;
4826 case TCFG_RTOS:
4827 /* RTOS */
4828 {
4829 int result = rtos_create(goi, target);
4830 if (result != JIM_OK)
4831 return result;
4832 }
4833 /* loop for more */
4834 break;
4835
4836 case TCFG_DEFER_EXAMINE:
4837 /* DEFER_EXAMINE */
4838 target->defer_examine = true;
4839 /* loop for more */
4840 break;
4841
4842 }
4843 } /* while (goi->argc) */
4844
4845
4846 /* done - we return */
4847 return JIM_OK;
4848 }
4849
4850 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
4851 {
4852 Jim_GetOptInfo goi;
4853
4854 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4855 goi.isconfigure = !strcmp(Jim_GetString(argv[0], NULL), "configure");
4856 if (goi.argc < 1) {
4857 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
4858 "missing: -option ...");
4859 return JIM_ERR;
4860 }
4861 struct target *target = Jim_CmdPrivData(goi.interp);
4862 return target_configure(&goi, target);
4863 }
4864
4865 static int jim_target_mw(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4866 {
4867 const char *cmd_name = Jim_GetString(argv[0], NULL);
4868
4869 Jim_GetOptInfo goi;
4870 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4871
4872 if (goi.argc < 2 || goi.argc > 4) {
4873 Jim_SetResultFormatted(goi.interp,
4874 "usage: %s [phys] <address> <data> [<count>]", cmd_name);
4875 return JIM_ERR;
4876 }
4877
4878 target_write_fn fn;
4879 fn = target_write_memory;
4880
4881 int e;
4882 if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) {
4883 /* consume it */
4884 struct Jim_Obj *obj;
4885 e = Jim_GetOpt_Obj(&goi, &obj);
4886 if (e != JIM_OK)
4887 return e;
4888
4889 fn = target_write_phys_memory;
4890 }
4891
4892 jim_wide a;
4893 e = Jim_GetOpt_Wide(&goi, &a);
4894 if (e != JIM_OK)
4895 return e;
4896
4897 jim_wide b;
4898 e = Jim_GetOpt_Wide(&goi, &b);
4899 if (e != JIM_OK)
4900 return e;
4901
4902 jim_wide c = 1;
4903 if (goi.argc == 1) {
4904 e = Jim_GetOpt_Wide(&goi, &c);
4905 if (e != JIM_OK)
4906 return e;
4907 }
4908
4909 /* all args must be consumed */
4910 if (goi.argc != 0)
4911 return JIM_ERR;
4912
4913 struct target *target = Jim_CmdPrivData(goi.interp);
4914 unsigned data_size;
4915 if (strcasecmp(cmd_name, "mww") == 0)
4916 data_size = 4;
4917 else if (strcasecmp(cmd_name, "mwh") == 0)
4918 data_size = 2;
4919 else if (strcasecmp(cmd_name, "mwb") == 0)
4920 data_size = 1;
4921 else {
4922 		LOG_ERROR("command '%s' unknown", cmd_name);
4923 return JIM_ERR;
4924 }
4925
4926 return (target_fill_mem(target, a, fn, data_size, b, c) == ERROR_OK) ? JIM_OK : JIM_ERR;
4927 }
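/* Illustrative per-target write commands handled above (target name, address,
 * data and count are example values):
 *
 *   mychip.cpu mww 0x20000000 0xdeadbeef 4   ;# four 32-bit words
 *   mychip.cpu mwh 0x20000000 0xbeef         ;# one 16-bit half-word
 *   mychip.cpu mwb phys 0x20000000 0x5a      ;# byte write, physical address
 */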
4928
4929 /**
4930 * @brief Reads an array of words/halfwords/bytes from target memory starting at specified address.
4931 *
4932 * Usage: mdw [phys] <address> [<count>] - for 32 bit reads
4933 * mdh [phys] <address> [<count>] - for 16 bit reads
4934 * mdb [phys] <address> [<count>] - for 8 bit reads
4935 *
4936 * Count defaults to 1.
4937 *
4938 * Calls target_read_memory or target_read_phys_memory depending on
4939 * the presence of the "phys" argument
4940 * Reads the target memory in blocks of at most 16 bytes, and returns the values
4941 * formatted as base-16 numbers.
4942 * Also outputs read data in a human readable form using command_print
4943 *
4944 * @param phys if present target_read_phys_memory will be used instead of target_read_memory
4945 * @param address address where to start the read. May be specified in decimal or hex using the standard "0x" prefix
4946 * @param count optional count parameter to read an array of values. If not specified, defaults to 1.
4947 * @returns: JIM_ERR on error or JIM_OK on success and sets the result string to an array of ascii formatted numbers
4948 * on success, with [<count>] number of elements.
4949 *
4950 * In case of little endian target:
4951 * Example1: "mdw 0x00000000" returns "10123456"
4952 * Example2: "mdh 0x00000000 1" returns "3456"
4953 * Example3: "mdb 0x00000000" returns "56"
4954 * Example4: "mdh 0x00000000 2" returns "3456 1012"
4955 * Example5: "mdb 0x00000000 3" returns "56 34 12"
4956 **/
4957 static int jim_target_md(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4958 {
4959 const char *cmd_name = Jim_GetString(argv[0], NULL);
4960
4961 Jim_GetOptInfo goi;
4962 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4963
4964 if ((goi.argc < 1) || (goi.argc > 3)) {
4965 Jim_SetResultFormatted(goi.interp,
4966 "usage: %s [phys] <address> [<count>]", cmd_name);
4967 return JIM_ERR;
4968 }
4969
4970 int (*fn)(struct target *target,
4971 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer);
4972 fn = target_read_memory;
4973
4974 int e;
4975 if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) {
4976 /* consume it */
4977 struct Jim_Obj *obj;
4978 e = Jim_GetOpt_Obj(&goi, &obj);
4979 if (e != JIM_OK)
4980 return e;
4981
4982 fn = target_read_phys_memory;
4983 }
4984
4985 /* Read address parameter */
4986 jim_wide addr;
4987 e = Jim_GetOpt_Wide(&goi, &addr);
4988 if (e != JIM_OK)
4989 return JIM_ERR;
4990
4991 /* If next parameter exists, read it out as the count parameter, if not, set it to 1 (default) */
4992 jim_wide count;
4993 if (goi.argc == 1) {
4994 e = Jim_GetOpt_Wide(&goi, &count);
4995 if (e != JIM_OK)
4996 return JIM_ERR;
4997 } else
4998 count = 1;
4999
5000 /* all args must be consumed */
5001 if (goi.argc != 0)
5002 return JIM_ERR;
5003
5004 jim_wide dwidth = 1; /* shut up gcc */
5005 if (strcasecmp(cmd_name, "mdw") == 0)
5006 dwidth = 4;
5007 else if (strcasecmp(cmd_name, "mdh") == 0)
5008 dwidth = 2;
5009 else if (strcasecmp(cmd_name, "mdb") == 0)
5010 dwidth = 1;
5011 else {
5012 		LOG_ERROR("command '%s' unknown", cmd_name);
5013 return JIM_ERR;
5014 }
5015
5016 /* convert count to "bytes" */
5017 int bytes = count * dwidth;
5018
5019 struct target *target = Jim_CmdPrivData(goi.interp);
5020 uint8_t target_buf[32];
5021 jim_wide x, y, z;
5022 while (bytes > 0) {
5023 y = (bytes < 16) ? bytes : 16; /* y = min(bytes, 16); */
5024
5025 /* Try to read out next block */
5026 e = fn(target, addr, dwidth, y / dwidth, target_buf);
5027
5028 if (e != ERROR_OK) {
5029 Jim_SetResultFormatted(interp, "error reading target @ 0x%08lx", (long)addr);
5030 return JIM_ERR;
5031 }
5032
5033 command_print_sameline(NULL, "0x%08x ", (int)(addr));
5034 switch (dwidth) {
5035 case 4:
5036 for (x = 0; x < 16 && x < y; x += 4) {
5037 z = target_buffer_get_u32(target, &(target_buf[x]));
5038 command_print_sameline(NULL, "%08x ", (int)(z));
5039 }
5040 for (; (x < 16) ; x += 4)
5041 command_print_sameline(NULL, " ");
5042 break;
5043 case 2:
5044 for (x = 0; x < 16 && x < y; x += 2) {
5045 z = target_buffer_get_u16(target, &(target_buf[x]));
5046 command_print_sameline(NULL, "%04x ", (int)(z));
5047 }
5048 for (; (x < 16) ; x += 2)
5049 command_print_sameline(NULL, " ");
5050 break;
5051 case 1:
5052 default:
5053 for (x = 0 ; (x < 16) && (x < y) ; x += 1) {
5054 z = target_buffer_get_u8(target, &(target_buf[x]));
5055 command_print_sameline(NULL, "%02x ", (int)(z));
5056 }
5057 for (; (x < 16) ; x += 1)
5058 command_print_sameline(NULL, " ");
5059 break;
5060 }
5061 /* ascii-ify the bytes */
5062 for (x = 0 ; x < y ; x++) {
5063 if ((target_buf[x] >= 0x20) &&
5064 (target_buf[x] <= 0x7e)) {
5065 /* good */
5066 } else {
5067 /* smack it */
5068 target_buf[x] = '.';
5069 }
5070 }
5071 /* space pad */
5072 while (x < 16) {
5073 target_buf[x] = ' ';
5074 x++;
5075 }
5076 /* terminate */
5077 target_buf[16] = 0;
5078 /* print - with a newline */
5079 command_print_sameline(NULL, "%s\n", target_buf);
5080 /* NEXT... */
5081 bytes -= 16;
5082 addr += 16;
5083 }
5084 return JIM_OK;
5085 }
5086
5087 static int jim_target_mem2array(Jim_Interp *interp,
5088 int argc, Jim_Obj *const *argv)
5089 {
5090 struct target *target = Jim_CmdPrivData(interp);
5091 return target_mem2array(interp, target, argc - 1, argv + 1);
5092 }
5093
5094 static int jim_target_array2mem(Jim_Interp *interp,
5095 int argc, Jim_Obj *const *argv)
5096 {
5097 struct target *target = Jim_CmdPrivData(interp);
5098 return target_array2mem(interp, target, argc - 1, argv + 1);
5099 }
5100
5101 static int jim_target_tap_disabled(Jim_Interp *interp)
5102 {
5103 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5104 return JIM_ERR;
5105 }
5106
5107 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5108 {
5109 bool allow_defer = false;
5110
5111 Jim_GetOptInfo goi;
5112 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5113 if (goi.argc > 1) {
5114 const char *cmd_name = Jim_GetString(argv[0], NULL);
5115 Jim_SetResultFormatted(goi.interp,
5116 "usage: %s ['allow-defer']", cmd_name);
5117 return JIM_ERR;
5118 }
5119 if (goi.argc > 0 &&
5120 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5121 /* consume it */
5122 struct Jim_Obj *obj;
5123 int e = Jim_GetOpt_Obj(&goi, &obj);
5124 if (e != JIM_OK)
5125 return e;
5126 allow_defer = true;
5127 }
5128
5129 struct target *target = Jim_CmdPrivData(interp);
5130 if (!target->tap->enabled)
5131 return jim_target_tap_disabled(interp);
5132
5133 if (allow_defer && target->defer_examine) {
5134 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5135 LOG_INFO("Use arp_examine command to examine it manually!");
5136 return JIM_OK;
5137 }
5138
5139 int e = target->type->examine(target);
5140 if (e != ERROR_OK)
5141 return JIM_ERR;
5142 return JIM_OK;
5143 }
5144
5145 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5146 {
5147 struct target *target = Jim_CmdPrivData(interp);
5148
5149 Jim_SetResultBool(interp, target_was_examined(target));
5150 return JIM_OK;
5151 }
5152
5153 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5154 {
5155 struct target *target = Jim_CmdPrivData(interp);
5156
5157 Jim_SetResultBool(interp, target->defer_examine);
5158 return JIM_OK;
5159 }
5160
5161 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5162 {
5163 if (argc != 1) {
5164 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5165 return JIM_ERR;
5166 }
5167 struct target *target = Jim_CmdPrivData(interp);
5168
5169 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5170 return JIM_ERR;
5171
5172 return JIM_OK;
5173 }
5174
5175 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5176 {
5177 if (argc != 1) {
5178 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5179 return JIM_ERR;
5180 }
5181 struct target *target = Jim_CmdPrivData(interp);
5182 if (!target->tap->enabled)
5183 return jim_target_tap_disabled(interp);
5184
5185 int e;
5186 if (!(target_was_examined(target)))
5187 e = ERROR_TARGET_NOT_EXAMINED;
5188 else
5189 e = target->type->poll(target);
5190 if (e != ERROR_OK)
5191 return JIM_ERR;
5192 return JIM_OK;
5193 }
5194
5195 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5196 {
5197 Jim_GetOptInfo goi;
5198 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5199
5200 if (goi.argc != 2) {
5201 Jim_WrongNumArgs(interp, 0, argv,
5202 "([tT]|[fF]|assert|deassert) BOOL");
5203 return JIM_ERR;
5204 }
5205
5206 Jim_Nvp *n;
5207 int e = Jim_GetOpt_Nvp(&goi, nvp_assert, &n);
5208 if (e != JIM_OK) {
5209 Jim_GetOpt_NvpUnknown(&goi, nvp_assert, 1);
5210 return e;
5211 }
5212 /* the halt or not param */
5213 jim_wide a;
5214 e = Jim_GetOpt_Wide(&goi, &a);
5215 if (e != JIM_OK)
5216 return e;
5217
5218 struct target *target = Jim_CmdPrivData(goi.interp);
5219 if (!target->tap->enabled)
5220 return jim_target_tap_disabled(interp);
5221
5222 if (!target->type->assert_reset || !target->type->deassert_reset) {
5223 Jim_SetResultFormatted(interp,
5224 "No target-specific reset for %s",
5225 target_name(target));
5226 return JIM_ERR;
5227 }
5228
5229 if (target->defer_examine)
5230 target_reset_examined(target);
5231
5232 /* determine if we should halt or not. */
5233 target->reset_halt = !!a;
5234 /* When this happens - all workareas are invalid. */
5235 target_free_all_working_areas_restore(target, 0);
5236
5237 /* do the assert */
5238 if (n->value == NVP_ASSERT)
5239 e = target->type->assert_reset(target);
5240 else
5241 e = target->type->deassert_reset(target);
5242 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5243 }
5244
5245 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5246 {
5247 if (argc != 1) {
5248 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5249 return JIM_ERR;
5250 }
5251 struct target *target = Jim_CmdPrivData(interp);
5252 if (!target->tap->enabled)
5253 return jim_target_tap_disabled(interp);
5254 int e = target->type->halt(target);
5255 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5256 }
5257
5258 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5259 {
5260 Jim_GetOptInfo goi;
5261 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5262
5263 /* params: <name> statename timeoutmsecs */
5264 if (goi.argc != 2) {
5265 const char *cmd_name = Jim_GetString(argv[0], NULL);
5266 Jim_SetResultFormatted(goi.interp,
5267 "%s <state_name> <timeout_in_msec>", cmd_name);
5268 return JIM_ERR;
5269 }
5270
5271 Jim_Nvp *n;
5272 int e = Jim_GetOpt_Nvp(&goi, nvp_target_state, &n);
5273 if (e != JIM_OK) {
5274 Jim_GetOpt_NvpUnknown(&goi, nvp_target_state, 1);
5275 return e;
5276 }
5277 jim_wide a;
5278 e = Jim_GetOpt_Wide(&goi, &a);
5279 if (e != JIM_OK)
5280 return e;
5281 struct target *target = Jim_CmdPrivData(interp);
5282 if (!target->tap->enabled)
5283 return jim_target_tap_disabled(interp);
5284
5285 e = target_wait_state(target, n->value, a);
5286 if (e != ERROR_OK) {
5287 Jim_Obj *eObj = Jim_NewIntObj(interp, e);
5288 Jim_SetResultFormatted(goi.interp,
5289 "target: %s wait %s fails (%#s) %s",
5290 target_name(target), n->name,
5291 eObj, target_strerror_safe(e));
5292 Jim_FreeNewObj(interp, eObj);
5293 return JIM_ERR;
5294 }
5295 return JIM_OK;
5296 }
5297 /* List for human, Events defined for this target.
5298 * scripts/programs should use 'name cget -event NAME'
5299 */
5300 static int jim_target_event_list(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5301 {
5302 struct command_context *cmd_ctx = current_command_context(interp);
5303 assert(cmd_ctx != NULL);
5304
5305 struct target *target = Jim_CmdPrivData(interp);
5306 struct target_event_action *teap = target->event_action;
5307 command_print(cmd_ctx, "Event actions for target (%d) %s\n",
5308 target->target_number,
5309 target_name(target));
5310 command_print(cmd_ctx, "%-25s | Body", "Event");
5311 command_print(cmd_ctx, "------------------------- | "
5312 "----------------------------------------");
5313 while (teap) {
5314 Jim_Nvp *opt = Jim_Nvp_value2name_simple(nvp_target_event, teap->event);
5315 command_print(cmd_ctx, "%-25s | %s",
5316 opt->name, Jim_GetString(teap->body, NULL));
5317 teap = teap->next;
5318 }
5319 command_print(cmd_ctx, "***END***");
5320 return JIM_OK;
5321 }
5322 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5323 {
5324 if (argc != 1) {
5325 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5326 return JIM_ERR;
5327 }
5328 struct target *target = Jim_CmdPrivData(interp);
5329 Jim_SetResultString(interp, target_state_name(target), -1);
5330 return JIM_OK;
5331 }
5332 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5333 {
5334 Jim_GetOptInfo goi;
5335 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5336 if (goi.argc != 1) {
5337 const char *cmd_name = Jim_GetString(argv[0], NULL);
5338 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5339 return JIM_ERR;
5340 }
5341 Jim_Nvp *n;
5342 int e = Jim_GetOpt_Nvp(&goi, nvp_target_event, &n);
5343 if (e != JIM_OK) {
5344 Jim_GetOpt_NvpUnknown(&goi, nvp_target_event, 1);
5345 return e;
5346 }
5347 struct target *target = Jim_CmdPrivData(interp);
5348 target_handle_event(target, n->value);
5349 return JIM_OK;
5350 }
5351
5352 static const struct command_registration target_instance_command_handlers[] = {
5353 {
5354 .name = "configure",
5355 .mode = COMMAND_CONFIG,
5356 .jim_handler = jim_target_configure,
5357 .help = "configure a new target for use",
5358 .usage = "[target_attribute ...]",
5359 },
5360 {
5361 .name = "cget",
5362 .mode = COMMAND_ANY,
5363 .jim_handler = jim_target_configure,
5364 .help = "returns the specified target attribute",
5365 .usage = "target_attribute",
5366 },
5367 {
5368 .name = "mww",
5369 .mode = COMMAND_EXEC,
5370 .jim_handler = jim_target_mw,
5371 .help = "Write 32-bit word(s) to target memory",
5372 .usage = "address data [count]",
5373 },
5374 {
5375 .name = "mwh",
5376 .mode = COMMAND_EXEC,
5377 .jim_handler = jim_target_mw,
5378 .help = "Write 16-bit half-word(s) to target memory",
5379 .usage = "address data [count]",
5380 },
5381 {
5382 .name = "mwb",
5383 .mode = COMMAND_EXEC,
5384 .jim_handler = jim_target_mw,
5385 .help = "Write byte(s) to target memory",
5386 .usage = "address data [count]",
5387 },
5388 {
5389 .name = "mdw",
5390 .mode = COMMAND_EXEC,
5391 .jim_handler = jim_target_md,
5392 .help = "Display target memory as 32-bit words",
5393 .usage = "address [count]",
5394 },
5395 {
5396 .name = "mdh",
5397 .mode = COMMAND_EXEC,
5398 .jim_handler = jim_target_md,
5399 .help = "Display target memory as 16-bit half-words",
5400 .usage = "address [count]",
5401 },
5402 {
5403 .name = "mdb",
5404 .mode = COMMAND_EXEC,
5405 .jim_handler = jim_target_md,
5406 .help = "Display target memory as 8-bit bytes",
5407 .usage = "address [count]",
5408 },
5409 {
5410 .name = "array2mem",
5411 .mode = COMMAND_EXEC,
5412 .jim_handler = jim_target_array2mem,
5413 .help = "Writes Tcl array of 8/16/32 bit numbers "
5414 "to target memory",
5415 .usage = "arrayname bitwidth address count",
5416 },
5417 {
5418 .name = "mem2array",
5419 .mode = COMMAND_EXEC,
5420 .jim_handler = jim_target_mem2array,
5421 .help = "Loads Tcl array of 8/16/32 bit numbers "
5422 "from target memory",
5423 .usage = "arrayname bitwidth address count",
5424 },
5425 {
5426 .name = "eventlist",
5427 .mode = COMMAND_EXEC,
5428 .jim_handler = jim_target_event_list,
5429 .help = "displays a table of events defined for this target",
5430 },
5431 {
5432 .name = "curstate",
5433 .mode = COMMAND_EXEC,
5434 .jim_handler = jim_target_current_state,
5435 .help = "displays the current state of this target",
5436 },
5437 {
5438 .name = "arp_examine",
5439 .mode = COMMAND_EXEC,
5440 .jim_handler = jim_target_examine,
5441 .help = "used internally for reset processing",
5442 .usage = "['allow-defer']",
5443 },
5444 {
5445 .name = "was_examined",
5446 .mode = COMMAND_EXEC,
5447 .jim_handler = jim_target_was_examined,
5448 .help = "used internally for reset processing",
5449 },
5450 {
5451 .name = "examine_deferred",
5452 .mode = COMMAND_EXEC,
5453 .jim_handler = jim_target_examine_deferred,
5454 .help = "used internally for reset processing",
5455 },
5456 {
5457 .name = "arp_halt_gdb",
5458 .mode = COMMAND_EXEC,
5459 .jim_handler = jim_target_halt_gdb,
5460 .help = "used internally for reset processing to halt GDB",
5461 },
5462 {
5463 .name = "arp_poll",
5464 .mode = COMMAND_EXEC,
5465 .jim_handler = jim_target_poll,
5466 .help = "used internally for reset processing",
5467 },
5468 {
5469 .name = "arp_reset",
5470 .mode = COMMAND_EXEC,
5471 .jim_handler = jim_target_reset,
5472 .help = "used internally for reset processing",
5473 },
5474 {
5475 .name = "arp_halt",
5476 .mode = COMMAND_EXEC,
5477 .jim_handler = jim_target_halt,
5478 .help = "used internally for reset processing",
5479 },
5480 {
5481 .name = "arp_waitstate",
5482 .mode = COMMAND_EXEC,
5483 .jim_handler = jim_target_wait_state,
5484 .help = "used internally for reset processing",
5485 },
5486 {
5487 .name = "invoke-event",
5488 .mode = COMMAND_EXEC,
5489 .jim_handler = jim_target_invoke_event,
5490 .help = "invoke handler for specified event",
5491 .usage = "event_name",
5492 },
5493 COMMAND_REGISTRATION_DONE
5494 };
5495
5496 static int target_create(Jim_GetOptInfo *goi)
5497 {
5498 Jim_Obj *new_cmd;
5499 Jim_Cmd *cmd;
5500 const char *cp;
5501 int e;
5502 int x;
5503 struct target *target;
5504 struct command_context *cmd_ctx;
5505
5506 cmd_ctx = current_command_context(goi->interp);
5507 assert(cmd_ctx != NULL);
5508
5509 if (goi->argc < 3) {
5510 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5511 return JIM_ERR;
5512 }
5513
5514 /* COMMAND */
5515 Jim_GetOpt_Obj(goi, &new_cmd);
5516 /* does this command exist? */
5517 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
5518 if (cmd) {
5519 cp = Jim_GetString(new_cmd, NULL);
5520 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5521 return JIM_ERR;
5522 }
5523
5524 /* TYPE */
5525 e = Jim_GetOpt_String(goi, &cp, NULL);
5526 if (e != JIM_OK)
5527 return e;
5528 struct transport *tr = get_current_transport();
5529 if (tr->override_target) {
5530 e = tr->override_target(&cp);
5531 if (e != ERROR_OK) {
5532 LOG_ERROR("The selected transport doesn't support this target");
5533 return JIM_ERR;
5534 }
5535 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5536 }
5537 /* now does target type exist */
5538 for (x = 0 ; target_types[x] ; x++) {
5539 if (0 == strcmp(cp, target_types[x]->name)) {
5540 /* found */
5541 break;
5542 }
5543
5544 /* check for deprecated name */
5545 if (target_types[x]->deprecated_name) {
5546 if (0 == strcmp(cp, target_types[x]->deprecated_name)) {
5547 /* found */
5548 				LOG_WARNING("target name is deprecated, use: \'%s\'", target_types[x]->name);
5549 break;
5550 }
5551 }
5552 }
5553 if (target_types[x] == NULL) {
5554 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5555 for (x = 0 ; target_types[x] ; x++) {
5556 if (target_types[x + 1]) {
5557 Jim_AppendStrings(goi->interp,
5558 Jim_GetResult(goi->interp),
5559 target_types[x]->name,
5560 ", ", NULL);
5561 } else {
5562 Jim_AppendStrings(goi->interp,
5563 Jim_GetResult(goi->interp),
5564 " or ",
5565 target_types[x]->name, NULL);
5566 }
5567 }
5568 return JIM_ERR;
5569 }
5570
5571 /* Create it */
5572 target = calloc(1, sizeof(struct target));
5573 /* set target number */
5574 target->target_number = new_target_number();
5575 cmd_ctx->current_target = target;
5576
5577 /* allocate memory for each unique target type */
5578 target->type = calloc(1, sizeof(struct target_type));
5579
5580 memcpy(target->type, target_types[x], sizeof(struct target_type));
5581
5582 /* will be set by "-endian" */
5583 target->endianness = TARGET_ENDIAN_UNKNOWN;
5584
5585 /* default to first core, override with -coreid */
5586 target->coreid = 0;
5587
5588 target->working_area = 0x0;
5589 target->working_area_size = 0x0;
5590 target->working_areas = NULL;
5591 target->backup_working_area = 0;
5592
5593 target->state = TARGET_UNKNOWN;
5594 target->debug_reason = DBG_REASON_UNDEFINED;
5595 target->reg_cache = NULL;
5596 target->breakpoints = NULL;
5597 target->watchpoints = NULL;
5598 target->next = NULL;
5599 target->arch_info = NULL;
5600
5601 target->verbose_halt_msg = true;
5602
5603 target->halt_issued = false;
5604
5605 /* initialize trace information */
5606 target->trace_info = calloc(1, sizeof(struct trace));
5607
5608 target->dbgmsg = NULL;
5609 target->dbg_msg_enabled = 0;
5610
5611 target->endianness = TARGET_ENDIAN_UNKNOWN;
5612
5613 target->rtos = NULL;
5614 target->rtos_auto_detect = false;
5615
5616 /* Do the rest as "configure" options */
5617 goi->isconfigure = 1;
5618 e = target_configure(goi, target);
5619
5620 if (e == JIM_OK) {
5621 if (target->has_dap) {
5622 if (!target->dap_configured) {
5623 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
5624 e = JIM_ERR;
5625 }
5626 } else {
5627 if (!target->tap_configured) {
5628 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
5629 e = JIM_ERR;
5630 }
5631 }
5632 /* tap must be set after target was configured */
5633 if (target->tap == NULL)
5634 e = JIM_ERR;
5635 }
5636
5637 if (e != JIM_OK) {
5638 free(target->type);
5639 free(target);
5640 return e;
5641 }
5642
5643 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5644 /* default endian to little if not specified */
5645 target->endianness = TARGET_LITTLE_ENDIAN;
5646 }
5647
5648 cp = Jim_GetString(new_cmd, NULL);
5649 target->cmd_name = strdup(cp);
5650
5651 if (target->type->target_create) {
5652 e = (*(target->type->target_create))(target, goi->interp);
5653 if (e != ERROR_OK) {
5654 LOG_DEBUG("target_create failed");
5655 free(target->type);
5656 free(target->cmd_name);
5657 free(target);
5658 return JIM_ERR;
5659 }
5660 }
5661
5662 /* create the target specific commands */
5663 if (target->type->commands) {
5664 e = register_commands(cmd_ctx, NULL, target->type->commands);
5665 if (ERROR_OK != e)
5666 LOG_ERROR("unable to register '%s' commands", cp);
5667 }
5668
5669 /* append to end of list */
5670 {
5671 struct target **tpp;
5672 tpp = &(all_targets);
5673 while (*tpp)
5674 tpp = &((*tpp)->next);
5675 *tpp = target;
5676 }
5677
5678 /* now - create the new target name command */
5679 const struct command_registration target_subcommands[] = {
5680 {
5681 .chain = target_instance_command_handlers,
5682 },
5683 {
5684 .chain = target->type->commands,
5685 },
5686 COMMAND_REGISTRATION_DONE
5687 };
5688 const struct command_registration target_commands[] = {
5689 {
5690 .name = cp,
5691 .mode = COMMAND_ANY,
5692 .help = "target command group",
5693 .usage = "",
5694 .chain = target_subcommands,
5695 },
5696 COMMAND_REGISTRATION_DONE
5697 };
5698 e = register_commands(cmd_ctx, NULL, target_commands);
5699 if (ERROR_OK != e)
5700 return JIM_ERR;
5701
5702 struct command *c = command_find_in_context(cmd_ctx, cp);
5703 assert(c);
5704 command_set_handler_data(c, target);
5705
5706 return (ERROR_OK == e) ? JIM_OK : JIM_ERR;
5707 }
5708
5709 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5710 {
5711 if (argc != 1) {
5712 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5713 return JIM_ERR;
5714 }
5715 struct command_context *cmd_ctx = current_command_context(interp);
5716 assert(cmd_ctx != NULL);
5717
5718 Jim_SetResultString(interp, target_name(get_current_target(cmd_ctx)), -1);
5719 return JIM_OK;
5720 }
5721
5722 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5723 {
5724 if (argc != 1) {
5725 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5726 return JIM_ERR;
5727 }
5728 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5729 for (unsigned x = 0; NULL != target_types[x]; x++) {
5730 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5731 Jim_NewStringObj(interp, target_types[x]->name, -1));
5732 }
5733 return JIM_OK;
5734 }
5735
5736 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5737 {
5738 if (argc != 1) {
5739 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5740 return JIM_ERR;
5741 }
5742 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5743 struct target *target = all_targets;
5744 while (target) {
5745 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5746 Jim_NewStringObj(interp, target_name(target), -1));
5747 target = target->next;
5748 }
5749 return JIM_OK;
5750 }
5751
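/*
 * Illustrative usage (target names are hypothetical): after two cores
 * have been created, "target smp core0 core1" links both targets into
 * one shared target_list and marks each of them as an SMP member.
 */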
5752 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5753 {
5754 int i;
5755 const char *targetname;
5756 int retval, len;
5757 struct target *target = (struct target *) NULL;
5758 struct target_list *head, *curr, *new;
5759 curr = (struct target_list *) NULL;
5760 head = (struct target_list *) NULL;
5761
5762 retval = 0;
5763 LOG_DEBUG("%d", argc);
5764 /* argv[1] = target to associate in smp
5765 	 * argv[2] = target to associate in smp
5766 * argv[3] ...
5767 */
5768
5769 for (i = 1; i < argc; i++) {
5770
5771 targetname = Jim_GetString(argv[i], &len);
5772 target = get_target(targetname);
5773 LOG_DEBUG("%s ", targetname);
5774 if (target) {
5775 new = malloc(sizeof(struct target_list));
5776 new->target = target;
5777 new->next = (struct target_list *)NULL;
5778 if (head == (struct target_list *)NULL) {
5779 head = new;
5780 curr = head;
5781 } else {
5782 curr->next = new;
5783 curr = new;
5784 }
5785 }
5786 }
5787 	/* now walk the list of CPUs and put each target in SMP mode */
5788 curr = head;
5789
5790 while (curr != (struct target_list *)NULL) {
5791 target = curr->target;
5792 target->smp = 1;
5793 target->head = head;
5794 curr = curr->next;
5795 }
5796
5797 if (target && target->rtos)
5798 retval = rtos_smp_init(head->target);
5799
5800 return retval;
5801 }
5802
5803
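/*
 * Thin Jim wrapper around target_create(); a typical invocation (names
 * are illustrative only) looks like:
 *
 *   target create my_chip.cpu cortex_m -chain-position my_chip.cpu
 *
 * At least a name, a type and one configure option must be supplied,
 * matching the "<name> <target_type> [<target_options> ...]" usage
 * reported by the handler below.
 */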
5804 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5805 {
5806 Jim_GetOptInfo goi;
5807 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5808 if (goi.argc < 3) {
5809 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5810 "<name> <target_type> [<target_options> ...]");
5811 return JIM_ERR;
5812 }
5813 return target_create(&goi);
5814 }
5815
5816 static const struct command_registration target_subcommand_handlers[] = {
5817 {
5818 .name = "init",
5819 .mode = COMMAND_CONFIG,
5820 .handler = handle_target_init_command,
5821 .help = "initialize targets",
5822 },
5823 {
5824 .name = "create",
5825 /* REVISIT this should be COMMAND_CONFIG ... */
5826 .mode = COMMAND_ANY,
5827 .jim_handler = jim_target_create,
5828 .usage = "name type '-chain-position' name [options ...]",
5829 .help = "Creates and selects a new target",
5830 },
5831 {
5832 .name = "current",
5833 .mode = COMMAND_ANY,
5834 .jim_handler = jim_target_current,
5835 .help = "Returns the currently selected target",
5836 },
5837 {
5838 .name = "types",
5839 .mode = COMMAND_ANY,
5840 .jim_handler = jim_target_types,
5841 .help = "Returns the available target types as "
5842 "a list of strings",
5843 },
5844 {
5845 .name = "names",
5846 .mode = COMMAND_ANY,
5847 .jim_handler = jim_target_names,
5848 .help = "Returns the names of all targets as a list of strings",
5849 },
5850 {
5851 .name = "smp",
5852 .mode = COMMAND_ANY,
5853 .jim_handler = jim_target_smp,
5854 .usage = "targetname1 targetname2 ...",
5855 .help = "gather several target in a smp list"
5856 },
5857
5858 COMMAND_REGISTRATION_DONE
5859 };
5860
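/*
 * Host-side image cache used by fast_load_image/fast_load: each FastLoad
 * entry holds one (possibly clipped) image section, and fastload[] below
 * keeps fastload_num such entries until fast_load writes them to the
 * target or free_fastload() releases them.
 */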
5861 struct FastLoad {
5862 target_addr_t address;
5863 uint8_t *data;
5864 int length;
5865
5866 };
5867
5868 static int fastload_num;
5869 static struct FastLoad *fastload;
5870
5871 static void free_fastload(void)
5872 {
5873 if (fastload != NULL) {
5874 int i;
5875 for (i = 0; i < fastload_num; i++) {
5876 if (fastload[i].data)
5877 free(fastload[i].data);
5878 }
5879 free(fastload);
5880 fastload = NULL;
5881 }
5882 }
5883
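/*
 * Stage an image in server memory without touching the target; the data
 * is written out later by the 'fast_load' command. Illustrative two-step
 * flow (file name and address are hypothetical):
 *
 *   fast_load_image firmware.bin 0x08000000 bin
 *   fast_load
 */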
5884 COMMAND_HANDLER(handle_fast_load_image_command)
5885 {
5886 uint8_t *buffer;
5887 size_t buf_cnt;
5888 uint32_t image_size;
5889 target_addr_t min_address = 0;
5890 target_addr_t max_address = -1;
5891 int i;
5892
5893 struct image image;
5894
5895 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
5896 &image, &min_address, &max_address);
5897 if (ERROR_OK != retval)
5898 return retval;
5899
5900 struct duration bench;
5901 duration_start(&bench);
5902
5903 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
5904 if (retval != ERROR_OK)
5905 return retval;
5906
5907 image_size = 0x0;
5908 retval = ERROR_OK;
5909 fastload_num = image.num_sections;
5910 fastload = malloc(sizeof(struct FastLoad)*image.num_sections);
5911 if (fastload == NULL) {
5912 command_print(CMD_CTX, "out of memory");
5913 image_close(&image);
5914 return ERROR_FAIL;
5915 }
5916 memset(fastload, 0, sizeof(struct FastLoad)*image.num_sections);
5917 for (i = 0; i < image.num_sections; i++) {
5918 buffer = malloc(image.sections[i].size);
5919 if (buffer == NULL) {
5920 command_print(CMD_CTX, "error allocating buffer for section (%d bytes)",
5921 (int)(image.sections[i].size));
5922 retval = ERROR_FAIL;
5923 break;
5924 }
5925
5926 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
5927 if (retval != ERROR_OK) {
5928 free(buffer);
5929 break;
5930 }
5931
5932 uint32_t offset = 0;
5933 uint32_t length = buf_cnt;
5934
5935 		/* DANGER!!! beware of unsigned comparison here!!! */
5936
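		/* Keep only the part of this section that falls inside
		 * [min_address, max_address]. Illustrative numbers: with
		 * base_address 0x100, buf_cnt 0x80 and min_address 0x120,
		 * offset becomes 0x20 and length shrinks to 0x60; the second
		 * clip below trims length if the section runs past max_address. */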
5937 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
5938 (image.sections[i].base_address < max_address)) {
5939 if (image.sections[i].base_address < min_address) {
5940 /* clip addresses below */
5941 offset += min_address-image.sections[i].base_address;
5942 length -= offset;
5943 }
5944
5945 if (image.sections[i].base_address + buf_cnt > max_address)
5946 length -= (image.sections[i].base_address + buf_cnt)-max_address;
5947
5948 fastload[i].address = image.sections[i].base_address + offset;
5949 fastload[i].data = malloc(length);
5950 if (fastload[i].data == NULL) {
5951 free(buffer);
5952 command_print(CMD_CTX, "error allocating buffer for section (%" PRIu32 " bytes)",
5953 length);
5954 retval = ERROR_FAIL;
5955 break;
5956 }
5957 memcpy(fastload[i].data, buffer + offset, length);
5958 fastload[i].length = length;
5959
5960 image_size += length;
5961 command_print(CMD_CTX, "%u bytes written at address 0x%8.8x",
5962 (unsigned int)length,
5963 ((unsigned int)(image.sections[i].base_address + offset)));
5964 }
5965
5966 free(buffer);
5967 }
5968
5969 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
5970 command_print(CMD_CTX, "Loaded %" PRIu32 " bytes "
5971 "in %fs (%0.3f KiB/s)", image_size,
5972 duration_elapsed(&bench), duration_kbps(&bench, image_size));
5973
5974 command_print(CMD_CTX,
5975 "WARNING: image has not been loaded to target!"
5976 "You can issue a 'fast_load' to finish loading.");
5977 }
5978
5979 image_close(&image);
5980
5981 if (retval != ERROR_OK)
5982 free_fastload();
5983
5984 return retval;
5985 }
5986
5987 COMMAND_HANDLER(handle_fast_load_command)
5988 {
5989 if (CMD_ARGC > 0)
5990 return ERROR_COMMAND_SYNTAX_ERROR;
5991 if (fastload == NULL) {
5992 LOG_ERROR("No image in memory");
5993 return ERROR_FAIL;
5994 }
5995 int i;
5996 int64_t ms = timeval_ms();
5997 int size = 0;
5998 int retval = ERROR_OK;
5999 for (i = 0; i < fastload_num; i++) {
6000 struct target *target = get_current_target(CMD_CTX);
6001 command_print(CMD_CTX, "Write to 0x%08x, length 0x%08x",
6002 (unsigned int)(fastload[i].address),
6003 (unsigned int)(fastload[i].length));
6004 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6005 if (retval != ERROR_OK)
6006 break;
6007 size += fastload[i].length;
6008 }
6009 if (retval == ERROR_OK) {
6010 int64_t after = timeval_ms();
6011 command_print(CMD_CTX, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6012 }
6013 return retval;
6014 }
6015
6016 static const struct command_registration target_command_handlers[] = {
6017 {
6018 .name = "targets",
6019 .handler = handle_targets_command,
6020 .mode = COMMAND_ANY,
6021 .help = "change current default target (one parameter) "
6022 "or prints table of all targets (no parameters)",
6023 .usage = "[target]",
6024 },
6025 {
6026 .name = "target",
6027 .mode = COMMAND_CONFIG,
6028 .help = "configure target",
6029
6030 .chain = target_subcommand_handlers,
6031 },
6032 COMMAND_REGISTRATION_DONE
6033 };
6034
6035 int target_register_commands(struct command_context *cmd_ctx)
6036 {
6037 return register_commands(cmd_ctx, NULL, target_command_handlers);
6038 }
6039
6040 static bool target_reset_nag = true;
6041
6042 bool get_target_reset_nag(void)
6043 {
6044 return target_reset_nag;
6045 }
6046
6047 COMMAND_HANDLER(handle_target_reset_nag)
6048 {
6049 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6050 &target_reset_nag, "Nag after each reset about options to improve "
6051 "performance");
6052 }
6053
6054 COMMAND_HANDLER(handle_ps_command)
6055 {
6056 struct target *target = get_current_target(CMD_CTX);
6057 char *display;
6058 if (target->state != TARGET_HALTED) {
6059 LOG_INFO("target not halted !!");
6060 return ERROR_OK;
6061 }
6062
6063 if ((target->rtos) && (target->rtos->type)
6064 && (target->rtos->type->ps_command)) {
6065 display = target->rtos->type->ps_command(target);
6066 command_print(CMD_CTX, "%s", display);
6067 free(display);
6068 return ERROR_OK;
6069 } else {
6070 LOG_INFO("failed");
6071 return ERROR_TARGET_FAILURE;
6072 }
6073 }
6074
6075 static void binprint(struct command_context *cmd_ctx, const char *text, const uint8_t *buf, int size)
6076 {
6077 if (text != NULL)
6078 command_print_sameline(cmd_ctx, "%s", text);
6079 for (int i = 0; i < size; i++)
6080 command_print_sameline(cmd_ctx, " %02x", buf[i]);
6081 command_print(cmd_ctx, " ");
6082 }
6083
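/*
 * Exercise target_read_memory()/target_write_memory() with 1/2/4 byte
 * accesses at every target offset (0..3) and host buffer alignment,
 * comparing each transfer against a host-side replay of the expected
 * data. Illustrative invocation (size in bytes is hypothetical):
 *
 *   test_mem_access 1024
 */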
6084 COMMAND_HANDLER(handle_test_mem_access_command)
6085 {
6086 struct target *target = get_current_target(CMD_CTX);
6087 uint32_t test_size;
6088 int retval = ERROR_OK;
6089
6090 if (target->state != TARGET_HALTED) {
6091 LOG_INFO("target not halted !!");
6092 return ERROR_FAIL;
6093 }
6094
6095 if (CMD_ARGC != 1)
6096 return ERROR_COMMAND_SYNTAX_ERROR;
6097
6098 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6099
6100 /* Test reads */
6101 size_t num_bytes = test_size + 4;
6102
6103 struct working_area *wa = NULL;
6104 retval = target_alloc_working_area(target, num_bytes, &wa);
6105 if (retval != ERROR_OK) {
6106 LOG_ERROR("Not enough working area");
6107 return ERROR_FAIL;
6108 }
6109
6110 uint8_t *test_pattern = malloc(num_bytes);
6111
6112 for (size_t i = 0; i < num_bytes; i++)
6113 test_pattern[i] = rand();
6114
6115 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6116 if (retval != ERROR_OK) {
6117 LOG_ERROR("Test pattern write failed");
6118 goto out;
6119 }
6120
6121 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6122 for (int size = 1; size <= 4; size *= 2) {
6123 for (int offset = 0; offset < 4; offset++) {
6124 uint32_t count = test_size / size;
6125 size_t host_bufsiz = (count + 2) * size + host_offset;
6126 uint8_t *read_ref = malloc(host_bufsiz);
6127 uint8_t *read_buf = malloc(host_bufsiz);
6128
6129 for (size_t i = 0; i < host_bufsiz; i++) {
6130 read_ref[i] = rand();
6131 read_buf[i] = read_ref[i];
6132 }
6133 command_print_sameline(CMD_CTX,
6134 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6135 size, offset, host_offset ? "un" : "");
6136
6137 struct duration bench;
6138 duration_start(&bench);
6139
6140 retval = target_read_memory(target, wa->address + offset, size, count,
6141 read_buf + size + host_offset);
6142
6143 duration_measure(&bench);
6144
6145 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6146 command_print(CMD_CTX, "Unsupported alignment");
6147 goto next;
6148 } else if (retval != ERROR_OK) {
6149 command_print(CMD_CTX, "Memory read failed");
6150 goto next;
6151 }
6152
6153 /* replay on host */
6154 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6155
6156 /* check result */
6157 int result = memcmp(read_ref, read_buf, host_bufsiz);
6158 if (result == 0) {
6159 command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)",
6160 duration_elapsed(&bench),
6161 duration_kbps(&bench, count * size));
6162 } else {
6163 command_print(CMD_CTX, "Compare failed");
6164 binprint(CMD_CTX, "ref:", read_ref, host_bufsiz);
6165 binprint(CMD_CTX, "buf:", read_buf, host_bufsiz);
6166 }
6167 next:
6168 free(read_ref);
6169 free(read_buf);
6170 }
6171 }
6172 }
6173
6174 out:
6175 free(test_pattern);
6176
6177 if (wa != NULL)
6178 target_free_working_area(target, wa);
6179
6180 /* Test writes */
6181 num_bytes = test_size + 4 + 4 + 4;
6182
6183 retval = target_alloc_working_area(target, num_bytes, &wa);
6184 if (retval != ERROR_OK) {
6185 LOG_ERROR("Not enough working area");
6186 return ERROR_FAIL;
6187 }
6188
6189 test_pattern = malloc(num_bytes);
6190
6191 for (size_t i = 0; i < num_bytes; i++)
6192 test_pattern[i] = rand();
6193
6194 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6195 for (int size = 1; size <= 4; size *= 2) {
6196 for (int offset = 0; offset < 4; offset++) {
6197 uint32_t count = test_size / size;
6198 size_t host_bufsiz = count * size + host_offset;
6199 uint8_t *read_ref = malloc(num_bytes);
6200 uint8_t *read_buf = malloc(num_bytes);
6201 uint8_t *write_buf = malloc(host_bufsiz);
6202
6203 for (size_t i = 0; i < host_bufsiz; i++)
6204 write_buf[i] = rand();
6205 command_print_sameline(CMD_CTX,
6206 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6207 size, offset, host_offset ? "un" : "");
6208
6209 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6210 if (retval != ERROR_OK) {
6211 command_print(CMD_CTX, "Test pattern write failed");
6212 goto nextw;
6213 }
6214
6215 /* replay on host */
6216 memcpy(read_ref, test_pattern, num_bytes);
6217 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6218
6219 struct duration bench;
6220 duration_start(&bench);
6221
6222 retval = target_write_memory(target, wa->address + size + offset, size, count,
6223 write_buf + host_offset);
6224
6225 duration_measure(&bench);
6226
6227 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6228 command_print(CMD_CTX, "Unsupported alignment");
6229 goto nextw;
6230 } else if (retval != ERROR_OK) {
6231 command_print(CMD_CTX, "Memory write failed");
6232 goto nextw;
6233 }
6234
6235 /* read back */
6236 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6237 if (retval != ERROR_OK) {
6238 command_print(CMD_CTX, "Test pattern write failed");
6239 goto nextw;
6240 }
6241
6242 /* check result */
6243 int result = memcmp(read_ref, read_buf, num_bytes);
6244 if (result == 0) {
6245 command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)",
6246 duration_elapsed(&bench),
6247 duration_kbps(&bench, count * size));
6248 } else {
6249 command_print(CMD_CTX, "Compare failed");
6250 binprint(CMD_CTX, "ref:", read_ref, num_bytes);
6251 binprint(CMD_CTX, "buf:", read_buf, num_bytes);
6252 }
6253 nextw:
6254 free(read_ref);
6255 free(read_buf);
6256 }
6257 }
6258 }
6259
6260 free(test_pattern);
6261
6262 if (wa != NULL)
6263 target_free_working_area(target, wa);
6264 return retval;
6265 }
6266
6267 static const struct command_registration target_exec_command_handlers[] = {
6268 {
6269 .name = "fast_load_image",
6270 .handler = handle_fast_load_image_command,
6271 .mode = COMMAND_ANY,
6272 .help = "Load image into server memory for later use by "
6273 "fast_load; primarily for profiling",
6274 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6275 "[min_address [max_length]]",
6276 },
6277 {
6278 .name = "fast_load",
6279 .handler = handle_fast_load_command,
6280 .mode = COMMAND_EXEC,
6281 .help = "loads active fast load image to current target "
6282 "- mainly for profiling purposes",
6283 .usage = "",
6284 },
6285 {
6286 .name = "profile",
6287 .handler = handle_profile_command,
6288 .mode = COMMAND_EXEC,
6289 .usage = "seconds filename [start end]",
6290 .help = "profiling samples the CPU PC",
6291 },
6292 /** @todo don't register virt2phys() unless target supports it */
6293 {
6294 .name = "virt2phys",
6295 .handler = handle_virt2phys_command,
6296 .mode = COMMAND_ANY,
6297 .help = "translate a virtual address into a physical address",
6298 .usage = "virtual_address",
6299 },
6300 {
6301 .name = "reg",
6302 .handler = handle_reg_command,
6303 .mode = COMMAND_EXEC,
6304 .help = "display (reread from target with \"force\") or set a register; "
6305 "with no arguments, displays all registers and their values",
6306 .usage = "[(register_number|register_name) [(value|'force')]]",
6307 },
6308 {
6309 .name = "poll",
6310 .handler = handle_poll_command,
6311 .mode = COMMAND_EXEC,
6312 .help = "poll target state; or reconfigure background polling",
6313 .usage = "['on'|'off']",
6314 },
6315 {
6316 .name = "wait_halt",
6317 .handler = handle_wait_halt_command,
6318 .mode = COMMAND_EXEC,
6319 .help = "wait up to the specified number of milliseconds "
6320 "(default 5000) for a previously requested halt",
6321 .usage = "[milliseconds]",
6322 },
6323 {
6324 .name = "halt",
6325 .handler = handle_halt_command,
6326 .mode = COMMAND_EXEC,
6327 .help = "request target to halt, then wait up to the specified"
6328 "number of milliseconds (default 5000) for it to complete",
6329 .usage = "[milliseconds]",
6330 },
6331 {
6332 .name = "resume",
6333 .handler = handle_resume_command,
6334 .mode = COMMAND_EXEC,
6335 .help = "resume target execution from current PC or address",
6336 .usage = "[address]",
6337 },
6338 {
6339 .name = "reset",
6340 .handler = handle_reset_command,
6341 .mode = COMMAND_EXEC,
6342 .usage = "[run|halt|init]",
6343 .help = "Reset all targets into the specified mode."
6344 "Default reset mode is run, if not given.",
6345 },
6346 {
6347 .name = "soft_reset_halt",
6348 .handler = handle_soft_reset_halt_command,
6349 .mode = COMMAND_EXEC,
6350 .usage = "",
6351 .help = "halt the target and do a soft reset",
6352 },
6353 {
6354 .name = "step",
6355 .handler = handle_step_command,
6356 .mode = COMMAND_EXEC,
6357 .help = "step one instruction from current PC or address",
6358 .usage = "[address]",
6359 },
6360 {
6361 .name = "mdd",
6362 .handler = handle_md_command,
6363 .mode = COMMAND_EXEC,
6364 .help = "display memory words",
6365 .usage = "['phys'] address [count]",
6366 },
6367 {
6368 .name = "mdw",
6369 .handler = handle_md_command,
6370 .mode = COMMAND_EXEC,
6371 .help = "display memory words",
6372 .usage = "['phys'] address [count]",
6373 },
6374 {
6375 .name = "mdh",
6376 .handler = handle_md_command,
6377 .mode = COMMAND_EXEC,
6378 .help = "display memory half-words",
6379 .usage = "['phys'] address [count]",
6380 },
6381 {
6382 .name = "mdb",
6383 .handler = handle_md_command,
6384 .mode = COMMAND_EXEC,
6385 .help = "display memory bytes",
6386 .usage = "['phys'] address [count]",
6387 },
6388 {
6389 .name = "mwd",
6390 .handler = handle_mw_command,
6391 .mode = COMMAND_EXEC,
6392 .help = "write memory word",
6393 .usage = "['phys'] address value [count]",
6394 },
6395 {
6396 .name = "mww",
6397 .handler = handle_mw_command,
6398 .mode = COMMAND_EXEC,
6399 .help = "write memory word",
6400 .usage = "['phys'] address value [count]",
6401 },
6402 {
6403 .name = "mwh",
6404 .handler = handle_mw_command,
6405 .mode = COMMAND_EXEC,
6406 .help = "write memory half-word",
6407 .usage = "['phys'] address value [count]",
6408 },
6409 {
6410 .name = "mwb",
6411 .handler = handle_mw_command,
6412 .mode = COMMAND_EXEC,
6413 .help = "write memory byte",
6414 .usage = "['phys'] address value [count]",
6415 },
6416 {
6417 .name = "bp",
6418 .handler = handle_bp_command,
6419 .mode = COMMAND_EXEC,
6420 .help = "list or set hardware or software breakpoint",
6421 .usage = "<address> [<asid>] <length> ['hw'|'hw_ctx']",
6422 },
6423 {
6424 .name = "rbp",
6425 .handler = handle_rbp_command,
6426 .mode = COMMAND_EXEC,
6427 .help = "remove breakpoint",
6428 .usage = "address",
6429 },
6430 {
6431 .name = "wp",
6432 .handler = handle_wp_command,
6433 .mode = COMMAND_EXEC,
6434 .help = "list (no params) or create watchpoints",
6435 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6436 },
6437 {
6438 .name = "rwp",
6439 .handler = handle_rwp_command,
6440 .mode = COMMAND_EXEC,
6441 .help = "remove watchpoint",
6442 .usage = "address",
6443 },
6444 {
6445 .name = "load_image",
6446 .handler = handle_load_image_command,
6447 .mode = COMMAND_EXEC,
6448 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6449 "[min_address] [max_length]",
6450 },
6451 {
6452 .name = "dump_image",
6453 .handler = handle_dump_image_command,
6454 .mode = COMMAND_EXEC,
6455 .usage = "filename address size",
6456 },
6457 {
6458 .name = "verify_image_checksum",
6459 .handler = handle_verify_image_checksum_command,
6460 .mode = COMMAND_EXEC,
6461 .usage = "filename [offset [type]]",
6462 },
6463 {
6464 .name = "verify_image",
6465 .handler = handle_verify_image_command,
6466 .mode = COMMAND_EXEC,
6467 .usage = "filename [offset [type]]",
6468 },
6469 {
6470 .name = "test_image",
6471 .handler = handle_test_image_command,
6472 .mode = COMMAND_EXEC,
6473 .usage = "filename [offset [type]]",
6474 },
6475 {
6476 .name = "mem2array",
6477 .mode = COMMAND_EXEC,
6478 .jim_handler = jim_mem2array,
6479 .help = "read 8/16/32 bit memory and return as a TCL array "
6480 "for script processing",
6481 .usage = "arrayname bitwidth address count",
6482 },
6483 {
6484 .name = "array2mem",
6485 .mode = COMMAND_EXEC,
6486 .jim_handler = jim_array2mem,
6487 .help = "convert a TCL array to memory locations "
6488 "and write the 8/16/32 bit values",
6489 .usage = "arrayname bitwidth address count",
6490 },
6491 {
6492 .name = "reset_nag",
6493 .handler = handle_target_reset_nag,
6494 .mode = COMMAND_ANY,
6495 .help = "Nag after each reset about options that could have been "
6496 "enabled to improve performance. ",
6497 .usage = "['enable'|'disable']",
6498 },
6499 {
6500 .name = "ps",
6501 .handler = handle_ps_command,
6502 .mode = COMMAND_EXEC,
6503 .help = "list all tasks ",
6504 .usage = " ",
6505 },
6506 {
6507 .name = "test_mem_access",
6508 .handler = handle_test_mem_access_command,
6509 .mode = COMMAND_EXEC,
6510 .help = "Test the target's memory access functions",
6511 .usage = "size",
6512 },
6513
6514 COMMAND_REGISTRATION_DONE
6515 };
6516 static int target_register_user_commands(struct command_context *cmd_ctx)
6517 {
6518 int retval = ERROR_OK;
6519 retval = target_request_register_commands(cmd_ctx);
6520 if (retval != ERROR_OK)
6521 return retval;
6522
6523 retval = trace_register_commands(cmd_ctx);
6524 if (retval != ERROR_OK)
6525 return retval;
6526
6527
6528 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6529 }
