src/target/target.c (openocd.git)
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/time_support.h>
45 #include <jtag/jtag.h>
46 #include <flash/nor/core.h>
47
48 #include "target.h"
49 #include "target_type.h"
50 #include "target_request.h"
51 #include "breakpoints.h"
52 #include "register.h"
53 #include "trace.h"
54 #include "image.h"
55 #include "rtos/rtos.h"
56 #include "transport/transport.h"
57 #include "arm_cti.h"
58
59 /* default halt wait timeout (ms) */
60 #define DEFAULT_HALT_TIMEOUT 5000
61
62 static int target_read_buffer_default(struct target *target, target_addr_t address,
63 uint32_t count, uint8_t *buffer);
64 static int target_write_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, const uint8_t *buffer);
66 static int target_array2mem(Jim_Interp *interp, struct target *target,
67 int argc, Jim_Obj * const *argv);
68 static int target_mem2array(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_register_user_commands(struct command_context *cmd_ctx);
71 static int target_get_gdb_fileio_info_default(struct target *target,
72 struct gdb_fileio_info *fileio_info);
73 static int target_gdb_fileio_end_default(struct target *target, int retcode,
74 int fileio_errno, bool ctrl_c);
75 static int target_profiling_default(struct target *target, uint32_t *samples,
76 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds);
77
78 /* targets */
79 extern struct target_type arm7tdmi_target;
80 extern struct target_type arm720t_target;
81 extern struct target_type arm9tdmi_target;
82 extern struct target_type arm920t_target;
83 extern struct target_type arm966e_target;
84 extern struct target_type arm946e_target;
85 extern struct target_type arm926ejs_target;
86 extern struct target_type fa526_target;
87 extern struct target_type feroceon_target;
88 extern struct target_type dragonite_target;
89 extern struct target_type xscale_target;
90 extern struct target_type cortexm_target;
91 extern struct target_type cortexa_target;
92 extern struct target_type aarch64_target;
93 extern struct target_type cortexr4_target;
94 extern struct target_type arm11_target;
95 extern struct target_type ls1_sap_target;
96 extern struct target_type mips_m4k_target;
97 extern struct target_type avr_target;
98 extern struct target_type dsp563xx_target;
99 extern struct target_type dsp5680xx_target;
100 extern struct target_type testee_target;
101 extern struct target_type avr32_ap7k_target;
102 extern struct target_type hla_target;
103 extern struct target_type nds32_v2_target;
104 extern struct target_type nds32_v3_target;
105 extern struct target_type nds32_v3m_target;
106 extern struct target_type or1k_target;
107 extern struct target_type quark_x10xx_target;
108 extern struct target_type quark_d20xx_target;
109 extern struct target_type stm8_target;
110 extern struct target_type riscv_target;
111 extern struct target_type mem_ap_target;
112 extern struct target_type esirisc_target;
113
114 static struct target_type *target_types[] = {
115 &arm7tdmi_target,
116 &arm9tdmi_target,
117 &arm920t_target,
118 &arm720t_target,
119 &arm966e_target,
120 &arm946e_target,
121 &arm926ejs_target,
122 &fa526_target,
123 &feroceon_target,
124 &dragonite_target,
125 &xscale_target,
126 &cortexm_target,
127 &cortexa_target,
128 &cortexr4_target,
129 &arm11_target,
130 &ls1_sap_target,
131 &mips_m4k_target,
132 &avr_target,
133 &dsp563xx_target,
134 &dsp5680xx_target,
135 &testee_target,
136 &avr32_ap7k_target,
137 &hla_target,
138 &nds32_v2_target,
139 &nds32_v3_target,
140 &nds32_v3m_target,
141 &or1k_target,
142 &quark_x10xx_target,
143 &quark_d20xx_target,
144 &stm8_target,
145 &riscv_target,
146 &mem_ap_target,
147 &esirisc_target,
148 #if BUILD_TARGET64
149 &aarch64_target,
150 #endif
151 NULL,
152 };
153
154 struct target *all_targets;
155 static struct target_event_callback *target_event_callbacks;
156 static struct target_timer_callback *target_timer_callbacks;
157 LIST_HEAD(target_reset_callback_list);
158 LIST_HEAD(target_trace_callback_list);
159 static const int polling_interval = 100;
160
161 static const Jim_Nvp nvp_assert[] = {
162 { .name = "assert", NVP_ASSERT },
163 { .name = "deassert", NVP_DEASSERT },
164 { .name = "T", NVP_ASSERT },
165 { .name = "F", NVP_DEASSERT },
166 { .name = "t", NVP_ASSERT },
167 { .name = "f", NVP_DEASSERT },
168 { .name = NULL, .value = -1 }
169 };
170
171 static const Jim_Nvp nvp_error_target[] = {
172 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
173 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
174 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
175 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
176 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
177 { .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
178 { .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
179 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
180 { .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
181 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
182 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
183 { .value = -1, .name = NULL }
184 };
185
186 static const char *target_strerror_safe(int err)
187 {
188 const Jim_Nvp *n;
189
190 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
191 if (n->name == NULL)
192 return "unknown";
193 else
194 return n->name;
195 }
196
197 static const Jim_Nvp nvp_target_event[] = {
198
199 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
200 { .value = TARGET_EVENT_HALTED, .name = "halted" },
201 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
202 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
203 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
204
205 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
206 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
207
208 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
209 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
210 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
211 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
212 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
213 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
214 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
215 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
216
217 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
218 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
219
220 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
221 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
222
223 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
224 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
225
226 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
227 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },
228
229 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
230 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },
231
232 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
233
234 { .name = NULL, .value = -1 }
235 };
236
237 static const Jim_Nvp nvp_target_state[] = {
238 { .name = "unknown", .value = TARGET_UNKNOWN },
239 { .name = "running", .value = TARGET_RUNNING },
240 { .name = "halted", .value = TARGET_HALTED },
241 { .name = "reset", .value = TARGET_RESET },
242 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
243 { .name = NULL, .value = -1 },
244 };
245
246 static const Jim_Nvp nvp_target_debug_reason[] = {
247 { .name = "debug-request" , .value = DBG_REASON_DBGRQ },
248 { .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT },
249 { .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT },
250 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
251 { .name = "single-step" , .value = DBG_REASON_SINGLESTEP },
252 { .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED },
253 { .name = "program-exit" , .value = DBG_REASON_EXIT },
254 { .name = "exception-catch" , .value = DBG_REASON_EXC_CATCH },
255 { .name = "undefined" , .value = DBG_REASON_UNDEFINED },
256 { .name = NULL, .value = -1 },
257 };
258
259 static const Jim_Nvp nvp_target_endian[] = {
260 { .name = "big", .value = TARGET_BIG_ENDIAN },
261 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
262 { .name = "be", .value = TARGET_BIG_ENDIAN },
263 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
264 { .name = NULL, .value = -1 },
265 };
266
267 static const Jim_Nvp nvp_reset_modes[] = {
268 { .name = "unknown", .value = RESET_UNKNOWN },
269 { .name = "run" , .value = RESET_RUN },
270 { .name = "halt" , .value = RESET_HALT },
271 { .name = "init" , .value = RESET_INIT },
272 { .name = NULL , .value = -1 },
273 };
274
275 const char *debug_reason_name(struct target *t)
276 {
277 const char *cp;
278
279 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
280 t->debug_reason)->name;
281 if (!cp) {
282 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
283 cp = "(*BUG*unknown*BUG*)";
284 }
285 return cp;
286 }
287
288 const char *target_state_name(struct target *t)
289 {
290 const char *cp;
291 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
292 if (!cp) {
293 LOG_ERROR("Invalid target state: %d", (int)(t->state));
294 cp = "(*BUG*unknown*BUG*)";
295 }
296
297 if (!target_was_examined(t) && t->defer_examine)
298 cp = "examine deferred";
299
300 return cp;
301 }
302
303 const char *target_event_name(enum target_event event)
304 {
305 const char *cp;
306 cp = Jim_Nvp_value2name_simple(nvp_target_event, event)->name;
307 if (!cp) {
308 LOG_ERROR("Invalid target event: %d", (int)(event));
309 cp = "(*BUG*unknown*BUG*)";
310 }
311 return cp;
312 }
313
314 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
315 {
316 const char *cp;
317 cp = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
318 if (!cp) {
319 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
320 cp = "(*BUG*unknown*BUG*)";
321 }
322 return cp;
323 }
324
325 /* determine the number of the new target */
326 static int new_target_number(void)
327 {
328 struct target *t;
329 int x;
330
331 /* number is 0 based */
332 x = -1;
333 t = all_targets;
334 while (t) {
335 if (x < t->target_number)
336 x = t->target_number;
337 t = t->next;
338 }
339 return x + 1;
340 }
341
342 /* read a uint64_t from a buffer in target memory endianness */
343 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
344 {
345 if (target->endianness == TARGET_LITTLE_ENDIAN)
346 return le_to_h_u64(buffer);
347 else
348 return be_to_h_u64(buffer);
349 }
350
351 /* read a uint32_t from a buffer in target memory endianness */
352 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
353 {
354 if (target->endianness == TARGET_LITTLE_ENDIAN)
355 return le_to_h_u32(buffer);
356 else
357 return be_to_h_u32(buffer);
358 }
359
360 /* read a uint24_t from a buffer in target memory endianness */
361 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
362 {
363 if (target->endianness == TARGET_LITTLE_ENDIAN)
364 return le_to_h_u24(buffer);
365 else
366 return be_to_h_u24(buffer);
367 }
368
369 /* read a uint16_t from a buffer in target memory endianness */
370 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
371 {
372 if (target->endianness == TARGET_LITTLE_ENDIAN)
373 return le_to_h_u16(buffer);
374 else
375 return be_to_h_u16(buffer);
376 }
377
378 /* read a uint8_t from a buffer in target memory endianness */
379 static __attribute__((unused)) uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
380 {
381 return *buffer & 0x0ff;
382 }
383
384 /* write a uint64_t to a buffer in target memory endianness */
385 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
386 {
387 if (target->endianness == TARGET_LITTLE_ENDIAN)
388 h_u64_to_le(buffer, value);
389 else
390 h_u64_to_be(buffer, value);
391 }
392
393 /* write a uint32_t to a buffer in target memory endianness */
394 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
395 {
396 if (target->endianness == TARGET_LITTLE_ENDIAN)
397 h_u32_to_le(buffer, value);
398 else
399 h_u32_to_be(buffer, value);
400 }
401
402 /* write a uint24_t to a buffer in target memory endianness */
403 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
404 {
405 if (target->endianness == TARGET_LITTLE_ENDIAN)
406 h_u24_to_le(buffer, value);
407 else
408 h_u24_to_be(buffer, value);
409 }
410
411 /* write a uint16_t to a buffer in target memory endianness */
412 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
413 {
414 if (target->endianness == TARGET_LITTLE_ENDIAN)
415 h_u16_to_le(buffer, value);
416 else
417 h_u16_to_be(buffer, value);
418 }
419
420 /* write a uint8_t to a buffer in target memory endianness */
421 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
422 {
423 *buffer = value;
424 }
425
426 /* read a uint64_t array from a buffer in target memory endianness */
427 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
428 {
429 uint32_t i;
430 for (i = 0; i < count; i++)
431 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
432 }
433
434 /* read a uint32_t array from a buffer in target memory endianness */
435 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
436 {
437 uint32_t i;
438 for (i = 0; i < count; i++)
439 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
440 }
441
442 /* read a uint16_t array from a buffer in target memory endianness */
443 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
444 {
445 uint32_t i;
446 for (i = 0; i < count; i++)
447 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
448 }
449
450 /* write a uint64_t array to a buffer in target memory endianness */
451 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
452 {
453 uint32_t i;
454 for (i = 0; i < count; i++)
455 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
456 }
457
458 /* write a uint32_t array to a buffer in target memory endianness */
459 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
460 {
461 uint32_t i;
462 for (i = 0; i < count; i++)
463 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
464 }
465
466 /* write a uint16_t array to a buffer in target memory endianness */
467 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
468 {
469 uint32_t i;
470 for (i = 0; i < count; i++)
471 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
472 }
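
/* A usage sketch for the conversion helpers above: round-trip a 32-bit
 * value through a raw byte buffer laid out in the target's byte order
 * ("target" stands for any configured struct target pointer):
 *
 *   uint8_t buf[4];
 *   target_buffer_set_u32(target, buf, 0x12345678);
 *   uint32_t v = target_buffer_get_u32(target, buf);   // v == 0x12345678
 *
 * On a little-endian target buf holds { 0x78, 0x56, 0x34, 0x12 };
 * on a big-endian target the bytes are reversed.
 */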
473
474 /* return a pointer to a configured target; id is name or number */
475 struct target *get_target(const char *id)
476 {
477 struct target *target;
478
479 /* try as Tcl target name */
480 for (target = all_targets; target; target = target->next) {
481 if (target_name(target) == NULL)
482 continue;
483 if (strcmp(id, target_name(target)) == 0)
484 return target;
485 }
486
487 /* It's OK to remove this fallback sometime after August 2010 or so */
488
489 /* no match, try as number */
490 unsigned num;
491 if (parse_uint(id, &num) != ERROR_OK)
492 return NULL;
493
494 for (target = all_targets; target; target = target->next) {
495 if (target->target_number == (int)num) {
496 LOG_WARNING("use '%s' as target identifier, not '%u'",
497 target_name(target), num);
498 return target;
499 }
500 }
501
502 return NULL;
503 }
504
505 /* returns a pointer to the n-th configured target */
506 struct target *get_target_by_num(int num)
507 {
508 struct target *target = all_targets;
509
510 while (target) {
511 if (target->target_number == num)
512 return target;
513 target = target->next;
514 }
515
516 return NULL;
517 }
518
519 struct target *get_current_target(struct command_context *cmd_ctx)
520 {
521 struct target *target = get_current_target_or_null(cmd_ctx);
522
523 if (target == NULL) {
524 LOG_ERROR("BUG: current_target out of bounds");
525 exit(-1);
526 }
527
528 return target;
529 }
530
531 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
532 {
533 return cmd_ctx->current_target_override
534 ? cmd_ctx->current_target_override
535 : cmd_ctx->current_target;
536 }
537
538 int target_poll(struct target *target)
539 {
540 int retval;
541
542 /* We can't poll until after examine */
543 if (!target_was_examined(target)) {
544 /* Fail silently lest we pollute the log */
545 return ERROR_FAIL;
546 }
547
548 retval = target->type->poll(target);
549 if (retval != ERROR_OK)
550 return retval;
551
552 if (target->halt_issued) {
553 if (target->state == TARGET_HALTED)
554 target->halt_issued = false;
555 else {
556 int64_t t = timeval_ms() - target->halt_issued_time;
557 if (t > DEFAULT_HALT_TIMEOUT) {
558 target->halt_issued = false;
559 LOG_INFO("Halt timed out, wake up GDB.");
560 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
561 }
562 }
563 }
564
565 return ERROR_OK;
566 }
567
568 int target_halt(struct target *target)
569 {
570 int retval;
571 /* We can't halt until after examine */
572 if (!target_was_examined(target)) {
573 LOG_ERROR("Target not examined yet");
574 return ERROR_FAIL;
575 }
576
577 retval = target->type->halt(target);
578 if (retval != ERROR_OK)
579 return retval;
580
581 target->halt_issued = true;
582 target->halt_issued_time = timeval_ms();
583
584 return ERROR_OK;
585 }
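
/* target_halt() only issues the request; the halt completes (or times
 * out, see the DEFAULT_HALT_TIMEOUT handling in target_poll()) only
 * asynchronously. A caller-side sketch, with an illustrative local
 * deadline:
 *
 *   if (target_halt(target) != ERROR_OK)
 *       return ERROR_FAIL;
 *   int64_t deadline = timeval_ms() + 1000;
 *   while (target->state != TARGET_HALTED) {
 *       if (target_poll(target) != ERROR_OK)
 *           return ERROR_FAIL;
 *       if (timeval_ms() > deadline)
 *           return ERROR_TARGET_TIMEOUT;
 *       alive_sleep(10);
 *   }
 */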
586
587 /**
588 * Make the target (re)start executing using its saved execution
589 * context (possibly with some modifications).
590 *
591 * @param target Which target should start executing.
592 * @param current True to use the target's saved program counter instead
593 * of the address parameter
594 * @param address Optionally used as the program counter.
595 * @param handle_breakpoints True iff breakpoints at the resumption PC
596 * should be skipped. (For example, maybe execution was stopped by
597 * such a breakpoint, in which case it would be counterproductive to
598 * let it re-trigger.)
599 * @param debug_execution False if all working areas allocated by OpenOCD
600 * should be released and/or restored to their original contents.
601 * (This would for example be true to run some downloaded "helper"
602 * algorithm code, which resides in one such working buffer and uses
603 * another for data storage.)
604 *
605 * @todo Resolve the ambiguity about what the "debug_execution" flag
606 * signifies. For example, Target implementations don't agree on how
607 * it relates to invalidation of the register cache, or to whether
608 * breakpoints and watchpoints should be enabled. (It would seem wrong
609 * to enable breakpoints when running downloaded "helper" algorithms
610 * (debug_execution true), since the breakpoints would be set to match
611 * target firmware being debugged, not the helper algorithm.... and
612 * enabling them could cause such helpers to malfunction (for example,
613 * by overwriting data with a breakpoint instruction). On the other
614 * hand the infrastructure for running such helpers might use this
615 * procedure but rely on hardware breakpoint to detect termination.)
616 */
617 int target_resume(struct target *target, int current, target_addr_t address,
618 int handle_breakpoints, int debug_execution)
619 {
620 int retval;
621
622 /* We can't resume until after examine */
623 if (!target_was_examined(target)) {
624 LOG_ERROR("Target not examined yet");
625 return ERROR_FAIL;
626 }
627
628 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
629
630 /* note that resume *must* be asynchronous. The CPU can halt before
631 * we poll. The CPU can even halt at the current PC as a result of
632 * a software breakpoint inserted by the application (possibly a bug).
633 */
634 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
635 if (retval != ERROR_OK)
636 return retval;
637
638 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
639
640 return retval;
641 }
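
/* Typical invocations (sketch; the literal address is illustrative):
 *
 *   // resume at the saved PC, stepping over any breakpoint there
 *   target_resume(target, 1, 0, 1, 0);
 *
 *   // resume at an explicit address, without breakpoint handling
 *   target_resume(target, 0, 0x20000000, 0, 0);
 */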
642
643 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
644 {
645 char buf[100];
646 int retval;
647 Jim_Nvp *n;
648 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
649 if (n->name == NULL) {
650 LOG_ERROR("invalid reset mode");
651 return ERROR_FAIL;
652 }
653
654 struct target *target;
655 for (target = all_targets; target; target = target->next)
656 target_call_reset_callbacks(target, reset_mode);
657
658 /* disable polling during reset to make reset event scripts
659 * more predictable, i.e. dr/irscan & pathmove in events will
660 * not have JTAG operations injected into the middle of a sequence.
661 */
662 bool save_poll = jtag_poll_get_enabled();
663
664 jtag_poll_set_enabled(false);
665
666 sprintf(buf, "ocd_process_reset %s", n->name);
667 retval = Jim_Eval(cmd->ctx->interp, buf);
668
669 jtag_poll_set_enabled(save_poll);
670
671 if (retval != JIM_OK) {
672 Jim_MakeErrorMessage(cmd->ctx->interp);
673 command_print(cmd->ctx, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
674 return ERROR_FAIL;
675 }
676
677 /* We want any events to be processed before the prompt */
678 retval = target_call_timer_callbacks_now();
679
680 for (target = all_targets; target; target = target->next) {
681 target->type->check_reset(target);
682 target->running_alg = false;
683 }
684
685 return retval;
686 }
687
688 static int identity_virt2phys(struct target *target,
689 target_addr_t virtual, target_addr_t *physical)
690 {
691 *physical = virtual;
692 return ERROR_OK;
693 }
694
695 static int no_mmu(struct target *target, int *enabled)
696 {
697 *enabled = 0;
698 return ERROR_OK;
699 }
700
701 static int default_examine(struct target *target)
702 {
703 target_set_examined(target);
704 return ERROR_OK;
705 }
706
707 /* no check by default */
708 static int default_check_reset(struct target *target)
709 {
710 return ERROR_OK;
711 }
712
713 int target_examine_one(struct target *target)
714 {
715 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
716
717 int retval = target->type->examine(target);
718 if (retval != ERROR_OK)
719 return retval;
720
721 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
722
723 return ERROR_OK;
724 }
725
726 static int jtag_enable_callback(enum jtag_event event, void *priv)
727 {
728 struct target *target = priv;
729
730 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
731 return ERROR_OK;
732
733 jtag_unregister_event_callback(jtag_enable_callback, target);
734
735 return target_examine_one(target);
736 }
737
738 /* Targets that correctly implement init + examine, i.e.
739 * no communication with target during init:
740 *
741 * XScale
742 */
743 int target_examine(void)
744 {
745 int retval = ERROR_OK;
746 struct target *target;
747
748 for (target = all_targets; target; target = target->next) {
749 /* defer examination, but don't skip it */
750 if (!target->tap->enabled) {
751 jtag_register_event_callback(jtag_enable_callback,
752 target);
753 continue;
754 }
755
756 if (target->defer_examine)
757 continue;
758
759 retval = target_examine_one(target);
760 if (retval != ERROR_OK)
761 return retval;
762 }
763 return retval;
764 }
765
766 const char *target_type_name(struct target *target)
767 {
768 return target->type->name;
769 }
770
771 static int target_soft_reset_halt(struct target *target)
772 {
773 if (!target_was_examined(target)) {
774 LOG_ERROR("Target not examined yet");
775 return ERROR_FAIL;
776 }
777 if (!target->type->soft_reset_halt) {
778 LOG_ERROR("Target %s does not support soft_reset_halt",
779 target_name(target));
780 return ERROR_FAIL;
781 }
782 return target->type->soft_reset_halt(target);
783 }
784
785 /**
786 * Downloads a target-specific native code algorithm to the target,
787 * and executes it. Note that some targets may need to set up, enable,
788 * and tear down a breakpoint (hard or soft) to detect algorithm
789 * termination, while others may support lower overhead schemes where
790 * soft breakpoints embedded in the algorithm automatically terminate the
791 * algorithm.
792 *
793 * @param target used to run the algorithm
794 * @param arch_info target-specific description of the algorithm.
795 */
796 int target_run_algorithm(struct target *target,
797 int num_mem_params, struct mem_param *mem_params,
798 int num_reg_params, struct reg_param *reg_param,
799 uint32_t entry_point, uint32_t exit_point,
800 int timeout_ms, void *arch_info)
801 {
802 int retval = ERROR_FAIL;
803
804 if (!target_was_examined(target)) {
805 LOG_ERROR("Target not examined yet");
806 goto done;
807 }
808 if (!target->type->run_algorithm) {
809 LOG_ERROR("Target type '%s' does not support %s",
810 target_type_name(target), __func__);
811 goto done;
812 }
813
814 target->running_alg = true;
815 retval = target->type->run_algorithm(target,
816 num_mem_params, mem_params,
817 num_reg_params, reg_param,
818 entry_point, exit_point, timeout_ms, arch_info);
819 target->running_alg = false;
820
821 done:
822 return retval;
823 }
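
/* Calling-convention sketch (the register name, entry/exit addresses,
 * "argument" and the arch_info variable are illustrative; the reg_param
 * helpers are declared in algorithm.h):
 *
 *   struct reg_param reg_params[1];
 *   init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
 *   buf_set_u32(reg_params[0].value, 0, 32, argument);
 *
 *   retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
 *           entry_point, exit_point, 1000, &arch_info);
 *
 *   uint32_t result = buf_get_u32(reg_params[0].value, 0, 32);
 *   destroy_reg_param(&reg_params[0]);
 */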
824
825 /**
826 * Executes a target-specific native code algorithm and leaves it running.
827 *
828 * @param target used to run the algorithm
829 * @param arch_info target-specific description of the algorithm.
830 */
831 int target_start_algorithm(struct target *target,
832 int num_mem_params, struct mem_param *mem_params,
833 int num_reg_params, struct reg_param *reg_params,
834 uint32_t entry_point, uint32_t exit_point,
835 void *arch_info)
836 {
837 int retval = ERROR_FAIL;
838
839 if (!target_was_examined(target)) {
840 LOG_ERROR("Target not examined yet");
841 goto done;
842 }
843 if (!target->type->start_algorithm) {
844 LOG_ERROR("Target type '%s' does not support %s",
845 target_type_name(target), __func__);
846 goto done;
847 }
848 if (target->running_alg) {
849 LOG_ERROR("Target is already running an algorithm");
850 goto done;
851 }
852
853 target->running_alg = true;
854 retval = target->type->start_algorithm(target,
855 num_mem_params, mem_params,
856 num_reg_params, reg_params,
857 entry_point, exit_point, arch_info);
858
859 done:
860 return retval;
861 }
862
863 /**
864 * Waits for an algorithm started with target_start_algorithm() to complete.
865 *
866 * @param target used to run the algorithm
867 * @param arch_info target-specific description of the algorithm.
868 */
869 int target_wait_algorithm(struct target *target,
870 int num_mem_params, struct mem_param *mem_params,
871 int num_reg_params, struct reg_param *reg_params,
872 uint32_t exit_point, int timeout_ms,
873 void *arch_info)
874 {
875 int retval = ERROR_FAIL;
876
877 if (!target->type->wait_algorithm) {
878 LOG_ERROR("Target type '%s' does not support %s",
879 target_type_name(target), __func__);
880 goto done;
881 }
882 if (!target->running_alg) {
883 LOG_ERROR("Target is not running an algorithm");
884 goto done;
885 }
886
887 retval = target->type->wait_algorithm(target,
888 num_mem_params, mem_params,
889 num_reg_params, reg_params,
890 exit_point, timeout_ms, arch_info);
891 if (retval != ERROR_TARGET_TIMEOUT)
892 target->running_alg = false;
893
894 done:
895 return retval;
896 }
897
898 /**
899 * Streams data to a circular buffer on target intended for consumption by code
900 * running asynchronously on target.
901 *
902 * This is intended for applications where target-specific native code runs
903 * on the target, receives data from the circular buffer, does something with
904 * it (most likely writing it to a flash memory), and advances the circular
905 * buffer pointer.
906 *
907 * This assumes that the helper algorithm has already been loaded to the target,
908 * but has not been started yet. Given memory and register parameters are passed
909 * to the algorithm.
910 *
911 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
912 * following format:
913 *
914 * [buffer_start + 0, buffer_start + 4):
915 * Write Pointer address (aka head). Written and updated by this
916 * routine when new data is written to the circular buffer.
917 * [buffer_start + 4, buffer_start + 8):
918 * Read Pointer address (aka tail). Updated by code running on the
919 * target after it consumes data.
920 * [buffer_start + 8, buffer_start + buffer_size):
921 * Circular buffer contents.
922 *
923 * See contrib/loaders/flash/stm32f1x.S for an example.
924 *
925 * @param target used to run the algorithm
926 * @param buffer address on the host where data to be sent is located
927 * @param count number of blocks to send
928 * @param block_size size in bytes of each block
929 * @param num_mem_params count of memory-based params to pass to algorithm
930 * @param mem_params memory-based params to pass to algorithm
931 * @param num_reg_params count of register-based params to pass to algorithm
932 * @param reg_params register-based params to pass to algorithm
933 * @param buffer_start address on the target of the circular buffer structure
934 * @param buffer_size size of the circular buffer structure
935 * @param entry_point address on the target to execute to start the algorithm
936 * @param exit_point address at which to set a breakpoint to catch the
937 * end of the algorithm; can be 0 if target triggers a breakpoint itself
938 */
939
940 int target_run_flash_async_algorithm(struct target *target,
941 const uint8_t *buffer, uint32_t count, int block_size,
942 int num_mem_params, struct mem_param *mem_params,
943 int num_reg_params, struct reg_param *reg_params,
944 uint32_t buffer_start, uint32_t buffer_size,
945 uint32_t entry_point, uint32_t exit_point, void *arch_info)
946 {
947 int retval;
948 int timeout = 0;
949
950 const uint8_t *buffer_orig = buffer;
951
952 /* Set up working area. First word is write pointer, second word is read pointer,
953 * rest is fifo data area. */
954 uint32_t wp_addr = buffer_start;
955 uint32_t rp_addr = buffer_start + 4;
956 uint32_t fifo_start_addr = buffer_start + 8;
957 uint32_t fifo_end_addr = buffer_start + buffer_size;
958
959 uint32_t wp = fifo_start_addr;
960 uint32_t rp = fifo_start_addr;
961
962 /* validate block_size is 2^n */
963 assert(!block_size || !(block_size & (block_size - 1)));
964
965 retval = target_write_u32(target, wp_addr, wp);
966 if (retval != ERROR_OK)
967 return retval;
968 retval = target_write_u32(target, rp_addr, rp);
969 if (retval != ERROR_OK)
970 return retval;
971
972 /* Start up algorithm on target and let it idle while writing the first chunk */
973 retval = target_start_algorithm(target, num_mem_params, mem_params,
974 num_reg_params, reg_params,
975 entry_point,
976 exit_point,
977 arch_info);
978
979 if (retval != ERROR_OK) {
980 LOG_ERROR("error starting target flash write algorithm");
981 return retval;
982 }
983
984 while (count > 0) {
985
986 retval = target_read_u32(target, rp_addr, &rp);
987 if (retval != ERROR_OK) {
988 LOG_ERROR("failed to get read pointer");
989 break;
990 }
991
992 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
993 (size_t) (buffer - buffer_orig), count, wp, rp);
994
995 if (rp == 0) {
996 LOG_ERROR("flash write algorithm aborted by target");
997 retval = ERROR_FLASH_OPERATION_FAILED;
998 break;
999 }
1000
1001 if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1002 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1003 break;
1004 }
1005
1006 /* Count the number of bytes available in the fifo without
1007 * crossing the wrap around. Make sure to not fill it completely,
1008 * because that would make wp == rp and that's the empty condition. */
1009 uint32_t thisrun_bytes;
1010 if (rp > wp)
1011 thisrun_bytes = rp - wp - block_size;
1012 else if (rp > fifo_start_addr)
1013 thisrun_bytes = fifo_end_addr - wp;
1014 else
1015 thisrun_bytes = fifo_end_addr - wp - block_size;
1016
1017 if (thisrun_bytes == 0) {
1018 /* Throttle polling a bit if transfer is (much) faster than flash
1019 * programming. The exact delay shouldn't matter as long as it's
1020 * less than buffer size / flash speed. This is very unlikely to
1021 * run when using high latency connections such as USB. */
1022 alive_sleep(10);
1023
1024 /* to stop an infinite loop on some targets, check and increment a timeout;
1025 * this issue was observed on a Stellaris using the new ICDI interface */
1026 if (timeout++ >= 500) {
1027 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1028 return ERROR_FLASH_OPERATION_FAILED;
1029 }
1030 continue;
1031 }
1032
1033 /* reset our timeout */
1034 timeout = 0;
1035
1036 /* Limit to the amount of data we actually want to write */
1037 if (thisrun_bytes > count * block_size)
1038 thisrun_bytes = count * block_size;
1039
1040 /* Write data to fifo */
1041 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1042 if (retval != ERROR_OK)
1043 break;
1044
1045 /* Update counters and wrap write pointer */
1046 buffer += thisrun_bytes;
1047 count -= thisrun_bytes / block_size;
1048 wp += thisrun_bytes;
1049 if (wp >= fifo_end_addr)
1050 wp = fifo_start_addr;
1051
1052 /* Store updated write pointer to target */
1053 retval = target_write_u32(target, wp_addr, wp);
1054 if (retval != ERROR_OK)
1055 break;
1056
1057 /* Avoid GDB timeouts */
1058 keep_alive();
1059 }
1060
1061 if (retval != ERROR_OK) {
1062 /* abort flash write algorithm on target */
1063 target_write_u32(target, wp_addr, 0);
1064 }
1065
1066 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1067 num_reg_params, reg_params,
1068 exit_point,
1069 10000,
1070 arch_info);
1071
1072 if (retval2 != ERROR_OK) {
1073 LOG_ERROR("error waiting for target flash write algorithm");
1074 retval = retval2;
1075 }
1076
1077 if (retval == ERROR_OK) {
1078 /* check if algorithm set rp = 0 after fifo writer loop finished */
1079 retval = target_read_u32(target, rp_addr, &rp);
1080 if (retval == ERROR_OK && rp == 0) {
1081 LOG_ERROR("flash write algorithm aborted by target");
1082 retval = ERROR_FLASH_OPERATION_FAILED;
1083 }
1084 }
1085
1086 return retval;
1087 }
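
/* The target side of the protocol above, rendered as C for clarity
 * (real loaders such as contrib/loaders/flash/stm32f1x.S implement it
 * in assembly; program_one_block() is a hypothetical stand-in):
 *
 *   for (;;) {
 *       uint32_t wp = *(volatile uint32_t *)buffer_start;       // head
 *       uint32_t rp = *(volatile uint32_t *)(buffer_start + 4); // tail
 *       if (wp == 0)
 *           break;                 // host aborted the transfer
 *       if (rp == wp)
 *           continue;              // fifo empty, keep polling
 *       if (program_one_block((const void *)(uintptr_t)rp) != 0) {
 *           *(volatile uint32_t *)(buffer_start + 4) = 0;
 *           break;                 // rp = 0 tells the host we aborted
 *       }
 *       rp += block_size;
 *       if (rp >= buffer_start + buffer_size)
 *           rp = buffer_start + 8; // wrap past the two pointer words
 *       *(volatile uint32_t *)(buffer_start + 4) = rp;
 *   }
 *
 * A real loader also tracks a block count passed in a register and
 * exits once it reaches zero.
 */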
1088
1089 int target_read_memory(struct target *target,
1090 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1091 {
1092 if (!target_was_examined(target)) {
1093 LOG_ERROR("Target not examined yet");
1094 return ERROR_FAIL;
1095 }
1096 if (!target->type->read_memory) {
1097 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1098 return ERROR_FAIL;
1099 }
1100 return target->type->read_memory(target, address, size, count, buffer);
1101 }
1102
1103 int target_read_phys_memory(struct target *target,
1104 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1105 {
1106 if (!target_was_examined(target)) {
1107 LOG_ERROR("Target not examined yet");
1108 return ERROR_FAIL;
1109 }
1110 if (!target->type->read_phys_memory) {
1111 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1112 return ERROR_FAIL;
1113 }
1114 return target->type->read_phys_memory(target, address, size, count, buffer);
1115 }
1116
1117 int target_write_memory(struct target *target,
1118 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1119 {
1120 if (!target_was_examined(target)) {
1121 LOG_ERROR("Target not examined yet");
1122 return ERROR_FAIL;
1123 }
1124 if (!target->type->write_memory) {
1125 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1126 return ERROR_FAIL;
1127 }
1128 return target->type->write_memory(target, address, size, count, buffer);
1129 }
1130
1131 int target_write_phys_memory(struct target *target,
1132 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1133 {
1134 if (!target_was_examined(target)) {
1135 LOG_ERROR("Target not examined yet");
1136 return ERROR_FAIL;
1137 }
1138 if (!target->type->write_phys_memory) {
1139 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1140 return ERROR_FAIL;
1141 }
1142 return target->type->write_phys_memory(target, address, size, count, buffer);
1143 }
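
/* The size/count convention in the accessors above transfers "count"
 * units of "size" bytes each. E.g., fetching four 32-bit words from an
 * illustrative address "addr":
 *
 *   uint8_t buf[16];
 *   if (target_read_memory(target, addr, 4, 4, buf) == ERROR_OK) {
 *       uint32_t first = target_buffer_get_u32(target, buf);
 *       // ...
 *   }
 */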
1144
1145 int target_add_breakpoint(struct target *target,
1146 struct breakpoint *breakpoint)
1147 {
1148 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1149 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1150 return ERROR_TARGET_NOT_HALTED;
1151 }
1152 return target->type->add_breakpoint(target, breakpoint);
1153 }
1154
1155 int target_add_context_breakpoint(struct target *target,
1156 struct breakpoint *breakpoint)
1157 {
1158 if (target->state != TARGET_HALTED) {
1159 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1160 return ERROR_TARGET_NOT_HALTED;
1161 }
1162 return target->type->add_context_breakpoint(target, breakpoint);
1163 }
1164
1165 int target_add_hybrid_breakpoint(struct target *target,
1166 struct breakpoint *breakpoint)
1167 {
1168 if (target->state != TARGET_HALTED) {
1169 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1170 return ERROR_TARGET_NOT_HALTED;
1171 }
1172 return target->type->add_hybrid_breakpoint(target, breakpoint);
1173 }
1174
1175 int target_remove_breakpoint(struct target *target,
1176 struct breakpoint *breakpoint)
1177 {
1178 return target->type->remove_breakpoint(target, breakpoint);
1179 }
1180
1181 int target_add_watchpoint(struct target *target,
1182 struct watchpoint *watchpoint)
1183 {
1184 if (target->state != TARGET_HALTED) {
1185 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1186 return ERROR_TARGET_NOT_HALTED;
1187 }
1188 return target->type->add_watchpoint(target, watchpoint);
1189 }
1190 int target_remove_watchpoint(struct target *target,
1191 struct watchpoint *watchpoint)
1192 {
1193 return target->type->remove_watchpoint(target, watchpoint);
1194 }
1195 int target_hit_watchpoint(struct target *target,
1196 struct watchpoint **hit_watchpoint)
1197 {
1198 if (target->state != TARGET_HALTED) {
1199 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1200 return ERROR_TARGET_NOT_HALTED;
1201 }
1202
1203 if (target->type->hit_watchpoint == NULL) {
1204 /* For backward compatibility, if hit_watchpoint is not implemented,
1205 * return ERROR_FAIL so that gdb_server does not pick up bogus
1206 * information. */
1207 return ERROR_FAIL;
1208 }
1209
1210 return target->type->hit_watchpoint(target, hit_watchpoint);
1211 }
1212
1213 const char *target_get_gdb_arch(struct target *target)
1214 {
1215 if (target->type->get_gdb_arch == NULL)
1216 return NULL;
1217 return target->type->get_gdb_arch(target);
1218 }
1219
1220 int target_get_gdb_reg_list(struct target *target,
1221 struct reg **reg_list[], int *reg_list_size,
1222 enum target_register_class reg_class)
1223 {
1224 return target->type->get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1225 }
1226
1227 bool target_supports_gdb_connection(struct target *target)
1228 {
1229 /*
1230 * based on current code, we can simply exclude all the targets that
1231 * don't provide get_gdb_reg_list; this could change with new targets.
1232 */
1233 return !!target->type->get_gdb_reg_list;
1234 }
1235
1236 int target_step(struct target *target,
1237 int current, target_addr_t address, int handle_breakpoints)
1238 {
1239 return target->type->step(target, current, address, handle_breakpoints);
1240 }
1241
1242 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1243 {
1244 if (target->state != TARGET_HALTED) {
1245 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1246 return ERROR_TARGET_NOT_HALTED;
1247 }
1248 return target->type->get_gdb_fileio_info(target, fileio_info);
1249 }
1250
1251 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1252 {
1253 if (target->state != TARGET_HALTED) {
1254 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1255 return ERROR_TARGET_NOT_HALTED;
1256 }
1257 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1258 }
1259
1260 target_addr_t target_address_max(struct target *target)
1261 {
1262 unsigned bits = target_address_bits(target);
1263 if (sizeof(target_addr_t) * 8 == bits)
1264 return (target_addr_t) -1;
1265 else
1266 return (((target_addr_t) 1) << bits) - 1;
1267 }
1268
1269 unsigned target_address_bits(struct target *target)
1270 {
1271 if (target->type->address_bits)
1272 return target->type->address_bits(target);
1273 return 32;
1274 }
1275
1276 int target_profiling(struct target *target, uint32_t *samples,
1277 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1278 {
1279 if (target->state != TARGET_HALTED) {
1280 LOG_WARNING("target %s is not halted (profiling)", target->cmd_name);
1281 return ERROR_TARGET_NOT_HALTED;
1282 }
1283 return target->type->profiling(target, samples, max_num_samples,
1284 num_samples, seconds);
1285 }
1286
1287 /**
1288 * Reset the @c examined flag for the given target.
1289 * Pure paranoia -- targets are zeroed on allocation.
1290 */
1291 static void target_reset_examined(struct target *target)
1292 {
1293 target->examined = false;
1294 }
1295
1296 static int handle_target(void *priv);
1297
1298 static int target_init_one(struct command_context *cmd_ctx,
1299 struct target *target)
1300 {
1301 target_reset_examined(target);
1302
1303 struct target_type *type = target->type;
1304 if (type->examine == NULL)
1305 type->examine = default_examine;
1306
1307 if (type->check_reset == NULL)
1308 type->check_reset = default_check_reset;
1309
1310 assert(type->init_target != NULL);
1311
1312 int retval = type->init_target(cmd_ctx, target);
1313 if (ERROR_OK != retval) {
1314 LOG_ERROR("target '%s' init failed", target_name(target));
1315 return retval;
1316 }
1317
1318 /* Sanity-check MMU support ... stub in what we must, to help
1319 * implement it in stages, but warn if we need to do so.
1320 */
1321 if (type->mmu) {
1322 if (type->virt2phys == NULL) {
1323 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1324 type->virt2phys = identity_virt2phys;
1325 }
1326 } else {
1327 /* Make sure no-MMU targets all behave the same: make no
1328 * distinction between physical and virtual addresses, and
1329 * ensure that virt2phys() is always an identity mapping.
1330 */
1331 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1332 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1333
1334 type->mmu = no_mmu;
1335 type->write_phys_memory = type->write_memory;
1336 type->read_phys_memory = type->read_memory;
1337 type->virt2phys = identity_virt2phys;
1338 }
1339
1340 if (target->type->read_buffer == NULL)
1341 target->type->read_buffer = target_read_buffer_default;
1342
1343 if (target->type->write_buffer == NULL)
1344 target->type->write_buffer = target_write_buffer_default;
1345
1346 if (target->type->get_gdb_fileio_info == NULL)
1347 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1348
1349 if (target->type->gdb_fileio_end == NULL)
1350 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1351
1352 if (target->type->profiling == NULL)
1353 target->type->profiling = target_profiling_default;
1354
1355 return ERROR_OK;
1356 }
1357
1358 static int target_init(struct command_context *cmd_ctx)
1359 {
1360 struct target *target;
1361 int retval;
1362
1363 for (target = all_targets; target; target = target->next) {
1364 retval = target_init_one(cmd_ctx, target);
1365 if (ERROR_OK != retval)
1366 return retval;
1367 }
1368
1369 if (!all_targets)
1370 return ERROR_OK;
1371
1372 retval = target_register_user_commands(cmd_ctx);
1373 if (ERROR_OK != retval)
1374 return retval;
1375
1376 retval = target_register_timer_callback(&handle_target,
1377 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1378 if (ERROR_OK != retval)
1379 return retval;
1380
1381 return ERROR_OK;
1382 }
1383
1384 COMMAND_HANDLER(handle_target_init_command)
1385 {
1386 int retval;
1387
1388 if (CMD_ARGC != 0)
1389 return ERROR_COMMAND_SYNTAX_ERROR;
1390
1391 static bool target_initialized;
1392 if (target_initialized) {
1393 LOG_INFO("'target init' has already been called");
1394 return ERROR_OK;
1395 }
1396 target_initialized = true;
1397
1398 retval = command_run_line(CMD_CTX, "init_targets");
1399 if (ERROR_OK != retval)
1400 return retval;
1401
1402 retval = command_run_line(CMD_CTX, "init_target_events");
1403 if (ERROR_OK != retval)
1404 return retval;
1405
1406 retval = command_run_line(CMD_CTX, "init_board");
1407 if (ERROR_OK != retval)
1408 return retval;
1409
1410 LOG_DEBUG("Initializing targets...");
1411 return target_init(CMD_CTX);
1412 }
1413
1414 int target_register_event_callback(int (*callback)(struct target *target,
1415 enum target_event event, void *priv), void *priv)
1416 {
1417 struct target_event_callback **callbacks_p = &target_event_callbacks;
1418
1419 if (callback == NULL)
1420 return ERROR_COMMAND_SYNTAX_ERROR;
1421
1422 if (*callbacks_p) {
1423 while ((*callbacks_p)->next)
1424 callbacks_p = &((*callbacks_p)->next);
1425 callbacks_p = &((*callbacks_p)->next);
1426 }
1427
1428 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1429 (*callbacks_p)->callback = callback;
1430 (*callbacks_p)->priv = priv;
1431 (*callbacks_p)->next = NULL;
1432
1433 return ERROR_OK;
1434 }
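
/* Registration sketch (the handler name and its behaviour are
 * illustrative):
 *
 *   static int my_event_handler(struct target *target,
 *           enum target_event event, void *priv)
 *   {
 *       if (event == TARGET_EVENT_HALTED)
 *           LOG_INFO("%s halted", target_name(target));
 *       return ERROR_OK;
 *   }
 *
 *   target_register_event_callback(my_event_handler, NULL);
 */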
1435
1436 int target_register_reset_callback(int (*callback)(struct target *target,
1437 enum target_reset_mode reset_mode, void *priv), void *priv)
1438 {
1439 struct target_reset_callback *entry;
1440
1441 if (callback == NULL)
1442 return ERROR_COMMAND_SYNTAX_ERROR;
1443
1444 entry = malloc(sizeof(struct target_reset_callback));
1445 if (entry == NULL) {
1446 LOG_ERROR("error allocating buffer for reset callback entry");
1447 return ERROR_COMMAND_SYNTAX_ERROR;
1448 }
1449
1450 entry->callback = callback;
1451 entry->priv = priv;
1452 list_add(&entry->list, &target_reset_callback_list);
1453
1454
1455 return ERROR_OK;
1456 }
1457
1458 int target_register_trace_callback(int (*callback)(struct target *target,
1459 size_t len, uint8_t *data, void *priv), void *priv)
1460 {
1461 struct target_trace_callback *entry;
1462
1463 if (callback == NULL)
1464 return ERROR_COMMAND_SYNTAX_ERROR;
1465
1466 entry = malloc(sizeof(struct target_trace_callback));
1467 if (entry == NULL) {
1468 LOG_ERROR("error allocating buffer for trace callback entry");
1469 return ERROR_COMMAND_SYNTAX_ERROR;
1470 }
1471
1472 entry->callback = callback;
1473 entry->priv = priv;
1474 list_add(&entry->list, &target_trace_callback_list);
1475
1476
1477 return ERROR_OK;
1478 }
1479
1480 int target_register_timer_callback(int (*callback)(void *priv),
1481 unsigned int time_ms, enum target_timer_type type, void *priv)
1482 {
1483 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1484
1485 if (callback == NULL)
1486 return ERROR_COMMAND_SYNTAX_ERROR;
1487
1488 if (*callbacks_p) {
1489 while ((*callbacks_p)->next)
1490 callbacks_p = &((*callbacks_p)->next);
1491 callbacks_p = &((*callbacks_p)->next);
1492 }
1493
1494 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1495 (*callbacks_p)->callback = callback;
1496 (*callbacks_p)->type = type;
1497 (*callbacks_p)->time_ms = time_ms;
1498 (*callbacks_p)->removed = false;
1499
1500 gettimeofday(&(*callbacks_p)->when, NULL);
1501 timeval_add_time(&(*callbacks_p)->when, 0, time_ms * 1000);
1502
1503 (*callbacks_p)->priv = priv;
1504 (*callbacks_p)->next = NULL;
1505
1506 return ERROR_OK;
1507 }
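
/* Sketch: invoke a handler roughly every 100 ms until it is
 * unregistered (the handler name is illustrative):
 *
 *   static int my_tick(void *priv)
 *   {
 *       // periodic housekeeping
 *       return ERROR_OK;
 *   }
 *
 *   target_register_timer_callback(my_tick, 100,
 *           TARGET_TIMER_TYPE_PERIODIC, NULL);
 *   ...
 *   target_unregister_timer_callback(my_tick, NULL);
 */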
1508
1509 int target_unregister_event_callback(int (*callback)(struct target *target,
1510 enum target_event event, void *priv), void *priv)
1511 {
1512 struct target_event_callback **p = &target_event_callbacks;
1513 struct target_event_callback *c = target_event_callbacks;
1514
1515 if (callback == NULL)
1516 return ERROR_COMMAND_SYNTAX_ERROR;
1517
1518 while (c) {
1519 struct target_event_callback *next = c->next;
1520 if ((c->callback == callback) && (c->priv == priv)) {
1521 *p = next;
1522 free(c);
1523 return ERROR_OK;
1524 } else
1525 p = &(c->next);
1526 c = next;
1527 }
1528
1529 return ERROR_OK;
1530 }
1531
1532 int target_unregister_reset_callback(int (*callback)(struct target *target,
1533 enum target_reset_mode reset_mode, void *priv), void *priv)
1534 {
1535 struct target_reset_callback *entry;
1536
1537 if (callback == NULL)
1538 return ERROR_COMMAND_SYNTAX_ERROR;
1539
1540 list_for_each_entry(entry, &target_reset_callback_list, list) {
1541 if (entry->callback == callback && entry->priv == priv) {
1542 list_del(&entry->list);
1543 free(entry);
1544 break;
1545 }
1546 }
1547
1548 return ERROR_OK;
1549 }
1550
1551 int target_unregister_trace_callback(int (*callback)(struct target *target,
1552 size_t len, uint8_t *data, void *priv), void *priv)
1553 {
1554 struct target_trace_callback *entry;
1555
1556 if (callback == NULL)
1557 return ERROR_COMMAND_SYNTAX_ERROR;
1558
1559 list_for_each_entry(entry, &target_trace_callback_list, list) {
1560 if (entry->callback == callback && entry->priv == priv) {
1561 list_del(&entry->list);
1562 free(entry);
1563 break;
1564 }
1565 }
1566
1567 return ERROR_OK;
1568 }
1569
1570 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1571 {
1572 if (callback == NULL)
1573 return ERROR_COMMAND_SYNTAX_ERROR;
1574
1575 for (struct target_timer_callback *c = target_timer_callbacks;
1576 c; c = c->next) {
1577 if ((c->callback == callback) && (c->priv == priv)) {
1578 c->removed = true;
1579 return ERROR_OK;
1580 }
1581 }
1582
1583 return ERROR_FAIL;
1584 }
1585
1586 int target_call_event_callbacks(struct target *target, enum target_event event)
1587 {
1588 struct target_event_callback *callback = target_event_callbacks;
1589 struct target_event_callback *next_callback;
1590
1591 if (event == TARGET_EVENT_HALTED) {
1592 /* execute early halted first */
1593 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1594 }
1595
1596 LOG_DEBUG("target event %i (%s)", event,
1597 Jim_Nvp_value2name_simple(nvp_target_event, event)->name);
1598
1599 target_handle_event(target, event);
1600
1601 while (callback) {
1602 next_callback = callback->next;
1603 callback->callback(target, event, callback->priv);
1604 callback = next_callback;
1605 }
1606
1607 return ERROR_OK;
1608 }
1609
1610 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1611 {
1612 struct target_reset_callback *callback;
1613
1614 LOG_DEBUG("target reset %i (%s)", reset_mode,
1615 Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1616
1617 list_for_each_entry(callback, &target_reset_callback_list, list)
1618 callback->callback(target, reset_mode, callback->priv);
1619
1620 return ERROR_OK;
1621 }
1622
1623 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1624 {
1625 struct target_trace_callback *callback;
1626
1627 list_for_each_entry(callback, &target_trace_callback_list, list)
1628 callback->callback(target, len, data, callback->priv);
1629
1630 return ERROR_OK;
1631 }
1632
1633 static int target_timer_callback_periodic_restart(
1634 struct target_timer_callback *cb, struct timeval *now)
1635 {
1636 cb->when = *now;
1637 timeval_add_time(&cb->when, 0, cb->time_ms * 1000L);
1638 return ERROR_OK;
1639 }
1640
1641 static int target_call_timer_callback(struct target_timer_callback *cb,
1642 struct timeval *now)
1643 {
1644 cb->callback(cb->priv);
1645
1646 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1647 return target_timer_callback_periodic_restart(cb, now);
1648
1649 return target_unregister_timer_callback(cb->callback, cb->priv);
1650 }
1651
1652 static int target_call_timer_callbacks_check_time(int checktime)
1653 {
1654 static bool callback_processing;
1655
1656 /* Do not allow nesting */
1657 if (callback_processing)
1658 return ERROR_OK;
1659
1660 callback_processing = true;
1661
1662 keep_alive();
1663
1664 struct timeval now;
1665 gettimeofday(&now, NULL);
1666
1667 /* Store an address of the place containing a pointer to the
1668 * next item; initially, that's a standalone "root of the
1669 * list" variable. */
1670 struct target_timer_callback **callback = &target_timer_callbacks;
1671 while (*callback) {
1672 if ((*callback)->removed) {
1673 struct target_timer_callback *p = *callback;
1674 *callback = (*callback)->next;
1675 free(p);
1676 continue;
1677 }
1678
1679 bool call_it = (*callback)->callback &&
1680 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1681 timeval_compare(&now, &(*callback)->when) >= 0);
1682
1683 if (call_it)
1684 target_call_timer_callback(*callback, &now);
1685
1686 callback = &(*callback)->next;
1687 }
1688
1689 callback_processing = false;
1690 return ERROR_OK;
1691 }
1692
1693 int target_call_timer_callbacks(void)
1694 {
1695 return target_call_timer_callbacks_check_time(1);
1696 }
1697
1698 /* invoke periodic callbacks immediately */
1699 int target_call_timer_callbacks_now(void)
1700 {
1701 return target_call_timer_callbacks_check_time(0);
1702 }
1703
1704 /* Prints the working area layout for debug purposes */
1705 static void print_wa_layout(struct target *target)
1706 {
1707 struct working_area *c = target->working_areas;
1708
1709 while (c) {
1710 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1711 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1712 c->address, c->address + c->size - 1, c->size);
1713 c = c->next;
1714 }
1715 }
1716
1717 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1718 static void target_split_working_area(struct working_area *area, uint32_t size)
1719 {
1720 assert(area->free); /* Shouldn't split an allocated area */
1721 assert(size <= area->size); /* Caller should guarantee this */
1722
1723 /* Split only if not already the right size */
1724 if (size < area->size) {
1725 struct working_area *new_wa = malloc(sizeof(*new_wa));
1726
1727 if (new_wa == NULL)
1728 return;
1729
1730 new_wa->next = area->next;
1731 new_wa->size = area->size - size;
1732 new_wa->address = area->address + size;
1733 new_wa->backup = NULL;
1734 new_wa->user = NULL;
1735 new_wa->free = true;
1736
1737 area->next = new_wa;
1738 area->size = size;
1739
1740 /* If backup memory was allocated to this area, it is now the wrong
1741 * size, so free it; it will be reallocated if/when needed */
1742 if (area->backup) {
1743 free(area->backup);
1744 area->backup = NULL;
1745 }
1746 }
1747 }
1748
1749 /* Merge all adjacent free areas into one */
1750 static void target_merge_working_areas(struct target *target)
1751 {
1752 struct working_area *c = target->working_areas;
1753
1754 while (c && c->next) {
1755 assert(c->next->address == c->address + c->size); /* This is an invariant */
1756
1757 /* Find two adjacent free areas */
1758 if (c->free && c->next->free) {
1759 /* Merge the last into the first */
1760 c->size += c->next->size;
1761
1762 /* Remove the last */
1763 struct working_area *to_be_freed = c->next;
1764 c->next = c->next->next;
1765 if (to_be_freed->backup)
1766 free(to_be_freed->backup);
1767 free(to_be_freed);
1768
1769 /* If backup memory was allocated to the remaining area, it has
1770 * the wrong size now */
1771 if (c->backup) {
1772 free(c->backup);
1773 c->backup = NULL;
1774 }
1775 } else {
1776 c = c->next;
1777 }
1778 }
1779 }
1780
1781 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1782 {
1783 /* Reevaluate working area address based on MMU state */
1784 if (target->working_areas == NULL) {
1785 int retval;
1786 int enabled;
1787
1788 retval = target->type->mmu(target, &enabled);
1789 if (retval != ERROR_OK)
1790 return retval;
1791
1792 if (!enabled) {
1793 if (target->working_area_phys_spec) {
1794 LOG_DEBUG("MMU disabled, using physical "
1795 "address for working memory " TARGET_ADDR_FMT,
1796 target->working_area_phys);
1797 target->working_area = target->working_area_phys;
1798 } else {
1799 LOG_ERROR("No working memory available. "
1800 "Specify -work-area-phys to target.");
1801 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1802 }
1803 } else {
1804 if (target->working_area_virt_spec) {
1805 LOG_DEBUG("MMU enabled, using virtual "
1806 "address for working memory " TARGET_ADDR_FMT,
1807 target->working_area_virt);
1808 target->working_area = target->working_area_virt;
1809 } else {
1810 LOG_ERROR("No working memory available. "
1811 "Specify -work-area-virt to target.");
1812 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1813 }
1814 }
1815
1816 /* Set up initial working area on first call */
1817 struct working_area *new_wa = malloc(sizeof(*new_wa));
1818 if (new_wa) {
1819 new_wa->next = NULL;
1820 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
1821 new_wa->address = target->working_area;
1822 new_wa->backup = NULL;
1823 new_wa->user = NULL;
1824 new_wa->free = true;
1825 }
1826
1827 target->working_areas = new_wa;
1828 }
1829
1830 /* only allocate in multiples of 4 bytes */
1831 if (size % 4)
1832 size = (size + 3) & (~3UL);
1833
1834 struct working_area *c = target->working_areas;
1835
1836 /* Find the first large enough working area */
1837 while (c) {
1838 if (c->free && c->size >= size)
1839 break;
1840 c = c->next;
1841 }
1842
1843 if (c == NULL)
1844 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1845
1846 /* Split the working area into the requested size */
1847 target_split_working_area(c, size);
1848
1849 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
1850 size, c->address);
1851
1852 if (target->backup_working_area) {
1853 if (c->backup == NULL) {
1854 c->backup = malloc(c->size);
1855 if (c->backup == NULL)
1856 return ERROR_FAIL;
1857 }
1858
1859 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
1860 if (retval != ERROR_OK)
1861 return retval;
1862 }
1863
1864 /* mark as used, and return the new (reused) area */
1865 c->free = false;
1866 *area = c;
1867
1868 /* user pointer */
1869 c->user = area;
1870
1871 print_wa_layout(target);
1872
1873 return ERROR_OK;
1874 }
1875
1876 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1877 {
1878 int retval;
1879
1880 retval = target_alloc_working_area_try(target, size, area);
1881 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1882 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
1883 return retval;
1884
1885 }
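/* Typical call sequence (illustrative sketch): a flash or algorithm driver
 * allocates scratch RAM, loads code or data into it, and frees it again so
 * any backed-up target memory is restored. The 256-byte size and the
 * stub_code buffer are hypothetical.
 *
 *	struct working_area *wa = NULL;
 *	int retval = target_alloc_working_area(target, 256, &wa);
 *	if (retval != ERROR_OK)
 *		return retval;
 *	retval = target_write_buffer(target, wa->address, 256, stub_code);
 *	...
 *	target_free_working_area(target, wa);
 */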
1886
1887 static int target_restore_working_area(struct target *target, struct working_area *area)
1888 {
1889 int retval = ERROR_OK;
1890
1891 if (target->backup_working_area && area->backup != NULL) {
1892 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
1893 if (retval != ERROR_OK)
1894 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1895 area->size, area->address);
1896 }
1897
1898 return retval;
1899 }
1900
1901 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
1902 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1903 {
1904 int retval = ERROR_OK;
1905
1906 if (area->free)
1907 return retval;
1908
1909 if (restore) {
1910 retval = target_restore_working_area(target, area);
1911 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
1912 if (retval != ERROR_OK)
1913 return retval;
1914 }
1915
1916 area->free = true;
1917
1918 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1919 area->size, area->address);
1920
1921 /* mark user pointer invalid */
1922 /* TODO: Is this really safe? It points to some previous caller's memory.
1923 * How could we know that the area pointer is still in that place and not
1924 * some other vital data? What's the purpose of this, anyway? */
1925 *area->user = NULL;
1926 area->user = NULL;
1927
1928 target_merge_working_areas(target);
1929
1930 print_wa_layout(target);
1931
1932 return retval;
1933 }
1934
1935 int target_free_working_area(struct target *target, struct working_area *area)
1936 {
1937 return target_free_working_area_restore(target, area, 1);
1938 }
1939
1940 /* free resources and restore memory, if restoring memory fails,
1941 * free up resources anyway
1942 */
1943 static void target_free_all_working_areas_restore(struct target *target, int restore)
1944 {
1945 struct working_area *c = target->working_areas;
1946
1947 LOG_DEBUG("freeing all working areas");
1948
1949 /* Loop through all areas, restoring the allocated ones and marking them as free */
1950 while (c) {
1951 if (!c->free) {
1952 if (restore)
1953 target_restore_working_area(target, c);
1954 c->free = true;
1955 *c->user = NULL; /* Same as above */
1956 c->user = NULL;
1957 }
1958 c = c->next;
1959 }
1960
1961 /* Run a merge pass to combine all areas into one */
1962 target_merge_working_areas(target);
1963
1964 print_wa_layout(target);
1965 }
1966
1967 void target_free_all_working_areas(struct target *target)
1968 {
1969 target_free_all_working_areas_restore(target, 1);
1970
1971 /* Now we have none or only one working area marked as free */
1972 if (target->working_areas) {
1973 /* Free the last one to allow on-the-fly moving and resizing */
1974 free(target->working_areas->backup);
1975 free(target->working_areas);
1976 target->working_areas = NULL;
1977 }
1978 }
1979
1980 /* Find the largest number of bytes that can be allocated */
1981 uint32_t target_get_working_area_avail(struct target *target)
1982 {
1983 struct working_area *c = target->working_areas;
1984 uint32_t max_size = 0;
1985
1986 if (c == NULL)
1987 return target->working_area_size;
1988
1989 while (c) {
1990 if (c->free && max_size < c->size)
1991 max_size = c->size;
1992
1993 c = c->next;
1994 }
1995
1996 return max_size;
1997 }
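/* A caller that simply wants "as much scratch RAM as possible" can size its
 * request from the helper above (illustrative sketch; desired_size is
 * hypothetical):
 *
 *	uint32_t avail = target_get_working_area_avail(target);
 *	uint32_t chunk = avail < desired_size ? avail : desired_size;
 *	struct working_area *wa;
 *	int retval = target_alloc_working_area_try(target, chunk, &wa);
 */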
1998
1999 static void target_destroy(struct target *target)
2000 {
2001 if (target->type->deinit_target)
2002 target->type->deinit_target(target);
2003
2004 if (target->semihosting)
2005 free(target->semihosting);
2006
2007 jtag_unregister_event_callback(jtag_enable_callback, target);
2008
2009 struct target_event_action *teap = target->event_action;
2010 while (teap) {
2011 struct target_event_action *next = teap->next;
2012 Jim_DecrRefCount(teap->interp, teap->body);
2013 free(teap);
2014 teap = next;
2015 }
2016
2017 target_free_all_working_areas(target);
2018
2019 /* release the target's SMP list */
2020 if (target->smp) {
2021 struct target_list *head = target->head;
2022 while (head != NULL) {
2023 struct target_list *pos = head->next;
2024 head->target->smp = 0;
2025 free(head);
2026 head = pos;
2027 }
2028 target->smp = 0;
2029 }
2030
2031 free(target->gdb_port_override);
2032 free(target->type);
2033 free(target->trace_info);
2034 free(target->fileio_info);
2035 free(target->cmd_name);
2036 free(target);
2037 }
2038
2039 void target_quit(void)
2040 {
2041 struct target_event_callback *pe = target_event_callbacks;
2042 while (pe) {
2043 struct target_event_callback *t = pe->next;
2044 free(pe);
2045 pe = t;
2046 }
2047 target_event_callbacks = NULL;
2048
2049 struct target_timer_callback *pt = target_timer_callbacks;
2050 while (pt) {
2051 struct target_timer_callback *t = pt->next;
2052 free(pt);
2053 pt = t;
2054 }
2055 target_timer_callbacks = NULL;
2056
2057 for (struct target *target = all_targets; target;) {
2058 struct target *tmp;
2059
2060 tmp = target->next;
2061 target_destroy(target);
2062 target = tmp;
2063 }
2064
2065 all_targets = NULL;
2066 }
2067
2068 int target_arch_state(struct target *target)
2069 {
2070 int retval;
2071 if (target == NULL) {
2072 LOG_WARNING("No target has been configured");
2073 return ERROR_OK;
2074 }
2075
2076 if (target->state != TARGET_HALTED)
2077 return ERROR_OK;
2078
2079 retval = target->type->arch_state(target);
2080 return retval;
2081 }
2082
2083 static int target_get_gdb_fileio_info_default(struct target *target,
2084 struct gdb_fileio_info *fileio_info)
2085 {
2086 /* If the target does not support semihosting functions, it has
2087 no need to provide a .get_gdb_fileio_info callback.
2088 This default just returns ERROR_FAIL, so gdb_server will report
2089 "Txx" (target halted) every time. */
2090 return ERROR_FAIL;
2091 }
2092
2093 static int target_gdb_fileio_end_default(struct target *target,
2094 int retcode, int fileio_errno, bool ctrl_c)
2095 {
2096 return ERROR_OK;
2097 }
2098
2099 static int target_profiling_default(struct target *target, uint32_t *samples,
2100 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2101 {
2102 struct timeval timeout, now;
2103
2104 gettimeofday(&timeout, NULL);
2105 timeval_add_time(&timeout, seconds, 0);
2106
2107 LOG_INFO("Starting profiling. Halting and resuming the"
2108 " target as often as we can...");
2109
2110 uint32_t sample_count = 0;
2111 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2112 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
2113 if (reg == NULL) return ERROR_FAIL;	/* no PC register, cannot sample */
2114 int retval = ERROR_OK;
2115 for (;;) {
2116 target_poll(target);
2117 if (target->state == TARGET_HALTED) {
2118 uint32_t t = buf_get_u32(reg->value, 0, 32);
2119 samples[sample_count++] = t;
2120 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2121 retval = target_resume(target, 1, 0, 0, 0);
2122 target_poll(target);
2123 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2124 } else if (target->state == TARGET_RUNNING) {
2125 /* We want to quickly sample the PC. */
2126 retval = target_halt(target);
2127 } else {
2128 LOG_INFO("Target not halted or running");
2129 retval = ERROR_OK;
2130 break;
2131 }
2132
2133 if (retval != ERROR_OK)
2134 break;
2135
2136 gettimeofday(&now, NULL);
2137 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2138 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2139 break;
2140 }
2141 }
2142
2143 *num_samples = sample_count;
2144 return retval;
2145 }
2146
2147 /* A single aligned 16-bit or 32-bit word is guaranteed to be written
2148 * with a matching 16-bit or 32-bit access; all other data is
2149 * transferred as efficiently as possible
2150 */
2151 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2152 {
2153 LOG_DEBUG("writing buffer of %" PRIi32 " byte at " TARGET_ADDR_FMT,
2154 size, address);
2155
2156 if (!target_was_examined(target)) {
2157 LOG_ERROR("Target not examined yet");
2158 return ERROR_FAIL;
2159 }
2160
2161 if (size == 0)
2162 return ERROR_OK;
2163
2164 if ((address + size - 1) < address) {
2165 /* GDB can request this when e.g. PC is 0xfffffffc */
2166 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2167 address,
2168 size);
2169 return ERROR_FAIL;
2170 }
2171
2172 return target->type->write_buffer(target, address, size, buffer);
2173 }
2174
2175 static int target_write_buffer_default(struct target *target,
2176 target_addr_t address, uint32_t count, const uint8_t *buffer)
2177 {
2178 uint32_t size;
2179
2180 /* Align the address up to the maximum access size of 4 bytes. The loop
2181 * condition ensures each larger pass still has data left to transfer. */
2182 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2183 if (address & size) {
2184 int retval = target_write_memory(target, address, size, 1, buffer);
2185 if (retval != ERROR_OK)
2186 return retval;
2187 address += size;
2188 count -= size;
2189 buffer += size;
2190 }
2191 }
2192
2193 /* Write the data with as large access size as possible. */
2194 for (; size > 0; size /= 2) {
2195 uint32_t aligned = count - count % size;
2196 if (aligned > 0) {
2197 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2198 if (retval != ERROR_OK)
2199 return retval;
2200 address += aligned;
2201 count -= aligned;
2202 buffer += aligned;
2203 }
2204 }
2205
2206 return ERROR_OK;
2207 }
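/* Worked example of the splitting above: writing 7 bytes to address 0x1001
 * issues a 1-byte access at 0x1001, a 2-byte access at 0x1002, then a single
 * 4-byte access at 0x1004. Each head pass leaves the address aligned for the
 * next larger size, and the descending loop drains whatever remains. */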
2208
2209 /* A single aligned 16-bit or 32-bit word is guaranteed to be read
2210 * with a matching 16-bit or 32-bit access; all other data is
2211 * transferred as efficiently as possible
2212 */
2213 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2214 {
2215 LOG_DEBUG("reading buffer of %" PRIi32 " byte at " TARGET_ADDR_FMT,
2216 size, address);
2217
2218 if (!target_was_examined(target)) {
2219 LOG_ERROR("Target not examined yet");
2220 return ERROR_FAIL;
2221 }
2222
2223 if (size == 0)
2224 return ERROR_OK;
2225
2226 if ((address + size - 1) < address) {
2227 /* GDB can request this when e.g. PC is 0xfffffffc */
2228 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2229 address,
2230 size);
2231 return ERROR_FAIL;
2232 }
2233
2234 return target->type->read_buffer(target, address, size, buffer);
2235 }
2236
2237 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2238 {
2239 uint32_t size;
2240
2241 /* Align the address up to the maximum access size of 4 bytes. The loop
2242 * condition ensures each larger pass still has data left to transfer. */
2243 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2244 if (address & size) {
2245 int retval = target_read_memory(target, address, size, 1, buffer);
2246 if (retval != ERROR_OK)
2247 return retval;
2248 address += size;
2249 count -= size;
2250 buffer += size;
2251 }
2252 }
2253
2254 /* Read the data with as large access size as possible. */
2255 for (; size > 0; size /= 2) {
2256 uint32_t aligned = count - count % size;
2257 if (aligned > 0) {
2258 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2259 if (retval != ERROR_OK)
2260 return retval;
2261 address += aligned;
2262 count -= aligned;
2263 buffer += aligned;
2264 }
2265 }
2266
2267 return ERROR_OK;
2268 }
2269
2270 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t* crc)
2271 {
2272 uint8_t *buffer;
2273 int retval;
2274 uint32_t i;
2275 uint32_t checksum = 0;
2276 if (!target_was_examined(target)) {
2277 LOG_ERROR("Target not examined yet");
2278 return ERROR_FAIL;
2279 }
2280
2281 retval = target->type->checksum_memory(target, address, size, &checksum);
2282 if (retval != ERROR_OK) {
2283 buffer = malloc(size);
2284 if (buffer == NULL) {
2285 LOG_ERROR("error allocating buffer for section (%" PRId32 " bytes)", size);
2286 return ERROR_COMMAND_SYNTAX_ERROR;
2287 }
2288 retval = target_read_buffer(target, address, size, buffer);
2289 if (retval != ERROR_OK) {
2290 free(buffer);
2291 return retval;
2292 }
2293
2294 /* convert to target endianness */
2295 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2296 uint32_t target_data;
2297 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2298 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2299 }
2300
2301 retval = image_calculate_checksum(buffer, size, &checksum);
2302 free(buffer);
2303 }
2304
2305 *crc = checksum;
2306
2307 return retval;
2308 }
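/* Illustrative sketch: verifying freshly written data by comparing a
 * host-side CRC with the target-side one computed by the helper above.
 * buf, len and addr are hypothetical.
 *
 *	uint32_t host_crc, target_crc;
 *	int retval = image_calculate_checksum(buf, len, &host_crc);
 *	if (retval == ERROR_OK)
 *		retval = target_checksum_memory(target, addr, len, &target_crc);
 *	if (retval == ERROR_OK && host_crc != target_crc)
 *		LOG_ERROR("checksum mismatch");
 */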
2309
2310 int target_blank_check_memory(struct target *target,
2311 struct target_memory_check_block *blocks, int num_blocks,
2312 uint8_t erased_value)
2313 {
2314 if (!target_was_examined(target)) {
2315 LOG_ERROR("Target not examined yet");
2316 return ERROR_FAIL;
2317 }
2318
2319 if (target->type->blank_check_memory == NULL)
2320 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2321
2322 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2323 }
2324
2325 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2326 {
2327 uint8_t value_buf[8];
2328 if (!target_was_examined(target)) {
2329 LOG_ERROR("Target not examined yet");
2330 return ERROR_FAIL;
2331 }
2332
2333 int retval = target_read_memory(target, address, 8, 1, value_buf);
2334
2335 if (retval == ERROR_OK) {
2336 *value = target_buffer_get_u64(target, value_buf);
2337 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2338 address,
2339 *value);
2340 } else {
2341 *value = 0x0;
2342 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2343 address);
2344 }
2345
2346 return retval;
2347 }
2348
2349 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2350 {
2351 uint8_t value_buf[4];
2352 if (!target_was_examined(target)) {
2353 LOG_ERROR("Target not examined yet");
2354 return ERROR_FAIL;
2355 }
2356
2357 int retval = target_read_memory(target, address, 4, 1, value_buf);
2358
2359 if (retval == ERROR_OK) {
2360 *value = target_buffer_get_u32(target, value_buf);
2361 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2362 address,
2363 *value);
2364 } else {
2365 *value = 0x0;
2366 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2367 address);
2368 }
2369
2370 return retval;
2371 }
2372
2373 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2374 {
2375 uint8_t value_buf[2];
2376 if (!target_was_examined(target)) {
2377 LOG_ERROR("Target not examined yet");
2378 return ERROR_FAIL;
2379 }
2380
2381 int retval = target_read_memory(target, address, 2, 1, value_buf);
2382
2383 if (retval == ERROR_OK) {
2384 *value = target_buffer_get_u16(target, value_buf);
2385 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2386 address,
2387 *value);
2388 } else {
2389 *value = 0x0;
2390 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2391 address);
2392 }
2393
2394 return retval;
2395 }
2396
2397 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2398 {
2399 if (!target_was_examined(target)) {
2400 LOG_ERROR("Target not examined yet");
2401 return ERROR_FAIL;
2402 }
2403
2404 int retval = target_read_memory(target, address, 1, 1, value);
2405
2406 if (retval == ERROR_OK) {
2407 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2408 address,
2409 *value);
2410 } else {
2411 *value = 0x0;
2412 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2413 address);
2414 }
2415
2416 return retval;
2417 }
2418
2419 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2420 {
2421 int retval;
2422 uint8_t value_buf[8];
2423 if (!target_was_examined(target)) {
2424 LOG_ERROR("Target not examined yet");
2425 return ERROR_FAIL;
2426 }
2427
2428 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2429 address,
2430 value);
2431
2432 target_buffer_set_u64(target, value_buf, value);
2433 retval = target_write_memory(target, address, 8, 1, value_buf);
2434 if (retval != ERROR_OK)
2435 LOG_DEBUG("failed: %i", retval);
2436
2437 return retval;
2438 }
2439
2440 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2441 {
2442 int retval;
2443 uint8_t value_buf[4];
2444 if (!target_was_examined(target)) {
2445 LOG_ERROR("Target not examined yet");
2446 return ERROR_FAIL;
2447 }
2448
2449 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2450 address,
2451 value);
2452
2453 target_buffer_set_u32(target, value_buf, value);
2454 retval = target_write_memory(target, address, 4, 1, value_buf);
2455 if (retval != ERROR_OK)
2456 LOG_DEBUG("failed: %i", retval);
2457
2458 return retval;
2459 }
2460
2461 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2462 {
2463 int retval;
2464 uint8_t value_buf[2];
2465 if (!target_was_examined(target)) {
2466 LOG_ERROR("Target not examined yet");
2467 return ERROR_FAIL;
2468 }
2469
2470 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2471 address,
2472 value);
2473
2474 target_buffer_set_u16(target, value_buf, value);
2475 retval = target_write_memory(target, address, 2, 1, value_buf);
2476 if (retval != ERROR_OK)
2477 LOG_DEBUG("failed: %i", retval);
2478
2479 return retval;
2480 }
2481
2482 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2483 {
2484 int retval;
2485 if (!target_was_examined(target)) {
2486 LOG_ERROR("Target not examined yet");
2487 return ERROR_FAIL;
2488 }
2489
2490 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2491 address, value);
2492
2493 retval = target_write_memory(target, address, 1, 1, &value);
2494 if (retval != ERROR_OK)
2495 LOG_DEBUG("failed: %i", retval);
2496
2497 return retval;
2498 }
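/* The u8/u16/u32/u64 helpers above keep single-register accesses concise and
 * take care of target endianness. Illustrative read-modify-write sketch;
 * CTRL_REG and CTRL_ENABLE are hypothetical device constants.
 *
 *	uint32_t ctrl;
 *	int retval = target_read_u32(target, CTRL_REG, &ctrl);
 *	if (retval == ERROR_OK)
 *		retval = target_write_u32(target, CTRL_REG, ctrl | CTRL_ENABLE);
 */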
2499
2500 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2501 {
2502 int retval;
2503 uint8_t value_buf[8];
2504 if (!target_was_examined(target)) {
2505 LOG_ERROR("Target not examined yet");
2506 return ERROR_FAIL;
2507 }
2508
2509 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2510 address,
2511 value);
2512
2513 target_buffer_set_u64(target, value_buf, value);
2514 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2515 if (retval != ERROR_OK)
2516 LOG_DEBUG("failed: %i", retval);
2517
2518 return retval;
2519 }
2520
2521 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2522 {
2523 int retval;
2524 uint8_t value_buf[4];
2525 if (!target_was_examined(target)) {
2526 LOG_ERROR("Target not examined yet");
2527 return ERROR_FAIL;
2528 }
2529
2530 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2531 address,
2532 value);
2533
2534 target_buffer_set_u32(target, value_buf, value);
2535 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2536 if (retval != ERROR_OK)
2537 LOG_DEBUG("failed: %i", retval);
2538
2539 return retval;
2540 }
2541
2542 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2543 {
2544 int retval;
2545 uint8_t value_buf[2];
2546 if (!target_was_examined(target)) {
2547 LOG_ERROR("Target not examined yet");
2548 return ERROR_FAIL;
2549 }
2550
2551 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2552 address,
2553 value);
2554
2555 target_buffer_set_u16(target, value_buf, value);
2556 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2557 if (retval != ERROR_OK)
2558 LOG_DEBUG("failed: %i", retval);
2559
2560 return retval;
2561 }
2562
2563 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2564 {
2565 int retval;
2566 if (!target_was_examined(target)) {
2567 LOG_ERROR("Target not examined yet");
2568 return ERROR_FAIL;
2569 }
2570
2571 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2572 address, value);
2573
2574 retval = target_write_phys_memory(target, address, 1, 1, &value);
2575 if (retval != ERROR_OK)
2576 LOG_DEBUG("failed: %i", retval);
2577
2578 return retval;
2579 }
2580
2581 static int find_target(struct command_context *cmd_ctx, const char *name)
2582 {
2583 struct target *target = get_target(name);
2584 if (target == NULL) {
2585 LOG_ERROR("Target: %s is unknown, try one of:\n", name);
2586 return ERROR_FAIL;
2587 }
2588 if (!target->tap->enabled) {
2589 LOG_USER("Target: TAP %s is disabled, "
2590 "can't be the current target\n",
2591 target->tap->dotted_name);
2592 return ERROR_FAIL;
2593 }
2594
2595 cmd_ctx->current_target = target;
2596 if (cmd_ctx->current_target_override)
2597 cmd_ctx->current_target_override = target;
2598
2599 return ERROR_OK;
2600 }
2601
2602
2603 COMMAND_HANDLER(handle_targets_command)
2604 {
2605 int retval = ERROR_OK;
2606 if (CMD_ARGC == 1) {
2607 retval = find_target(CMD_CTX, CMD_ARGV[0]);
2608 if (retval == ERROR_OK) {
2609 /* we're done! */
2610 return retval;
2611 }
2612 }
2613
2614 struct target *target = all_targets;
2615 command_print(CMD_CTX, " TargetName Type Endian TapName State ");
2616 command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
2617 while (target) {
2618 const char *state;
2619 char marker = ' ';
2620
2621 if (target->tap->enabled)
2622 state = target_state_name(target);
2623 else
2624 state = "tap-disabled";
2625
2626 if (CMD_CTX->current_target == target)
2627 marker = '*';
2628
2629 /* keep columns lined up to match the headers above */
2630 command_print(CMD_CTX,
2631 "%2d%c %-18s %-10s %-6s %-18s %s",
2632 target->target_number,
2633 marker,
2634 target_name(target),
2635 target_type_name(target),
2636 Jim_Nvp_value2name_simple(nvp_target_endian,
2637 target->endianness)->name,
2638 target->tap->dotted_name,
2639 state);
2640 target = target->next;
2641 }
2642
2643 return retval;
2644 }
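/* Sample output of a bare "targets" command, following the format strings
 * above (target name, type and state are illustrative):
 *
 *	    TargetName         Type       Endian TapName            State
 *	--  ------------------ ---------- ------ ------------------ ------------
 *	 0* stm32f1x.cpu       cortex_m   little stm32f1x.cpu       halted
 */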
2645
2646 /* Periodically we check for reset & power dropout; the matching Tcl event procs (which typically issue a "reset halt") are run when one is sensed. */
2647
2648 static int powerDropout;
2649 static int srstAsserted;
2650
2651 static int runPowerRestore;
2652 static int runPowerDropout;
2653 static int runSrstAsserted;
2654 static int runSrstDeasserted;
2655
2656 static int sense_handler(void)
2657 {
2658 static int prevSrstAsserted;
2659 static int prevPowerdropout;
2660
2661 int retval = jtag_power_dropout(&powerDropout);
2662 if (retval != ERROR_OK)
2663 return retval;
2664
2665 int powerRestored;
2666 powerRestored = prevPowerdropout && !powerDropout;
2667 if (powerRestored)
2668 runPowerRestore = 1;
2669
2670 int64_t current = timeval_ms();
2671 static int64_t lastPower;
2672 bool waitMore = lastPower + 2000 > current;
2673 if (powerDropout && !waitMore) {
2674 runPowerDropout = 1;
2675 lastPower = current;
2676 }
2677
2678 retval = jtag_srst_asserted(&srstAsserted);
2679 if (retval != ERROR_OK)
2680 return retval;
2681
2682 int srstDeasserted;
2683 srstDeasserted = prevSrstAsserted && !srstAsserted;
2684
2685 static int64_t lastSrst;
2686 waitMore = lastSrst + 2000 > current;
2687 if (srstDeasserted && !waitMore) {
2688 runSrstDeasserted = 1;
2689 lastSrst = current;
2690 }
2691
2692 if (!prevSrstAsserted && srstAsserted)
2693 runSrstAsserted = 1;
2694
2695 prevSrstAsserted = srstAsserted;
2696 prevPowerdropout = powerDropout;
2697
2698 if (srstDeasserted || powerRestored) {
2699 /* Other than logging the event we can't do anything here.
2700 * Issuing a reset is a particularly bad idea as we might
2701 * be inside a reset already.
2702 */
2703 }
2704
2705 return ERROR_OK;
2706 }
2707
2708 /* process target state changes */
2709 static int handle_target(void *priv)
2710 {
2711 Jim_Interp *interp = (Jim_Interp *)priv;
2712 int retval = ERROR_OK;
2713
2714 if (!is_jtag_poll_safe()) {
2715 /* polling is disabled currently */
2716 return ERROR_OK;
2717 }
2718
2719 /* we do not want to recurse here... */
2720 static int recursive;
2721 if (!recursive) {
2722 recursive = 1;
2723 sense_handler();
2724 /* danger! running these procedures can trigger srst assertions and power dropouts.
2725 * We need to avoid an infinite loop/recursion here and we do that by
2726 * clearing the flags after running these events.
2727 */
2728 int did_something = 0;
2729 if (runSrstAsserted) {
2730 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2731 Jim_Eval(interp, "srst_asserted");
2732 did_something = 1;
2733 }
2734 if (runSrstDeasserted) {
2735 Jim_Eval(interp, "srst_deasserted");
2736 did_something = 1;
2737 }
2738 if (runPowerDropout) {
2739 LOG_INFO("Power dropout detected, running power_dropout proc.");
2740 Jim_Eval(interp, "power_dropout");
2741 did_something = 1;
2742 }
2743 if (runPowerRestore) {
2744 Jim_Eval(interp, "power_restore");
2745 did_something = 1;
2746 }
2747
2748 if (did_something) {
2749 /* clear detect flags */
2750 sense_handler();
2751 }
2752
2753 /* clear action flags */
2754
2755 runSrstAsserted = 0;
2756 runSrstDeasserted = 0;
2757 runPowerRestore = 0;
2758 runPowerDropout = 0;
2759
2760 recursive = 0;
2761 }
2762
2763 /* Poll targets for state changes unless that's globally disabled.
2764 * Skip targets that are currently disabled.
2765 */
2766 for (struct target *target = all_targets;
2767 is_jtag_poll_safe() && target;
2768 target = target->next) {
2769
2770 if (!target_was_examined(target))
2771 continue;
2772
2773 if (!target->tap->enabled)
2774 continue;
2775
2776 if (target->backoff.times > target->backoff.count) {
2777 /* do not poll this time as we failed previously */
2778 target->backoff.count++;
2779 continue;
2780 }
2781 target->backoff.count = 0;
2782
2783 /* only poll target if we've got power and srst isn't asserted */
2784 if (!powerDropout && !srstAsserted) {
2785 /* polling may fail silently until the target has been examined */
2786 retval = target_poll(target);
2787 if (retval != ERROR_OK) {
2788 /* polling_interval is 100ms by default; back off, increasing the effective interval up to 5000ms */
2789 if (target->backoff.times * polling_interval < 5000) {
2790 target->backoff.times *= 2;
2791 target->backoff.times++;
2792 }
2793
2794 /* Tell GDB to halt the debugger. This allows the user to
2795 * run monitor commands to handle the situation.
2796 */
2797 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2798 }
2799 if (target->backoff.times > 0) {
2800 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
2801 target_reset_examined(target);
2802 retval = target_examine_one(target);
2803 /* Target examination could have failed due to unstable connection,
2804 * but we set the examined flag anyway to repoll it later */
2805 if (retval != ERROR_OK) {
2806 target->examined = true;
2807 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
2808 target->backoff.times * polling_interval);
2809 return retval;
2810 }
2811 }
2812
2813 /* Since we succeeded, we reset backoff count */
2814 target->backoff.times = 0;
2815 }
2816 }
2817
2818 return retval;
2819 }
2820
2821 COMMAND_HANDLER(handle_reg_command)
2822 {
2823 struct target *target;
2824 struct reg *reg = NULL;
2825 unsigned count = 0;
2826 char *value;
2827
2828 LOG_DEBUG("-");
2829
2830 target = get_current_target(CMD_CTX);
2831
2832 /* list all available registers for the current target */
2833 if (CMD_ARGC == 0) {
2834 struct reg_cache *cache = target->reg_cache;
2835
2836 count = 0;
2837 while (cache) {
2838 unsigned i;
2839
2840 command_print(CMD_CTX, "===== %s", cache->name);
2841
2842 for (i = 0, reg = cache->reg_list;
2843 i < cache->num_regs;
2844 i++, reg++, count++) {
2845 if (reg->exist == false)
2846 continue;
2847 /* only print cached values if they are valid */
2848 if (reg->valid) {
2849 value = buf_to_str(reg->value,
2850 reg->size, 16);
2851 command_print(CMD_CTX,
2852 "(%i) %s (/%" PRIu32 "): 0x%s%s",
2853 count, reg->name,
2854 reg->size, value,
2855 reg->dirty
2856 ? " (dirty)"
2857 : "");
2858 free(value);
2859 } else {
2860 command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
2861 count, reg->name,
2862 reg->size);
2863 }
2864 }
2865 cache = cache->next;
2866 }
2867
2868 return ERROR_OK;
2869 }
2870
2871 /* access a single register by its ordinal number */
2872 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
2873 unsigned num;
2874 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
2875
2876 struct reg_cache *cache = target->reg_cache;
2877 count = 0;
2878 while (cache) {
2879 unsigned i;
2880 for (i = 0; i < cache->num_regs; i++) {
2881 if (count++ == num) {
2882 reg = &cache->reg_list[i];
2883 break;
2884 }
2885 }
2886 if (reg)
2887 break;
2888 cache = cache->next;
2889 }
2890
2891 if (!reg) {
2892 command_print(CMD_CTX, "%i is out of bounds, the current target "
2893 "has only %i registers (0 - %i)", num, count, count - 1);
2894 return ERROR_OK;
2895 }
2896 } else {
2897 /* access a single register by its name */
2898 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
2899
2900 if (!reg)
2901 goto not_found;
2902 }
2903
2904 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
2905
2906 if (!reg->exist)
2907 goto not_found;
2908
2909 /* display a register */
2910 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
2911 && (CMD_ARGV[1][0] <= '9')))) {
2912 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
2913 reg->valid = 0;
2914
2915 if (reg->valid == 0)
2916 reg->type->get(reg);
2917 value = buf_to_str(reg->value, reg->size, 16);
2918 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2919 free(value);
2920 return ERROR_OK;
2921 }
2922
2923 /* set register value */
2924 if (CMD_ARGC == 2) {
2925 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
2926 if (buf == NULL)
2927 return ERROR_FAIL;
2928 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
2929
2930 reg->type->set(reg, buf);
2931
2932 value = buf_to_str(reg->value, reg->size, 16);
2933 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2934 free(value);
2935
2936 free(buf);
2937
2938 return ERROR_OK;
2939 }
2940
2941 return ERROR_COMMAND_SYNTAX_ERROR;
2942
2943 not_found:
2944 command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
2945 return ERROR_OK;
2946 }
2947
2948 COMMAND_HANDLER(handle_poll_command)
2949 {
2950 int retval = ERROR_OK;
2951 struct target *target = get_current_target(CMD_CTX);
2952
2953 if (CMD_ARGC == 0) {
2954 command_print(CMD_CTX, "background polling: %s",
2955 jtag_poll_get_enabled() ? "on" : "off");
2956 command_print(CMD_CTX, "TAP: %s (%s)",
2957 target->tap->dotted_name,
2958 target->tap->enabled ? "enabled" : "disabled");
2959 if (!target->tap->enabled)
2960 return ERROR_OK;
2961 retval = target_poll(target);
2962 if (retval != ERROR_OK)
2963 return retval;
2964 retval = target_arch_state(target);
2965 if (retval != ERROR_OK)
2966 return retval;
2967 } else if (CMD_ARGC == 1) {
2968 bool enable;
2969 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2970 jtag_poll_set_enabled(enable);
2971 } else
2972 return ERROR_COMMAND_SYNTAX_ERROR;
2973
2974 return retval;
2975 }
2976
2977 COMMAND_HANDLER(handle_wait_halt_command)
2978 {
2979 if (CMD_ARGC > 1)
2980 return ERROR_COMMAND_SYNTAX_ERROR;
2981
2982 unsigned ms = DEFAULT_HALT_TIMEOUT;
2983 if (1 == CMD_ARGC) {
2984 int retval = parse_uint(CMD_ARGV[0], &ms);
2985 if (ERROR_OK != retval)
2986 return ERROR_COMMAND_SYNTAX_ERROR;
2987 }
2988
2989 struct target *target = get_current_target(CMD_CTX);
2990 return target_wait_state(target, TARGET_HALTED, ms);
2991 }
2992
2993 /* wait for target state to change. The trick here is to have a low
2994 * latency for short waits and not to suck up all the CPU time
2995 * on longer waits.
2996 *
2997 * After 500ms, keep_alive() is invoked
2998 */
2999 int target_wait_state(struct target *target, enum target_state state, int ms)
3000 {
3001 int retval;
3002 int64_t then = 0, cur;
3003 bool once = true;
3004
3005 for (;;) {
3006 retval = target_poll(target);
3007 if (retval != ERROR_OK)
3008 return retval;
3009 if (target->state == state)
3010 break;
3011 cur = timeval_ms();
3012 if (once) {
3013 once = false;
3014 then = timeval_ms();
3015 LOG_DEBUG("waiting for target %s...",
3016 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
3017 }
3018
3019 if (cur-then > 500)
3020 keep_alive();
3021
3022 if ((cur-then) > ms) {
3023 LOG_ERROR("timed out while waiting for target %s",
3024 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
3025 return ERROR_FAIL;
3026 }
3027 }
3028
3029 return ERROR_OK;
3030 }
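/* Illustrative pairing of a halt request with the wait helper above, which
 * is effectively what the halt command handler below does:
 *
 *	int retval = target_halt(target);
 *	if (retval == ERROR_OK)
 *		retval = target_wait_state(target, TARGET_HALTED, DEFAULT_HALT_TIMEOUT);
 */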
3031
3032 COMMAND_HANDLER(handle_halt_command)
3033 {
3034 LOG_DEBUG("-");
3035
3036 struct target *target = get_current_target(CMD_CTX);
3037
3038 target->verbose_halt_msg = true;
3039
3040 int retval = target_halt(target);
3041 if (ERROR_OK != retval)
3042 return retval;
3043
3044 if (CMD_ARGC == 1) {
3045 unsigned wait_local;
3046 retval = parse_uint(CMD_ARGV[0], &wait_local);
3047 if (ERROR_OK != retval)
3048 return ERROR_COMMAND_SYNTAX_ERROR;
3049 if (!wait_local)
3050 return ERROR_OK;
3051 }
3052
3053 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3054 }
3055
3056 COMMAND_HANDLER(handle_soft_reset_halt_command)
3057 {
3058 struct target *target = get_current_target(CMD_CTX);
3059
3060 LOG_USER("requesting target halt and executing a soft reset");
3061
3062 target_soft_reset_halt(target);
3063
3064 return ERROR_OK;
3065 }
3066
3067 COMMAND_HANDLER(handle_reset_command)
3068 {
3069 if (CMD_ARGC > 1)
3070 return ERROR_COMMAND_SYNTAX_ERROR;
3071
3072 enum target_reset_mode reset_mode = RESET_RUN;
3073 if (CMD_ARGC == 1) {
3074 const Jim_Nvp *n;
3075 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3076 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
3077 return ERROR_COMMAND_SYNTAX_ERROR;
3078 reset_mode = n->value;
3079 }
3080
3081 /* reset *all* targets */
3082 return target_process_reset(CMD, reset_mode);
3083 }
3084
3085
3086 COMMAND_HANDLER(handle_resume_command)
3087 {
3088 int current = 1;
3089 if (CMD_ARGC > 1)
3090 return ERROR_COMMAND_SYNTAX_ERROR;
3091
3092 struct target *target = get_current_target(CMD_CTX);
3093
3094 /* with no CMD_ARGV, resume from current pc, addr = 0,
3095 * with one argument, addr = CMD_ARGV[0],
3096 * handle breakpoints, not debugging */
3097 target_addr_t addr = 0;
3098 if (CMD_ARGC == 1) {
3099 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3100 current = 0;
3101 }
3102
3103 return target_resume(target, current, addr, 1, 0);
3104 }
3105
3106 COMMAND_HANDLER(handle_step_command)
3107 {
3108 if (CMD_ARGC > 1)
3109 return ERROR_COMMAND_SYNTAX_ERROR;
3110
3111 LOG_DEBUG("-");
3112
3113 /* with no CMD_ARGV, step from current pc, addr = 0,
3114 * with one argument addr = CMD_ARGV[0],
3115 * handle breakpoints, debugging */
3116 target_addr_t addr = 0;
3117 int current_pc = 1;
3118 if (CMD_ARGC == 1) {
3119 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3120 current_pc = 0;
3121 }
3122
3123 struct target *target = get_current_target(CMD_CTX);
3124
3125 return target->type->step(target, current_pc, addr, 1);
3126 }
3127
3128 static void handle_md_output(struct command_invocation *cmd,
3129 struct target *target, target_addr_t address, unsigned size,
3130 unsigned count, const uint8_t *buffer)
3131 {
3132 const unsigned line_bytecnt = 32;
3133 unsigned line_modulo = line_bytecnt / size;
3134
3135 char output[line_bytecnt * 4 + 1];
3136 unsigned output_len = 0;
3137
3138 const char *value_fmt;
3139 switch (size) {
3140 case 8:
3141 value_fmt = "%16.16"PRIx64" ";
3142 break;
3143 case 4:
3144 value_fmt = "%8.8"PRIx64" ";
3145 break;
3146 case 2:
3147 value_fmt = "%4.4"PRIx64" ";
3148 break;
3149 case 1:
3150 value_fmt = "%2.2"PRIx64" ";
3151 break;
3152 default:
3153 /* "can't happen", caller checked */
3154 LOG_ERROR("invalid memory read size: %u", size);
3155 return;
3156 }
3157
3158 for (unsigned i = 0; i < count; i++) {
3159 if (i % line_modulo == 0) {
3160 output_len += snprintf(output + output_len,
3161 sizeof(output) - output_len,
3162 TARGET_ADDR_FMT ": ",
3163 (address + (i * size)));
3164 }
3165
3166 uint64_t value = 0;
3167 const uint8_t *value_ptr = buffer + i * size;
3168 switch (size) {
3169 case 8:
3170 value = target_buffer_get_u64(target, value_ptr);
3171 break;
3172 case 4:
3173 value = target_buffer_get_u32(target, value_ptr);
3174 break;
3175 case 2:
3176 value = target_buffer_get_u16(target, value_ptr);
3177 break;
3178 case 1:
3179 value = *value_ptr;
3180 }
3181 output_len += snprintf(output + output_len,
3182 sizeof(output) - output_len,
3183 value_fmt, value);
3184
3185 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3186 command_print(cmd->ctx, "%s", output);
3187 output_len = 0;
3188 }
3189 }
3190 }
3191
3192 COMMAND_HANDLER(handle_md_command)
3193 {
3194 if (CMD_ARGC < 1)
3195 return ERROR_COMMAND_SYNTAX_ERROR;
3196
3197 unsigned size = 0;
3198 switch (CMD_NAME[2]) {
3199 case 'd':
3200 size = 8;
3201 break;
3202 case 'w':
3203 size = 4;
3204 break;
3205 case 'h':
3206 size = 2;
3207 break;
3208 case 'b':
3209 size = 1;
3210 break;
3211 default:
3212 return ERROR_COMMAND_SYNTAX_ERROR;
3213 }
3214
3215 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3216 int (*fn)(struct target *target,
3217 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3218 if (physical) {
3219 CMD_ARGC--;
3220 CMD_ARGV++;
3221 fn = target_read_phys_memory;
3222 } else
3223 fn = target_read_memory;
3224 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3225 return ERROR_COMMAND_SYNTAX_ERROR;
3226
3227 target_addr_t address;
3228 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3229
3230 unsigned count = 1;
3231 if (CMD_ARGC == 2)
3232 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3233
3234 uint8_t *buffer = calloc(count, size);
3235 if (buffer == NULL) {
3236 LOG_ERROR("Failed to allocate md read buffer");
3237 return ERROR_FAIL;
3238 }
3239
3240 struct target *target = get_current_target(CMD_CTX);
3241 int retval = fn(target, address, size, count, buffer);
3242 if (ERROR_OK == retval)
3243 handle_md_output(CMD, target, address, size, count, buffer);
3244
3245 free(buffer);
3246
3247 return retval;
3248 }
3249
3250 typedef int (*target_write_fn)(struct target *target,
3251 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3252
3253 static int target_fill_mem(struct target *target,
3254 target_addr_t address,
3255 target_write_fn fn,
3256 unsigned data_size,
3257 /* value */
3258 uint64_t b,
3259 /* count */
3260 unsigned c)
3261 {
3262 /* We have to write in reasonably large chunks to be able
3263 * to fill large memory areas with any sane speed */
3264 const unsigned chunk_size = 16384;
3265 uint8_t *target_buf = malloc(chunk_size * data_size);
3266 if (target_buf == NULL) {
3267 LOG_ERROR("Out of memory");
3268 return ERROR_FAIL;
3269 }
3270
3271 for (unsigned i = 0; i < chunk_size; i++) {
3272 switch (data_size) {
3273 case 8:
3274 target_buffer_set_u64(target, target_buf + i * data_size, b);
3275 break;
3276 case 4:
3277 target_buffer_set_u32(target, target_buf + i * data_size, b);
3278 break;
3279 case 2:
3280 target_buffer_set_u16(target, target_buf + i * data_size, b);
3281 break;
3282 case 1:
3283 target_buffer_set_u8(target, target_buf + i * data_size, b);
3284 break;
3285 default:
3286 exit(-1);
3287 }
3288 }
3289
3290 int retval = ERROR_OK;
3291
3292 for (unsigned x = 0; x < c; x += chunk_size) {
3293 unsigned current;
3294 current = c - x;
3295 if (current > chunk_size)
3296 current = chunk_size;
3297 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3298 if (retval != ERROR_OK)
3299 break;
3300 /* avoid GDB timeouts */
3301 keep_alive();
3302 }
3303 free(target_buf);
3304
3305 return retval;
3306 }
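/* Illustrative use of the chunked fill above, as the mw command handler
 * below arranges; the address and pattern are arbitrary examples:
 *
 *	// write 256 32-bit words of 0xdeadbeef starting at 0x20000000
 *	int retval = target_fill_mem(target, 0x20000000, target_write_memory,
 *			4, 0xdeadbeef, 256);
 */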
3307
3308
3309 COMMAND_HANDLER(handle_mw_command)
3310 {
3311 if (CMD_ARGC < 2)
3312 return ERROR_COMMAND_SYNTAX_ERROR;
3313 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3314 target_write_fn fn;
3315 if (physical) {
3316 CMD_ARGC--;
3317 CMD_ARGV++;
3318 fn = target_write_phys_memory;
3319 } else
3320 fn = target_write_memory;
3321 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3322 return ERROR_COMMAND_SYNTAX_ERROR;
3323
3324 target_addr_t address;
3325 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3326
3327 target_addr_t value;
3328 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], value);
3329
3330 unsigned count = 1;
3331 if (CMD_ARGC == 3)
3332 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3333
3334 struct target *target = get_current_target(CMD_CTX);
3335 unsigned wordsize;
3336 switch (CMD_NAME[2]) {
3337 case 'd':
3338 wordsize = 8;
3339 break;
3340 case 'w':
3341 wordsize = 4;
3342 break;
3343 case 'h':
3344 wordsize = 2;
3345 break;
3346 case 'b':
3347 wordsize = 1;
3348 break;
3349 default:
3350 return ERROR_COMMAND_SYNTAX_ERROR;
3351 }
3352
3353 return target_fill_mem(target, address, fn, wordsize, value, count);
3354 }
3355
3356 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
3357 target_addr_t *min_address, target_addr_t *max_address)
3358 {
3359 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3360 return ERROR_COMMAND_SYNTAX_ERROR;
3361
3362 /* a base address isn't always necessary,
3363 * default to 0x0 (i.e. don't relocate) */
3364 if (CMD_ARGC >= 2) {
3365 target_addr_t addr;
3366 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3367 image->base_address = addr;
3368 image->base_address_set = 1;
3369 } else
3370 image->base_address_set = 0;
3371
3372 image->start_address_set = 0;
3373
3374 if (CMD_ARGC >= 4)
3375 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3376 if (CMD_ARGC == 5) {
3377 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3378 /* use size (given) to find max (required) */
3379 *max_address += *min_address;
3380 }
3381
3382 if (*min_address > *max_address)
3383 return ERROR_COMMAND_SYNTAX_ERROR;
3384
3385 return ERROR_OK;
3386 }
3387
3388 COMMAND_HANDLER(handle_load_image_command)
3389 {
3390 uint8_t *buffer;
3391 size_t buf_cnt;
3392 uint32_t image_size;
3393 target_addr_t min_address = 0;
3394 target_addr_t max_address = -1;
3395 int i;
3396 struct image image;
3397
3398 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
3399 &image, &min_address, &max_address);
3400 if (ERROR_OK != retval)
3401 return retval;
3402
3403 struct target *target = get_current_target(CMD_CTX);
3404
3405 struct duration bench;
3406 duration_start(&bench);
3407
3408 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3409 return ERROR_FAIL;
3410
3411 image_size = 0x0;
3412 retval = ERROR_OK;
3413 for (i = 0; i < image.num_sections; i++) {
3414 buffer = malloc(image.sections[i].size);
3415 if (buffer == NULL) {
3416 command_print(CMD_CTX,
3417 "error allocating buffer for section (%d bytes)",
3418 (int)(image.sections[i].size));
3419 retval = ERROR_FAIL;
3420 break;
3421 }
3422
3423 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3424 if (retval != ERROR_OK) {
3425 free(buffer);
3426 break;
3427 }
3428
3429 uint32_t offset = 0;
3430 uint32_t length = buf_cnt;
3431
3432 /* DANGER!!! beware of unsigned comparison here!!! */
3433
3434 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3435 (image.sections[i].base_address < max_address)) {
3436
3437 if (image.sections[i].base_address < min_address) {
3438 /* clip addresses below */
3439 offset += min_address-image.sections[i].base_address;
3440 length -= offset;
3441 }
3442
3443 if (image.sections[i].base_address + buf_cnt > max_address)
3444 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3445
3446 retval = target_write_buffer(target,
3447 image.sections[i].base_address + offset, length, buffer + offset);
3448 if (retval != ERROR_OK) {
3449 free(buffer);
3450 break;
3451 }
3452 image_size += length;
3453 command_print(CMD_CTX, "%u bytes written at address " TARGET_ADDR_FMT "",
3454 (unsigned int)length,
3455 image.sections[i].base_address + offset);
3456 }
3457
3458 free(buffer);
3459 }
3460
3461 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3462 command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
3463 "in %fs (%0.3f KiB/s)", image_size,
3464 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3465 }
3466
3467 image_close(&image);
3468
3469 return retval;
3470
3471 }
3472
3473 COMMAND_HANDLER(handle_dump_image_command)
3474 {
3475 struct fileio *fileio;
3476 uint8_t *buffer;
3477 int retval, retvaltemp;
3478 target_addr_t address, size;
3479 struct duration bench;
3480 struct target *target = get_current_target(CMD_CTX);
3481
3482 if (CMD_ARGC != 3)
3483 return ERROR_COMMAND_SYNTAX_ERROR;
3484
3485 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3486 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3487
3488 uint32_t buf_size = (size > 4096) ? 4096 : size;
3489 buffer = malloc(buf_size);
3490 if (!buffer)
3491 return ERROR_FAIL;
3492
3493 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3494 if (retval != ERROR_OK) {
3495 free(buffer);
3496 return retval;
3497 }
3498
3499 duration_start(&bench);
3500
3501 while (size > 0) {
3502 size_t size_written;
3503 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3504 retval = target_read_buffer(target, address, this_run_size, buffer);
3505 if (retval != ERROR_OK)
3506 break;
3507
3508 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3509 if (retval != ERROR_OK)
3510 break;
3511
3512 size -= this_run_size;
3513 address += this_run_size;
3514 }
3515
3516 free(buffer);
3517
3518 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3519 size_t filesize;
3520 retval = fileio_size(fileio, &filesize);
3521 if (retval != ERROR_OK)
3522 return retval;
3523 command_print(CMD_CTX,
3524 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3525 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3526 }
3527
3528 retvaltemp = fileio_close(fileio);
3529 if (retvaltemp != ERROR_OK)
3530 return retvaltemp;
3531
3532 return retval;
3533 }
3534
3535 enum verify_mode {
3536 IMAGE_TEST = 0,
3537 IMAGE_VERIFY = 1,
3538 IMAGE_CHECKSUM_ONLY = 2
3539 };
3540
3541 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3542 {
3543 uint8_t *buffer;
3544 size_t buf_cnt;
3545 uint32_t image_size;
3546 int i;
3547 int retval;
3548 uint32_t checksum = 0;
3549 uint32_t mem_checksum = 0;
3550
3551 struct image image;
3552
3553 struct target *target = get_current_target(CMD_CTX);
3554
3555 if (CMD_ARGC < 1)
3556 return ERROR_COMMAND_SYNTAX_ERROR;
3557
3558 if (!target) {
3559 LOG_ERROR("no target selected");
3560 return ERROR_FAIL;
3561 }
3562
3563 struct duration bench;
3564 duration_start(&bench);
3565
3566 if (CMD_ARGC >= 2) {
3567 target_addr_t addr;
3568 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3569 image.base_address = addr;
3570 image.base_address_set = 1;
3571 } else {
3572 image.base_address_set = 0;
3573 image.base_address = 0x0;
3574 }
3575
3576 image.start_address_set = 0;
3577
3578 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3579 if (retval != ERROR_OK)
3580 return retval;
3581
3582 image_size = 0x0;
3583 int diffs = 0;
3584 retval = ERROR_OK;
3585 for (i = 0; i < image.num_sections; i++) {
3586 buffer = malloc(image.sections[i].size);
3587 if (buffer == NULL) {
3588 command_print(CMD_CTX, "error allocating buffer for section (%d bytes)",
3589 (int)(image.sections[i].size));
3590 retval = ERROR_FAIL;
3591 break;
3592 }
3593 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3594 if (retval != ERROR_OK) {
3595 free(buffer);
3596 break;
3597 }
3598
3599 if (verify >= IMAGE_VERIFY) {
3600 /* calculate checksum of image */
3601 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3602 if (retval != ERROR_OK) {
3603 free(buffer);
3604 break;
3605 }
3606
3607 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3608 if (retval != ERROR_OK) {
3609 free(buffer);
3610 break;
3611 }
3612 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3613 LOG_ERROR("checksum mismatch");
3614 free(buffer);
3615 retval = ERROR_FAIL;
3616 goto done;
3617 }
3618 if (checksum != mem_checksum) {
3619 /* failed crc checksum, fall back to a binary compare */
3620 uint8_t *data;
3621
3622 if (diffs == 0)
3623 LOG_ERROR("checksum mismatch - attempting binary compare");
3624
3625 data = malloc(buf_cnt);
3626 if (data == NULL) {
3627 free(buffer);
3628 retval = ERROR_FAIL;
3629 break;
3630 }
3631 /* Can we use 32-bit word accesses? */
3632 int size = (buf_cnt % 4) == 0 ? 4 : 1;
3633 int count = buf_cnt / size;
3634 retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
3635 if (retval == ERROR_OK) {
3636 uint32_t t;
3637 for (t = 0; t < buf_cnt; t++) {
3638 if (data[t] != buffer[t]) {
3639 command_print(CMD_CTX,
3640 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3641 diffs,
3642 (unsigned)(t + image.sections[i].base_address),
3643 data[t],
3644 buffer[t]);
3645 if (diffs++ >= 127) {
3646 command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
3647 free(data);
3648 free(buffer);
3649 goto done;
3650 }
3651 }
3652 keep_alive();
3653 }
3654 }
3655 free(data);
3656 }
3657 } else {
3658 command_print(CMD_CTX, "address " TARGET_ADDR_FMT " length 0x%08zx",
3659 image.sections[i].base_address,
3660 buf_cnt);
3661 }
3662
3663 free(buffer);
3664 image_size += buf_cnt;
3665 }
3666 if (diffs > 0)
3667 command_print(CMD_CTX, "No more differences found.");
3668 done:
3669 if (diffs > 0)
3670 retval = ERROR_FAIL;
3671 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3672 command_print(CMD_CTX, "verified %" PRIu32 " bytes "
3673 "in %fs (%0.3f KiB/s)", image_size,
3674 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3675 }
3676
3677 image_close(&image);
3678
3679 return retval;
3680 }
3681
3682 COMMAND_HANDLER(handle_verify_image_checksum_command)
3683 {
3684 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3685 }
3686
3687 COMMAND_HANDLER(handle_verify_image_command)
3688 {
3689 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3690 }
3691