target,flash: allow target_free_working_area on NULL area pointer
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net> *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/align.h>
45 #include <helper/time_support.h>
46 #include <jtag/jtag.h>
47 #include <flash/nor/core.h>
48
49 #include "target.h"
50 #include "target_type.h"
51 #include "target_request.h"
52 #include "breakpoints.h"
53 #include "register.h"
54 #include "trace.h"
55 #include "image.h"
56 #include "rtos/rtos.h"
57 #include "transport/transport.h"
58 #include "arm_cti.h"
59
60 /* default halt wait timeout (ms) */
61 #define DEFAULT_HALT_TIMEOUT 5000
62
63 static int target_read_buffer_default(struct target *target, target_addr_t address,
64 uint32_t count, uint8_t *buffer);
65 static int target_write_buffer_default(struct target *target, target_addr_t address,
66 uint32_t count, const uint8_t *buffer);
67 static int target_array2mem(Jim_Interp *interp, struct target *target,
68 int argc, Jim_Obj * const *argv);
69 static int target_mem2array(Jim_Interp *interp, struct target *target,
70 int argc, Jim_Obj * const *argv);
71 static int target_register_user_commands(struct command_context *cmd_ctx);
72 static int target_get_gdb_fileio_info_default(struct target *target,
73 struct gdb_fileio_info *fileio_info);
74 static int target_gdb_fileio_end_default(struct target *target, int retcode,
75 int fileio_errno, bool ctrl_c);
76
77 /* targets */
78 extern struct target_type arm7tdmi_target;
79 extern struct target_type arm720t_target;
80 extern struct target_type arm9tdmi_target;
81 extern struct target_type arm920t_target;
82 extern struct target_type arm966e_target;
83 extern struct target_type arm946e_target;
84 extern struct target_type arm926ejs_target;
85 extern struct target_type fa526_target;
86 extern struct target_type feroceon_target;
87 extern struct target_type dragonite_target;
88 extern struct target_type xscale_target;
89 extern struct target_type cortexm_target;
90 extern struct target_type cortexa_target;
91 extern struct target_type aarch64_target;
92 extern struct target_type cortexr4_target;
93 extern struct target_type arm11_target;
94 extern struct target_type ls1_sap_target;
95 extern struct target_type mips_m4k_target;
96 extern struct target_type mips_mips64_target;
97 extern struct target_type avr_target;
98 extern struct target_type dsp563xx_target;
99 extern struct target_type dsp5680xx_target;
100 extern struct target_type testee_target;
101 extern struct target_type avr32_ap7k_target;
102 extern struct target_type hla_target;
103 extern struct target_type nds32_v2_target;
104 extern struct target_type nds32_v3_target;
105 extern struct target_type nds32_v3m_target;
106 extern struct target_type or1k_target;
107 extern struct target_type quark_x10xx_target;
108 extern struct target_type quark_d20xx_target;
109 extern struct target_type stm8_target;
110 extern struct target_type riscv_target;
111 extern struct target_type mem_ap_target;
112 extern struct target_type esirisc_target;
113 extern struct target_type arcv2_target;
114
/* NULL-terminated registry of every target type OpenOCD can drive.
 * A new target type must be declared extern above and listed here
 * to become selectable from the configuration. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&mips_mips64_target,
	NULL,	/* sentinel */
};
154
/* head of the singly-linked list of all configured targets */
struct target *all_targets;
/* registered event/timer callback chains (managed below) */
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
static int64_t target_timer_next_event_value;
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
162
/* accepted spellings for assert/deassert arguments in Tcl commands */
static const struct jim_nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};
172
/* maps ERROR_TARGET_* codes to short printable names (see target_strerror_safe) */
static const struct jim_nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
187
188 static const char *target_strerror_safe(int err)
189 {
190 const struct jim_nvp *n;
191
192 n = jim_nvp_value2name_simple(nvp_error_target, err);
193 if (!n->name)
194 return "unknown";
195 else
196 return n->name;
197 }
198
/* maps TARGET_EVENT_* values to the event names usable in Tcl event handlers */
static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START,         .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE,    .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT,        .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST,   .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE,  .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT,          .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END,           .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	{ .name = NULL, .value = -1 }
};
241
/* maps enum target_state to printable names (see target_state_name) */
static const struct jim_nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted",  .value = TARGET_HALTED },
	{ .name = "reset",   .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};
250
/* maps enum target_debug_reason to printable names (see debug_reason_name) */
static const struct jim_nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request",             .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint",                .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint",                .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step",               .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted",         .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit",              .value = DBG_REASON_EXIT },
	{ .name = "exception-catch",           .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined",                 .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
263
/* accepted spellings for target endianness in configuration commands */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big",    .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be",     .value = TARGET_BIG_ENDIAN },
	{ .name = "le",     .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};
271
/* maps enum target_reset_mode to the mode names used by the reset command */
static const struct jim_nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run",     .value = RESET_RUN },
	{ .name = "halt",    .value = RESET_HALT },
	{ .name = "init",    .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};
279
280 const char *debug_reason_name(struct target *t)
281 {
282 const char *cp;
283
284 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
285 t->debug_reason)->name;
286 if (!cp) {
287 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
288 cp = "(*BUG*unknown*BUG*)";
289 }
290 return cp;
291 }
292
293 const char *target_state_name(struct target *t)
294 {
295 const char *cp;
296 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
297 if (!cp) {
298 LOG_ERROR("Invalid target state: %d", (int)(t->state));
299 cp = "(*BUG*unknown*BUG*)";
300 }
301
302 if (!target_was_examined(t) && t->defer_examine)
303 cp = "examine deferred";
304
305 return cp;
306 }
307
308 const char *target_event_name(enum target_event event)
309 {
310 const char *cp;
311 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
312 if (!cp) {
313 LOG_ERROR("Invalid target event: %d", (int)(event));
314 cp = "(*BUG*unknown*BUG*)";
315 }
316 return cp;
317 }
318
319 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
320 {
321 const char *cp;
322 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
323 if (!cp) {
324 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
325 cp = "(*BUG*unknown*BUG*)";
326 }
327 return cp;
328 }
329
330 /* determine the number of the new target */
331 static int new_target_number(void)
332 {
333 struct target *t;
334 int x;
335
336 /* number is 0 based */
337 x = -1;
338 t = all_targets;
339 while (t) {
340 if (x < t->target_number)
341 x = t->target_number;
342 t = t->next;
343 }
344 return x + 1;
345 }
346
347 static void append_to_list_all_targets(struct target *target)
348 {
349 struct target **t = &all_targets;
350
351 while (*t)
352 t = &((*t)->next);
353 *t = target;
354 }
355
356 /* read a uint64_t from a buffer in target memory endianness */
357 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
358 {
359 if (target->endianness == TARGET_LITTLE_ENDIAN)
360 return le_to_h_u64(buffer);
361 else
362 return be_to_h_u64(buffer);
363 }
364
365 /* read a uint32_t from a buffer in target memory endianness */
366 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
367 {
368 if (target->endianness == TARGET_LITTLE_ENDIAN)
369 return le_to_h_u32(buffer);
370 else
371 return be_to_h_u32(buffer);
372 }
373
374 /* read a uint24_t from a buffer in target memory endianness */
375 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
376 {
377 if (target->endianness == TARGET_LITTLE_ENDIAN)
378 return le_to_h_u24(buffer);
379 else
380 return be_to_h_u24(buffer);
381 }
382
383 /* read a uint16_t from a buffer in target memory endianness */
384 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
385 {
386 if (target->endianness == TARGET_LITTLE_ENDIAN)
387 return le_to_h_u16(buffer);
388 else
389 return be_to_h_u16(buffer);
390 }
391
392 /* write a uint64_t to a buffer in target memory endianness */
393 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
394 {
395 if (target->endianness == TARGET_LITTLE_ENDIAN)
396 h_u64_to_le(buffer, value);
397 else
398 h_u64_to_be(buffer, value);
399 }
400
401 /* write a uint32_t to a buffer in target memory endianness */
402 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
403 {
404 if (target->endianness == TARGET_LITTLE_ENDIAN)
405 h_u32_to_le(buffer, value);
406 else
407 h_u32_to_be(buffer, value);
408 }
409
410 /* write a uint24_t to a buffer in target memory endianness */
411 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
412 {
413 if (target->endianness == TARGET_LITTLE_ENDIAN)
414 h_u24_to_le(buffer, value);
415 else
416 h_u24_to_be(buffer, value);
417 }
418
419 /* write a uint16_t to a buffer in target memory endianness */
420 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
421 {
422 if (target->endianness == TARGET_LITTLE_ENDIAN)
423 h_u16_to_le(buffer, value);
424 else
425 h_u16_to_be(buffer, value);
426 }
427
428 /* write a uint8_t to a buffer in target memory endianness */
429 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
430 {
431 *buffer = value;
432 }
433
434 /* write a uint64_t array to a buffer in target memory endianness */
435 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
436 {
437 uint32_t i;
438 for (i = 0; i < count; i++)
439 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
440 }
441
442 /* write a uint32_t array to a buffer in target memory endianness */
443 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
444 {
445 uint32_t i;
446 for (i = 0; i < count; i++)
447 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
448 }
449
450 /* write a uint16_t array to a buffer in target memory endianness */
451 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
452 {
453 uint32_t i;
454 for (i = 0; i < count; i++)
455 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
456 }
457
458 /* write a uint64_t array to a buffer in target memory endianness */
459 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
460 {
461 uint32_t i;
462 for (i = 0; i < count; i++)
463 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
464 }
465
466 /* write a uint32_t array to a buffer in target memory endianness */
467 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
468 {
469 uint32_t i;
470 for (i = 0; i < count; i++)
471 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
472 }
473
474 /* write a uint16_t array to a buffer in target memory endianness */
475 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
476 {
477 uint32_t i;
478 for (i = 0; i < count; i++)
479 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
480 }
481
482 /* return a pointer to a configured target; id is name or number */
483 struct target *get_target(const char *id)
484 {
485 struct target *target;
486
487 /* try as tcltarget name */
488 for (target = all_targets; target; target = target->next) {
489 if (!target_name(target))
490 continue;
491 if (strcmp(id, target_name(target)) == 0)
492 return target;
493 }
494
495 /* It's OK to remove this fallback sometime after August 2010 or so */
496
497 /* no match, try as number */
498 unsigned num;
499 if (parse_uint(id, &num) != ERROR_OK)
500 return NULL;
501
502 for (target = all_targets; target; target = target->next) {
503 if (target->target_number == (int)num) {
504 LOG_WARNING("use '%s' as target identifier, not '%u'",
505 target_name(target), num);
506 return target;
507 }
508 }
509
510 return NULL;
511 }
512
513 /* returns a pointer to the n-th configured target */
514 struct target *get_target_by_num(int num)
515 {
516 struct target *target = all_targets;
517
518 while (target) {
519 if (target->target_number == num)
520 return target;
521 target = target->next;
522 }
523
524 return NULL;
525 }
526
/* Return the current target; aborts OpenOCD if none is set (a bug). */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *current = get_current_target_or_null(cmd_ctx);

	if (current)
		return current;

	LOG_ERROR("BUG: current_target out of bounds");
	exit(-1);
}
538
539 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
540 {
541 return cmd_ctx->current_target_override
542 ? cmd_ctx->current_target_override
543 : cmd_ctx->current_target;
544 }
545
/**
 * Poll the target for its current state, and turn a pending halt
 * request into a GDB wakeup if it times out.
 */
int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	/* if target_halt() was issued, watch for the halt to complete */
	if (target->halt_issued) {
		if (target->state == TARGET_HALTED)
			target->halt_issued = false;
		else {
			/* elapsed time since the halt request, in ms */
			int64_t t = timeval_ms() - target->halt_issued_time;
			if (t > DEFAULT_HALT_TIMEOUT) {
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}
575
576 int target_halt(struct target *target)
577 {
578 int retval;
579 /* We can't poll until after examine */
580 if (!target_was_examined(target)) {
581 LOG_ERROR("Target not examined yet");
582 return ERROR_FAIL;
583 }
584
585 retval = target->type->halt(target);
586 if (retval != ERROR_OK)
587 return retval;
588
589 target->halt_issued = true;
590 target->halt_issued_time = timeval_ms();
591
592 return ERROR_OK;
593 }
594
/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 * of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 * should be skipped. (For example, maybe execution was stopped by
 * such a breakpoint, in which case it would be counterproductive to
 * let it re-trigger.
 * @param debug_execution False if all working areas allocated by OpenOCD
 * should be released and/or restored to their original contents.
 * (This would for example be true to run some downloaded "helper"
 * algorithm code, which resides in one such working buffer and uses
 * another for data storage.)
 *
 * @return ERROR_OK on success, an OpenOCD error code otherwise.
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies. For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled. (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm.... and
 * enabling them could cause such helpers to malfunction (for example,
 * by overwriting data with a breakpoint instruction. On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	/*
	 * resume() triggers the event 'resumed'. The execution of TCL commands
	 * in the event handler causes the polling of targets. If the target has
	 * already halted for a breakpoint, polling will run the 'halted' event
	 * handler before the pending 'resumed' handler.
	 * Disable polling during resume() to guarantee the execution of handlers
	 * in the correct order.
	 */
	bool save_poll = jtag_poll_get_enabled();
	jtag_poll_set_enabled(false);
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	jtag_poll_set_enabled(save_poll);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
661
662 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
663 {
664 char buf[100];
665 int retval;
666 struct jim_nvp *n;
667 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
668 if (!n->name) {
669 LOG_ERROR("invalid reset mode");
670 return ERROR_FAIL;
671 }
672
673 struct target *target;
674 for (target = all_targets; target; target = target->next)
675 target_call_reset_callbacks(target, reset_mode);
676
677 /* disable polling during reset to make reset event scripts
678 * more predictable, i.e. dr/irscan & pathmove in events will
679 * not have JTAG operations injected into the middle of a sequence.
680 */
681 bool save_poll = jtag_poll_get_enabled();
682
683 jtag_poll_set_enabled(false);
684
685 sprintf(buf, "ocd_process_reset %s", n->name);
686 retval = Jim_Eval(cmd->ctx->interp, buf);
687
688 jtag_poll_set_enabled(save_poll);
689
690 if (retval != JIM_OK) {
691 Jim_MakeErrorMessage(cmd->ctx->interp);
692 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
693 return ERROR_FAIL;
694 }
695
696 /* We want any events to be processed before the prompt */
697 retval = target_call_timer_callbacks_now();
698
699 for (target = all_targets; target; target = target->next) {
700 target->type->check_reset(target);
701 target->running_alg = false;
702 }
703
704 return retval;
705 }
706
/* Default virt2phys handler: identity mapping (no address translation). */
static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
713
/* Default mmu handler: reports the MMU as always disabled. */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
719
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static inline void target_reset_examined(struct target *target)
{
	target->examined = false;
}
728
/* Default examine handler: nothing to probe, just mark the target examined. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
734
/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
740
741 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
742 * Keep in sync */
743 int target_examine_one(struct target *target)
744 {
745 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
746
747 int retval = target->type->examine(target);
748 if (retval != ERROR_OK) {
749 target_reset_examined(target);
750 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
751 return retval;
752 }
753
754 target_set_examined(target);
755 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
756
757 return ERROR_OK;
758 }
759
760 static int jtag_enable_callback(enum jtag_event event, void *priv)
761 {
762 struct target *target = priv;
763
764 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
765 return ERROR_OK;
766
767 jtag_unregister_event_callback(jtag_enable_callback, target);
768
769 return target_examine_one(target);
770 }
771
772 /* Targets that correctly implement init + examine, i.e.
773 * no communication with target during init:
774 *
775 * XScale
776 */
777 int target_examine(void)
778 {
779 int retval = ERROR_OK;
780 struct target *target;
781
782 for (target = all_targets; target; target = target->next) {
783 /* defer examination, but don't skip it */
784 if (!target->tap->enabled) {
785 jtag_register_event_callback(jtag_enable_callback,
786 target);
787 continue;
788 }
789
790 if (target->defer_examine)
791 continue;
792
793 int retval2 = target_examine_one(target);
794 if (retval2 != ERROR_OK) {
795 LOG_WARNING("target %s examination failed", target_name(target));
796 retval = retval2;
797 }
798 }
799 return retval;
800 }
801
/* Name of the target's type (e.g. "cortex_m"). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
806
807 static int target_soft_reset_halt(struct target *target)
808 {
809 if (!target_was_examined(target)) {
810 LOG_ERROR("Target not examined yet");
811 return ERROR_FAIL;
812 }
813 if (!target->type->soft_reset_halt) {
814 LOG_ERROR("Target %s does not support soft_reset_halt",
815 target_name(target));
816 return ERROR_FAIL;
817 }
818 return target->type->soft_reset_halt(target);
819 }
820
821 /**
822 * Downloads a target-specific native code algorithm to the target,
823 * and executes it. * Note that some targets may need to set up, enable,
824 * and tear down a breakpoint (hard or * soft) to detect algorithm
825 * termination, while others may support lower overhead schemes where
826 * soft breakpoints embedded in the algorithm automatically terminate the
827 * algorithm.
828 *
829 * @param target used to run the algorithm
830 * @param num_mem_params
831 * @param mem_params
832 * @param num_reg_params
833 * @param reg_param
834 * @param entry_point
835 * @param exit_point
836 * @param timeout_ms
837 * @param arch_info target-specific description of the algorithm.
838 */
839 int target_run_algorithm(struct target *target,
840 int num_mem_params, struct mem_param *mem_params,
841 int num_reg_params, struct reg_param *reg_param,
842 target_addr_t entry_point, target_addr_t exit_point,
843 int timeout_ms, void *arch_info)
844 {
845 int retval = ERROR_FAIL;
846
847 if (!target_was_examined(target)) {
848 LOG_ERROR("Target not examined yet");
849 goto done;
850 }
851 if (!target->type->run_algorithm) {
852 LOG_ERROR("Target type '%s' does not support %s",
853 target_type_name(target), __func__);
854 goto done;
855 }
856
857 target->running_alg = true;
858 retval = target->type->run_algorithm(target,
859 num_mem_params, mem_params,
860 num_reg_params, reg_param,
861 entry_point, exit_point, timeout_ms, arch_info);
862 target->running_alg = false;
863
864 done:
865 return retval;
866 }
867
868 /**
869 * Executes a target-specific native code algorithm and leaves it running.
870 *
871 * @param target used to run the algorithm
872 * @param num_mem_params
873 * @param mem_params
874 * @param num_reg_params
875 * @param reg_params
876 * @param entry_point
877 * @param exit_point
878 * @param arch_info target-specific description of the algorithm.
879 */
880 int target_start_algorithm(struct target *target,
881 int num_mem_params, struct mem_param *mem_params,
882 int num_reg_params, struct reg_param *reg_params,
883 target_addr_t entry_point, target_addr_t exit_point,
884 void *arch_info)
885 {
886 int retval = ERROR_FAIL;
887
888 if (!target_was_examined(target)) {
889 LOG_ERROR("Target not examined yet");
890 goto done;
891 }
892 if (!target->type->start_algorithm) {
893 LOG_ERROR("Target type '%s' does not support %s",
894 target_type_name(target), __func__);
895 goto done;
896 }
897 if (target->running_alg) {
898 LOG_ERROR("Target is already running an algorithm");
899 goto done;
900 }
901
902 target->running_alg = true;
903 retval = target->type->start_algorithm(target,
904 num_mem_params, mem_params,
905 num_reg_params, reg_params,
906 entry_point, exit_point, arch_info);
907
908 done:
909 return retval;
910 }
911
912 /**
913 * Waits for an algorithm started with target_start_algorithm() to complete.
914 *
915 * @param target used to run the algorithm
916 * @param num_mem_params
917 * @param mem_params
918 * @param num_reg_params
919 * @param reg_params
920 * @param exit_point
921 * @param timeout_ms
922 * @param arch_info target-specific description of the algorithm.
923 */
924 int target_wait_algorithm(struct target *target,
925 int num_mem_params, struct mem_param *mem_params,
926 int num_reg_params, struct reg_param *reg_params,
927 target_addr_t exit_point, int timeout_ms,
928 void *arch_info)
929 {
930 int retval = ERROR_FAIL;
931
932 if (!target->type->wait_algorithm) {
933 LOG_ERROR("Target type '%s' does not support %s",
934 target_type_name(target), __func__);
935 goto done;
936 }
937 if (!target->running_alg) {
938 LOG_ERROR("Target is not running an algorithm");
939 goto done;
940 }
941
942 retval = target->type->wait_algorithm(target,
943 num_mem_params, mem_params,
944 num_reg_params, reg_params,
945 exit_point, timeout_ms, arch_info);
946 if (retval != ERROR_TARGET_TIMEOUT)
947 target->running_alg = false;
948
949 done:
950 return retval;
951 }
952
953 /**
954 * Streams data to a circular buffer on target intended for consumption by code
955 * running asynchronously on target.
956 *
957 * This is intended for applications where target-specific native code runs
958 * on the target, receives data from the circular buffer, does something with
959 * it (most likely writing it to a flash memory), and advances the circular
960 * buffer pointer.
961 *
962 * This assumes that the helper algorithm has already been loaded to the target,
963 * but has not been started yet. Given memory and register parameters are passed
964 * to the algorithm.
965 *
966 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
967 * following format:
968 *
969 * [buffer_start + 0, buffer_start + 4):
970 * Write Pointer address (aka head). Written and updated by this
971 * routine when new data is written to the circular buffer.
972 * [buffer_start + 4, buffer_start + 8):
973 * Read Pointer address (aka tail). Updated by code running on the
974 * target after it consumes data.
975 * [buffer_start + 8, buffer_start + buffer_size):
976 * Circular buffer contents.
977 *
978 * See contrib/loaders/flash/stm32f1x.S for an example.
979 *
980 * @param target used to run the algorithm
981 * @param buffer address on the host where data to be sent is located
982 * @param count number of blocks to send
983 * @param block_size size in bytes of each block
984 * @param num_mem_params count of memory-based params to pass to algorithm
985 * @param mem_params memory-based params to pass to algorithm
986 * @param num_reg_params count of register-based params to pass to algorithm
987 * @param reg_params memory-based params to pass to algorithm
988 * @param buffer_start address on the target of the circular buffer structure
989 * @param buffer_size size of the circular buffer structure
990 * @param entry_point address on the target to execute to start the algorithm
991 * @param exit_point address at which to set a breakpoint to catch the
992 * end of the algorithm; can be 0 if target triggers a breakpoint itself
993 * @param arch_info
994 */
995
996 int target_run_flash_async_algorithm(struct target *target,
997 const uint8_t *buffer, uint32_t count, int block_size,
998 int num_mem_params, struct mem_param *mem_params,
999 int num_reg_params, struct reg_param *reg_params,
1000 uint32_t buffer_start, uint32_t buffer_size,
1001 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1002 {
1003 int retval;
1004 int timeout = 0;
1005
1006 const uint8_t *buffer_orig = buffer;
1007
1008 /* Set up working area. First word is write pointer, second word is read pointer,
1009 * rest is fifo data area. */
1010 uint32_t wp_addr = buffer_start;
1011 uint32_t rp_addr = buffer_start + 4;
1012 uint32_t fifo_start_addr = buffer_start + 8;
1013 uint32_t fifo_end_addr = buffer_start + buffer_size;
1014
1015 uint32_t wp = fifo_start_addr;
1016 uint32_t rp = fifo_start_addr;
1017
1018 /* validate block_size is 2^n */
1019 assert(IS_PWR_OF_2(block_size));
1020
1021 retval = target_write_u32(target, wp_addr, wp);
1022 if (retval != ERROR_OK)
1023 return retval;
1024 retval = target_write_u32(target, rp_addr, rp);
1025 if (retval != ERROR_OK)
1026 return retval;
1027
1028 /* Start up algorithm on target and let it idle while writing the first chunk */
1029 retval = target_start_algorithm(target, num_mem_params, mem_params,
1030 num_reg_params, reg_params,
1031 entry_point,
1032 exit_point,
1033 arch_info);
1034
1035 if (retval != ERROR_OK) {
1036 LOG_ERROR("error starting target flash write algorithm");
1037 return retval;
1038 }
1039
1040 while (count > 0) {
1041
1042 retval = target_read_u32(target, rp_addr, &rp);
1043 if (retval != ERROR_OK) {
1044 LOG_ERROR("failed to get read pointer");
1045 break;
1046 }
1047
1048 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1049 (size_t) (buffer - buffer_orig), count, wp, rp);
1050
1051 if (rp == 0) {
1052 LOG_ERROR("flash write algorithm aborted by target");
1053 retval = ERROR_FLASH_OPERATION_FAILED;
1054 break;
1055 }
1056
1057 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1058 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1059 break;
1060 }
1061
1062 /* Count the number of bytes available in the fifo without
1063 * crossing the wrap around. Make sure to not fill it completely,
1064 * because that would make wp == rp and that's the empty condition. */
1065 uint32_t thisrun_bytes;
1066 if (rp > wp)
1067 thisrun_bytes = rp - wp - block_size;
1068 else if (rp > fifo_start_addr)
1069 thisrun_bytes = fifo_end_addr - wp;
1070 else
1071 thisrun_bytes = fifo_end_addr - wp - block_size;
1072
1073 if (thisrun_bytes == 0) {
1074 /* Throttle polling a bit if transfer is (much) faster than flash
1075 * programming. The exact delay shouldn't matter as long as it's
1076 * less than buffer size / flash speed. This is very unlikely to
1077 * run when using high latency connections such as USB. */
1078 alive_sleep(2);
1079
1080 /* to stop an infinite loop on some targets check and increment a timeout
1081 * this issue was observed on a stellaris using the new ICDI interface */
1082 if (timeout++ >= 2500) {
1083 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1084 return ERROR_FLASH_OPERATION_FAILED;
1085 }
1086 continue;
1087 }
1088
1089 /* reset our timeout */
1090 timeout = 0;
1091
1092 /* Limit to the amount of data we actually want to write */
1093 if (thisrun_bytes > count * block_size)
1094 thisrun_bytes = count * block_size;
1095
1096 /* Force end of large blocks to be word aligned */
1097 if (thisrun_bytes >= 16)
1098 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1099
1100 /* Write data to fifo */
1101 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1102 if (retval != ERROR_OK)
1103 break;
1104
1105 /* Update counters and wrap write pointer */
1106 buffer += thisrun_bytes;
1107 count -= thisrun_bytes / block_size;
1108 wp += thisrun_bytes;
1109 if (wp >= fifo_end_addr)
1110 wp = fifo_start_addr;
1111
1112 /* Store updated write pointer to target */
1113 retval = target_write_u32(target, wp_addr, wp);
1114 if (retval != ERROR_OK)
1115 break;
1116
1117 /* Avoid GDB timeouts */
1118 keep_alive();
1119 }
1120
1121 if (retval != ERROR_OK) {
1122 /* abort flash write algorithm on target */
1123 target_write_u32(target, wp_addr, 0);
1124 }
1125
1126 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1127 num_reg_params, reg_params,
1128 exit_point,
1129 10000,
1130 arch_info);
1131
1132 if (retval2 != ERROR_OK) {
1133 LOG_ERROR("error waiting for target flash write algorithm");
1134 retval = retval2;
1135 }
1136
1137 if (retval == ERROR_OK) {
1138 /* check if algorithm set rp = 0 after fifo writer loop finished */
1139 retval = target_read_u32(target, rp_addr, &rp);
1140 if (retval == ERROR_OK && rp == 0) {
1141 LOG_ERROR("flash write algorithm aborted by target");
1142 retval = ERROR_FLASH_OPERATION_FAILED;
1143 }
1144 }
1145
1146 return retval;
1147 }
1148
1149 int target_run_read_async_algorithm(struct target *target,
1150 uint8_t *buffer, uint32_t count, int block_size,
1151 int num_mem_params, struct mem_param *mem_params,
1152 int num_reg_params, struct reg_param *reg_params,
1153 uint32_t buffer_start, uint32_t buffer_size,
1154 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1155 {
1156 int retval;
1157 int timeout = 0;
1158
1159 const uint8_t *buffer_orig = buffer;
1160
1161 /* Set up working area. First word is write pointer, second word is read pointer,
1162 * rest is fifo data area. */
1163 uint32_t wp_addr = buffer_start;
1164 uint32_t rp_addr = buffer_start + 4;
1165 uint32_t fifo_start_addr = buffer_start + 8;
1166 uint32_t fifo_end_addr = buffer_start + buffer_size;
1167
1168 uint32_t wp = fifo_start_addr;
1169 uint32_t rp = fifo_start_addr;
1170
1171 /* validate block_size is 2^n */
1172 assert(IS_PWR_OF_2(block_size));
1173
1174 retval = target_write_u32(target, wp_addr, wp);
1175 if (retval != ERROR_OK)
1176 return retval;
1177 retval = target_write_u32(target, rp_addr, rp);
1178 if (retval != ERROR_OK)
1179 return retval;
1180
1181 /* Start up algorithm on target */
1182 retval = target_start_algorithm(target, num_mem_params, mem_params,
1183 num_reg_params, reg_params,
1184 entry_point,
1185 exit_point,
1186 arch_info);
1187
1188 if (retval != ERROR_OK) {
1189 LOG_ERROR("error starting target flash read algorithm");
1190 return retval;
1191 }
1192
1193 while (count > 0) {
1194 retval = target_read_u32(target, wp_addr, &wp);
1195 if (retval != ERROR_OK) {
1196 LOG_ERROR("failed to get write pointer");
1197 break;
1198 }
1199
1200 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1201 (size_t)(buffer - buffer_orig), count, wp, rp);
1202
1203 if (wp == 0) {
1204 LOG_ERROR("flash read algorithm aborted by target");
1205 retval = ERROR_FLASH_OPERATION_FAILED;
1206 break;
1207 }
1208
1209 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1210 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1211 break;
1212 }
1213
1214 /* Count the number of bytes available in the fifo without
1215 * crossing the wrap around. */
1216 uint32_t thisrun_bytes;
1217 if (wp >= rp)
1218 thisrun_bytes = wp - rp;
1219 else
1220 thisrun_bytes = fifo_end_addr - rp;
1221
1222 if (thisrun_bytes == 0) {
1223 /* Throttle polling a bit if transfer is (much) faster than flash
1224 * reading. The exact delay shouldn't matter as long as it's
1225 * less than buffer size / flash speed. This is very unlikely to
1226 * run when using high latency connections such as USB. */
1227 alive_sleep(2);
1228
1229 /* to stop an infinite loop on some targets check and increment a timeout
1230 * this issue was observed on a stellaris using the new ICDI interface */
1231 if (timeout++ >= 2500) {
1232 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1233 return ERROR_FLASH_OPERATION_FAILED;
1234 }
1235 continue;
1236 }
1237
1238 /* Reset our timeout */
1239 timeout = 0;
1240
1241 /* Limit to the amount of data we actually want to read */
1242 if (thisrun_bytes > count * block_size)
1243 thisrun_bytes = count * block_size;
1244
1245 /* Force end of large blocks to be word aligned */
1246 if (thisrun_bytes >= 16)
1247 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1248
1249 /* Read data from fifo */
1250 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1251 if (retval != ERROR_OK)
1252 break;
1253
1254 /* Update counters and wrap write pointer */
1255 buffer += thisrun_bytes;
1256 count -= thisrun_bytes / block_size;
1257 rp += thisrun_bytes;
1258 if (rp >= fifo_end_addr)
1259 rp = fifo_start_addr;
1260
1261 /* Store updated write pointer to target */
1262 retval = target_write_u32(target, rp_addr, rp);
1263 if (retval != ERROR_OK)
1264 break;
1265
1266 /* Avoid GDB timeouts */
1267 keep_alive();
1268
1269 }
1270
1271 if (retval != ERROR_OK) {
1272 /* abort flash write algorithm on target */
1273 target_write_u32(target, rp_addr, 0);
1274 }
1275
1276 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1277 num_reg_params, reg_params,
1278 exit_point,
1279 10000,
1280 arch_info);
1281
1282 if (retval2 != ERROR_OK) {
1283 LOG_ERROR("error waiting for target flash write algorithm");
1284 retval = retval2;
1285 }
1286
1287 if (retval == ERROR_OK) {
1288 /* check if algorithm set wp = 0 after fifo writer loop finished */
1289 retval = target_read_u32(target, wp_addr, &wp);
1290 if (retval == ERROR_OK && wp == 0) {
1291 LOG_ERROR("flash read algorithm aborted by target");
1292 retval = ERROR_FLASH_OPERATION_FAILED;
1293 }
1294 }
1295
1296 return retval;
1297 }
1298
1299 int target_read_memory(struct target *target,
1300 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1301 {
1302 if (!target_was_examined(target)) {
1303 LOG_ERROR("Target not examined yet");
1304 return ERROR_FAIL;
1305 }
1306 if (!target->type->read_memory) {
1307 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1308 return ERROR_FAIL;
1309 }
1310 return target->type->read_memory(target, address, size, count, buffer);
1311 }
1312
1313 int target_read_phys_memory(struct target *target,
1314 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1315 {
1316 if (!target_was_examined(target)) {
1317 LOG_ERROR("Target not examined yet");
1318 return ERROR_FAIL;
1319 }
1320 if (!target->type->read_phys_memory) {
1321 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1322 return ERROR_FAIL;
1323 }
1324 return target->type->read_phys_memory(target, address, size, count, buffer);
1325 }
1326
1327 int target_write_memory(struct target *target,
1328 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1329 {
1330 if (!target_was_examined(target)) {
1331 LOG_ERROR("Target not examined yet");
1332 return ERROR_FAIL;
1333 }
1334 if (!target->type->write_memory) {
1335 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1336 return ERROR_FAIL;
1337 }
1338 return target->type->write_memory(target, address, size, count, buffer);
1339 }
1340
1341 int target_write_phys_memory(struct target *target,
1342 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1343 {
1344 if (!target_was_examined(target)) {
1345 LOG_ERROR("Target not examined yet");
1346 return ERROR_FAIL;
1347 }
1348 if (!target->type->write_phys_memory) {
1349 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1350 return ERROR_FAIL;
1351 }
1352 return target->type->write_phys_memory(target, address, size, count, buffer);
1353 }
1354
1355 int target_add_breakpoint(struct target *target,
1356 struct breakpoint *breakpoint)
1357 {
1358 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1359 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1360 return ERROR_TARGET_NOT_HALTED;
1361 }
1362 return target->type->add_breakpoint(target, breakpoint);
1363 }
1364
1365 int target_add_context_breakpoint(struct target *target,
1366 struct breakpoint *breakpoint)
1367 {
1368 if (target->state != TARGET_HALTED) {
1369 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1370 return ERROR_TARGET_NOT_HALTED;
1371 }
1372 return target->type->add_context_breakpoint(target, breakpoint);
1373 }
1374
1375 int target_add_hybrid_breakpoint(struct target *target,
1376 struct breakpoint *breakpoint)
1377 {
1378 if (target->state != TARGET_HALTED) {
1379 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1380 return ERROR_TARGET_NOT_HALTED;
1381 }
1382 return target->type->add_hybrid_breakpoint(target, breakpoint);
1383 }
1384
1385 int target_remove_breakpoint(struct target *target,
1386 struct breakpoint *breakpoint)
1387 {
1388 return target->type->remove_breakpoint(target, breakpoint);
1389 }
1390
1391 int target_add_watchpoint(struct target *target,
1392 struct watchpoint *watchpoint)
1393 {
1394 if (target->state != TARGET_HALTED) {
1395 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1396 return ERROR_TARGET_NOT_HALTED;
1397 }
1398 return target->type->add_watchpoint(target, watchpoint);
1399 }
1400 int target_remove_watchpoint(struct target *target,
1401 struct watchpoint *watchpoint)
1402 {
1403 return target->type->remove_watchpoint(target, watchpoint);
1404 }
1405 int target_hit_watchpoint(struct target *target,
1406 struct watchpoint **hit_watchpoint)
1407 {
1408 if (target->state != TARGET_HALTED) {
1409 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1410 return ERROR_TARGET_NOT_HALTED;
1411 }
1412
1413 if (!target->type->hit_watchpoint) {
1414 /* For backward compatible, if hit_watchpoint is not implemented,
1415 * return ERROR_FAIL such that gdb_server will not take the nonsense
1416 * information. */
1417 return ERROR_FAIL;
1418 }
1419
1420 return target->type->hit_watchpoint(target, hit_watchpoint);
1421 }
1422
1423 const char *target_get_gdb_arch(struct target *target)
1424 {
1425 if (!target->type->get_gdb_arch)
1426 return NULL;
1427 return target->type->get_gdb_arch(target);
1428 }
1429
1430 int target_get_gdb_reg_list(struct target *target,
1431 struct reg **reg_list[], int *reg_list_size,
1432 enum target_register_class reg_class)
1433 {
1434 int result = ERROR_FAIL;
1435
1436 if (!target_was_examined(target)) {
1437 LOG_ERROR("Target not examined yet");
1438 goto done;
1439 }
1440
1441 result = target->type->get_gdb_reg_list(target, reg_list,
1442 reg_list_size, reg_class);
1443
1444 done:
1445 if (result != ERROR_OK) {
1446 *reg_list = NULL;
1447 *reg_list_size = 0;
1448 }
1449 return result;
1450 }
1451
1452 int target_get_gdb_reg_list_noread(struct target *target,
1453 struct reg **reg_list[], int *reg_list_size,
1454 enum target_register_class reg_class)
1455 {
1456 if (target->type->get_gdb_reg_list_noread &&
1457 target->type->get_gdb_reg_list_noread(target, reg_list,
1458 reg_list_size, reg_class) == ERROR_OK)
1459 return ERROR_OK;
1460 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1461 }
1462
1463 bool target_supports_gdb_connection(struct target *target)
1464 {
1465 /*
1466 * exclude all the targets that don't provide get_gdb_reg_list
1467 * or that have explicit gdb_max_connection == 0
1468 */
1469 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1470 }
1471
1472 int target_step(struct target *target,
1473 int current, target_addr_t address, int handle_breakpoints)
1474 {
1475 int retval;
1476
1477 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1478
1479 retval = target->type->step(target, current, address, handle_breakpoints);
1480 if (retval != ERROR_OK)
1481 return retval;
1482
1483 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1484
1485 return retval;
1486 }
1487
1488 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1489 {
1490 if (target->state != TARGET_HALTED) {
1491 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1492 return ERROR_TARGET_NOT_HALTED;
1493 }
1494 return target->type->get_gdb_fileio_info(target, fileio_info);
1495 }
1496
1497 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1498 {
1499 if (target->state != TARGET_HALTED) {
1500 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1501 return ERROR_TARGET_NOT_HALTED;
1502 }
1503 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1504 }
1505
1506 target_addr_t target_address_max(struct target *target)
1507 {
1508 unsigned bits = target_address_bits(target);
1509 if (sizeof(target_addr_t) * 8 == bits)
1510 return (target_addr_t) -1;
1511 else
1512 return (((target_addr_t) 1) << bits) - 1;
1513 }
1514
1515 unsigned target_address_bits(struct target *target)
1516 {
1517 if (target->type->address_bits)
1518 return target->type->address_bits(target);
1519 return 32;
1520 }
1521
1522 unsigned int target_data_bits(struct target *target)
1523 {
1524 if (target->type->data_bits)
1525 return target->type->data_bits(target);
1526 return 32;
1527 }
1528
1529 static int target_profiling(struct target *target, uint32_t *samples,
1530 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1531 {
1532 return target->type->profiling(target, samples, max_num_samples,
1533 num_samples, seconds);
1534 }
1535
1536 static int handle_target(void *priv);
1537
/* Prepare a single target for use: clear its examined flag, run the driver's
 * mandatory init_target() hook, and fill in default implementations for every
 * optional target_type callback left NULL.  NOTE(review): this mutates the
 * target_type structure, which may be shared between targets of the same type
 * — the stubbing is therefore effectively per-type, not per-target. */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (!type->examine)
		type->examine = default_examine;

	if (!type->check_reset)
		type->check_reset = default_check_reset;

	/* init_target is mandatory for every driver */
	assert(type->init_target);

	int retval = type->init_target(cmd_ctx, target);
	if (retval != ERROR_OK) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (!type->virt2phys) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* Fall back to generic implementations for the remaining optional hooks. */
	if (!target->type->read_buffer)
		target->type->read_buffer = target_read_buffer_default;

	if (!target->type->write_buffer)
		target->type->write_buffer = target_write_buffer_default;

	if (!target->type->get_gdb_fileio_info)
		target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;

	if (!target->type->gdb_fileio_end)
		target->type->gdb_fileio_end = target_gdb_fileio_end_default;

	if (!target->type->profiling)
		target->type->profiling = target_profiling_default;

	return ERROR_OK;
}
1597
1598 static int target_init(struct command_context *cmd_ctx)
1599 {
1600 struct target *target;
1601 int retval;
1602
1603 for (target = all_targets; target; target = target->next) {
1604 retval = target_init_one(cmd_ctx, target);
1605 if (retval != ERROR_OK)
1606 return retval;
1607 }
1608
1609 if (!all_targets)
1610 return ERROR_OK;
1611
1612 retval = target_register_user_commands(cmd_ctx);
1613 if (retval != ERROR_OK)
1614 return retval;
1615
1616 retval = target_register_timer_callback(&handle_target,
1617 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1618 if (retval != ERROR_OK)
1619 return retval;
1620
1621 return ERROR_OK;
1622 }
1623
1624 COMMAND_HANDLER(handle_target_init_command)
1625 {
1626 int retval;
1627
1628 if (CMD_ARGC != 0)
1629 return ERROR_COMMAND_SYNTAX_ERROR;
1630
1631 static bool target_initialized;
1632 if (target_initialized) {
1633 LOG_INFO("'target init' has already been called");
1634 return ERROR_OK;
1635 }
1636 target_initialized = true;
1637
1638 retval = command_run_line(CMD_CTX, "init_targets");
1639 if (retval != ERROR_OK)
1640 return retval;
1641
1642 retval = command_run_line(CMD_CTX, "init_target_events");
1643 if (retval != ERROR_OK)
1644 return retval;
1645
1646 retval = command_run_line(CMD_CTX, "init_board");
1647 if (retval != ERROR_OK)
1648 return retval;
1649
1650 LOG_DEBUG("Initializing targets...");
1651 return target_init(CMD_CTX);
1652 }
1653
1654 int target_register_event_callback(int (*callback)(struct target *target,
1655 enum target_event event, void *priv), void *priv)
1656 {
1657 struct target_event_callback **callbacks_p = &target_event_callbacks;
1658
1659 if (!callback)
1660 return ERROR_COMMAND_SYNTAX_ERROR;
1661
1662 if (*callbacks_p) {
1663 while ((*callbacks_p)->next)
1664 callbacks_p = &((*callbacks_p)->next);
1665 callbacks_p = &((*callbacks_p)->next);
1666 }
1667
1668 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1669 (*callbacks_p)->callback = callback;
1670 (*callbacks_p)->priv = priv;
1671 (*callbacks_p)->next = NULL;
1672
1673 return ERROR_OK;
1674 }
1675
1676 int target_register_reset_callback(int (*callback)(struct target *target,
1677 enum target_reset_mode reset_mode, void *priv), void *priv)
1678 {
1679 struct target_reset_callback *entry;
1680
1681 if (!callback)
1682 return ERROR_COMMAND_SYNTAX_ERROR;
1683
1684 entry = malloc(sizeof(struct target_reset_callback));
1685 if (!entry) {
1686 LOG_ERROR("error allocating buffer for reset callback entry");
1687 return ERROR_COMMAND_SYNTAX_ERROR;
1688 }
1689
1690 entry->callback = callback;
1691 entry->priv = priv;
1692 list_add(&entry->list, &target_reset_callback_list);
1693
1694
1695 return ERROR_OK;
1696 }
1697
1698 int target_register_trace_callback(int (*callback)(struct target *target,
1699 size_t len, uint8_t *data, void *priv), void *priv)
1700 {
1701 struct target_trace_callback *entry;
1702
1703 if (!callback)
1704 return ERROR_COMMAND_SYNTAX_ERROR;
1705
1706 entry = malloc(sizeof(struct target_trace_callback));
1707 if (!entry) {
1708 LOG_ERROR("error allocating buffer for trace callback entry");
1709 return ERROR_COMMAND_SYNTAX_ERROR;
1710 }
1711
1712 entry->callback = callback;
1713 entry->priv = priv;
1714 list_add(&entry->list, &target_trace_callback_list);
1715
1716
1717 return ERROR_OK;
1718 }
1719
1720 int target_register_timer_callback(int (*callback)(void *priv),
1721 unsigned int time_ms, enum target_timer_type type, void *priv)
1722 {
1723 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1724
1725 if (!callback)
1726 return ERROR_COMMAND_SYNTAX_ERROR;
1727
1728 if (*callbacks_p) {
1729 while ((*callbacks_p)->next)
1730 callbacks_p = &((*callbacks_p)->next);
1731 callbacks_p = &((*callbacks_p)->next);
1732 }
1733
1734 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1735 (*callbacks_p)->callback = callback;
1736 (*callbacks_p)->type = type;
1737 (*callbacks_p)->time_ms = time_ms;
1738 (*callbacks_p)->removed = false;
1739
1740 (*callbacks_p)->when = timeval_ms() + time_ms;
1741 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1742
1743 (*callbacks_p)->priv = priv;
1744 (*callbacks_p)->next = NULL;
1745
1746 return ERROR_OK;
1747 }
1748
1749 int target_unregister_event_callback(int (*callback)(struct target *target,
1750 enum target_event event, void *priv), void *priv)
1751 {
1752 struct target_event_callback **p = &target_event_callbacks;
1753 struct target_event_callback *c = target_event_callbacks;
1754
1755 if (!callback)
1756 return ERROR_COMMAND_SYNTAX_ERROR;
1757
1758 while (c) {
1759 struct target_event_callback *next = c->next;
1760 if ((c->callback == callback) && (c->priv == priv)) {
1761 *p = next;
1762 free(c);
1763 return ERROR_OK;
1764 } else
1765 p = &(c->next);
1766 c = next;
1767 }
1768
1769 return ERROR_OK;
1770 }
1771
1772 int target_unregister_reset_callback(int (*callback)(struct target *target,
1773 enum target_reset_mode reset_mode, void *priv), void *priv)
1774 {
1775 struct target_reset_callback *entry;
1776
1777 if (!callback)
1778 return ERROR_COMMAND_SYNTAX_ERROR;
1779
1780 list_for_each_entry(entry, &target_reset_callback_list, list) {
1781 if (entry->callback == callback && entry->priv == priv) {
1782 list_del(&entry->list);
1783 free(entry);
1784 break;
1785 }
1786 }
1787
1788 return ERROR_OK;
1789 }
1790
1791 int target_unregister_trace_callback(int (*callback)(struct target *target,
1792 size_t len, uint8_t *data, void *priv), void *priv)
1793 {
1794 struct target_trace_callback *entry;
1795
1796 if (!callback)
1797 return ERROR_COMMAND_SYNTAX_ERROR;
1798
1799 list_for_each_entry(entry, &target_trace_callback_list, list) {
1800 if (entry->callback == callback && entry->priv == priv) {
1801 list_del(&entry->list);
1802 free(entry);
1803 break;
1804 }
1805 }
1806
1807 return ERROR_OK;
1808 }
1809
1810 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1811 {
1812 if (!callback)
1813 return ERROR_COMMAND_SYNTAX_ERROR;
1814
1815 for (struct target_timer_callback *c = target_timer_callbacks;
1816 c; c = c->next) {
1817 if ((c->callback == callback) && (c->priv == priv)) {
1818 c->removed = true;
1819 return ERROR_OK;
1820 }
1821 }
1822
1823 return ERROR_FAIL;
1824 }
1825
1826 int target_call_event_callbacks(struct target *target, enum target_event event)
1827 {
1828 struct target_event_callback *callback = target_event_callbacks;
1829 struct target_event_callback *next_callback;
1830
1831 if (event == TARGET_EVENT_HALTED) {
1832 /* execute early halted first */
1833 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1834 }
1835
1836 LOG_DEBUG("target event %i (%s) for core %s", event,
1837 jim_nvp_value2name_simple(nvp_target_event, event)->name,
1838 target_name(target));
1839
1840 target_handle_event(target, event);
1841
1842 while (callback) {
1843 next_callback = callback->next;
1844 callback->callback(target, event, callback->priv);
1845 callback = next_callback;
1846 }
1847
1848 return ERROR_OK;
1849 }
1850
1851 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1852 {
1853 struct target_reset_callback *callback;
1854
1855 LOG_DEBUG("target reset %i (%s)", reset_mode,
1856 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1857
1858 list_for_each_entry(callback, &target_reset_callback_list, list)
1859 callback->callback(target, reset_mode, callback->priv);
1860
1861 return ERROR_OK;
1862 }
1863
1864 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1865 {
1866 struct target_trace_callback *callback;
1867
1868 list_for_each_entry(callback, &target_trace_callback_list, list)
1869 callback->callback(target, len, data, callback->priv);
1870
1871 return ERROR_OK;
1872 }
1873
1874 static int target_timer_callback_periodic_restart(
1875 struct target_timer_callback *cb, int64_t *now)
1876 {
1877 cb->when = *now + cb->time_ms;
1878 return ERROR_OK;
1879 }
1880
1881 static int target_call_timer_callback(struct target_timer_callback *cb,
1882 int64_t *now)
1883 {
1884 cb->callback(cb->priv);
1885
1886 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1887 return target_timer_callback_periodic_restart(cb, now);
1888
1889 return target_unregister_timer_callback(cb->callback, cb->priv);
1890 }
1891
/* Walk the timer callback list, firing callbacks that are due (or, when
 * checktime == 0, every periodic callback regardless of deadline), pruning
 * nodes flagged by target_unregister_timer_callback(), and recomputing the
 * global next-event deadline.  Re-entrant calls are ignored. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		if ((*callback)->removed) {
			/* Deferred removal: unlink and free nodes flagged by
			 * target_unregister_timer_callback(). */
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* A fired one-shot callback is now flagged ->removed, so it must
		 * not contribute to the next-event deadline. */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1939
/* Fire only the timer callbacks whose deadline has passed. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1944
/* Fire periodic timer callbacks immediately, ignoring their deadlines. */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1950
1951 int64_t target_timer_next_event(void)
1952 {
1953 return target_timer_next_event_value;
1954 }
1955
1956 /* Prints the working area layout for debug purposes */
1957 static void print_wa_layout(struct target *target)
1958 {
1959 struct working_area *c = target->working_areas;
1960
1961 while (c) {
1962 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1963 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1964 c->address, c->address + c->size - 1, c->size);
1965 c = c->next;
1966 }
1967 }
1968
1969 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1970 static void target_split_working_area(struct working_area *area, uint32_t size)
1971 {
1972 assert(area->free); /* Shouldn't split an allocated area */
1973 assert(size <= area->size); /* Caller should guarantee this */
1974
1975 /* Split only if not already the right size */
1976 if (size < area->size) {
1977 struct working_area *new_wa = malloc(sizeof(*new_wa));
1978
1979 if (!new_wa)
1980 return;
1981
1982 new_wa->next = area->next;
1983 new_wa->size = area->size - size;
1984 new_wa->address = area->address + size;
1985 new_wa->backup = NULL;
1986 new_wa->user = NULL;
1987 new_wa->free = true;
1988
1989 area->next = new_wa;
1990 area->size = size;
1991
1992 /* If backup memory was allocated to this area, it has the wrong size
1993 * now so free it and it will be reallocated if/when needed */
1994 free(area->backup);
1995 area->backup = NULL;
1996 }
1997 }
1998
1999 /* Merge all adjacent free areas into one */
2000 static void target_merge_working_areas(struct target *target)
2001 {
2002 struct working_area *c = target->working_areas;
2003
2004 while (c && c->next) {
2005 assert(c->next->address == c->address + c->size); /* This is an invariant */
2006
2007 /* Find two adjacent free areas */
2008 if (c->free && c->next->free) {
2009 /* Merge the last into the first */
2010 c->size += c->next->size;
2011
2012 /* Remove the last */
2013 struct working_area *to_be_freed = c->next;
2014 c->next = c->next->next;
2015 free(to_be_freed->backup);
2016 free(to_be_freed);
2017
2018 /* If backup memory was allocated to the remaining area, it's has
2019 * the wrong size now */
2020 free(c->backup);
2021 c->backup = NULL;
2022 } else {
2023 c = c->next;
2024 }
2025 }
2026 }
2027
/* Allocate 'size' bytes from the target's working area pool; on success,
 * *area points at the (marked non-free) area and ERROR_OK is returned.
 * No warning is logged on failure — see target_alloc_working_area(). */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		/* Ask the target whether its MMU is enabled to choose between the
		 * configured physical and virtual working-area addresses */
		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* If malloc failed, working_areas stays NULL and the search
		 * below fails gracefully with RESOURCE_NOT_AVAILABLE */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	if (size % 4)
		size = (size + 3) & (~3UL);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
		size, c->address);

	if (target->backup_working_area) {
		/* Save the area's current contents so they can be restored on free */
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2122
2123 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2124 {
2125 int retval;
2126
2127 retval = target_alloc_working_area_try(target, size, area);
2128 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2129 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2130 return retval;
2131
2132 }
2133
2134 static int target_restore_working_area(struct target *target, struct working_area *area)
2135 {
2136 int retval = ERROR_OK;
2137
2138 if (target->backup_working_area && area->backup) {
2139 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2140 if (retval != ERROR_OK)
2141 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2142 area->size, area->address);
2143 }
2144
2145 return retval;
2146 }
2147
2148 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2149 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2150 {
2151 if (!area || area->free)
2152 return ERROR_OK;
2153
2154 int retval = ERROR_OK;
2155 if (restore) {
2156 retval = target_restore_working_area(target, area);
2157 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2158 if (retval != ERROR_OK)
2159 return retval;
2160 }
2161
2162 area->free = true;
2163
2164 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2165 area->size, area->address);
2166
2167 /* mark user pointer invalid */
2168 /* TODO: Is this really safe? It points to some previous caller's memory.
2169 * How could we know that the area pointer is still in that place and not
2170 * some other vital data? What's the purpose of this, anyway? */
2171 *area->user = NULL;
2172 area->user = NULL;
2173
2174 target_merge_working_areas(target);
2175
2176 print_wa_layout(target);
2177
2178 return retval;
2179 }
2180
/* Free a working area, restoring the original memory contents first.
 * Accepts a NULL area pointer as a no-op. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
2185
2186 /* free resources and restore memory, if restoring memory fails,
2187 * free up resources anyway
2188 */
2189 static void target_free_all_working_areas_restore(struct target *target, int restore)
2190 {
2191 struct working_area *c = target->working_areas;
2192
2193 LOG_DEBUG("freeing all working areas");
2194
2195 /* Loop through all areas, restoring the allocated ones and marking them as free */
2196 while (c) {
2197 if (!c->free) {
2198 if (restore)
2199 target_restore_working_area(target, c);
2200 c->free = true;
2201 *c->user = NULL; /* Same as above */
2202 c->user = NULL;
2203 }
2204 c = c->next;
2205 }
2206
2207 /* Run a merge pass to combine all areas into one */
2208 target_merge_working_areas(target);
2209
2210 print_wa_layout(target);
2211 }
2212
2213 void target_free_all_working_areas(struct target *target)
2214 {
2215 target_free_all_working_areas_restore(target, 1);
2216
2217 /* Now we have none or only one working area marked as free */
2218 if (target->working_areas) {
2219 /* Free the last one to allow on-the-fly moving and resizing */
2220 free(target->working_areas->backup);
2221 free(target->working_areas);
2222 target->working_areas = NULL;
2223 }
2224 }
2225
2226 /* Find the largest number of bytes that can be allocated */
2227 uint32_t target_get_working_area_avail(struct target *target)
2228 {
2229 struct working_area *c = target->working_areas;
2230 uint32_t max_size = 0;
2231
2232 if (!c)
2233 return target->working_area_size;
2234
2235 while (c) {
2236 if (c->free && max_size < c->size)
2237 max_size = c->size;
2238
2239 c = c->next;
2240 }
2241
2242 return max_size;
2243 }
2244
/* Release all resources owned by a single target; called from target_quit().
 * Teardown order: type-specific deinit first, then callbacks/events,
 * working areas, SMP list, RTOS state, and finally the struct itself. */
static void target_destroy(struct target *target)
{
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* Free the Tcl event handlers registered for this target */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head = target->head;
		while (head) {
			struct target_list *pos = head->next;
			head->target->smp = 0;
			free(head);
			head = pos;
		}
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2285
2286 void target_quit(void)
2287 {
2288 struct target_event_callback *pe = target_event_callbacks;
2289 while (pe) {
2290 struct target_event_callback *t = pe->next;
2291 free(pe);
2292 pe = t;
2293 }
2294 target_event_callbacks = NULL;
2295
2296 struct target_timer_callback *pt = target_timer_callbacks;
2297 while (pt) {
2298 struct target_timer_callback *t = pt->next;
2299 free(pt);
2300 pt = t;
2301 }
2302 target_timer_callbacks = NULL;
2303
2304 for (struct target *target = all_targets; target;) {
2305 struct target *tmp;
2306
2307 tmp = target->next;
2308 target_destroy(target);
2309 target = tmp;
2310 }
2311
2312 all_targets = NULL;
2313 }
2314
2315 int target_arch_state(struct target *target)
2316 {
2317 int retval;
2318 if (!target) {
2319 LOG_WARNING("No target has been configured");
2320 return ERROR_OK;
2321 }
2322
2323 if (target->state != TARGET_HALTED)
2324 return ERROR_OK;
2325
2326 retval = target->type->arch_state(target);
2327 return retval;
2328 }
2329
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info)
{
	/* If a target does not support semihosting functions, it has no need
	 * to provide a .get_gdb_fileio_info callback. Returning ERROR_FAIL
	 * here makes gdb_server report "Txx" (target halted) every time
	 * instead. */
	return ERROR_FAIL;
}
2339
/* Default no-op implementation of the .gdb_fileio_end callback. */
static int target_gdb_fileio_end_default(struct target *target,
		int retcode, int fileio_errno, bool ctrl_c)
{
	return ERROR_OK;
}
2345
2346 int target_profiling_default(struct target *target, uint32_t *samples,
2347 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2348 {
2349 struct timeval timeout, now;
2350
2351 gettimeofday(&timeout, NULL);
2352 timeval_add_time(&timeout, seconds, 0);
2353
2354 LOG_INFO("Starting profiling. Halting and resuming the"
2355 " target as often as we can...");
2356
2357 uint32_t sample_count = 0;
2358 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2359 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2360
2361 int retval = ERROR_OK;
2362 for (;;) {
2363 target_poll(target);
2364 if (target->state == TARGET_HALTED) {
2365 uint32_t t = buf_get_u32(reg->value, 0, 32);
2366 samples[sample_count++] = t;
2367 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2368 retval = target_resume(target, 1, 0, 0, 0);
2369 target_poll(target);
2370 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2371 } else if (target->state == TARGET_RUNNING) {
2372 /* We want to quickly sample the PC. */
2373 retval = target_halt(target);
2374 } else {
2375 LOG_INFO("Target not halted or running");
2376 retval = ERROR_OK;
2377 break;
2378 }
2379
2380 if (retval != ERROR_OK)
2381 break;
2382
2383 gettimeofday(&now, NULL);
2384 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2385 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2386 break;
2387 }
2388 }
2389
2390 *num_samples = sample_count;
2391 return retval;
2392 }
2393
2394 /* Single aligned words are guaranteed to use 16 or 32 bit access
2395 * mode respectively, otherwise data is handled as quickly as
2396 * possible
2397 */
2398 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2399 {
2400 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2401 size, address);
2402
2403 if (!target_was_examined(target)) {
2404 LOG_ERROR("Target not examined yet");
2405 return ERROR_FAIL;
2406 }
2407
2408 if (size == 0)
2409 return ERROR_OK;
2410
2411 if ((address + size - 1) < address) {
2412 /* GDB can request this when e.g. PC is 0xfffffffc */
2413 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2414 address,
2415 size);
2416 return ERROR_FAIL;
2417 }
2418
2419 return target->type->write_buffer(target, address, size, buffer);
2420 }
2421
/* Generic .write_buffer implementation: first bump the address up to the
 * widest supported access size with progressively larger writes, then write
 * the bulk with the largest possible size, finishing the tail with smaller
 * accesses. */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	/* Widest single access the target supports, in bytes */
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		if (address & size) {
			/* Address is misaligned at this granularity: write one
			 * element of 'size' bytes to reach the next boundary */
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Number of bytes coverable by whole elements of this size */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2458
2459 /* Single aligned words are guaranteed to use 16 or 32 bit access
2460 * mode respectively, otherwise data is handled as quickly as
2461 * possible
2462 */
2463 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2464 {
2465 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2466 size, address);
2467
2468 if (!target_was_examined(target)) {
2469 LOG_ERROR("Target not examined yet");
2470 return ERROR_FAIL;
2471 }
2472
2473 if (size == 0)
2474 return ERROR_OK;
2475
2476 if ((address + size - 1) < address) {
2477 /* GDB can request this when e.g. PC is 0xfffffffc */
2478 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2479 address,
2480 size);
2481 return ERROR_FAIL;
2482 }
2483
2484 return target->type->read_buffer(target, address, size, buffer);
2485 }
2486
/* Generic .read_buffer implementation: mirror image of
 * target_write_buffer_default() — align up with progressively larger reads,
 * then read the bulk with the widest access, finishing with smaller ones. */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	/* Widest single access the target supports, in bytes */
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		if (address & size) {
			/* Address is misaligned at this granularity: read one
			 * element of 'size' bytes to reach the next boundary */
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Number of bytes coverable by whole elements of this size */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2522
/* Compute a CRC over 'size' bytes of target memory at 'address'.
 * Tries the target's own (possibly on-target) checksum routine first and
 * falls back to reading the memory and checksumming on the host. */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* Host-side fallback */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): get_u32 followed by set_u32 with the same target
		 * byte order looks like a round-trip no-op — verify the intended
		 * transformation against image_calculate_checksum()'s expectations */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2562
2563 int target_blank_check_memory(struct target *target,
2564 struct target_memory_check_block *blocks, int num_blocks,
2565 uint8_t erased_value)
2566 {
2567 if (!target_was_examined(target)) {
2568 LOG_ERROR("Target not examined yet");
2569 return ERROR_FAIL;
2570 }
2571
2572 if (!target->type->blank_check_memory)
2573 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2574
2575 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2576 }
2577
2578 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2579 {
2580 uint8_t value_buf[8];
2581 if (!target_was_examined(target)) {
2582 LOG_ERROR("Target not examined yet");
2583 return ERROR_FAIL;
2584 }
2585
2586 int retval = target_read_memory(target, address, 8, 1, value_buf);
2587
2588 if (retval == ERROR_OK) {
2589 *value = target_buffer_get_u64(target, value_buf);
2590 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2591 address,
2592 *value);
2593 } else {
2594 *value = 0x0;
2595 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2596 address);
2597 }
2598
2599 return retval;
2600 }
2601
2602 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2603 {
2604 uint8_t value_buf[4];
2605 if (!target_was_examined(target)) {
2606 LOG_ERROR("Target not examined yet");
2607 return ERROR_FAIL;
2608 }
2609
2610 int retval = target_read_memory(target, address, 4, 1, value_buf);
2611
2612 if (retval == ERROR_OK) {
2613 *value = target_buffer_get_u32(target, value_buf);
2614 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2615 address,
2616 *value);
2617 } else {
2618 *value = 0x0;
2619 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2620 address);
2621 }
2622
2623 return retval;
2624 }
2625
2626 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2627 {
2628 uint8_t value_buf[2];
2629 if (!target_was_examined(target)) {
2630 LOG_ERROR("Target not examined yet");
2631 return ERROR_FAIL;
2632 }
2633
2634 int retval = target_read_memory(target, address, 2, 1, value_buf);
2635
2636 if (retval == ERROR_OK) {
2637 *value = target_buffer_get_u16(target, value_buf);
2638 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2639 address,
2640 *value);
2641 } else {
2642 *value = 0x0;
2643 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2644 address);
2645 }
2646
2647 return retval;
2648 }
2649
2650 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2651 {
2652 if (!target_was_examined(target)) {
2653 LOG_ERROR("Target not examined yet");
2654 return ERROR_FAIL;
2655 }
2656
2657 int retval = target_read_memory(target, address, 1, 1, value);
2658
2659 if (retval == ERROR_OK) {
2660 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2661 address,
2662 *value);
2663 } else {
2664 *value = 0x0;
2665 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2666 address);
2667 }
2668
2669 return retval;
2670 }
2671
2672 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2673 {
2674 int retval;
2675 uint8_t value_buf[8];
2676 if (!target_was_examined(target)) {
2677 LOG_ERROR("Target not examined yet");
2678 return ERROR_FAIL;
2679 }
2680
2681 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2682 address,
2683 value);
2684
2685 target_buffer_set_u64(target, value_buf, value);
2686 retval = target_write_memory(target, address, 8, 1, value_buf);
2687 if (retval != ERROR_OK)
2688 LOG_DEBUG("failed: %i", retval);
2689
2690 return retval;
2691 }
2692
2693 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2694 {
2695 int retval;
2696 uint8_t value_buf[4];
2697 if (!target_was_examined(target)) {
2698 LOG_ERROR("Target not examined yet");
2699 return ERROR_FAIL;
2700 }
2701
2702 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2703 address,
2704 value);
2705
2706 target_buffer_set_u32(target, value_buf, value);
2707 retval = target_write_memory(target, address, 4, 1, value_buf);
2708 if (retval != ERROR_OK)
2709 LOG_DEBUG("failed: %i", retval);
2710
2711 return retval;
2712 }
2713
2714 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2715 {
2716 int retval;
2717 uint8_t value_buf[2];
2718 if (!target_was_examined(target)) {
2719 LOG_ERROR("Target not examined yet");
2720 return ERROR_FAIL;
2721 }
2722
2723 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2724 address,
2725 value);
2726
2727 target_buffer_set_u16(target, value_buf, value);
2728 retval = target_write_memory(target, address, 2, 1, value_buf);
2729 if (retval != ERROR_OK)
2730 LOG_DEBUG("failed: %i", retval);
2731
2732 return retval;
2733 }
2734
2735 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2736 {
2737 int retval;
2738 if (!target_was_examined(target)) {
2739 LOG_ERROR("Target not examined yet");
2740 return ERROR_FAIL;
2741 }
2742
2743 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2744 address, value);
2745
2746 retval = target_write_memory(target, address, 1, 1, &value);
2747 if (retval != ERROR_OK)
2748 LOG_DEBUG("failed: %i", retval);
2749
2750 return retval;
2751 }
2752
2753 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2754 {
2755 int retval;
2756 uint8_t value_buf[8];
2757 if (!target_was_examined(target)) {
2758 LOG_ERROR("Target not examined yet");
2759 return ERROR_FAIL;
2760 }
2761
2762 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2763 address,
2764 value);
2765
2766 target_buffer_set_u64(target, value_buf, value);
2767 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2768 if (retval != ERROR_OK)
2769 LOG_DEBUG("failed: %i", retval);
2770
2771 return retval;
2772 }
2773
2774 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2775 {
2776 int retval;
2777 uint8_t value_buf[4];
2778 if (!target_was_examined(target)) {
2779 LOG_ERROR("Target not examined yet");
2780 return ERROR_FAIL;
2781 }
2782
2783 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2784 address,
2785 value);
2786
2787 target_buffer_set_u32(target, value_buf, value);
2788 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2789 if (retval != ERROR_OK)
2790 LOG_DEBUG("failed: %i", retval);
2791
2792 return retval;
2793 }
2794
2795 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2796 {
2797 int retval;
2798 uint8_t value_buf[2];
2799 if (!target_was_examined(target)) {
2800 LOG_ERROR("Target not examined yet");
2801 return ERROR_FAIL;
2802 }
2803
2804 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2805 address,
2806 value);
2807
2808 target_buffer_set_u16(target, value_buf, value);
2809 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2810 if (retval != ERROR_OK)
2811 LOG_DEBUG("failed: %i", retval);
2812
2813 return retval;
2814 }
2815
2816 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2817 {
2818 int retval;
2819 if (!target_was_examined(target)) {
2820 LOG_ERROR("Target not examined yet");
2821 return ERROR_FAIL;
2822 }
2823
2824 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2825 address, value);
2826
2827 retval = target_write_phys_memory(target, address, 1, 1, &value);
2828 if (retval != ERROR_OK)
2829 LOG_DEBUG("failed: %i", retval);
2830
2831 return retval;
2832 }
2833
2834 static int find_target(struct command_invocation *cmd, const char *name)
2835 {
2836 struct target *target = get_target(name);
2837 if (!target) {
2838 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2839 return ERROR_FAIL;
2840 }
2841 if (!target->tap->enabled) {
2842 command_print(cmd, "Target: TAP %s is disabled, "
2843 "can't be the current target\n",
2844 target->tap->dotted_name);
2845 return ERROR_FAIL;
2846 }
2847
2848 cmd->ctx->current_target = target;
2849 if (cmd->ctx->current_target_override)
2850 cmd->ctx->current_target_override = target;
2851
2852 return ERROR_OK;
2853 }
2854
2855
/* 'targets' command: with one argument, select that target as current;
 * otherwise (or if selection fails) print a table of all targets. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	struct target *target = all_targets;
	command_print(CMD, " TargetName Type Endian TapName State ");
	command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		/* '*' marks the current target of this command context */
		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2898
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* Most recently sensed line states, updated by sense_handler() */
static int power_dropout;
static int srst_asserted;

/* Pending-action flags: latched by sense_handler(), consumed by handle_target() */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
2908
/* Sample the power/srst lines, detect edges against the previous sample,
 * and latch the run_* action flags (rate-limited to one event per 2 s). */
static int sense_handler(void)
{
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	/* Falling edge of power_dropout = power came back */
	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	static int64_t last_power;
	/* Rate-limit dropout handling to once per 2 seconds */
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	/* Falling edge of srst_asserted = srst released */
	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	static int64_t last_srst;
	/* Rate-limit srst-deassert handling to once per 2 seconds */
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2960
/* process target state changes */
/* Periodic hook: run the power/srst event procs (guarded against recursion),
 * then poll every enabled, examined target, applying exponential backoff to
 * targets whose poll keeps failing. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			if (target->backoff.times > 0) {
				/* Poll has failed at least once: try a reexamination */
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target_set_examined(target);
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
							target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3073
/**
 * "reg" command: with no arguments, list every register of the current
 * target (cached values shown only when valid); with one argument, select
 * a register by ordinal number or by name; an optional second argument
 * either forces a re-read ("force") or writes a new value (numeric).
 */
COMMAND_HANDLER(handle_reg_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	struct reg *reg = NULL;

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		/* count is the ordinal across all caches, matching the
		 * numeric selection mode below */
		unsigned int count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* skip registers absent on this part or hidden */
				if (reg->exist == false || reg->hidden)
					continue;
				/* only print cached values if they are valid */
				if (reg->valid) {
					char *value = buf_to_hex_str(reg->value,
							reg->size);
					command_print(CMD,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk all caches until the num-th register is reached
		 * (hidden/non-existent registers are counted here too) */
		struct reg_cache *cache = target->reg_cache;
		unsigned int count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);

		if (!reg)
			goto not_found;
	}

	assert(reg); /* give clang a hint that we *know* reg is != NULL here */

	if (!reg->exist)
		goto not_found;

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		/* "force" invalidates the cache so the value is re-read below */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0) {
			int retval = reg->type->get(reg);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not read register '%s'", reg->name);
				return retval;
			}
		}
		char *value = buf_to_hex_str(reg->value, reg->size);
		command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (!buf)
			return ERROR_FAIL;
		/* NOTE(review): str_to_buf() result is not checked; malformed
		 * input silently writes a partially-converted value */
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		int retval = reg->type->set(reg, buf);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not write to register '%s'", reg->name);
		} else {
			char *value = buf_to_hex_str(reg->value, reg->size);
			command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
			free(value);
		}

		free(buf);

		return retval;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;

not_found:
	command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
	return ERROR_OK;
}
3204
3205 COMMAND_HANDLER(handle_poll_command)
3206 {
3207 int retval = ERROR_OK;
3208 struct target *target = get_current_target(CMD_CTX);
3209
3210 if (CMD_ARGC == 0) {
3211 command_print(CMD, "background polling: %s",
3212 jtag_poll_get_enabled() ? "on" : "off");
3213 command_print(CMD, "TAP: %s (%s)",
3214 target->tap->dotted_name,
3215 target->tap->enabled ? "enabled" : "disabled");
3216 if (!target->tap->enabled)
3217 return ERROR_OK;
3218 retval = target_poll(target);
3219 if (retval != ERROR_OK)
3220 return retval;
3221 retval = target_arch_state(target);
3222 if (retval != ERROR_OK)
3223 return retval;
3224 } else if (CMD_ARGC == 1) {
3225 bool enable;
3226 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3227 jtag_poll_set_enabled(enable);
3228 } else
3229 return ERROR_COMMAND_SYNTAX_ERROR;
3230
3231 return retval;
3232 }
3233
3234 COMMAND_HANDLER(handle_wait_halt_command)
3235 {
3236 if (CMD_ARGC > 1)
3237 return ERROR_COMMAND_SYNTAX_ERROR;
3238
3239 unsigned ms = DEFAULT_HALT_TIMEOUT;
3240 if (1 == CMD_ARGC) {
3241 int retval = parse_uint(CMD_ARGV[0], &ms);
3242 if (retval != ERROR_OK)
3243 return ERROR_COMMAND_SYNTAX_ERROR;
3244 }
3245
3246 struct target *target = get_current_target(CMD_CTX);
3247 return target_wait_state(target, TARGET_HALTED, ms);
3248 }
3249
3250 /* wait for target state to change. The trick here is to have a low
3251 * latency for short waits and not to suck up all the CPU time
3252 * on longer waits.
3253 *
3254 * After 500ms, keep_alive() is invoked
3255 */
3256 int target_wait_state(struct target *target, enum target_state state, int ms)
3257 {
3258 int retval;
3259 int64_t then = 0, cur;
3260 bool once = true;
3261
3262 for (;;) {
3263 retval = target_poll(target);
3264 if (retval != ERROR_OK)
3265 return retval;
3266 if (target->state == state)
3267 break;
3268 cur = timeval_ms();
3269 if (once) {
3270 once = false;
3271 then = timeval_ms();
3272 LOG_DEBUG("waiting for target %s...",
3273 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3274 }
3275
3276 if (cur-then > 500)
3277 keep_alive();
3278
3279 if ((cur-then) > ms) {
3280 LOG_ERROR("timed out while waiting for target %s",
3281 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3282 return ERROR_FAIL;
3283 }
3284 }
3285
3286 return ERROR_OK;
3287 }
3288
3289 COMMAND_HANDLER(handle_halt_command)
3290 {
3291 LOG_DEBUG("-");
3292
3293 struct target *target = get_current_target(CMD_CTX);
3294
3295 target->verbose_halt_msg = true;
3296
3297 int retval = target_halt(target);
3298 if (retval != ERROR_OK)
3299 return retval;
3300
3301 if (CMD_ARGC == 1) {
3302 unsigned wait_local;
3303 retval = parse_uint(CMD_ARGV[0], &wait_local);
3304 if (retval != ERROR_OK)
3305 return ERROR_COMMAND_SYNTAX_ERROR;
3306 if (!wait_local)
3307 return ERROR_OK;
3308 }
3309
3310 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3311 }
3312
3313 COMMAND_HANDLER(handle_soft_reset_halt_command)
3314 {
3315 struct target *target = get_current_target(CMD_CTX);
3316
3317 LOG_USER("requesting target halt and executing a soft reset");
3318
3319 target_soft_reset_halt(target);
3320
3321 return ERROR_OK;
3322 }
3323
3324 COMMAND_HANDLER(handle_reset_command)
3325 {
3326 if (CMD_ARGC > 1)
3327 return ERROR_COMMAND_SYNTAX_ERROR;
3328
3329 enum target_reset_mode reset_mode = RESET_RUN;
3330 if (CMD_ARGC == 1) {
3331 const struct jim_nvp *n;
3332 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3333 if ((!n->name) || (n->value == RESET_UNKNOWN))
3334 return ERROR_COMMAND_SYNTAX_ERROR;
3335 reset_mode = n->value;
3336 }
3337
3338 /* reset *all* targets */
3339 return target_process_reset(CMD, reset_mode);
3340 }
3341
3342
3343 COMMAND_HANDLER(handle_resume_command)
3344 {
3345 int current = 1;
3346 if (CMD_ARGC > 1)
3347 return ERROR_COMMAND_SYNTAX_ERROR;
3348
3349 struct target *target = get_current_target(CMD_CTX);
3350
3351 /* with no CMD_ARGV, resume from current pc, addr = 0,
3352 * with one arguments, addr = CMD_ARGV[0],
3353 * handle breakpoints, not debugging */
3354 target_addr_t addr = 0;
3355 if (CMD_ARGC == 1) {
3356 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3357 current = 0;
3358 }
3359
3360 return target_resume(target, current, addr, 1, 0);
3361 }
3362
3363 COMMAND_HANDLER(handle_step_command)
3364 {
3365 if (CMD_ARGC > 1)
3366 return ERROR_COMMAND_SYNTAX_ERROR;
3367
3368 LOG_DEBUG("-");
3369
3370 /* with no CMD_ARGV, step from current pc, addr = 0,
3371 * with one argument addr = CMD_ARGV[0],
3372 * handle breakpoints, debugging */
3373 target_addr_t addr = 0;
3374 int current_pc = 1;
3375 if (CMD_ARGC == 1) {
3376 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3377 current_pc = 0;
3378 }
3379
3380 struct target *target = get_current_target(CMD_CTX);
3381
3382 return target_step(target, current_pc, addr, 1);
3383 }
3384
3385 void target_handle_md_output(struct command_invocation *cmd,
3386 struct target *target, target_addr_t address, unsigned size,
3387 unsigned count, const uint8_t *buffer)
3388 {
3389 const unsigned line_bytecnt = 32;
3390 unsigned line_modulo = line_bytecnt / size;
3391
3392 char output[line_bytecnt * 4 + 1];
3393 unsigned output_len = 0;
3394
3395 const char *value_fmt;
3396 switch (size) {
3397 case 8:
3398 value_fmt = "%16.16"PRIx64" ";
3399 break;
3400 case 4:
3401 value_fmt = "%8.8"PRIx64" ";
3402 break;
3403 case 2:
3404 value_fmt = "%4.4"PRIx64" ";
3405 break;
3406 case 1:
3407 value_fmt = "%2.2"PRIx64" ";
3408 break;
3409 default:
3410 /* "can't happen", caller checked */
3411 LOG_ERROR("invalid memory read size: %u", size);
3412 return;
3413 }
3414
3415 for (unsigned i = 0; i < count; i++) {
3416 if (i % line_modulo == 0) {
3417 output_len += snprintf(output + output_len,
3418 sizeof(output) - output_len,
3419 TARGET_ADDR_FMT ": ",
3420 (address + (i * size)));
3421 }
3422
3423 uint64_t value = 0;
3424 const uint8_t *value_ptr = buffer + i * size;
3425 switch (size) {
3426 case 8:
3427 value = target_buffer_get_u64(target, value_ptr);
3428 break;
3429 case 4:
3430 value = target_buffer_get_u32(target, value_ptr);
3431 break;
3432 case 2:
3433 value = target_buffer_get_u16(target, value_ptr);
3434 break;
3435 case 1:
3436 value = *value_ptr;
3437 }
3438 output_len += snprintf(output + output_len,
3439 sizeof(output) - output_len,
3440 value_fmt, value);
3441
3442 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3443 command_print(cmd, "%s", output);
3444 output_len = 0;
3445 }
3446 }
3447 }
3448
3449 COMMAND_HANDLER(handle_md_command)
3450 {
3451 if (CMD_ARGC < 1)
3452 return ERROR_COMMAND_SYNTAX_ERROR;
3453
3454 unsigned size = 0;
3455 switch (CMD_NAME[2]) {
3456 case 'd':
3457 size = 8;
3458 break;
3459 case 'w':
3460 size = 4;
3461 break;
3462 case 'h':
3463 size = 2;
3464 break;
3465 case 'b':
3466 size = 1;
3467 break;
3468 default:
3469 return ERROR_COMMAND_SYNTAX_ERROR;
3470 }
3471
3472 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3473 int (*fn)(struct target *target,
3474 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3475 if (physical) {
3476 CMD_ARGC--;
3477 CMD_ARGV++;
3478 fn = target_read_phys_memory;
3479 } else
3480 fn = target_read_memory;
3481 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3482 return ERROR_COMMAND_SYNTAX_ERROR;
3483
3484 target_addr_t address;
3485 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3486
3487 unsigned count = 1;
3488 if (CMD_ARGC == 2)
3489 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3490
3491 uint8_t *buffer = calloc(count, size);
3492 if (!buffer) {
3493 LOG_ERROR("Failed to allocate md read buffer");
3494 return ERROR_FAIL;
3495 }
3496
3497 struct target *target = get_current_target(CMD_CTX);
3498 int retval = fn(target, address, size, count, buffer);
3499 if (retval == ERROR_OK)
3500 target_handle_md_output(CMD, target, address, size, count, buffer);
3501
3502 free(buffer);
3503
3504 return retval;
3505 }
3506
/* Signature shared by target_write_memory() and target_write_phys_memory(),
 * letting mw/fill helpers be parameterized over virtual vs. physical writes. */
typedef int (*target_write_fn)(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3509
3510 static int target_fill_mem(struct target *target,
3511 target_addr_t address,
3512 target_write_fn fn,
3513 unsigned data_size,
3514 /* value */
3515 uint64_t b,
3516 /* count */
3517 unsigned c)
3518 {
3519 /* We have to write in reasonably large chunks to be able
3520 * to fill large memory areas with any sane speed */
3521 const unsigned chunk_size = 16384;
3522 uint8_t *target_buf = malloc(chunk_size * data_size);
3523 if (!target_buf) {
3524 LOG_ERROR("Out of memory");
3525 return ERROR_FAIL;
3526 }
3527
3528 for (unsigned i = 0; i < chunk_size; i++) {
3529 switch (data_size) {
3530 case 8:
3531 target_buffer_set_u64(target, target_buf + i * data_size, b);
3532 break;
3533 case 4:
3534 target_buffer_set_u32(target, target_buf + i * data_size, b);
3535 break;
3536 case 2:
3537 target_buffer_set_u16(target, target_buf + i * data_size, b);
3538 break;
3539 case 1:
3540 target_buffer_set_u8(target, target_buf + i * data_size, b);
3541 break;
3542 default:
3543 exit(-1);
3544 }
3545 }
3546
3547 int retval = ERROR_OK;
3548
3549 for (unsigned x = 0; x < c; x += chunk_size) {
3550 unsigned current;
3551 current = c - x;
3552 if (current > chunk_size)
3553 current = chunk_size;
3554 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3555 if (retval != ERROR_OK)
3556 break;
3557 /* avoid GDB timeouts */
3558 keep_alive();
3559 }
3560 free(target_buf);
3561
3562 return retval;
3563 }
3564
3565
3566 COMMAND_HANDLER(handle_mw_command)
3567 {
3568 if (CMD_ARGC < 2)
3569 return ERROR_COMMAND_SYNTAX_ERROR;
3570 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3571 target_write_fn fn;
3572 if (physical) {
3573 CMD_ARGC--;
3574 CMD_ARGV++;
3575 fn = target_write_phys_memory;
3576 } else
3577 fn = target_write_memory;
3578 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3579 return ERROR_COMMAND_SYNTAX_ERROR;
3580
3581 target_addr_t address;
3582 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3583
3584 uint64_t value;
3585 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3586
3587 unsigned count = 1;
3588 if (CMD_ARGC == 3)
3589 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3590
3591 struct target *target = get_current_target(CMD_CTX);
3592 unsigned wordsize;
3593 switch (CMD_NAME[2]) {
3594 case 'd':
3595 wordsize = 8;
3596 break;
3597 case 'w':
3598 wordsize = 4;
3599 break;
3600 case 'h':
3601 wordsize = 2;
3602 break;
3603 case 'b':
3604 wordsize = 1;
3605 break;
3606 default:
3607 return ERROR_COMMAND_SYNTAX_ERROR;
3608 }
3609
3610 return target_fill_mem(target, address, fn, wordsize, value, count);
3611 }
3612
3613 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3614 target_addr_t *min_address, target_addr_t *max_address)
3615 {
3616 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3617 return ERROR_COMMAND_SYNTAX_ERROR;
3618
3619 /* a base address isn't always necessary,
3620 * default to 0x0 (i.e. don't relocate) */
3621 if (CMD_ARGC >= 2) {
3622 target_addr_t addr;
3623 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3624 image->base_address = addr;
3625 image->base_address_set = true;
3626 } else
3627 image->base_address_set = false;
3628
3629 image->start_address_set = false;
3630
3631 if (CMD_ARGC >= 4)
3632 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3633 if (CMD_ARGC == 5) {
3634 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3635 /* use size (given) to find max (required) */
3636 *max_address += *min_address;
3637 }
3638
3639 if (*min_address > *max_address)
3640 return ERROR_COMMAND_SYNTAX_ERROR;
3641
3642 return ERROR_OK;
3643 }
3644
/**
 * "load_image" command: copy an image file into target memory, optionally
 * relocated and clipped to the [min_address, max_address) window parsed by
 * parse_load_image_command().
 */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* default: no upper clip */
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* only write the part of the section that falls inside the
		 * [min_address, max_address) window */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3728
3729 COMMAND_HANDLER(handle_dump_image_command)
3730 {
3731 struct fileio *fileio;
3732 uint8_t *buffer;
3733 int retval, retvaltemp;
3734 target_addr_t address, size;
3735 struct duration bench;
3736 struct target *target = get_current_target(CMD_CTX);
3737
3738 if (CMD_ARGC != 3)
3739 return ERROR_COMMAND_SYNTAX_ERROR;
3740
3741 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3742 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3743
3744 uint32_t buf_size = (size > 4096) ? 4096 : size;
3745 buffer = malloc(buf_size);
3746 if (!buffer)
3747 return ERROR_FAIL;
3748
3749 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3750 if (retval != ERROR_OK) {
3751 free(buffer);
3752 return retval;
3753 }
3754
3755 duration_start(&bench);
3756
3757 while (size > 0) {
3758 size_t size_written;
3759 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3760 retval = target_read_buffer(target, address, this_run_size, buffer);
3761 if (retval != ERROR_OK)
3762 break;
3763
3764 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3765 if (retval != ERROR_OK)
3766 break;
3767
3768 size -= this_run_size;
3769 address += this_run_size;
3770 }
3771
3772 free(buffer);
3773
3774 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3775 size_t filesize;
3776 retval = fileio_size(fileio, &filesize);
3777 if (retval != ERROR_OK)
3778 return retval;
3779 command_print(CMD,
3780 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3781 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3782 }
3783
3784 retvaltemp = fileio_close(fileio);
3785 if (retvaltemp != ERROR_OK)
3786 return retvaltemp;
3787
3788 return retval;
3789 }
3790
/* Strictness levels for handle_verify_image_command_internal(). */
enum verify_mode {
	IMAGE_TEST = 0,		/* just read the image and list its sections */
	IMAGE_VERIFY = 1,	/* CRC check; binary compare on mismatch */
	IMAGE_CHECKSUM_ONLY = 2	/* CRC check only; fail fast on mismatch */
};
3796
3797 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3798 {
3799 uint8_t *buffer;
3800 size_t buf_cnt;
3801 uint32_t image_size;
3802 int retval;
3803 uint32_t checksum = 0;
3804 uint32_t mem_checksum = 0;
3805
3806 struct image image;
3807
3808 struct target *target = get_current_target(CMD_CTX);
3809
3810 if (CMD_ARGC < 1)
3811 return ERROR_COMMAND_SYNTAX_ERROR;
3812
3813 if (!target) {
3814 LOG_ERROR("no target selected");
3815 return ERROR_FAIL;
3816 }
3817
3818 struct duration bench;
3819 duration_start(&bench);
3820
3821 if (CMD_ARGC >= 2) {
3822 target_addr_t addr;
3823 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3824 image.base_address = addr;
3825 image.base_address_set = true;
3826 } else {
3827 image.base_address_set = false;
3828 image.base_address = 0x0;
3829 }
3830
3831 image.start_address_set = false;
3832
3833 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3834 if (retval != ERROR_OK)
3835 return retval;
3836
3837 image_size = 0x0;
3838 int diffs = 0;
3839 retval = ERROR_OK;
3840 for (unsigned int i = 0; i < image.num_sections; i++) {
3841 buffer = malloc(image.sections[i].size);
3842 if (!buffer) {
3843 command_print(CMD,
3844 "error allocating buffer for section (%" PRIu32 " bytes)",
3845 image.sections[i].size);
3846 break;
3847 }
3848 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3849 if (retval != ERROR_OK) {
3850 free(buffer);
3851 break;
3852 }
3853
3854 if (verify >= IMAGE_VERIFY) {
3855 /* calculate checksum of image */
3856 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3857 if (retval != ERROR_OK) {
3858 free(buffer);
3859 break;
3860 }
3861
3862 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3863 if (retval != ERROR_OK) {
3864 free(buffer);
3865 break;
3866 }
3867 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3868 LOG_ERROR("checksum mismatch");
3869 free(buffer);
3870 retval = ERROR_FAIL;
3871 goto done;
3872 }
3873 if (checksum != mem_checksum) {
3874 /* failed crc checksum, fall back to a binary compare */
3875 uint8_t *data;
3876
3877 if (diffs == 0)
3878 LOG_ERROR("checksum mismatch - attempting binary compare");
3879
3880 data = malloc(buf_cnt);
3881
3882 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3883 if (retval == ERROR_OK) {
3884 uint32_t t;
3885 for (t = 0; t < buf_cnt; t++) {
3886 if (data[t] != buffer[t]) {
3887 command_print(CMD,
3888 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3889 diffs,
3890 (unsigned)(t + image.sections[i].base_address),
3891 data[t],
3892 buffer[t]);
3893 if (diffs++ >= 127) {
3894 command_print(CMD, "More than 128 errors, the rest are not printed.");
3895 free(data);
3896 free(buffer);
3897 goto done;
3898 }
3899 }
3900 keep_alive();
3901 }
3902 }
3903 free(data);
3904 }
3905 } else {
3906 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3907 image.sections[i].base_address,
3908 buf_cnt);
3909 }
3910
3911 free(buffer);
3912 image_size += buf_cnt;
3913 }
3914 if (diffs > 0)
3915 command_print(CMD, "No more differences found.");
3916 done:
3917 if (diffs > 0)
3918 retval = ERROR_FAIL;
3919 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3920 command_print(CMD, "verified %" PRIu32 " bytes "
3921 "in %fs (%0.3f KiB/s)", image_size,
3922 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3923 }
3924
3925 image_close(&image);
3926
3927 return retval;
3928 }
3929
/* "verify_image_checksum": CRC-compare only; fail fast on the first mismatch. */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3934
/* "verify_image": CRC-compare with byte-by-byte fallback on mismatch. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3939
/* "test_image": read the image file and list its sections; no target compare. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3944
3945 static int handle_bp_command_list(struct command_invocation *cmd)
3946 {
3947 struct target *target = get_current_target(cmd->ctx);
3948 struct breakpoint *breakpoint = target->breakpoints;
3949 while (breakpoint) {
3950 if (breakpoint->type == BKPT_SOFT) {
3951 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3952 breakpoint->length);
3953 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3954 breakpoint->address,
3955 breakpoint->length,
3956 breakpoint->set, buf);
3957 free(buf);
3958 } else {
3959 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3960 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3961 breakpoint->asid,
3962 breakpoint->length, breakpoint->set);
3963 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3964 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3965 breakpoint->address,
3966 breakpoint->length, breakpoint->set);
3967 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3968 breakpoint->asid);
3969 } else
3970 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3971 breakpoint->address,
3972 breakpoint->length, breakpoint->set);
3973 }
3974
3975 breakpoint = breakpoint->next;
3976 }
3977 return ERROR_OK;
3978 }
3979
3980 static int handle_bp_command_set(struct command_invocation *cmd,
3981 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3982 {
3983 struct target *target = get_current_target(cmd->ctx);
3984 int retval;
3985
3986 if (asid == 0) {
3987 retval = breakpoint_add(target, addr, length, hw);
3988 /* error is always logged in breakpoint_add(), do not print it again */
3989 if (retval == ERROR_OK)
3990 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3991
3992 } else if (addr == 0) {
3993 if (!target->type->add_context_breakpoint) {
3994 LOG_ERROR("Context breakpoint not available");
3995 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3996 }
3997 retval = context_breakpoint_add(target, asid, length, hw);
3998 /* error is always logged in context_breakpoint_add(), do not print it again */
3999 if (retval == ERROR_OK)
4000 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4001
4002 } else {
4003 if (!target->type->add_hybrid_breakpoint) {
4004 LOG_ERROR("Hybrid breakpoint not available");
4005 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4006 }
4007 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4008 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4009 if (retval == ERROR_OK)
4010 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4011 }
4012 return retval;
4013 }
4014
4015 COMMAND_HANDLER(handle_bp_command)
4016 {
4017 target_addr_t addr;
4018 uint32_t asid;
4019 uint32_t length;
4020 int hw = BKPT_SOFT;
4021
4022 switch (CMD_ARGC) {
4023 case 0:
4024 return handle_bp_command_list(CMD);
4025
4026 case 2:
4027 asid = 0;
4028 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4029 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4030 return handle_bp_command_set(CMD, addr, asid, length, hw);
4031
4032 case 3:
4033 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4034 hw = BKPT_HARD;
4035 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4036 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4037 asid = 0;
4038 return handle_bp_command_set(CMD, addr, asid, length, hw);
4039 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4040 hw = BKPT_HARD;
4041 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4042 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4043 addr = 0;
4044 return handle_bp_command_set(CMD, addr, asid, length, hw);
4045 }
4046 /* fallthrough */
4047 case 4:
4048 hw = BKPT_HARD;
4049 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4050 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4051 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4052 return handle_bp_command_set(CMD, addr, asid, length, hw);
4053
4054 default:
4055 return ERROR_COMMAND_SYNTAX_ERROR;
4056 }
4057 }
4058
4059 COMMAND_HANDLER(handle_rbp_command)
4060 {
4061 if (CMD_ARGC != 1)
4062 return ERROR_COMMAND_SYNTAX_ERROR;
4063
4064 struct target *target = get_current_target(CMD_CTX);
4065
4066 if (!strcmp(CMD_ARGV[0], "all")) {
4067 breakpoint_remove_all(target);
4068 } else {
4069 target_addr_t addr;
4070 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4071
4072 breakpoint_remove(target, addr);
4073 }
4074
4075 return ERROR_OK;
4076 }
4077
/**
 * "wp" command: with no arguments, list all watchpoints; otherwise add one.
 * Usage: wp <address> <length> [r|w|a [value [mask]]]
 * The switch below parses from the most-specific optional argument down,
 * falling through to pick up the mandatory address/length pair.
 */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC == 0) {
		/* no arguments: dump the current watchpoint list */
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx32
					", mask: 0x%8.8" PRIx32,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	/* defaults for the optional arguments */
	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint32_t data_value = 0x0;
	uint32_t data_mask = 0xffffffff;

	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
		/* fall through */
	case 3:
		/* access mode: read, write, or any access */
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
4145
4146 COMMAND_HANDLER(handle_rwp_command)
4147 {
4148 if (CMD_ARGC != 1)
4149 return ERROR_COMMAND_SYNTAX_ERROR;
4150
4151 target_addr_t addr;
4152 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4153
4154 struct target *target = get_current_target(CMD_CTX);
4155 watchpoint_remove(target, addr);
4156
4157 return ERROR_OK;
4158 }
4159
4160 /**
4161 * Translate a virtual address to a physical address.
4162 *
4163 * The low-level target implementation must have logged a detailed error
4164 * which is forwarded to telnet/GDB session.
4165 */
4166 COMMAND_HANDLER(handle_virt2phys_command)
4167 {
4168 if (CMD_ARGC != 1)
4169 return ERROR_COMMAND_SYNTAX_ERROR;
4170
4171 target_addr_t va;
4172 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4173 target_addr_t pa;
4174
4175 struct target *target = get_current_target(CMD_CTX);
4176 int retval = target->type->virt2phys(target, va, &pa);
4177 if (retval == ERROR_OK)
4178 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4179
4180 return retval;
4181 }
4182
/* Best-effort raw write to the gmon output file; short writes are only logged. */
static void write_data(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4189
/* Serialize a 32-bit value to the gmon file in the target's byte order. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t raw[4];

	target_buffer_set_u32(target, raw, l);
	write_data(f, raw, sizeof(raw));
}
4197
/* Write the bytes of s (without the trailing NUL) to the gmon file. */
static void write_string(FILE *f, char *s)
{
	size_t len = strlen(s);
	write_data(f, s, len);
}
4202
typedef unsigned char UNIT[2]; /* unit of profiling: 2-byte gprof histogram bin granularity */
4204
/* Dump a gmon.out histogram file.
 *
 * samples[0..sample_num) are sampled PC values; with_range selects an
 * explicit [start_address, end_address) window, otherwise the window is
 * derived from the samples. duration_ms scales the recorded sample rate.
 * NOTE(review): I/O errors are only logged by write_data(); the caller
 * gets no failure indication. */
static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
		uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
{
	uint32_t i;
	FILE *f = fopen(filename, "w");
	if (!f)
		return;
	/* gmon.out header: magic, version 1, then 12 bytes of padding */
	write_string(f, "gmon");
	write_long(f, 0x00000001, target); /* Version */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */

	uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
	write_data(f, &zero, 1);

	/* figure out bucket size */
	uint32_t min;
	uint32_t max;
	if (with_range) {
		/* caller-supplied PC window */
		min = start_address;
		max = end_address;
	} else {
		/* derive the window from the samples themselves */
		min = samples[0];
		max = samples[0];
		for (i = 0; i < sample_num; i++) {
			if (min > samples[i])
				min = samples[i];
			if (max < samples[i])
				max = samples[i];
		}

		/* max should be (largest sample + 1)
		 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
		max++;
	}

	/* NOTE(review): assumes max - min fits in a signed int and is >= 2;
	 * a degenerate sample window trips the assert below */
	int address_space = max - min;
	assert(address_space >= 2);

	/* FIXME: What is the reasonable number of buckets?
	 * The profiling result will be more accurate if there are enough buckets. */
	static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
	uint32_t num_buckets = address_space / sizeof(UNIT);
	if (num_buckets > max_buckets)
		num_buckets = max_buckets;
	int *buckets = malloc(sizeof(int) * num_buckets);
	if (!buckets) {
		fclose(f);
		return;
	}
	memset(buckets, 0, sizeof(int) * num_buckets);
	/* build the histogram: map each in-window sample to its bucket */
	for (i = 0; i < sample_num; i++) {
		uint32_t address = samples[i];

		if ((address < min) || (max <= address))
			continue;

		long long a = address - min;
		long long b = num_buckets;
		long long c = address_space;
		int index_t = (a * b) / c; /* danger!!!! int32 overflows */
		buckets[index_t]++;
	}

	/* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
	write_long(f, min, target); /* low_pc */
	write_long(f, max, target); /* high_pc */
	write_long(f, num_buckets, target); /* # of buckets */
	float sample_rate = sample_num / (duration_ms / 1000.0);
	write_long(f, sample_rate, target);
	write_string(f, "seconds");
	/* pad the dimension field to 15 bytes */
	for (i = 0; i < (15-strlen("seconds")); i++)
		write_data(f, &zero, 1);
	write_string(f, "s");

	/*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */

	/* each bucket is emitted as a 16-bit count (low byte first), clamped to 65535 */
	char *data = malloc(2 * num_buckets);
	if (data) {
		for (i = 0; i < num_buckets; i++) {
			int val;
			val = buckets[i];
			if (val > 65535)
				val = 65535;
			data[i * 2] = val&0xff;
			data[i * 2 + 1] = (val >> 8) & 0xff;
		}
		free(buckets);
		write_data(f, data, num_buckets * 2);
		free(data);
	} else
		free(buckets);

	fclose(f);
}
4302
/* profiling samples the CPU PC as quickly as OpenOCD is able,
 * which will be used as a random sampling of PC */
/* CMD_ARGV[0]: value passed straight to target_profiling() as "offset"
 * (presumably a duration/parameter of the sampler — confirm against the
 * per-target target_profiling implementations).
 * CMD_ARGV[1]: gmon.out output file name.
 * CMD_ARGV[2]/[3] (optional): histogram address range.
 * The target's halted/running state is restored after sampling. */
COMMAND_HANDLER(handle_profile_command)
{
	struct target *target = get_current_target(CMD_CTX);

	if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
		return ERROR_COMMAND_SYNTAX_ERROR;

	const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
	uint32_t offset;
	uint32_t num_of_samples;
	int retval = ERROR_OK;
	/* remembered so the pre-profiling state can be restored below */
	bool halted_before_profiling = target->state == TARGET_HALTED;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);

	uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
	if (!samples) {
		LOG_ERROR("No memory to store samples.");
		return ERROR_FAIL;
	}

	uint64_t timestart_ms = timeval_ms();
	/**
	 * Some cores let us sample the PC without the
	 * annoying halt/resume step; for example, ARMv7 PCSR.
	 * Provide a way to use that more efficient mechanism.
	 */
	retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
				&num_of_samples, offset);
	if (retval != ERROR_OK) {
		free(samples);
		return retval;
	}
	uint32_t duration_ms = timeval_ms() - timestart_ms;

	assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);

	/* refresh target->state before comparing it below */
	retval = target_poll(target);
	if (retval != ERROR_OK) {
		free(samples);
		return retval;
	}

	if (target->state == TARGET_RUNNING && halted_before_profiling) {
		/* The target was halted before we started and is running now. Halt it,
		 * for consistency. */
		retval = target_halt(target);
		if (retval != ERROR_OK) {
			free(samples);
			return retval;
		}
	} else if (target->state == TARGET_HALTED && !halted_before_profiling) {
		/* The target was running before we started and is halted now. Resume
		 * it, for consistency. */
		retval = target_resume(target, 1, 0, 0, 0);
		if (retval != ERROR_OK) {
			free(samples);
			return retval;
		}
	}

	retval = target_poll(target);
	if (retval != ERROR_OK) {
		free(samples);
		return retval;
	}

	uint32_t start_address = 0;
	uint32_t end_address = 0;
	bool with_range = false;
	if (CMD_ARGC == 4) {
		with_range = true;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
	}

	write_gmon(samples, num_of_samples, CMD_ARGV[1],
			with_range, start_address, end_address, target, duration_ms);
	command_print(CMD, "Wrote %s", CMD_ARGV[1]);

	free(samples);
	return retval;
}
4388
4389 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4390 {
4391 char *namebuf;
4392 Jim_Obj *obj_name, *obj_val;
4393 int result;
4394
4395 namebuf = alloc_printf("%s(%d)", varname, idx);
4396 if (!namebuf)
4397 return JIM_ERR;
4398
4399 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4400 jim_wide wide_val = val;
4401 obj_val = Jim_NewWideObj(interp, wide_val);
4402 if (!obj_name || !obj_val) {
4403 free(namebuf);
4404 return JIM_ERR;
4405 }
4406
4407 Jim_IncrRefCount(obj_name);
4408 Jim_IncrRefCount(obj_val);
4409 result = Jim_SetVariable(interp, obj_name, obj_val);
4410 Jim_DecrRefCount(interp, obj_name);
4411 Jim_DecrRefCount(interp, obj_val);
4412 free(namebuf);
4413 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4414 return result;
4415 }
4416
4417 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4418 {
4419 struct command_context *context;
4420 struct target *target;
4421
4422 context = current_command_context(interp);
4423 assert(context);
4424
4425 target = get_current_target(context);
4426 if (!target) {
4427 LOG_ERROR("mem2array: no current target");
4428 return JIM_ERR;
4429 }
4430
4431 return target_mem2array(interp, target, argc - 1, argv + 1);
4432 }
4433
/* Implements the body of the Tcl "mem2array" command: read nelems
 * elements of the given bit width from target memory at addr and store
 * each into the Tcl array variable "varname(index)".
 * Returns JIM_OK on success; on failure sets an error message as the
 * interp result and returns JIM_ERR. */
static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
{
	int e;

	/* argv[0] = name of array to receive the data
	 * argv[1] = desired element width in bits
	 * argv[2] = memory address
	 * argv[3] = count of times to read
	 * argv[4] = optional "phys"
	 */
	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
		return JIM_ERR;
	}

	/* Arg 0: Name of the array variable */
	const char *varname = Jim_GetString(argv[0], NULL);

	/* Arg 1: Bit width of one element */
	long l;
	e = Jim_GetLong(interp, argv[1], &l);
	if (e != JIM_OK)
		return e;
	const unsigned int width_bits = l;

	if (width_bits != 8 &&
		width_bits != 16 &&
		width_bits != 32 &&
		width_bits != 64) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
		return JIM_ERR;
	}
	const unsigned int width = width_bits / 8;

	/* Arg 2: Memory address */
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[2], &wide_addr);
	if (e != JIM_OK)
		return e;
	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 3: Number of elements to read */
	e = Jim_GetLong(interp, argv[3], &l);
	if (e != JIM_OK)
		return e;
	size_t len = l;

	/* Arg 4: phys */
	bool is_phys = false;
	if (argc > 4) {
		int str_len = 0;
		const char *phys = Jim_GetString(argv[4], &str_len);
		if (!strncmp(phys, "phys", str_len))
			is_phys = true;
		else
			return JIM_ERR;
	}

	/* Argument checks */
	if (len == 0) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
		return JIM_ERR;
	}
	/* reject requests whose end address wraps around */
	if ((addr + (len * width)) < addr) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
		return JIM_ERR;
	}
	if (len > 65536) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"mem2array: too large read request, exceeds 64K items", NULL);
		return JIM_ERR;
	}

	/* the address must be naturally aligned for the element width */
	if ((width == 1) ||
		((width == 2) && ((addr & 1) == 0)) ||
		((width == 4) && ((addr & 3) == 0)) ||
		((width == 8) && ((addr & 7) == 0))) {
		/* alignment correct */
	} else {
		char buf[100];
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
				addr,
				width);
		Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
		return JIM_ERR;
	}

	/* Transfer loop */

	/* index counter */
	size_t idx = 0;

	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);
	if (!buffer)
		return JIM_ERR;

	/* assume ok */
	e = JIM_OK;
	while (len) {
		/* Slurp... in buffer size chunks */
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */

		int retval;
		if (is_phys)
			retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_read_memory(target, addr, width, chunk_len, buffer);
		if (retval != ERROR_OK) {
			/* BOO !*/
			LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
					addr,
					width,
					chunk_len);
			Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
			Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
			e = JIM_ERR;
			break;
		} else {
			/* decode each element per target endianness and
			 * publish it as varname(idx) */
			for (size_t i = 0; i < chunk_len ; i++, idx++) {
				uint64_t v = 0;
				switch (width) {
					case 8:
						v = target_buffer_get_u64(target, &buffer[i*width]);
						break;
					case 4:
						v = target_buffer_get_u32(target, &buffer[i*width]);
						break;
					case 2:
						v = target_buffer_get_u16(target, &buffer[i*width]);
						break;
					case 1:
						v = buffer[i] & 0x0ff;
						break;
				}
				new_u64_array_element(interp, varname, idx, v);
			}
			len -= chunk_len;
			addr += chunk_len * width;
		}
	}

	free(buffer);

	Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));

	return e;
}
4589
4590 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4591 {
4592 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4593 if (!namebuf)
4594 return JIM_ERR;
4595
4596 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4597 if (!obj_name) {
4598 free(namebuf);
4599 return JIM_ERR;
4600 }
4601
4602 Jim_IncrRefCount(obj_name);
4603 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4604 Jim_DecrRefCount(interp, obj_name);
4605 free(namebuf);
4606 if (!obj_val)
4607 return JIM_ERR;
4608
4609 jim_wide wide_val;
4610 int result = Jim_GetWide(interp, obj_val, &wide_val);
4611 *val = wide_val;
4612 return result;
4613 }
4614
4615 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4616 {
4617 struct command_context *context;
4618 struct target *target;
4619
4620 context = current_command_context(interp);
4621 assert(context);
4622
4623 target = get_current_target(context);
4624 if (!target) {
4625 LOG_ERROR("array2mem: no current target");
4626 return JIM_ERR;
4627 }
4628
4629 return target_array2mem(interp, target, argc-1, argv + 1);
4630 }
4631
4632 static int target_array2mem(Jim_Interp *interp, struct target *target,
4633 int argc, Jim_Obj *const *argv)
4634 {
4635 int e;
4636
4637 /* argv[0] = name of array from which to read the data
4638 * argv[1] = desired element width in bits
4639 * argv[2] = memory address
4640 * argv[3] = number of elements to write
4641 * argv[4] = optional "phys"
4642 */
4643 if (argc < 4 || argc > 5) {
4644 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4645 return JIM_ERR;
4646 }
4647
4648 /* Arg 0: Name of the array variable */
4649 const char *varname = Jim_GetString(argv[0], NULL);
4650
4651 /* Arg 1: Bit width of one element */
4652 long l;
4653 e = Jim_GetLong(interp, argv[1], &l);
4654 if (e != JIM_OK)
4655 return e;
4656 const unsigned int width_bits = l;
4657
4658 if (width_bits != 8 &&
4659 width_bits != 16 &&
4660 width_bits != 32 &&
4661 width_bits != 64) {
4662 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4663 Jim_AppendStrings(interp, Jim_GetResult(interp),
4664 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4665 return JIM_ERR;
4666 }
4667 const unsigned int width = width_bits / 8;
4668
4669 /* Arg 2: Memory address */
4670 jim_wide wide_addr;
4671 e = Jim_GetWide(interp, argv[2], &wide_addr);
4672 if (e != JIM_OK)
4673 return e;
4674 target_addr_t addr = (target_addr_t)wide_addr;
4675
4676 /* Arg 3: Number of elements to write */
4677 e = Jim_GetLong(interp, argv[3], &l);
4678 if (e != JIM_OK)
4679 return e;
4680 size_t len = l;
4681
4682 /* Arg 4: Phys */
4683 bool is_phys = false;
4684 if (argc > 4) {
4685 int str_len = 0;
4686 const char *phys = Jim_GetString(argv[4], &str_len);
4687 if (!strncmp(phys, "phys", str_len))
4688 is_phys = true;
4689 else
4690 return JIM_ERR;
4691 }
4692
4693 /* Argument checks */
4694 if (len == 0) {
4695 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4696 Jim_AppendStrings(interp, Jim_GetResult(interp),
4697 "array2mem: zero width read?", NULL);
4698 return JIM_ERR;
4699 }
4700
4701 if ((addr + (len * width)) < addr) {
4702 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4703 Jim_AppendStrings(interp, Jim_GetResult(interp),
4704 "array2mem: addr + len - wraps to zero?", NULL);
4705 return JIM_ERR;
4706 }
4707
4708 if (len > 65536) {
4709 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4710 Jim_AppendStrings(interp, Jim_GetResult(interp),
4711 "array2mem: too large memory write request, exceeds 64K items", NULL);
4712 return JIM_ERR;
4713 }
4714
4715 if ((width == 1) ||
4716 ((width == 2) && ((addr & 1) == 0)) ||
4717 ((width == 4) && ((addr & 3) == 0)) ||
4718 ((width == 8) && ((addr & 7) == 0))) {
4719 /* alignment correct */
4720 } else {
4721 char buf[100];
4722 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4723 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4724 addr,
4725 width);
4726 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4727 return JIM_ERR;
4728 }
4729
4730 /* Transfer loop */
4731
4732 /* assume ok */
4733 e = JIM_OK;
4734
4735 const size_t buffersize = 4096;
4736 uint8_t *buffer = malloc(buffersize);
4737 if (!buffer)
4738 return JIM_ERR;
4739
4740 /* index counter */
4741 size_t idx = 0;
4742
4743 while (len) {
4744 /* Slurp... in buffer size chunks */
4745 const unsigned int max_chunk_len = buffersize / width;
4746
4747 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4748
4749 /* Fill the buffer */
4750 for (size_t i = 0; i < chunk_len; i++, idx++) {
4751 uint64_t v = 0;
4752 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4753 free(buffer);
4754 return JIM_ERR;
4755 }
4756 switch (width) {
4757 case 8:
4758 target_buffer_set_u64(target, &buffer[i * width], v);
4759 break;
4760 case 4:
4761 target_buffer_set_u32(target, &buffer[i * width], v);
4762 break;
4763 case 2:
4764 target_buffer_set_u16(target, &buffer[i * width], v);
4765 break;
4766 case 1:
4767 buffer[i] = v & 0x0ff;
4768 break;
4769 }
4770 }
4771 len -= chunk_len;
4772
4773 /* Write the buffer to memory */
4774 int retval;
4775 if (is_phys)
4776 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4777 else
4778 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4779 if (retval != ERROR_OK) {
4780 /* BOO !*/
4781 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4782 addr,
4783 width,
4784 chunk_len);
4785 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4786 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4787 e = JIM_ERR;
4788 break;
4789 }
4790 addr += chunk_len * width;
4791 }
4792
4793 free(buffer);
4794
4795 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4796
4797 return e;
4798 }
4799
/* FIX? should we propagate errors here rather than printing them
 * and continuing?
 */
/* Run every Tcl body registered (via "-event") for event @p e on
 * @p target.  Script errors are reported to the user but otherwise
 * ignored, except ERROR_COMMAND_CLOSE_CONNECTION which aborts the
 * remaining handlers. */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
					target->target_number,
					target_name(target),
					target_type_name(target),
					e,
					jim_nvp_value2name_simple(nvp_target_event, e)->name,
					Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			cmd_ctx->current_target_override = saved_target_override;

			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* unwrap "return" so a script's return code is inspected */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
						jim_nvp_value2name_simple(nvp_target_event, e)->name,
						target_name(target),
						Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
4848
4849 /**
4850 * Returns true only if the target has a handler for the specified event.
4851 */
4852 bool target_has_event_action(struct target *target, enum target_event event)
4853 {
4854 struct target_event_action *teap;
4855
4856 for (teap = target->event_action; teap; teap = teap->next) {
4857 if (teap->event == event)
4858 return true;
4859 }
4860 return false;
4861 }
4862
/* Keys handled by target_configure(); each corresponds to one "-option"
 * accepted by the "configure"/"cget" target subcommands. */
enum target_cfg_param {
	TCFG_TYPE,
	TCFG_EVENT,
	TCFG_WORK_AREA_VIRT,
	TCFG_WORK_AREA_PHYS,
	TCFG_WORK_AREA_SIZE,
	TCFG_WORK_AREA_BACKUP,
	TCFG_ENDIAN,
	TCFG_COREID,
	TCFG_CHAIN_POSITION,
	TCFG_DBGBASE,
	TCFG_RTOS,
	TCFG_DEFER_EXAMINE,
	TCFG_GDB_PORT,
	TCFG_GDB_MAX_CONNECTIONS,
};
4879
/* Name/value map from "-option" strings to enum target_cfg_param,
 * used by target_configure() via jim_getopt_nvp(). */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type",             .value = TCFG_TYPE },
	{ .name = "-event",            .value = TCFG_EVENT },
	{ .name = "-work-area-virt",   .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys",   .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size",   .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian",           .value = TCFG_ENDIAN },
	{ .name = "-coreid",           .value = TCFG_COREID },
	{ .name = "-chain-position",   .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase",          .value = TCFG_DBGBASE },
	{ .name = "-rtos",             .value = TCFG_RTOS },
	{ .name = "-defer-examine",    .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port",         .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
4897
/* Process the "-option [value]" arguments of a target's "configure"
 * (goi->isconfigure true) or "cget" (false) subcommand.
 *
 * The target type's own target_jim_configure() hook gets first chance
 * at each option; what it does not consume is matched against
 * nvp_config_opts.  For "cget", every case falls through to the shared
 * no_params label to reject trailing arguments and then reports the
 * current value via the interp result.  Returns JIM_OK once all
 * arguments are consumed, JIM_ERR on any parse/validation failure. */
static int target_configure(struct jim_getopt_info *goi, struct target *target)
{
	struct jim_nvp *n;
	Jim_Obj *o;
	jim_wide w;
	int e;

	/* parse config or cget options ... */
	while (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);
		/* jim_getopt_debug(goi); */

		if (target->type->target_jim_configure) {
			/* target defines a configure function */
			/* target gets first dibs on parameters */
			e = (*(target->type->target_jim_configure))(target, goi);
			if (e == JIM_OK) {
				/* more? */
				continue;
			}
			if (e == JIM_ERR) {
				/* An error */
				return e;
			}
			/* otherwise we 'continue' below */
		}
		e = jim_getopt_nvp(goi, nvp_config_opts, &n);
		if (e != JIM_OK) {
			jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
			return e;
		}
		switch (n->value) {
		case TCFG_TYPE:
			/* not settable */
			if (goi->isconfigure) {
				Jim_SetResultFormatted(goi->interp,
						"not settable: %s", n->name);
				return JIM_ERR;
			} else {
				/* shared "cget must take no extra args" check,
				 * reached via goto from the other cases */
no_params:
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}
			}
			Jim_SetResultString(goi->interp,
					target_type_name(target), -1);
			/* loop for more */
			break;
		case TCFG_EVENT:
			if (goi->argc == 0) {
				Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
				return JIM_ERR;
			}

			e = jim_getopt_nvp(goi, nvp_target_event, &n);
			if (e != JIM_OK) {
				jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
				return e;
			}

			if (goi->isconfigure) {
				if (goi->argc != 1) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
					return JIM_ERR;
				}
			} else {
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
					return JIM_ERR;
				}
			}

			{
				struct target_event_action *teap;

				teap = target->event_action;
				/* replace existing? */
				while (teap) {
					if (teap->event == (enum target_event)n->value)
						break;
					teap = teap->next;
				}

				if (goi->isconfigure) {
					/* START_DEPRECATED_TPIU */
					if (n->value == TARGET_EVENT_TRACE_CONFIG)
						LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
					/* END_DEPRECATED_TPIU */

					bool replace = true;
					if (!teap) {
						/* create new */
						teap = calloc(1, sizeof(*teap));
						replace = false;
					}
					teap->event = n->value;
					teap->interp = goi->interp;
					jim_getopt_obj(goi, &o);
					if (teap->body)
						Jim_DecrRefCount(teap->interp, teap->body);
					teap->body = Jim_DuplicateObj(goi->interp, o);
					/*
					 * FIXME:
					 * Tcl/TK - "tk events" have a nice feature.
					 * See the "BIND" command.
					 * We should support that here.
					 * You can specify %X and %Y in the event code.
					 * The idea is: %T - target name.
					 * The idea is: %N - target number
					 * The idea is: %E - event name.
					 */
					Jim_IncrRefCount(teap->body);

					if (!replace) {
						/* add to head of event list */
						teap->next = target->event_action;
						target->event_action = teap;
					}
					Jim_SetEmptyResult(goi->interp);
				} else {
					/* get */
					if (!teap)
						Jim_SetEmptyResult(goi->interp);
					else
						Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
				}
			}
			/* loop for more */
			break;

		case TCFG_WORK_AREA_VIRT:
			if (goi->isconfigure) {
				/* changing the layout invalidates existing areas */
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_virt = w;
				target->working_area_virt_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_PHYS:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_phys = w;
				target->working_area_phys_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_SIZE:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_size = w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_BACKUP:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* make this exactly 1 or 0 */
				target->backup_working_area = (!!w);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
			/* loop for more e*/
			break;


		case TCFG_ENDIAN:
			if (goi->isconfigure) {
				e = jim_getopt_nvp(goi, nvp_target_endian, &n);
				if (e != JIM_OK) {
					jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
					return e;
				}
				target->endianness = n->value;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			/* fall back to little endian if the stored value is unknown */
			n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			if (!n->name) {
				target->endianness = TARGET_LITTLE_ENDIAN;
				n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			}
			Jim_SetResultString(goi->interp, n->name, -1);
			/* loop for more */
			break;

		case TCFG_COREID:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->coreid = (int32_t)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
			/* loop for more */
			break;

		case TCFG_CHAIN_POSITION:
			if (goi->isconfigure) {
				Jim_Obj *o_t;
				struct jtag_tap *tap;

				if (target->has_dap) {
					Jim_SetResultString(goi->interp,
						"target requires -dap parameter instead of -chain-position!", -1);
					return JIM_ERR;
				}

				target_free_all_working_areas(target);
				e = jim_getopt_obj(goi, &o_t);
				if (e != JIM_OK)
					return e;
				tap = jtag_tap_by_jim_obj(goi->interp, o_t);
				if (!tap)
					return JIM_ERR;
				target->tap = tap;
				target->tap_configured = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
			/* loop for more e*/
			break;
		case TCFG_DBGBASE:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->dbgbase = (uint32_t)w;
				target->dbgbase_set = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
			/* loop for more */
			break;
		case TCFG_RTOS:
			/* RTOS */
			{
				int result = rtos_create(goi, target);
				if (result != JIM_OK)
					return result;
			}
			/* loop for more */
			break;

		case TCFG_DEFER_EXAMINE:
			/* DEFER_EXAMINE */
			target->defer_examine = true;
			/* loop for more */
			break;

		case TCFG_GDB_PORT:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
					return JIM_ERR;
				}

				const char *s;
				e = jim_getopt_string(goi, &s, NULL);
				if (e != JIM_OK)
					return e;
				free(target->gdb_port_override);
				target->gdb_port_override = strdup(s);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
			/* loop for more */
			break;

		case TCFG_GDB_MAX_CONNECTIONS:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
					return JIM_ERR;
				}

				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
			break;
		}
	} /* while (goi->argc) */


	/* done - we return */
	return JIM_OK;
}
5234
5235 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5236 {
5237 struct command *c = jim_to_command(interp);
5238 struct jim_getopt_info goi;
5239
5240 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5241 goi.isconfigure = !strcmp(c->name, "configure");
5242 if (goi.argc < 1) {
5243 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5244 "missing: -option ...");
5245 return JIM_ERR;
5246 }
5247 struct command_context *cmd_ctx = current_command_context(interp);
5248 assert(cmd_ctx);
5249 struct target *target = get_current_target(cmd_ctx);
5250 return target_configure(&goi, target);
5251 }
5252
5253 static int jim_target_mem2array(Jim_Interp *interp,
5254 int argc, Jim_Obj *const *argv)
5255 {
5256 struct command_context *cmd_ctx = current_command_context(interp);
5257 assert(cmd_ctx);
5258 struct target *target = get_current_target(cmd_ctx);
5259 return target_mem2array(interp, target, argc - 1, argv + 1);
5260 }
5261
5262 static int jim_target_array2mem(Jim_Interp *interp,
5263 int argc, Jim_Obj *const *argv)
5264 {
5265 struct command_context *cmd_ctx = current_command_context(interp);
5266 assert(cmd_ctx);
5267 struct target *target = get_current_target(cmd_ctx);
5268 return target_array2mem(interp, target, argc - 1, argv + 1);
5269 }
5270
/* Common error result for target subcommands issued while the
 * target's TAP is disabled. */
static int jim_target_tap_disabled(Jim_Interp *interp)
{
	Jim_SetResultFormatted(interp, "[TAP is disabled]");
	return JIM_ERR;
}
5276
5277 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5278 {
5279 bool allow_defer = false;
5280
5281 struct jim_getopt_info goi;
5282 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5283 if (goi.argc > 1) {
5284 const char *cmd_name = Jim_GetString(argv[0], NULL);
5285 Jim_SetResultFormatted(goi.interp,
5286 "usage: %s ['allow-defer']", cmd_name);
5287 return JIM_ERR;
5288 }
5289 if (goi.argc > 0 &&
5290 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5291 /* consume it */
5292 Jim_Obj *obj;
5293 int e = jim_getopt_obj(&goi, &obj);
5294 if (e != JIM_OK)
5295 return e;
5296 allow_defer = true;
5297 }
5298
5299 struct command_context *cmd_ctx = current_command_context(interp);
5300 assert(cmd_ctx);
5301 struct target *target = get_current_target(cmd_ctx);
5302 if (!target->tap->enabled)
5303 return jim_target_tap_disabled(interp);
5304
5305 if (allow_defer && target->defer_examine) {
5306 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5307 LOG_INFO("Use arp_examine command to examine it manually!");
5308 return JIM_OK;
5309 }
5310
5311 int e = target->type->examine(target);
5312 if (e != ERROR_OK) {
5313 target_reset_examined(target);
5314 return JIM_ERR;
5315 }
5316
5317 target_set_examined(target);
5318
5319 return JIM_OK;
5320 }
5321
5322 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5323 {
5324 struct command_context *cmd_ctx = current_command_context(interp);
5325 assert(cmd_ctx);
5326 struct target *target = get_current_target(cmd_ctx);
5327
5328 Jim_SetResultBool(interp, target_was_examined(target));
5329 return JIM_OK;
5330 }
5331
5332 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5333 {
5334 struct command_context *cmd_ctx = current_command_context(interp);
5335 assert(cmd_ctx);
5336 struct target *target = get_current_target(cmd_ctx);
5337
5338 Jim_SetResultBool(interp, target->defer_examine);
5339 return JIM_OK;
5340 }
5341
5342 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5343 {
5344 if (argc != 1) {
5345 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5346 return JIM_ERR;
5347 }
5348 struct command_context *cmd_ctx = current_command_context(interp);
5349 assert(cmd_ctx);
5350 struct target *target = get_current_target(cmd_ctx);
5351
5352 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5353 return JIM_ERR;
5354
5355 return JIM_OK;
5356 }
5357
5358 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5359 {
5360 if (argc != 1) {
5361 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5362 return JIM_ERR;
5363 }
5364 struct command_context *cmd_ctx = current_command_context(interp);
5365 assert(cmd_ctx);
5366 struct target *target = get_current_target(cmd_ctx);
5367 if (!target->tap->enabled)
5368 return jim_target_tap_disabled(interp);
5369
5370 int e;
5371 if (!(target_was_examined(target)))
5372 e = ERROR_TARGET_NOT_EXAMINED;
5373 else
5374 e = target->type->poll(target);
5375 if (e != ERROR_OK)
5376 return JIM_ERR;
5377 return JIM_OK;
5378 }
5379
5380 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5381 {
5382 struct jim_getopt_info goi;
5383 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5384
5385 if (goi.argc != 2) {
5386 Jim_WrongNumArgs(interp, 0, argv,
5387 "([tT]|[fF]|assert|deassert) BOOL");
5388 return JIM_ERR;
5389 }
5390
5391 struct jim_nvp *n;
5392 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5393 if (e != JIM_OK) {
5394 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5395 return e;
5396 }
5397 /* the halt or not param */
5398 jim_wide a;
5399 e = jim_getopt_wide(&goi, &a);
5400 if (e != JIM_OK)
5401 return e;
5402
5403 struct command_context *cmd_ctx = current_command_context(interp);
5404 assert(cmd_ctx);
5405 struct target *target = get_current_target(cmd_ctx);
5406 if (!target->tap->enabled)
5407 return jim_target_tap_disabled(interp);
5408
5409 if (!target->type->assert_reset || !target->type->deassert_reset) {
5410 Jim_SetResultFormatted(interp,
5411 "No target-specific reset for %s",
5412 target_name(target));
5413 return JIM_ERR;
5414 }
5415
5416 if (target->defer_examine)
5417 target_reset_examined(target);
5418
5419 /* determine if we should halt or not. */
5420 target->reset_halt = (a != 0);
5421 /* When this happens - all workareas are invalid. */
5422 target_free_all_working_areas_restore(target, 0);
5423
5424 /* do the assert */
5425 if (n->value == NVP_ASSERT)
5426 e = target->type->assert_reset(target);
5427 else
5428 e = target->type->deassert_reset(target);
5429 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5430 }
5431
5432 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5433 {
5434 if (argc != 1) {
5435 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5436 return JIM_ERR;
5437 }
5438 struct command_context *cmd_ctx = current_command_context(interp);
5439 assert(cmd_ctx);
5440 struct target *target = get_current_target(cmd_ctx);
5441 if (!target->tap->enabled)
5442 return jim_target_tap_disabled(interp);
5443 int e = target->type->halt(target);
5444 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5445 }
5446
5447 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5448 {
5449 struct jim_getopt_info goi;
5450 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5451
5452 /* params: <name> statename timeoutmsecs */
5453 if (goi.argc != 2) {
5454 const char *cmd_name = Jim_GetString(argv[0], NULL);
5455 Jim_SetResultFormatted(goi.interp,
5456 "%s <state_name> <timeout_in_msec>", cmd_name);
5457 return JIM_ERR;
5458 }
5459
5460 struct jim_nvp *n;
5461 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5462 if (e != JIM_OK) {
5463 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5464 return e;
5465 }
5466 jim_wide a;
5467 e = jim_getopt_wide(&goi, &a);
5468 if (e != JIM_OK)
5469 return e;
5470 struct command_context *cmd_ctx = current_command_context(interp);
5471 assert(cmd_ctx);
5472 struct target *target = get_current_target(cmd_ctx);
5473 if (!target->tap->enabled)
5474 return jim_target_tap_disabled(interp);
5475
5476 e = target_wait_state(target, n->value, a);
5477 if (e != ERROR_OK) {
5478 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5479 Jim_SetResultFormatted(goi.interp,
5480 "target: %s wait %s fails (%#s) %s",
5481 target_name(target), n->name,
5482 obj, target_strerror_safe(e));
5483 return JIM_ERR;
5484 }
5485 return JIM_OK;
5486 }
5487 /* List for human, Events defined for this target.
5488 * scripts/programs should use 'name cget -event NAME'
5489 */
5490 COMMAND_HANDLER(handle_target_event_list)
5491 {
5492 struct target *target = get_current_target(CMD_CTX);
5493 struct target_event_action *teap = target->event_action;
5494
5495 command_print(CMD, "Event actions for target (%d) %s\n",
5496 target->target_number,
5497 target_name(target));
5498 command_print(CMD, "%-25s | Body", "Event");
5499 command_print(CMD, "------------------------- | "
5500 "----------------------------------------");
5501 while (teap) {
5502 struct jim_nvp *opt = jim_nvp_value2name_simple(nvp_target_event, teap->event);
5503 command_print(CMD, "%-25s | %s",
5504 opt->name, Jim_GetString(teap->body, NULL));
5505 teap = teap->next;
5506 }
5507 command_print(CMD, "***END***");
5508 return ERROR_OK;
5509 }
5510 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5511 {
5512 if (argc != 1) {
5513 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5514 return JIM_ERR;
5515 }
5516 struct command_context *cmd_ctx = current_command_context(interp);
5517 assert(cmd_ctx);
5518 struct target *target = get_current_target(cmd_ctx);
5519 Jim_SetResultString(interp, target_state_name(target), -1);
5520 return JIM_OK;
5521 }
5522 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5523 {
5524 struct jim_getopt_info goi;
5525 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5526 if (goi.argc != 1) {
5527 const char *cmd_name = Jim_GetString(argv[0], NULL);
5528 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5529 return JIM_ERR;
5530 }
5531 struct jim_nvp *n;
5532 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5533 if (e != JIM_OK) {
5534 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5535 return e;
5536 }
5537 struct command_context *cmd_ctx = current_command_context(interp);
5538 assert(cmd_ctx);
5539 struct target *target = get_current_target(cmd_ctx);
5540 target_handle_event(target, n->value);
5541 return JIM_OK;
5542 }
5543
5544 static const struct command_registration target_instance_command_handlers[] = {
5545 {
5546 .name = "configure",
5547 .mode = COMMAND_ANY,
5548 .jim_handler = jim_target_configure,
5549 .help = "configure a new target for use",
5550 .usage = "[target_attribute ...]",
5551 },
5552 {
5553 .name = "cget",
5554 .mode = COMMAND_ANY,
5555 .jim_handler = jim_target_configure,
5556 .help = "returns the specified target attribute",
5557 .usage = "target_attribute",
5558 },
5559 {
5560 .name = "mwd",
5561 .handler = handle_mw_command,
5562 .mode = COMMAND_EXEC,
5563 .help = "Write 64-bit word(s) to target memory",
5564 .usage = "address data [count]",
5565 },
5566 {
5567 .name = "mww",
5568 .handler = handle_mw_command,
5569 .mode = COMMAND_EXEC,
5570 .help = "Write 32-bit word(s) to target memory",
5571 .usage = "address data [count]",
5572 },
5573 {
5574 .name = "mwh",
5575 .handler = handle_mw_command,
5576 .mode = COMMAND_EXEC,
5577 .help = "Write 16-bit half-word(s) to target memory",
5578 .usage = "address data [count]",
5579 },
5580 {
5581 .name = "mwb",
5582 .handler = handle_mw_command,
5583 .mode = COMMAND_EXEC,
5584 .help = "Write byte(s) to target memory",
5585 .usage = "address data [count]",
5586 },
5587 {
5588 .name = "mdd",
5589 .handler = handle_md_command,
5590 .mode = COMMAND_EXEC,
5591 .help = "Display target memory as 64-bit words",
5592 .usage = "address [count]",
5593 },
5594 {
5595 .name = "mdw",
5596 .handler = handle_md_command,
5597 .mode = COMMAND_EXEC,
5598 .help = "Display target memory as 32-bit words",
5599 .usage = "address [count]",
5600 },
5601 {
5602 .name = "mdh",
5603 .handler = handle_md_command,
5604 .mode = COMMAND_EXEC,
5605 .help = "Display target memory as 16-bit half-words",
5606 .usage = "address [count]",
5607 },
5608 {
5609 .name = "mdb",
5610 .handler = handle_md_command,
5611 .mode = COMMAND_EXEC,
5612 .help = "Display target memory as 8-bit bytes",
5613 .usage = "address [count]",
5614 },
5615 {
5616 .name = "array2mem",
5617 .mode = COMMAND_EXEC,
5618 .jim_handler = jim_target_array2mem,
5619 .help = "Writes Tcl array of 8/16/32 bit numbers "
5620 "to target memory",
5621 .usage = "arrayname bitwidth address count",
5622 },
5623 {
5624 .name = "mem2array",
5625 .mode = COMMAND_EXEC,
5626 .jim_handler = jim_target_mem2array,
5627 .help = "Loads Tcl array of 8/16/32 bit numbers "
5628 "from target memory",
5629 .usage = "arrayname bitwidth address count",
5630 },
5631 {
5632 .name = "eventlist",
5633 .handler = handle_target_event_list,
5634 .mode = COMMAND_EXEC,
5635 .help = "displays a table of events defined for this target",
5636 .usage = "",
5637 },
5638 {
5639 .name = "curstate",
5640 .mode = COMMAND_EXEC,
5641 .jim_handler = jim_target_current_state,
5642 .help = "displays the current state of this target",
5643 },
5644 {
5645 .name = "arp_examine",
5646 .mode = COMMAND_EXEC,
5647 .jim_handler = jim_target_examine,
5648 .help = "used internally for reset processing",
5649 .usage = "['allow-defer']",
5650 },
5651 {
5652 .name = "was_examined",
5653 .mode = COMMAND_EXEC,
5654 .jim_handler = jim_target_was_examined,
5655 .help = "used internally for reset processing",
5656 },
5657 {
5658 .name = "examine_deferred",
5659 .mode = COMMAND_EXEC,
5660 .jim_handler = jim_target_examine_deferred,
5661 .help = "used internally for reset processing",
5662 },
5663 {
5664 .name = "arp_halt_gdb",
5665 .mode = COMMAND_EXEC,
5666 .jim_handler = jim_target_halt_gdb,
5667 .help = "used internally for reset processing to halt GDB",
5668 },
5669 {
5670 .name = "arp_poll",
5671 .mode = COMMAND_EXEC,
5672 .jim_handler = jim_target_poll,
5673 .help = "used internally for reset processing",
5674 },
5675 {
5676 .name = "arp_reset",
5677 .mode = COMMAND_EXEC,
5678 .jim_handler = jim_target_reset,
5679 .help = "used internally for reset processing",
5680 },
5681 {
5682 .name = "arp_halt",
5683 .mode = COMMAND_EXEC,
5684 .jim_handler = jim_target_halt,
5685 .help = "used internally for reset processing",
5686 },
5687 {
5688 .name = "arp_waitstate",
5689 .mode = COMMAND_EXEC,
5690 .jim_handler = jim_target_wait_state,
5691 .help = "used internally for reset processing",
5692 },
5693 {
5694 .name = "invoke-event",
5695 .mode = COMMAND_EXEC,
5696 .jim_handler = jim_target_invoke_event,
5697 .help = "invoke handler for specified event",
5698 .usage = "event_name",
5699 },
5700 COMMAND_REGISTRATION_DONE
5701 };
5702
/*
 * Worker for "target create <name> <type> [options ...]".
 *
 * Allocates and initializes a new struct target, looks up the requested
 * driver in target_types[], feeds the remaining arguments through
 * target_configure(), and finally registers the per-target Tcl command
 * (the target's name, with target_instance_command_handlers as its
 * subcommands).  On any failure every partially constructed resource is
 * released before returning an error.
 */
static int target_create(struct jim_getopt_info *goi)
{
	Jim_Obj *new_cmd;
	Jim_Cmd *cmd;
	const char *cp;
	int e;
	int x;
	struct target *target;
	struct command_context *cmd_ctx;

	cmd_ctx = current_command_context(goi->interp);
	assert(cmd_ctx);

	if (goi->argc < 3) {
		Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
		return JIM_ERR;
	}

	/* COMMAND */
	jim_getopt_obj(goi, &new_cmd);
	/* does this command exist? The target name must not collide with
	 * any existing Tcl command. */
	cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
	if (cmd) {
		cp = Jim_GetString(new_cmd, NULL);
		Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
		return JIM_ERR;
	}

	/* TYPE */
	e = jim_getopt_string(goi, &cp, NULL);
	if (e != JIM_OK)
		return e;
	/* the transport may substitute its own target type name */
	struct transport *tr = get_current_transport();
	if (tr->override_target) {
		e = tr->override_target(&cp);
		if (e != ERROR_OK) {
			LOG_ERROR("The selected transport doesn't support this target");
			return JIM_ERR;
		}
		LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
	}
	/* now does target type exist */
	for (x = 0 ; target_types[x] ; x++) {
		if (strcmp(cp, target_types[x]->name) == 0) {
			/* found */
			break;
		}
	}
	if (!target_types[x]) {
		/* unknown type: build an "A, B, ... or C" list of valid names */
		Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
		for (x = 0 ; target_types[x] ; x++) {
			if (target_types[x + 1]) {
				Jim_AppendStrings(goi->interp,
						Jim_GetResult(goi->interp),
						target_types[x]->name,
						", ", NULL);
			} else {
				Jim_AppendStrings(goi->interp,
						Jim_GetResult(goi->interp),
						" or ",
						target_types[x]->name, NULL);
			}
		}
		return JIM_ERR;
	}

	/* Create it */
	target = calloc(1, sizeof(struct target));
	if (!target) {
		LOG_ERROR("Out of memory");
		return JIM_ERR;
	}

	/* set target number */
	target->target_number = new_target_number();

	/* allocate memory for each unique target type: the type struct is
	 * copied so per-instance overrides don't affect other targets */
	target->type = malloc(sizeof(struct target_type));
	if (!target->type) {
		LOG_ERROR("Out of memory");
		free(target);
		return JIM_ERR;
	}

	memcpy(target->type, target_types[x], sizeof(struct target_type));

	/* default to first core, override with -coreid */
	target->coreid = 0;

	target->working_area = 0x0;
	target->working_area_size = 0x0;
	target->working_areas = NULL;
	target->backup_working_area = 0;

	target->state = TARGET_UNKNOWN;
	target->debug_reason = DBG_REASON_UNDEFINED;
	target->reg_cache = NULL;
	target->breakpoints = NULL;
	target->watchpoints = NULL;
	target->next = NULL;
	target->arch_info = NULL;

	target->verbose_halt_msg = true;

	target->halt_issued = false;

	/* initialize trace information */
	target->trace_info = calloc(1, sizeof(struct trace));
	if (!target->trace_info) {
		LOG_ERROR("Out of memory");
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	target->dbgmsg = NULL;
	target->dbg_msg_enabled = 0;

	target->endianness = TARGET_ENDIAN_UNKNOWN;

	target->rtos = NULL;
	target->rtos_auto_detect = false;

	target->gdb_port_override = NULL;
	target->gdb_max_connections = 1;

	/* Do the rest as "configure" options */
	goi->isconfigure = 1;
	e = target_configure(goi, target);

	if (e == JIM_OK) {
		/* -dap targets require -dap, all others require -chain-position */
		if (target->has_dap) {
			if (!target->dap_configured) {
				Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		} else {
			if (!target->tap_configured) {
				Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		}
		/* tap must be set after target was configured */
		if (!target->tap)
			e = JIM_ERR;
	}

	if (e != JIM_OK) {
		/* unwind everything allocated so far */
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return e;
	}

	if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
		/* default endian to little if not specified */
		target->endianness = TARGET_LITTLE_ENDIAN;
	}

	cp = Jim_GetString(new_cmd, NULL);
	target->cmd_name = strdup(cp);
	if (!target->cmd_name) {
		LOG_ERROR("Out of memory");
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* give the target driver a chance to set up its private state */
	if (target->type->target_create) {
		e = (*(target->type->target_create))(target, goi->interp);
		if (e != ERROR_OK) {
			LOG_DEBUG("target_create failed");
			free(target->cmd_name);
			rtos_destroy(target);
			free(target->gdb_port_override);
			free(target->trace_info);
			free(target->type);
			free(target);
			return JIM_ERR;
		}
	}

	/* create the target specific commands */
	if (target->type->commands) {
		e = register_commands(cmd_ctx, NULL, target->type->commands);
		if (e != ERROR_OK)
			LOG_ERROR("unable to register '%s' commands", cp);
	}

	/* now - create the new target name command */
	const struct command_registration target_subcommands[] = {
		{
			.chain = target_instance_command_handlers,
		},
		{
			.chain = target->type->commands,
		},
		COMMAND_REGISTRATION_DONE
	};
	const struct command_registration target_commands[] = {
		{
			.name = cp,
			.mode = COMMAND_ANY,
			.help = "target command group",
			.usage = "",
			.chain = target_subcommands,
		},
		COMMAND_REGISTRATION_DONE
	};
	e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
	if (e != ERROR_OK) {
		if (target->type->deinit_target)
			target->type->deinit_target(target);
		free(target->cmd_name);
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* append to end of list */
	append_to_list_all_targets(target);

	/* a newly created target becomes the current one */
	cmd_ctx->current_target = target;
	return JIM_OK;
}
5936
5937 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5938 {
5939 if (argc != 1) {
5940 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5941 return JIM_ERR;
5942 }
5943 struct command_context *cmd_ctx = current_command_context(interp);
5944 assert(cmd_ctx);
5945
5946 struct target *target = get_current_target_or_null(cmd_ctx);
5947 if (target)
5948 Jim_SetResultString(interp, target_name(target), -1);
5949 return JIM_OK;
5950 }
5951
5952 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5953 {
5954 if (argc != 1) {
5955 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5956 return JIM_ERR;
5957 }
5958 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5959 for (unsigned x = 0; target_types[x]; x++) {
5960 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5961 Jim_NewStringObj(interp, target_types[x]->name, -1));
5962 }
5963 return JIM_OK;
5964 }
5965
5966 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5967 {
5968 if (argc != 1) {
5969 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5970 return JIM_ERR;
5971 }
5972 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5973 struct target *target = all_targets;
5974 while (target) {
5975 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5976 Jim_NewStringObj(interp, target_name(target), -1));
5977 target = target->next;
5978 }
5979 return JIM_OK;
5980 }
5981
5982 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5983 {
5984 int i;
5985 const char *targetname;
5986 int retval, len;
5987 struct target *target = NULL;
5988 struct target_list *head, *curr, *new;
5989 curr = NULL;
5990 head = NULL;
5991
5992 retval = 0;
5993 LOG_DEBUG("%d", argc);
5994 /* argv[1] = target to associate in smp
5995 * argv[2] = target to associate in smp
5996 * argv[3] ...
5997 */
5998
5999 for (i = 1; i < argc; i++) {
6000
6001 targetname = Jim_GetString(argv[i], &len);
6002 target = get_target(targetname);
6003 LOG_DEBUG("%s ", targetname);
6004 if (target) {
6005 new = malloc(sizeof(struct target_list));
6006 new->target = target;
6007 new->next = NULL;
6008 if (!head) {
6009 head = new;
6010 curr = head;
6011 } else {
6012 curr->next = new;
6013 curr = new;
6014 }
6015 }
6016 }
6017 /* now parse the list of cpu and put the target in smp mode*/
6018 curr = head;
6019
6020 while (curr) {
6021 target = curr->target;
6022 target->smp = 1;
6023 target->head = head;
6024 curr = curr->next;
6025 }
6026
6027 if (target && target->rtos)
6028 retval = rtos_smp_init(head->target);
6029
6030 return retval;
6031 }
6032
6033
6034 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6035 {
6036 struct jim_getopt_info goi;
6037 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6038 if (goi.argc < 3) {
6039 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6040 "<name> <target_type> [<target_options> ...]");
6041 return JIM_ERR;
6042 }
6043 return target_create(&goi);
6044 }
6045
6046 static const struct command_registration target_subcommand_handlers[] = {
6047 {
6048 .name = "init",
6049 .mode = COMMAND_CONFIG,
6050 .handler = handle_target_init_command,
6051 .help = "initialize targets",
6052 .usage = "",
6053 },
6054 {
6055 .name = "create",
6056 .mode = COMMAND_CONFIG,
6057 .jim_handler = jim_target_create,
6058 .usage = "name type '-chain-position' name [options ...]",
6059 .help = "Creates and selects a new target",
6060 },
6061 {
6062 .name = "current",
6063 .mode = COMMAND_ANY,
6064 .jim_handler = jim_target_current,
6065 .help = "Returns the currently selected target",
6066 },
6067 {
6068 .name = "types",
6069 .mode = COMMAND_ANY,
6070 .jim_handler = jim_target_types,
6071 .help = "Returns the available target types as "
6072 "a list of strings",
6073 },
6074 {
6075 .name = "names",
6076 .mode = COMMAND_ANY,
6077 .jim_handler = jim_target_names,
6078 .help = "Returns the names of all targets as a list of strings",
6079 },
6080 {
6081 .name = "smp",
6082 .mode = COMMAND_ANY,
6083 .jim_handler = jim_target_smp,
6084 .usage = "targetname1 targetname2 ...",
6085 .help = "gather several target in a smp list"
6086 },
6087
6088 COMMAND_REGISTRATION_DONE
6089 };
6090
/* One image section staged in server memory by "fast_load_image". */
struct fast_load {
	target_addr_t address;	/* target address the data will be written to */
	uint8_t *data;		/* section payload, malloc'd */
	int length;		/* number of bytes in data */

};

/* Number of entries in 'fastload'; meaningful only while fastload != NULL. */
static int fastload_num;
/* Image staged by fast_load_image and consumed by fast_load; NULL when
 * nothing is staged. */
static struct fast_load *fastload;
6100
6101 static void free_fastload(void)
6102 {
6103 if (fastload) {
6104 for (int i = 0; i < fastload_num; i++)
6105 free(fastload[i].data);
6106 free(fastload);
6107 fastload = NULL;
6108 }
6109 }
6110
/*
 * "fast_load_image": parse an image file into server memory (the
 * 'fastload' array) without touching the target; a later "fast_load"
 * performs the actual writes.  Sections are clipped against the
 * optional [min_address, max_address) window.
 */
COMMAND_HANDLER(handle_fast_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;

	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct duration bench;
	duration_start(&bench);

	/* optional third argument selects the image format explicitly */
	retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
	if (retval != ERROR_OK)
		return retval;

	image_size = 0x0;
	retval = ERROR_OK;
	fastload_num = image.num_sections;
	fastload = malloc(sizeof(struct fast_load)*image.num_sections);
	if (!fastload) {
		command_print(CMD, "out of memory");
		image_close(&image);
		return ERROR_FAIL;
	}
	/* zero all entries so free_fastload() is safe on a partial fill */
	memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD, "error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* keep only the part of the section inside the address window */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {
			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			fastload[i].address = image.sections[i].base_address + offset;
			fastload[i].data = malloc(length);
			if (!fastload[i].data) {
				free(buffer);
				command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
						length);
				retval = ERROR_FAIL;
				break;
			}
			memcpy(fastload[i].data, buffer + offset, length);
			fastload[i].length = length;

			image_size += length;
			command_print(CMD, "%u bytes written at address 0x%8.8x",
					(unsigned int)length,
					((unsigned int)(image.sections[i].base_address + offset)));
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "Loaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));

		command_print(CMD,
				"WARNING: image has not been loaded to target!"
				"You can issue a 'fast_load' to finish loading.");
	}

	image_close(&image);

	/* on any failure, release the partially staged sections */
	if (retval != ERROR_OK)
		free_fastload();

	return retval;
}
6212
6213 COMMAND_HANDLER(handle_fast_load_command)
6214 {
6215 if (CMD_ARGC > 0)
6216 return ERROR_COMMAND_SYNTAX_ERROR;
6217 if (!fastload) {
6218 LOG_ERROR("No image in memory");
6219 return ERROR_FAIL;
6220 }
6221 int i;
6222 int64_t ms = timeval_ms();
6223 int size = 0;
6224 int retval = ERROR_OK;
6225 for (i = 0; i < fastload_num; i++) {
6226 struct target *target = get_current_target(CMD_CTX);
6227 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6228 (unsigned int)(fastload[i].address),
6229 (unsigned int)(fastload[i].length));
6230 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6231 if (retval != ERROR_OK)
6232 break;
6233 size += fastload[i].length;
6234 }
6235 if (retval == ERROR_OK) {
6236 int64_t after = timeval_ms();
6237 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6238 }
6239 return retval;
6240 }
6241
/* Top-level "targets" and "target" commands; registered with the
 * command context by target_register_commands() below. */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
6260
/* Register the top-level target command group ("target", "targets")
 * with the given command context. */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6265
/* When true, advice about performance-improving options is printed after
 * each reset; toggled by handle_target_reset_nag() below. */
static bool target_reset_nag = true;

/* Accessor used by reset handling code to decide whether to nag. */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
6272
/* Command handler: parse a boolean argument into target_reset_nag,
 * enabling/disabling the post-reset performance advice. */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
		&target_reset_nag, "Nag after each reset about options to improve "
		"performance");
}
6279
6280 COMMAND_HANDLER(handle_ps_command)
6281 {
6282 struct target *target = get_current_target(CMD_CTX);
6283 char *display;
6284 if (target->state != TARGET_HALTED) {
6285 LOG_INFO("target not halted !!");
6286 return ERROR_OK;
6287 }
6288
6289 if ((target->rtos) && (target->rtos->type)
6290 && (target->rtos->type->ps_command)) {
6291 display = target->rtos->type->ps_command(target);
6292 command_print(CMD, "%s", display);
6293 free(display);
6294 return ERROR_OK;
6295 } else {
6296 LOG_INFO("failed");
6297 return ERROR_TARGET_FAILURE;
6298 }
6299 }
6300
/* Dump 'size' bytes of 'buf' as space-separated hex, preceded by the
 * optional label 'text', finishing the output line. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
	if (text)
		command_print_sameline(cmd, "%s", text);
	int i = 0;
	while (i < size) {
		command_print_sameline(cmd, " %02x", buf[i]);
		i++;
	}
	command_print(cmd, " ");
}
6309
/*
 * "test_mem_access <size>": exercise target memory reads and writes of
 * 1/2/4-byte widths at every combination of target offset (0..3) and
 * host buffer misalignment (0..1), comparing each transfer against a
 * host-side replay and reporting throughput.  Uses a working area as the
 * scratch region on the target.
 *
 * NOTE(review): the malloc() results (test_pattern, read_ref, read_buf,
 * write_buf) are not checked before use — an OOM would crash; confirm
 * whether that is acceptable for this diagnostic command.
 */
COMMAND_HANDLER(handle_test_mem_access_command)
{
	struct target *target = get_current_target(CMD_CTX);
	uint32_t test_size;
	int retval = ERROR_OK;

	if (target->state != TARGET_HALTED) {
		LOG_INFO("target not halted !!");
		return ERROR_FAIL;
	}

	if (CMD_ARGC != 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);

	/* Test reads */
	size_t num_bytes = test_size + 4;

	struct working_area *wa = NULL;
	retval = target_alloc_working_area(target, num_bytes, &wa);
	if (retval != ERROR_OK) {
		LOG_ERROR("Not enough working area");
		return ERROR_FAIL;
	}

	uint8_t *test_pattern = malloc(num_bytes);

	/* fill the target scratch region with random bytes */
	for (size_t i = 0; i < num_bytes; i++)
		test_pattern[i] = rand();

	retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
	if (retval != ERROR_OK) {
		LOG_ERROR("Test pattern write failed");
		goto out;
	}

	for (int host_offset = 0; host_offset <= 1; host_offset++) {
		for (int size = 1; size <= 4; size *= 2) {
			for (int offset = 0; offset < 4; offset++) {
				uint32_t count = test_size / size;
				size_t host_bufsiz = (count + 2) * size + host_offset;
				uint8_t *read_ref = malloc(host_bufsiz);
				uint8_t *read_buf = malloc(host_bufsiz);

				/* both buffers start identical so untouched
				 * bytes can be checked for overruns too */
				for (size_t i = 0; i < host_bufsiz; i++) {
					read_ref[i] = rand();
					read_buf[i] = read_ref[i];
				}
				command_print_sameline(CMD,
						"Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
						size, offset, host_offset ? "un" : "");

				struct duration bench;
				duration_start(&bench);

				retval = target_read_memory(target, wa->address + offset, size, count,
						read_buf + size + host_offset);

				duration_measure(&bench);

				if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
					command_print(CMD, "Unsupported alignment");
					goto next;
				} else if (retval != ERROR_OK) {
					command_print(CMD, "Memory read failed");
					goto next;
				}

				/* replay on host */
				memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);

				/* check result */
				int result = memcmp(read_ref, read_buf, host_bufsiz);
				if (result == 0) {
					command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
							duration_elapsed(&bench),
							duration_kbps(&bench, count * size));
				} else {
					command_print(CMD, "Compare failed");
					binprint(CMD, "ref:", read_ref, host_bufsiz);
					binprint(CMD, "buf:", read_buf, host_bufsiz);
				}
next:
				free(read_ref);
				free(read_buf);
			}
		}
	}

out:
	free(test_pattern);

	target_free_working_area(target, wa);

	/* Test writes */
	num_bytes = test_size + 4 + 4 + 4;

	retval = target_alloc_working_area(target, num_bytes, &wa);
	if (retval != ERROR_OK) {
		LOG_ERROR("Not enough working area");
		return ERROR_FAIL;
	}

	test_pattern = malloc(num_bytes);

	for (size_t i = 0; i < num_bytes; i++)
		test_pattern[i] = rand();

	for (int host_offset = 0; host_offset <= 1; host_offset++) {
		for (int size = 1; size <= 4; size *= 2) {
			for (int offset = 0; offset < 4; offset++) {
				uint32_t count = test_size / size;
				size_t host_bufsiz = count * size + host_offset;
				uint8_t *read_ref = malloc(num_bytes);
				uint8_t *read_buf = malloc(num_bytes);
				uint8_t *write_buf = malloc(host_bufsiz);

				for (size_t i = 0; i < host_bufsiz; i++)
					write_buf[i] = rand();
				command_print_sameline(CMD,
						"Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
						size, offset, host_offset ? "un" : "");

				/* re-seed the target region before each write test */
				retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
				if (retval != ERROR_OK) {
					command_print(CMD, "Test pattern write failed");
					goto nextw;
				}

				/* replay on host */
				memcpy(read_ref, test_pattern, num_bytes);
				memcpy(read_ref + size + offset, write_buf + host_offset, count * size);

				struct duration bench;
				duration_start(&bench);

				retval = target_write_memory(target, wa->address + size + offset, size, count,
						write_buf + host_offset);

				duration_measure(&bench);

				if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
					command_print(CMD, "Unsupported alignment");
					goto nextw;
				} else if (retval != ERROR_OK) {
					command_print(CMD, "Memory write failed");
					goto nextw;
				}

				/* read back */
				retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
				if (retval != ERROR_OK) {
					command_print(CMD, "Test pattern write failed");
					goto nextw;
				}

				/* check result */
				int result = memcmp(read_ref, read_buf, num_bytes);
				if (result == 0) {
					command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
							duration_elapsed(&bench),
							duration_kbps(&bench, count * size));
				} else {
					command_print(CMD, "Compare failed");
					binprint(CMD, "ref:", read_ref, num_bytes);
					binprint(CMD, "buf:", read_buf, num_bytes);
				}
nextw:
				free(read_ref);
				free(read_buf);
			}
		}
	}

	free(test_pattern);

	target_free_working_area(target, wa);
	return retval;
}
6490
6491 static const struct command_registration target_exec_command_handlers[] = {
6492 {
6493 .name = "fast_load_image",
6494 .handler = handle_fast_load_image_command,
6495 .mode = COMMAND_ANY,
6496 .help = "Load image into server memory for later use by "
6497 "fast_load; primarily for profiling",
6498 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6499 "[min_address [max_length]]",
6500 },
6501 {
6502 .name = "fast_load",
6503 .handler = handle_fast_load_command,
6504 .mode = COMMAND_EXEC,
6505 .help = "loads active fast load image to current target "
6506 "- mainly for profiling purposes",
6507 .usage = "",
6508 },
6509 {
6510 .name = "profile",
6511 .handler = handle_profile_command,
6512 .mode = COMMAND_EXEC,
6513 .usage = "seconds filename [start end]",
6514 .help = "profiling samples the CPU PC",
6515 },
6516 /** @todo don't register virt2phys() unless target supports it */
6517 {
6518 .name = "virt2phys",
6519 .handler = handle_virt2phys_command,
6520 .mode = COMMAND_ANY,
6521 .help = "translate a virtual address into a physical address",
6522 .usage = "virtual_address",
6523 },
6524 {
6525 .name = "reg",
6526 .handler = handle_reg_command,
6527 .mode = COMMAND_EXEC,
6528 .help = "display (reread from target with \"force\") or set a register; "
6529 "with no arguments, displays all registers and their values",
6530 .usage = "[(register_number|register_name) [(value|'force')]]",
6531 },
6532 {
6533 .name = "poll",
6534 .handler = handle_poll_command,
6535 .mode = COMMAND_EXEC,
6536 .help = "poll target state; or reconfigure background polling",
6537 .usage = "['on'|'off']",
6538 },
6539 {
6540 .name = "wait_halt",
6541 .handler = handle_wait_halt_command,
6542 .mode = COMMAND_EXEC,
6543 .help = "wait up to the specified number of milliseconds "
6544 "(default 5000) for a previously requested halt",
6545 .usage = "[milliseconds]",
6546 },
6547 {
6548 .name = "halt",
6549 .handler = handle_halt_command,
6550 .mode = COMMAND_EXEC,
6551 .help = "request target to halt, then wait up to the specified "
6552 "number of milliseconds (default 5000) for it to complete",
6553 .usage = "[milliseconds]",
6554 },
6555 {
6556 .name = "resume",
6557 .handler = handle_resume_command,
6558 .mode = COMMAND_EXEC,
6559 .help = "resume target execution from current PC or address",
6560 .usage = "[address]",
6561 },
6562 {
6563 .name = "reset",
6564 .handler = handle_reset_command,
6565 .mode = COMMAND_EXEC,
6566 .usage = "[run|halt|init]",
6567 .help = "Reset all targets into the specified mode. "
6568 "Default reset mode is run, if not given.",
6569 },
6570 {
6571 .name = "soft_reset_halt",
6572 .handler = handle_soft_reset_halt_command,
6573 .mode = COMMAND_EXEC,
6574 .usage = "",
6575 .help = "halt the target and do a soft reset",
6576 },
6577 {
6578 .name = "step",
6579 .handler = handle_step_command,
6580 .mode = COMMAND_EXEC,
6581 .help = "step one instruction from current PC or address",
6582 .usage = "[address]",
6583 },
6584 {
6585 .name = "mdd",
6586 .handler = handle_md_command,
6587 .mode = COMMAND_EXEC,
6588 .help = "display memory double-words",
6589 .usage = "['phys'] address [count]",
6590 },
6591 {
6592 .name = "mdw",
6593 .handler = handle_md_command,
6594 .mode = COMMAND_EXEC,
6595 .help = "display memory words",
6596 .usage = "['phys'] address [count]",
6597 },
6598 {
6599 .name = "mdh",
6600 .handler = handle_md_command,
6601 .mode = COMMAND_EXEC,
6602 .help = "display memory half-words",
6603 .usage = "['phys'] address [count]",
6604 },
6605 {
6606 .name = "mdb",
6607 .handler = handle_md_command,
6608 .mode = COMMAND_EXEC,
6609 .help = "display memory bytes",
6610 .usage = "['phys'] address [count]",
6611 },
6612 {
6613 .name = "mwd",
6614 .handler = handle_mw_command,
6615 .mode = COMMAND_EXEC,
6616 .help = "write memory double-word",
6617 .usage = "['phys'] address value [count]",
6618 },
6619 {
6620 .name = "mww",
6621 .handler = handle_mw_command,
6622 .mode = COMMAND_EXEC,
6623 .help = "write memory word",
6624 .usage = "['phys'] address value [count]",
6625 },
6626 {
6627 .name = "mwh",
6628 .handler = handle_mw_command,
6629 .mode = COMMAND_EXEC,
6630 .help = "write memory half-word",
6631 .usage = "['phys'] address value [count]",
6632 },
6633 {
6634 .name = "mwb",
6635 .handler = handle_mw_command,
6636 .mode = COMMAND_EXEC,
6637 .help = "write memory byte",
6638 .usage = "['phys'] address value [count]",
6639 },
6640 {
6641 .name = "bp",
6642 .handler = handle_bp_command,
6643 .mode = COMMAND_EXEC,
6644 .help = "list or set hardware or software breakpoint",
6645 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
6646 },
6647 {
6648 .name = "rbp",
6649 .handler = handle_rbp_command,
6650 .mode = COMMAND_EXEC,
6651 .help = "remove breakpoint",
6652 .usage = "'all' | address",
6653 },
6654 {
6655 .name = "wp",
6656 .handler = handle_wp_command,
6657 .mode = COMMAND_EXEC,
6658 .help = "list (no params) or create watchpoints",
6659 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6660 },
6661 {
6662 .name = "rwp",
6663 .handler = handle_rwp_command,
6664 .mode = COMMAND_EXEC,
6665 .help = "remove watchpoint",
6666 .usage = "address",
6667 },
6668 {
6669 .name = "load_image",
6670 .handler = handle_load_image_command,
6671 .mode = COMMAND_EXEC,
6672 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6673 "[min_address] [max_length]",
6674 },
6675 {
6676 .name = "dump_image",
6677 .handler = handle_dump_image_command,
6678 .mode = COMMAND_EXEC,
6679 .usage = "filename address size",
6680 },
6681 {
6682 .name = "verify_image_checksum",
6683 .handler = handle_verify_image_checksum_command,
6684 .mode = COMMAND_EXEC,
6685 .usage = "filename [offset [type]]",
6686 },
6687 {
6688 .name = "verify_image",
6689 .handler = handle_verify_image_command,
6690 .mode = COMMAND_EXEC,
6691 .usage = "filename [offset [type]]",
6692 },
6693 {
6694 .name = "test_image",
6695 .handler = handle_test_image_command,
6696 .mode = COMMAND_EXEC,
6697 .usage = "filename [offset [type]]",
6698 },
6699 {
6700 .name = "mem2array",
6701 .mode = COMMAND_EXEC,
6702 .jim_handler = jim_mem2array,
6703 .help = "read 8/16/32 bit memory and return as a TCL array "
6704 "for script processing",
6705 .usage = "arrayname bitwidth address count",
6706 },
6707 {
6708 .name = "array2mem",
6709 .mode = COMMAND_EXEC,
6710 .jim_handler = jim_array2mem,
6711 .help = "convert a TCL array to memory locations "
6712 "and write the 8/16/32 bit values",
6713 .usage = "arrayname bitwidth address count",
6714 },
6715 {
6716 .name = "reset_nag",
6717 .handler = handle_target_reset_nag,
6718 .mode = COMMAND_ANY,
6719 .help = "Nag after each reset about options that could have been "
6720 "enabled to improve performance.",
6721 .usage = "['enable'|'disable']",
6722 },
6723 {
6724 .name = "ps",
6725 .handler = handle_ps_command,
6726 .mode = COMMAND_EXEC,
6727 .help = "list all tasks",
6728 .usage = "",
6729 },
6730 {
6731 .name = "test_mem_access",
6732 .handler = handle_test_mem_access_command,
6733 .mode = COMMAND_EXEC,
6734 .help = "Test the target's memory access functions",
6735 .usage = "size",
6736 },
6737
6738 COMMAND_REGISTRATION_DONE
6739 };
6740 static int target_register_user_commands(struct command_context *cmd_ctx)
6741 {
6742 int retval = ERROR_OK;
6743 retval = target_request_register_commands(cmd_ctx);
6744 if (retval != ERROR_OK)
6745 return retval;
6746
6747 retval = trace_register_commands(cmd_ctx);
6748 if (retval != ERROR_OK)
6749 return retval;
6750
6751
6752 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6753 }

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account and then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)