Support for Freescale LS102x SAP
src/target/target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/time_support.h>
45 #include <jtag/jtag.h>
46 #include <flash/nor/core.h>
47
48 #include "target.h"
49 #include "target_type.h"
50 #include "target_request.h"
51 #include "breakpoints.h"
52 #include "register.h"
53 #include "trace.h"
54 #include "image.h"
55 #include "rtos/rtos.h"
56 #include "transport/transport.h"
57
58 /* default halt wait timeout (ms) */
59 #define DEFAULT_HALT_TIMEOUT 5000
60
61 static int target_read_buffer_default(struct target *target, uint32_t address,
62 uint32_t count, uint8_t *buffer);
63 static int target_write_buffer_default(struct target *target, uint32_t address,
64 uint32_t count, const uint8_t *buffer);
65 static int target_array2mem(Jim_Interp *interp, struct target *target,
66 int argc, Jim_Obj * const *argv);
67 static int target_mem2array(Jim_Interp *interp, struct target *target,
68 int argc, Jim_Obj * const *argv);
69 static int target_register_user_commands(struct command_context *cmd_ctx);
70 static int target_get_gdb_fileio_info_default(struct target *target,
71 struct gdb_fileio_info *fileio_info);
72 static int target_gdb_fileio_end_default(struct target *target, int retcode,
73 int fileio_errno, bool ctrl_c);
74 static int target_profiling_default(struct target *target, uint32_t *samples,
75 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds);
76
77 /* targets */
78 extern struct target_type arm7tdmi_target;
79 extern struct target_type arm720t_target;
80 extern struct target_type arm9tdmi_target;
81 extern struct target_type arm920t_target;
82 extern struct target_type arm966e_target;
83 extern struct target_type arm946e_target;
84 extern struct target_type arm926ejs_target;
85 extern struct target_type fa526_target;
86 extern struct target_type feroceon_target;
87 extern struct target_type dragonite_target;
88 extern struct target_type xscale_target;
89 extern struct target_type cortexm_target;
90 extern struct target_type cortexa_target;
91 extern struct target_type cortexr4_target;
92 extern struct target_type arm11_target;
93 extern struct target_type ls1_sap_target;
94 extern struct target_type mips_m4k_target;
95 extern struct target_type avr_target;
96 extern struct target_type dsp563xx_target;
97 extern struct target_type dsp5680xx_target;
98 extern struct target_type testee_target;
99 extern struct target_type avr32_ap7k_target;
100 extern struct target_type hla_target;
101 extern struct target_type nds32_v2_target;
102 extern struct target_type nds32_v3_target;
103 extern struct target_type nds32_v3m_target;
104 extern struct target_type or1k_target;
105 extern struct target_type quark_x10xx_target;
106 extern struct target_type quark_d20xx_target;
107
108 static struct target_type *target_types[] = {
109 &arm7tdmi_target,
110 &arm9tdmi_target,
111 &arm920t_target,
112 &arm720t_target,
113 &arm966e_target,
114 &arm946e_target,
115 &arm926ejs_target,
116 &fa526_target,
117 &feroceon_target,
118 &dragonite_target,
119 &xscale_target,
120 &cortexm_target,
121 &cortexa_target,
122 &cortexr4_target,
123 &arm11_target,
124 &ls1_sap_target,
125 &mips_m4k_target,
126 &avr_target,
127 &dsp563xx_target,
128 &dsp5680xx_target,
129 &testee_target,
130 &avr32_ap7k_target,
131 &hla_target,
132 &nds32_v2_target,
133 &nds32_v3_target,
134 &nds32_v3m_target,
135 &or1k_target,
136 &quark_x10xx_target,
137 &quark_d20xx_target,
138 NULL,
139 };
140
141 struct target *all_targets;
142 static struct target_event_callback *target_event_callbacks;
143 static struct target_timer_callback *target_timer_callbacks;
144 LIST_HEAD(target_reset_callback_list);
145 LIST_HEAD(target_trace_callback_list);
146 static const int polling_interval = 100;
147
148 static const Jim_Nvp nvp_assert[] = {
149 { .name = "assert", NVP_ASSERT },
150 { .name = "deassert", NVP_DEASSERT },
151 { .name = "T", NVP_ASSERT },
152 { .name = "F", NVP_DEASSERT },
153 { .name = "t", NVP_ASSERT },
154 { .name = "f", NVP_DEASSERT },
155 { .name = NULL, .value = -1 }
156 };
157
158 static const Jim_Nvp nvp_error_target[] = {
159 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
160 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
161 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
162 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
163 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
164 { .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
165 { .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
166 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
167 { .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
168 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
169 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
170 { .value = -1, .name = NULL }
171 };
172
173 static const char *target_strerror_safe(int err)
174 {
175 const Jim_Nvp *n;
176
177 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
178 if (n->name == NULL)
179 return "unknown";
180 else
181 return n->name;
182 }
183
184 static const Jim_Nvp nvp_target_event[] = {
185
186 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
187 { .value = TARGET_EVENT_HALTED, .name = "halted" },
188 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
189 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
190 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
191
192 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
193 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
194
195 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
196 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
197 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
198 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
199 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
200 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
201 { .value = TARGET_EVENT_RESET_HALT_PRE, .name = "reset-halt-pre" },
202 { .value = TARGET_EVENT_RESET_HALT_POST, .name = "reset-halt-post" },
203 { .value = TARGET_EVENT_RESET_WAIT_PRE, .name = "reset-wait-pre" },
204 { .value = TARGET_EVENT_RESET_WAIT_POST, .name = "reset-wait-post" },
205 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
206 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
207
208 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
209 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
210
211 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
212 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
213
214 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
215 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
216
217 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
218 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },
219
220 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
221 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },
222
223 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
224
225 { .name = NULL, .value = -1 }
226 };
227
228 static const Jim_Nvp nvp_target_state[] = {
229 { .name = "unknown", .value = TARGET_UNKNOWN },
230 { .name = "running", .value = TARGET_RUNNING },
231 { .name = "halted", .value = TARGET_HALTED },
232 { .name = "reset", .value = TARGET_RESET },
233 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
234 { .name = NULL, .value = -1 },
235 };
236
237 static const Jim_Nvp nvp_target_debug_reason[] = {
238 { .name = "debug-request" , .value = DBG_REASON_DBGRQ },
239 { .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT },
240 { .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT },
241 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
242 { .name = "single-step" , .value = DBG_REASON_SINGLESTEP },
243 { .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED },
244 { .name = "program-exit" , .value = DBG_REASON_EXIT },
245 { .name = "undefined" , .value = DBG_REASON_UNDEFINED },
246 { .name = NULL, .value = -1 },
247 };
248
249 static const Jim_Nvp nvp_target_endian[] = {
250 { .name = "big", .value = TARGET_BIG_ENDIAN },
251 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
252 { .name = "be", .value = TARGET_BIG_ENDIAN },
253 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
254 { .name = NULL, .value = -1 },
255 };
256
257 static const Jim_Nvp nvp_reset_modes[] = {
258 { .name = "unknown", .value = RESET_UNKNOWN },
259 { .name = "run" , .value = RESET_RUN },
260 { .name = "halt" , .value = RESET_HALT },
261 { .name = "init" , .value = RESET_INIT },
262 { .name = NULL , .value = -1 },
263 };
264
265 const char *debug_reason_name(struct target *t)
266 {
267 const char *cp;
268
269 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
270 t->debug_reason)->name;
271 if (!cp) {
272 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
273 cp = "(*BUG*unknown*BUG*)";
274 }
275 return cp;
276 }
277
278 const char *target_state_name(struct target *t)
279 {
280 const char *cp;
281 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
282 if (!cp) {
283 LOG_ERROR("Invalid target state: %d", (int)(t->state));
284 cp = "(*BUG*unknown*BUG*)";
285 }
286 return cp;
287 }
288
289 const char *target_event_name(enum target_event event)
290 {
291 const char *cp;
292 cp = Jim_Nvp_value2name_simple(nvp_target_event, event)->name;
293 if (!cp) {
294 LOG_ERROR("Invalid target event: %d", (int)(event));
295 cp = "(*BUG*unknown*BUG*)";
296 }
297 return cp;
298 }
299
300 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
301 {
302 const char *cp;
303 cp = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
304 if (!cp) {
305 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
306 cp = "(*BUG*unknown*BUG*)";
307 }
308 return cp;
309 }
310
311 /* determine the number of the new target */
312 static int new_target_number(void)
313 {
314 struct target *t;
315 int x;
316
317 /* number is 0 based */
318 x = -1;
319 t = all_targets;
320 while (t) {
321 if (x < t->target_number)
322 x = t->target_number;
323 t = t->next;
324 }
325 return x + 1;
326 }
327
328 /* read a uint64_t from a buffer in target memory endianness */
329 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
330 {
331 if (target->endianness == TARGET_LITTLE_ENDIAN)
332 return le_to_h_u64(buffer);
333 else
334 return be_to_h_u64(buffer);
335 }
336
337 /* read a uint32_t from a buffer in target memory endianness */
338 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
339 {
340 if (target->endianness == TARGET_LITTLE_ENDIAN)
341 return le_to_h_u32(buffer);
342 else
343 return be_to_h_u32(buffer);
344 }
345
346 /* read a uint24_t from a buffer in target memory endianness */
347 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
348 {
349 if (target->endianness == TARGET_LITTLE_ENDIAN)
350 return le_to_h_u24(buffer);
351 else
352 return be_to_h_u24(buffer);
353 }
354
355 /* read a uint16_t from a buffer in target memory endianness */
356 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
357 {
358 if (target->endianness == TARGET_LITTLE_ENDIAN)
359 return le_to_h_u16(buffer);
360 else
361 return be_to_h_u16(buffer);
362 }
363
364 /* read a uint8_t from a buffer in target memory endianness */
365 static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
366 {
367 return *buffer & 0x0ff;
368 }
369
370 /* write a uint64_t to a buffer in target memory endianness */
371 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
372 {
373 if (target->endianness == TARGET_LITTLE_ENDIAN)
374 h_u64_to_le(buffer, value);
375 else
376 h_u64_to_be(buffer, value);
377 }
378
379 /* write a uint32_t to a buffer in target memory endianness */
380 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
381 {
382 if (target->endianness == TARGET_LITTLE_ENDIAN)
383 h_u32_to_le(buffer, value);
384 else
385 h_u32_to_be(buffer, value);
386 }
387
388 /* write a uint24_t to a buffer in target memory endianness */
389 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
390 {
391 if (target->endianness == TARGET_LITTLE_ENDIAN)
392 h_u24_to_le(buffer, value);
393 else
394 h_u24_to_be(buffer, value);
395 }
396
397 /* write a uint16_t to a buffer in target memory endianness */
398 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
399 {
400 if (target->endianness == TARGET_LITTLE_ENDIAN)
401 h_u16_to_le(buffer, value);
402 else
403 h_u16_to_be(buffer, value);
404 }
405
406 /* write a uint8_t to a buffer in target memory endianness */
407 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
408 {
409 *buffer = value;
410 }
411
412 /* read a uint64_t array from a buffer in target memory endianness */
413 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
414 {
415 uint32_t i;
416 for (i = 0; i < count; i++)
417 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
418 }
419
420 /* read a uint32_t array from a buffer in target memory endianness */
421 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
422 {
423 uint32_t i;
424 for (i = 0; i < count; i++)
425 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
426 }
427
428 /* read a uint16_t array from a buffer in target memory endianness */
429 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
430 {
431 uint32_t i;
432 for (i = 0; i < count; i++)
433 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
434 }
435
436 /* write a uint64_t array to a buffer in target memory endianness */
437 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
438 {
439 uint32_t i;
440 for (i = 0; i < count; i++)
441 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
442 }
443
444 /* write a uint32_t array to a buffer in target memory endianness */
445 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
446 {
447 uint32_t i;
448 for (i = 0; i < count; i++)
449 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
450 }
451
452 /* write a uint16_t array to a buffer in target memory endianness */
453 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
454 {
455 uint32_t i;
456 for (i = 0; i < count; i++)
457 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
458 }
459
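/* Editorial usage sketch (not part of the original file): converting raw
 * target memory bytes to host order with the helpers above. This is
 * essentially what the target_read_u32() convenience wrapper does; the
 * variable names here are illustrative only.
 *
 *	uint8_t raw[4];
 *	uint32_t value = 0;
 *	int retval = target_read_memory(target, address, 4, 1, raw);
 *	if (retval == ERROR_OK)
 *		value = target_buffer_get_u32(target, raw);
 */
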
460 /* return a pointer to a configured target; id is name or number */
461 struct target *get_target(const char *id)
462 {
463 struct target *target;
464
465 /* try as Tcl target name */
466 for (target = all_targets; target; target = target->next) {
467 if (target_name(target) == NULL)
468 continue;
469 if (strcmp(id, target_name(target)) == 0)
470 return target;
471 }
472
473 /* It's OK to remove this fallback sometime after August 2010 or so */
474
475 /* no match, try as number */
476 unsigned num;
477 if (parse_uint(id, &num) != ERROR_OK)
478 return NULL;
479
480 for (target = all_targets; target; target = target->next) {
481 if (target->target_number == (int)num) {
482 LOG_WARNING("use '%s' as target identifier, not '%u'",
483 target_name(target), num);
484 return target;
485 }
486 }
487
488 return NULL;
489 }
490
491 /* returns a pointer to the n-th configured target */
492 struct target *get_target_by_num(int num)
493 {
494 struct target *target = all_targets;
495
496 while (target) {
497 if (target->target_number == num)
498 return target;
499 target = target->next;
500 }
501
502 return NULL;
503 }
504
505 struct target *get_current_target(struct command_context *cmd_ctx)
506 {
507 struct target *target = get_target_by_num(cmd_ctx->current_target);
508
509 if (target == NULL) {
510 LOG_ERROR("BUG: current_target out of bounds");
511 exit(-1);
512 }
513
514 return target;
515 }
516
517 int target_poll(struct target *target)
518 {
519 int retval;
520
521 /* We can't poll until after examine */
522 if (!target_was_examined(target)) {
523 /* Fail silently lest we pollute the log */
524 return ERROR_FAIL;
525 }
526
527 retval = target->type->poll(target);
528 if (retval != ERROR_OK)
529 return retval;
530
531 if (target->halt_issued) {
532 if (target->state == TARGET_HALTED)
533 target->halt_issued = false;
534 else {
535 long long t = timeval_ms() - target->halt_issued_time;
536 if (t > DEFAULT_HALT_TIMEOUT) {
537 target->halt_issued = false;
538 LOG_INFO("Halt timed out, wake up GDB.");
539 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
540 }
541 }
542 }
543
544 return ERROR_OK;
545 }
546
547 int target_halt(struct target *target)
548 {
549 int retval;
550 /* We can't poll until after examine */
551 if (!target_was_examined(target)) {
552 LOG_ERROR("Target not examined yet");
553 return ERROR_FAIL;
554 }
555
556 retval = target->type->halt(target);
557 if (retval != ERROR_OK)
558 return retval;
559
560 target->halt_issued = true;
561 target->halt_issued_time = timeval_ms();
562
563 return ERROR_OK;
564 }
565
566 /**
567 * Make the target (re)start executing using its saved execution
568 * context (possibly with some modifications).
569 *
570 * @param target Which target should start executing.
571 * @param current True to use the target's saved program counter instead
572 * of the address parameter
573 * @param address Optionally used as the program counter.
574 * @param handle_breakpoints True iff breakpoints at the resumption PC
575 * should be skipped. (For example, maybe execution was stopped by
576 * such a breakpoint, in which case it would be counterproductive to
577 * let it re-trigger.)
578 * @param debug_execution False if all working areas allocated by OpenOCD
579 * should be released and/or restored to their original contents.
580 * (This would for example be true to run some downloaded "helper"
581 * algorithm code, which resides in one such working buffer and uses
582 * another for data storage.)
583 *
584 * @todo Resolve the ambiguity about what the "debug_execution" flag
585 * signifies. For example, Target implementations don't agree on how
586 * it relates to invalidation of the register cache, or to whether
587 * breakpoints and watchpoints should be enabled. (It would seem wrong
588 * to enable breakpoints when running downloaded "helper" algorithms
589 * (debug_execution true), since the breakpoints would be set to match
590 * target firmware being debugged, not the helper algorithm.... and
591 * enabling them could cause such helpers to malfunction (for example,
592 * by overwriting data with a breakpoint instruction). On the other
593 * hand the infrastructure for running such helpers might use this
594 * procedure but rely on hardware breakpoint to detect termination.)
595 */
596 int target_resume(struct target *target, int current, uint32_t address, int handle_breakpoints, int debug_execution)
597 {
598 int retval;
599
600 /* We can't poll until after examine */
601 if (!target_was_examined(target)) {
602 LOG_ERROR("Target not examined yet");
603 return ERROR_FAIL;
604 }
605
606 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
607
608 /* note that resume *must* be asynchronous. The CPU can halt before
609 * we poll. The CPU can even halt at the current PC as a result of
610 * a software breakpoint being inserted by (a bug?) the application.
611 */
612 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
613 if (retval != ERROR_OK)
614 return retval;
615
616 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
617
618 return retval;
619 }
620
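/* Editorial usage sketch (not part of the original file): a minimal
 * halt -> wait -> resume sequence as a caller might drive it. The polling
 * loop is illustrative; real callers normally use the existing
 * target_wait_state() helper for the waiting part.
 *
 *	if (target_halt(target) != ERROR_OK)
 *		return ERROR_FAIL;
 *	long long start = timeval_ms();
 *	while (target->state != TARGET_HALTED) {
 *		target_poll(target);
 *		if (timeval_ms() - start > DEFAULT_HALT_TIMEOUT)
 *			return ERROR_TARGET_TIMEOUT;
 *		alive_sleep(10);
 *	}
 *	// resume at the current PC, honouring breakpoints, normal execution
 *	return target_resume(target, 1, 0, 1, 0);
 */
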
621 static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
622 {
623 char buf[100];
624 int retval;
625 Jim_Nvp *n;
626 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
627 if (n->name == NULL) {
628 LOG_ERROR("invalid reset mode");
629 return ERROR_FAIL;
630 }
631
632 struct target *target;
633 for (target = all_targets; target; target = target->next)
634 target_call_reset_callbacks(target, reset_mode);
635
636 /* disable polling during reset to make reset event scripts
637 * more predictable, i.e. dr/irscan & pathmove in events will
638 * not have JTAG operations injected into the middle of a sequence.
639 */
640 bool save_poll = jtag_poll_get_enabled();
641
642 jtag_poll_set_enabled(false);
643
644 sprintf(buf, "ocd_process_reset %s", n->name);
645 retval = Jim_Eval(cmd_ctx->interp, buf);
646
647 jtag_poll_set_enabled(save_poll);
648
649 if (retval != JIM_OK) {
650 Jim_MakeErrorMessage(cmd_ctx->interp);
651 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
652 return ERROR_FAIL;
653 }
654
655 /* We want any events to be processed before the prompt */
656 retval = target_call_timer_callbacks_now();
657
658 for (target = all_targets; target; target = target->next) {
659 target->type->check_reset(target);
660 target->running_alg = false;
661 }
662
663 return retval;
664 }
665
666 static int identity_virt2phys(struct target *target,
667 uint32_t virtual, uint32_t *physical)
668 {
669 *physical = virtual;
670 return ERROR_OK;
671 }
672
673 static int no_mmu(struct target *target, int *enabled)
674 {
675 *enabled = 0;
676 return ERROR_OK;
677 }
678
679 static int default_examine(struct target *target)
680 {
681 target_set_examined(target);
682 return ERROR_OK;
683 }
684
685 /* no check by default */
686 static int default_check_reset(struct target *target)
687 {
688 return ERROR_OK;
689 }
690
691 int target_examine_one(struct target *target)
692 {
693 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
694
695 int retval = target->type->examine(target);
696 if (retval != ERROR_OK)
697 return retval;
698
699 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
700
701 return ERROR_OK;
702 }
703
704 static int jtag_enable_callback(enum jtag_event event, void *priv)
705 {
706 struct target *target = priv;
707
708 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
709 return ERROR_OK;
710
711 jtag_unregister_event_callback(jtag_enable_callback, target);
712
713 return target_examine_one(target);
714 }
715
716 /* Targets that correctly implement init + examine, i.e.
717 * no communication with target during init:
718 *
719 * XScale
720 */
721 int target_examine(void)
722 {
723 int retval = ERROR_OK;
724 struct target *target;
725
726 for (target = all_targets; target; target = target->next) {
727 /* defer examination, but don't skip it */
728 if (!target->tap->enabled) {
729 jtag_register_event_callback(jtag_enable_callback,
730 target);
731 continue;
732 }
733
734 retval = target_examine_one(target);
735 if (retval != ERROR_OK)
736 return retval;
737 }
738 return retval;
739 }
740
741 const char *target_type_name(struct target *target)
742 {
743 return target->type->name;
744 }
745
746 static int target_soft_reset_halt(struct target *target)
747 {
748 if (!target_was_examined(target)) {
749 LOG_ERROR("Target not examined yet");
750 return ERROR_FAIL;
751 }
752 if (!target->type->soft_reset_halt) {
753 LOG_ERROR("Target %s does not support soft_reset_halt",
754 target_name(target));
755 return ERROR_FAIL;
756 }
757 return target->type->soft_reset_halt(target);
758 }
759
760 /**
761 * Downloads a target-specific native code algorithm to the target,
762 * and executes it. Note that some targets may need to set up, enable,
763 * and tear down a breakpoint (hard or soft) to detect algorithm
764 * termination, while others may support lower overhead schemes where
765 * soft breakpoints embedded in the algorithm automatically terminate the
766 * algorithm.
767 *
768 * @param target used to run the algorithm
769 * @param arch_info target-specific description of the algorithm.
770 */
771 int target_run_algorithm(struct target *target,
772 int num_mem_params, struct mem_param *mem_params,
773 int num_reg_params, struct reg_param *reg_param,
774 uint32_t entry_point, uint32_t exit_point,
775 int timeout_ms, void *arch_info)
776 {
777 int retval = ERROR_FAIL;
778
779 if (!target_was_examined(target)) {
780 LOG_ERROR("Target not examined yet");
781 goto done;
782 }
783 if (!target->type->run_algorithm) {
784 LOG_ERROR("Target type '%s' does not support %s",
785 target_type_name(target), __func__);
786 goto done;
787 }
788
789 target->running_alg = true;
790 retval = target->type->run_algorithm(target,
791 num_mem_params, mem_params,
792 num_reg_params, reg_param,
793 entry_point, exit_point, timeout_ms, arch_info);
794 target->running_alg = false;
795
796 done:
797 return retval;
798 }
799
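/* Editorial usage sketch (not part of the original file): how a flash or
 * loader driver might call target_run_algorithm(). entry_point, exit_point,
 * the register names and arch_info are target-specific assumptions (a
 * Cortex-M driver, for instance, passes a struct armv7m_algorithm); only
 * the call pattern matters here.
 *
 *	struct reg_param reg_params[2];
 *	init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);	// argument in, result out
 *	init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);	// argument only
 *	buf_set_u32(reg_params[0].value, 0, 32, first_argument);
 *	buf_set_u32(reg_params[1].value, 0, 32, second_argument);
 *	int retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
 *			entry_point, exit_point, 1000, arch_info);
 *	if (retval == ERROR_OK)
 *		result = buf_get_u32(reg_params[0].value, 0, 32);
 *	destroy_reg_param(&reg_params[0]);
 *	destroy_reg_param(&reg_params[1]);
 */
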
800 /**
801 * Downloads a target-specific native code algorithm to the target,
802 * executes and leaves it running.
803 *
804 * @param target used to run the algorithm
805 * @param arch_info target-specific description of the algorithm.
806 */
807 int target_start_algorithm(struct target *target,
808 int num_mem_params, struct mem_param *mem_params,
809 int num_reg_params, struct reg_param *reg_params,
810 uint32_t entry_point, uint32_t exit_point,
811 void *arch_info)
812 {
813 int retval = ERROR_FAIL;
814
815 if (!target_was_examined(target)) {
816 LOG_ERROR("Target not examined yet");
817 goto done;
818 }
819 if (!target->type->start_algorithm) {
820 LOG_ERROR("Target type '%s' does not support %s",
821 target_type_name(target), __func__);
822 goto done;
823 }
824 if (target->running_alg) {
825 LOG_ERROR("Target is already running an algorithm");
826 goto done;
827 }
828
829 target->running_alg = true;
830 retval = target->type->start_algorithm(target,
831 num_mem_params, mem_params,
832 num_reg_params, reg_params,
833 entry_point, exit_point, arch_info);
834
835 done:
836 return retval;
837 }
838
839 /**
840 * Waits for an algorithm started with target_start_algorithm() to complete.
841 *
842 * @param target used to run the algorithm
843 * @param arch_info target-specific description of the algorithm.
844 */
845 int target_wait_algorithm(struct target *target,
846 int num_mem_params, struct mem_param *mem_params,
847 int num_reg_params, struct reg_param *reg_params,
848 uint32_t exit_point, int timeout_ms,
849 void *arch_info)
850 {
851 int retval = ERROR_FAIL;
852
853 if (!target->type->wait_algorithm) {
854 LOG_ERROR("Target type '%s' does not support %s",
855 target_type_name(target), __func__);
856 goto done;
857 }
858 if (!target->running_alg) {
859 LOG_ERROR("Target is not running an algorithm");
860 goto done;
861 }
862
863 retval = target->type->wait_algorithm(target,
864 num_mem_params, mem_params,
865 num_reg_params, reg_params,
866 exit_point, timeout_ms, arch_info);
867 if (retval != ERROR_TARGET_TIMEOUT)
868 target->running_alg = false;
869
870 done:
871 return retval;
872 }
873
874 /**
875 * Executes a target-specific native code algorithm in the target.
876 * It differs from target_run_algorithm in that the algorithm is asynchronous.
877 * Because of this it requires a compliant algorithm:
878 * see contrib/loaders/flash/stm32f1x.S for example.
879 *
880 * @param target used to run the algorithm
881 */
882
883 int target_run_flash_async_algorithm(struct target *target,
884 const uint8_t *buffer, uint32_t count, int block_size,
885 int num_mem_params, struct mem_param *mem_params,
886 int num_reg_params, struct reg_param *reg_params,
887 uint32_t buffer_start, uint32_t buffer_size,
888 uint32_t entry_point, uint32_t exit_point, void *arch_info)
889 {
890 int retval;
891 int timeout = 0;
892
893 const uint8_t *buffer_orig = buffer;
894
895 /* Set up working area. First word is write pointer, second word is read pointer,
896 * rest is fifo data area. */
897 uint32_t wp_addr = buffer_start;
898 uint32_t rp_addr = buffer_start + 4;
899 uint32_t fifo_start_addr = buffer_start + 8;
900 uint32_t fifo_end_addr = buffer_start + buffer_size;
901
902 uint32_t wp = fifo_start_addr;
903 uint32_t rp = fifo_start_addr;
904
905 /* validate block_size is 2^n */
906 assert(!block_size || !(block_size & (block_size - 1)));
907
908 retval = target_write_u32(target, wp_addr, wp);
909 if (retval != ERROR_OK)
910 return retval;
911 retval = target_write_u32(target, rp_addr, rp);
912 if (retval != ERROR_OK)
913 return retval;
914
915 /* Start up algorithm on target and let it idle while writing the first chunk */
916 retval = target_start_algorithm(target, num_mem_params, mem_params,
917 num_reg_params, reg_params,
918 entry_point,
919 exit_point,
920 arch_info);
921
922 if (retval != ERROR_OK) {
923 LOG_ERROR("error starting target flash write algorithm");
924 return retval;
925 }
926
927 while (count > 0) {
928
929 retval = target_read_u32(target, rp_addr, &rp);
930 if (retval != ERROR_OK) {
931 LOG_ERROR("failed to get read pointer");
932 break;
933 }
934
935 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
936 (size_t) (buffer - buffer_orig), count, wp, rp);
937
938 if (rp == 0) {
939 LOG_ERROR("flash write algorithm aborted by target");
940 retval = ERROR_FLASH_OPERATION_FAILED;
941 break;
942 }
943
944 if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
945 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
946 break;
947 }
948
949 /* Count the number of bytes available in the fifo without
950 * crossing the wrap around. Make sure to not fill it completely,
951 * because that would make wp == rp and that's the empty condition. */
952 uint32_t thisrun_bytes;
953 if (rp > wp)
954 thisrun_bytes = rp - wp - block_size;
955 else if (rp > fifo_start_addr)
956 thisrun_bytes = fifo_end_addr - wp;
957 else
958 thisrun_bytes = fifo_end_addr - wp - block_size;
959
960 if (thisrun_bytes == 0) {
961 /* Throttle polling a bit if transfer is (much) faster than flash
962 * programming. The exact delay shouldn't matter as long as it's
963 * less than buffer size / flash speed. This is very unlikely to
964 * run when using high latency connections such as USB. */
965 alive_sleep(10);
966
967 /* To stop an infinite loop on some targets, check and increment a timeout.
968 * This issue was observed on a Stellaris using the new ICDI interface. */
969 if (timeout++ >= 500) {
970 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
971 return ERROR_FLASH_OPERATION_FAILED;
972 }
973 continue;
974 }
975
976 /* reset our timeout */
977 timeout = 0;
978
979 /* Limit to the amount of data we actually want to write */
980 if (thisrun_bytes > count * block_size)
981 thisrun_bytes = count * block_size;
982
983 /* Write data to fifo */
984 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
985 if (retval != ERROR_OK)
986 break;
987
988 /* Update counters and wrap write pointer */
989 buffer += thisrun_bytes;
990 count -= thisrun_bytes / block_size;
991 wp += thisrun_bytes;
992 if (wp >= fifo_end_addr)
993 wp = fifo_start_addr;
994
995 /* Store updated write pointer to target */
996 retval = target_write_u32(target, wp_addr, wp);
997 if (retval != ERROR_OK)
998 break;
999 }
1000
1001 if (retval != ERROR_OK) {
1002 /* abort flash write algorithm on target */
1003 target_write_u32(target, wp_addr, 0);
1004 }
1005
1006 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1007 num_reg_params, reg_params,
1008 exit_point,
1009 10000,
1010 arch_info);
1011
1012 if (retval2 != ERROR_OK) {
1013 LOG_ERROR("error waiting for target flash write algorithm");
1014 retval = retval2;
1015 }
1016
1017 return retval;
1018 }
1019
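/* Editorial usage sketch (not part of the original file): the FIFO passed to
 * target_run_flash_async_algorithm() lives in a working area; its first two
 * words hold the write and read pointers and the rest is data space, as
 * described above. The sizes, the loader area and the parameter lists below
 * are illustrative assumptions.
 *
 *	struct working_area *fifo;
 *	if (target_alloc_working_area(target, 4096, &fifo) != ERROR_OK)
 *		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *	retval = target_run_flash_async_algorithm(target, buffer, count,
 *			block_size, 0, NULL, num_reg_params, reg_params,
 *			fifo->address, fifo->size,
 *			loader->address, 0, arch_info);	// exit_point 0: exit detected via breakpoint
 *	target_free_working_area(target, fifo);
 */
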
1020 int target_read_memory(struct target *target,
1021 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1022 {
1023 if (!target_was_examined(target)) {
1024 LOG_ERROR("Target not examined yet");
1025 return ERROR_FAIL;
1026 }
1027 if (!target->type->read_memory) {
1028 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1029 return ERROR_FAIL;
1030 }
1031 return target->type->read_memory(target, address, size, count, buffer);
1032 }
1033
1034 int target_read_phys_memory(struct target *target,
1035 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1036 {
1037 if (!target_was_examined(target)) {
1038 LOG_ERROR("Target not examined yet");
1039 return ERROR_FAIL;
1040 }
1041 if (!target->type->read_phys_memory) {
1042 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1043 return ERROR_FAIL;
1044 }
1045 return target->type->read_phys_memory(target, address, size, count, buffer);
1046 }
1047
1048 int target_write_memory(struct target *target,
1049 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1050 {
1051 if (!target_was_examined(target)) {
1052 LOG_ERROR("Target not examined yet");
1053 return ERROR_FAIL;
1054 }
1055 if (!target->type->write_memory) {
1056 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1057 return ERROR_FAIL;
1058 }
1059 return target->type->write_memory(target, address, size, count, buffer);
1060 }
1061
1062 int target_write_phys_memory(struct target *target,
1063 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1064 {
1065 if (!target_was_examined(target)) {
1066 LOG_ERROR("Target not examined yet");
1067 return ERROR_FAIL;
1068 }
1069 if (!target->type->write_phys_memory) {
1070 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1071 return ERROR_FAIL;
1072 }
1073 return target->type->write_phys_memory(target, address, size, count, buffer);
1074 }
1075
1076 int target_add_breakpoint(struct target *target,
1077 struct breakpoint *breakpoint)
1078 {
1079 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1080 LOG_WARNING("target %s is not halted", target_name(target));
1081 return ERROR_TARGET_NOT_HALTED;
1082 }
1083 return target->type->add_breakpoint(target, breakpoint);
1084 }
1085
1086 int target_add_context_breakpoint(struct target *target,
1087 struct breakpoint *breakpoint)
1088 {
1089 if (target->state != TARGET_HALTED) {
1090 LOG_WARNING("target %s is not halted", target_name(target));
1091 return ERROR_TARGET_NOT_HALTED;
1092 }
1093 return target->type->add_context_breakpoint(target, breakpoint);
1094 }
1095
1096 int target_add_hybrid_breakpoint(struct target *target,
1097 struct breakpoint *breakpoint)
1098 {
1099 if (target->state != TARGET_HALTED) {
1100 LOG_WARNING("target %s is not halted", target_name(target));
1101 return ERROR_TARGET_NOT_HALTED;
1102 }
1103 return target->type->add_hybrid_breakpoint(target, breakpoint);
1104 }
1105
1106 int target_remove_breakpoint(struct target *target,
1107 struct breakpoint *breakpoint)
1108 {
1109 return target->type->remove_breakpoint(target, breakpoint);
1110 }
1111
1112 int target_add_watchpoint(struct target *target,
1113 struct watchpoint *watchpoint)
1114 {
1115 if (target->state != TARGET_HALTED) {
1116 LOG_WARNING("target %s is not halted", target_name(target));
1117 return ERROR_TARGET_NOT_HALTED;
1118 }
1119 return target->type->add_watchpoint(target, watchpoint);
1120 }
1121 int target_remove_watchpoint(struct target *target,
1122 struct watchpoint *watchpoint)
1123 {
1124 return target->type->remove_watchpoint(target, watchpoint);
1125 }
1126 int target_hit_watchpoint(struct target *target,
1127 struct watchpoint **hit_watchpoint)
1128 {
1129 if (target->state != TARGET_HALTED) {
1130 LOG_WARNING("target %s is not halted", target->cmd_name);
1131 return ERROR_TARGET_NOT_HALTED;
1132 }
1133
1134 if (target->type->hit_watchpoint == NULL) {
1135 /* For backward compatibility, if hit_watchpoint is not implemented,
1136 * return ERROR_FAIL so that gdb_server does not report bogus
1137 * information. */
1138 return ERROR_FAIL;
1139 }
1140
1141 return target->type->hit_watchpoint(target, hit_watchpoint);
1142 }
1143
1144 int target_get_gdb_reg_list(struct target *target,
1145 struct reg **reg_list[], int *reg_list_size,
1146 enum target_register_class reg_class)
1147 {
1148 return target->type->get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1149 }
1150 int target_step(struct target *target,
1151 int current, uint32_t address, int handle_breakpoints)
1152 {
1153 return target->type->step(target, current, address, handle_breakpoints);
1154 }
1155
1156 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1157 {
1158 if (target->state != TARGET_HALTED) {
1159 LOG_WARNING("target %s is not halted", target->cmd_name);
1160 return ERROR_TARGET_NOT_HALTED;
1161 }
1162 return target->type->get_gdb_fileio_info(target, fileio_info);
1163 }
1164
1165 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1166 {
1167 if (target->state != TARGET_HALTED) {
1168 LOG_WARNING("target %s is not halted", target->cmd_name);
1169 return ERROR_TARGET_NOT_HALTED;
1170 }
1171 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1172 }
1173
1174 int target_profiling(struct target *target, uint32_t *samples,
1175 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1176 {
1177 if (target->state != TARGET_HALTED) {
1178 LOG_WARNING("target %s is not halted", target->cmd_name);
1179 return ERROR_TARGET_NOT_HALTED;
1180 }
1181 return target->type->profiling(target, samples, max_num_samples,
1182 num_samples, seconds);
1183 }
1184
1185 /**
1186 * Reset the @c examined flag for the given target.
1187 * Pure paranoia -- targets are zeroed on allocation.
1188 */
1189 static void target_reset_examined(struct target *target)
1190 {
1191 target->examined = false;
1192 }
1193
1194 static int handle_target(void *priv);
1195
1196 static int target_init_one(struct command_context *cmd_ctx,
1197 struct target *target)
1198 {
1199 target_reset_examined(target);
1200
1201 struct target_type *type = target->type;
1202 if (type->examine == NULL)
1203 type->examine = default_examine;
1204
1205 if (type->check_reset == NULL)
1206 type->check_reset = default_check_reset;
1207
1208 assert(type->init_target != NULL);
1209
1210 int retval = type->init_target(cmd_ctx, target);
1211 if (ERROR_OK != retval) {
1212 LOG_ERROR("target '%s' init failed", target_name(target));
1213 return retval;
1214 }
1215
1216 /* Sanity-check MMU support ... stub in what we must, to help
1217 * implement it in stages, but warn if we need to do so.
1218 */
1219 if (type->mmu) {
1220 if (type->virt2phys == NULL) {
1221 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1222 type->virt2phys = identity_virt2phys;
1223 }
1224 } else {
1225 /* Make sure no-MMU targets all behave the same: make no
1226 * distinction between physical and virtual addresses, and
1227 * ensure that virt2phys() is always an identity mapping.
1228 */
1229 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1230 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1231
1232 type->mmu = no_mmu;
1233 type->write_phys_memory = type->write_memory;
1234 type->read_phys_memory = type->read_memory;
1235 type->virt2phys = identity_virt2phys;
1236 }
1237
1238 if (target->type->read_buffer == NULL)
1239 target->type->read_buffer = target_read_buffer_default;
1240
1241 if (target->type->write_buffer == NULL)
1242 target->type->write_buffer = target_write_buffer_default;
1243
1244 if (target->type->get_gdb_fileio_info == NULL)
1245 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1246
1247 if (target->type->gdb_fileio_end == NULL)
1248 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1249
1250 if (target->type->profiling == NULL)
1251 target->type->profiling = target_profiling_default;
1252
1253 return ERROR_OK;
1254 }
1255
1256 static int target_init(struct command_context *cmd_ctx)
1257 {
1258 struct target *target;
1259 int retval;
1260
1261 for (target = all_targets; target; target = target->next) {
1262 retval = target_init_one(cmd_ctx, target);
1263 if (ERROR_OK != retval)
1264 return retval;
1265 }
1266
1267 if (!all_targets)
1268 return ERROR_OK;
1269
1270 retval = target_register_user_commands(cmd_ctx);
1271 if (ERROR_OK != retval)
1272 return retval;
1273
1274 retval = target_register_timer_callback(&handle_target,
1275 polling_interval, 1, cmd_ctx->interp);
1276 if (ERROR_OK != retval)
1277 return retval;
1278
1279 return ERROR_OK;
1280 }
1281
1282 COMMAND_HANDLER(handle_target_init_command)
1283 {
1284 int retval;
1285
1286 if (CMD_ARGC != 0)
1287 return ERROR_COMMAND_SYNTAX_ERROR;
1288
1289 static bool target_initialized;
1290 if (target_initialized) {
1291 LOG_INFO("'target init' has already been called");
1292 return ERROR_OK;
1293 }
1294 target_initialized = true;
1295
1296 retval = command_run_line(CMD_CTX, "init_targets");
1297 if (ERROR_OK != retval)
1298 return retval;
1299
1300 retval = command_run_line(CMD_CTX, "init_target_events");
1301 if (ERROR_OK != retval)
1302 return retval;
1303
1304 retval = command_run_line(CMD_CTX, "init_board");
1305 if (ERROR_OK != retval)
1306 return retval;
1307
1308 LOG_DEBUG("Initializing targets...");
1309 return target_init(CMD_CTX);
1310 }
1311
1312 int target_register_event_callback(int (*callback)(struct target *target,
1313 enum target_event event, void *priv), void *priv)
1314 {
1315 struct target_event_callback **callbacks_p = &target_event_callbacks;
1316
1317 if (callback == NULL)
1318 return ERROR_COMMAND_SYNTAX_ERROR;
1319
1320 if (*callbacks_p) {
1321 while ((*callbacks_p)->next)
1322 callbacks_p = &((*callbacks_p)->next);
1323 callbacks_p = &((*callbacks_p)->next);
1324 }
1325
1326 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1327 (*callbacks_p)->callback = callback;
1328 (*callbacks_p)->priv = priv;
1329 (*callbacks_p)->next = NULL;
1330
1331 return ERROR_OK;
1332 }
1333
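/* Editorial usage sketch (not part of the original file): a minimal event
 * callback and its registration. example_event_handler is a hypothetical
 * name.
 *
 *	static int example_event_handler(struct target *target,
 *			enum target_event event, void *priv)
 *	{
 *		if (event == TARGET_EVENT_HALTED)
 *			LOG_DEBUG("%s halted", target_name(target));
 *		return ERROR_OK;
 *	}
 *
 *	target_register_event_callback(example_event_handler, NULL);
 */
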
1334 int target_register_reset_callback(int (*callback)(struct target *target,
1335 enum target_reset_mode reset_mode, void *priv), void *priv)
1336 {
1337 struct target_reset_callback *entry;
1338
1339 if (callback == NULL)
1340 return ERROR_COMMAND_SYNTAX_ERROR;
1341
1342 entry = malloc(sizeof(struct target_reset_callback));
1343 if (entry == NULL) {
1344 LOG_ERROR("error allocating buffer for reset callback entry");
1345 return ERROR_COMMAND_SYNTAX_ERROR;
1346 }
1347
1348 entry->callback = callback;
1349 entry->priv = priv;
1350 list_add(&entry->list, &target_reset_callback_list);
1351
1352
1353 return ERROR_OK;
1354 }
1355
1356 int target_register_trace_callback(int (*callback)(struct target *target,
1357 size_t len, uint8_t *data, void *priv), void *priv)
1358 {
1359 struct target_trace_callback *entry;
1360
1361 if (callback == NULL)
1362 return ERROR_COMMAND_SYNTAX_ERROR;
1363
1364 entry = malloc(sizeof(struct target_trace_callback));
1365 if (entry == NULL) {
1366 LOG_ERROR("error allocating buffer for trace callback entry");
1367 return ERROR_COMMAND_SYNTAX_ERROR;
1368 }
1369
1370 entry->callback = callback;
1371 entry->priv = priv;
1372 list_add(&entry->list, &target_trace_callback_list);
1373
1374
1375 return ERROR_OK;
1376 }
1377
1378 int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
1379 {
1380 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1381 struct timeval now;
1382
1383 if (callback == NULL)
1384 return ERROR_COMMAND_SYNTAX_ERROR;
1385
1386 if (*callbacks_p) {
1387 while ((*callbacks_p)->next)
1388 callbacks_p = &((*callbacks_p)->next);
1389 callbacks_p = &((*callbacks_p)->next);
1390 }
1391
1392 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1393 (*callbacks_p)->callback = callback;
1394 (*callbacks_p)->periodic = periodic;
1395 (*callbacks_p)->time_ms = time_ms;
1396 (*callbacks_p)->removed = false;
1397
1398 gettimeofday(&now, NULL);
1399 (*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000;
1400 time_ms -= (time_ms % 1000);
1401 (*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000);
1402 if ((*callbacks_p)->when.tv_usec > 1000000) {
1403 (*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000;
1404 (*callbacks_p)->when.tv_sec += 1;
1405 }
1406
1407 (*callbacks_p)->priv = priv;
1408 (*callbacks_p)->next = NULL;
1409
1410 return ERROR_OK;
1411 }
1412
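/* Editorial usage sketch (not part of the original file): registering a
 * periodic timer callback, here with an illustrative 500 ms period.
 * handle_target() is registered through this same call from target_init()
 * above; example_tick is a hypothetical name.
 *
 *	static int example_tick(void *priv)
 *	{
 *		// priv is handed back verbatim, e.g. a command context
 *		return ERROR_OK;
 *	}
 *
 *	target_register_timer_callback(example_tick, 500, 1, NULL);
 */
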
1413 int target_unregister_event_callback(int (*callback)(struct target *target,
1414 enum target_event event, void *priv), void *priv)
1415 {
1416 struct target_event_callback **p = &target_event_callbacks;
1417 struct target_event_callback *c = target_event_callbacks;
1418
1419 if (callback == NULL)
1420 return ERROR_COMMAND_SYNTAX_ERROR;
1421
1422 while (c) {
1423 struct target_event_callback *next = c->next;
1424 if ((c->callback == callback) && (c->priv == priv)) {
1425 *p = next;
1426 free(c);
1427 return ERROR_OK;
1428 } else
1429 p = &(c->next);
1430 c = next;
1431 }
1432
1433 return ERROR_OK;
1434 }
1435
1436 int target_unregister_reset_callback(int (*callback)(struct target *target,
1437 enum target_reset_mode reset_mode, void *priv), void *priv)
1438 {
1439 struct target_reset_callback *entry;
1440
1441 if (callback == NULL)
1442 return ERROR_COMMAND_SYNTAX_ERROR;
1443
1444 list_for_each_entry(entry, &target_reset_callback_list, list) {
1445 if (entry->callback == callback && entry->priv == priv) {
1446 list_del(&entry->list);
1447 free(entry);
1448 break;
1449 }
1450 }
1451
1452 return ERROR_OK;
1453 }
1454
1455 int target_unregister_trace_callback(int (*callback)(struct target *target,
1456 size_t len, uint8_t *data, void *priv), void *priv)
1457 {
1458 struct target_trace_callback *entry;
1459
1460 if (callback == NULL)
1461 return ERROR_COMMAND_SYNTAX_ERROR;
1462
1463 list_for_each_entry(entry, &target_trace_callback_list, list) {
1464 if (entry->callback == callback && entry->priv == priv) {
1465 list_del(&entry->list);
1466 free(entry);
1467 break;
1468 }
1469 }
1470
1471 return ERROR_OK;
1472 }
1473
1474 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1475 {
1476 if (callback == NULL)
1477 return ERROR_COMMAND_SYNTAX_ERROR;
1478
1479 for (struct target_timer_callback *c = target_timer_callbacks;
1480 c; c = c->next) {
1481 if ((c->callback == callback) && (c->priv == priv)) {
1482 c->removed = true;
1483 return ERROR_OK;
1484 }
1485 }
1486
1487 return ERROR_FAIL;
1488 }
1489
1490 int target_call_event_callbacks(struct target *target, enum target_event event)
1491 {
1492 struct target_event_callback *callback = target_event_callbacks;
1493 struct target_event_callback *next_callback;
1494
1495 if (event == TARGET_EVENT_HALTED) {
1496 /* execute early halted first */
1497 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1498 }
1499
1500 LOG_DEBUG("target event %i (%s)", event,
1501 Jim_Nvp_value2name_simple(nvp_target_event, event)->name);
1502
1503 target_handle_event(target, event);
1504
1505 while (callback) {
1506 next_callback = callback->next;
1507 callback->callback(target, event, callback->priv);
1508 callback = next_callback;
1509 }
1510
1511 return ERROR_OK;
1512 }
1513
1514 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1515 {
1516 struct target_reset_callback *callback;
1517
1518 LOG_DEBUG("target reset %i (%s)", reset_mode,
1519 Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1520
1521 list_for_each_entry(callback, &target_reset_callback_list, list)
1522 callback->callback(target, reset_mode, callback->priv);
1523
1524 return ERROR_OK;
1525 }
1526
1527 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1528 {
1529 struct target_trace_callback *callback;
1530
1531 list_for_each_entry(callback, &target_trace_callback_list, list)
1532 callback->callback(target, len, data, callback->priv);
1533
1534 return ERROR_OK;
1535 }
1536
1537 static int target_timer_callback_periodic_restart(
1538 struct target_timer_callback *cb, struct timeval *now)
1539 {
1540 int time_ms = cb->time_ms;
1541 cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
1542 time_ms -= (time_ms % 1000);
1543 cb->when.tv_sec = now->tv_sec + time_ms / 1000;
1544 if (cb->when.tv_usec > 1000000) {
1545 cb->when.tv_usec = cb->when.tv_usec - 1000000;
1546 cb->when.tv_sec += 1;
1547 }
1548 return ERROR_OK;
1549 }
1550
1551 static int target_call_timer_callback(struct target_timer_callback *cb,
1552 struct timeval *now)
1553 {
1554 cb->callback(cb->priv);
1555
1556 if (cb->periodic)
1557 return target_timer_callback_periodic_restart(cb, now);
1558
1559 return target_unregister_timer_callback(cb->callback, cb->priv);
1560 }
1561
1562 static int target_call_timer_callbacks_check_time(int checktime)
1563 {
1564 static bool callback_processing;
1565
1566 /* Do not allow nesting */
1567 if (callback_processing)
1568 return ERROR_OK;
1569
1570 callback_processing = true;
1571
1572 keep_alive();
1573
1574 struct timeval now;
1575 gettimeofday(&now, NULL);
1576
1577 /* Store an address of the place containing a pointer to the
1578 * next item; initially, that's a standalone "root of the
1579 * list" variable. */
1580 struct target_timer_callback **callback = &target_timer_callbacks;
1581 while (*callback) {
1582 if ((*callback)->removed) {
1583 struct target_timer_callback *p = *callback;
1584 *callback = (*callback)->next;
1585 free(p);
1586 continue;
1587 }
1588
1589 bool call_it = (*callback)->callback &&
1590 ((!checktime && (*callback)->periodic) ||
1591 now.tv_sec > (*callback)->when.tv_sec ||
1592 (now.tv_sec == (*callback)->when.tv_sec &&
1593 now.tv_usec >= (*callback)->when.tv_usec));
1594
1595 if (call_it)
1596 target_call_timer_callback(*callback, &now);
1597
1598 callback = &(*callback)->next;
1599 }
1600
1601 callback_processing = false;
1602 return ERROR_OK;
1603 }
1604
1605 int target_call_timer_callbacks(void)
1606 {
1607 return target_call_timer_callbacks_check_time(1);
1608 }
1609
1610 /* invoke periodic callbacks immediately */
1611 int target_call_timer_callbacks_now(void)
1612 {
1613 return target_call_timer_callbacks_check_time(0);
1614 }
1615
1616 /* Prints the working area layout for debug purposes */
1617 static void print_wa_layout(struct target *target)
1618 {
1619 struct working_area *c = target->working_areas;
1620
1621 while (c) {
1622 LOG_DEBUG("%c%c 0x%08"PRIx32"-0x%08"PRIx32" (%"PRIu32" bytes)",
1623 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1624 c->address, c->address + c->size - 1, c->size);
1625 c = c->next;
1626 }
1627 }
1628
1629 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1630 static void target_split_working_area(struct working_area *area, uint32_t size)
1631 {
1632 assert(area->free); /* Shouldn't split an allocated area */
1633 assert(size <= area->size); /* Caller should guarantee this */
1634
1635 /* Split only if not already the right size */
1636 if (size < area->size) {
1637 struct working_area *new_wa = malloc(sizeof(*new_wa));
1638
1639 if (new_wa == NULL)
1640 return;
1641
1642 new_wa->next = area->next;
1643 new_wa->size = area->size - size;
1644 new_wa->address = area->address + size;
1645 new_wa->backup = NULL;
1646 new_wa->user = NULL;
1647 new_wa->free = true;
1648
1649 area->next = new_wa;
1650 area->size = size;
1651
1652 /* If backup memory was allocated to this area, it has the wrong size
1653 * now so free it and it will be reallocated if/when needed */
1654 if (area->backup) {
1655 free(area->backup);
1656 area->backup = NULL;
1657 }
1658 }
1659 }
1660
1661 /* Merge all adjacent free areas into one */
1662 static void target_merge_working_areas(struct target *target)
1663 {
1664 struct working_area *c = target->working_areas;
1665
1666 while (c && c->next) {
1667 assert(c->next->address == c->address + c->size); /* This is an invariant */
1668
1669 /* Find two adjacent free areas */
1670 if (c->free && c->next->free) {
1671 /* Merge the last into the first */
1672 c->size += c->next->size;
1673
1674 /* Remove the last */
1675 struct working_area *to_be_freed = c->next;
1676 c->next = c->next->next;
1677 if (to_be_freed->backup)
1678 free(to_be_freed->backup);
1679 free(to_be_freed);
1680
1681 /* If backup memory was allocated to the remaining area, it has
1682 * the wrong size now */
1683 if (c->backup) {
1684 free(c->backup);
1685 c->backup = NULL;
1686 }
1687 } else {
1688 c = c->next;
1689 }
1690 }
1691 }
1692
1693 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1694 {
1695 /* Reevaluate working area address based on MMU state */
1696 if (target->working_areas == NULL) {
1697 int retval;
1698 int enabled;
1699
1700 retval = target->type->mmu(target, &enabled);
1701 if (retval != ERROR_OK)
1702 return retval;
1703
1704 if (!enabled) {
1705 if (target->working_area_phys_spec) {
1706 LOG_DEBUG("MMU disabled, using physical "
1707 "address for working memory 0x%08"PRIx32,
1708 target->working_area_phys);
1709 target->working_area = target->working_area_phys;
1710 } else {
1711 LOG_ERROR("No working memory available. "
1712 "Specify -work-area-phys to target.");
1713 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1714 }
1715 } else {
1716 if (target->working_area_virt_spec) {
1717 LOG_DEBUG("MMU enabled, using virtual "
1718 "address for working memory 0x%08"PRIx32,
1719 target->working_area_virt);
1720 target->working_area = target->working_area_virt;
1721 } else {
1722 LOG_ERROR("No working memory available. "
1723 "Specify -work-area-virt to target.");
1724 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1725 }
1726 }
1727
1728 /* Set up initial working area on first call */
1729 struct working_area *new_wa = malloc(sizeof(*new_wa));
1730 if (new_wa) {
1731 new_wa->next = NULL;
1732 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
1733 new_wa->address = target->working_area;
1734 new_wa->backup = NULL;
1735 new_wa->user = NULL;
1736 new_wa->free = true;
1737 }
1738
1739 target->working_areas = new_wa;
1740 }
1741
1742 /* only allocate multiples of 4 bytes */
1743 if (size % 4)
1744 size = (size + 3) & (~3UL);
1745
1746 struct working_area *c = target->working_areas;
1747
1748 /* Find the first large enough working area */
1749 while (c) {
1750 if (c->free && c->size >= size)
1751 break;
1752 c = c->next;
1753 }
1754
1755 if (c == NULL)
1756 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1757
1758 /* Split the working area into the requested size */
1759 target_split_working_area(c, size);
1760
1761 LOG_DEBUG("allocated new working area of %"PRIu32" bytes at address 0x%08"PRIx32, size, c->address);
1762
1763 if (target->backup_working_area) {
1764 if (c->backup == NULL) {
1765 c->backup = malloc(c->size);
1766 if (c->backup == NULL)
1767 return ERROR_FAIL;
1768 }
1769
1770 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
1771 if (retval != ERROR_OK)
1772 return retval;
1773 }
1774
1775 /* mark as used, and return the new (reused) area */
1776 c->free = false;
1777 *area = c;
1778
1779 /* user pointer */
1780 c->user = area;
1781
1782 print_wa_layout(target);
1783
1784 return ERROR_OK;
1785 }
1786
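/* Same as target_alloc_working_area_try(), but logs a warning when the
 * request cannot be satisfied.
 *
 * A minimal usage sketch (hypothetical caller, based on the signatures in
 * this file):
 *
 *   struct working_area *wa;
 *   if (target_alloc_working_area(target, 256, &wa) != ERROR_OK)
 *       return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *   // ... use wa->address for a download/scratch buffer ...
 *   target_free_working_area(target, wa);
 */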
1787 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1788 {
1789 int retval;
1790
1791 retval = target_alloc_working_area_try(target, size, area);
1792 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1793 LOG_WARNING("not enough working area available (requested %"PRIu32")", size);
1794 return retval;
1795
1796 }
1797
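/* Write an area's backup copy, if one exists and backups are enabled,
 * back to target memory. */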
1798 static int target_restore_working_area(struct target *target, struct working_area *area)
1799 {
1800 int retval = ERROR_OK;
1801
1802 if (target->backup_working_area && area->backup != NULL) {
1803 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
1804 if (retval != ERROR_OK)
1805 LOG_ERROR("failed to restore %"PRIu32" bytes of working area at address 0x%08"PRIx32,
1806 area->size, area->address);
1807 }
1808
1809 return retval;
1810 }
1811
1812 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
1813 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1814 {
1815 int retval = ERROR_OK;
1816
1817 if (area->free)
1818 return retval;
1819
1820 if (restore) {
1821 retval = target_restore_working_area(target, area);
1822 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
1823 if (retval != ERROR_OK)
1824 return retval;
1825 }
1826
1827 area->free = true;
1828
1829 LOG_DEBUG("freed %"PRIu32" bytes of working area at address 0x%08"PRIx32,
1830 area->size, area->address);
1831
1832 /* mark user pointer invalid */
1833 /* TODO: Is this really safe? It points to some previous caller's memory.
1834 * How could we know that the area pointer is still in that place and not
1835 * some other vital data? What's the purpose of this, anyway? */
1836 *area->user = NULL;
1837 area->user = NULL;
1838
1839 target_merge_working_areas(target);
1840
1841 print_wa_layout(target);
1842
1843 return retval;
1844 }
1845
1846 int target_free_working_area(struct target *target, struct working_area *area)
1847 {
1848 return target_free_working_area_restore(target, area, 1);
1849 }
1850
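/* Free the global event and timer callback lists and give every target type
 * a chance to run its deinit hook; called when OpenOCD shuts down. */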
1851 void target_quit(void)
1852 {
1853 struct target_event_callback *pe = target_event_callbacks;
1854 while (pe) {
1855 struct target_event_callback *t = pe->next;
1856 free(pe);
1857 pe = t;
1858 }
1859 target_event_callbacks = NULL;
1860
1861 struct target_timer_callback *pt = target_timer_callbacks;
1862 while (pt) {
1863 struct target_timer_callback *t = pt->next;
1864 free(pt);
1865 pt = t;
1866 }
1867 target_timer_callbacks = NULL;
1868
1869 for (struct target *target = all_targets;
1870 target; target = target->next) {
1871 if (target->type->deinit_target)
1872 target->type->deinit_target(target);
1873 }
1874 }
1875
1876 /* free resources and restore memory, if restoring memory fails,
1877 * free up resources anyway
1878 */
1879 static void target_free_all_working_areas_restore(struct target *target, int restore)
1880 {
1881 struct working_area *c = target->working_areas;
1882
1883 LOG_DEBUG("freeing all working areas");
1884
1885 /* Loop through all areas, restoring the allocated ones and marking them as free */
1886 while (c) {
1887 if (!c->free) {
1888 if (restore)
1889 target_restore_working_area(target, c);
1890 c->free = true;
1891 *c->user = NULL; /* Same as above */
1892 c->user = NULL;
1893 }
1894 c = c->next;
1895 }
1896
1897 /* Run a merge pass to combine all areas into one */
1898 target_merge_working_areas(target);
1899
1900 print_wa_layout(target);
1901 }
1902
1903 void target_free_all_working_areas(struct target *target)
1904 {
1905 target_free_all_working_areas_restore(target, 1);
1906 }
1907
1908 /* Find the largest number of bytes that can be allocated */
1909 uint32_t target_get_working_area_avail(struct target *target)
1910 {
1911 struct working_area *c = target->working_areas;
1912 uint32_t max_size = 0;
1913
1914 if (c == NULL)
1915 return target->working_area_size;
1916
1917 while (c) {
1918 if (c->free && max_size < c->size)
1919 max_size = c->size;
1920
1921 c = c->next;
1922 }
1923
1924 return max_size;
1925 }
1926
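/* Report the target's state to the user and, if it is halted, the
 * architecture-specific state as well. */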
1927 int target_arch_state(struct target *target)
1928 {
1929 int retval;
1930 if (target == NULL) {
1931 LOG_USER("No target has been configured");
1932 return ERROR_OK;
1933 }
1934
1935 LOG_USER("%s: target state: %s", target_name(target),
1936 target_state_name(target));
1937
1938 if (target->state != TARGET_HALTED)
1939 return ERROR_OK;
1940
1941 retval = target->type->arch_state(target);
1942 return retval;
1943 }
1944
1945 static int target_get_gdb_fileio_info_default(struct target *target,
1946 struct gdb_fileio_info *fileio_info)
1947 {
1948 /* If the target does not support semihosting, it does not need to
1949 * provide a .get_gdb_fileio_info callback. Returning ERROR_FAIL here
1950 * makes gdb_server report "Txx" (target halted) every time. */
1952 return ERROR_FAIL;
1953 }
1954
1955 static int target_gdb_fileio_end_default(struct target *target,
1956 int retcode, int fileio_errno, bool ctrl_c)
1957 {
1958 return ERROR_OK;
1959 }
1960
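/* Default profiling implementation: repeatedly halt the target, record the
 * cached PC register value and resume, until the sample buffer is full or the
 * requested number of seconds has elapsed. */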
1961 static int target_profiling_default(struct target *target, uint32_t *samples,
1962 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1963 {
1964 struct timeval timeout, now;
1965
1966 gettimeofday(&timeout, NULL);
1967 timeval_add_time(&timeout, seconds, 0);
1968
1969 LOG_INFO("Starting profiling. Halting and resuming the"
1970 " target as often as we can...");
1971
1972 uint32_t sample_count = 0;
1973 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
1974 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
1975
1976 int retval = ERROR_OK;
1977 for (;;) {
1978 target_poll(target);
1979 if (target->state == TARGET_HALTED) {
1980 uint32_t t = buf_get_u32(reg->value, 0, 32);
1981 samples[sample_count++] = t;
1982 /* current pc, addr = 0, do not handle breakpoints, not debugging */
1983 retval = target_resume(target, 1, 0, 0, 0);
1984 target_poll(target);
1985 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
1986 } else if (target->state == TARGET_RUNNING) {
1987 /* We want to quickly sample the PC. */
1988 retval = target_halt(target);
1989 } else {
1990 LOG_INFO("Target not halted or running");
1991 retval = ERROR_OK;
1992 break;
1993 }
1994
1995 if (retval != ERROR_OK)
1996 break;
1997
1998 gettimeofday(&now, NULL);
1999 if ((sample_count >= max_num_samples) || (now.tv_sec > timeout.tv_sec) ||
2000 ((now.tv_sec == timeout.tv_sec) && (now.tv_usec >= timeout.tv_usec))) {
2001 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2002 break;
2003 }
2004 }
2005
2006 *num_samples = sample_count;
2007 return retval;
2008 }
2009
2010 /* Writes of a single aligned 16- or 32-bit word are guaranteed to use an
2011 * access of that width; anything else is transferred as quickly as
2012 * possible.
2013 */
2014 int target_write_buffer(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
2015 {
2016 LOG_DEBUG("writing buffer of %i byte at 0x%8.8x",
2017 (int)size, (unsigned)address);
2018
2019 if (!target_was_examined(target)) {
2020 LOG_ERROR("Target not examined yet");
2021 return ERROR_FAIL;
2022 }
2023
2024 if (size == 0)
2025 return ERROR_OK;
2026
2027 if ((address + size - 1) < address) {
2028 /* GDB can request this when e.g. PC is 0xfffffffc */
2029 LOG_ERROR("address + size wrapped (0x%08x, 0x%08x)",
2030 (unsigned)address,
2031 (unsigned)size);
2032 return ERROR_FAIL;
2033 }
2034
2035 return target->type->write_buffer(target, address, size, buffer);
2036 }
2037
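/* Default write_buffer implementation: write single bytes/halfwords until the
 * address is aligned, then transfer the rest with the widest possible access
 * size. */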
2038 static int target_write_buffer_default(struct target *target, uint32_t address, uint32_t count, const uint8_t *buffer)
2039 {
2040 uint32_t size;
2041
2042 /* Align the address up to a boundary of at most 4 bytes. The loop condition
2043 * ensures the next pass is left with enough data for its larger access size. */
2044 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2045 if (address & size) {
2046 int retval = target_write_memory(target, address, size, 1, buffer);
2047 if (retval != ERROR_OK)
2048 return retval;
2049 address += size;
2050 count -= size;
2051 buffer += size;
2052 }
2053 }
2054
2055 /* Write the data with as large access size as possible. */
2056 for (; size > 0; size /= 2) {
2057 uint32_t aligned = count - count % size;
2058 if (aligned > 0) {
2059 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2060 if (retval != ERROR_OK)
2061 return retval;
2062 address += aligned;
2063 count -= aligned;
2064 buffer += aligned;
2065 }
2066 }
2067
2068 return ERROR_OK;
2069 }
2070
2071 /* Reads of a single aligned 16- or 32-bit word are guaranteed to use an
2072 * access of that width; anything else is transferred as quickly as
2073 * possible.
2074 */
2075 int target_read_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
2076 {
2077 LOG_DEBUG("reading buffer of %i byte at 0x%8.8x",
2078 (int)size, (unsigned)address);
2079
2080 if (!target_was_examined(target)) {
2081 LOG_ERROR("Target not examined yet");
2082 return ERROR_FAIL;
2083 }
2084
2085 if (size == 0)
2086 return ERROR_OK;
2087
2088 if ((address + size - 1) < address) {
2089 /* GDB can request this when e.g. PC is 0xfffffffc */
2090 LOG_ERROR("address + size wrapped (0x%08" PRIx32 ", 0x%08" PRIx32 ")",
2091 address,
2092 size);
2093 return ERROR_FAIL;
2094 }
2095
2096 return target->type->read_buffer(target, address, size, buffer);
2097 }
2098
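/* Default read_buffer implementation: mirror of target_write_buffer_default(),
 * aligning the address first and then reading with the widest possible access
 * size. */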
2099 static int target_read_buffer_default(struct target *target, uint32_t address, uint32_t count, uint8_t *buffer)
2100 {
2101 uint32_t size;
2102
2103 /* Align the address up to a boundary of at most 4 bytes. The loop condition
2104 * ensures the next pass is left with enough data for its larger access size. */
2105 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2106 if (address & size) {
2107 int retval = target_read_memory(target, address, size, 1, buffer);
2108 if (retval != ERROR_OK)
2109 return retval;
2110 address += size;
2111 count -= size;
2112 buffer += size;
2113 }
2114 }
2115
2116 /* Read the data with as large access size as possible. */
2117 for (; size > 0; size /= 2) {
2118 uint32_t aligned = count - count % size;
2119 if (aligned > 0) {
2120 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2121 if (retval != ERROR_OK)
2122 return retval;
2123 address += aligned;
2124 count -= aligned;
2125 buffer += aligned;
2126 }
2127 }
2128
2129 return ERROR_OK;
2130 }
2131
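/* Checksum a region of target memory. The target's own checksum_memory
 * handler is tried first; if it fails, the region is read back and the
 * checksum is computed on the host instead. */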
2132 int target_checksum_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* crc)
2133 {
2134 uint8_t *buffer;
2135 int retval;
2136 uint32_t i;
2137 uint32_t checksum = 0;
2138 if (!target_was_examined(target)) {
2139 LOG_ERROR("Target not examined yet");
2140 return ERROR_FAIL;
2141 }
2142
2143 retval = target->type->checksum_memory(target, address, size, &checksum);
2144 if (retval != ERROR_OK) {
2145 buffer = malloc(size);
2146 if (buffer == NULL) {
2147 LOG_ERROR("error allocating buffer for checksum (%d bytes)", (int)size);
2148 return ERROR_COMMAND_SYNTAX_ERROR;
2149 }
2150 retval = target_read_buffer(target, address, size, buffer);
2151 if (retval != ERROR_OK) {
2152 free(buffer);
2153 return retval;
2154 }
2155
2156 /* convert to target endianness */
2157 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2158 uint32_t target_data;
2159 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2160 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2161 }
2162
2163 retval = image_calculate_checksum(buffer, size, &checksum);
2164 free(buffer);
2165 }
2166
2167 *crc = checksum;
2168
2169 return retval;
2170 }
2171
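/* Check whether a region of target memory is blank (erased). Only available
 * when the target type provides a blank_check_memory handler. */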
2172 int target_blank_check_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* blank)
2173 {
2174 int retval;
2175 if (!target_was_examined(target)) {
2176 LOG_ERROR("Target not examined yet");
2177 return ERROR_FAIL;
2178 }
2179
2180 if (target->type->blank_check_memory == NULL)
2181 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2182
2183 retval = target->type->blank_check_memory(target, address, size, blank);
2184
2185 return retval;
2186 }
2187
2188 int target_read_u64(struct target *target, uint64_t address, uint64_t *value)
2189 {
2190 uint8_t value_buf[8];
2191 if (!target_was_examined(target)) {
2192 LOG_ERROR("Target not examined yet");
2193 return ERROR_FAIL;
2194 }
2195
2196 int retval = target_read_memory(target, address, 8, 1, value_buf);
2197
2198 if (retval == ERROR_OK) {
2199 *value = target_buffer_get_u64(target, value_buf);
2200 LOG_DEBUG("address: 0x%" PRIx64 ", value: 0x%16.16" PRIx64 "",
2201 address,
2202 *value);
2203 } else {
2204 *value = 0x0;
2205 LOG_DEBUG("address: 0x%" PRIx64 " failed",
2206 address);
2207 }
2208
2209 return retval;
2210 }
2211
2212 int target_read_u32(struct target *target, uint32_t address, uint32_t *value)
2213 {
2214 uint8_t value_buf[4];
2215 if (!target_was_examined(target)) {
2216 LOG_ERROR("Target not examined yet");
2217 return ERROR_FAIL;
2218 }
2219
2220 int retval = target_read_memory(target, address, 4, 1, value_buf);
2221
2222 if (retval == ERROR_OK) {
2223 *value = target_buffer_get_u32(target, value_buf);
2224 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
2225 address,
2226 *value);
2227 } else {
2228 *value = 0x0;
2229 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
2230 address);
2231 }
2232
2233 return retval;
2234 }
2235
2236 int target_read_u16(struct target *target, uint32_t address, uint16_t *value)
2237 {
2238 uint8_t value_buf[2];
2239 if (!target_was_examined(target)) {
2240 LOG_ERROR("Target not examined yet");
2241 return ERROR_FAIL;
2242 }
2243
2244 int retval = target_read_memory(target, address, 2, 1, value_buf);
2245
2246 if (retval == ERROR_OK) {
2247 *value = target_buffer_get_u16(target, value_buf);
2248 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
2249 address,
2250 *value);
2251 } else {
2252 *value = 0x0;
2253 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
2254 address);
2255 }
2256
2257 return retval;
2258 }
2259
2260 int target_read_u8(struct target *target, uint32_t address, uint8_t *value)
2261 {
2262 if (!target_was_examined(target)) {
2263 LOG_ERROR("Target not examined yet");
2264 return ERROR_FAIL;
2265 }
2266
2267 int retval = target_read_memory(target, address, 1, 1, value);
2268
2269 if (retval == ERROR_OK) {
2270 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
2271 address,
2272 *value);
2273 } else {
2274 *value = 0x0;
2275 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
2276 address);
2277 }
2278
2279 return retval;
2280 }
2281
2282 int target_write_u64(struct target *target, uint64_t address, uint64_t value)
2283 {
2284 int retval;
2285 uint8_t value_buf[8];
2286 if (!target_was_examined(target)) {
2287 LOG_ERROR("Target not examined yet");
2288 return ERROR_FAIL;
2289 }
2290
2291 LOG_DEBUG("address: 0x%" PRIx64 ", value: 0x%16.16" PRIx64 "",
2292 address,
2293 value);
2294
2295 target_buffer_set_u64(target, value_buf, value);
2296 retval = target_write_memory(target, address, 8, 1, value_buf);
2297 if (retval != ERROR_OK)
2298 LOG_DEBUG("failed: %i", retval);
2299
2300 return retval;
2301 }
2302
2303 int target_write_u32(struct target *target, uint32_t address, uint32_t value)
2304 {
2305 int retval;
2306 uint8_t value_buf[4];
2307 if (!target_was_examined(target)) {
2308 LOG_ERROR("Target not examined yet");
2309 return ERROR_FAIL;
2310 }
2311
2312 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
2313 address,
2314 value);
2315
2316 target_buffer_set_u32(target, value_buf, value);
2317 retval = target_write_memory(target, address, 4, 1, value_buf);
2318 if (retval != ERROR_OK)
2319 LOG_DEBUG("failed: %i", retval);
2320
2321 return retval;
2322 }
2323
2324 int target_write_u16(struct target *target, uint32_t address, uint16_t value)
2325 {
2326 int retval;
2327 uint8_t value_buf[2];
2328 if (!target_was_examined(target)) {
2329 LOG_ERROR("Target not examined yet");
2330 return ERROR_FAIL;
2331 }
2332
2333 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
2334 address,
2335 value);
2336
2337 target_buffer_set_u16(target, value_buf, value);
2338 retval = target_write_memory(target, address, 2, 1, value_buf);
2339 if (retval != ERROR_OK)
2340 LOG_DEBUG("failed: %i", retval);
2341
2342 return retval;
2343 }
2344
2345 int target_write_u8(struct target *target, uint32_t address, uint8_t value)
2346 {
2347 int retval;
2348 if (!target_was_examined(target)) {
2349 LOG_ERROR("Target not examined yet");
2350 return ERROR_FAIL;
2351 }
2352
2353 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
2354 address, value);
2355
2356 retval = target_write_memory(target, address, 1, 1, &value);
2357 if (retval != ERROR_OK)
2358 LOG_DEBUG("failed: %i", retval);
2359
2360 return retval;
2361 }
2362
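/* Look up a target by name and make it the current target of the command
 * context; fails if the name is unknown or its TAP is disabled. */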
2363 static int find_target(struct command_context *cmd_ctx, const char *name)
2364 {
2365 struct target *target = get_target(name);
2366 if (target == NULL) {
2367 LOG_ERROR("Target: %s is unknown, try one of:\n", name);
2368 return ERROR_FAIL;
2369 }
2370 if (!target->tap->enabled) {
2371 LOG_USER("Target: TAP %s is disabled, "
2372 "can't be the current target\n",
2373 target->tap->dotted_name);
2374 return ERROR_FAIL;
2375 }
2376
2377 cmd_ctx->current_target = target->target_number;
2378 return ERROR_OK;
2379 }
2380
2381
2382 COMMAND_HANDLER(handle_targets_command)
2383 {
2384 int retval = ERROR_OK;
2385 if (CMD_ARGC == 1) {
2386 retval = find_target(CMD_CTX, CMD_ARGV[0]);
2387 if (retval == ERROR_OK) {
2388 /* we're done! */
2389 return retval;
2390 }
2391 }
2392
2393 struct target *target = all_targets;
2394 command_print(CMD_CTX, " TargetName Type Endian TapName State ");
2395 command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
2396 while (target) {
2397 const char *state;
2398 char marker = ' ';
2399
2400 if (target->tap->enabled)
2401 state = target_state_name(target);
2402 else
2403 state = "tap-disabled";
2404
2405 if (CMD_CTX->current_target == target->target_number)
2406 marker = '*';
2407
2408 /* keep columns lined up to match the headers above */
2409 command_print(CMD_CTX,
2410 "%2d%c %-18s %-10s %-6s %-18s %s",
2411 target->target_number,
2412 marker,
2413 target_name(target),
2414 target_type_name(target),
2415 Jim_Nvp_value2name_simple(nvp_target_endian,
2416 target->endianness)->name,
2417 target->tap->dotted_name,
2418 state);
2419 target = target->next;
2420 }
2421
2422 return retval;
2423 }
2424
2425 /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
2426
2427 static int powerDropout;
2428 static int srstAsserted;
2429
2430 static int runPowerRestore;
2431 static int runPowerDropout;
2432 static int runSrstAsserted;
2433 static int runSrstDeasserted;
2434
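/* Sample the power-dropout and srst status lines and latch any edges into the
 * run* flags, rate-limiting repeated dropout/deassertion events to one every
 * two seconds. */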
2435 static int sense_handler(void)
2436 {
2437 static int prevSrstAsserted;
2438 static int prevPowerdropout;
2439
2440 int retval = jtag_power_dropout(&powerDropout);
2441 if (retval != ERROR_OK)
2442 return retval;
2443
2444 int powerRestored;
2445 powerRestored = prevPowerdropout && !powerDropout;
2446 if (powerRestored)
2447 runPowerRestore = 1;
2448
2449 long long current = timeval_ms();
2450 static long long lastPower;
2451 int waitMore = lastPower + 2000 > current;
2452 if (powerDropout && !waitMore) {
2453 runPowerDropout = 1;
2454 lastPower = current;
2455 }
2456
2457 retval = jtag_srst_asserted(&srstAsserted);
2458 if (retval != ERROR_OK)
2459 return retval;
2460
2461 int srstDeasserted;
2462 srstDeasserted = prevSrstAsserted && !srstAsserted;
2463
2464 static long long lastSrst;
2465 waitMore = lastSrst + 2000 > current;
2466 if (srstDeasserted && !waitMore) {
2467 runSrstDeasserted = 1;
2468 lastSrst = current;
2469 }
2470
2471 if (!prevSrstAsserted && srstAsserted)
2472 runSrstAsserted = 1;
2473
2474 prevSrstAsserted = srstAsserted;
2475 prevPowerdropout = powerDropout;
2476
2477 if (srstDeasserted || powerRestored) {
2478 /* Other than logging the event we can't do anything here.
2479 * Issuing a reset is a particularly bad idea as we might
2480 * be inside a reset already.
2481 */
2482 }
2483
2484 return ERROR_OK;
2485 }
2486
2487 /* process target state changes */
2488 static int handle_target(void *priv)
2489 {
2490 Jim_Interp *interp = (Jim_Interp *)priv;
2491 int retval = ERROR_OK;
2492
2493 if (!is_jtag_poll_safe()) {
2494 /* polling is disabled currently */
2495 return ERROR_OK;
2496 }
2497
2498 /* we do not want to recurse here... */
2499 static int recursive;
2500 if (!recursive) {
2501 recursive = 1;
2502 sense_handler();
2503 /* danger! running these procedures can trigger srst assertions and power dropouts.
2504 * We need to avoid an infinite loop/recursion here and we do that by
2505 * clearing the flags after running these events.
2506 */
2507 int did_something = 0;
2508 if (runSrstAsserted) {
2509 LOG_INFO("srst assertion detected, running srst_asserted proc.");
2510 Jim_Eval(interp, "srst_asserted");
2511 did_something = 1;
2512 }
2513 if (runSrstDeasserted) {
2514 Jim_Eval(interp, "srst_deasserted");
2515 did_something = 1;
2516 }
2517 if (runPowerDropout) {
2518 LOG_INFO("Power dropout detected, running power_dropout proc.");
2519 Jim_Eval(interp, "power_dropout");
2520 did_something = 1;
2521 }
2522 if (runPowerRestore) {
2523 Jim_Eval(interp, "power_restore");
2524 did_something = 1;
2525 }
2526
2527 if (did_something) {
2528 /* clear detect flags */
2529 sense_handler();
2530 }
2531
2532 /* clear action flags */
2533
2534 runSrstAsserted = 0;
2535 runSrstDeasserted = 0;
2536 runPowerRestore = 0;
2537 runPowerDropout = 0;
2538
2539 recursive = 0;
2540 }
2541
2542 /* Poll targets for state changes unless that's globally disabled.
2543 * Skip targets that are currently disabled.
2544 */
2545 for (struct target *target = all_targets;
2546 is_jtag_poll_safe() && target;
2547 target = target->next) {
2548
2549 if (!target_was_examined(target))
2550 continue;
2551
2552 if (!target->tap->enabled)
2553 continue;
2554
2555 if (target->backoff.times > target->backoff.count) {
2556 /* do not poll this time as we failed previously */
2557 target->backoff.count++;
2558 continue;
2559 }
2560 target->backoff.count = 0;
2561
2562 /* only poll target if we've got power and srst isn't asserted */
2563 if (!powerDropout && !srstAsserted) {
2564 /* polling may fail silently until the target has been examined */
2565 retval = target_poll(target);
2566 if (retval != ERROR_OK) {
2567 /* Polling failed: back off by roughly doubling the polling interval (default 100 ms) each failure, capped at 5000 ms */
2568 if (target->backoff.times * polling_interval < 5000) {
2569 target->backoff.times *= 2;
2570 target->backoff.times++;
2571 }
2572
2573 /* Tell GDB to halt the debugger. This allows the user to
2574 * run monitor commands to handle the situation.
2575 */
2576 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2577 }
2578 if (target->backoff.times > 0) {
2579 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
2580 target_reset_examined(target);
2581 retval = target_examine_one(target);
2582 /* Target examination could have failed due to unstable connection,
2583 * but we set the examined flag anyway to repoll it later */
2584 if (retval != ERROR_OK) {
2585 target->examined = true;
2586 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
2587 target->backoff.times * polling_interval);
2588 return retval;
2589 }
2590 }
2591
2592 /* Since we succeeded, we reset backoff count */
2593 target->backoff.times = 0;
2594 }
2595 }
2596
2597 return retval;
2598 }
2599
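/* 'reg' command: with no arguments list all registers, with one argument
 * display a single register (by number or name), with two arguments set its
 * value. */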
2600 COMMAND_HANDLER(handle_reg_command)
2601 {
2602 struct target *target;
2603 struct reg *reg = NULL;
2604 unsigned count = 0;
2605 char *value;
2606
2607 LOG_DEBUG("-");
2608
2609 target = get_current_target(CMD_CTX);
2610
2611 /* list all available registers for the current target */
2612 if (CMD_ARGC == 0) {
2613 struct reg_cache *cache = target->reg_cache;
2614
2615 count = 0;
2616 while (cache) {
2617 unsigned i;
2618
2619 command_print(CMD_CTX, "===== %s", cache->name);
2620
2621 for (i = 0, reg = cache->reg_list;
2622 i < cache->num_regs;
2623 i++, reg++, count++) {
2624 /* only print cached values if they are valid */
2625 if (reg->valid) {
2626 value = buf_to_str(reg->value,
2627 reg->size, 16);
2628 command_print(CMD_CTX,
2629 "(%i) %s (/%" PRIu32 "): 0x%s%s",
2630 count, reg->name,
2631 reg->size, value,
2632 reg->dirty
2633 ? " (dirty)"
2634 : "");
2635 free(value);
2636 } else {
2637 command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
2638 count, reg->name,
2639 reg->size);
2640 }
2641 }
2642 cache = cache->next;
2643 }
2644
2645 return ERROR_OK;
2646 }
2647
2648 /* access a single register by its ordinal number */
2649 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
2650 unsigned num;
2651 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
2652
2653 struct reg_cache *cache = target->reg_cache;
2654 count = 0;
2655 while (cache) {
2656 unsigned i;
2657 for (i = 0; i < cache->num_regs; i++) {
2658 if (count++ == num) {
2659 reg = &cache->reg_list[i];
2660 break;
2661 }
2662 }
2663 if (reg)
2664 break;
2665 cache = cache->next;
2666 }
2667
2668 if (!reg) {
2669 command_print(CMD_CTX, "%i is out of bounds, the current target "
2670 "has only %i registers (0 - %i)", num, count, count - 1);
2671 return ERROR_OK;
2672 }
2673 } else {
2674 /* access a single register by its name */
2675 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
2676
2677 if (!reg) {
2678 command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
2679 return ERROR_OK;
2680 }
2681 }
2682
2683 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
2684
2685 /* display a register */
2686 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
2687 && (CMD_ARGV[1][0] <= '9')))) {
2688 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
2689 reg->valid = 0;
2690
2691 if (reg->valid == 0)
2692 reg->type->get(reg);
2693 value = buf_to_str(reg->value, reg->size, 16);
2694 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2695 free(value);
2696 return ERROR_OK;
2697 }
2698
2699 /* set register value */
2700 if (CMD_ARGC == 2) {
2701 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
2702 if (buf == NULL)
2703 return ERROR_FAIL;
2704 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
2705
2706 reg->type->set(reg, buf);
2707
2708 value = buf_to_str(reg->value, reg->size, 16);
2709 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2710 free(value);
2711
2712 free(buf);
2713
2714 return ERROR_OK;
2715 }
2716
2717 return ERROR_COMMAND_SYNTAX_ERROR;
2718 }
2719
2720 COMMAND_HANDLER(handle_poll_command)
2721 {
2722 int retval = ERROR_OK;
2723 struct target *target = get_current_target(CMD_CTX);
2724
2725 if (CMD_ARGC == 0) {
2726 command_print(CMD_CTX, "background polling: %s",
2727 jtag_poll_get_enabled() ? "on" : "off");
2728 command_print(CMD_CTX, "TAP: %s (%s)",
2729 target->tap->dotted_name,
2730 target->tap->enabled ? "enabled" : "disabled");
2731 if (!target->tap->enabled)
2732 return ERROR_OK;
2733 retval = target_poll(target);
2734 if (retval != ERROR_OK)
2735 return retval;
2736 retval = target_arch_state(target);
2737 if (retval != ERROR_OK)
2738 return retval;
2739 } else if (CMD_ARGC == 1) {
2740 bool enable;
2741 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2742 jtag_poll_set_enabled(enable);
2743 } else
2744 return ERROR_COMMAND_SYNTAX_ERROR;
2745
2746 return retval;
2747 }
2748
2749 COMMAND_HANDLER(handle_wait_halt_command)
2750 {
2751 if (CMD_ARGC > 1)
2752 return ERROR_COMMAND_SYNTAX_ERROR;
2753
2754 unsigned ms = DEFAULT_HALT_TIMEOUT;
2755 if (1 == CMD_ARGC) {
2756 int retval = parse_uint(CMD_ARGV[0], &ms);
2757 if (ERROR_OK != retval)
2758 return ERROR_COMMAND_SYNTAX_ERROR;
2759 }
2760
2761 struct target *target = get_current_target(CMD_CTX);
2762 return target_wait_state(target, TARGET_HALTED, ms);
2763 }
2764
2765 /* wait for target state to change. The trick here is to have a low
2766 * latency for short waits and not to suck up all the CPU time
2767 * on longer waits.
2768 *
2769 * After 500ms, keep_alive() is invoked
2770 */
2771 int target_wait_state(struct target *target, enum target_state state, int ms)
2772 {
2773 int retval;
2774 long long then = 0, cur;
2775 int once = 1;
2776
2777 for (;;) {
2778 retval = target_poll(target);
2779 if (retval != ERROR_OK)
2780 return retval;
2781 if (target->state == state)
2782 break;
2783 cur = timeval_ms();
2784 if (once) {
2785 once = 0;
2786 then = timeval_ms();
2787 LOG_DEBUG("waiting for target %s...",
2788 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2789 }
2790
2791 if (cur-then > 500)
2792 keep_alive();
2793
2794 if ((cur-then) > ms) {
2795 LOG_ERROR("timed out while waiting for target %s",
2796 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2797 return ERROR_FAIL;
2798 }
2799 }
2800
2801 return ERROR_OK;
2802 }
2803
2804 COMMAND_HANDLER(handle_halt_command)
2805 {
2806 LOG_DEBUG("-");
2807
2808 struct target *target = get_current_target(CMD_CTX);
2809 int retval = target_halt(target);
2810 if (ERROR_OK != retval)
2811 return retval;
2812
2813 if (CMD_ARGC == 1) {
2814 unsigned wait_local;
2815 retval = parse_uint(CMD_ARGV[0], &wait_local);
2816 if (ERROR_OK != retval)
2817 return ERROR_COMMAND_SYNTAX_ERROR;
2818 if (!wait_local)
2819 return ERROR_OK;
2820 }
2821
2822 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2823 }
2824
2825 COMMAND_HANDLER(handle_soft_reset_halt_command)
2826 {
2827 struct target *target = get_current_target(CMD_CTX);
2828
2829 LOG_USER("requesting target halt and executing a soft reset");
2830
2831 target_soft_reset_halt(target);
2832
2833 return ERROR_OK;
2834 }
2835
2836 COMMAND_HANDLER(handle_reset_command)
2837 {
2838 if (CMD_ARGC > 1)
2839 return ERROR_COMMAND_SYNTAX_ERROR;
2840
2841 enum target_reset_mode reset_mode = RESET_RUN;
2842 if (CMD_ARGC == 1) {
2843 const Jim_Nvp *n;
2844 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
2845 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
2846 return ERROR_COMMAND_SYNTAX_ERROR;
2847 reset_mode = n->value;
2848 }
2849
2850 /* reset *all* targets */
2851 return target_process_reset(CMD_CTX, reset_mode);
2852 }
2853
2854
2855 COMMAND_HANDLER(handle_resume_command)
2856 {
2857 int current = 1;
2858 if (CMD_ARGC > 1)
2859 return ERROR_COMMAND_SYNTAX_ERROR;
2860
2861 struct target *target = get_current_target(CMD_CTX);
2862
2863 /* with no CMD_ARGV, resume from current pc, addr = 0,
2864 * with one arguments, addr = CMD_ARGV[0],
2865 * handle breakpoints, not debugging */
2866 uint32_t addr = 0;
2867 if (CMD_ARGC == 1) {
2868 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2869 current = 0;
2870 }
2871
2872 return target_resume(target, current, addr, 1, 0);
2873 }
2874
2875 COMMAND_HANDLER(handle_step_command)
2876 {
2877 if (CMD_ARGC > 1)
2878 return ERROR_COMMAND_SYNTAX_ERROR;
2879
2880 LOG_DEBUG("-");
2881
2882 /* with no CMD_ARGV, step from current pc, addr = 0,
2883 * with one argument addr = CMD_ARGV[0],
2884 * handle breakpoints, debugging */
2885 uint32_t addr = 0;
2886 int current_pc = 1;
2887 if (CMD_ARGC == 1) {
2888 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2889 current_pc = 0;
2890 }
2891
2892 struct target *target = get_current_target(CMD_CTX);
2893
2894 return target->type->step(target, current_pc, addr, 1);
2895 }
2896
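/* Pretty-print memory read by the md* commands, 32 bytes per line, formatted
 * according to the access size chosen by the caller. */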
2897 static void handle_md_output(struct command_context *cmd_ctx,
2898 struct target *target, uint32_t address, unsigned size,
2899 unsigned count, const uint8_t *buffer)
2900 {
2901 const unsigned line_bytecnt = 32;
2902 unsigned line_modulo = line_bytecnt / size;
2903
2904 char output[line_bytecnt * 4 + 1];
2905 unsigned output_len = 0;
2906
2907 const char *value_fmt;
2908 switch (size) {
2909 case 4:
2910 value_fmt = "%8.8x ";
2911 break;
2912 case 2:
2913 value_fmt = "%4.4x ";
2914 break;
2915 case 1:
2916 value_fmt = "%2.2x ";
2917 break;
2918 default:
2919 /* "can't happen", caller checked */
2920 LOG_ERROR("invalid memory read size: %u", size);
2921 return;
2922 }
2923
2924 for (unsigned i = 0; i < count; i++) {
2925 if (i % line_modulo == 0) {
2926 output_len += snprintf(output + output_len,
2927 sizeof(output) - output_len,
2928 "0x%8.8x: ",
2929 (unsigned)(address + (i*size)));
2930 }
2931
2932 uint32_t value = 0;
2933 const uint8_t *value_ptr = buffer + i * size;
2934 switch (size) {
2935 case 4:
2936 value = target_buffer_get_u32(target, value_ptr);
2937 break;
2938 case 2:
2939 value = target_buffer_get_u16(target, value_ptr);
2940 break;
2941 case 1:
2942 value = *value_ptr;
2943 }
2944 output_len += snprintf(output + output_len,
2945 sizeof(output) - output_len,
2946 value_fmt, value);
2947
2948 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
2949 command_print(cmd_ctx, "%s", output);
2950 output_len = 0;
2951 }
2952 }
2953 }
2954
2955 COMMAND_HANDLER(handle_md_command)
2956 {
2957 if (CMD_ARGC < 1)
2958 return ERROR_COMMAND_SYNTAX_ERROR;
2959
2960 unsigned size = 0;
2961 switch (CMD_NAME[2]) {
2962 case 'w':
2963 size = 4;
2964 break;
2965 case 'h':
2966 size = 2;
2967 break;
2968 case 'b':
2969 size = 1;
2970 break;
2971 default:
2972 return ERROR_COMMAND_SYNTAX_ERROR;
2973 }
2974
2975 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2976 int (*fn)(struct target *target,
2977 uint32_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
2978 if (physical) {
2979 CMD_ARGC--;
2980 CMD_ARGV++;
2981 fn = target_read_phys_memory;
2982 } else
2983 fn = target_read_memory;
2984 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
2985 return ERROR_COMMAND_SYNTAX_ERROR;
2986
2987 uint32_t address;
2988 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2989
2990 unsigned count = 1;
2991 if (CMD_ARGC == 2)
2992 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
2993
2994 uint8_t *buffer = calloc(count, size);
2995
2996 struct target *target = get_current_target(CMD_CTX);
2997 int retval = fn(target, address, size, count, buffer);
2998 if (ERROR_OK == retval)
2999 handle_md_output(CMD_CTX, target, address, size, count, buffer);
3000
3001 free(buffer);
3002
3003 return retval;
3004 }
3005
3006 typedef int (*target_write_fn)(struct target *target,
3007 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3008
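/* Fill target memory with a repeated value: the pattern is expanded into a
 * host-side buffer in target byte order and written in chunks, with
 * keep_alive() called between chunks to avoid GDB timeouts. */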
3009 static int target_fill_mem(struct target *target,
3010 uint32_t address,
3011 target_write_fn fn,
3012 unsigned data_size,
3013 /* value */
3014 uint32_t b,
3015 /* count */
3016 unsigned c)
3017 {
3018 /* We have to write in reasonably large chunks to be able
3019 * to fill large memory areas with any sane speed */
3020 const unsigned chunk_size = 16384;
3021 uint8_t *target_buf = malloc(chunk_size * data_size);
3022 if (target_buf == NULL) {
3023 LOG_ERROR("Out of memory");
3024 return ERROR_FAIL;
3025 }
3026
3027 for (unsigned i = 0; i < chunk_size; i++) {
3028 switch (data_size) {
3029 case 4:
3030 target_buffer_set_u32(target, target_buf + i * data_size, b);
3031 break;
3032 case 2:
3033 target_buffer_set_u16(target, target_buf + i * data_size, b);
3034 break;
3035 case 1:
3036 target_buffer_set_u8(target, target_buf + i * data_size, b);
3037 break;
3038 default:
3039 exit(-1);
3040 }
3041 }
3042
3043 int retval = ERROR_OK;
3044
3045 for (unsigned x = 0; x < c; x += chunk_size) {
3046 unsigned current;
3047 current = c - x;
3048 if (current > chunk_size)
3049 current = chunk_size;
3050 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3051 if (retval != ERROR_OK)
3052 break;
3053 /* avoid GDB timeouts */
3054 keep_alive();
3055 }
3056 free(target_buf);
3057
3058 return retval;
3059 }
3060
3061
3062 COMMAND_HANDLER(handle_mw_command)
3063 {
3064 if (CMD_ARGC < 2)
3065 return ERROR_COMMAND_SYNTAX_ERROR;
3066 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3067 target_write_fn fn;
3068 if (physical) {
3069 CMD_ARGC--;
3070 CMD_ARGV++;
3071 fn = target_write_phys_memory;
3072 } else
3073 fn = target_write_memory;
3074 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3075 return ERROR_COMMAND_SYNTAX_ERROR;
3076
3077 uint32_t address;
3078 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
3079
3080 uint32_t value;
3081 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3082
3083 unsigned count = 1;
3084 if (CMD_ARGC == 3)
3085 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3086
3087 struct target *target = get_current_target(CMD_CTX);
3088 unsigned wordsize;
3089 switch (CMD_NAME[2]) {
3090 case 'w':
3091 wordsize = 4;
3092 break;
3093 case 'h':
3094 wordsize = 2;
3095 break;
3096 case 'b':
3097 wordsize = 1;
3098 break;
3099 default:
3100 return ERROR_COMMAND_SYNTAX_ERROR;
3101 }
3102
3103 return target_fill_mem(target, address, fn, wordsize, value, count);
3104 }
3105
3106 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
3107 uint32_t *min_address, uint32_t *max_address)
3108 {
3109 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3110 return ERROR_COMMAND_SYNTAX_ERROR;
3111
3112 /* a base address isn't always necessary,
3113 * default to 0x0 (i.e. don't relocate) */
3114 if (CMD_ARGC >= 2) {
3115 uint32_t addr;
3116 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
3117 image->base_address = addr;
3118 image->base_address_set = 1;
3119 } else
3120 image->base_address_set = 0;
3121
3122 image->start_address_set = 0;
3123
3124 if (CMD_ARGC >= 4)
3125 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], *min_address);
3126 if (CMD_ARGC == 5) {
3127 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], *max_address);
3128 /* use size (given) to find max (required) */
3129 *max_address += *min_address;
3130 }
3131
3132 if (*min_address > *max_address)
3133 return ERROR_COMMAND_SYNTAX_ERROR;
3134
3135 return ERROR_OK;
3136 }
3137
3138 COMMAND_HANDLER(handle_load_image_command)
3139 {
3140 uint8_t *buffer;
3141 size_t buf_cnt;
3142 uint32_t image_size;
3143 uint32_t min_address = 0;
3144 uint32_t max_address = 0xffffffff;
3145 int i;
3146 struct image image;
3147
3148 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
3149 &image, &min_address, &max_address);
3150 if (ERROR_OK != retval)
3151 return retval;
3152
3153 struct target *target = get_current_target(CMD_CTX);
3154
3155 struct duration bench;
3156 duration_start(&bench);
3157
3158 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3159 return ERROR_OK;
3160
3161 image_size = 0x0;
3162 retval = ERROR_OK;
3163 for (i = 0; i < image.num_sections; i++) {
3164 buffer = malloc(image.sections[i].size);
3165 if (buffer == NULL) {
3166 command_print(CMD_CTX,
3167 "error allocating buffer for section (%d bytes)",
3168 (int)(image.sections[i].size));
3169 break;
3170 }
3171
3172 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3173 if (retval != ERROR_OK) {
3174 free(buffer);
3175 break;
3176 }
3177
3178 uint32_t offset = 0;
3179 uint32_t length = buf_cnt;
3180
3181 /* DANGER!!! beware of unsigned comparison here!!! */
3182
3183 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3184 (image.sections[i].base_address < max_address)) {
3185
3186 if (image.sections[i].base_address < min_address) {
3187 /* clip addresses below */
3188 offset += min_address-image.sections[i].base_address;
3189 length -= offset;
3190 }
3191
3192 if (image.sections[i].base_address + buf_cnt > max_address)
3193 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3194
3195 retval = target_write_buffer(target,
3196 image.sections[i].base_address + offset, length, buffer + offset);
3197 if (retval != ERROR_OK) {
3198 free(buffer);
3199 break;
3200 }
3201 image_size += length;
3202 command_print(CMD_CTX, "%u bytes written at address 0x%8.8" PRIx32 "",
3203 (unsigned int)length,
3204 image.sections[i].base_address + offset);
3205 }
3206
3207 free(buffer);
3208 }
3209
3210 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3211 command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
3212 "in %fs (%0.3f KiB/s)", image_size,
3213 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3214 }
3215
3216 image_close(&image);
3217
3218 return retval;
3219
3220 }
3221
3222 COMMAND_HANDLER(handle_dump_image_command)
3223 {
3224 struct fileio *fileio;
3225 uint8_t *buffer;
3226 int retval, retvaltemp;
3227 uint32_t address, size;
3228 struct duration bench;
3229 struct target *target = get_current_target(CMD_CTX);
3230
3231 if (CMD_ARGC != 3)
3232 return ERROR_COMMAND_SYNTAX_ERROR;
3233
3234 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], address);
3235 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], size);
3236
3237 uint32_t buf_size = (size > 4096) ? 4096 : size;
3238 buffer = malloc(buf_size);
3239 if (!buffer)
3240 return ERROR_FAIL;
3241
3242 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3243 if (retval != ERROR_OK) {
3244 free(buffer);
3245 return retval;
3246 }
3247
3248 duration_start(&bench);
3249
3250 while (size > 0) {
3251 size_t size_written;
3252 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3253 retval = target_read_buffer(target, address, this_run_size, buffer);
3254 if (retval != ERROR_OK)
3255 break;
3256
3257 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3258 if (retval != ERROR_OK)
3259 break;
3260
3261 size -= this_run_size;
3262 address += this_run_size;
3263 }
3264
3265 free(buffer);
3266
3267 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3268 size_t filesize;
3269 retval = fileio_size(fileio, &filesize);
3270 if (retval != ERROR_OK)
3271 return retval;
3272 command_print(CMD_CTX,
3273 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3274 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3275 }
3276
3277 retvaltemp = fileio_close(fileio);
3278 if (retvaltemp != ERROR_OK)
3279 return retvaltemp;
3280
3281 return retval;
3282 }
3283
3284 static COMMAND_HELPER(handle_verify_image_command_internal, int verify)
3285 {
3286 uint8_t *buffer;
3287 size_t buf_cnt;
3288 uint32_t image_size;
3289 int i;
3290 int retval;
3291 uint32_t checksum = 0;
3292 uint32_t mem_checksum = 0;
3293
3294 struct image image;
3295
3296 struct target *target = get_current_target(CMD_CTX);
3297
3298 if (CMD_ARGC < 1)
3299 return ERROR_COMMAND_SYNTAX_ERROR;
3300
3301 if (!target) {
3302 LOG_ERROR("no target selected");
3303 return ERROR_FAIL;
3304 }
3305
3306 struct duration bench;
3307 duration_start(&bench);
3308
3309 if (CMD_ARGC >= 2) {
3310 uint32_t addr;
3311 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
3312 image.base_address = addr;
3313 image.base_address_set = 1;
3314 } else {
3315 image.base_address_set = 0;
3316 image.base_address = 0x0;
3317 }
3318
3319 image.start_address_set = 0;
3320
3321 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3322 if (retval != ERROR_OK)
3323 return retval;
3324
3325 image_size = 0x0;
3326 int diffs = 0;
3327 retval = ERROR_OK;
3328 for (i = 0; i < image.num_sections; i++) {
3329 buffer = malloc(image.sections[i].size);
3330 if (buffer == NULL) {
3331 command_print(CMD_CTX,
3332 "error allocating buffer for section (%d bytes)",
3333 (int)(image.sections[i].size));
3334 break;
3335 }
3336 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3337 if (retval != ERROR_OK) {
3338 free(buffer);
3339 break;
3340 }
3341
3342 if (verify) {
3343 /* calculate checksum of image */
3344 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3345 if (retval != ERROR_OK) {
3346 free(buffer);
3347 break;
3348 }
3349
3350 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3351 if (retval != ERROR_OK) {
3352 free(buffer);
3353 break;
3354 }
3355
3356 if (checksum != mem_checksum) {
3357 /* failed crc checksum, fall back to a binary compare */
3358 uint8_t *data;
3359
3360 if (diffs == 0)
3361 LOG_ERROR("checksum mismatch - attempting binary compare");
3362
3363 data = malloc(buf_cnt);
3364
3365 /* Can we use 32bit word accesses? */
3366 int size = 1;
3367 int count = buf_cnt;
3368 if ((count % 4) == 0) {
3369 size *= 4;
3370 count /= 4;
3371 }
3372 retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
3373 if (retval == ERROR_OK) {
3374 uint32_t t;
3375 for (t = 0; t < buf_cnt; t++) {
3376 if (data[t] != buffer[t]) {
3377 command_print(CMD_CTX,
3378 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3379 diffs,
3380 (unsigned)(t + image.sections[i].base_address),
3381 data[t],
3382 buffer[t]);
3383 if (diffs++ >= 127) {
3384 command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
3385 free(data);
3386 free(buffer);
3387 goto done;
3388 }
3389 }
3390 keep_alive();
3391 }
3392 }
3393 free(data);
3394 }
3395 } else {
3396 command_print(CMD_CTX, "address 0x%08" PRIx32 " length 0x%08zx",
3397 image.sections[i].base_address,
3398 buf_cnt);
3399 }
3400
3401 free(buffer);
3402 image_size += buf_cnt;
3403 }
3404 if (diffs > 0)
3405 command_print(CMD_CTX, "No more differences found.");
3406 done:
3407 if (diffs > 0)
3408 retval = ERROR_FAIL;
3409 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3410 command_print(CMD_CTX, "verified %" PRIu32 " bytes "
3411 "in %fs (%0.3f KiB/s)", image_size,
3412 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3413 }
3414
3415 image_close(&image);
3416
3417 return retval;
3418 }
3419
3420 COMMAND_HANDLER(handle_verify_image_command)
3421 {
3422 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 1);
3423 }
3424
3425 COMMAND_HANDLER(handle_test_image_command)
3426 {
3427 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 0);
3428 }
3429
3430 static int handle_bp_command_list(struct command_context *cmd_ctx)
3431 {
3432 struct target *target = get_current_target(cmd_ctx);
3433 struct breakpoint *breakpoint = target->breakpoints;
3434 while (breakpoint) {
3435 if (breakpoint->type == BKPT_SOFT) {
3436 char *buf = buf_to_str(breakpoint->orig_instr,
3437 breakpoint->length, 16);
3438 command_print(cmd_ctx, "IVA breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i, 0x%s",
3439 breakpoint->address,
3440 breakpoint->length,
3441 breakpoint->set, buf);
3442 free(buf);
3443 } else {
3444 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3445 command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3446 breakpoint->asid,
3447 breakpoint->length, breakpoint->set);
3448 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3449 command_print(cmd_ctx, "Hybrid breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
3450 breakpoint->address,
3451 breakpoint->length, breakpoint->set);
3452 command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3453 breakpoint->asid);
3454 } else
3455 command_print(cmd_ctx, "Breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
3456 breakpoint->address,
3457 breakpoint->length, breakpoint->set);
3458 }
3459
3460 breakpoint = breakpoint->next;
3461 }
3462 return ERROR_OK;
3463 }
3464
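/* Helper for the 'bp' command: depending on which of addr/asid is non-zero,
 * add a plain (IVA), context (ASID) or hybrid breakpoint. */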
3465 static int handle_bp_command_set(struct command_context *cmd_ctx,
3466 uint32_t addr, uint32_t asid, uint32_t length, int hw)
3467 {
3468 struct target *target = get_current_target(cmd_ctx);
3469 int retval;
3470
3471 if (asid == 0) {
3472 retval = breakpoint_add(target, addr, length, hw);
3473 if (ERROR_OK == retval)
3474 command_print(cmd_ctx, "breakpoint set at 0x%8.8" PRIx32 "", addr);
3475 else {
3476 LOG_ERROR("Failure setting breakpoint, the same address (IVA) is already used");
3477 return retval;
3478 }
3479 } else if (addr == 0) {
3480 if (target->type->add_context_breakpoint == NULL) {
3481 LOG_WARNING("Context breakpoint not available");
3482 return ERROR_OK;
3483 }
3484 retval = context_breakpoint_add(target, asid, length, hw);
3485 if (ERROR_OK == retval)
3486 command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3487 else {
3488 LOG_ERROR("Failure setting breakpoint, the same address (CONTEXTID) is already used");
3489 return retval;
3490 }
3491 } else {
3492 if (target->type->add_hybrid_breakpoint == NULL) {
3493 LOG_WARNING("Hybrid breakpoint not available");
3494 return ERROR_OK;
3495 }
3496 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3497 if (ERROR_OK == retval)
3498 command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3499 else {
3500 LOG_ERROR("Failure setting breakpoint, the same address is already used");
3501 return retval;
3502 }
3503 }
3504 return ERROR_OK;
3505 }
3506
3507 COMMAND_HANDLER(handle_bp_command)
3508 {
3509 uint32_t addr;
3510 uint32_t asid;
3511 uint32_t length;
3512 int hw = BKPT_SOFT;
3513
3514 switch (CMD_ARGC) {
3515 case 0:
3516 return handle_bp_command_list(CMD_CTX);
3517
3518 case 2:
3519 asid = 0;
3520 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3521 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3522 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3523
3524 case 3:
3525 if (strcmp(CMD_ARGV[2], "hw") == 0) {
3526 hw = BKPT_HARD;
3527 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3528
3529 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3530
3531 asid = 0;
3532 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3533 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
3534 hw = BKPT_HARD;
3535 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
3536 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3537 addr = 0;
3538 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3539 }
3540
3541 case 4:
3542 hw = BKPT_HARD;
3543 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3544 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
3545 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
3546 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3547
3548 default:
3549 return ERROR_COMMAND_SYNTAX_ERROR;
3550 }
3551 }
3552
3553 COMMAND_HANDLER(handle_rbp_command)
3554 {
3555 if (CMD_ARGC != 1)
3556 return ERROR_COMMAND_SYNTAX_ERROR;
3557
3558 uint32_t addr;
3559 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3560
3561 struct target *target = get_current_target(CMD_CTX);
3562 breakpoint_remove(target, addr);
3563
3564 return ERROR_OK;
3565 }
3566
3567 COMMAND_HANDLER(handle_wp_command)
3568 {
3569 struct target *target = get_current_target(CMD_CTX);
3570
3571 if (CMD_ARGC == 0) {
3572 struct watchpoint *watchpoint = target->watchpoints;
3573
3574 while (watchpoint) {
3575 command_print(CMD_CTX, "address: 0x%8.8" PRIx32
3576 ", len: 0x%8.8" PRIx32
3577 ", r/w/a: %i, value: 0x%8.8" PRIx32
3578 ", mask: 0x%8.8" PRIx32,
3579 watchpoint->address,
3580 watchpoint->length,
3581 (int)watchpoint->rw,
3582 watchpoint->value,
3583 watchpoint->mask);
3584 watchpoint = watchpoint->next;
3585 }
3586 return ERROR_OK;
3587 }
3588
3589 enum watchpoint_rw type = WPT_ACCESS;
3590 uint32_t addr = 0;
3591 uint32_t length = 0;
3592 uint32_t data_value = 0x0;
3593 uint32_t data_mask = 0xffffffff;
3594
3595 switch (CMD_ARGC) {
3596 case 5:
3597 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
3598 /* fall through */
3599 case 4:
3600 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
3601 /* fall through */
3602 case 3:
3603 switch (CMD_ARGV[2][0]) {
3604 case 'r':
3605 type = WPT_READ;
3606 break;
3607 case 'w':
3608 type = WPT_WRITE;
3609 break;
3610 case 'a':
3611 type = WPT_ACCESS;
3612 break;
3613 default:
3614 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
3615 return ERROR_COMMAND_SYNTAX_ERROR;
3616 }
3617 /* fall through */
3618 case 2:
3619 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3620 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3621 break;
3622
3623 default:
3624 return ERROR_COMMAND_SYNTAX_ERROR;
3625 }
3626
3627 int retval = watchpoint_add(target, addr, length, type,
3628 data_value, data_mask);
3629 if (ERROR_OK != retval)
3630 LOG_ERROR("Failure setting watchpoints");
3631
3632 return retval;
3633 }
3634
3635 COMMAND_HANDLER(handle_rwp_command)
3636 {
3637 if (CMD_ARGC != 1)
3638 return ERROR_COMMAND_SYNTAX_ERROR;
3639
3640 uint32_t addr;
3641 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3642
3643 struct target *target = get_current_target(CMD_CTX);
3644 watchpoint_remove(target, addr);
3645
3646 return ERROR_OK;
3647 }
3648
3649 /**
3650 * Translate a virtual address to a physical address.
3651 *
3652 * The low-level target implementation must have logged a detailed error
3653 * which is forwarded to telnet/GDB session.
3654 */
3655 COMMAND_HANDLER(handle_virt2phys_command)
3656 {
3657 if (CMD_ARGC != 1)
3658 return ERROR_COMMAND_SYNTAX_ERROR;
3659
3660 uint32_t va;
3661 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], va);
3662 uint32_t pa;
3663
3664 struct target *target = get_current_target(CMD_CTX);
3665 int retval = target->type->virt2phys(target, va, &pa);
3666 if (retval == ERROR_OK)
3667 command_print(CMD_CTX, "Physical address 0x%08" PRIx32 "", pa);
3668
3669 return retval;
3670 }
3671
3672 static void writeData(FILE *f, const void *data, size_t len)
3673 {
3674 size_t written = fwrite(data, 1, len, f);
3675 if (written != len)
3676 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
3677 }
3678
3679 static void writeLong(FILE *f, int l, struct target *target)
3680 {
3681 uint8_t val[4];
3682
3683 target_buffer_set_u32(target, val, l);
3684 writeData(f, val, 4);
3685 }
3686
3687 static void writeString(FILE *f, char *s)
3688 {
3689 writeData(f, s, strlen(s));
3690 }
3691
3692 typedef unsigned char UNIT[2]; /* unit of profiling */
3693
3694 /* Dump a gmon.out histogram file. */
3695 static void write_gmon(uint32_t *samples, uint32_t sampleNum, const char *filename, bool with_range,
3696 uint32_t start_address, uint32_t end_address, struct target *target)
3697 {
3698 uint32_t i;
3699 FILE *f = fopen(filename, "w");
3700 if (f == NULL)
3701 return;
3702 writeString(f, "gmon");
3703 writeLong(f, 0x00000001, target); /* Version */
3704 writeLong(f, 0, target); /* padding */
3705 writeLong(f, 0, target); /* padding */
3706 writeLong(f, 0, target); /* padding */
3707
3708 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
3709 writeData(f, &zero, 1);
3710
3711 /* figure out bucket size */
3712 uint32_t min;
3713 uint32_t max;
3714 if (with_range) {
3715 min = start_address;
3716 max = end_address;
3717 } else {
3718 min = samples[0];
3719 max = samples[0];
3720 for (i = 0; i < sampleNum; i++) {
3721 if (min > samples[i])
3722 min = samples[i];
3723 if (max < samples[i])
3724 max = samples[i];
3725 }
3726
3727 /* max should be (largest sample + 1)
3728 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
3729 max++;
3730 }
3731
3732 int addressSpace = max - min;
3733 assert(addressSpace >= 2);
3734
3735 /* FIXME: What is a reasonable number of buckets?
3736 * The profiling result is more accurate with more buckets. */
3737 static const uint32_t maxBuckets = 128 * 1024; /* maximum buckets. */
3738 uint32_t numBuckets = addressSpace / sizeof(UNIT);
3739 if (numBuckets > maxBuckets)
3740 numBuckets = maxBuckets;
3741 int *buckets = malloc(sizeof(int) * numBuckets);
3742 if (buckets == NULL) {
3743 fclose(f);
3744 return;
3745 }
3746 memset(buckets, 0, sizeof(int) * numBuckets);
3747 for (i = 0; i < sampleNum; i++) {
3748 uint32_t address = samples[i];
3749
3750 if ((address < min) || (max <= address))
3751 continue;
3752
3753 long long a = address - min;
3754 long long b = numBuckets;
3755 long long c = addressSpace;
3756 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
3757 buckets[index_t]++;
3758 }
3759
3760 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
3761 writeLong(f, min, target); /* low_pc */
3762 writeLong(f, max, target); /* high_pc */
3763 writeLong(f, numBuckets, target); /* # of buckets */
3764 writeLong(f, 100, target); /* KLUDGE! We lie, ca. 100Hz best case. */
3765 writeString(f, "seconds");
3766 for (i = 0; i < (15-strlen("seconds")); i++)
3767 writeData(f, &zero, 1);
3768 writeString(f, "s");
3769
3770 /* append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
3771
3772 char *data = malloc(2 * numBuckets);
3773 if (data != NULL) {
3774 for (i = 0; i < numBuckets; i++) {
3775 int val;
3776 val = buckets[i];
3777 if (val > 65535)
3778 val = 65535;
3779 data[i * 2] = val&0xff;
3780 data[i * 2 + 1] = (val >> 8) & 0xff;
3781 }
3782 free(buckets);
3783 writeData(f, data, numBuckets * 2);
3784 free(data);
3785 } else
3786 free(buckets);
3787
3788 fclose(f);
3789 }
3790
3791 /* Profiling samples the CPU's PC as quickly as OpenOCD is able,
3792 * producing a statistical sampling of where the program spends its time. */
3793 COMMAND_HANDLER(handle_profile_command)
3794 {
3795 struct target *target = get_current_target(CMD_CTX);
3796
3797 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
3798 return ERROR_COMMAND_SYNTAX_ERROR;
3799
3800 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
3801 uint32_t offset;
3802 uint32_t num_of_samples;
3803 int retval = ERROR_OK;
3804
3805 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
3806
3807 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
3808 if (samples == NULL) {
3809 LOG_ERROR("No memory to store samples.");
3810 return ERROR_FAIL;
3811 }
3812
3813 /**
3814 * Some cores let us sample the PC without the
3815 * annoying halt/resume step; for example, ARMv7 PCSR.
3816 * Provide a way to use that more efficient mechanism.
3817 */
3818 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
3819 &num_of_samples, offset);
3820 if (retval != ERROR_OK) {
3821 free(samples);
3822 return retval;
3823 }
3824
3825 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
3826
3827 retval = target_poll(target);
3828 if (retval != ERROR_OK) {
3829 free(samples);
3830 return retval;
3831 }
3832 if (target->state == TARGET_RUNNING) {
3833 retval = target_halt(target);
3834 if (retval != ERROR_OK) {
3835 free(samples);
3836 return retval;
3837 }
3838 }
3839
3840 retval = target_poll(target);
3841 if (retval != ERROR_OK) {
3842 free(samples);
3843 return retval;
3844 }
3845
3846 uint32_t start_address = 0;
3847 uint32_t end_address = 0;
3848 bool with_range = false;
3849 if (CMD_ARGC == 4) {
3850 with_range = true;
3851 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
3852 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
3853 }
3854
3855 write_gmon(samples, num_of_samples, CMD_ARGV[1],
3856 with_range, start_address, end_address, target);
3857 command_print(CMD_CTX, "Wrote %s", CMD_ARGV[1]);
3858
3859 free(samples);
3860 return retval;
3861 }
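
/*
 * Usage sketch for the handler above (argument meanings are taken from the
 * parsing code; the concrete values are illustrative only):
 *
 *   profile 30 gmon.out                        ; sample for ~30 seconds
 *   profile 30 gmon.out 0x08000000 0x08020000  ; restrict histogram range
 *
 * The resulting gmon.out can then be analyzed with gprof together with the
 * matching ELF image.
 */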
3862
3863 static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val)
3864 {
3865 char *namebuf;
3866 Jim_Obj *nameObjPtr, *valObjPtr;
3867 int result;
3868
3869 namebuf = alloc_printf("%s(%d)", varname, idx);
3870 if (!namebuf)
3871 return JIM_ERR;
3872
3873 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
3874 valObjPtr = Jim_NewIntObj(interp, val);
3875 if (!nameObjPtr || !valObjPtr) {
3876 free(namebuf);
3877 return JIM_ERR;
3878 }
3879
3880 Jim_IncrRefCount(nameObjPtr);
3881 Jim_IncrRefCount(valObjPtr);
3882 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
3883 Jim_DecrRefCount(interp, nameObjPtr);
3884 Jim_DecrRefCount(interp, valObjPtr);
3885 free(namebuf);
3886 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
3887 return result;
3888 }
3889
3890 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
3891 {
3892 struct command_context *context;
3893 struct target *target;
3894
3895 context = current_command_context(interp);
3896 assert(context != NULL);
3897
3898 target = get_current_target(context);
3899 if (target == NULL) {
3900 LOG_ERROR("mem2array: no current target");
3901 return JIM_ERR;
3902 }
3903
3904 return target_mem2array(interp, target, argc - 1, argv + 1);
3905 }
3906
3907 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
3908 {
3909 long l;
3910 uint32_t width;
3911 int len;
3912 uint32_t addr;
3913 uint32_t count;
3914 uint32_t v;
3915 const char *varname;
3916 int n, e, retval;
3917 uint32_t i;
3918
3919 /* argv[1] = name of array to receive the data
3920 * argv[2] = desired width
3921 * argv[3] = memory address
3922 * argv[4] = count of times to read
3923 */
3924 if (argc != 4) {
3925 Jim_WrongNumArgs(interp, 1, argv, "varname width addr nelems");
3926 return JIM_ERR;
3927 }
3928 varname = Jim_GetString(argv[0], &len);
3929 /* given "foo" get space for worse case "foo(%d)" .. add 20 */
3930
3931 e = Jim_GetLong(interp, argv[1], &l);
3932 width = l;
3933 if (e != JIM_OK)
3934 return e;
3935
3936 e = Jim_GetLong(interp, argv[2], &l);
3937 addr = l;
3938 if (e != JIM_OK)
3939 return e;
3940 e = Jim_GetLong(interp, argv[3], &l);
3941 len = l;
3942 if (e != JIM_OK)
3943 return e;
3944 switch (width) {
3945 case 8:
3946 width = 1;
3947 break;
3948 case 16:
3949 width = 2;
3950 break;
3951 case 32:
3952 width = 4;
3953 break;
3954 default:
3955 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3956 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
3957 return JIM_ERR;
3958 }
3959 if (len == 0) {
3960 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3961 		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero element count?", NULL);
3962 return JIM_ERR;
3963 }
3964 if ((addr + (len * width)) < addr) {
3965 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3966 		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len wraps to zero?", NULL);
3967 return JIM_ERR;
3968 }
3969 /* absurd transfer size? */
3970 if (len > 65536) {
3971 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3972 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
3973 return JIM_ERR;
3974 }
3975
3976 if ((width == 1) ||
3977 ((width == 2) && ((addr & 1) == 0)) ||
3978 ((width == 4) && ((addr & 3) == 0))) {
3979 /* all is well */
3980 } else {
3981 char buf[100];
3982 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3983 sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRId32 " byte reads",
3984 addr,
3985 width);
3986 Jim_AppendStrings(interp, Jim_GetResult(interp), buf , NULL);
3987 return JIM_ERR;
3988 }
3989
3990 /* Transfer loop */
3991
3992 /* index counter */
3993 n = 0;
3994
3995 size_t buffersize = 4096;
3996 uint8_t *buffer = malloc(buffersize);
3997 if (buffer == NULL)
3998 return JIM_ERR;
3999
4000 /* assume ok */
4001 e = JIM_OK;
4002 while (len) {
4003 /* Slurp... in buffer size chunks */
4004
4005 count = len; /* in objects.. */
4006 if (count > (buffersize / width))
4007 count = (buffersize / width);
4008
4009 retval = target_read_memory(target, addr, width, count, buffer);
4010 if (retval != ERROR_OK) {
4011 /* BOO !*/
4012 LOG_ERROR("mem2array: Read @ 0x%08x, w=%d, cnt=%d, failed",
4013 (unsigned int)addr,
4014 (int)width,
4015 (int)count);
4016 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4017 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4018 e = JIM_ERR;
4019 break;
4020 } else {
4021 v = 0; /* shut up gcc */
4022 for (i = 0; i < count ; i++, n++) {
4023 switch (width) {
4024 case 4:
4025 v = target_buffer_get_u32(target, &buffer[i*width]);
4026 break;
4027 case 2:
4028 v = target_buffer_get_u16(target, &buffer[i*width]);
4029 break;
4030 case 1:
4031 v = buffer[i] & 0x0ff;
4032 break;
4033 }
4034 new_int_array_element(interp, varname, n, v);
4035 }
4036 len -= count;
4037 addr += count * width;
4038 }
4039 }
4040
4041 free(buffer);
4042
4043 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4044
4045 return e;
4046 }
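
/*
 * Tcl usage sketch for the helper above (reachable via the mem2array
 * command; names and addresses are illustrative):
 *
 *   mem2array readings 32 0x20000000 8
 *   puts $readings(0)
 *
 * fills the Tcl array "readings" with eight 32-bit words read from
 * target address 0x20000000.
 */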
4047
4048 static int get_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t *val)
4049 {
4050 char *namebuf;
4051 Jim_Obj *nameObjPtr, *valObjPtr;
4052 int result;
4053 long l;
4054
4055 namebuf = alloc_printf("%s(%d)", varname, idx);
4056 if (!namebuf)
4057 return JIM_ERR;
4058
4059 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4060 if (!nameObjPtr) {
4061 free(namebuf);
4062 return JIM_ERR;
4063 }
4064
4065 Jim_IncrRefCount(nameObjPtr);
4066 valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG);
4067 Jim_DecrRefCount(interp, nameObjPtr);
4068 free(namebuf);
4069 if (valObjPtr == NULL)
4070 return JIM_ERR;
4071
4072 result = Jim_GetLong(interp, valObjPtr, &l);
4073 /* printf("%s(%d) => 0%08x\n", varname, idx, val); */
4074 *val = l;
4075 return result;
4076 }
4077
4078 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4079 {
4080 struct command_context *context;
4081 struct target *target;
4082
4083 context = current_command_context(interp);
4084 assert(context != NULL);
4085
4086 target = get_current_target(context);
4087 if (target == NULL) {
4088 LOG_ERROR("array2mem: no current target");
4089 return JIM_ERR;
4090 }
4091
4092 return target_array2mem(interp, target, argc-1, argv + 1);
4093 }
4094
4095 static int target_array2mem(Jim_Interp *interp, struct target *target,
4096 int argc, Jim_Obj *const *argv)
4097 {
4098 long l;
4099 uint32_t width;
4100 int len;
4101 uint32_t addr;
4102 uint32_t count;
4103 uint32_t v;
4104 const char *varname;
4105 int n, e, retval;
4106 uint32_t i;
4107
4108 /* argv[1] = name of array to get the data
4109 * argv[2] = desired width
4110 * argv[3] = memory address
4111 * argv[4] = count to write
4112 */
4113 if (argc != 4) {
4114 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems");
4115 return JIM_ERR;
4116 }
4117 varname = Jim_GetString(argv[0], &len);
4118 /* given "foo" get space for worse case "foo(%d)" .. add 20 */
4119
4120 e = Jim_GetLong(interp, argv[1], &l);
4121 width = l;
4122 if (e != JIM_OK)
4123 return e;
4124
4125 e = Jim_GetLong(interp, argv[2], &l);
4126 addr = l;
4127 if (e != JIM_OK)
4128 return e;
4129 e = Jim_GetLong(interp, argv[3], &l);
4130 len = l;
4131 if (e != JIM_OK)
4132 return e;
4133 switch (width) {
4134 case 8:
4135 width = 1;
4136 break;
4137 case 16:
4138 width = 2;
4139 break;
4140 case 32:
4141 width = 4;
4142 break;
4143 default:
4144 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4145 Jim_AppendStrings(interp, Jim_GetResult(interp),
4146 "Invalid width param, must be 8/16/32", NULL);
4147 return JIM_ERR;
4148 }
4149 if (len == 0) {
4150 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4151 Jim_AppendStrings(interp, Jim_GetResult(interp),
4152 "array2mem: zero width read?", NULL);
4153 return JIM_ERR;
4154 }
4155 if ((addr + (len * width)) < addr) {
4156 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4157 Jim_AppendStrings(interp, Jim_GetResult(interp),
4158 "array2mem: addr + len - wraps to zero?", NULL);
4159 return JIM_ERR;
4160 }
4161 /* absurd transfer size? */
4162 if (len > 65536) {
4163 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4164 Jim_AppendStrings(interp, Jim_GetResult(interp),
4165 "array2mem: absurd > 64K item request", NULL);
4166 return JIM_ERR;
4167 }
4168
4169 if ((width == 1) ||
4170 ((width == 2) && ((addr & 1) == 0)) ||
4171 ((width == 4) && ((addr & 3) == 0))) {
4172 /* all is well */
4173 } else {
4174 char buf[100];
4175 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4176 		sprintf(buf, "array2mem address: 0x%08x is not aligned for %d byte writes",
4177 (unsigned int)addr,
4178 (int)width);
4179 Jim_AppendStrings(interp, Jim_GetResult(interp), buf , NULL);
4180 return JIM_ERR;
4181 }
4182
4183 /* Transfer loop */
4184
4185 /* index counter */
4186 n = 0;
4187 /* assume ok */
4188 e = JIM_OK;
4189
4190 size_t buffersize = 4096;
4191 uint8_t *buffer = malloc(buffersize);
4192 if (buffer == NULL)
4193 return JIM_ERR;
4194
4195 while (len) {
4196 /* Slurp... in buffer size chunks */
4197
4198 count = len; /* in objects.. */
4199 if (count > (buffersize / width))
4200 count = (buffersize / width);
4201
4202 v = 0; /* shut up gcc */
4203 for (i = 0; i < count; i++, n++) {
4204 get_int_array_element(interp, varname, n, &v);
4205 switch (width) {
4206 case 4:
4207 target_buffer_set_u32(target, &buffer[i * width], v);
4208 break;
4209 case 2:
4210 target_buffer_set_u16(target, &buffer[i * width], v);
4211 break;
4212 case 1:
4213 buffer[i] = v & 0x0ff;
4214 break;
4215 }
4216 }
4217 len -= count;
4218
4219 retval = target_write_memory(target, addr, width, count, buffer);
4220 if (retval != ERROR_OK) {
4221 /* BOO !*/
4222 LOG_ERROR("array2mem: Write @ 0x%08x, w=%d, cnt=%d, failed",
4223 (unsigned int)addr,
4224 (int)width,
4225 (int)count);
4226 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4227 			Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4228 e = JIM_ERR;
4229 break;
4230 }
4231 addr += count * width;
4232 }
4233
4234 free(buffer);
4235
4236 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4237
4238 return e;
4239 }
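
/*
 * Tcl usage sketch for the helper above (values are illustrative):
 *
 *   set patch(0) 0xdeadbeef
 *   set patch(1) 0xcafebabe
 *   array2mem patch 32 0x20000000 2
 *
 * writes the two 32-bit values to target memory starting at 0x20000000.
 */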
4240
4241 /* FIX? should we propagate errors here rather than printing them
4242 * and continuing?
4243 */
4244 void target_handle_event(struct target *target, enum target_event e)
4245 {
4246 struct target_event_action *teap;
4247
4248 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4249 if (teap->event == e) {
4250 LOG_DEBUG("target: (%d) %s (%s) event: %d (%s) action: %s",
4251 target->target_number,
4252 target_name(target),
4253 target_type_name(target),
4254 e,
4255 Jim_Nvp_value2name_simple(nvp_target_event, e)->name,
4256 Jim_GetString(teap->body, NULL));
4257 if (Jim_EvalObj(teap->interp, teap->body) != JIM_OK) {
4258 Jim_MakeErrorMessage(teap->interp);
4259 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(teap->interp), NULL));
4260 }
4261 }
4262 }
4263 }
4264
4265 /**
4266 * Returns true only if the target has a handler for the specified event.
4267 */
4268 bool target_has_event_action(struct target *target, enum target_event event)
4269 {
4270 struct target_event_action *teap;
4271
4272 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4273 if (teap->event == event)
4274 return true;
4275 }
4276 return false;
4277 }
4278
4279 enum target_cfg_param {
4280 TCFG_TYPE,
4281 TCFG_EVENT,
4282 TCFG_WORK_AREA_VIRT,
4283 TCFG_WORK_AREA_PHYS,
4284 TCFG_WORK_AREA_SIZE,
4285 TCFG_WORK_AREA_BACKUP,
4286 TCFG_ENDIAN,
4287 TCFG_COREID,
4288 TCFG_CHAIN_POSITION,
4289 TCFG_DBGBASE,
4290 TCFG_RTOS,
4291 };
4292
4293 static Jim_Nvp nvp_config_opts[] = {
4294 { .name = "-type", .value = TCFG_TYPE },
4295 { .name = "-event", .value = TCFG_EVENT },
4296 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
4297 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
4298 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
4299 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
4300 { .name = "-endian" , .value = TCFG_ENDIAN },
4301 { .name = "-coreid", .value = TCFG_COREID },
4302 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
4303 { .name = "-dbgbase", .value = TCFG_DBGBASE },
4304 { .name = "-rtos", .value = TCFG_RTOS },
4305 { .name = NULL, .value = -1 }
4306 };
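
/*
 * These options are normally used from a board or target config script,
 * for example (all values below are illustrative only):
 *
 *   $_TARGETNAME configure -work-area-phys 0x20000000 \
 *       -work-area-size 0x4000 -work-area-backup 0
 *   $_TARGETNAME configure -event reset-init { echo "reset-init handler" }
 *   $_TARGETNAME cget -endian
 *
 * "configure" sets options; "cget" reads the current value back.
 */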
4307
4308 static int target_configure(Jim_GetOptInfo *goi, struct target *target)
4309 {
4310 Jim_Nvp *n;
4311 Jim_Obj *o;
4312 jim_wide w;
4313 int e;
4314
4315 /* parse config or cget options ... */
4316 while (goi->argc > 0) {
4317 Jim_SetEmptyResult(goi->interp);
4318 /* Jim_GetOpt_Debug(goi); */
4319
4320 if (target->type->target_jim_configure) {
4321 /* target defines a configure function */
4322 /* target gets first dibs on parameters */
4323 e = (*(target->type->target_jim_configure))(target, goi);
4324 if (e == JIM_OK) {
4325 /* more? */
4326 continue;
4327 }
4328 if (e == JIM_ERR) {
4329 /* An error */
4330 return e;
4331 }
4332 /* otherwise we 'continue' below */
4333 }
4334 e = Jim_GetOpt_Nvp(goi, nvp_config_opts, &n);
4335 if (e != JIM_OK) {
4336 Jim_GetOpt_NvpUnknown(goi, nvp_config_opts, 0);
4337 return e;
4338 }
4339 switch (n->value) {
4340 case TCFG_TYPE:
4341 			/* not settable */
4342 if (goi->isconfigure) {
4343 Jim_SetResultFormatted(goi->interp,
4344 "not settable: %s", n->name);
4345 return JIM_ERR;
4346 } else {
4347 no_params:
4348 if (goi->argc != 0) {
4349 Jim_WrongNumArgs(goi->interp,
4350 goi->argc, goi->argv,
4351 "NO PARAMS");
4352 return JIM_ERR;
4353 }
4354 }
4355 Jim_SetResultString(goi->interp,
4356 target_type_name(target), -1);
4357 /* loop for more */
4358 break;
4359 case TCFG_EVENT:
4360 if (goi->argc == 0) {
4361 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
4362 return JIM_ERR;
4363 }
4364
4365 e = Jim_GetOpt_Nvp(goi, nvp_target_event, &n);
4366 if (e != JIM_OK) {
4367 Jim_GetOpt_NvpUnknown(goi, nvp_target_event, 1);
4368 return e;
4369 }
4370
4371 if (goi->isconfigure) {
4372 if (goi->argc != 1) {
4373 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
4374 return JIM_ERR;
4375 }
4376 } else {
4377 if (goi->argc != 0) {
4378 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
4379 return JIM_ERR;
4380 }
4381 }
4382
4383 {
4384 struct target_event_action *teap;
4385
4386 teap = target->event_action;
4387 /* replace existing? */
4388 while (teap) {
4389 if (teap->event == (enum target_event)n->value)
4390 break;
4391 teap = teap->next;
4392 }
4393
4394 if (goi->isconfigure) {
4395 bool replace = true;
4396 if (teap == NULL) {
4397 /* create new */
4398 teap = calloc(1, sizeof(*teap));
4399 replace = false;
4400 }
4401 teap->event = n->value;
4402 teap->interp = goi->interp;
4403 Jim_GetOpt_Obj(goi, &o);
4404 if (teap->body)
4405 Jim_DecrRefCount(teap->interp, teap->body);
4406 teap->body = Jim_DuplicateObj(goi->interp, o);
4407 				/*
4408 				 * FIXME:
4409 				 * Tcl/Tk "bind" events have a nice feature: you can use
4410 				 * %-substitutions (e.g. %X and %Y) inside the event body.
4411 				 * We should support the same here, for example:
4412 				 *   %T - target name
4413 				 *   %N - target number
4414 				 *   %E - event name
4415 				 * (see the Tk "BIND" command for the model)
4416 				 */
4417 Jim_IncrRefCount(teap->body);
4418
4419 if (!replace) {
4420 /* add to head of event list */
4421 teap->next = target->event_action;
4422 target->event_action = teap;
4423 }
4424 Jim_SetEmptyResult(goi->interp);
4425 } else {
4426 /* get */
4427 if (teap == NULL)
4428 Jim_SetEmptyResult(goi->interp);
4429 else
4430 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
4431 }
4432 }
4433 /* loop for more */
4434 break;
4435
4436 case TCFG_WORK_AREA_VIRT:
4437 if (goi->isconfigure) {
4438 target_free_all_working_areas(target);
4439 e = Jim_GetOpt_Wide(goi, &w);
4440 if (e != JIM_OK)
4441 return e;
4442 target->working_area_virt = w;
4443 target->working_area_virt_spec = true;
4444 } else {
4445 if (goi->argc != 0)
4446 goto no_params;
4447 }
4448 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
4449 /* loop for more */
4450 break;
4451
4452 case TCFG_WORK_AREA_PHYS:
4453 if (goi->isconfigure) {
4454 target_free_all_working_areas(target);
4455 e = Jim_GetOpt_Wide(goi, &w);
4456 if (e != JIM_OK)
4457 return e;
4458 target->working_area_phys = w;
4459 target->working_area_phys_spec = true;
4460 } else {
4461 if (goi->argc != 0)
4462 goto no_params;
4463 }
4464 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
4465 /* loop for more */
4466 break;
4467
4468 case TCFG_WORK_AREA_SIZE:
4469 if (goi->isconfigure) {
4470 target_free_all_working_areas(target);
4471 e = Jim_GetOpt_Wide(goi, &w);
4472 if (e != JIM_OK)
4473 return e;
4474 target->working_area_size = w;
4475 } else {
4476 if (goi->argc != 0)
4477 goto no_params;
4478 }
4479 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
4480 /* loop for more */
4481 break;
4482
4483 case TCFG_WORK_AREA_BACKUP:
4484 if (goi->isconfigure) {
4485 target_free_all_working_areas(target);
4486 e = Jim_GetOpt_Wide(goi, &w);
4487 if (e != JIM_OK)
4488 return e;
4489 /* make this exactly 1 or 0 */
4490 target->backup_working_area = (!!w);
4491 } else {
4492 if (goi->argc != 0)
4493 goto no_params;
4494 }
4495 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
4496 			/* loop for more */
4497 break;
4498
4499
4500 case TCFG_ENDIAN:
4501 if (goi->isconfigure) {
4502 e = Jim_GetOpt_Nvp(goi, nvp_target_endian, &n);
4503 if (e != JIM_OK) {
4504 Jim_GetOpt_NvpUnknown(goi, nvp_target_endian, 1);
4505 return e;
4506 }
4507 target->endianness = n->value;
4508 } else {
4509 if (goi->argc != 0)
4510 goto no_params;
4511 }
4512 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4513 if (n->name == NULL) {
4514 target->endianness = TARGET_LITTLE_ENDIAN;
4515 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4516 }
4517 Jim_SetResultString(goi->interp, n->name, -1);
4518 /* loop for more */
4519 break;
4520
4521 case TCFG_COREID:
4522 if (goi->isconfigure) {
4523 e = Jim_GetOpt_Wide(goi, &w);
4524 if (e != JIM_OK)
4525 return e;
4526 target->coreid = (int32_t)w;
4527 } else {
4528 if (goi->argc != 0)
4529 goto no_params;
4530 }
4531 			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
4532 /* loop for more */
4533 break;
4534
4535 case TCFG_CHAIN_POSITION:
4536 if (goi->isconfigure) {
4537 Jim_Obj *o_t;
4538 struct jtag_tap *tap;
4539 target_free_all_working_areas(target);
4540 e = Jim_GetOpt_Obj(goi, &o_t);
4541 if (e != JIM_OK)
4542 return e;
4543 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
4544 if (tap == NULL)
4545 return JIM_ERR;
4546 				/* record the TAP this target will use */
4547 				target->tap = tap;
4548 } else {
4549 if (goi->argc != 0)
4550 goto no_params;
4551 }
4552 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
4553 			/* loop for more */
4554 break;
4555 case TCFG_DBGBASE:
4556 if (goi->isconfigure) {
4557 e = Jim_GetOpt_Wide(goi, &w);
4558 if (e != JIM_OK)
4559 return e;
4560 target->dbgbase = (uint32_t)w;
4561 target->dbgbase_set = true;
4562 } else {
4563 if (goi->argc != 0)
4564 goto no_params;
4565 }
4566 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
4567 /* loop for more */
4568 break;
4569
4570 case TCFG_RTOS:
4571 /* RTOS */
4572 {
4573 int result = rtos_create(goi, target);
4574 if (result != JIM_OK)
4575 return result;
4576 }
4577 /* loop for more */
4578 break;
4579 }
4580 } /* while (goi->argc) */
4581
4582
4583 /* done - we return */
4584 return JIM_OK;
4585 }
4586
4587 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
4588 {
4589 Jim_GetOptInfo goi;
4590
4591 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4592 goi.isconfigure = !strcmp(Jim_GetString(argv[0], NULL), "configure");
4593 int need_args = 1 + goi.isconfigure;
4594 if (goi.argc < need_args) {
4595 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
4596 goi.isconfigure
4597 ? "missing: -option VALUE ..."
4598 : "missing: -option ...");
4599 return JIM_ERR;
4600 }
4601 struct target *target = Jim_CmdPrivData(goi.interp);
4602 return target_configure(&goi, target);
4603 }
4604
4605 static int jim_target_mw(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4606 {
4607 const char *cmd_name = Jim_GetString(argv[0], NULL);
4608
4609 Jim_GetOptInfo goi;
4610 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4611
4612 if (goi.argc < 2 || goi.argc > 4) {
4613 Jim_SetResultFormatted(goi.interp,
4614 "usage: %s [phys] <address> <data> [<count>]", cmd_name);
4615 return JIM_ERR;
4616 }
4617
4618 target_write_fn fn;
4619 fn = target_write_memory;
4620
4621 int e;
4622 if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) {
4623 /* consume it */
4624 struct Jim_Obj *obj;
4625 e = Jim_GetOpt_Obj(&goi, &obj);
4626 if (e != JIM_OK)
4627 return e;
4628
4629 fn = target_write_phys_memory;
4630 }
4631
4632 jim_wide a;
4633 e = Jim_GetOpt_Wide(&goi, &a);
4634 if (e != JIM_OK)
4635 return e;
4636
4637 jim_wide b;
4638 e = Jim_GetOpt_Wide(&goi, &b);
4639 if (e != JIM_OK)
4640 return e;
4641
4642 jim_wide c = 1;
4643 if (goi.argc == 1) {
4644 e = Jim_GetOpt_Wide(&goi, &c);
4645 if (e != JIM_OK)
4646 return e;
4647 }
4648
4649 /* all args must be consumed */
4650 if (goi.argc != 0)
4651 return JIM_ERR;
4652
4653 struct target *target = Jim_CmdPrivData(goi.interp);
4654 unsigned data_size;
4655 if (strcasecmp(cmd_name, "mww") == 0)
4656 data_size = 4;
4657 else if (strcasecmp(cmd_name, "mwh") == 0)
4658 data_size = 2;
4659 else if (strcasecmp(cmd_name, "mwb") == 0)
4660 data_size = 1;
4661 else {
4662 LOG_ERROR("command '%s' unknown: ", cmd_name);
4663 return JIM_ERR;
4664 }
4665
4666 return (target_fill_mem(target, a, fn, data_size, b, c) == ERROR_OK) ? JIM_OK : JIM_ERR;
4667 }
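
/*
 * Example invocations of the write commands handled above (addresses and
 * data values are illustrative):
 *
 *   mww 0x20000000 0xdeadbeef        ; one 32-bit word
 *   mwh 0x20000000 0xbeef 4          ; four consecutive 16-bit half-words
 *   mwb phys 0x20000000 0xff 16      ; 16 bytes via the physical address
 */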
4668
4669 /**
4670 * @brief Reads an array of words/halfwords/bytes from target memory starting at specified address.
4671 *
4672 * Usage: mdw [phys] <address> [<count>] - for 32 bit reads
4673 * mdh [phys] <address> [<count>] - for 16 bit reads
4674 * mdb [phys] <address> [<count>] - for 8 bit reads
4675 *
4676 * Count defaults to 1.
4677 *
4678 * Calls target_read_memory or target_read_phys_memory depending on
4679 * the presence of the "phys" argument
4680  * Reads the target memory in blocks of up to 16 bytes and formats each value
4681  * as a base-16 (hexadecimal) number.
4682  * Also outputs the read data in a human-readable form using command_print
4683 *
4684 * @param phys if present target_read_phys_memory will be used instead of target_read_memory
4685 * @param address address where to start the read. May be specified in decimal or hex using the standard "0x" prefix
4686 * @param count optional count parameter to read an array of values. If not specified, defaults to 1.
4687  * @returns JIM_ERR on error, or JIM_OK on success; on success the result string
4688  * is set to an array of ASCII-formatted numbers with [<count>] elements.
4689 *
4690 * In case of little endian target:
4691 * Example1: "mdw 0x00000000" returns "10123456"
4692  * Example2: "mdh 0x00000000 1" returns "3456"
4693 * Example3: "mdb 0x00000000" returns "56"
4694 * Example4: "mdh 0x00000000 2" returns "3456 1012"
4695 * Example5: "mdb 0x00000000 3" returns "56 34 12"
4696 **/
4697 static int jim_target_md(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4698 {
4699 const char *cmd_name = Jim_GetString(argv[0], NULL);
4700
4701 Jim_GetOptInfo goi;
4702 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4703
4704 if ((goi.argc < 1) || (goi.argc > 3)) {
4705 Jim_SetResultFormatted(goi.interp,
4706 "usage: %s [phys] <address> [<count>]", cmd_name);
4707 return JIM_ERR;
4708 }
4709
4710 int (*fn)(struct target *target,
4711 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
4712 fn = target_read_memory;
4713
4714 int e;
4715 if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) {
4716 /* consume it */
4717 struct Jim_Obj *obj;
4718 e = Jim_GetOpt_Obj(&goi, &obj);
4719 if (e != JIM_OK)
4720 return e;
4721
4722 fn = target_read_phys_memory;
4723 }
4724
4725 /* Read address parameter */
4726 jim_wide addr;
4727 e = Jim_GetOpt_Wide(&goi, &addr);
4728 if (e != JIM_OK)
4729 return JIM_ERR;
4730
4731 /* If next parameter exists, read it out as the count parameter, if not, set it to 1 (default) */
4732 jim_wide count;
4733 if (goi.argc == 1) {
4734 e = Jim_GetOpt_Wide(&goi, &count);
4735 if (e != JIM_OK)
4736 return JIM_ERR;
4737 } else
4738 count = 1;
4739
4740 /* all args must be consumed */
4741 if (goi.argc != 0)
4742 return JIM_ERR;
4743
4744 jim_wide dwidth = 1; /* shut up gcc */
4745 if (strcasecmp(cmd_name, "mdw") == 0)
4746 dwidth = 4;
4747 else if (strcasecmp(cmd_name, "mdh") == 0)
4748 dwidth = 2;
4749 else if (strcasecmp(cmd_name, "mdb") == 0)
4750 dwidth = 1;
4751 else {
4752 LOG_ERROR("command '%s' unknown: ", cmd_name);
4753 return JIM_ERR;
4754 }
4755
4756 /* convert count to "bytes" */
4757 int bytes = count * dwidth;
4758
4759 struct target *target = Jim_CmdPrivData(goi.interp);
4760 uint8_t target_buf[32];
4761 jim_wide x, y, z;
4762 while (bytes > 0) {
4763 y = (bytes < 16) ? bytes : 16; /* y = min(bytes, 16); */
4764
4765 /* Try to read out next block */
4766 e = fn(target, addr, dwidth, y / dwidth, target_buf);
4767
4768 if (e != ERROR_OK) {
4769 Jim_SetResultFormatted(interp, "error reading target @ 0x%08lx", (long)addr);
4770 return JIM_ERR;
4771 }
4772
4773 command_print_sameline(NULL, "0x%08x ", (int)(addr));
4774 switch (dwidth) {
4775 case 4:
4776 for (x = 0; x < 16 && x < y; x += 4) {
4777 z = target_buffer_get_u32(target, &(target_buf[x]));
4778 command_print_sameline(NULL, "%08x ", (int)(z));
4779 }
4780 for (; (x < 16) ; x += 4)
4781 command_print_sameline(NULL, " ");
4782 break;
4783 case 2:
4784 for (x = 0; x < 16 && x < y; x += 2) {
4785 z = target_buffer_get_u16(target, &(target_buf[x]));
4786 command_print_sameline(NULL, "%04x ", (int)(z));
4787 }
4788 for (; (x < 16) ; x += 2)
4789 command_print_sameline(NULL, " ");
4790 break;
4791 case 1:
4792 default:
4793 for (x = 0 ; (x < 16) && (x < y) ; x += 1) {
4794 z = target_buffer_get_u8(target, &(target_buf[x]));
4795 command_print_sameline(NULL, "%02x ", (int)(z));
4796 }
4797 for (; (x < 16) ; x += 1)
4798 command_print_sameline(NULL, " ");
4799 break;
4800 }
4801 /* ascii-ify the bytes */
4802 for (x = 0 ; x < y ; x++) {
4803 if ((target_buf[x] >= 0x20) &&
4804 (target_buf[x] <= 0x7e)) {
4805 /* good */
4806 } else {
4807 /* smack it */
4808 target_buf[x] = '.';
4809 }
4810 }
4811 /* space pad */
4812 while (x < 16) {
4813 target_buf[x] = ' ';
4814 x++;
4815 }
4816 /* terminate */
4817 target_buf[16] = 0;
4818 /* print - with a newline */
4819 command_print_sameline(NULL, "%s\n", target_buf);
4820 /* NEXT... */
4821 bytes -= 16;
4822 addr += 16;
4823 }
4824 return JIM_OK;
4825 }
4826
4827 static int jim_target_mem2array(Jim_Interp *interp,
4828 int argc, Jim_Obj *const *argv)
4829 {
4830 struct target *target = Jim_CmdPrivData(interp);
4831 return target_mem2array(interp, target, argc - 1, argv + 1);
4832 }
4833
4834 static int jim_target_array2mem(Jim_Interp *interp,
4835 int argc, Jim_Obj *const *argv)
4836 {
4837 struct target *target = Jim_CmdPrivData(interp);
4838 return target_array2mem(interp, target, argc - 1, argv + 1);
4839 }
4840
4841 static int jim_target_tap_disabled(Jim_Interp *interp)
4842 {
4843 Jim_SetResultFormatted(interp, "[TAP is disabled]");
4844 return JIM_ERR;
4845 }
4846
4847 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4848 {
4849 if (argc != 1) {
4850 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
4851 return JIM_ERR;
4852 }
4853 struct target *target = Jim_CmdPrivData(interp);
4854 if (!target->tap->enabled)
4855 return jim_target_tap_disabled(interp);
4856
4857 int e = target->type->examine(target);
4858 if (e != ERROR_OK)
4859 return JIM_ERR;
4860 return JIM_OK;
4861 }
4862
4863 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4864 {
4865 if (argc != 1) {
4866 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
4867 return JIM_ERR;
4868 }
4869 struct target *target = Jim_CmdPrivData(interp);
4870
4871 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
4872 return JIM_ERR;
4873
4874 return JIM_OK;
4875 }
4876
4877 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4878 {
4879 if (argc != 1) {
4880 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
4881 return JIM_ERR;
4882 }
4883 struct target *target = Jim_CmdPrivData(interp);
4884 if (!target->tap->enabled)
4885 return jim_target_tap_disabled(interp);
4886
4887 int e;
4888 if (!(target_was_examined(target)))
4889 e = ERROR_TARGET_NOT_EXAMINED;
4890 else
4891 e = target->type->poll(target);
4892 if (e != ERROR_OK)
4893 return JIM_ERR;
4894 return JIM_OK;
4895 }
4896
4897 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4898 {
4899 Jim_GetOptInfo goi;
4900 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4901
4902 if (goi.argc != 2) {
4903 Jim_WrongNumArgs(interp, 0, argv,
4904 "([tT]|[fF]|assert|deassert) BOOL");
4905 return JIM_ERR;
4906 }
4907
4908 Jim_Nvp *n;
4909 int e = Jim_GetOpt_Nvp(&goi, nvp_assert, &n);
4910 if (e != JIM_OK) {
4911 Jim_GetOpt_NvpUnknown(&goi, nvp_assert, 1);
4912 return e;
4913 }
4914 /* the halt or not param */
4915 jim_wide a;
4916 e = Jim_GetOpt_Wide(&goi, &a);
4917 if (e != JIM_OK)
4918 return e;
4919
4920 struct target *target = Jim_CmdPrivData(goi.interp);
4921 if (!target->tap->enabled)
4922 return jim_target_tap_disabled(interp);
4923
4924 if (!target->type->assert_reset || !target->type->deassert_reset) {
4925 Jim_SetResultFormatted(interp,
4926 "No target-specific reset for %s",
4927 target_name(target));
4928 return JIM_ERR;
4929 }
4930 /* determine if we should halt or not. */
4931 target->reset_halt = !!a;
4932 /* When this happens - all workareas are invalid. */
4933 target_free_all_working_areas_restore(target, 0);
4934
4935 /* do the assert */
4936 if (n->value == NVP_ASSERT)
4937 e = target->type->assert_reset(target);
4938 else
4939 e = target->type->deassert_reset(target);
4940 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
4941 }
4942
4943 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4944 {
4945 if (argc != 1) {
4946 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
4947 return JIM_ERR;
4948 }
4949 struct target *target = Jim_CmdPrivData(interp);
4950 if (!target->tap->enabled)
4951 return jim_target_tap_disabled(interp);
4952 int e = target->type->halt(target);
4953 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
4954 }
4955
4956 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4957 {
4958 Jim_GetOptInfo goi;
4959 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4960
4961 /* params: <name> statename timeoutmsecs */
4962 if (goi.argc != 2) {
4963 const char *cmd_name = Jim_GetString(argv[0], NULL);
4964 Jim_SetResultFormatted(goi.interp,
4965 "%s <state_name> <timeout_in_msec>", cmd_name);
4966 return JIM_ERR;
4967 }
4968
4969 Jim_Nvp *n;
4970 int e = Jim_GetOpt_Nvp(&goi, nvp_target_state, &n);
4971 if (e != JIM_OK) {
4972 Jim_GetOpt_NvpUnknown(&goi, nvp_target_state, 1);
4973 return e;
4974 }
4975 jim_wide a;
4976 e = Jim_GetOpt_Wide(&goi, &a);
4977 if (e != JIM_OK)
4978 return e;
4979 struct target *target = Jim_CmdPrivData(interp);
4980 if (!target->tap->enabled)
4981 return jim_target_tap_disabled(interp);
4982
4983 e = target_wait_state(target, n->value, a);
4984 if (e != ERROR_OK) {
4985 Jim_Obj *eObj = Jim_NewIntObj(interp, e);
4986 Jim_SetResultFormatted(goi.interp,
4987 "target: %s wait %s fails (%#s) %s",
4988 target_name(target), n->name,
4989 eObj, target_strerror_safe(e));
4990 Jim_FreeNewObj(interp, eObj);
4991 return JIM_ERR;
4992 }
4993 return JIM_OK;
4994 }
4995 /* List for human, Events defined for this target.
4996 * scripts/programs should use 'name cget -event NAME'
4997 */
4998 static int jim_target_event_list(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4999 {
5000 struct command_context *cmd_ctx = current_command_context(interp);
5001 assert(cmd_ctx != NULL);
5002
5003 struct target *target = Jim_CmdPrivData(interp);
5004 struct target_event_action *teap = target->event_action;
5005 command_print(cmd_ctx, "Event actions for target (%d) %s\n",
5006 target->target_number,
5007 target_name(target));
5008 command_print(cmd_ctx, "%-25s | Body", "Event");
5009 command_print(cmd_ctx, "------------------------- | "
5010 "----------------------------------------");
5011 while (teap) {
5012 Jim_Nvp *opt = Jim_Nvp_value2name_simple(nvp_target_event, teap->event);
5013 command_print(cmd_ctx, "%-25s | %s",
5014 opt->name, Jim_GetString(teap->body, NULL));
5015 teap = teap->next;
5016 }
5017 command_print(cmd_ctx, "***END***");
5018 return JIM_OK;
5019 }
5020 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5021 {
5022 if (argc != 1) {
5023 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5024 return JIM_ERR;
5025 }
5026 struct target *target = Jim_CmdPrivData(interp);
5027 Jim_SetResultString(interp, target_state_name(target), -1);
5028 return JIM_OK;
5029 }
5030 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5031 {
5032 Jim_GetOptInfo goi;
5033 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5034 if (goi.argc != 1) {
5035 const char *cmd_name = Jim_GetString(argv[0], NULL);
5036 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5037 return JIM_ERR;
5038 }
5039 Jim_Nvp *n;
5040 int e = Jim_GetOpt_Nvp(&goi, nvp_target_event, &n);
5041 if (e != JIM_OK) {
5042 Jim_GetOpt_NvpUnknown(&goi, nvp_target_event, 1);
5043 return e;
5044 }
5045 struct target *target = Jim_CmdPrivData(interp);
5046 target_handle_event(target, n->value);
5047 return JIM_OK;
5048 }
5049
5050 static const struct command_registration target_instance_command_handlers[] = {
5051 {
5052 .name = "configure",
5053 .mode = COMMAND_CONFIG,
5054 .jim_handler = jim_target_configure,
5055 .help = "configure a new target for use",
5056 .usage = "[target_attribute ...]",
5057 },
5058 {
5059 .name = "cget",
5060 .mode = COMMAND_ANY,
5061 .jim_handler = jim_target_configure,
5062 .help = "returns the specified target attribute",
5063 .usage = "target_attribute",
5064 },
5065 {
5066 .name = "mww",
5067 .mode = COMMAND_EXEC,
5068 .jim_handler = jim_target_mw,
5069 .help = "Write 32-bit word(s) to target memory",
5070 .usage = "address data [count]",
5071 },
5072 {
5073 .name = "mwh",
5074 .mode = COMMAND_EXEC,
5075 .jim_handler = jim_target_mw,
5076 .help = "Write 16-bit half-word(s) to target memory",
5077 .usage = "address data [count]",
5078 },
5079 {
5080 .name = "mwb",
5081 .mode = COMMAND_EXEC,
5082 .jim_handler = jim_target_mw,
5083 .help = "Write byte(s) to target memory",
5084 .usage = "address data [count]",
5085 },
5086 {
5087 .name = "mdw",
5088 .mode = COMMAND_EXEC,
5089 .jim_handler = jim_target_md,
5090 .help = "Display target memory as 32-bit words",
5091 .usage = "address [count]",
5092 },
5093 {
5094 .name = "mdh",
5095 .mode = COMMAND_EXEC,
5096 .jim_handler = jim_target_md,
5097 .help = "Display target memory as 16-bit half-words",
5098 .usage = "address [count]",
5099 },
5100 {
5101 .name = "mdb",
5102 .mode = COMMAND_EXEC,
5103 .jim_handler = jim_target_md,
5104 .help = "Display target memory as 8-bit bytes",
5105 .usage = "address [count]",
5106 },
5107 {
5108 .name = "array2mem",
5109 .mode = COMMAND_EXEC,
5110 .jim_handler = jim_target_array2mem,
5111 .help = "Writes Tcl array of 8/16/32 bit numbers "
5112 "to target memory",
5113 .usage = "arrayname bitwidth address count",
5114 },
5115 {
5116 .name = "mem2array",
5117 .mode = COMMAND_EXEC,
5118 .jim_handler = jim_target_mem2array,
5119 .help = "Loads Tcl array of 8/16/32 bit numbers "
5120 "from target memory",
5121 .usage = "arrayname bitwidth address count",
5122 },
5123 {
5124 .name = "eventlist",
5125 .mode = COMMAND_EXEC,
5126 .jim_handler = jim_target_event_list,
5127 .help = "displays a table of events defined for this target",
5128 },
5129 {
5130 .name = "curstate",
5131 .mode = COMMAND_EXEC,
5132 .jim_handler = jim_target_current_state,
5133 .help = "displays the current state of this target",
5134 },
5135 {
5136 .name = "arp_examine",
5137 .mode = COMMAND_EXEC,
5138 .jim_handler = jim_target_examine,
5139 .help = "used internally for reset processing",
5140 },
5141 {
5142 .name = "arp_halt_gdb",
5143 .mode = COMMAND_EXEC,
5144 .jim_handler = jim_target_halt_gdb,
5145 .help = "used internally for reset processing to halt GDB",
5146 },
5147 {
5148 .name = "arp_poll",
5149 .mode = COMMAND_EXEC,
5150 .jim_handler = jim_target_poll,
5151 .help = "used internally for reset processing",
5152 },
5153 {
5154 .name = "arp_reset",
5155 .mode = COMMAND_EXEC,
5156 .jim_handler = jim_target_reset,
5157 .help = "used internally for reset processing",
5158 },
5159 {
5160 .name = "arp_halt",
5161 .mode = COMMAND_EXEC,
5162 .jim_handler = jim_target_halt,
5163 .help = "used internally for reset processing",
5164 },
5165 {
5166 .name = "arp_waitstate",
5167 .mode = COMMAND_EXEC,
5168 .jim_handler = jim_target_wait_state,
5169 .help = "used internally for reset processing",
5170 },
5171 {
5172 .name = "invoke-event",
5173 .mode = COMMAND_EXEC,
5174 .jim_handler = jim_target_invoke_event,
5175 .help = "invoke handler for specified event",
5176 .usage = "event_name",
5177 },
5178 COMMAND_REGISTRATION_DONE
5179 };
5180
5181 static int target_create(Jim_GetOptInfo *goi)
5182 {
5183 Jim_Obj *new_cmd;
5184 Jim_Cmd *cmd;
5185 const char *cp;
5186 int e;
5187 int x;
5188 struct target *target;
5189 struct command_context *cmd_ctx;
5190
5191 cmd_ctx = current_command_context(goi->interp);
5192 assert(cmd_ctx != NULL);
5193
5194 if (goi->argc < 3) {
5195 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5196 return JIM_ERR;
5197 }
5198
5199 /* COMMAND */
5200 Jim_GetOpt_Obj(goi, &new_cmd);
5201 /* does this command exist? */
5202 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
5203 if (cmd) {
5204 cp = Jim_GetString(new_cmd, NULL);
5205 		Jim_SetResultFormatted(goi->interp, "Command/target: %s exists", cp);
5206 return JIM_ERR;
5207 }
5208
5209 /* TYPE */
5210 e = Jim_GetOpt_String(goi, &cp, NULL);
5211 if (e != JIM_OK)
5212 return e;
5213 struct transport *tr = get_current_transport();
5214 if (tr->override_target) {
5215 e = tr->override_target(&cp);
5216 if (e != ERROR_OK) {
5217 LOG_ERROR("The selected transport doesn't support this target");
5218 return JIM_ERR;
5219 }
5220 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5221 }
5222 /* now does target type exist */
5223 for (x = 0 ; target_types[x] ; x++) {
5224 if (0 == strcmp(cp, target_types[x]->name)) {
5225 /* found */
5226 break;
5227 }
5228
5229 /* check for deprecated name */
5230 if (target_types[x]->deprecated_name) {
5231 if (0 == strcmp(cp, target_types[x]->deprecated_name)) {
5232 /* found */
5233 				LOG_WARNING("target name is deprecated, use: \'%s\'", target_types[x]->name);
5234 break;
5235 }
5236 }
5237 }
5238 if (target_types[x] == NULL) {
5239 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5240 for (x = 0 ; target_types[x] ; x++) {
5241 if (target_types[x + 1]) {
5242 Jim_AppendStrings(goi->interp,
5243 Jim_GetResult(goi->interp),
5244 target_types[x]->name,
5245 ", ", NULL);
5246 } else {
5247 Jim_AppendStrings(goi->interp,
5248 Jim_GetResult(goi->interp),
5249 " or ",
5250 target_types[x]->name, NULL);
5251 }
5252 }
5253 return JIM_ERR;
5254 }
5255
5256 /* Create it */
5257 target = calloc(1, sizeof(struct target));
5258 /* set target number */
5259 target->target_number = new_target_number();
5260 cmd_ctx->current_target = target->target_number;
5261
5262 /* allocate memory for each unique target type */
5263 target->type = calloc(1, sizeof(struct target_type));
5264
5265 memcpy(target->type, target_types[x], sizeof(struct target_type));
5266
5267 /* will be set by "-endian" */
5268 target->endianness = TARGET_ENDIAN_UNKNOWN;
5269
5270 /* default to first core, override with -coreid */
5271 target->coreid = 0;
5272
5273 target->working_area = 0x0;
5274 target->working_area_size = 0x0;
5275 target->working_areas = NULL;
5276 target->backup_working_area = 0;
5277
5278 target->state = TARGET_UNKNOWN;
5279 target->debug_reason = DBG_REASON_UNDEFINED;
5280 target->reg_cache = NULL;
5281 target->breakpoints = NULL;
5282 target->watchpoints = NULL;
5283 target->next = NULL;
5284 target->arch_info = NULL;
5285
5286 target->display = 1;
5287
5288 target->halt_issued = false;
5289
5290 /* initialize trace information */
5291 target->trace_info = malloc(sizeof(struct trace));
5292 target->trace_info->num_trace_points = 0;
5293 target->trace_info->trace_points_size = 0;
5294 target->trace_info->trace_points = NULL;
5295 target->trace_info->trace_history_size = 0;
5296 target->trace_info->trace_history = NULL;
5297 target->trace_info->trace_history_pos = 0;
5298 target->trace_info->trace_history_overflowed = 0;
5299
5300 target->dbgmsg = NULL;
5301 target->dbg_msg_enabled = 0;
5302
5303 target->endianness = TARGET_ENDIAN_UNKNOWN;
5304
5305 target->rtos = NULL;
5306 target->rtos_auto_detect = false;
5307
5308 /* Do the rest as "configure" options */
5309 goi->isconfigure = 1;
5310 e = target_configure(goi, target);
5311
5312 if (target->tap == NULL) {
5313 Jim_SetResultString(goi->interp, "-chain-position required when creating target", -1);
5314 e = JIM_ERR;
5315 }
5316
5317 if (e != JIM_OK) {
5318 free(target->type);
5319 free(target);
5320 return e;
5321 }
5322
5323 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5324 /* default endian to little if not specified */
5325 target->endianness = TARGET_LITTLE_ENDIAN;
5326 }
5327
5328 cp = Jim_GetString(new_cmd, NULL);
5329 target->cmd_name = strdup(cp);
5330
5331 /* create the target specific commands */
5332 if (target->type->commands) {
5333 e = register_commands(cmd_ctx, NULL, target->type->commands);
5334 if (ERROR_OK != e)
5335 LOG_ERROR("unable to register '%s' commands", cp);
5336 }
5337 if (target->type->target_create)
5338 (*(target->type->target_create))(target, goi->interp);
5339
5340 /* append to end of list */
5341 {
5342 struct target **tpp;
5343 tpp = &(all_targets);
5344 while (*tpp)
5345 tpp = &((*tpp)->next);
5346 *tpp = target;
5347 }
5348
5349 /* now - create the new target name command */
5350 const struct command_registration target_subcommands[] = {
5351 {
5352 .chain = target_instance_command_handlers,
5353 },
5354 {
5355 .chain = target->type->commands,
5356 },
5357 COMMAND_REGISTRATION_DONE
5358 };
5359 const struct command_registration target_commands[] = {
5360 {
5361 .name = cp,
5362 .mode = COMMAND_ANY,
5363 .help = "target command group",
5364 .usage = "",
5365 .chain = target_subcommands,
5366 },
5367 COMMAND_REGISTRATION_DONE
5368 };
5369 e = register_commands(cmd_ctx, NULL, target_commands);
5370 if (ERROR_OK != e)
5371 return JIM_ERR;
5372
5373 struct command *c = command_find_in_context(cmd_ctx, cp);
5374 assert(c);
5375 command_set_handler_data(c, target);
5376
5377 return (ERROR_OK == e) ? JIM_OK : JIM_ERR;
5378 }
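
/*
 * A typical creation sequence in a target config script looks roughly like
 * this (TAP name, type name and IDs below are illustrative; the type must
 * match one of the entries in target_types[]):
 *
 *   jtag newtap mychip cpu -irlen 4 -expected-id 0x4ba00477
 *   target create mychip.cpu cortex_m3 -endian little \
 *       -chain-position mychip.cpu -coreid 0
 *
 * Note that -chain-position is mandatory, as enforced above.
 */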
5379
5380 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5381 {
5382 if (argc != 1) {
5383 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5384 return JIM_ERR;
5385 }
5386 struct command_context *cmd_ctx = current_command_context(interp);
5387 assert(cmd_ctx != NULL);
5388
5389 Jim_SetResultString(interp, target_name(get_current_target(cmd_ctx)), -1);
5390 return JIM_OK;
5391 }
5392
5393 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5394 {
5395 if (argc != 1) {
5396 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5397 return JIM_ERR;
5398 }
5399 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5400 for (unsigned x = 0; NULL != target_types[x]; x++) {
5401 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5402 Jim_NewStringObj(interp, target_types[x]->name, -1));
5403 }
5404 return JIM_OK;
5405 }
5406
5407 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5408 {
5409 if (argc != 1) {
5410 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5411 return JIM_ERR;
5412 }
5413 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5414 struct target *target = all_targets;
5415 while (target) {
5416 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5417 Jim_NewStringObj(interp, target_name(target), -1));
5418 target = target->next;
5419 }
5420 return JIM_OK;
5421 }
5422
5423 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5424 {
5425 int i;
5426 const char *targetname;
5427 int retval, len;
5428 struct target *target = (struct target *) NULL;
5429 struct target_list *head, *curr, *new;
5430 curr = (struct target_list *) NULL;
5431 head = (struct target_list *) NULL;
5432
5433 retval = 0;
5434 LOG_DEBUG("%d", argc);
5435 /* argv[1] = target to associate in smp
5436 	 * argv[2] = target to associate in smp
5437 * argv[3] ...
5438 */
5439
5440 for (i = 1; i < argc; i++) {
5441
5442 targetname = Jim_GetString(argv[i], &len);
5443 target = get_target(targetname);
5444 LOG_DEBUG("%s ", targetname);
5445 if (target) {
5446 new = malloc(sizeof(struct target_list));
5447 new->target = target;
5448 new->next = (struct target_list *)NULL;
5449 if (head == (struct target_list *)NULL) {
5450 head = new;
5451 curr = head;
5452 } else {
5453 curr->next = new;
5454 curr = new;
5455 }
5456 }
5457 }
5458 	/* now parse the list of CPUs and put each target in SMP mode */
5459 curr = head;
5460
5461 while (curr != (struct target_list *)NULL) {
5462 target = curr->target;
5463 target->smp = 1;
5464 target->head = head;
5465 curr = curr->next;
5466 }
5467
5468 if (target && target->rtos)
5469 retval = rtos_smp_init(head->target);
5470
5471 return retval;
5472 }
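
/*
 * Example (target names are illustrative):
 *
 *   target smp $_TARGETNAME_0 $_TARGETNAME_1
 *
 * marks both targets as members of one SMP group; each target's "head"
 * pointer refers to the same shared list built above.
 */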
5473
5474
5475 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5476 {
5477 Jim_GetOptInfo goi;
5478 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5479 if (goi.argc < 3) {
5480 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5481 "<name> <target_type> [<target_options> ...]");
5482 return JIM_ERR;
5483 }
5484 return target_create(&goi);
5485 }
5486
5487 static const struct command_registration target_subcommand_handlers[] = {
5488 {
5489 .name = "init",
5490 .mode = COMMAND_CONFIG,
5491 .handler = handle_target_init_command,
5492 .help = "initialize targets",
5493 },
5494 {
5495 .name = "create",
5496 /* REVISIT this should be COMMAND_CONFIG ... */
5497 .mode = COMMAND_ANY,
5498 .jim_handler = jim_target_create,
5499 .usage = "name type '-chain-position' name [options ...]",
5500 .help = "Creates and selects a new target",
5501 },
5502 {
5503 .name = "current",
5504 .mode = COMMAND_ANY,
5505 .jim_handler = jim_target_current,
5506 .help = "Returns the currently selected target",
5507 },
5508 {
5509 .name = "types",
5510 .mode = COMMAND_ANY,
5511 .jim_handler = jim_target_types,
5512 .help = "Returns the available target types as "
5513 "a list of strings",
5514 },
5515 {
5516 .name = "names",
5517 .mode = COMMAND_ANY,
5518 .jim_handler = jim_target_names,
5519 .help = "Returns the names of all targets as a list of strings",
5520 },
5521 {
5522 .name = "smp",
5523 .mode = COMMAND_ANY,
5524 .jim_handler = jim_target_smp,
5525 .usage = "targetname1 targetname2 ...",
5526 .help = "gather several target in a smp list"
5527 },
5528
5529 COMMAND_REGISTRATION_DONE
5530 };
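
/*
 * The subcommands above compose naturally from Tcl, e.g. (illustrative):
 *
 *   foreach t [target names] {
 *       echo [format "%s -> %s" $t [$t curstate]]
 *   }
 *
 * prints the current state of every configured target.
 */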
5531
5532 struct FastLoad {
5533 uint32_t address;
5534 uint8_t *data;
5535 int length;
5536
5537 };
5538
5539 static int fastload_num;
5540 static struct FastLoad *fastload;
5541
5542 static void free_fastload(void)
5543 {
5544 if (fastload != NULL) {
5545 int i;
5546 for (i = 0; i < fastload_num; i++) {
5547 if (fastload[i].data)
5548 free(fastload[i].data);
5549 }
5550 free(fastload);
5551 fastload = NULL;
5552 }
5553 }
5554
5555 COMMAND_HANDLER(handle_fast_load_image_command)
5556 {
5557 uint8_t *buffer;
5558 size_t buf_cnt;
5559 uint32_t image_size;
5560 uint32_t min_address = 0;
5561 uint32_t max_address = 0xffffffff;
5562 int i;
5563
5564 struct image image;
5565
5566 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
5567 &image, &min_address, &max_address);
5568 if (ERROR_OK != retval)
5569 return retval;
5570
5571 struct duration bench;
5572 duration_start(&bench);
5573
5574 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
5575 if (retval != ERROR_OK)
5576 return retval;
5577
5578 image_size = 0x0;
5579 retval = ERROR_OK;
5580 fastload_num = image.num_sections;
5581 fastload = malloc(sizeof(struct FastLoad)*image.num_sections);
5582 if (fastload == NULL) {
5583 command_print(CMD_CTX, "out of memory");
5584 image_close(&image);
5585 return ERROR_FAIL;
5586 }
5587 memset(fastload, 0, sizeof(struct FastLoad)*image.num_sections);
5588 for (i = 0; i < image.num_sections; i++) {
5589 buffer = malloc(image.sections[i].size);
5590 if (buffer == NULL) {
5591 command_print(CMD_CTX, "error allocating buffer for section (%d bytes)",
5592 (int)(image.sections[i].size));
5593 retval = ERROR_FAIL;
5594 break;
5595 }
5596
5597 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
5598 if (retval != ERROR_OK) {
5599 free(buffer);
5600 break;
5601 }
5602
5603 uint32_t offset = 0;
5604 uint32_t length = buf_cnt;
5605
5606 		/* DANGER!!! beware of unsigned comparison here!!! */
5607
5608 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
5609 (image.sections[i].base_address < max_address)) {
5610 if (image.sections[i].base_address < min_address) {
5611 /* clip addresses below */
5612 offset += min_address-image.sections[i].base_address;
5613 length -= offset;
5614 }
5615
5616 if (image.sections[i].base_address + buf_cnt > max_address)
5617 length -= (image.sections[i].base_address + buf_cnt)-max_address;
5618
5619 fastload[i].address = image.sections[i].base_address + offset;
5620 fastload[i].data = malloc(length);
5621 if (fastload[i].data == NULL) {
5622 free(buffer);
5623 command_print(CMD_CTX, "error allocating buffer for section (%" PRIu32 " bytes)",
5624 length);
5625 retval = ERROR_FAIL;
5626 break;
5627 }
5628 memcpy(fastload[i].data, buffer + offset, length);
5629 fastload[i].length = length;
5630
5631 image_size += length;
5632 command_print(CMD_CTX, "%u bytes written at address 0x%8.8x",
5633 (unsigned int)length,
5634 ((unsigned int)(image.sections[i].base_address + offset)));
5635 }
5636
5637 free(buffer);
5638 }
5639
5640 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
5641 command_print(CMD_CTX, "Loaded %" PRIu32 " bytes "
5642 "in %fs (%0.3f KiB/s)", image_size,
5643 duration_elapsed(&bench), duration_kbps(&bench, image_size));
5644
5645 command_print(CMD_CTX,
5646 "WARNING: image has not been loaded to target!"
5647 "You can issue a 'fast_load' to finish loading.");
5648 }
5649
5650 image_close(&image);
5651
5652 if (retval != ERROR_OK)
5653 free_fastload();
5654
5655 return retval;
5656 }
5657
5658 COMMAND_HANDLER(handle_fast_load_command)
5659 {
5660 if (CMD_ARGC > 0)
5661 return ERROR_COMMAND_SYNTAX_ERROR;
5662 if (fastload == NULL) {
5663 LOG_ERROR("No image in memory");
5664 return ERROR_FAIL;
5665 }
5666 int i;
5667 int ms = timeval_ms();
5668 int size = 0;
5669 int retval = ERROR_OK;
5670 for (i = 0; i < fastload_num; i++) {
5671 struct target *target = get_current_target(CMD_CTX);
5672 command_print(CMD_CTX, "Write to 0x%08x, length 0x%08x",
5673 (unsigned int)(fastload[i].address),
5674 (unsigned int)(fastload[i].length));
5675 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
5676 if (retval != ERROR_OK)
5677 break;
5678 size += fastload[i].length;
5679 }
5680 if (retval == ERROR_OK) {
5681 int after = timeval_ms();
5682 command_print(CMD_CTX, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
5683 }
5684 return retval;
5685 }
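
/*
 * Typical two-step usage of the fast-load commands above (file name,
 * address and image type are illustrative):
 *
 *   fast_load_image firmware.bin 0x08000000 bin
 *   ...
 *   fast_load
 *
 * The first command only stages the image in host RAM; the second one
 * performs the actual (timed) writes to the target.
 */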
5686
5687 static const struct command_registration target_command_handlers[] = {
5688 {
5689 .name = "targets",
5690 .handler = handle_targets_command,
5691 .mode = COMMAND_ANY,
5692 .help = "change current default target (one parameter) "
5693 "or prints table of all targets (no parameters)",
5694 .usage = "[target]",
5695 },
5696 {
5697 .name = "target",
5698 .mode = COMMAND_CONFIG,
5699 .help = "configure target",
5700
5701 .chain = target_subcommand_handlers,
5702 },
5703 COMMAND_REGISTRATION_DONE
5704 };
5705
5706 int target_register_commands(struct command_context *cmd_ctx)
5707 {
5708 return register_commands(cmd_ctx, NULL, target_command_handlers);
5709 }
5710
5711 static bool target_reset_nag = true;
5712
5713 bool get_target_reset_nag(void)
5714 {
5715 return target_reset_nag;
5716 }
5717
5718 COMMAND_HANDLER(handle_target_reset_nag)
5719 {
5720 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
5721 &target_reset_nag, "Nag after each reset about options to improve "
5722 "performance");
5723 }
5724
5725 COMMAND_HANDLER(handle_ps_command)
5726 {
5727 struct target *target = get_current_target(CMD_CTX);
5728 char *display;
5729 if (target->state != TARGET_HALTED) {
5730 LOG_INFO("target not halted !!");
5731 return ERROR_OK;
5732 }
5733
5734 if ((target->rtos) && (target->rtos->type)
5735 && (target->rtos->type->ps_command)) {
5736 display = target->rtos->type->ps_command(target);
5737 command_print(CMD_CTX, "%s", display);
5738 free(display);
5739 return ERROR_OK;
5740 } else {
5741 LOG_INFO("failed");
5742 return ERROR_TARGET_FAILURE;
5743 }
5744 }
5745
5746 static void binprint(struct command_context *cmd_ctx, const char *text, const uint8_t *buf, int size)
5747 {
5748 if (text != NULL)
5749 command_print_sameline(cmd_ctx, "%s", text);
5750 for (int i = 0; i < size; i++)
5751 command_print_sameline(cmd_ctx, " %02x", buf[i]);
5752 command_print(cmd_ctx, " ");
5753 }
5754
5755 COMMAND_HANDLER(handle_test_mem_access_command)
5756 {
5757 struct target *target = get_current_target(CMD_CTX);
5758 uint32_t test_size;
5759 int retval = ERROR_OK;
5760
5761 if (target->state != TARGET_HALTED) {
5762 LOG_INFO("target not halted !!");
5763 return ERROR_FAIL;
5764 }
5765
5766 if (CMD_ARGC != 1)
5767 return ERROR_COMMAND_SYNTAX_ERROR;
5768
5769 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
5770
5771 /* Test reads */
5772 size_t num_bytes = test_size + 4;
5773
5774 struct working_area *wa = NULL;
5775 retval = target_alloc_working_area(target, num_bytes, &wa);
5776 if (retval != ERROR_OK) {
5777 LOG_ERROR("Not enough working area");
5778 return ERROR_FAIL;
5779 }
5780
5781 uint8_t *test_pattern = malloc(num_bytes);
5782
5783 for (size_t i = 0; i < num_bytes; i++)
5784 test_pattern[i] = rand();
5785
5786 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
5787 if (retval != ERROR_OK) {
5788 LOG_ERROR("Test pattern write failed");
5789 goto out;
5790 }
5791
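/* Loop over host buffer alignment (aligned/unaligned), access size and target
 * address offset.  read_buf carries random guard bytes before and after the
 * region actually read, so the memcmp() below also catches reads that overrun
 * the requested range on the host side. */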
5792 for (int host_offset = 0; host_offset <= 1; host_offset++) {
5793 for (int size = 1; size <= 4; size *= 2) {
5794 for (int offset = 0; offset < 4; offset++) {
5795 uint32_t count = test_size / size;
5796 size_t host_bufsiz = (count + 2) * size + host_offset;
5797 uint8_t *read_ref = malloc(host_bufsiz);
5798 uint8_t *read_buf = malloc(host_bufsiz);
5799
5800 for (size_t i = 0; i < host_bufsiz; i++) {
5801 read_ref[i] = rand();
5802 read_buf[i] = read_ref[i];
5803 }
5804 command_print_sameline(CMD_CTX,
5805 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
5806 size, offset, host_offset ? "un" : "");
5807
5808 struct duration bench;
5809 duration_start(&bench);
5810
5811 retval = target_read_memory(target, wa->address + offset, size, count,
5812 read_buf + size + host_offset);
5813
5814 duration_measure(&bench);
5815
5816 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
5817 command_print(CMD_CTX, "Unsupported alignment");
5818 goto next;
5819 } else if (retval != ERROR_OK) {
5820 command_print(CMD_CTX, "Memory read failed");
5821 goto next;
5822 }
5823
5824 /* replay on host */
5825 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
5826
5827 /* check result */
5828 int result = memcmp(read_ref, read_buf, host_bufsiz);
5829 if (result == 0) {
5830 command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)",
5831 duration_elapsed(&bench),
5832 duration_kbps(&bench, count * size));
5833 } else {
5834 command_print(CMD_CTX, "Compare failed");
5835 binprint(CMD_CTX, "ref:", read_ref, host_bufsiz);
5836 binprint(CMD_CTX, "buf:", read_buf, host_bufsiz);
5837 }
5838 next:
5839 free(read_ref);
5840 free(read_buf);
5841 }
5842 }
5843 }
5844
5845 out:
5846 free(test_pattern);
5847
5848 if (wa != NULL)
5849 target_free_working_area(target, wa);
5850
5851 /* Test writes */
5852 num_bytes = test_size + 4 + 4 + 4;
5853
5854 retval = target_alloc_working_area(target, num_bytes, &wa);
5855 if (retval != ERROR_OK) {
5856 LOG_ERROR("Not enough working area");
5857 return ERROR_FAIL;
5858 }
5859
5860 test_pattern = malloc(num_bytes);
5861
5862 for (size_t i = 0; i < num_bytes; i++)
5863 test_pattern[i] = rand();
5864
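/* Same loop structure as the read test above: rewrite the known pattern each
 * pass, write 'count' elements from an (un)aligned host buffer into the middle
 * of the working area, read the whole area back and compare it against a
 * host-side replay.  The extra 4 + 4 + 4 bytes leave guard space around the
 * region being written. */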
5865 for (int host_offset = 0; host_offset <= 1; host_offset++) {
5866 for (int size = 1; size <= 4; size *= 2) {
5867 for (int offset = 0; offset < 4; offset++) {
5868 uint32_t count = test_size / size;
5869 size_t host_bufsiz = count * size + host_offset;
5870 uint8_t *read_ref = malloc(num_bytes);
5871 uint8_t *read_buf = malloc(num_bytes);
5872 uint8_t *write_buf = malloc(host_bufsiz);
5873
5874 for (size_t i = 0; i < host_bufsiz; i++)
5875 write_buf[i] = rand();
5876 command_print_sameline(CMD_CTX,
5877 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
5878 size, offset, host_offset ? "un" : "");
5879
5880 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
5881 if (retval != ERROR_OK) {
5882 command_print(CMD_CTX, "Test pattern write failed");
5883 goto nextw;
5884 }
5885
5886 /* replay on host */
5887 memcpy(read_ref, test_pattern, num_bytes);
5888 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
5889
5890 struct duration bench;
5891 duration_start(&bench);
5892
5893 retval = target_write_memory(target, wa->address + size + offset, size, count,
5894 write_buf + host_offset);
5895
5896 duration_measure(&bench);
5897
5898 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
5899 command_print(CMD_CTX, "Unsupported alignment");
5900 goto nextw;
5901 } else if (retval != ERROR_OK) {
5902 command_print(CMD_CTX, "Memory write failed");
5903 goto nextw;
5904 }
5905
5906 /* read back */
5907 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
5908 if (retval != ERROR_OK) {
5909 command_print(CMD_CTX, "Read back failed");
5910 goto nextw;
5911 }
5912
5913 /* check result */
5914 int result = memcmp(read_ref, read_buf, num_bytes);
5915 if (result == 0) {
5916 command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)",
5917 duration_elapsed(&bench),
5918 duration_kbps(&bench, count * size));
5919 } else {
5920 command_print(CMD_CTX, "Compare failed");
5921 binprint(CMD_CTX, "ref:", read_ref, num_bytes);
5922 binprint(CMD_CTX, "buf:", read_buf, num_bytes);
5923 }
5924 nextw:
5925 free(read_ref);
5926 free(read_buf);
free(write_buf);
5927 }
5928 }
5929 }
5930
5931 free(test_pattern);
5932
5933 if (wa != NULL)
5934 target_free_working_area(target, wa);
5935 return retval;
5936 }
5937
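/* Commands that mainly operate on a configured target at run time; registered
 * per command context by target_register_user_commands() below. */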
5938 static const struct command_registration target_exec_command_handlers[] = {
5939 {
5940 .name = "fast_load_image",
5941 .handler = handle_fast_load_image_command,
5942 .mode = COMMAND_ANY,
5943 .help = "Load image into server memory for later use by "
5944 "fast_load; primarily for profiling",
5945 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
5946 "[min_address [max_length]]",
5947 },
5948 {
5949 .name = "fast_load",
5950 .handler = handle_fast_load_command,
5951 .mode = COMMAND_EXEC,
5952 .help = "load the active fast_load image onto the current target; "
5953 "mainly for profiling purposes",
5954 .usage = "",
5955 },
5956 {
5957 .name = "profile",
5958 .handler = handle_profile_command,
5959 .mode = COMMAND_EXEC,
5960 .usage = "seconds filename [start end]",
5961 .help = "profiling samples the CPU PC",
5962 },
5963 /** @todo don't register virt2phys() unless target supports it */
5964 {
5965 .name = "virt2phys",
5966 .handler = handle_virt2phys_command,
5967 .mode = COMMAND_ANY,
5968 .help = "translate a virtual address into a physical address",
5969 .usage = "virtual_address",
5970 },
5971 {
5972 .name = "reg",
5973 .handler = handle_reg_command,
5974 .mode = COMMAND_EXEC,
5975 .help = "display (reread from target with \"force\") or set a register; "
5976 "with no arguments, displays all registers and their values",
5977 .usage = "[(register_number|register_name) [(value|'force')]]",
5978 },
5979 {
5980 .name = "poll",
5981 .handler = handle_poll_command,
5982 .mode = COMMAND_EXEC,
5983 .help = "poll target state; or reconfigure background polling",
5984 .usage = "['on'|'off']",
5985 },
5986 {
5987 .name = "wait_halt",
5988 .handler = handle_wait_halt_command,
5989 .mode = COMMAND_EXEC,
5990 .help = "wait up to the specified number of milliseconds "
5991 "(default 5000) for a previously requested halt",
5992 .usage = "[milliseconds]",
5993 },
5994 {
5995 .name = "halt",
5996 .handler = handle_halt_command,
5997 .mode = COMMAND_EXEC,
5998 .help = "request target to halt, then wait up to the specified "
5999 "number of milliseconds (default 5000) for it to complete",
6000 .usage = "[milliseconds]",
6001 },
6002 {
6003 .name = "resume",
6004 .handler = handle_resume_command,
6005 .mode = COMMAND_EXEC,
6006 .help = "resume target execution from current PC or address",
6007 .usage = "[address]",
6008 },
6009 {
6010 .name = "reset",
6011 .handler = handle_reset_command,
6012 .mode = COMMAND_EXEC,
6013 .usage = "[run|halt|init]",
6014 .help = "Reset all targets into the specified mode. "
6015 "Default reset mode is 'run', if not given.",
6016 },
6017 {
6018 .name = "soft_reset_halt",
6019 .handler = handle_soft_reset_halt_command,
6020 .mode = COMMAND_EXEC,
6021 .usage = "",
6022 .help = "halt the target and do a soft reset",
6023 },
6024 {
6025 .name = "step",
6026 .handler = handle_step_command,
6027 .mode = COMMAND_EXEC,
6028 .help = "step one instruction from current PC or address",
6029 .usage = "[address]",
6030 },
6031 {
6032 .name = "mdw",
6033 .handler = handle_md_command,
6034 .mode = COMMAND_EXEC,
6035 .help = "display memory words",
6036 .usage = "['phys'] address [count]",
6037 },
6038 {
6039 .name = "mdh",
6040 .handler = handle_md_command,
6041 .mode = COMMAND_EXEC,
6042 .help = "display memory half-words",
6043 .usage = "['phys'] address [count]",
6044 },
6045 {
6046 .name = "mdb",
6047 .handler = handle_md_command,
6048 .mode = COMMAND_EXEC,
6049 .help = "display memory bytes",
6050 .usage = "['phys'] address [count]",
6051 },
6052 {
6053 .name = "mww",
6054 .handler = handle_mw_command,
6055 .mode = COMMAND_EXEC,
6056 .help = "write memory word",
6057 .usage = "['phys'] address value [count]",
6058 },
6059 {
6060 .name = "mwh",
6061 .handler = handle_mw_command,
6062 .mode = COMMAND_EXEC,
6063 .help = "write memory half-word",
6064 .usage = "['phys'] address value [count]",
6065 },
6066 {
6067 .name = "mwb",
6068 .handler = handle_mw_command,
6069 .mode = COMMAND_EXEC,
6070 .help = "write memory byte",
6071 .usage = "['phys'] address value [count]",
6072 },
6073 {
6074 .name = "bp",
6075 .handler = handle_bp_command,
6076 .mode = COMMAND_EXEC,
6077 .help = "list or set hardware or software breakpoint",
6078 .usage = "<address> [<asid>] <length> ['hw'|'hw_ctx']",
6079 },
6080 {
6081 .name = "rbp",
6082 .handler = handle_rbp_command,
6083 .mode = COMMAND_EXEC,
6084 .help = "remove breakpoint",
6085 .usage = "address",
6086 },
6087 {
6088 .name = "wp",
6089 .handler = handle_wp_command,
6090 .mode = COMMAND_EXEC,
6091 .help = "list (no params) or create watchpoints",
6092 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6093 },
6094 {
6095 .name = "rwp",
6096 .handler = handle_rwp_command,
6097 .mode = COMMAND_EXEC,
6098 .help = "remove watchpoint",
6099 .usage = "address",
6100 },
6101 {
6102 .name = "load_image",
6103 .handler = handle_load_image_command,
6104 .mode = COMMAND_EXEC,
6105 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6106 "[min_address] [max_length]",
6107 },
6108 {
6109 .name = "dump_image",
6110 .handler = handle_dump_image_command,
6111 .mode = COMMAND_EXEC,
6112 .usage = "filename address size",
6113 },
6114 {
6115 .name = "verify_image",
6116 .handler = handle_verify_image_command,
6117 .mode = COMMAND_EXEC,
6118 .usage = "filename [offset [type]]",
6119 },
6120 {
6121 .name = "test_image",
6122 .handler = handle_test_image_command,
6123 .mode = COMMAND_EXEC,
6124 .usage = "filename [offset [type]]",
6125 },
6126 {
6127 .name = "mem2array",
6128 .mode = COMMAND_EXEC,
6129 .jim_handler = jim_mem2array,
6130 .help = "read 8/16/32 bit memory and return as a TCL array "
6131 "for script processing",
6132 .usage = "arrayname bitwidth address count",
6133 },
6134 {
6135 .name = "array2mem",
6136 .mode = COMMAND_EXEC,
6137 .jim_handler = jim_array2mem,
6138 .help = "convert a TCL array to memory locations "
6139 "and write the 8/16/32 bit values",
6140 .usage = "arrayname bitwidth address count",
6141 },
6142 {
6143 .name = "reset_nag",
6144 .handler = handle_target_reset_nag,
6145 .mode = COMMAND_ANY,
6146 .help = "Nag after each reset about options that could have been "
6147 "enabled to improve performance. ",
6148 .usage = "['enable'|'disable']",
6149 },
6150 {
6151 .name = "ps",
6152 .handler = handle_ps_command,
6153 .mode = COMMAND_EXEC,
6154 .help = "list all tasks",
6155 .usage = "",
6156 },
6157 {
6158 .name = "test_mem_access",
6159 .handler = handle_test_mem_access_command,
6160 .mode = COMMAND_EXEC,
6161 .help = "Test the target's memory access functions",
6162 .usage = "size",
6163 },
6164
6165 COMMAND_REGISTRATION_DONE
6166 };
6167 static int target_register_user_commands(struct command_context *cmd_ctx)
6168 {
6169 int retval = ERROR_OK;
6170 retval = target_request_register_commands(cmd_ctx);
6171 if (retval != ERROR_OK)
6172 return retval;
6173
6174 retval = trace_register_commands(cmd_ctx);
6175 if (retval != ERROR_OK)
6176 return retval;
6177
6179 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6180 }
