build: cleanup src/target directory
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 √ėyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net> *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * This program is free software; you can redistribute it and/or modify *
24 * it under the terms of the GNU General Public License as published by *
25 * the Free Software Foundation; either version 2 of the License, or *
26 * (at your option) any later version. *
27 * *
28 * This program is distributed in the hope that it will be useful, *
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
31 * GNU General Public License for more details. *
32 * *
33 * You should have received a copy of the GNU General Public License *
34 * along with this program; if not, write to the *
35 * Free Software Foundation, Inc., *
36 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
37 ***************************************************************************/
38
39 #ifdef HAVE_CONFIG_H
40 #include "config.h"
41 #endif
42
43 #include <helper/time_support.h>
44 #include <jtag/jtag.h>
45 #include <flash/nor/core.h>
46
47 #include "target.h"
48 #include "target_type.h"
49 #include "target_request.h"
50 #include "breakpoints.h"
51 #include "register.h"
52 #include "trace.h"
53 #include "image.h"
54 #include "rtos/rtos.h"
55
56 static int target_read_buffer_default(struct target *target, uint32_t address,
57 uint32_t size, uint8_t *buffer);
58 static int target_write_buffer_default(struct target *target, uint32_t address,
59 uint32_t size, const uint8_t *buffer);
60 static int target_array2mem(Jim_Interp *interp, struct target *target,
61 int argc, Jim_Obj * const *argv);
62 static int target_mem2array(Jim_Interp *interp, struct target *target,
63 int argc, Jim_Obj * const *argv);
64 static int target_register_user_commands(struct command_context *cmd_ctx);
65
66 /* targets */
67 extern struct target_type arm7tdmi_target;
68 extern struct target_type arm720t_target;
69 extern struct target_type arm9tdmi_target;
70 extern struct target_type arm920t_target;
71 extern struct target_type arm966e_target;
72 extern struct target_type arm946e_target;
73 extern struct target_type arm926ejs_target;
74 extern struct target_type fa526_target;
75 extern struct target_type feroceon_target;
76 extern struct target_type dragonite_target;
77 extern struct target_type xscale_target;
78 extern struct target_type cortexm3_target;
79 extern struct target_type cortexa8_target;
80 extern struct target_type arm11_target;
81 extern struct target_type mips_m4k_target;
82 extern struct target_type avr_target;
83 extern struct target_type dsp563xx_target;
84 extern struct target_type dsp5680xx_target;
85 extern struct target_type testee_target;
86 extern struct target_type avr32_ap7k_target;
87 extern struct target_type stm32_stlink_target;
88
/* NULL-terminated registry of every target driver compiled into this build;
 * "target create" resolves its type-name argument against this table. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm3_target,
	&cortexa8_target,
	&arm11_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&stm32_stlink_target,
	NULL,
};
113
/* head of the singly-linked list of all configured targets */
struct target *all_targets;
/* registered event/timer callback chains, serviced from the main loop */
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
/* period, in ms, of the background handle_target() polling timer */
static const int polling_interval = 100;
118
/* Tcl spellings accepted for an assert/deassert argument. */
static const Jim_Nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};
128
/* Printable names for ERROR_TARGET_* codes; used by target_strerror_safe(). */
static const Jim_Nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
143
144 static const char *target_strerror_safe(int err)
145 {
146 const Jim_Nvp *n;
147
148 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
149 if (n->name == NULL)
150 return "unknown";
151 else
152 return n->name;
153 }
154
/* Event names usable from Tcl ("-event <name>").  Note that some events
 * appear twice on purpose: the second entry is a historical alias kept so
 * that old configuration scripts keep working (e.g. "resume-ok" maps to
 * the same value as "resumed"). */
static const Jim_Nvp nvp_target_event[] = {
	{ .value = TARGET_EVENT_OLD_gdb_program_config , .name = "old-gdb_program_config" },
	{ .value = TARGET_EVENT_OLD_pre_resume , .name = "old-pre_resume" },

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	/* historical name */

	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },

	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_HALT_PRE, .name = "reset-halt-pre" },
	{ .value = TARGET_EVENT_RESET_HALT_POST, .name = "reset-halt-post" },
	{ .value = TARGET_EVENT_RESET_WAIT_PRE, .name = "reset-wait-pre" },
	{ .value = TARGET_EVENT_RESET_WAIT_POST, .name = "reset-wait-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },

	/* duplicate "resume" entries: historical aliases for the names above */
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUMED , .name = "resume-ok" },
	{ .value = TARGET_EVENT_RESUME_END , .name = "resume-end" },

	{ .name = NULL, .value = -1 }
};
205
/* Printable names for enum target_state values. */
static const Jim_Nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};
214
/* Printable names for enum target_debug_reason values. */
static const Jim_Nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request" , .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step" , .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED },
	{ .name = "undefined" , .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
225
/* Endianness spellings accepted from Tcl ("be"/"le" are shorthands). */
static const Jim_Nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};
233
/* Names for enum target_reset_mode; used to build the ocd_process_reset call. */
static const Jim_Nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run" , .value = RESET_RUN },
	{ .name = "halt" , .value = RESET_HALT },
	{ .name = "init" , .value = RESET_INIT },
	{ .name = NULL , .value = -1 },
};
241
242 const char *debug_reason_name(struct target *t)
243 {
244 const char *cp;
245
246 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
247 t->debug_reason)->name;
248 if (!cp) {
249 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
250 cp = "(*BUG*unknown*BUG*)";
251 }
252 return cp;
253 }
254
255 const char *target_state_name(struct target *t)
256 {
257 const char *cp;
258 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
259 if (!cp) {
260 LOG_ERROR("Invalid target state: %d", (int)(t->state));
261 cp = "(*BUG*unknown*BUG*)";
262 }
263 return cp;
264 }
265
266 /* determine the number of the new target */
267 static int new_target_number(void)
268 {
269 struct target *t;
270 int x;
271
272 /* number is 0 based */
273 x = -1;
274 t = all_targets;
275 while (t) {
276 if (x < t->target_number)
277 x = t->target_number;
278 t = t->next;
279 }
280 return x + 1;
281 }
282
283 /* read a uint32_t from a buffer in target memory endianness */
284 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
285 {
286 if (target->endianness == TARGET_LITTLE_ENDIAN)
287 return le_to_h_u32(buffer);
288 else
289 return be_to_h_u32(buffer);
290 }
291
292 /* read a uint24_t from a buffer in target memory endianness */
293 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
294 {
295 if (target->endianness == TARGET_LITTLE_ENDIAN)
296 return le_to_h_u24(buffer);
297 else
298 return be_to_h_u24(buffer);
299 }
300
301 /* read a uint16_t from a buffer in target memory endianness */
302 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
303 {
304 if (target->endianness == TARGET_LITTLE_ENDIAN)
305 return le_to_h_u16(buffer);
306 else
307 return be_to_h_u16(buffer);
308 }
309
/* read a uint8_t from a buffer (endianness is irrelevant for one byte) */
static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
{
	return buffer[0];
}
315
316 /* write a uint32_t to a buffer in target memory endianness */
317 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
318 {
319 if (target->endianness == TARGET_LITTLE_ENDIAN)
320 h_u32_to_le(buffer, value);
321 else
322 h_u32_to_be(buffer, value);
323 }
324
325 /* write a uint24_t to a buffer in target memory endianness */
326 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
327 {
328 if (target->endianness == TARGET_LITTLE_ENDIAN)
329 h_u24_to_le(buffer, value);
330 else
331 h_u24_to_be(buffer, value);
332 }
333
334 /* write a uint16_t to a buffer in target memory endianness */
335 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
336 {
337 if (target->endianness == TARGET_LITTLE_ENDIAN)
338 h_u16_to_le(buffer, value);
339 else
340 h_u16_to_be(buffer, value);
341 }
342
/* write a uint8_t to a buffer (endianness is irrelevant for one byte) */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	buffer[0] = value;
}
348
/* read a uint32_t array from a buffer in target memory endianness
 * (the original comment said "write", but this decodes buffer -> dstbuf) */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
}
356
/* read a uint16_t array from a buffer in target memory endianness
 * (the original comment said "write", but this decodes buffer -> dstbuf) */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
}
364
/* write a uint32_t array to a buffer in target memory endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, uint32_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u32(target, buffer + idx * 4, srcbuf[idx]);
}
372
/* write a uint16_t array to a buffer in target memory endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, uint16_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u16(target, buffer + idx * 2, srcbuf[idx]);
}
380
381 /* return a pointer to a configured target; id is name or number */
382 struct target *get_target(const char *id)
383 {
384 struct target *target;
385
386 /* try as tcltarget name */
387 for (target = all_targets; target; target = target->next) {
388 if (target->cmd_name == NULL)
389 continue;
390 if (strcmp(id, target->cmd_name) == 0)
391 return target;
392 }
393
394 /* It's OK to remove this fallback sometime after August 2010 or so */
395
396 /* no match, try as number */
397 unsigned num;
398 if (parse_uint(id, &num) != ERROR_OK)
399 return NULL;
400
401 for (target = all_targets; target; target = target->next) {
402 if (target->target_number == (int)num) {
403 LOG_WARNING("use '%s' as target identifier, not '%u'",
404 target->cmd_name, num);
405 return target;
406 }
407 }
408
409 return NULL;
410 }
411
412 /* returns a pointer to the n-th configured target */
413 static struct target *get_target_by_num(int num)
414 {
415 struct target *target = all_targets;
416
417 while (target) {
418 if (target->target_number == num)
419 return target;
420 target = target->next;
421 }
422
423 return NULL;
424 }
425
426 struct target *get_current_target(struct command_context *cmd_ctx)
427 {
428 struct target *target = get_target_by_num(cmd_ctx->current_target);
429
430 if (target == NULL) {
431 LOG_ERROR("BUG: current_target out of bounds");
432 exit(-1);
433 }
434
435 return target;
436 }
437
/* Poll the target's current state via its driver.  Also watches for a
 * pending halt request (set by target_halt()) that never completes:
 * after one second it gives up and notifies GDB so it does not hang. */
int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->halt_issued) {
		if (target->state == TARGET_HALTED)
			target->halt_issued = false;
		else {
			/* elapsed ms since the halt was requested */
			long long t = timeval_ms() - target->halt_issued_time;
			if (t > 1000) {
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}
467
468 int target_halt(struct target *target)
469 {
470 int retval;
471 /* We can't poll until after examine */
472 if (!target_was_examined(target)) {
473 LOG_ERROR("Target not examined yet");
474 return ERROR_FAIL;
475 }
476
477 retval = target->type->halt(target);
478 if (retval != ERROR_OK)
479 return retval;
480
481 target->halt_issued = true;
482 target->halt_issued_time = timeval_ms();
483
484 return ERROR_OK;
485 }
486
487 /**
488 * Make the target (re)start executing using its saved execution
489 * context (possibly with some modifications).
490 *
491 * @param target Which target should start executing.
492 * @param current True to use the target's saved program counter instead
493 * of the address parameter
494 * @param address Optionally used as the program counter.
495 * @param handle_breakpoints True iff breakpoints at the resumption PC
496 * should be skipped. (For example, maybe execution was stopped by
497 * such a breakpoint, in which case it would be counterprodutive to
498 * let it re-trigger.
499 * @param debug_execution False if all working areas allocated by OpenOCD
500 * should be released and/or restored to their original contents.
501 * (This would for example be true to run some downloaded "helper"
502 * algorithm code, which resides in one such working buffer and uses
503 * another for data storage.)
504 *
505 * @todo Resolve the ambiguity about what the "debug_execution" flag
506 * signifies. For example, Target implementations don't agree on how
507 * it relates to invalidation of the register cache, or to whether
508 * breakpoints and watchpoints should be enabled. (It would seem wrong
509 * to enable breakpoints when running downloaded "helper" algorithms
510 * (debug_execution true), since the breakpoints would be set to match
511 * target firmware being debugged, not the helper algorithm.... and
512 * enabling them could cause such helpers to malfunction (for example,
513 * by overwriting data with a breakpoint instruction. On the other
514 * hand the infrastructure for running such helpers might use this
515 * procedure but rely on hardware breakpoint to detect termination.)
516 */
517 int target_resume(struct target *target, int current, uint32_t address, int handle_breakpoints, int debug_execution)
518 {
519 int retval;
520
521 /* We can't poll until after examine */
522 if (!target_was_examined(target)) {
523 LOG_ERROR("Target not examined yet");
524 return ERROR_FAIL;
525 }
526
527 /* note that resume *must* be asynchronous. The CPU can halt before
528 * we poll. The CPU can even halt at the current PC as a result of
529 * a software breakpoint being inserted by (a bug?) the application.
530 */
531 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
532 if (retval != ERROR_OK)
533 return retval;
534
535 return retval;
536 }
537
538 static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
539 {
540 char buf[100];
541 int retval;
542 Jim_Nvp *n;
543 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
544 if (n->name == NULL) {
545 LOG_ERROR("invalid reset mode");
546 return ERROR_FAIL;
547 }
548
549 /* disable polling during reset to make reset event scripts
550 * more predictable, i.e. dr/irscan & pathmove in events will
551 * not have JTAG operations injected into the middle of a sequence.
552 */
553 bool save_poll = jtag_poll_get_enabled();
554
555 jtag_poll_set_enabled(false);
556
557 sprintf(buf, "ocd_process_reset %s", n->name);
558 retval = Jim_Eval(cmd_ctx->interp, buf);
559
560 jtag_poll_set_enabled(save_poll);
561
562 if (retval != JIM_OK) {
563 Jim_MakeErrorMessage(cmd_ctx->interp);
564 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
565 return ERROR_FAIL;
566 }
567
568 /* We want any events to be processed before the prompt */
569 retval = target_call_timer_callbacks_now();
570
571 struct target *target;
572 for (target = all_targets; target; target = target->next)
573 target->type->check_reset(target);
574
575 return retval;
576 }
577
/* Default virt2phys hook for MMU-less targets: physical == virtual. */
static int identity_virt2phys(struct target *target,
		uint32_t virtual, uint32_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
584
/* Default mmu hook for MMU-less targets: always reports "disabled". */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
590
/* Default examine hook: nothing to probe, just mark the target examined. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
596
/* Default check_reset hook: no post-reset sanity check. */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
602
/* Run the driver's examine hook for a single target. */
int target_examine_one(struct target *target)
{
	return target->type->examine(target);
}
607
608 static int jtag_enable_callback(enum jtag_event event, void *priv)
609 {
610 struct target *target = priv;
611
612 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
613 return ERROR_OK;
614
615 jtag_unregister_event_callback(jtag_enable_callback, target);
616 return target_examine_one(target);
617 }
618
619
620 /* Targets that correctly implement init + examine, i.e.
621 * no communication with target during init:
622 *
623 * XScale
624 */
625 int target_examine(void)
626 {
627 int retval = ERROR_OK;
628 struct target *target;
629
630 for (target = all_targets; target; target = target->next) {
631 /* defer examination, but don't skip it */
632 if (!target->tap->enabled) {
633 jtag_register_event_callback(jtag_enable_callback,
634 target);
635 continue;
636 }
637 retval = target_examine_one(target);
638 if (retval != ERROR_OK)
639 return retval;
640 }
641 return retval;
642 }
/* Name of the target's driver type (e.g. "cortex_m3"). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
647
648 static int target_write_memory_imp(struct target *target, uint32_t address,
649 uint32_t size, uint32_t count, const uint8_t *buffer)
650 {
651 if (!target_was_examined(target)) {
652 LOG_ERROR("Target not examined yet");
653 return ERROR_FAIL;
654 }
655 return target->type->write_memory_imp(target, address, size, count, buffer);
656 }
657
658 static int target_read_memory_imp(struct target *target, uint32_t address,
659 uint32_t size, uint32_t count, uint8_t *buffer)
660 {
661 if (!target_was_examined(target)) {
662 LOG_ERROR("Target not examined yet");
663 return ERROR_FAIL;
664 }
665 return target->type->read_memory_imp(target, address, size, count, buffer);
666 }
667
668 static int target_soft_reset_halt_imp(struct target *target)
669 {
670 if (!target_was_examined(target)) {
671 LOG_ERROR("Target not examined yet");
672 return ERROR_FAIL;
673 }
674 if (!target->type->soft_reset_halt_imp) {
675 LOG_ERROR("Target %s does not support soft_reset_halt",
676 target_name(target));
677 return ERROR_FAIL;
678 }
679 return target->type->soft_reset_halt_imp(target);
680 }
681
/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	/* running_alg guards against starting a second algorithm while
	 * one is in flight (see target_start_algorithm) */
	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
721
/**
 * Downloads a target-specific native code algorithm to the target,
 * executes and leaves it running.  Pair with target_wait_algorithm()
 * to collect the result.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t entry_point, uint32_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	/* cleared again by target_wait_algorithm() on completion */
	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}
760
/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	/* on timeout the algorithm is still running, so keep the flag set
	 * and let the caller retry the wait */
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}
795
796
/* Read target memory (virtual addresses) via the driver hook. */
int target_read_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	return target->type->read_memory(target, address, size, count, buffer);
}
802
/* Read target memory (physical addresses) via the driver hook. */
static int target_read_phys_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	return target->type->read_phys_memory(target, address, size, count, buffer);
}
808
/* Write target memory (virtual addresses) via the driver hook. */
int target_write_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	return target->type->write_memory(target, address, size, count, buffer);
}
814
/* Write target memory (physical addresses) via the driver hook. */
static int target_write_phys_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	return target->type->write_phys_memory(target, address, size, count, buffer);
}
820
/* Bulk (word-oriented) write via the driver's optimized download path. */
int target_bulk_write_memory(struct target *target,
		uint32_t address, uint32_t count, const uint8_t *buffer)
{
	return target->type->bulk_write_memory(target, address, count, buffer);
}
826
827 int target_add_breakpoint(struct target *target,
828 struct breakpoint *breakpoint)
829 {
830 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
831 LOG_WARNING("target %s is not halted", target->cmd_name);
832 return ERROR_TARGET_NOT_HALTED;
833 }
834 return target->type->add_breakpoint(target, breakpoint);
835 }
836
837 int target_add_context_breakpoint(struct target *target,
838 struct breakpoint *breakpoint)
839 {
840 if (target->state != TARGET_HALTED) {
841 LOG_WARNING("target %s is not halted", target->cmd_name);
842 return ERROR_TARGET_NOT_HALTED;
843 }
844 return target->type->add_context_breakpoint(target, breakpoint);
845 }
846
847 int target_add_hybrid_breakpoint(struct target *target,
848 struct breakpoint *breakpoint)
849 {
850 if (target->state != TARGET_HALTED) {
851 LOG_WARNING("target %s is not halted", target->cmd_name);
852 return ERROR_TARGET_NOT_HALTED;
853 }
854 return target->type->add_hybrid_breakpoint(target, breakpoint);
855 }
856
/* Remove a breakpoint via the driver hook (no halted-state requirement). */
int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}
862
863 int target_add_watchpoint(struct target *target,
864 struct watchpoint *watchpoint)
865 {
866 if (target->state != TARGET_HALTED) {
867 LOG_WARNING("target %s is not halted", target->cmd_name);
868 return ERROR_TARGET_NOT_HALTED;
869 }
870 return target->type->add_watchpoint(target, watchpoint);
871 }
/* Remove a watchpoint via the driver hook (no halted-state requirement). */
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}
877
/* Fetch the register list GDB expects, via the driver hook. */
int target_get_gdb_reg_list(struct target *target,
		struct reg **reg_list[], int *reg_list_size)
{
	return target->type->get_gdb_reg_list(target, reg_list, reg_list_size);
}
/* Single-step the target via the driver hook; `current` selects the saved
 * PC over `address`, mirroring target_resume(). */
int target_step(struct target *target,
		int current, uint32_t address, int handle_breakpoints)
{
	return target->type->step(target, current, address, handle_breakpoints);
}
888
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static void target_reset_examined(struct target *target)
{
	target->examined = false;
}
897
/* Stub installed when an MMU-aware driver forgot read_phys_memory;
 * logs and fails instead of crashing on a NULL hook. */
static int err_read_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	LOG_ERROR("Not implemented: %s", __func__);
	return ERROR_FAIL;
}
904
/* Stub installed when an MMU-aware driver forgot write_phys_memory;
 * logs and fails instead of crashing on a NULL hook. */
static int err_write_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	LOG_ERROR("Not implemented: %s", __func__);
	return ERROR_FAIL;
}
911
912 static int handle_target(void *priv);
913
/* Initialize one target: run the driver's init hook, then patch the
 * driver vtable -- install defaults for optional hooks, interpose the
 * examined-state guards over the memory accessors, and normalize the
 * MMU-related hooks.  NOTE: the hook swapping below mutates the shared
 * target_type struct; order matters and must not be rearranged. */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	/* optional hooks get harmless defaults */
	if (type->examine == NULL)
		type->examine = default_examine;

	if (type->check_reset == NULL)
		type->check_reset = default_check_reset;

	assert(type->init_target != NULL);

	int retval = type->init_target(cmd_ctx, target);
	if (ERROR_OK != retval) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/**
	 * @todo get rid of those *memory_imp() methods, now that all
	 * callers are using target_*_memory() accessors ... and make
	 * sure the "physical" paths handle the same issues.
	 */
	/* a non-invasive way(in terms of patches) to add some code that
	 * runs before the type->write/read_memory implementation
	 */
	type->write_memory_imp = target->type->write_memory;
	type->write_memory = target_write_memory_imp;

	type->read_memory_imp = target->type->read_memory;
	type->read_memory = target_read_memory_imp;

	type->soft_reset_halt_imp = target->type->soft_reset_halt;
	type->soft_reset_halt = target_soft_reset_halt_imp;

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (type->write_phys_memory == NULL) {
			LOG_ERROR("type '%s' is missing write_phys_memory",
					type->name);
			type->write_phys_memory = err_write_phys_memory;
		}
		if (type->read_phys_memory == NULL) {
			LOG_ERROR("type '%s' is missing read_phys_memory",
					type->name);
			type->read_phys_memory = err_read_phys_memory;
		}
		if (type->virt2phys == NULL) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* default buffer accessors fall back to word-at-a-time transfers */
	if (target->type->read_buffer == NULL)
		target->type->read_buffer = target_read_buffer_default;

	if (target->type->write_buffer == NULL)
		target->type->write_buffer = target_write_buffer_default;

	return ERROR_OK;
}
991
992 static int target_init(struct command_context *cmd_ctx)
993 {
994 struct target *target;
995 int retval;
996
997 for (target = all_targets; target; target = target->next) {
998 retval = target_init_one(cmd_ctx, target);
999 if (ERROR_OK != retval)
1000 return retval;
1001 }
1002
1003 if (!all_targets)
1004 return ERROR_OK;
1005
1006 retval = target_register_user_commands(cmd_ctx);
1007 if (ERROR_OK != retval)
1008 return retval;
1009
1010 retval = target_register_timer_callback(&handle_target,
1011 polling_interval, 1, cmd_ctx->interp);
1012 if (ERROR_OK != retval)
1013 return retval;
1014
1015 return ERROR_OK;
1016 }
1017
1018 COMMAND_HANDLER(handle_target_init_command)
1019 {
1020 int retval;
1021
1022 if (CMD_ARGC != 0)
1023 return ERROR_COMMAND_SYNTAX_ERROR;
1024
1025 static bool target_initialized;
1026 if (target_initialized) {
1027 LOG_INFO("'target init' has already been called");
1028 return ERROR_OK;
1029 }
1030 target_initialized = true;
1031
1032 retval = command_run_line(CMD_CTX, "init_targets");
1033 if (ERROR_OK != retval)
1034 return retval;
1035
1036 LOG_DEBUG("Initializing targets...");
1037 return target_init(CMD_CTX);
1038 }
1039
1040 int target_register_event_callback(int (*callback)(struct target *target,
1041 enum target_event event, void *priv), void *priv)
1042 {
1043 struct target_event_callback **callbacks_p = &target_event_callbacks;
1044
1045 if (callback == NULL)
1046 return ERROR_COMMAND_SYNTAX_ERROR;
1047
1048 if (*callbacks_p) {
1049 while ((*callbacks_p)->next)
1050 callbacks_p = &((*callbacks_p)->next);
1051 callbacks_p = &((*callbacks_p)->next);
1052 }
1053
1054 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1055 (*callbacks_p)->callback = callback;
1056 (*callbacks_p)->priv = priv;
1057 (*callbacks_p)->next = NULL;
1058
1059 return ERROR_OK;
1060 }
1061
1062 int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
1063 {
1064 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1065 struct timeval now;
1066
1067 if (callback == NULL)
1068 return ERROR_COMMAND_SYNTAX_ERROR;
1069
1070 if (*callbacks_p) {
1071 while ((*callbacks_p)->next)
1072 callbacks_p = &((*callbacks_p)->next);
1073 callbacks_p = &((*callbacks_p)->next);
1074 }
1075
1076 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1077 (*callbacks_p)->callback = callback;
1078 (*callbacks_p)->periodic = periodic;
1079 (*callbacks_p)->time_ms = time_ms;
1080
1081 gettimeofday(&now, NULL);
1082 (*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000;
1083 time_ms -= (time_ms % 1000);
1084 (*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000);
1085 if ((*callbacks_p)->when.tv_usec > 1000000) {
1086 (*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000;
1087 (*callbacks_p)->when.tv_sec += 1;
1088 }
1089
1090 (*callbacks_p)->priv = priv;
1091 (*callbacks_p)->next = NULL;
1092
1093 return ERROR_OK;
1094 }
1095
1096 int target_unregister_event_callback(int (*callback)(struct target *target,
1097 enum target_event event, void *priv), void *priv)
1098 {
1099 struct target_event_callback **p = &target_event_callbacks;
1100 struct target_event_callback *c = target_event_callbacks;
1101
1102 if (callback == NULL)
1103 return ERROR_COMMAND_SYNTAX_ERROR;
1104
1105 while (c) {
1106 struct target_event_callback *next = c->next;
1107 if ((c->callback == callback) && (c->priv == priv)) {
1108 *p = next;
1109 free(c);
1110 return ERROR_OK;
1111 } else
1112 p = &(c->next);
1113 c = next;
1114 }
1115
1116 return ERROR_OK;
1117 }
1118
1119 static int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1120 {
1121 struct target_timer_callback **p = &target_timer_callbacks;
1122 struct target_timer_callback *c = target_timer_callbacks;
1123
1124 if (callback == NULL)
1125 return ERROR_COMMAND_SYNTAX_ERROR;
1126
1127 while (c) {
1128 struct target_timer_callback *next = c->next;
1129 if ((c->callback == callback) && (c->priv == priv)) {
1130 *p = next;
1131 free(c);
1132 return ERROR_OK;
1133 } else
1134 p = &(c->next);
1135 c = next;
1136 }
1137
1138 return ERROR_OK;
1139 }
1140
1141 int target_call_event_callbacks(struct target *target, enum target_event event)
1142 {
1143 struct target_event_callback *callback = target_event_callbacks;
1144 struct target_event_callback *next_callback;
1145
1146 if (event == TARGET_EVENT_HALTED) {
1147 /* execute early halted first */
1148 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1149 }
1150
1151 LOG_DEBUG("target event %i (%s)", event,
1152 Jim_Nvp_value2name_simple(nvp_target_event, event)->name);
1153
1154 target_handle_event(target, event);
1155
1156 while (callback) {
1157 next_callback = callback->next;
1158 callback->callback(target, event, callback->priv);
1159 callback = next_callback;
1160 }
1161
1162 return ERROR_OK;
1163 }
1164
1165 static int target_timer_callback_periodic_restart(
1166 struct target_timer_callback *cb, struct timeval *now)
1167 {
1168 int time_ms = cb->time_ms;
1169 cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
1170 time_ms -= (time_ms % 1000);
1171 cb->when.tv_sec = now->tv_sec + time_ms / 1000;
1172 if (cb->when.tv_usec > 1000000) {
1173 cb->when.tv_usec = cb->when.tv_usec - 1000000;
1174 cb->when.tv_sec += 1;
1175 }
1176 return ERROR_OK;
1177 }
1178
1179 static int target_call_timer_callback(struct target_timer_callback *cb,
1180 struct timeval *now)
1181 {
1182 cb->callback(cb->priv);
1183
1184 if (cb->periodic)
1185 return target_timer_callback_periodic_restart(cb, now);
1186
1187 return target_unregister_timer_callback(cb->callback, cb->priv);
1188 }
1189
1190 static int target_call_timer_callbacks_check_time(int checktime)
1191 {
1192 keep_alive();
1193
1194 struct timeval now;
1195 gettimeofday(&now, NULL);
1196
1197 struct target_timer_callback *callback = target_timer_callbacks;
1198 while (callback) {
1199 /* cleaning up may unregister and free this callback */
1200 struct target_timer_callback *next_callback = callback->next;
1201
1202 bool call_it = callback->callback &&
1203 ((!checktime && callback->periodic) ||
1204 now.tv_sec > callback->when.tv_sec ||
1205 (now.tv_sec == callback->when.tv_sec &&
1206 now.tv_usec >= callback->when.tv_usec));
1207
1208 if (call_it) {
1209 int retval = target_call_timer_callback(callback, &now);
1210 if (retval != ERROR_OK)
1211 return retval;
1212 }
1213
1214 callback = next_callback;
1215 }
1216
1217 return ERROR_OK;
1218 }
1219
/* Run all timer callbacks whose scheduled expiry time has passed. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1224
/* invoke periodic callbacks immediately, ignoring their scheduled
 * expiry time (one-shot callbacks still honor their schedule) */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1230
1231 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1232 {
1233 struct working_area *c = target->working_areas;
1234 struct working_area *new_wa = NULL;
1235
1236 /* Reevaluate working area address based on MMU state*/
1237 if (target->working_areas == NULL) {
1238 int retval;
1239 int enabled;
1240
1241 retval = target->type->mmu(target, &enabled);
1242 if (retval != ERROR_OK)
1243 return retval;
1244
1245 if (!enabled) {
1246 if (target->working_area_phys_spec) {
1247 LOG_DEBUG("MMU disabled, using physical "
1248 "address for working memory 0x%08x",
1249 (unsigned)target->working_area_phys);
1250 target->working_area = target->working_area_phys;
1251 } else {
1252 LOG_ERROR("No working memory available. "
1253 "Specify -work-area-phys to target.");
1254 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1255 }
1256 } else {
1257 if (target->working_area_virt_spec) {
1258 LOG_DEBUG("MMU enabled, using virtual "
1259 "address for working memory 0x%08x",
1260 (unsigned)target->working_area_virt);
1261 target->working_area = target->working_area_virt;
1262 } else {
1263 LOG_ERROR("No working memory available. "
1264 "Specify -work-area-virt to target.");
1265 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1266 }
1267 }
1268 }
1269
1270 /* only allocate multiples of 4 byte */
1271 if (size % 4) {
1272 LOG_ERROR("BUG: code tried to allocate unaligned number of bytes (0x%08x), padding", ((unsigned)(size)));
1273 size = (size + 3) & (~3);
1274 }
1275
1276 /* see if there's already a matching working area */
1277 while (c) {
1278 if ((c->free) && (c->size == size)) {
1279 new_wa = c;
1280 break;
1281 }
1282 c = c->next;
1283 }
1284
1285 /* if not, allocate a new one */
1286 if (!new_wa) {
1287 struct working_area **p = &target->working_areas;
1288 uint32_t first_free = target->working_area;
1289 uint32_t free_size = target->working_area_size;
1290
1291 c = target->working_areas;
1292 while (c) {
1293 first_free += c->size;
1294 free_size -= c->size;
1295 p = &c->next;
1296 c = c->next;
1297 }
1298
1299 if (free_size < size)
1300 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1301
1302 LOG_DEBUG("allocated new working area at address 0x%08x", (unsigned)first_free);
1303
1304 new_wa = malloc(sizeof(struct working_area));
1305 new_wa->next = NULL;
1306 new_wa->size = size;
1307 new_wa->address = first_free;
1308
1309 if (target->backup_working_area) {
1310 int retval;
1311 new_wa->backup = malloc(new_wa->size);
1312 retval = target_read_memory(target, new_wa->address, 4,
1313 new_wa->size / 4, new_wa->backup);
1314 if (retval != ERROR_OK) {
1315 free(new_wa->backup);
1316 free(new_wa);
1317 return retval;
1318 }
1319 } else
1320 new_wa->backup = NULL;
1321
1322 /* put new entry in list */
1323 *p = new_wa;
1324 }
1325
1326 /* mark as used, and return the new (reused) area */
1327 new_wa->free = false;
1328 *area = new_wa;
1329
1330 /* user pointer */
1331 new_wa->user = area;
1332
1333 return ERROR_OK;
1334 }
1335
1336 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1337 {
1338 int retval;
1339
1340 retval = target_alloc_working_area_try(target, size, area);
1341 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1342 LOG_WARNING("not enough working area available(requested %u)", (unsigned)(size));
1343 return retval;
1344
1345 }
1346
1347 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1348 {
1349 if (area->free)
1350 return ERROR_OK;
1351
1352 if (restore && target->backup_working_area) {
1353 int retval = target_write_memory(target,
1354 area->address, 4, area->size / 4, area->backup);
1355 if (retval != ERROR_OK)
1356 return retval;
1357 }
1358
1359 area->free = true;
1360
1361 /* mark user pointer invalid */
1362 *area->user = NULL;
1363 area->user = NULL;
1364
1365 return ERROR_OK;
1366 }
1367
/* Release @area, restoring its saved contents to target memory. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
1372
1373 /* free resources and restore memory, if restoring memory fails,
1374 * free up resources anyway
1375 */
1376 static void target_free_all_working_areas_restore(struct target *target, int restore)
1377 {
1378 struct working_area *c = target->working_areas;
1379
1380 while (c) {
1381 struct working_area *next = c->next;
1382 target_free_working_area_restore(target, c, restore);
1383
1384 if (c->backup)
1385 free(c->backup);
1386
1387 free(c);
1388
1389 c = next;
1390 }
1391
1392 target->working_areas = NULL;
1393 }
1394
/* Release every working area, restoring saved target memory. */
void target_free_all_working_areas(struct target *target)
{
	target_free_all_working_areas_restore(target, 1);
}
1399
1400 int target_arch_state(struct target *target)
1401 {
1402 int retval;
1403 if (target == NULL) {
1404 LOG_USER("No target has been configured");
1405 return ERROR_OK;
1406 }
1407
1408 LOG_USER("target state: %s", target_state_name(target));
1409
1410 if (target->state != TARGET_HALTED)
1411 return ERROR_OK;
1412
1413 retval = target->type->arch_state(target);
1414 return retval;
1415 }
1416
1417 /* Single aligned words are guaranteed to use 16 or 32 bit access
1418 * mode respectively, otherwise data is handled as quickly as
1419 * possible
1420 */
1421 int target_write_buffer(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
1422 {
1423 LOG_DEBUG("writing buffer of %i byte at 0x%8.8x",
1424 (int)size, (unsigned)address);
1425
1426 if (!target_was_examined(target)) {
1427 LOG_ERROR("Target not examined yet");
1428 return ERROR_FAIL;
1429 }
1430
1431 if (size == 0)
1432 return ERROR_OK;
1433
1434 if ((address + size - 1) < address) {
1435 /* GDB can request this when e.g. PC is 0xfffffffc*/
1436 LOG_ERROR("address + size wrapped(0x%08x, 0x%08x)",
1437 (unsigned)address,
1438 (unsigned)size);
1439 return ERROR_FAIL;
1440 }
1441
1442 return target->type->write_buffer(target, address, size, buffer);
1443 }
1444
1445 static int target_write_buffer_default(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
1446 {
1447 int retval = ERROR_OK;
1448
1449 if (((address % 2) == 0) && (size == 2))
1450 return target_write_memory(target, address, 2, 1, buffer);
1451
1452 /* handle unaligned head bytes */
1453 if (address % 4) {
1454 uint32_t unaligned = 4 - (address % 4);
1455
1456 if (unaligned > size)
1457 unaligned = size;
1458
1459 retval = target_write_memory(target, address, 1, unaligned, buffer);
1460 if (retval != ERROR_OK)
1461 return retval;
1462
1463 buffer += unaligned;
1464 address += unaligned;
1465 size -= unaligned;
1466 }
1467
1468 /* handle aligned words */
1469 if (size >= 4) {
1470 int aligned = size - (size % 4);
1471
1472 /* use bulk writes above a certain limit. This may have to be changed */
1473 if (aligned > 128) {
1474 retval = target->type->bulk_write_memory(target, address, aligned / 4, buffer);
1475 if (retval != ERROR_OK)
1476 return retval;
1477 } else {
1478 retval = target_write_memory(target, address, 4, aligned / 4, buffer);
1479 if (retval != ERROR_OK)
1480 return retval;
1481 }
1482
1483 buffer += aligned;
1484 address += aligned;
1485 size -= aligned;
1486 }
1487
1488 /* handle tail writes of less than 4 bytes */
1489 if (size > 0) {
1490 retval = target_write_memory(target, address, 1, size, buffer);
1491 if (retval != ERROR_OK)
1492 return retval;
1493 }
1494
1495 return retval;
1496 }
1497
1498 /* Single aligned words are guaranteed to use 16 or 32 bit access
1499 * mode respectively, otherwise data is handled as quickly as
1500 * possible
1501 */
1502 int target_read_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
1503 {
1504 LOG_DEBUG("reading buffer of %i byte at 0x%8.8x",
1505 (int)size, (unsigned)address);
1506
1507 if (!target_was_examined(target)) {
1508 LOG_ERROR("Target not examined yet");
1509 return ERROR_FAIL;
1510 }
1511
1512 if (size == 0)
1513 return ERROR_OK;
1514
1515 if ((address + size - 1) < address) {
1516 /* GDB can request this when e.g. PC is 0xfffffffc*/
1517 LOG_ERROR("address + size wrapped(0x%08" PRIx32 ", 0x%08" PRIx32 ")",
1518 address,
1519 size);
1520 return ERROR_FAIL;
1521 }
1522
1523 return target->type->read_buffer(target, address, size, buffer);
1524 }
1525
1526 static int target_read_buffer_default(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
1527 {
1528 int retval = ERROR_OK;
1529
1530 if (((address % 2) == 0) && (size == 2))
1531 return target_read_memory(target, address, 2, 1, buffer);
1532
1533 /* handle unaligned head bytes */
1534 if (address % 4) {
1535 uint32_t unaligned = 4 - (address % 4);
1536
1537 if (unaligned > size)
1538 unaligned = size;
1539
1540 retval = target_read_memory(target, address, 1, unaligned, buffer);
1541 if (retval != ERROR_OK)
1542 return retval;
1543
1544 buffer += unaligned;
1545 address += unaligned;
1546 size -= unaligned;
1547 }
1548
1549 /* handle aligned words */
1550 if (size >= 4) {
1551 int aligned = size - (size % 4);
1552
1553 retval = target_read_memory(target, address, 4, aligned / 4, buffer);
1554 if (retval != ERROR_OK)
1555 return retval;
1556
1557 buffer += aligned;
1558 address += aligned;
1559 size -= aligned;
1560 }
1561
1562 /*prevent byte access when possible (avoid AHB access limitations in some cases)*/
1563 if (size >= 2) {
1564 int aligned = size - (size % 2);
1565 retval = target_read_memory(target, address, 2, aligned / 2, buffer);
1566 if (retval != ERROR_OK)
1567 return retval;
1568
1569 buffer += aligned;
1570 address += aligned;
1571 size -= aligned;
1572 }
1573 /* handle tail writes of less than 4 bytes */
1574 if (size > 0) {
1575 retval = target_read_memory(target, address, 1, size, buffer);
1576 if (retval != ERROR_OK)
1577 return retval;
1578 }
1579
1580 return ERROR_OK;
1581 }
1582
/* Compute a checksum over target memory [address, address+size).
 * Prefers the target type's own checksum_memory() (typically run on the
 * target itself); if that fails, falls back to reading the region into
 * a host buffer and checksumming it with image_calculate_checksum(). */
int target_checksum_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* fallback: read the whole region back to the host */
		buffer = malloc(size);
		if (buffer == NULL) {
			LOG_ERROR("error allocating buffer for section (%d bytes)", (int)size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): target_buffer_get_u32() followed by
		 * target_buffer_set_u32() on the same bytes looks like an
		 * identity transform — confirm whether this loop is needed */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
1622
1623 int target_blank_check_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* blank)
1624 {
1625 int retval;
1626 if (!target_was_examined(target)) {
1627 LOG_ERROR("Target not examined yet");
1628 return ERROR_FAIL;
1629 }
1630
1631 if (target->type->blank_check_memory == 0)
1632 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1633
1634 retval = target->type->blank_check_memory(target, address, size, blank);
1635
1636 return retval;
1637 }
1638
1639 int target_read_u32(struct target *target, uint32_t address, uint32_t *value)
1640 {
1641 uint8_t value_buf[4];
1642 if (!target_was_examined(target)) {
1643 LOG_ERROR("Target not examined yet");
1644 return ERROR_FAIL;
1645 }
1646
1647 int retval = target_read_memory(target, address, 4, 1, value_buf);
1648
1649 if (retval == ERROR_OK) {
1650 *value = target_buffer_get_u32(target, value_buf);
1651 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1652 address,
1653 *value);
1654 } else {
1655 *value = 0x0;
1656 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1657 address);
1658 }
1659
1660 return retval;
1661 }
1662
1663 int target_read_u16(struct target *target, uint32_t address, uint16_t *value)
1664 {
1665 uint8_t value_buf[2];
1666 if (!target_was_examined(target)) {
1667 LOG_ERROR("Target not examined yet");
1668 return ERROR_FAIL;
1669 }
1670
1671 int retval = target_read_memory(target, address, 2, 1, value_buf);
1672
1673 if (retval == ERROR_OK) {
1674 *value = target_buffer_get_u16(target, value_buf);
1675 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
1676 address,
1677 *value);
1678 } else {
1679 *value = 0x0;
1680 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1681 address);
1682 }
1683
1684 return retval;
1685 }
1686
1687 int target_read_u8(struct target *target, uint32_t address, uint8_t *value)
1688 {
1689 int retval = target_read_memory(target, address, 1, 1, value);
1690 if (!target_was_examined(target)) {
1691 LOG_ERROR("Target not examined yet");
1692 return ERROR_FAIL;
1693 }
1694
1695 if (retval == ERROR_OK) {
1696 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
1697 address,
1698 *value);
1699 } else {
1700 *value = 0x0;
1701 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1702 address);
1703 }
1704
1705 return retval;
1706 }
1707
1708 int target_write_u32(struct target *target, uint32_t address, uint32_t value)
1709 {
1710 int retval;
1711 uint8_t value_buf[4];
1712 if (!target_was_examined(target)) {
1713 LOG_ERROR("Target not examined yet");
1714 return ERROR_FAIL;
1715 }
1716
1717 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1718 address,
1719 value);
1720
1721 target_buffer_set_u32(target, value_buf, value);
1722 retval = target_write_memory(target, address, 4, 1, value_buf);
1723 if (retval != ERROR_OK)
1724 LOG_DEBUG("failed: %i", retval);
1725
1726 return retval;
1727 }
1728
1729 int target_write_u16(struct target *target, uint32_t address, uint16_t value)
1730 {
1731 int retval;
1732 uint8_t value_buf[2];
1733 if (!target_was_examined(target)) {
1734 LOG_ERROR("Target not examined yet");
1735 return ERROR_FAIL;
1736 }
1737
1738 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8x",
1739 address,
1740 value);
1741
1742 target_buffer_set_u16(target, value_buf, value);
1743 retval = target_write_memory(target, address, 2, 1, value_buf);
1744 if (retval != ERROR_OK)
1745 LOG_DEBUG("failed: %i", retval);
1746
1747 return retval;
1748 }
1749
1750 int target_write_u8(struct target *target, uint32_t address, uint8_t value)
1751 {
1752 int retval;
1753 if (!target_was_examined(target)) {
1754 LOG_ERROR("Target not examined yet");
1755 return ERROR_FAIL;
1756 }
1757
1758 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
1759 address, value);
1760
1761 retval = target_write_memory(target, address, 1, 1, &value);
1762 if (retval != ERROR_OK)
1763 LOG_DEBUG("failed: %i", retval);
1764
1765 return retval;
1766 }
1767
1768 static int find_target(struct command_context *cmd_ctx, const char *name)
1769 {
1770 struct target *target = get_target(name);
1771 if (target == NULL) {
1772 LOG_ERROR("Target: %s is unknown, try one of:\n", name);
1773 return ERROR_FAIL;
1774 }
1775 if (!target->tap->enabled) {
1776 LOG_USER("Target: TAP %s is disabled, "
1777 "can't be the current target\n",
1778 target->tap->dotted_name);
1779 return ERROR_FAIL;
1780 }
1781
1782 cmd_ctx->current_target = target->target_number;
1783 return ERROR_OK;
1784 }
1785
1786
1787 COMMAND_HANDLER(handle_targets_command)
1788 {
1789 int retval = ERROR_OK;
1790 if (CMD_ARGC == 1) {
1791 retval = find_target(CMD_CTX, CMD_ARGV[0]);
1792 if (retval == ERROR_OK) {
1793 /* we're done! */
1794 return retval;
1795 }
1796 }
1797
1798 struct target *target = all_targets;
1799 command_print(CMD_CTX, " TargetName Type Endian TapName State ");
1800 command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
1801 while (target) {
1802 const char *state;
1803 char marker = ' ';
1804
1805 if (target->tap->enabled)
1806 state = target_state_name(target);
1807 else
1808 state = "tap-disabled";
1809
1810 if (CMD_CTX->current_target == target->target_number)
1811 marker = '*';
1812
1813 /* keep columns lined up to match the headers above */
1814 command_print(CMD_CTX,
1815 "%2d%c %-18s %-10s %-6s %-18s %s",
1816 target->target_number,
1817 marker,
1818 target_name(target),
1819 target_type_name(target),
1820 Jim_Nvp_value2name_simple(nvp_target_endian,
1821 target->endianness)->name,
1822 target->tap->dotted_name,
1823 state);
1824 target = target->next;
1825 }
1826
1827 return retval;
1828 }
1829
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* most recent sampled line state, updated by sense_handler() and read
 * by handle_target() to gate polling */
static int powerDropout;
static int srstAsserted;

/* edge-triggered action flags: latched by sense_handler(), consumed
 * (and cleared) by handle_target() */
static int runPowerRestore;
static int runPowerDropout;
static int runSrstAsserted;
static int runSrstDeasserted;
1839
/* Sample the power-dropout and srst lines via the JTAG layer, detect
 * edges against the previous sample, and latch the run* action flags
 * for handle_target().  Power-dropout and srst-deassert actions are
 * rate-limited to at most one per 2000ms. */
static int sense_handler(void)
{
	/* previous samples, for edge detection across calls */
	static int prevSrstAsserted;
	static int prevPowerdropout;

	int retval = jtag_power_dropout(&powerDropout);
	if (retval != ERROR_OK)
		return retval;

	/* power restored = dropout was asserted last time, not now */
	int powerRestored;
	powerRestored = prevPowerdropout && !powerDropout;
	if (powerRestored)
		runPowerRestore = 1;

	long long current = timeval_ms();
	static long long lastPower;
	/* rate-limit dropout handling to once per 2000ms */
	int waitMore = lastPower + 2000 > current;
	if (powerDropout && !waitMore) {
		runPowerDropout = 1;
		lastPower = current;
	}

	retval = jtag_srst_asserted(&srstAsserted);
	if (retval != ERROR_OK)
		return retval;

	/* srst released = asserted last time, not now */
	int srstDeasserted;
	srstDeasserted = prevSrstAsserted && !srstAsserted;

	static long long lastSrst;
	/* rate-limit deassert handling to once per 2000ms */
	waitMore = lastSrst + 2000 > current;
	if (srstDeasserted && !waitMore) {
		runSrstDeasserted = 1;
		lastSrst = current;
	}

	if (!prevSrstAsserted && srstAsserted)
		runSrstAsserted = 1;

	prevSrstAsserted = srstAsserted;
	prevPowerdropout = powerDropout;

	if (srstDeasserted || powerRestored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
1891
/* poll back-off: after a poll failure, handle_target() skips
 * backoff_times timer ticks (backoff_count counts the skips) */
static int backoff_times;
static int backoff_count;
1894
/* process target state changes.  Registered as a periodic timer
 * callback by target_init(); @priv is the Jim interpreter.  First runs
 * the srst/power event procs latched by sense_handler(), then polls
 * every enabled target, backing off (roughly doubling the skip count,
 * capped near 5000ms) while polling keeps failing. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (runSrstAsserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (runSrstDeasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (runPowerDropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (runPowerRestore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		runSrstAsserted = 0;
		runSrstDeasserted = 0;
		runPowerRestore = 0;
		runPowerDropout = 0;

		recursive = 0;
	}

	if (backoff_times > backoff_count) {
		/* do not poll this time as we failed previously */
		backoff_count++;
		return ERROR_OK;
	}
	backoff_count = 0;

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {
		if (!target->tap->enabled)
			continue;

		/* only poll target if we've got power and srst isn't asserted */
		if (!powerDropout && !srstAsserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (backoff_times * polling_interval < 5000) {
					backoff_times *= 2;
					backoff_times++;
				}
				LOG_USER("Polling target failed, GDB will be halted. Polling again in %dms",
						backoff_times * polling_interval);

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
				return retval;
			}
			/* Since we succeeded, we reset backoff count */
			if (backoff_times > 0)
				LOG_USER("Polling succeeded again");
			backoff_times = 0;
		}
	}

	return retval;
}
1994
/* "reg" command: with no arguments, list every register in every cache
 * of the current target; with a register name or ordinal number,
 * display that register (a "force" second argument re-reads it from
 * the target); with a name/number plus a value, write the register. */
COMMAND_HANDLER(handle_reg_command)
{
	struct target *target;
	struct reg *reg = NULL;
	unsigned count = 0;
	char *value;

	LOG_DEBUG("-");

	target = get_current_target(CMD_CTX);

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD_CTX, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* only print cached values if they are valid */
				if (reg->valid) {
					value = buf_to_str(reg->value,
							reg->size, 16);
					command_print(CMD_CTX,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size) ;
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk the caches, counting registers until we reach #num */
		struct reg_cache *cache = target->reg_cache;
		count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD_CTX, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);

		if (!reg) {
			command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
			return ERROR_OK;
		}
	}

	assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		/* "force" invalidates the cached value, triggering a re-read */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0)
			reg->type->get(reg);
		value = buf_to_str(reg->value, reg->size, 16);
		command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (buf == NULL)
			return ERROR_FAIL;
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		reg->type->set(reg, buf);

		value = buf_to_str(reg->value, reg->size, 16);
		command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);

		free(buf);

		return ERROR_OK;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;
}
2114
2115 COMMAND_HANDLER(handle_poll_command)
2116 {
2117 int retval = ERROR_OK;
2118 struct target *target = get_current_target(CMD_CTX);
2119
2120 if (CMD_ARGC == 0) {
2121 command_print(CMD_CTX, "background polling: %s",
2122 jtag_poll_get_enabled() ? "on" : "off");
2123 command_print(CMD_CTX, "TAP: %s (%s)",
2124 target->tap->dotted_name,
2125 target->tap->enabled ? "enabled" : "disabled");
2126 if (!target->tap->enabled)
2127 return ERROR_OK;
2128 retval = target_poll(target);
2129 if (retval != ERROR_OK)
2130 return retval;
2131 retval = target_arch_state(target);
2132 if (retval != ERROR_OK)
2133 return retval;
2134 } else if (CMD_ARGC == 1) {
2135 bool enable;
2136 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2137 jtag_poll_set_enabled(enable);
2138 } else
2139 return ERROR_COMMAND_SYNTAX_ERROR;
2140
2141 return retval;
2142 }
2143
2144 COMMAND_HANDLER(handle_wait_halt_command)
2145 {
2146 if (CMD_ARGC > 1)
2147 return ERROR_COMMAND_SYNTAX_ERROR;
2148
2149 unsigned ms = 5000;
2150 if (1 == CMD_ARGC) {
2151 int retval = parse_uint(CMD_ARGV[0], &ms);
2152 if (ERROR_OK != retval)
2153 return ERROR_COMMAND_SYNTAX_ERROR;
2154 /* convert seconds (given) to milliseconds (needed) */
2155 ms *= 1000;
2156 }
2157
2158 struct target *target = get_current_target(CMD_CTX);
2159 return target_wait_state(target, TARGET_HALTED, ms);
2160 }
2161
2162 /* wait for target state to change. The trick here is to have a low
2163 * latency for short waits and not to suck up all the CPU time
2164 * on longer waits.
2165 *
2166 * After 500ms, keep_alive() is invoked
2167 */
2168 int target_wait_state(struct target *target, enum target_state state, int ms)
2169 {
2170 int retval;
2171 long long then = 0, cur;
2172 int once = 1;
2173
2174 for (;;) {
2175 retval = target_poll(target);
2176 if (retval != ERROR_OK)
2177 return retval;
2178 if (target->state == state)
2179 break;
2180 cur = timeval_ms();
2181 if (once) {
2182 once = 0;
2183 then = timeval_ms();
2184 LOG_DEBUG("waiting for target %s...",
2185 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2186 }
2187
2188 if (cur-then > 500)
2189 keep_alive();
2190
2191 if ((cur-then) > ms) {
2192 LOG_ERROR("timed out while waiting for target %s",
2193 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2194 return ERROR_FAIL;
2195 }
2196 }
2197
2198 return ERROR_OK;
2199 }
2200
2201 COMMAND_HANDLER(handle_halt_command)
2202 {
2203 LOG_DEBUG("-");
2204
2205 struct target *target = get_current_target(CMD_CTX);
2206 int retval = target_halt(target);
2207 if (ERROR_OK != retval)
2208 return retval;
2209
2210 if (CMD_ARGC == 1) {
2211 unsigned wait_local;
2212 retval = parse_uint(CMD_ARGV[0], &wait_local);
2213 if (ERROR_OK != retval)
2214 return ERROR_COMMAND_SYNTAX_ERROR;
2215 if (!wait_local)
2216 return ERROR_OK;
2217 }
2218
2219 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2220 }
2221
2222 COMMAND_HANDLER(handle_soft_reset_halt_command)
2223 {
2224 struct target *target = get_current_target(CMD_CTX);
2225
2226 LOG_USER("requesting target halt and executing a soft reset");
2227
2228 target->type->soft_reset_halt(target);
2229
2230 return ERROR_OK;
2231 }
2232
2233 COMMAND_HANDLER(handle_reset_command)
2234 {
2235 if (CMD_ARGC > 1)
2236 return ERROR_COMMAND_SYNTAX_ERROR;
2237
2238 enum target_reset_mode reset_mode = RESET_RUN;
2239 if (CMD_ARGC == 1) {
2240 const Jim_Nvp *n;
2241 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
2242 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
2243 return ERROR_COMMAND_SYNTAX_ERROR;
2244 reset_mode = n->value;
2245 }
2246
2247 /* reset *all* targets */
2248 return target_process_reset(CMD_CTX, reset_mode);
2249 }
2250
2251
2252 COMMAND_HANDLER(handle_resume_command)
2253 {
2254 int current = 1;
2255 if (CMD_ARGC > 1)
2256 return ERROR_COMMAND_SYNTAX_ERROR;
2257
2258 struct target *target = get_current_target(CMD_CTX);
2259 target_handle_event(target, TARGET_EVENT_OLD_pre_resume);
2260
2261 /* with no CMD_ARGV, resume from current pc, addr = 0,
2262 * with one arguments, addr = CMD_ARGV[0],
2263 * handle breakpoints, not debugging */
2264 uint32_t addr = 0;
2265 if (CMD_ARGC == 1) {
2266 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2267 current = 0;
2268 }
2269
2270 return target_resume(target, current, addr, 1, 0);
2271 }
2272
2273 COMMAND_HANDLER(handle_step_command)
2274 {
2275 if (CMD_ARGC > 1)
2276 return ERROR_COMMAND_SYNTAX_ERROR;
2277
2278 LOG_DEBUG("-");
2279
2280 /* with no CMD_ARGV, step from current pc, addr = 0,
2281 * with one argument addr = CMD_ARGV[0],
2282 * handle breakpoints, debugging */
2283 uint32_t addr = 0;
2284 int current_pc = 1;
2285 if (CMD_ARGC == 1) {
2286 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2287 current_pc = 0;
2288 }
2289
2290 struct target *target = get_current_target(CMD_CTX);
2291
2292 return target->type->step(target, current_pc, addr, 1);
2293 }
2294
/* Pretty-print a block of memory read by the md* commands:
 * 32 bytes per line, each line prefixed with its address, values
 * rendered at the requested access width in target byte order. */
static void handle_md_output(struct command_context *cmd_ctx,
		struct target *target, uint32_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	const unsigned bytes_per_line = 32;
	unsigned values_per_line = bytes_per_line / size;

	char line[bytes_per_line * 4 + 1];
	unsigned pos = 0;

	const char *value_fmt;
	switch (size) {
	case 4:
		value_fmt = "%8.8x ";
		break;
	case 2:
		value_fmt = "%4.4x ";
		break;
	case 1:
		value_fmt = "%2.2x ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		/* start of a display line: print the address prefix */
		if (i % values_per_line == 0)
			pos += snprintf(line + pos, sizeof(line) - pos,
					"0x%8.8x: ", (unsigned)(address + i * size));

		const uint8_t *value_ptr = buffer + i * size;
		uint32_t value;
		if (size == 4)
			value = target_buffer_get_u32(target, value_ptr);
		else if (size == 2)
			value = target_buffer_get_u16(target, value_ptr);
		else
			value = *value_ptr;
		pos += snprintf(line + pos, sizeof(line) - pos, value_fmt, value);

		/* flush at the end of a line or at the final value */
		if ((i % values_per_line == values_per_line - 1) || (i == count - 1)) {
			command_print(cmd_ctx, "%s", line);
			pos = 0;
		}
	}
}
2352
2353 COMMAND_HANDLER(handle_md_command)
2354 {
2355 if (CMD_ARGC < 1)
2356 return ERROR_COMMAND_SYNTAX_ERROR;
2357
2358 unsigned size = 0;
2359 switch (CMD_NAME[2]) {
2360 case 'w':
2361 size = 4;
2362 break;
2363 case 'h':
2364 size = 2;
2365 break;
2366 case 'b':
2367 size = 1;
2368 break;
2369 default:
2370 return ERROR_COMMAND_SYNTAX_ERROR;
2371 }
2372
2373 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2374 int (*fn)(struct target *target,
2375 uint32_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
2376 if (physical) {
2377 CMD_ARGC--;
2378 CMD_ARGV++;
2379 fn = target_read_phys_memory;
2380 } else
2381 fn = target_read_memory;
2382 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
2383 return ERROR_COMMAND_SYNTAX_ERROR;
2384
2385 uint32_t address;
2386 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2387
2388 unsigned count = 1;
2389 if (CMD_ARGC == 2)
2390 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
2391
2392 uint8_t *buffer = calloc(count, size);
2393
2394 struct target *target = get_current_target(CMD_CTX);
2395 int retval = fn(target, address, size, count, buffer);
2396 if (ERROR_OK == retval)
2397 handle_md_output(CMD_CTX, target, address, size, count, buffer);
2398
2399 free(buffer);
2400
2401 return retval;
2402 }
2403
2404 typedef int (*target_write_fn)(struct target *target,
2405 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
2406
/* Adapter with the target_write_fn signature that routes writes through
 * the bulk buffer-write path. */
static int target_write_memory_fast(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	uint32_t num_bytes = size * count;
	return target_write_buffer(target, address, num_bytes, buffer);
}
2412
2413 static int target_fill_mem(struct target *target,
2414 uint32_t address,
2415 target_write_fn fn,
2416 unsigned data_size,
2417 /* value */
2418 uint32_t b,
2419 /* count */
2420 unsigned c)
2421 {
2422 /* We have to write in reasonably large chunks to be able
2423 * to fill large memory areas with any sane speed */
2424 const unsigned chunk_size = 16384;
2425 uint8_t *target_buf = malloc(chunk_size * data_size);
2426 if (target_buf == NULL) {
2427 LOG_ERROR("Out of memory");
2428 return ERROR_FAIL;
2429 }
2430
2431 for (unsigned i = 0; i < chunk_size; i++) {
2432 switch (data_size) {
2433 case 4:
2434 target_buffer_set_u32(target, target_buf + i * data_size, b);
2435 break;
2436 case 2:
2437 target_buffer_set_u16(target, target_buf + i * data_size, b);
2438 break;
2439 case 1:
2440 target_buffer_set_u8(target, target_buf + i * data_size, b);
2441 break;
2442 default:
2443 exit(-1);
2444 }
2445 }
2446
2447 int retval = ERROR_OK;
2448
2449 for (unsigned x = 0; x < c; x += chunk_size) {
2450 unsigned current;
2451 current = c - x;
2452 if (current > chunk_size)
2453 current = chunk_size;
2454 retval = fn(target, address + x * data_size, data_size, current, target_buf);
2455 if (retval != ERROR_OK)
2456 break;
2457 /* avoid GDB timeouts */
2458 keep_alive();
2459 }
2460 free(target_buf);
2461
2462 return retval;
2463 }
2464
2465
2466 COMMAND_HANDLER(handle_mw_command)
2467 {
2468 if (CMD_ARGC < 2)
2469 return ERROR_COMMAND_SYNTAX_ERROR;
2470 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2471 target_write_fn fn;
2472 if (physical) {
2473 CMD_ARGC--;
2474 CMD_ARGV++;
2475 fn = target_write_phys_memory;
2476 } else
2477 fn = target_write_memory_fast;
2478 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
2479 return ERROR_COMMAND_SYNTAX_ERROR;
2480
2481 uint32_t address;
2482 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2483
2484 uint32_t value;
2485 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2486
2487 unsigned count = 1;
2488 if (CMD_ARGC == 3)
2489 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
2490
2491 struct target *target = get_current_target(CMD_CTX);
2492 unsigned wordsize;
2493 switch (CMD_NAME[2]) {
2494 case 'w':
2495 wordsize = 4;
2496 break;
2497 case 'h':
2498 wordsize = 2;
2499 break;
2500 case 'b':
2501 wordsize = 1;
2502 break;
2503 default:
2504 return ERROR_COMMAND_SYNTAX_ERROR;
2505 }
2506
2507 return target_fill_mem(target, address, fn, wordsize, value, count);
2508 }
2509
2510 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
2511 uint32_t *min_address, uint32_t *max_address)
2512 {
2513 if (CMD_ARGC < 1 || CMD_ARGC > 5)
2514 return ERROR_COMMAND_SYNTAX_ERROR;
2515
2516 /* a base address isn't always necessary,
2517 * default to 0x0 (i.e. don't relocate) */
2518 if (CMD_ARGC >= 2) {
2519 uint32_t addr;
2520 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
2521 image->base_address = addr;
2522 image->base_address_set = 1;
2523 } else
2524 image->base_address_set = 0;
2525
2526 image->start_address_set = 0;
2527
2528 if (CMD_ARGC >= 4)
2529 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], *min_address);
2530 if (CMD_ARGC == 5) {
2531 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], *max_address);
2532 /* use size (given) to find max (required) */
2533 *max_address += *min_address;
2534 }
2535
2536 if (*min_address > *max_address)
2537 return ERROR_COMMAND_SYNTAX_ERROR;
2538
2539 return ERROR_OK;
2540 }
2541
2542 COMMAND_HANDLER(handle_load_image_command)
2543 {
2544 uint8_t *buffer;
2545 size_t buf_cnt;
2546 uint32_t image_size;
2547 uint32_t min_address = 0;
2548 uint32_t max_address = 0xffffffff;
2549 int i;
2550 struct image image;
2551
2552 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
2553 &image, &min_address, &max_address);
2554 if (ERROR_OK != retval)
2555 return retval;
2556
2557 struct target *target = get_current_target(CMD_CTX);
2558
2559 struct duration bench;
2560 duration_start(&bench);
2561
2562 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
2563 return ERROR_OK;
2564
2565 image_size = 0x0;
2566 retval = ERROR_OK;
2567 for (i = 0; i < image.num_sections; i++) {
2568 buffer = malloc(image.sections[i].size);
2569 if (buffer == NULL) {
2570 command_print(CMD_CTX,
2571 "error allocating buffer for section (%d bytes)",
2572 (int)(image.sections[i].size));
2573 break;
2574 }
2575
2576 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
2577 if (retval != ERROR_OK) {
2578 free(buffer);
2579 break;
2580 }
2581
2582 uint32_t offset = 0;
2583 uint32_t length = buf_cnt;
2584
2585 /* DANGER!!! beware of unsigned comparision here!!! */
2586
2587 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
2588 (image.sections[i].base_address < max_address)) {
2589
2590 if (image.sections[i].base_address < min_address) {
2591 /* clip addresses below */
2592 offset += min_address-image.sections[i].base_address;
2593 length -= offset;
2594 }
2595
2596 if (image.sections[i].base_address + buf_cnt > max_address)
2597 length -= (image.sections[i].base_address + buf_cnt)-max_address;
2598
2599 retval = target_write_buffer(target,
2600 image.sections[i].base_address + offset, length, buffer + offset);
2601 if (retval != ERROR_OK) {
2602 free(buffer);
2603 break;
2604 }
2605 image_size += length;
2606 command_print(CMD_CTX, "%u bytes written at address 0x%8.8" PRIx32 "",
2607 (unsigned int)length,
2608 image.sections[i].base_address + offset);
2609 }
2610
2611 free(buffer);
2612 }
2613
2614 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
2615 command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
2616 "in %fs (%0.3f KiB/s)", image_size,
2617 duration_elapsed(&bench), duration_kbps(&bench, image_size));
2618 }
2619
2620 image_close(&image);
2621
2622 return retval;
2623
2624 }
2625
2626 COMMAND_HANDLER(handle_dump_image_command)
2627 {
2628 struct fileio fileio;
2629 uint8_t *buffer;
2630 int retval, retvaltemp;
2631 uint32_t address, size;
2632 struct duration bench;
2633 struct target *target = get_current_target(CMD_CTX);
2634
2635 if (CMD_ARGC != 3)
2636 return ERROR_COMMAND_SYNTAX_ERROR;
2637
2638 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], address);
2639 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], size);
2640
2641 uint32_t buf_size = (size > 4096) ? 4096 : size;
2642 buffer = malloc(buf_size);
2643 if (!buffer)
2644 return ERROR_FAIL;
2645
2646 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
2647 if (retval != ERROR_OK) {
2648 free(buffer);
2649 return retval;
2650 }
2651
2652 duration_start(&bench);
2653
2654 while (size > 0) {
2655 size_t size_written;
2656 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
2657 retval = target_read_buffer(target, address, this_run_size, buffer);
2658 if (retval != ERROR_OK)
2659 break;
2660
2661 retval = fileio_write(&fileio, this_run_size, buffer, &size_written);
2662 if (retval != ERROR_OK)
2663 break;
2664
2665 size -= this_run_size;
2666 address += this_run_size;
2667 }
2668
2669 free(buffer);
2670
2671 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
2672 int filesize;
2673 retval = fileio_size(&fileio, &filesize);
2674 if (retval != ERROR_OK)
2675 return retval;
2676 command_print(CMD_CTX,
2677 "dumped %ld bytes in %fs (%0.3f KiB/s)", (long)filesize,
2678 duration_elapsed(&bench), duration_kbps(&bench, filesize));
2679 }
2680
2681 retvaltemp = fileio_close(&fileio);
2682 if (retvaltemp != ERROR_OK)
2683 return retvaltemp;
2684
2685 return retval;
2686 }
2687
/**
 * Shared worker for the 'verify_image' (verify = 1) and 'test_image'
 * (verify = 0) commands: <file> [base_address [type]].
 *
 * With verify set, each image section is checksummed against target
 * memory; on a mismatch it falls back to a byte-by-byte compare and
 * prints up to 128 differing bytes before giving up.  Without verify,
 * it only lists each section's address and length.
 * Returns ERROR_FAIL if any differences were found.
 */
static COMMAND_HELPER(handle_verify_image_command_internal, int verify)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	int i;
	int retval;
	uint32_t checksum = 0;
	uint32_t mem_checksum = 0;

	struct image image;

	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC < 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (!target) {
		LOG_ERROR("no target selected");
		return ERROR_FAIL;
	}

	struct duration bench;
	duration_start(&bench);

	/* optional second argument relocates the image */
	if (CMD_ARGC >= 2) {
		uint32_t addr;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
		image.base_address = addr;
		image.base_address_set = 1;
	} else {
		image.base_address_set = 0;
		image.base_address = 0x0;
	}

	image.start_address_set = 0;

	retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
	if (retval != ERROR_OK)
		return retval;

	image_size = 0x0;
	int diffs = 0;
	retval = ERROR_OK;
	for (i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (buffer == NULL) {
			/* NOTE(review): breaks with retval still ERROR_OK, so an
			 * allocation failure is reported as success — verify */
			command_print(CMD_CTX,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			break;
		}
		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		if (verify) {
			/* calculate checksum of image */
			retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}

			/* have the target compute the same checksum over its memory */
			retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}

			if (checksum != mem_checksum) {
				/* failed crc checksum, fall back to a binary compare */
				uint8_t *data;

				if (diffs == 0)
					LOG_ERROR("checksum mismatch - attempting binary compare");

				/* NOTE(review): malloc result is not checked; a failed
				 * allocation would hand NULL to target_read_memory */
				data = (uint8_t *)malloc(buf_cnt);

				/* Can we use 32bit word accesses? */
				int size = 1;
				int count = buf_cnt;
				if ((count % 4) == 0) {
					size *= 4;
					count /= 4;
				}
				retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
				if (retval == ERROR_OK) {
					uint32_t t;
					for (t = 0; t < buf_cnt; t++) {
						if (data[t] != buffer[t]) {
							command_print(CMD_CTX,
								"diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
								diffs,
								(unsigned)(t + image.sections[i].base_address),
								data[t],
								buffer[t]);
							/* cap the report at 128 differences */
							if (diffs++ >= 127) {
								command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
								free(data);
								free(buffer);
								goto done;
							}
						}
						/* the byte-wise compare is slow; keep the
						 * connection alive while it runs */
						keep_alive();
					}
				}
				free(data);
			}
		} else {
			/* test_image mode: just report the section layout */
			command_print(CMD_CTX, "address 0x%08" PRIx32 " length 0x%08zx",
					image.sections[i].base_address,
					buf_cnt);
		}

		free(buffer);
		image_size += buf_cnt;
	}
	if (diffs > 0)
		command_print(CMD_CTX, "No more differences found.");
done:
	/* any difference makes the whole verify fail */
	if (diffs > 0)
		retval = ERROR_FAIL;
	if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD_CTX, "verified %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;
}
2823
2824 COMMAND_HANDLER(handle_verify_image_command)
2825 {
2826 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 1);
2827 }
2828
2829 COMMAND_HANDLER(handle_test_image_command)
2830 {
2831 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 0);
2832 }
2833
2834 static int handle_bp_command_list(struct command_context *cmd_ctx)
2835 {
2836 struct target *target = get_current_target(cmd_ctx);
2837 struct breakpoint *breakpoint = target->breakpoints;
2838 while (breakpoint) {
2839 if (breakpoint->type == BKPT_SOFT) {
2840 char *buf = buf_to_str(breakpoint->orig_instr,
2841 breakpoint->length, 16);
2842 command_print(cmd_ctx, "IVA breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i, 0x%s",
2843 breakpoint->address,
2844 breakpoint->length,
2845 breakpoint->set, buf);
2846 free(buf);
2847 } else {
2848 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
2849 command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
2850 breakpoint->asid,
2851 breakpoint->length, breakpoint->set);
2852 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
2853 command_print(cmd_ctx, "Hybrid breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
2854 breakpoint->address,
2855 breakpoint->length, breakpoint->set);
2856 command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
2857 breakpoint->asid);
2858 } else
2859 command_print(cmd_ctx, "Breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
2860 breakpoint->address,
2861 breakpoint->length, breakpoint->set);
2862 }
2863
2864 breakpoint = breakpoint->next;
2865 }
2866 return ERROR_OK;
2867 }
2868
2869 static int handle_bp_command_set(struct command_context *cmd_ctx,
2870 uint32_t addr, uint32_t asid, uint32_t length, int hw)
2871 {
2872 struct target *target = get_current_target(cmd_ctx);
2873
2874 if (asid == 0) {
2875 int retval = breakpoint_add(target, addr, length, hw);
2876 if (ERROR_OK == retval)
2877 command_print(cmd_ctx, "breakpoint set at 0x%8.8" PRIx32 "", addr);
2878 else {
2879 LOG_ERROR("Failure setting breakpoint, the same address(IVA) is already used");
2880 return retval;
2881 }
2882 } else if (addr == 0) {
2883 int retval = context_breakpoint_add(target, asid, length, hw);
2884 if (ERROR_OK == retval)
2885 command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
2886 else {
2887 LOG_ERROR("Failure setting breakpoint, the same address(CONTEXTID) is already used");
2888 return retval;
2889 }
2890 } else {
2891 int retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
2892 if (ERROR_OK == retval)
2893 command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
2894 else {
2895 LOG_ERROR("Failure setting breakpoint, the same address is already used");
2896 return retval;
2897 }
2898 }
2899 return ERROR_OK;
2900 }
2901
2902 COMMAND_HANDLER(handle_bp_command)
2903 {
2904 uint32_t addr;
2905 uint32_t asid;
2906 uint32_t length;
2907 int hw = BKPT_SOFT;
2908
2909 switch (CMD_ARGC) {
2910 case 0:
2911 return handle_bp_command_list(CMD_CTX);
2912
2913 case 2:
2914 asid = 0;
2915 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2916 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
2917 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
2918
2919 case 3:
2920 if (strcmp(CMD_ARGV[2], "hw") == 0) {
2921 hw = BKPT_HARD;
2922 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2923
2924 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
2925
2926 asid = 0;
2927 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
2928 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
2929 hw = BKPT_HARD;
2930 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
2931 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
2932 addr = 0;
2933 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
2934 }
2935
2936 case 4:
2937 hw = BKPT_HARD;
2938 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2939 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
2940 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
2941 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
2942
2943 default:
2944 return ERROR_COMMAND_SYNTAX_ERROR;
2945 }
2946 }
2947
2948 COMMAND_HANDLER(handle_rbp_command)
2949 {
2950 if (CMD_ARGC != 1)
2951 return ERROR_COMMAND_SYNTAX_ERROR;
2952
2953 uint32_t addr;
2954 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2955
2956 struct target *target = get_current_target(CMD_CTX);
2957 breakpoint_remove(target, addr);
2958
2959 return ERROR_OK;
2960 }
2961
/* wp — list watchpoints, or set one:
 *   wp [address length [(r|w|a) [value [mask]]]]
 * The switch cases below cascade (each argument count parses its own
 * extras, then falls through to the shorter forms), so the fallthrough
 * comments are load-bearing. */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	/* no arguments: list all watchpoints on the current target */
	if (CMD_ARGC == 0) {
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD_CTX, "address: 0x%8.8" PRIx32
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx32
					", mask: 0x%8.8" PRIx32,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	/* defaults for the optional trailing arguments */
	enum watchpoint_rw type = WPT_ACCESS;
	uint32_t addr = 0;
	uint32_t length = 0;
	uint32_t data_value = 0x0;
	uint32_t data_mask = 0xffffffff;

	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
		/* fall through */
	case 3:
		/* third argument selects read/write/access triggering */
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (ERROR_OK != retval)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
3029
3030 COMMAND_HANDLER(handle_rwp_command)
3031 {
3032 if (CMD_ARGC != 1)
3033 return ERROR_COMMAND_SYNTAX_ERROR;
3034
3035 uint32_t addr;
3036 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3037
3038 struct target *target = get_current_target(CMD_CTX);
3039 watchpoint_remove(target, addr);
3040
3041 return ERROR_OK;
3042 }
3043
3044 /**
3045 * Translate a virtual address to a physical address.
3046 *
3047 * The low-level target implementation must have logged a detailed error
3048 * which is forwarded to telnet/GDB session.
3049 */
3050 COMMAND_HANDLER(handle_virt2phys_command)
3051 {
3052 if (CMD_ARGC != 1)
3053 return ERROR_COMMAND_SYNTAX_ERROR;
3054
3055 uint32_t va;
3056 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], va);
3057 uint32_t pa;
3058
3059 struct target *target = get_current_target(CMD_CTX);
3060 int retval = target->type->virt2phys(target, va, &pa);
3061 if (retval == ERROR_OK)
3062 command_print(CMD_CTX, "Physical address 0x%08" PRIx32 "", pa);
3063
3064 return retval;
3065 }
3066
/* Best-effort write of len bytes to f; short writes are only logged,
 * not propagated, since the profiling dump is non-critical. */
static void writeData(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
3073
/* Emit a 32-bit value to f, least significant byte first. */
static void writeLong(FILE *f, int l)
{
	for (int shift = 0; shift < 32; shift += 8) {
		char byte = (l >> shift) & 0xff;
		writeData(f, &byte, 1);
	}
}
3083
/* Emit the characters of s to f, without the trailing NUL. */
static void writeString(FILE *f, char *s)
{
	size_t n = strlen(s);
	writeData(f, s, n);
}
3088
3089 /* Dump a gmon.out histogram file. */
3090 static void writeGmon(uint32_t *samples, uint32_t sampleNum, const char *filename)
3091 {
3092 uint32_t i;
3093 FILE *f = fopen(filename, "w");
3094 if (f == NULL)
3095 return;
3096 writeString(f, "gmon");
3097 writeLong(f, 0x00000001); /* Version */
3098 writeLong(f, 0); /* padding */
3099 writeLong(f, 0); /* padding */
3100 writeLong(f, 0); /* padding */
3101
3102 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
3103 writeData(f, &zero, 1);
3104
3105 /* figure out bucket size */
3106 uint32_t min = samples[0];
3107 uint32_t max = samples[0];
3108 for (i = 0; i < sampleNum; i++) {
3109 if (min > samples[i])
3110 min = samples[i];
3111 if (max < samples[i])
3112 max = samples[i];
3113 }
3114
3115 int addressSpace = (max - min + 1);
3116 assert(addressSpace >= 2);
3117
3118 static const uint32_t maxBuckets = 16 * 1024; /* maximum buckets. */
3119 uint32_t length = addressSpace;
3120 if (length > maxBuckets)
3121 length = maxBuckets;
3122 int *buckets = malloc(sizeof(int)*length);
3123 if (buckets == NULL) {
3124 fclose(f);
3125 return;
3126 }
3127 memset(buckets, 0, sizeof(int) * length);
3128 for (i = 0; i < sampleNum; i++) {
3129 uint32_t address = samples[i];
3130 long long a = address - min;
3131 long long b = length - 1;
3132 long long c = addressSpace - 1;
3133 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
3134 buckets[index_t]++;
3135 }
3136
3137 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
3138 writeLong(f, min); /* low_pc */
3139 writeLong(f, max); /* high_pc */
3140 writeLong(f, length); /* # of samples */
3141 writeLong(f, 100); /* KLUDGE! We lie, ca. 100Hz best case. */
3142 writeString(f, "seconds");
3143 for (i = 0; i < (15-strlen("seconds")); i++)
3144 writeData(f, &zero, 1);
3145 writeString(f, "s");
3146
3147 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
3148
3149 char *data = malloc(2 * length);
3150 if (data != NULL) {
3151 for (i = 0; i < length; i++) {
3152 int val;
3153 val = buckets[i];
3154 if (val > 65535)
3155 val = 65535;
3156 data[i * 2] = val&0xff;
3157 data[i * 2 + 1] = (val >> 8) & 0xff;