+
+ return ERROR_OK;
+}
+
/* Dispatch @a event for @a target to every registered event callback.
 *
 * For TARGET_EVENT_HALTED a synthetic TARGET_EVENT_GDB_HALT is delivered
 * first (recursively through this same function), then the Tcl event
 * handler runs, then the C callback list is walked.
 *
 * Always returns ERROR_OK; individual callback return values are ignored.
 */
int target_call_event_callbacks(target_t *target, enum target_event event)
{
	target_event_callback_t *callback = target_event_callbacks;
	target_event_callback_t *next_callback;

	if (event == TARGET_EVENT_HALTED)
	{
		/* execute early halted first */
		target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
	}

	LOG_DEBUG("target event %i (%s)",
			event,
			Jim_Nvp_value2name_simple(nvp_target_event, event)->name);

	/* run any Tcl-scripted handler bound to this event */
	target_handle_event(target, event);

	while (callback)
	{
		/* capture next before the call: a callback may unregister itself */
		next_callback = callback->next;
		callback->callback(target, event, callback->priv);
		callback = next_callback;
	}

	return ERROR_OK;
}
+
+static int target_timer_callback_periodic_restart(
+ target_timer_callback_t *cb, struct timeval *now)
+{
+ int time_ms = cb->time_ms;
+ cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
+ time_ms -= (time_ms % 1000);
+ cb->when.tv_sec = now->tv_sec + time_ms / 1000;
+ if (cb->when.tv_usec > 1000000)
+ {
+ cb->when.tv_usec = cb->when.tv_usec - 1000000;
+ cb->when.tv_sec += 1;
+ }
+ return ERROR_OK;
+}
+
+static int target_call_timer_callback(target_timer_callback_t *cb,
+ struct timeval *now)
+{
+ cb->callback(cb->priv);
+
+ if (cb->periodic)
+ return target_timer_callback_periodic_restart(cb, now);
+
+ return target_unregister_timer_callback(cb->callback, cb->priv);
+}
+
/* Service the registered timer callbacks.
 *
 * checktime != 0: run a callback only once its deadline has passed.
 * checktime == 0: additionally run every periodic callback right away,
 *                 regardless of its deadline.
 *
 * Returns ERROR_OK, or the first callback error (remaining callbacks
 * are then not serviced this round).
 */
static int target_call_timer_callbacks_check_time(int checktime)
{
	/* servicing timers may take a while; reassure the host GDB/telnet side */
	keep_alive();

	struct timeval now;
	gettimeofday(&now, NULL);

	target_timer_callback_t *callback = target_timer_callbacks;
	while (callback)
	{
		// cleaning up may unregister and free this callback
		target_timer_callback_t *next_callback = callback->next;

		/* fire if: forced periodic run, or the deadline has been reached */
		bool call_it = callback->callback &&
			((!checktime && callback->periodic) ||
			 now.tv_sec > callback->when.tv_sec ||
			 (now.tv_sec == callback->when.tv_sec &&
			  now.tv_usec >= callback->when.tv_usec));

		if (call_it)
		{
			int retval = target_call_timer_callback(callback, &now);
			if (retval != ERROR_OK)
				return retval;
		}

		callback = next_callback;
	}

	return ERROR_OK;
}
+
/* Run timer callbacks whose deadline has passed (checktime = 1). */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
+
/* invoke periodic callbacks immediately */
int target_call_timer_callbacks_now(void)
{
	/* checktime = 0: fire periodic callbacks without waiting for deadlines */
	return target_call_timer_callbacks_check_time(0);
}
+
+int target_alloc_working_area(struct target_s *target, uint32_t size, working_area_t **area)
+{
+ working_area_t *c = target->working_areas;
+ working_area_t *new_wa = NULL;
+
+ /* Reevaluate working area address based on MMU state*/
+ if (target->working_areas == NULL)
+ {
+ int retval;
+ int enabled;
+
+ retval = target->type->mmu(target, &enabled);
+ if (retval != ERROR_OK)
+ {
+ return retval;
+ }
+
+ if (!enabled) {
+ if (target->working_area_phys_spec) {
+ LOG_DEBUG("MMU disabled, using physical "
+ "address for working memory 0x%08x",
+ (unsigned)target->working_area_phys);
+ target->working_area = target->working_area_phys;
+ } else {
+ LOG_ERROR("No working memory available. "
+ "Specify -work-area-phys to target.");
+ return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ }
+ } else {
+ if (target->working_area_virt_spec) {
+ LOG_DEBUG("MMU enabled, using virtual "
+ "address for working memory 0x%08x",
+ (unsigned)target->working_area_virt);
+ target->working_area = target->working_area_virt;
+ } else {
+ LOG_ERROR("No working memory available. "
+ "Specify -work-area-virt to target.");
+ return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ }
+ }
+ }
+
+ /* only allocate multiples of 4 byte */
+ if (size % 4)
+ {
+ LOG_ERROR("BUG: code tried to allocate unaligned number of bytes (0x%08x), padding", ((unsigned)(size)));
+ size = (size + 3) & (~3);
+ }
+
+ /* see if there's already a matching working area */
+ while (c)
+ {
+ if ((c->free) && (c->size == size))
+ {
+ new_wa = c;
+ break;
+ }
+ c = c->next;
+ }
+
+ /* if not, allocate a new one */
+ if (!new_wa)
+ {
+ working_area_t **p = &target->working_areas;
+ uint32_t first_free = target->working_area;
+ uint32_t free_size = target->working_area_size;
+
+ c = target->working_areas;
+ while (c)
+ {
+ first_free += c->size;
+ free_size -= c->size;
+ p = &c->next;
+ c = c->next;
+ }
+
+ if (free_size < size)
+ {
+ LOG_WARNING("not enough working area available(requested %u, free %u)",
+ (unsigned)(size), (unsigned)(free_size));
+ return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ }
+
+ LOG_DEBUG("allocated new working area at address 0x%08x", (unsigned)first_free);
+
+ new_wa = malloc(sizeof(working_area_t));
+ new_wa->next = NULL;
+ new_wa->size = size;
+ new_wa->address = first_free;
+
+ if (target->backup_working_area)
+ {
+ int retval;
+ new_wa->backup = malloc(new_wa->size);
+ if ((retval = target_read_memory(target, new_wa->address, 4, new_wa->size / 4, new_wa->backup)) != ERROR_OK)
+ {
+ free(new_wa->backup);
+ free(new_wa);
+ return retval;
+ }
+ }
+ else
+ {
+ new_wa->backup = NULL;
+ }
+
+ /* put new entry in list */
+ *p = new_wa;
+ }
+
+ /* mark as used, and return the new (reused) area */
+ new_wa->free = 0;
+ *area = new_wa;
+
+ /* user pointer */
+ new_wa->user = area;
+
+ return ERROR_OK;
+}
+
+int target_free_working_area_restore(struct target_s *target, working_area_t *area, int restore)
+{
+ if (area->free)
+ return ERROR_OK;
+
+ if (restore && target->backup_working_area)
+ {
+ int retval;
+ if ((retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup)) != ERROR_OK)
+ return retval;
+ }
+
+ area->free = 1;
+
+ /* mark user pointer invalid */
+ *area->user = NULL;
+ area->user = NULL;
+
+ return ERROR_OK;
+}
+
/* Release a working area and restore the backed-up target memory. */
int target_free_working_area(struct target_s *target, working_area_t *area)
{
	return target_free_working_area_restore(target, area, 1);
}
+
/* free resources and restore memory, if restoring memory fails,
 * free up resources anyway
 */
void target_free_all_working_areas_restore(struct target_s *target, int restore)
{
	working_area_t *c = target->working_areas;

	while (c)
	{
		working_area_t *next = c->next;
		/* errors are deliberately ignored so cleanup always completes */
		target_free_working_area_restore(target, c, restore);

		if (c->backup)
			free(c->backup);

		free(c);

		c = next;
	}

	target->working_areas = NULL;
}
+
/* Release every working area, restoring backed-up target memory. */
void target_free_all_working_areas(struct target_s *target)
{
	target_free_all_working_areas_restore(target, 1);
}
+
+int target_arch_state(struct target_s *target)
+{
+ int retval;
+ if (target == NULL)
+ {
+ LOG_USER("No target has been configured");
+ return ERROR_OK;
+ }
+
+ LOG_USER("target state: %s", target_state_name( target ));
+
+ if (target->state != TARGET_HALTED)
+ return ERROR_OK;
+
+ retval = target->type->arch_state(target);
+ return retval;
+}
+
/* Single aligned words are guaranteed to use 16 or 32 bit access
 * mode respectively, otherwise data is handled as quickly as
 * possible
 */
int target_write_buffer(struct target_s *target, uint32_t address, uint32_t size, uint8_t *buffer)
{
	int retval;
	LOG_DEBUG("writing buffer of %i byte at 0x%8.8x",
		  (int)size, (unsigned)address);

	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	if (size == 0) {
		return ERROR_OK;
	}

	if ((address + size - 1) < address)
	{
		/* GDB can request this when e.g. PC is 0xfffffffc*/
		LOG_ERROR("address + size wrapped(0x%08x, 0x%08x)",
				(unsigned)address,
				(unsigned)size);
		return ERROR_FAIL;
	}

	/* halfword-aligned halfword write: guarantee a single 16-bit access */
	if (((address % 2) == 0) && (size == 2))
	{
		return target_write_memory(target, address, 2, 1, buffer);
	}

	/* handle unaligned head bytes */
	if (address % 4)
	{
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		if ((retval = target_write_memory(target, address, 1, unaligned, buffer)) != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4)
	{
		int aligned = size - (size % 4);

		/* use bulk writes above a certain limit. This may have to be changed */
		if (aligned > 128)
		{
			if ((retval = target->type->bulk_write_memory(target, address, aligned / 4, buffer)) != ERROR_OK)
				return retval;
		}
		else
		{
			if ((retval = target_write_memory(target, address, 4, aligned / 4, buffer)) != ERROR_OK)
				return retval;
		}

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}

	/* handle tail writes of less than 4 bytes */
	if (size > 0)
	{
		if ((retval = target_write_memory(target, address, 1, size, buffer)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
+
/* Single aligned words are guaranteed to use 16 or 32 bit access
 * mode respectively, otherwise data is handled as quickly as
 * possible
 */
int target_read_buffer(struct target_s *target, uint32_t address, uint32_t size, uint8_t *buffer)
{
	int retval;
	LOG_DEBUG("reading buffer of %i byte at 0x%8.8x",
			(int)size, (unsigned)address);

	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	if (size == 0) {
		return ERROR_OK;
	}

	if ((address + size - 1) < address)
	{
		/* GDB can request this when e.g. PC is 0xfffffffc*/
		LOG_ERROR("address + size wrapped(0x%08" PRIx32 ", 0x%08" PRIx32 ")",
				address,
				size);
		return ERROR_FAIL;
	}

	/* halfword-aligned halfword read: guarantee a single 16-bit access */
	if (((address % 2) == 0) && (size == 2))
	{
		return target_read_memory(target, address, 2, 1, buffer);
	}

	/* handle unaligned head bytes */
	if (address % 4)
	{
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		if ((retval = target_read_memory(target, address, 1, unaligned, buffer)) != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4)
	{
		int aligned = size - (size % 4);

		if ((retval = target_read_memory(target, address, 4, aligned / 4, buffer)) != ERROR_OK)
			return retval;

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}

	/*prevent byte access when possible (avoid AHB access limitations in some cases)*/
	if(size >=2)
	{
		int aligned = size - (size%2);
		retval = target_read_memory(target, address, 2, aligned / 2, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}
	/* handle tail reads of less than 4 bytes */
	if (size > 0)
	{
		if ((retval = target_read_memory(target, address, 1, size, buffer)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
+
/* Compute a CRC over @a size bytes of target memory at @a address.
 *
 * First tries the target's own (usually on-target, fast) checksum
 * routine; if that fails, falls back to reading the memory to the host
 * and checksumming it with image_calculate_checksum().
 *
 * Returns ERROR_OK with *crc set, or an error from the fallback path.
 */
int target_checksum_memory(struct target_s *target, uint32_t address, uint32_t size, uint32_t* crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	if ((retval = target->type->checksum_memory(target, address,
		size, &checksum)) != ERROR_OK)
	{
		/* fallback: read everything to the host and checksum locally */
		buffer = malloc(size);
		if (buffer == NULL)
		{
			LOG_ERROR("error allocating buffer for section (%d bytes)", (int)size);
			return ERROR_INVALID_ARGUMENTS;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK)
		{
			free(buffer);
			return retval;
		}

		/* convert to target endianess */
		/* NOTE(review): this get/set pair reads a word using target byte
		 * order and writes it back in the same order, which looks like a
		 * no-op — confirm the intended conversion before relying on it. */
		for (i = 0; i < (size/sizeof(uint32_t)); i++)
		{
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
+
+int target_blank_check_memory(struct target_s *target, uint32_t address, uint32_t size, uint32_t* blank)
+{
+ int retval;
+ if (!target_was_examined(target))
+ {
+ LOG_ERROR("Target not examined yet");
+ return ERROR_FAIL;
+ }
+
+ if (target->type->blank_check_memory == 0)
+ return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+
+ retval = target->type->blank_check_memory(target, address, size, blank);
+
+ return retval;
+}
+
/* Read one 32-bit word from target memory, converting from target
 * byte order to host order. On failure *value is zeroed.
 *
 * Returns ERROR_FAIL before examination, else the memory access result.
 */
int target_read_u32(struct target_s *target, uint32_t address, uint32_t *value)
{
	uint8_t value_buf[4];
	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	int retval = target_read_memory(target, address, 4, 1, value_buf);

	if (retval == ERROR_OK)
	{
		/* raw target-order bytes -> host-order value */
		*value = target_buffer_get_u32(target, value_buf);
		LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
				address,
				*value);
	}
	else
	{
		*value = 0x0;
		LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
				address);
	}

	return retval;
}
+
/* Read one 16-bit halfword from target memory, converting from target
 * byte order to host order. On failure *value is zeroed.
 *
 * Returns ERROR_FAIL before examination, else the memory access result.
 */
int target_read_u16(struct target_s *target, uint32_t address, uint16_t *value)
{
	uint8_t value_buf[2];
	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	int retval = target_read_memory(target, address, 2, 1, value_buf);

	if (retval == ERROR_OK)
	{
		/* raw target-order bytes -> host-order value */
		*value = target_buffer_get_u16(target, value_buf);
		LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
				address,
				*value);
	}
	else
	{
		*value = 0x0;
		LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
				address);
	}

	return retval;
}
+
+int target_read_u8(struct target_s *target, uint32_t address, uint8_t *value)
+{
+ int retval = target_read_memory(target, address, 1, 1, value);
+ if (!target_was_examined(target))
+ {
+ LOG_ERROR("Target not examined yet");
+ return ERROR_FAIL;
+ }
+
+ if (retval == ERROR_OK)
+ {
+ LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
+ address,
+ *value);
+ }
+ else
+ {
+ *value = 0x0;
+ LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
+ address);
+ }
+
+ return retval;
+}
+
/* Write one 32-bit word to target memory, converting from host byte
 * order to target order.
 *
 * Returns ERROR_FAIL before examination, else the memory access result.
 */
int target_write_u32(struct target_s *target, uint32_t address, uint32_t value)
{
	int retval;
	uint8_t value_buf[4];
	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
			address,
			value);

	/* host-order value -> raw target-order bytes */
	target_buffer_set_u32(target, value_buf, value);
	if ((retval = target_write_memory(target, address, 4, 1, value_buf)) != ERROR_OK)
	{
		LOG_DEBUG("failed: %i", retval);
	}

	return retval;
}
+
+int target_write_u16(struct target_s *target, uint32_t address, uint16_t value)
+{
+ int retval;
+ uint8_t value_buf[2];
+ if (!target_was_examined(target))
+ {
+ LOG_ERROR("Target not examined yet");
+ return ERROR_FAIL;
+ }
+
+ LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8x",
+ address,
+ value);
+
+ target_buffer_set_u16(target, value_buf, value);
+ if ((retval = target_write_memory(target, address, 2, 1, value_buf)) != ERROR_OK)
+ {
+ LOG_DEBUG("failed: %i", retval);
+ }
+
+ return retval;
+}
+
+int target_write_u8(struct target_s *target, uint32_t address, uint8_t value)
+{
+ int retval;
+ if (!target_was_examined(target))
+ {
+ LOG_ERROR("Target not examined yet");
+ return ERROR_FAIL;
+ }
+
+ LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
+ address, value);
+
+ if ((retval = target_write_memory(target, address, 1, 1, &value)) != ERROR_OK)
+ {
+ LOG_DEBUG("failed: %i", retval);
+ }
+
+ return retval;
+}
+
/* 'targets' command: with an argument, select that target as current;
 * with no argument (or on lookup failure), dump a table of all targets.
 *
 * Returns ERROR_OK, or ERROR_FAIL when the named target's TAP is disabled.
 */
static int handle_targets_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
{
	target_t *target = all_targets;

	if (argc == 1)
	{
		target = get_target(args[0]);
		if (target == NULL) {
			command_print(cmd_ctx,"Target: %s is unknown, try one of:\n", args[0]);
			/* fall through to the listing below as a hint to the user */
			goto DumpTargets;
		}
		if (!target->tap->enabled) {
			command_print(cmd_ctx,"Target: TAP %s is disabled, "
					"can't be the current target\n",
					target->tap->dotted_name);
			return ERROR_FAIL;
		}

		cmd_ctx->current_target = target->target_number;
		return ERROR_OK;
	}
DumpTargets:

	target = all_targets;
	command_print(cmd_ctx, "    TargetName         Type       Endian TapName            State       ");
	command_print(cmd_ctx, "--  ------------------ ---------- ------ ------------------ ------------");
	while (target)
	{
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name( target );
		else
			state = "tap-disabled";

		/* '*' marks the currently selected target */
		if (cmd_ctx->current_target == target->target_number)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(cmd_ctx, "%2d%c %-18s %-10s %-6s %-18s %s",
					  target->target_number,
					  marker,
					  target->cmd_name,
					  target_get_name(target),
					  Jim_Nvp_value2name_simple(nvp_target_endian,
								target->endianness)->name,
					  target->tap->dotted_name,
					  state);
		target = target->next;
	}

	return ERROR_OK;
}
+
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* latest sampled state, written by sense_handler(), read by handle_target() */
static int powerDropout;
static int srstAsserted;

/* edge/debounce action flags: set by sense_handler(), consumed and
 * cleared by handle_target() */
static int runPowerRestore;
static int runPowerDropout;
static int runSrstAsserted;
static int runSrstDeasserted;
+
/* Sample power-dropout and SRST state from the JTAG layer, detect
 * edges against the previous sample, and raise the run* action flags.
 * Power-dropout and SRST-deassert events are rate-limited to one per
 * 2000 ms via static timestamps.
 *
 * Returns ERROR_OK or the JTAG query error.
 */
static int sense_handler(void)
{
	/* previous samples, kept across calls for edge detection */
	static int prevSrstAsserted = 0;
	static int prevPowerdropout = 0;

	int retval;
	if ((retval = jtag_power_dropout(&powerDropout)) != ERROR_OK)
		return retval;

	int powerRestored;
	powerRestored = prevPowerdropout && !powerDropout;
	if (powerRestored)
	{
		runPowerRestore = 1;
	}

	long long current = timeval_ms();
	static long long lastPower = 0;
	/* debounce: at most one power-dropout event every 2000 ms */
	int waitMore = lastPower + 2000 > current;
	if (powerDropout && !waitMore)
	{
		runPowerDropout = 1;
		lastPower = current;
	}

	if ((retval = jtag_srst_asserted(&srstAsserted)) != ERROR_OK)
		return retval;

	int srstDeasserted;
	srstDeasserted = prevSrstAsserted && !srstAsserted;

	static long long lastSrst = 0;
	/* debounce: at most one SRST-deassert event every 2000 ms */
	waitMore = lastSrst + 2000 > current;
	if (srstDeasserted && !waitMore)
	{
		runSrstDeasserted = 1;
		lastSrst = current;
	}

	if (!prevSrstAsserted && srstAsserted)
	{
		runSrstAsserted = 1;
	}

	prevSrstAsserted = srstAsserted;
	prevPowerdropout = powerDropout;

	if (srstDeasserted || powerRestored)
	{
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
+
+static void target_call_event_callbacks_all(enum target_event e) {
+ target_t *target;
+ target = all_targets;
+ while (target) {
+ target_call_event_callbacks(target, e);
+ target = target->next;
+ }
+}
+
/* process target state changes */
/* Periodic housekeeping: run sense-driven Tcl event handlers
 * (srst_asserted/deasserted, power_dropout/restore) exactly once per
 * detection, then poll every enabled target for state changes.
 *
 * Returns ERROR_OK, or the first polling error (after raising a
 * GDB_HALT event for that target).
 */
int handle_target(void *priv)
{
	int retval = ERROR_OK;

	/* we do not want to recurse here... */
	static int recursive = 0;
	if (! recursive)
	{
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (runSrstAsserted)
		{
			target_call_event_callbacks_all(TARGET_EVENT_GDB_HALT);
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (runSrstDeasserted)
		{
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (runPowerDropout)
		{
			target_call_event_callbacks_all(TARGET_EVENT_GDB_HALT);
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (runPowerRestore)
		{
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something)
		{
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		runSrstAsserted = 0;
		runSrstDeasserted = 0;
		runPowerRestore = 0;
		runPowerDropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (target_t *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next)
	{
		if (!target->tap->enabled)
			continue;

		/* only poll target if we've got power and srst isn't asserted */
		if (!powerDropout && !srstAsserted)
		{
			/* polling may fail silently until the target has been examined */
			if ((retval = target_poll(target)) != ERROR_OK)
			{
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
				return retval;
			}
		}
	}

	return retval;
}
+
/* 'reg' command:
 *   reg                 - list all registers of the current target
 *   reg <#|name>        - display one register (add "force" to re-read)
 *   reg <#|name> value  - set a register
 *
 * Returns ERROR_OK (user-facing errors are printed, not returned).
 */
static int handle_reg_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
{
	target_t *target;
	reg_t *reg = NULL;
	int count = 0;
	char *value;

	LOG_DEBUG("-");

	target = get_current_target(cmd_ctx);

	/* list all available registers for the current target */
	if (argc == 0)
	{
		reg_cache_t *cache = target->reg_cache;

		count = 0;
		while (cache)
		{
			int i;

			command_print(cmd_ctx, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++)
			{
				/* only print cached values if they are valid */
				if (reg->valid) {
					value = buf_to_str(reg->value,
							reg->size, 16);
					command_print(cmd_ctx,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
								? " (dirty)"
								: "");
					free(value);
				} else {
					command_print(cmd_ctx, "(%i) %s (/%" PRIu32 ")",
							  count, reg->name,
							  reg->size) ;
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((args[0][0] >= '0') && (args[0][0] <= '9'))
	{
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, args[0], num);

		/* ordinal numbering spans all caches, in list order */
		reg_cache_t *cache = target->reg_cache;
		count = 0;
		while (cache)
		{
			int i;
			for (i = 0; i < cache->num_regs; i++)
			{
				if (count++ == (int)num)
				{
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg)
		{
			command_print(cmd_ctx, "%i is out of bounds, the current target has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else /* access a single register by its name */
	{
		reg = register_get_by_name(target->reg_cache, args[0], 1);

		if (!reg)
		{
			command_print(cmd_ctx, "register %s not found in current target", args[0]);
			return ERROR_OK;
		}
	}

	/* display a register */
	if ((argc == 1) || ((argc == 2) && !((args[1][0] >= '0') && (args[1][0] <= '9'))))
	{
		/* "force" invalidates the cache so the value is re-read below */
		if ((argc == 2) && (strcmp(args[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0)
		{
			reg_arch_type_t *arch_type = register_get_arch_type(reg->arch_type);
			arch_type->get(reg);
		}
		value = buf_to_str(reg->value, reg->size, 16);
		command_print(cmd_ctx, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (argc == 2)
	{
		/* buffer sized to the register width, rounded up to whole bytes */
		uint8_t *buf = malloc(CEIL(reg->size, 8));
		str_to_buf(args[1], strlen(args[1]), buf, reg->size, 0);

		reg_arch_type_t *arch_type = register_get_arch_type(reg->arch_type);
		arch_type->set(reg, buf);

		value = buf_to_str(reg->value, reg->size, 16);
		command_print(cmd_ctx, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);

		free(buf);

		return ERROR_OK;
	}

	command_print(cmd_ctx, "usage: reg <#|name> [value]");

	return ERROR_OK;
}
+
/* 'poll' command: with no argument, report polling status and poll the
 * current target once; with "on"/"off", enable or disable background
 * polling.
 *
 * Returns ERROR_OK, a poll/arch-state error, or
 * ERROR_COMMAND_SYNTAX_ERROR for bad usage.
 */
static int handle_poll_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
{
	int retval = ERROR_OK;
	target_t *target = get_current_target(cmd_ctx);

	if (argc == 0)
	{
		command_print(cmd_ctx, "background polling: %s",
				jtag_poll_get_enabled() ? "on" : "off");
		command_print(cmd_ctx, "TAP: %s (%s)",
				target->tap->dotted_name,
				target->tap->enabled ? "enabled" : "disabled");
		/* a disabled TAP cannot be polled */
		if (!target->tap->enabled)
			return ERROR_OK;
		if ((retval = target_poll(target)) != ERROR_OK)
			return retval;
		if ((retval = target_arch_state(target)) != ERROR_OK)
			return retval;

	}
	else if (argc == 1)
	{
		if (strcmp(args[0], "on") == 0)
		{
			jtag_poll_set_enabled(true);
		}
		else if (strcmp(args[0], "off") == 0)
		{
			jtag_poll_set_enabled(false);
		}
		else
		{
			command_print(cmd_ctx, "arg is \"on\" or \"off\"");
		}
	} else
	{
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	return retval;
}
+
/* 'wait_halt' command: block until the current target halts, with an
 * optional timeout given in seconds (default 5 s).
 *
 * Returns the result of target_wait_state(), or
 * ERROR_COMMAND_SYNTAX_ERROR on bad arguments.
 */
static int handle_wait_halt_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
{
	if (argc > 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	unsigned ms = 5000;
	if (1 == argc)
	{
		int retval = parse_uint(args[0], &ms);
		if (ERROR_OK != retval)
		{
			command_print(cmd_ctx, "usage: %s [seconds]", cmd);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		// convert seconds (given) to milliseconds (needed)
		// NOTE(review): values above UINT_MAX/1000 seconds wrap here
		ms *= 1000;
	}

	target_t *target = get_current_target(cmd_ctx);
	return target_wait_state(target, TARGET_HALTED, ms);
}
+
/* wait for target state to change. The trick here is to have a low
 * latency for short waits and not to suck up all the CPU time
 * on longer waits.
 *
 * After 500ms, keep_alive() is invoked
 */
/* Poll @a target until it reaches @a state or @a ms milliseconds pass.
 * Returns ERROR_OK on reaching the state, a polling error, or
 * ERROR_FAIL on timeout.
 */
int target_wait_state(target_t *target, enum target_state state, int ms)
{
	int retval;
	long long then = 0, cur;
	int once = 1;

	for (;;)
	{
		if ((retval = target_poll(target)) != ERROR_OK)
			return retval;
		if (target->state == state)
		{
			break;
		}
		cur = timeval_ms();
		if (once)
		{
			/* start the timeout clock on the first unsuccessful poll */
			once = 0;
			then = timeval_ms();
			LOG_DEBUG("waiting for target %s...",
				Jim_Nvp_value2name_simple(nvp_target_state,state)->name);
		}

		if (cur-then > 500)
		{
			keep_alive();
		}

		if ((cur-then) > ms)
		{
			LOG_ERROR("timed out while waiting for target %s",
				Jim_Nvp_value2name_simple(nvp_target_state,state)->name);
			return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}
+
/* 'halt' command: request a target halt, then (unless the optional
 * argument is 0) wait for the halted state via handle_wait_halt_command.
 *
 * Returns the halt/wait result, or ERROR_COMMAND_SYNTAX_ERROR for a
 * non-numeric argument.
 */
static int handle_halt_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
{
	LOG_DEBUG("-");

	target_t *target = get_current_target(cmd_ctx);
	int retval = target_halt(target);
	if (ERROR_OK != retval)
		return retval;

	if (argc == 1)
	{
		unsigned wait;
		retval = parse_uint(args[0], &wait);
		if (ERROR_OK != retval)
			return ERROR_COMMAND_SYNTAX_ERROR;
		/* explicit 0 means "don't wait for the halt to complete" */
		if (!wait)
			return ERROR_OK;
	}

	return handle_wait_halt_command(cmd_ctx, cmd, args, argc);
}
+
+static int handle_soft_reset_halt_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
+{
+ target_t *target = get_current_target(cmd_ctx);
+
+ LOG_USER("requesting target halt and executing a soft reset");
+
+ target->type->soft_reset_halt(target);
+