X-Git-Url: https://review.openocd.org/gitweb?p=openocd.git;a=blobdiff_plain;f=src%2Ftarget%2Ftarget.c;h=51518e683390d0bcaf86bd0846a9c349c2331748;hp=5b2117df71b44b47b7d325eec0ac824972052d51;hb=bee7184ce4bd2beb10fb29d1b6ba4e4b33f1c2ed;hpb=374127301ec1d72033b9d573b72c7abdfd61990d diff --git a/src/target/target.c b/src/target/target.c index 5b2117df71..51518e6833 100644 --- a/src/target/target.c +++ b/src/target/target.c @@ -20,6 +20,9 @@ * Copyright (C) ST-Ericsson SA 2011 * * michel.jaouen@stericsson.com : smp minimum support * * * + * Copyright (C) 2011 Andreas Fritiofson * + * andreas.fritiofson@gmail.com * + * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * @@ -793,6 +796,137 @@ done: return retval; } +/** + * Executes a target-specific native code algorithm in the target. + * It differs from target_run_algorithm in that the algorithm is asynchronous. + * Because of this it requires an compliant algorithm: + * see contrib/loaders/flash/stm32f1x.S for example. + * + * @param target used to run the algorithm + */ + +int target_run_flash_async_algorithm(struct target *target, + uint8_t *buffer, uint32_t count, int block_size, + int num_mem_params, struct mem_param *mem_params, + int num_reg_params, struct reg_param *reg_params, + uint32_t buffer_start, uint32_t buffer_size, + uint32_t entry_point, uint32_t exit_point, void *arch_info) +{ + int retval; + + /* Set up working area. First word is write pointer, second word is read pointer, + * rest is fifo data area. */ + uint32_t wp_addr = buffer_start; + uint32_t rp_addr = buffer_start + 4; + uint32_t fifo_start_addr = buffer_start + 8; + uint32_t fifo_end_addr = buffer_start + buffer_size; + + uint32_t wp = fifo_start_addr; + uint32_t rp = fifo_start_addr; + + /* validate block_size is 2^n */ + assert(!block_size || !(block_size & (block_size - 1))); + + retval = target_write_u32(target, wp_addr, wp); + if (retval != ERROR_OK) + return retval; + retval = target_write_u32(target, rp_addr, rp); + if (retval != ERROR_OK) + return retval; + + /* Start up algorithm on target and let it idle while writing the first chunk */ + retval = target_start_algorithm(target, num_mem_params, mem_params, + num_reg_params, reg_params, + entry_point, + exit_point, + arch_info); + + if (retval != ERROR_OK) { + LOG_ERROR("error starting target flash write algorithm"); + return retval; + } + + while (count > 0) { + + retval = target_read_u32(target, rp_addr, &rp); + if (retval != ERROR_OK) { + LOG_ERROR("failed to get read pointer"); + break; + } + + LOG_DEBUG("count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32, count, wp, rp); + + if (rp == 0) { + LOG_ERROR("flash write algorithm aborted by target"); + retval = ERROR_FLASH_OPERATION_FAILED; + break; + } + + if ((rp & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) { + LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp); + break; + } + + /* Count the number of bytes available in the fifo without + * crossing the wrap around. Make sure to not fill it completely, + * because that would make wp == rp and that's the empty condition. 
*/ + uint32_t thisrun_bytes; + if (rp > wp) + thisrun_bytes = rp - wp - block_size; + else if (rp > fifo_start_addr) + thisrun_bytes = fifo_end_addr - wp; + else + thisrun_bytes = fifo_end_addr - wp - block_size; + + if (thisrun_bytes == 0) { + /* Throttle polling a bit if transfer is (much) faster than flash + * programming. The exact delay shouldn't matter as long as it's + * less than buffer size / flash speed. This is very unlikely to + * run when using high latency connections such as USB. */ + alive_sleep(10); + continue; + } + + /* Limit to the amount of data we actually want to write */ + if (thisrun_bytes > count * block_size) + thisrun_bytes = count * block_size; + + /* Write data to fifo */ + retval = target_write_buffer(target, wp, thisrun_bytes, buffer); + if (retval != ERROR_OK) + break; + + /* Update counters and wrap write pointer */ + buffer += thisrun_bytes; + count -= thisrun_bytes / block_size; + wp += thisrun_bytes; + if (wp >= fifo_end_addr) + wp = fifo_start_addr; + + /* Store updated write pointer to target */ + retval = target_write_u32(target, wp_addr, wp); + if (retval != ERROR_OK) + break; + } + + if (retval != ERROR_OK) { + /* abort flash write algorithm on target */ + target_write_u32(target, wp_addr, 0); + } + + int retval2 = target_wait_algorithm(target, num_mem_params, mem_params, + num_reg_params, reg_params, + exit_point, + 10000, + arch_info); + + if (retval2 != ERROR_OK) { + LOG_ERROR("error waiting for target flash write algorithm"); + retval = retval2; + } + + return retval; +} int target_read_memory(struct target *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer) @@ -1033,6 +1167,10 @@ COMMAND_HANDLER(handle_target_init_command) if (ERROR_OK != retval) return retval; + retval = command_run_line(CMD_CTX, "init_board"); + if (ERROR_OK != retval) + return retval; + LOG_DEBUG("Initializing targets..."); return target_init(CMD_CTX); } @@ -1228,11 +1366,85 @@ int target_call_timer_callbacks_now(void) return target_call_timer_callbacks_check_time(0); } -int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area) +/* Prints the working area layout for debug purposes */ +static void print_wa_layout(struct target *target) { struct working_area *c = target->working_areas; - struct working_area *new_wa = NULL; + while (c) { + LOG_DEBUG("%c%c 0x%08"PRIx32"-0x%08"PRIx32" (%"PRIu32" bytes)", + c->backup ? 'b' : ' ', c->free ? ' ' : '*', + c->address, c->address + c->size - 1, c->size); + c = c->next; + } +} + +/* Reduce area to size bytes, create a new free area from the remaining bytes, if any. 
*/ +static void target_split_working_area(struct working_area *area, uint32_t size) +{ + assert(area->free); /* Shouldn't split an allocated area */ + assert(size <= area->size); /* Caller should guarantee this */ + + /* Split only if not already the right size */ + if (size < area->size) { + struct working_area *new_wa = malloc(sizeof(*new_wa)); + + if (new_wa == NULL) + return; + + new_wa->next = area->next; + new_wa->size = area->size - size; + new_wa->address = area->address + size; + new_wa->backup = NULL; + new_wa->user = NULL; + new_wa->free = true; + + area->next = new_wa; + area->size = size; + + /* If backup memory was allocated to this area, it has the wrong size + * now so free it and it will be reallocated if/when needed */ + if (area->backup) { + free(area->backup); + area->backup = NULL; + } + } +} + +/* Merge all adjacent free areas into one */ +static void target_merge_working_areas(struct target *target) +{ + struct working_area *c = target->working_areas; + + while (c && c->next) { + assert(c->next->address == c->address + c->size); /* This is an invariant */ + + /* Find two adjacent free areas */ + if (c->free && c->next->free) { + /* Merge the last into the first */ + c->size += c->next->size; + + /* Remove the last */ + struct working_area *to_be_freed = c->next; + c->next = c->next->next; + if (to_be_freed->backup) + free(to_be_freed->backup); + free(to_be_freed); + + /* If backup memory was allocated to the remaining area, it's has + * the wrong size now */ + if (c->backup) { + free(c->backup); + c->backup = NULL; + } + } else { + c = c->next; + } + } +} + +int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area) +{ /* Reevaluate working area address based on MMU state*/ if (target->working_areas == NULL) { int retval; @@ -1245,8 +1457,8 @@ int target_alloc_working_area_try(struct target *target, uint32_t size, struct w if (!enabled) { if (target->working_area_phys_spec) { LOG_DEBUG("MMU disabled, using physical " - "address for working memory 0x%08x", - (unsigned)target->working_area_phys); + "address for working memory 0x%08"PRIx32, + target->working_area_phys); target->working_area = target->working_area_phys; } else { LOG_ERROR("No working memory available. " @@ -1256,8 +1468,8 @@ int target_alloc_working_area_try(struct target *target, uint32_t size, struct w } else { if (target->working_area_virt_spec) { LOG_DEBUG("MMU enabled, using virtual " - "address for working memory 0x%08x", - (unsigned)target->working_area_virt); + "address for working memory 0x%08"PRIx32, + target->working_area_virt); target->working_area = target->working_area_virt; } else { LOG_ERROR("No working memory available. 
" @@ -1265,70 +1477,62 @@ int target_alloc_working_area_try(struct target *target, uint32_t size, struct w return ERROR_TARGET_RESOURCE_NOT_AVAILABLE; } } + + /* Set up initial working area on first call */ + struct working_area *new_wa = malloc(sizeof(*new_wa)); + if (new_wa) { + new_wa->next = NULL; + new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */ + new_wa->address = target->working_area; + new_wa->backup = NULL; + new_wa->user = NULL; + new_wa->free = true; + } + + target->working_areas = new_wa; } /* only allocate multiples of 4 byte */ - if (size % 4) { - LOG_ERROR("BUG: code tried to allocate unaligned number of bytes (0x%08x), padding", ((unsigned)(size))); - size = (size + 3) & (~3); - } + if (size % 4) + size = (size + 3) & (~3UL); + + struct working_area *c = target->working_areas; - /* see if there's already a matching working area */ + /* Find the first large enough working area */ while (c) { - if ((c->free) && (c->size == size)) { - new_wa = c; + if (c->free && c->size >= size) break; - } c = c->next; } - /* if not, allocate a new one */ - if (!new_wa) { - struct working_area **p = &target->working_areas; - uint32_t first_free = target->working_area; - uint32_t free_size = target->working_area_size; - - c = target->working_areas; - while (c) { - first_free += c->size; - free_size -= c->size; - p = &c->next; - c = c->next; - } - - if (free_size < size) - return ERROR_TARGET_RESOURCE_NOT_AVAILABLE; + if (c == NULL) + return ERROR_TARGET_RESOURCE_NOT_AVAILABLE; - LOG_DEBUG("allocated new working area at address 0x%08x", (unsigned)first_free); + /* Split the working area into the requested size */ + target_split_working_area(c, size); - new_wa = malloc(sizeof(struct working_area)); - new_wa->next = NULL; - new_wa->size = size; - new_wa->address = first_free; + LOG_DEBUG("allocated new working area of %"PRIu32" bytes at address 0x%08"PRIx32, size, c->address); - if (target->backup_working_area) { - int retval; - new_wa->backup = malloc(new_wa->size); - retval = target_read_memory(target, new_wa->address, 4, - new_wa->size / 4, new_wa->backup); - if (retval != ERROR_OK) { - free(new_wa->backup); - free(new_wa); - return retval; - } - } else - new_wa->backup = NULL; + if (target->backup_working_area) { + if (c->backup == NULL) { + c->backup = malloc(c->size); + if (c->backup == NULL) + return ERROR_FAIL; + } - /* put new entry in list */ - *p = new_wa; + int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup); + if (retval != ERROR_OK) + return retval; } /* mark as used, and return the new (reused) area */ - new_wa->free = false; - *area = new_wa; + c->free = false; + *area = c; /* user pointer */ - new_wa->user = area; + c->user = area; + + print_wa_layout(target); return ERROR_OK; } @@ -1339,30 +1543,57 @@ int target_alloc_working_area(struct target *target, uint32_t size, struct worki retval = target_alloc_working_area_try(target, size, area); if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE) - LOG_WARNING("not enough working area available(requested %u)", (unsigned)(size)); + LOG_WARNING("not enough working area available(requested %"PRIu32")", size); return retval; } +static int target_restore_working_area(struct target *target, struct working_area *area) +{ + int retval = ERROR_OK; + + if (target->backup_working_area && area->backup != NULL) { + retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup); + if (retval != ERROR_OK) + LOG_ERROR("failed to restore %"PRIu32" bytes of working area at address 
0x%08"PRIx32, + area->size, area->address); + } + + return retval; +} + +/* Restore the area's backup memory, if any, and return the area to the allocation pool */ static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore) { + int retval = ERROR_OK; + if (area->free) - return ERROR_OK; + return retval; - if (restore && target->backup_working_area) { - int retval = target_write_memory(target, - area->address, 4, area->size / 4, area->backup); + if (restore) { + retval = target_restore_working_area(target, area); + /* REVISIT: Perhaps the area should be freed even if restoring fails. */ if (retval != ERROR_OK) return retval; } area->free = true; + LOG_DEBUG("freed %"PRIu32" bytes of working area at address 0x%08"PRIx32, + area->size, area->address); + /* mark user pointer invalid */ + /* TODO: Is this really safe? It points to some previous caller's memory. + * How could we know that the area pointer is still in that place and not + * some other vital data? What's the purpose of this, anyway? */ *area->user = NULL; area->user = NULL; - return ERROR_OK; + target_merge_working_areas(target); + + print_wa_layout(target); + + return retval; } int target_free_working_area(struct target *target, struct working_area *area) @@ -1377,19 +1608,24 @@ static void target_free_all_working_areas_restore(struct target *target, int res { struct working_area *c = target->working_areas; - while (c) { - struct working_area *next = c->next; - target_free_working_area_restore(target, c, restore); + LOG_DEBUG("freeing all working areas"); - if (c->backup) - free(c->backup); - - free(c); - - c = next; + /* Loop through all areas, restoring the allocated ones and marking them as free */ + while (c) { + if (!c->free) { + if (restore) + target_restore_working_area(target, c); + c->free = true; + *c->user = NULL; /* Same as above */ + c->user = NULL; + } + c = c->next; } - target->working_areas = NULL; + /* Run a merge pass to combine all areas into one */ + target_merge_working_areas(target); + + print_wa_layout(target); } void target_free_all_working_areas(struct target *target) @@ -1397,6 +1633,25 @@ void target_free_all_working_areas(struct target *target) target_free_all_working_areas_restore(target, 1); } +/* Find the largest number of bytes that can be allocated */ +uint32_t target_get_working_area_avail(struct target *target) +{ + struct working_area *c = target->working_areas; + uint32_t max_size = 0; + + if (c == NULL) + return target->working_area_size; + + while (c) { + if (c->free && max_size < c->size) + max_size = c->size; + + c = c->next; + } + + return max_size; +} + int target_arch_state(struct target *target) { int retval;