quark_x10xx: add new target quark_x10xx
[openocd.git] / src / target / x86_32_common.c
1 /*
2 * Copyright(c) 2013 Intel Corporation.
3 *
4 * Adrian Burns (adrian.burns@intel.com)
5 * Thomas Faust (thomas.faust@intel.com)
6 * Ivan De Cesaris (ivan.de.cesaris@intel.com)
7 * Julien Carreno (julien.carreno@intel.com)
8 * Jeffrey Maxwell (jeffrey.r.maxwell@intel.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * Contact Information:
24 * Intel Corporation
25 */
26
27 /*
28 * @file
29 * This implements generic x86 32 bit memory and breakpoint operations.
30 */
31
32 #ifdef HAVE_CONFIG_H
33 #include "config.h"
34 #endif
35
36 #include <helper/log.h>
37
38 #include "target.h"
39 #include "target_type.h"
40 #include "register.h"
41 #include "breakpoints.h"
42 #include "x86_32_common.h"
43
44 static int set_debug_regs(struct target *t, uint32_t address,
45 uint8_t bp_num, uint8_t bp_type, uint8_t bp_length);
46 static int unset_debug_regs(struct target *t, uint8_t bp_num);
47 static int read_mem(struct target *t, uint32_t size,
48 uint32_t addr, uint8_t *buf);
49 static int write_mem(struct target *t, uint32_t size,
50 uint32_t addr, const uint8_t *buf);
51 static int calcaddr_pyhsfromlin(struct target *t, uint32_t addr,
52 uint32_t *physaddr);
53 static int read_phys_mem(struct target *t, uint32_t phys_address,
54 uint32_t size, uint32_t count, uint8_t *buffer);
55 static int write_phys_mem(struct target *t, uint32_t phys_address,
56 uint32_t size, uint32_t count, const uint8_t *buffer);
57 static int set_breakpoint(struct target *target,
58 struct breakpoint *breakpoint);
59 static int unset_breakpoint(struct target *target,
60 struct breakpoint *breakpoint);
61 static int set_watchpoint(struct target *target,
62 struct watchpoint *watchpoint);
63 static int unset_watchpoint(struct target *target,
64 struct watchpoint *watchpoint);
65 static int read_hw_reg_to_cache(struct target *t, int num);
66 static int write_hw_reg_from_cache(struct target *t, int num);
67
68 int x86_32_get_gdb_reg_list(struct target *t,
69 struct reg **reg_list[], int *reg_list_size,
70 enum target_register_class reg_class)
71 {
72
73 struct x86_32_common *x86_32 = target_to_x86_32(t);
74 int i;
75 *reg_list_size = x86_32->cache->num_regs;
76 LOG_DEBUG("num_regs=%d, reg_class=%d", (*reg_list_size), reg_class);
77 *reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));
78 if (*reg_list == NULL) {
79 LOG_ERROR("%s out of memory", __func__);
80 return ERROR_FAIL;
81 }
82 /* this will copy the values from our reg list to gdbs */
83 for (i = 0; i < (*reg_list_size); i++) {
84 (*reg_list)[i] = &x86_32->cache->reg_list[i];
85 LOG_DEBUG("value %s = %08" PRIx32, x86_32->cache->reg_list[i].name,
86 buf_get_u32(x86_32->cache->reg_list[i].value, 0, 32));
87 }
88 return ERROR_OK;
89 }
90
91 int x86_32_common_init_arch_info(struct target *t, struct x86_32_common *x86_32)
92 {
93 t->arch_info = x86_32;
94 x86_32->common_magic = X86_32_COMMON_MAGIC;
95 x86_32->num_hw_bpoints = MAX_DEBUG_REGS;
96 x86_32->hw_break_list = calloc(x86_32->num_hw_bpoints,
97 sizeof(struct x86_32_dbg_reg));
98 if (x86_32->hw_break_list == NULL) {
99 LOG_ERROR("%s out of memory", __func__);
100 return ERROR_FAIL;
101 }
102 x86_32->curr_tap = t->tap;
103 x86_32->fast_data_area = NULL;
104 x86_32->flush = 1;
105 x86_32->read_hw_reg_to_cache = read_hw_reg_to_cache;
106 x86_32->write_hw_reg_from_cache = write_hw_reg_from_cache;
107 return ERROR_OK;
108 }
109
110 int x86_32_common_mmu(struct target *t, int *enabled)
111 {
112 *enabled = true;
113 return ERROR_OK;
114 }
115
116 int x86_32_common_virt2phys(struct target *t, uint32_t address, uint32_t *physical)
117 {
118 struct x86_32_common *x86_32 = target_to_x86_32(t);
119
120 /*
121 * We need to ignore 'segmentation' for now, as OpenOCD can't handle
122 * segmented addresses.
123 * In protected mode that is almost OK, as (almost) any known OS is using
124 * flat segmentation. In real mode we use use the base of the DS segment,
125 * as we don't know better ...
126 */
127
128 uint32_t cr0 = buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32);
129 if (!(cr0 & CR0_PG)) {
130 /* target halted in real mode */
131 /* TODO: needs validation !!! */
132 uint32_t dsb = buf_get_u32(x86_32->cache->reg_list[DSB].value, 0, 32);
133 *physical = dsb + address;
134
135 } else {
136 /* target halted in protected mode */
137 if (calcaddr_pyhsfromlin(t, address, physical) != ERROR_OK) {
138 LOG_ERROR("%s failed to calculate physical address from 0x%08" PRIx32,
139 __func__, address);
140 return ERROR_FAIL;
141 }
142 }
143 return ERROR_OK;
144 }
145
146 int x86_32_common_read_phys_mem(struct target *t, uint32_t phys_address,
147 uint32_t size, uint32_t count, uint8_t *buffer)
148 {
149 struct x86_32_common *x86_32 = target_to_x86_32(t);
150 int error;
151
152 error = read_phys_mem(t, phys_address, size, count, buffer);
153 if (error != ERROR_OK)
154 return error;
155
156 /* After reading memory from target, we must replace software breakpoints
157 * with the original instructions again.
158 */
159 struct swbp_mem_patch *iter = x86_32->swbbp_mem_patch_list;
160 while (iter != NULL) {
161 if (iter->physaddr >= phys_address && iter->physaddr < phys_address+(size*count)) {
162 uint32_t offset = iter->physaddr - phys_address;
163 buffer[offset] = iter->orig_byte;
164 }
165 iter = iter->next;
166 }
167 return ERROR_OK;
168 }
169
170 static int read_phys_mem(struct target *t, uint32_t phys_address,
171 uint32_t size, uint32_t count, uint8_t *buffer)
172 {
173 int retval = ERROR_OK;
174 bool pg_disabled = false;
175 LOG_DEBUG("addr=%08" PRIx32 ", size=%d, count=%d, buf=%p",
176 phys_address, size, count, buffer);
177 struct x86_32_common *x86_32 = target_to_x86_32(t);
178
179 if (check_not_halted(t))
180 return ERROR_TARGET_NOT_HALTED;
181 if (!count || !buffer || !phys_address) {
182 LOG_ERROR("%s invalid params count=%d, buf=%p, addr=%08" PRIx32,
183 __func__, count, buffer, phys_address);
184 return ERROR_COMMAND_ARGUMENT_INVALID;
185 }
186
187 /* to access physical memory, switch off the CR0.PG bit */
188 if (x86_32->is_paging_enabled(t)) {
189 retval = x86_32->disable_paging(t);
190 if (retval != ERROR_OK)
191 return retval;
192 pg_disabled = true;
193 }
194
195 for (uint32_t i = 0; i < count; i++) {
196 switch (size) {
197 case BYTE:
198 retval = read_mem(t, size, phys_address + i, buffer + i);
199 break;
200 case WORD:
201 retval = read_mem(t, size, phys_address + i * 2, buffer + i * 2);
202 break;
203 case DWORD:
204 retval = read_mem(t, size, phys_address + i * 4, buffer + i * 4);
205 break;
206 default:
207 LOG_ERROR("%s invalid read size", __func__);
208 break;
209 }
210 }
211 /* restore CR0.PG bit if needed (regardless of retval) */
212 if (pg_disabled) {
213 retval = x86_32->enable_paging(t);
214 if (retval != ERROR_OK)
215 return retval;
216 pg_disabled = true;
217 }
218 /* TODO: After reading memory from target, we must replace
219 * software breakpoints with the original instructions again.
220 * Solve this with the breakpoint fix
221 */
222 return retval;
223 }
224
225 int x86_32_common_write_phys_mem(struct target *t, uint32_t phys_address,
226 uint32_t size, uint32_t count, const uint8_t *buffer)
227 {
228 struct x86_32_common *x86_32 = target_to_x86_32(t);
229 int error = ERROR_OK;
230 uint8_t *newbuffer = NULL;
231
232 check_not_halted(t);
233 if (!count || !buffer || !phys_address) {
234 LOG_ERROR("%s invalid params count=%d, buf=%p, addr=%08" PRIx32,
235 __func__, count, buffer, phys_address);
236 return ERROR_COMMAND_ARGUMENT_INVALID;
237 }
238 /* Before writing memory to target, we must update software breakpoints
239 * with the new instructions and patch the memory buffer with the
240 * breakpoint instruction.
241 */
242 newbuffer = malloc(size*count);
243 if (newbuffer == NULL) {
244 LOG_ERROR("%s out of memory", __func__);
245 return ERROR_FAIL;
246 }
247 memcpy(newbuffer, buffer, size*count);
248 struct swbp_mem_patch *iter = x86_32->swbbp_mem_patch_list;
249 while (iter != NULL) {
250 if (iter->physaddr >= phys_address && iter->physaddr < phys_address+(size*count)) {
251 uint32_t offset = iter->physaddr - phys_address;
252 newbuffer[offset] = SW_BP_OPCODE;
253
254 /* update the breakpoint */
255 struct breakpoint *pbiter = t->breakpoints;
256 while (pbiter != NULL && pbiter->unique_id != iter->swbp_unique_id)
257 pbiter = pbiter->next;
258 if (pbiter)
259 pbiter->orig_instr[0] = buffer[offset];
260 }
261 iter = iter->next;
262 }
263
264 error = write_phys_mem(t, phys_address, size, count, newbuffer);
265 free(newbuffer);
266 return error;
267 }
268
269 static int write_phys_mem(struct target *t, uint32_t phys_address,
270 uint32_t size, uint32_t count, const uint8_t *buffer)
271 {
272 int retval = ERROR_OK;
273 bool pg_disabled = false;
274 struct x86_32_common *x86_32 = target_to_x86_32(t);
275 LOG_DEBUG("addr=%08" PRIx32 ", size=%d, count=%d, buf=%p",
276 phys_address, size, count, buffer);
277
278 check_not_halted(t);
279 if (!count || !buffer || !phys_address) {
280 LOG_ERROR("%s invalid params count=%d, buf=%p, addr=%08" PRIx32,
281 __func__, count, buffer, phys_address);
282 return ERROR_COMMAND_ARGUMENT_INVALID;
283 }
284 /* TODO: Before writing memory to target, we must update
285 * software breakpoints with the new instructions and
286 * patch the memory buffer with the breakpoint instruction.
287 * Solve this with the breakpoint fix
288 */
289
290 /* to access physical memory, switch off the CR0.PG bit */
291 if (x86_32->is_paging_enabled(t)) {
292 retval = x86_32->disable_paging(t);
293 if (retval != ERROR_OK)
294 return retval;
295 pg_disabled = true;
296 }
297 for (uint32_t i = 0; i < count; i++) {
298 switch (size) {
299 case BYTE:
300 retval = write_mem(t, size, phys_address + i, buffer + i);
301 break;
302 case WORD:
303 retval = write_mem(t, size, phys_address + i * 2, buffer + i * 2);
304 break;
305 case DWORD:
306 retval = write_mem(t, size, phys_address + i * 4, buffer + i * 4);
307 break;
308 default:
309 LOG_DEBUG("invalid read size");
310 break;
311 }
312 }
313 /* restore CR0.PG bit if needed (regardless of retval) */
314 if (pg_disabled) {
315 retval = x86_32->enable_paging(t);
316 if (retval != ERROR_OK)
317 return retval;
318 }
319 return retval;
320 }
321
322 static int read_mem(struct target *t, uint32_t size,
323 uint32_t addr, uint8_t *buf)
324 {
325 struct x86_32_common *x86_32 = target_to_x86_32(t);
326
327 /* if CS.D bit=1 then its a 32 bit code segment, else 16 */
328 bool use32 = (buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32)) & CSAR_D;
329 int retval = x86_32->write_hw_reg(t, EAX, addr, 0);
330 if (retval != ERROR_OK) {
331 LOG_ERROR("%s error write EAX", __func__);
332 return retval;
333 }
334
335 switch (size) {
336 case BYTE:
337 if (use32)
338 retval = x86_32->submit_instruction(t, MEMRDB32);
339 else
340 retval = x86_32->submit_instruction(t, MEMRDB16);
341 break;
342 case WORD:
343 if (use32)
344 retval = x86_32->submit_instruction(t, MEMRDH32);
345 else
346 retval = x86_32->submit_instruction(t, MEMRDH16);
347 break;
348 case DWORD:
349 if (use32)
350 retval = x86_32->submit_instruction(t, MEMRDW32);
351 else
352 retval = x86_32->submit_instruction(t, MEMRDW16);
353 break;
354 default:
355 LOG_ERROR("%s invalid read mem size", __func__);
356 break;
357 }
358
359 /* read_hw_reg() will write to 4 bytes (uint32_t)
360 * Watch out, the buffer passed into read_mem() might be 1 or 2 bytes.
361 */
362 uint32_t regval;
363 retval = x86_32->read_hw_reg(t, EDX, &regval, 0);
364
365 if (retval != ERROR_OK) {
366 LOG_ERROR("%s error read EDX", __func__);
367 return retval;
368 }
369 for (uint8_t i = 0; i < size; i++)
370 buf[i] = (regval >> (i*8)) & 0x000000FF;
371
372 retval = x86_32->transaction_status(t);
373 if (retval != ERROR_OK) {
374 LOG_ERROR("%s error on mem read", __func__);
375 return retval;
376 }
377 return retval;
378 }
379
380 static int write_mem(struct target *t, uint32_t size,
381 uint32_t addr, const uint8_t *buf)
382 {
383 uint32_t i = 0;
384 uint32_t buf4bytes = 0;
385 int retval = ERROR_OK;
386 struct x86_32_common *x86_32 = target_to_x86_32(t);
387
388 for (i = 0; i < size; ++i) {
389 buf4bytes = buf4bytes << 8; /* first time we only shift 0s */
390 buf4bytes += buf[(size-1)-i]; /* it was hard to write, should be hard to read! */
391 }
392 /* if CS.D bit=1 then its a 32 bit code segment, else 16 */
393 bool use32 = (buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32)) & CSAR_D;
394 retval = x86_32->write_hw_reg(t, EAX, addr, 0);
395 if (retval != ERROR_OK) {
396 LOG_ERROR("%s error write EAX", __func__);
397 return retval;
398 }
399
400 /* write_hw_reg() will write to 4 bytes (uint32_t)
401 * Watch out, the buffer passed into write_mem() might be 1 or 2 bytes.
402 */
403 retval = x86_32->write_hw_reg(t, EDX, buf4bytes, 0);
404 if (retval != ERROR_OK) {
405 LOG_ERROR("%s error write EDX", __func__);
406 return retval;
407 }
408 switch (size) {
409 case BYTE:
410 if (use32)
411 retval = x86_32->submit_instruction(t, MEMWRB32);
412 else
413 retval = x86_32->submit_instruction(t, MEMWRB16);
414 break;
415 case WORD:
416 if (use32)
417 retval = x86_32->submit_instruction(t, MEMWRH32);
418 else
419 retval = x86_32->submit_instruction(t, MEMWRH16);
420 break;
421 case DWORD:
422 if (use32)
423 retval = x86_32->submit_instruction(t, MEMWRW32);
424 else
425 retval = x86_32->submit_instruction(t, MEMWRW16);
426 break;
427 default:
428 LOG_ERROR("%s invalid write mem size", __func__);
429 return ERROR_FAIL;
430 }
431 retval = x86_32->transaction_status(t);
432 if (retval != ERROR_OK) {
433 LOG_ERROR("%s error on mem write", __func__);
434 return retval;
435 }
436 return retval;
437 }
438
/* Translate linear address 'addr' to a physical address by walking the
 * target's page tables (32-bit and PAE modes; PSE-36 / >4GB physical
 * addresses are truncated to 32 bits).
 * Returns ERROR_OK with *physaddr set, or ERROR_FAIL on a read error or a
 * not-present entry anywhere in the walk.
 */
int calcaddr_pyhsfromlin(struct target *t, uint32_t addr, uint32_t *physaddr)
{
	/* scratch space for one 4- or 8-byte paging-structure entry */
	uint8_t entry_buffer[8];

	if (physaddr == NULL || t == NULL)
		return ERROR_FAIL;

	struct x86_32_common *x86_32 = target_to_x86_32(t);

	/* The 'user-visible' CR0.PG should be set - otherwise the function shouldn't be called
	 * (Don't check the CR0.PG on the target, this might be temporally disabled at this point)
	 */
	uint32_t cr0 = buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32);
	if (!(cr0 & CR0_PG)) {
		/* paging is off: linear address IS the physical address */
		*physaddr = addr;
		return ERROR_OK;
	}

	uint32_t cr4 = buf_get_u32(x86_32->cache->reg_list[CR4].value, 0, 32);
	bool isPAE = cr4 & 0x00000020; /* PAE - Physical Address Extension */

	/* CR3 holds the physical base of the top-level paging structure */
	uint32_t cr3 = buf_get_u32(x86_32->cache->reg_list[CR3].value, 0, 32);
	if (isPAE) {
		/* PAE: 3-level walk with 8-byte entries (PDPT -> PD -> PT) */
		uint32_t pdpt_base = cr3 & 0xFFFFF000; /* lower 12 bits of CR3 must always be 0 */
		uint32_t pdpt_index = (addr & 0xC0000000) >> 30; /* A[31:30] index to PDPT */
		uint32_t pdpt_addr = pdpt_base + (8 * pdpt_index);
		/* entries are 8 bytes, read as two 4-byte items */
		if (x86_32_common_read_phys_mem(t, pdpt_addr, 4, 2, entry_buffer) != ERROR_OK) {
			LOG_ERROR("%s couldn't read page directory pointer table entry at 0x%08" PRIx32,
					__func__, pdpt_addr);
			return ERROR_FAIL;
		}
		uint64_t pdpt_entry = target_buffer_get_u64(t, entry_buffer);
		if (!(pdpt_entry & 0x0000000000000001)) {
			/* P (present) bit clear: no translation exists */
			LOG_ERROR("%s page directory pointer table entry at 0x%08" PRIx32 " is not present",
					__func__, pdpt_addr);
			return ERROR_FAIL;
		}

		uint32_t pd_base = pdpt_entry & 0xFFFFF000; /* A[31:12] is PageTable/Page Base Address */
		uint32_t pd_index = (addr & 0x3FE00000) >> 21; /* A[29:21] index to PD entry with PAE */
		uint32_t pd_addr = pd_base + (8 * pd_index);
		if (x86_32_common_read_phys_mem(t, pd_addr, 4, 2, entry_buffer) != ERROR_OK) {
			LOG_ERROR("%s couldn't read page directory entry at 0x%08" PRIx32,
					__func__, pd_addr);
			return ERROR_FAIL;
		}
		uint64_t pd_entry = target_buffer_get_u64(t, entry_buffer);
		if (!(pd_entry & 0x0000000000000001)) {
			LOG_ERROR("%s page directory entry at 0x%08" PRIx32 " is not present",
					__func__, pd_addr);
			return ERROR_FAIL;
		}

		/* PS bit in PD entry is indicating 4KB or 2MB page size */
		if (pd_entry & 0x0000000000000080) {

			/* 2MB page: PD entry maps the page directly */
			uint32_t page_base = (uint32_t)(pd_entry & 0x00000000FFE00000); /* [31:21] */
			uint32_t offset = addr & 0x001FFFFF; /* [20:0] */
			*physaddr = page_base + offset;
			return ERROR_OK;

		} else {

			/* 4KB page: one more level through the page table */
			uint32_t pt_base = (uint32_t)(pd_entry & 0x00000000FFFFF000); /*[31:12]*/
			uint32_t pt_index = (addr & 0x001FF000) >> 12; /*[20:12]*/
			uint32_t pt_addr = pt_base + (8 * pt_index);
			if (x86_32_common_read_phys_mem(t, pt_addr, 4, 2, entry_buffer) != ERROR_OK) {
				LOG_ERROR("%s couldn't read page table entry at 0x%08" PRIx32, __func__, pt_addr);
				return ERROR_FAIL;
			}
			uint64_t pt_entry = target_buffer_get_u64(t, entry_buffer);
			if (!(pt_entry & 0x0000000000000001)) {
				LOG_ERROR("%s page table entry at 0x%08" PRIx32 " is not present", __func__, pt_addr);
				return ERROR_FAIL;
			}

			uint32_t page_base = (uint32_t)(pt_entry & 0x00000000FFFFF000); /*[31:12]*/
			uint32_t offset = addr & 0x00000FFF; /*[11:0]*/
			*physaddr = page_base + offset;
			return ERROR_OK;
		}
	} else {
		/* non-PAE: 2-level walk with 4-byte entries (PD -> PT) */
		uint32_t pd_base = cr3 & 0xFFFFF000; /* lower 12 bits of CR3 must always be 0 */
		uint32_t pd_index = (addr & 0xFFC00000) >> 22; /* A[31:22] index to PD entry */
		uint32_t pd_addr = pd_base + (4 * pd_index);
		if (x86_32_common_read_phys_mem(t, pd_addr, 4, 1, entry_buffer) != ERROR_OK) {
			LOG_ERROR("%s couldn't read page directory entry at 0x%08" PRIx32, __func__, pd_addr);
			return ERROR_FAIL;
		}
		uint32_t pd_entry = target_buffer_get_u32(t, entry_buffer);
		if (!(pd_entry & 0x00000001)) {
			LOG_ERROR("%s page directory entry at 0x%08" PRIx32 " is not present", __func__, pd_addr);
			return ERROR_FAIL;
		}

		/* Bit 7 in page directory entry is page size.
		 */
		if (pd_entry & 0x00000080) {
			/* 4MB pages */
			uint32_t page_base = pd_entry & 0xFFC00000;
			*physaddr = page_base + (addr & 0x003FFFFF);

		} else {
			/* 4KB pages */
			uint32_t pt_base = pd_entry & 0xFFFFF000; /* A[31:12] is PageTable/Page Base Address */
			uint32_t pt_index = (addr & 0x003FF000) >> 12; /* A[21:12] index to page table entry */
			uint32_t pt_addr = pt_base + (4 * pt_index);
			if (x86_32_common_read_phys_mem(t, pt_addr, 4, 1, entry_buffer) != ERROR_OK) {
				LOG_ERROR("%s couldn't read page table entry at 0x%08" PRIx32, __func__, pt_addr);
				return ERROR_FAIL;
			}
			uint32_t pt_entry = target_buffer_get_u32(t, entry_buffer);
			if (!(pt_entry & 0x00000001)) {
				LOG_ERROR("%s page table entry at 0x%08" PRIx32 " is not present", __func__, pt_addr);
				return ERROR_FAIL;
			}
			uint32_t page_base = pt_entry & 0xFFFFF000; /* A[31:12] is PageTable/Page Base Address */
			*physaddr = page_base + (addr & 0x00000FFF); /* A[11:0] offset to 4KB page in linear address */
		}
	}
	return ERROR_OK;
}
562
563 int x86_32_common_read_memory(struct target *t, uint32_t addr,
564 uint32_t size, uint32_t count, uint8_t *buf)
565 {
566 int retval = ERROR_OK;
567 struct x86_32_common *x86_32 = target_to_x86_32(t);
568 LOG_DEBUG("addr=%08" PRIx32 ", size=%d, count=%d, buf=%p",
569 addr, size, count, buf);
570 check_not_halted(t);
571 if (!count || !buf || !addr) {
572 LOG_ERROR("%s invalid params count=%d, buf=%p, addr=%08" PRIx32,
573 __func__, count, buf, addr);
574 return ERROR_COMMAND_ARGUMENT_INVALID;
575 }
576
577 if (x86_32->is_paging_enabled(t)) {
578 /* all memory accesses from debugger must be physical (CR0.PG == 0)
579 * conversion to physical address space needed
580 */
581 retval = x86_32->disable_paging(t);
582 if (retval != ERROR_OK)
583 return retval;
584 uint32_t physaddr = 0;
585 if (calcaddr_pyhsfromlin(t, addr, &physaddr) != ERROR_OK) {
586 LOG_ERROR("%s failed to calculate physical address from 0x%08" PRIx32, __func__, addr);
587 retval = ERROR_FAIL;
588 }
589 /* TODO: !!! Watch out for page boundaries
590 * for every 4kB, the physical address has to be re-calculated
591 * This should be fixed together with bulk memory reads
592 */
593
594 if (retval == ERROR_OK
595 && x86_32_common_read_phys_mem(t, physaddr, size, count, buf) != ERROR_OK) {
596 LOG_ERROR("%s failed to read memory from physical address 0x%08" PRIx32, __func__, physaddr);
597 retval = ERROR_FAIL;
598 }
599 /* restore PG bit if it was cleared prior (regardless of retval) */
600 retval = x86_32->enable_paging(t);
601 if (retval != ERROR_OK)
602 return retval;
603 } else {
604 /* paging is off - linear address is physical address */
605 if (x86_32_common_read_phys_mem(t, addr, size, count, buf) != ERROR_OK) {
606 LOG_ERROR("%s failed to read memory from address 0%08" PRIx32, __func__, addr);
607 retval = ERROR_FAIL;
608 }
609 }
610
611 return retval;
612 }
613
614 int x86_32_common_write_memory(struct target *t, uint32_t addr,
615 uint32_t size, uint32_t count, const uint8_t *buf)
616 {
617 int retval = ERROR_OK;
618 struct x86_32_common *x86_32 = target_to_x86_32(t);
619 LOG_DEBUG("addr=%08" PRIx32 ", size=%d, count=%d, buf=%p",
620 addr, size, count, buf);
621 check_not_halted(t);
622 if (!count || !buf || !addr) {
623 LOG_ERROR("%s invalid params count=%d, buf=%p, addr=%08" PRIx32,
624 __func__, count, buf, addr);
625 return ERROR_COMMAND_ARGUMENT_INVALID;
626 }
627 if (x86_32->is_paging_enabled(t)) {
628 /* all memory accesses from debugger must be physical (CR0.PG == 0)
629 * conversion to physical address space needed
630 */
631 retval = x86_32->disable_paging(t);
632 if (retval != ERROR_OK)
633 return retval;
634 uint32_t physaddr = 0;
635 if (calcaddr_pyhsfromlin(t, addr, &physaddr) != ERROR_OK) {
636 LOG_ERROR("%s failed to calculate physical address from 0x%08" PRIx32,
637 __func__, addr);
638 retval = ERROR_FAIL;
639 }
640 /* TODO: !!! Watch out for page boundaries
641 * for every 4kB, the physical address has to be re-calculated
642 * This should be fixed together with bulk memory reads
643 */
644 if (retval == ERROR_OK
645 && x86_32_common_write_phys_mem(t, physaddr, size, count, buf) != ERROR_OK) {
646 LOG_ERROR("%s failed to write memory to physical address 0x%08" PRIx32,
647 __func__, physaddr);
648 retval = ERROR_FAIL;
649 }
650 /* restore PG bit if it was cleared prior (regardless of retval) */
651 retval = x86_32->enable_paging(t);
652 if (retval != ERROR_OK)
653 return retval;
654 } else {
655
656 /* paging is off - linear address is physical address */
657 if (x86_32_common_write_phys_mem(t, addr, size, count, buf) != ERROR_OK) {
658 LOG_ERROR("%s failed to write memory to address 0x%08" PRIx32,
659 __func__, addr);
660 retval = ERROR_FAIL;
661 }
662 }
663 return retval;
664 }
665
666 int x86_32_common_read_io(struct target *t, uint32_t addr,
667 uint32_t size, uint8_t *buf)
668 {
669 struct x86_32_common *x86_32 = target_to_x86_32(t);
670 /* if CS.D bit=1 then its a 32 bit code segment, else 16 */
671 bool use32 = (buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32)) & CSAR_D;
672 int retval = ERROR_FAIL;
673 LOG_DEBUG("addr=%08" PRIx32 ", size=%d, buf=%p", addr, size, buf);
674 check_not_halted(t);
675 if (!buf || !addr) {
676 LOG_ERROR("%s invalid params buf=%p, addr=%08" PRIx32, __func__, buf, addr);
677 return retval;
678 }
679 retval = x86_32->write_hw_reg(t, EDX, addr, 0);
680 if (retval != ERROR_OK) {
681 LOG_ERROR("%s error EDX write", __func__);
682 return retval;
683 }
684 switch (size) {
685 case BYTE:
686 if (use32)
687 retval = x86_32->submit_instruction(t, IORDB32);
688 else
689 retval = x86_32->submit_instruction(t, IORDB16);
690 break;
691 case WORD:
692 if (use32)
693 retval = x86_32->submit_instruction(t, IORDH32);
694 else
695 retval = x86_32->submit_instruction(t, IORDH16);
696 break;
697 case DWORD:
698 if (use32)
699 retval = x86_32->submit_instruction(t, IORDW32);
700 else
701 retval = x86_32->submit_instruction(t, IORDW16);
702 break;
703 default:
704 LOG_ERROR("%s invalid read io size", __func__);
705 return ERROR_FAIL;
706 }
707 uint32_t regval = 0;
708 retval = x86_32->read_hw_reg(t, EAX, &regval, 0);
709 if (retval != ERROR_OK) {
710 LOG_ERROR("%s error on read EAX", __func__);
711 return retval;
712 }
713 for (uint8_t i = 0; i < size; i++)
714 buf[i] = (regval >> (i*8)) & 0x000000FF;
715 retval = x86_32->transaction_status(t);
716 if (retval != ERROR_OK) {
717 LOG_ERROR("%s error on io read", __func__);
718 return retval;
719 }
720 return retval;
721 }
722
723 int x86_32_common_write_io(struct target *t, uint32_t addr,
724 uint32_t size, const uint8_t *buf)
725 {
726 struct x86_32_common *x86_32 = target_to_x86_32(t);
727 /* if CS.D bit=1 then its a 32 bit code segment, else 16 */
728 bool use32 = (buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32)) & CSAR_D;
729 LOG_DEBUG("addr=%08" PRIx32 ", size=%d, buf=%p", addr, size, buf);
730 check_not_halted(t);
731 int retval = ERROR_FAIL;
732 if (!buf || !addr) {
733 LOG_ERROR("%s invalid params buf=%p, addr=%08" PRIx32, __func__, buf, addr);
734 return retval;
735 }
736 /* no do the write */
737 retval = x86_32->write_hw_reg(t, EDX, addr, 0);
738 if (retval != ERROR_OK) {
739 LOG_ERROR("%s error on EDX write", __func__);
740 return retval;
741 }
742 uint32_t regval = 0;
743 for (uint8_t i = 0; i < size; i++)
744 regval += (buf[i] << (i*8));
745 retval = x86_32->write_hw_reg(t, EAX, regval, 0);
746 if (retval != ERROR_OK) {
747 LOG_ERROR("%s error on EAX write", __func__);
748 return retval;
749 }
750 switch (size) {
751 case BYTE:
752 if (use32)
753 retval = x86_32->submit_instruction(t, IOWRB32);
754 else
755 retval = x86_32->submit_instruction(t, IOWRB16);
756 break;
757 case WORD:
758 if (use32)
759 retval = x86_32->submit_instruction(t, IOWRH32);
760 else
761 retval = x86_32->submit_instruction(t, IOWRH16);
762 break;
763 case DWORD:
764 if (use32)
765 retval = x86_32->submit_instruction(t, IOWRW32);
766 else
767 retval = x86_32->submit_instruction(t, IOWRW16);
768 break;
769 default:
770 LOG_ERROR("%s invalid write io size", __func__);
771 return ERROR_FAIL;
772 }
773 retval = x86_32->transaction_status(t);
774 if (retval != ERROR_OK) {
775 LOG_ERROR("%s error on io write", __func__);
776 return retval;
777 }
778 return retval;
779 }
780
781 int x86_32_common_add_watchpoint(struct target *t, struct watchpoint *wp)
782 {
783 check_not_halted(t);
784 /* set_watchpoint() will return ERROR_TARGET_RESOURCE_NOT_AVAILABLE if all
785 * hardware registers are gone
786 */
787 return set_watchpoint(t, wp);
788 }
789
790 int x86_32_common_remove_watchpoint(struct target *t, struct watchpoint *wp)
791 {
792 if (check_not_halted(t))
793 return ERROR_TARGET_NOT_HALTED;
794 if (wp->set)
795 unset_watchpoint(t, wp);
796 return ERROR_OK;
797 }
798
799 int x86_32_common_add_breakpoint(struct target *t, struct breakpoint *bp)
800 {
801 LOG_DEBUG("type=%d, addr=%08" PRIx32, bp->type, bp->address);
802 if (check_not_halted(t))
803 return ERROR_TARGET_NOT_HALTED;
804 /* set_breakpoint() will return ERROR_TARGET_RESOURCE_NOT_AVAILABLE if all
805 * hardware registers are gone (for hardware breakpoints)
806 */
807 return set_breakpoint(t, bp);
808 }
809
810 int x86_32_common_remove_breakpoint(struct target *t, struct breakpoint *bp)
811 {
812 LOG_DEBUG("type=%d, addr=%08" PRIx32, bp->type, bp->address);
813 if (check_not_halted(t))
814 return ERROR_TARGET_NOT_HALTED;
815 if (bp->set)
816 unset_breakpoint(t, bp);
817
818 return ERROR_OK;
819 }
820
821 static int set_debug_regs(struct target *t, uint32_t address,
822 uint8_t bp_num, uint8_t bp_type, uint8_t bp_length)
823 {
824 struct x86_32_common *x86_32 = target_to_x86_32(t);
825 LOG_DEBUG("addr=%08" PRIx32 ", bp_num=%d, bp_type=%d, pb_length=%d",
826 address, bp_num, bp_type, bp_length);
827
828 /* DR7 - set global enable */
829 uint32_t dr7 = buf_get_u32(x86_32->cache->reg_list[DR7].value, 0, 32);
830
831 if (bp_length != 1 && bp_length != 2 && bp_length != 4)
832 return ERROR_FAIL;
833
834 if (DR7_BP_FREE(dr7, bp_num))
835 DR7_GLOBAL_ENABLE(dr7, bp_num);
836 else {
837 LOG_ERROR("%s dr7 error, already enabled, val=%08" PRIx32, __func__, dr7);
838 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
839 }
840
841 switch (bp_type) {
842 case 0:
843 /* 00 - only on instruction execution */
844 DR7_SET_EXE(dr7, bp_num);
845 DR7_SET_LENGTH(dr7, bp_num, bp_length);
846 break;
847 case 1:
848 /* 01 - only on data writes */
849 DR7_SET_WRITE(dr7, bp_num);
850 DR7_SET_LENGTH(dr7, bp_num, bp_length);
851 break;
852 case 2:
853 /* 10 UNSUPPORTED - an I/O read and I/O write */
854 LOG_ERROR("%s unsupported feature bp_type=%d", __func__, bp_type);
855 return ERROR_FAIL;
856 break;
857 case 3:
858 /* on data read or data write */
859 DR7_SET_ACCESS(dr7, bp_num);
860 DR7_SET_LENGTH(dr7, bp_num, bp_length);
861 break;
862 default:
863 LOG_ERROR("%s invalid request [only 0-3] bp_type=%d", __func__, bp_type);
864 return ERROR_FAIL;
865 }
866
867 /* update regs in the reg cache ready to be written to hardware
868 * when we exit PM
869 */
870 buf_set_u32(x86_32->cache->reg_list[bp_num+DR0].value, 0, 32, address);
871 x86_32->cache->reg_list[bp_num+DR0].dirty = 1;
872 x86_32->cache->reg_list[bp_num+DR0].valid = 1;
873 buf_set_u32(x86_32->cache->reg_list[DR6].value, 0, 32, PM_DR6);
874 x86_32->cache->reg_list[DR6].dirty = 1;
875 x86_32->cache->reg_list[DR6].valid = 1;
876 buf_set_u32(x86_32->cache->reg_list[DR7].value, 0, 32, dr7);
877 x86_32->cache->reg_list[DR7].dirty = 1;
878 x86_32->cache->reg_list[DR7].valid = 1;
879 return ERROR_OK;
880 }
881
882 static int unset_debug_regs(struct target *t, uint8_t bp_num)
883 {
884 struct x86_32_common *x86_32 = target_to_x86_32(t);
885 LOG_DEBUG("bp_num=%d", bp_num);
886
887 uint32_t dr7 = buf_get_u32(x86_32->cache->reg_list[DR7].value, 0, 32);
888
889 if (!(DR7_BP_FREE(dr7, bp_num))) {
890 DR7_GLOBAL_DISABLE(dr7, bp_num);
891 } else {
892 LOG_ERROR("%s dr7 error, not enabled, val=%08" PRIx32, __func__, dr7);
893 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
894 }
895 /* this will clear rw and len bits */
896 DR7_RESET_RWLEN_BITS(dr7, bp_num);
897
898 /* update regs in the reg cache ready to be written to hardware
899 * when we exit PM
900 */
901 buf_set_u32(x86_32->cache->reg_list[bp_num+DR0].value, 0, 32, 0);
902 x86_32->cache->reg_list[bp_num+DR0].dirty = 1;
903 x86_32->cache->reg_list[bp_num+DR0].valid = 1;
904 buf_set_u32(x86_32->cache->reg_list[DR6].value, 0, 32, PM_DR6);
905 x86_32->cache->reg_list[DR6].dirty = 1;
906 x86_32->cache->reg_list[DR6].valid = 1;
907 buf_set_u32(x86_32->cache->reg_list[DR7].value, 0, 32, dr7);
908 x86_32->cache->reg_list[DR7].dirty = 1;
909 x86_32->cache->reg_list[DR7].valid = 1;
910 return ERROR_OK;
911 }
912
913 static int set_hwbp(struct target *t, struct breakpoint *bp)
914 {
915 struct x86_32_common *x86_32 = target_to_x86_32(t);
916 struct x86_32_dbg_reg *debug_reg_list = x86_32->hw_break_list;
917 uint8_t hwbp_num = 0;
918
919 while (debug_reg_list[hwbp_num].used && (hwbp_num < x86_32->num_hw_bpoints))
920 hwbp_num++;
921 if (hwbp_num >= x86_32->num_hw_bpoints) {
922 LOG_ERROR("%s no free hw breakpoint bpid=%d", __func__, bp->unique_id);
923 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
924 }
925 if (set_debug_regs(t, bp->address, hwbp_num, DR7_BP_EXECUTE, 1) != ERROR_OK)
926 return ERROR_FAIL;
927 bp->set = hwbp_num + 1;
928 debug_reg_list[hwbp_num].used = 1;
929 debug_reg_list[hwbp_num].bp_value = bp->address;
930 LOG_USER("%s hardware breakpoint %d set at 0x%08" PRIx32 " (hwreg=%d)", __func__,
931 bp->unique_id, debug_reg_list[hwbp_num].bp_value, hwbp_num);
932 return ERROR_OK;
933 }
934
935 static int unset_hwbp(struct target *t, struct breakpoint *bp)
936 {
937 struct x86_32_common *x86_32 = target_to_x86_32(t);
938 struct x86_32_dbg_reg *debug_reg_list = x86_32->hw_break_list;
939 int hwbp_num = bp->set - 1;
940
941 if ((hwbp_num < 0) || (hwbp_num >= x86_32->num_hw_bpoints)) {
942 LOG_ERROR("%s invalid breakpoint number=%d, bpid=%d",
943 __func__, hwbp_num, bp->unique_id);
944 return ERROR_OK;
945 }
946
947 if (unset_debug_regs(t, hwbp_num) != ERROR_OK)
948 return ERROR_FAIL;
949 debug_reg_list[hwbp_num].used = 0;
950 debug_reg_list[hwbp_num].bp_value = 0;
951
952 LOG_USER("%s hardware breakpoint %d removed from 0x%08" PRIx32 " (hwreg=%d)",
953 __func__, bp->unique_id, bp->address, hwbp_num);
954 return ERROR_OK;
955 }
956
957 static int set_swbp(struct target *t, struct breakpoint *bp)
958 {
959 struct x86_32_common *x86_32 = target_to_x86_32(t);
960 LOG_DEBUG("id %d", bp->unique_id);
961 uint32_t physaddr;
962 uint8_t opcode = SW_BP_OPCODE;
963 uint8_t readback;
964
965 if (calcaddr_pyhsfromlin(t, bp->address, &physaddr) != ERROR_OK)
966 return ERROR_FAIL;
967 if (read_phys_mem(t, physaddr, 1, 1, bp->orig_instr))
968 return ERROR_FAIL;
969
970 LOG_DEBUG("set software breakpoint - orig byte=%02" PRIx8 "", *bp->orig_instr);
971
972 /* just write the instruction trap byte */
973 if (write_phys_mem(t, physaddr, 1, 1, &opcode))
974 return ERROR_FAIL;
975
976 /* verify that this is not invalid/read-only memory */
977 if (read_phys_mem(t, physaddr, 1, 1, &readback))
978 return ERROR_FAIL;
979
980 if (readback != SW_BP_OPCODE) {
981 LOG_ERROR("%s software breakpoint error at 0x%08" PRIx32 ", check memory",
982 __func__, bp->address);
983 LOG_ERROR("%s readback=%02" PRIx8 " orig=%02" PRIx8 "",
984 __func__, readback, *bp->orig_instr);
985 return ERROR_FAIL;
986 }
987 bp->set = SW_BP_OPCODE; /* just non 0 */
988
989 /* add the memory patch */
990 struct swbp_mem_patch *new_patch = malloc(sizeof(struct swbp_mem_patch));
991 if (new_patch == NULL) {
992 LOG_ERROR("%s out of memory", __func__);
993 return ERROR_FAIL;
994 }
995 new_patch->next = NULL;
996 new_patch->orig_byte = *bp->orig_instr;
997 new_patch->physaddr = physaddr;
998 new_patch->swbp_unique_id = bp->unique_id;
999
1000 struct swbp_mem_patch *addto = x86_32->swbbp_mem_patch_list;
1001 if (addto == NULL)
1002 x86_32->swbbp_mem_patch_list = new_patch;
1003 else {
1004 while (addto->next != NULL)
1005 addto = addto->next;
1006 addto->next = new_patch;
1007 }
1008 LOG_USER("%s software breakpoint %d set at 0x%08" PRIx32,
1009 __func__, bp->unique_id, bp->address);
1010 return ERROR_OK;
1011 }
1012
1013 static int unset_swbp(struct target *t, struct breakpoint *bp)
1014 {
1015 struct x86_32_common *x86_32 = target_to_x86_32(t);
1016 LOG_DEBUG("id %d", bp->unique_id);
1017 uint32_t physaddr;
1018 uint8_t current_instr;
1019
1020 /* check that user program has not modified breakpoint instruction */
1021 if (calcaddr_pyhsfromlin(t, bp->address, &physaddr) != ERROR_OK)
1022 return ERROR_FAIL;
1023 if (read_phys_mem(t, physaddr, 1, 1, &current_instr))
1024 return ERROR_FAIL;
1025
1026 if (current_instr == SW_BP_OPCODE) {
1027 if (write_phys_mem(t, physaddr, 1, 1, bp->orig_instr))
1028 return ERROR_FAIL;
1029 } else {
1030 LOG_ERROR("%s software breakpoint remove error at 0x%08" PRIx32 ", check memory",
1031 __func__, bp->address);
1032 LOG_ERROR("%s current=%02" PRIx8 " orig=%02" PRIx8 "",
1033 __func__, current_instr, *bp->orig_instr);
1034 return ERROR_FAIL;
1035 }
1036
1037 /* remove from patch */
1038 struct swbp_mem_patch *iter = x86_32->swbbp_mem_patch_list;
1039 if (iter != NULL) {
1040 if (iter->swbp_unique_id == bp->unique_id) {
1041 /* it's the first item */
1042 x86_32->swbbp_mem_patch_list = iter->next;
1043 free(iter);
1044 } else {
1045 while (iter->next != NULL && iter->next->swbp_unique_id != bp->unique_id)
1046 iter = iter->next;
1047 if (iter->next != NULL) {
1048 /* it's the next one */
1049 struct swbp_mem_patch *freeme = iter->next;
1050 iter->next = iter->next->next;
1051 free(freeme);
1052 }
1053 }
1054 }
1055
1056 LOG_USER("%s software breakpoint %d removed from 0x%08" PRIx32,
1057 __func__, bp->unique_id, bp->address);
1058 return ERROR_OK;
1059 }
1060
1061 static int set_breakpoint(struct target *t, struct breakpoint *bp)
1062 {
1063 int error = ERROR_OK;
1064 struct x86_32_common *x86_32 = target_to_x86_32(t);
1065 LOG_DEBUG("type=%d, addr=%08" PRIx32, bp->type, bp->address);
1066 if (bp->set) {
1067 LOG_ERROR("breakpoint already set");
1068 return error;
1069 }
1070 if (bp->type == BKPT_HARD) {
1071 error = set_hwbp(t, bp);
1072 if (error != ERROR_OK) {
1073 LOG_ERROR("%s error setting hardware breakpoint at 0x%08" PRIx32,
1074 __func__, bp->address);
1075 return error;
1076 }
1077 } else {
1078 if (x86_32->sw_bpts_supported(t)) {
1079 error = set_swbp(t, bp);
1080 if (error != ERROR_OK) {
1081 LOG_ERROR("%s error setting software breakpoint at 0x%08" PRIx32,
1082 __func__, bp->address);
1083 return error;
1084 }
1085 } else {
1086 LOG_ERROR("%s core doesn't support SW breakpoints", __func__);
1087 error = ERROR_FAIL;
1088 return ERROR_FAIL;
1089 }
1090 }
1091 return error;
1092 }
1093
1094 static int unset_breakpoint(struct target *t, struct breakpoint *bp)
1095 {
1096 LOG_DEBUG("type=%d, addr=%08" PRIx32, bp->type, bp->address);
1097 if (!bp->set) {
1098 LOG_WARNING("breakpoint not set");
1099 return ERROR_OK;
1100 }
1101
1102 if (bp->type == BKPT_HARD) {
1103 if (unset_hwbp(t, bp) != ERROR_OK) {
1104 LOG_ERROR("%s error removing hardware breakpoint at 0x%08" PRIx32,
1105 __func__, bp->address);
1106 return ERROR_FAIL;
1107 }
1108 } else {
1109 if (unset_swbp(t, bp) != ERROR_OK) {
1110 LOG_ERROR("%s error removing software breakpoint at 0x%08" PRIx32,
1111 __func__, bp->address);
1112 return ERROR_FAIL;
1113 }
1114 }
1115 bp->set = 0;
1116 return ERROR_OK;
1117 }
1118
1119 static int set_watchpoint(struct target *t, struct watchpoint *wp)
1120 {
1121 struct x86_32_common *x86_32 = target_to_x86_32(t);
1122 struct x86_32_dbg_reg *debug_reg_list = x86_32->hw_break_list;
1123 int wp_num = 0;
1124 LOG_DEBUG("type=%d, addr=%08" PRIx32, wp->rw, wp->address);
1125
1126 if (wp->set) {
1127 LOG_ERROR("%s watchpoint already set", __func__);
1128 return ERROR_OK;
1129 }
1130
1131 if (wp->rw == WPT_READ) {
1132 LOG_ERROR("%s no support for 'read' watchpoints, use 'access' or 'write'"
1133 , __func__);
1134 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1135 }
1136
1137 while (debug_reg_list[wp_num].used && (wp_num < x86_32->num_hw_bpoints))
1138 wp_num++;
1139 if (wp_num >= x86_32->num_hw_bpoints) {
1140 LOG_ERROR("%s no debug registers left", __func__);
1141 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1142 }
1143
1144 if (wp->length != 4 && wp->length != 2 && wp->length != 1) {
1145 LOG_ERROR("%s only watchpoints of length 1, 2 or 4 are supported", __func__);
1146 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1147 }
1148
1149 switch (wp->rw) {
1150 case WPT_WRITE:
1151 if (set_debug_regs(t, wp->address, wp_num,
1152 DR7_BP_WRITE, wp->length) != ERROR_OK) {
1153 return ERROR_FAIL;
1154 }
1155 break;
1156 case WPT_ACCESS:
1157 if (set_debug_regs(t, wp->address, wp_num, DR7_BP_READWRITE,
1158 wp->length) != ERROR_OK) {
1159 return ERROR_FAIL;
1160 }
1161 break;
1162 default:
1163 LOG_ERROR("%s only 'access' or 'write' watchpoints are supported", __func__);
1164 break;
1165 }
1166 wp->set = wp_num + 1;
1167 debug_reg_list[wp_num].used = 1;
1168 debug_reg_list[wp_num].bp_value = wp->address;
1169 LOG_USER("'%s' watchpoint %d set at 0x%08" PRIx32 " with length %d (hwreg=%d)",
1170 wp->rw == WPT_READ ? "read" : wp->rw == WPT_WRITE ?
1171 "write" : wp->rw == WPT_ACCESS ? "access" : "?",
1172 wp->unique_id, wp->address, wp->length, wp_num);
1173 return ERROR_OK;
1174 }
1175
1176 static int unset_watchpoint(struct target *t, struct watchpoint *wp)
1177 {
1178 struct x86_32_common *x86_32 = target_to_x86_32(t);
1179 struct x86_32_dbg_reg *debug_reg_list = x86_32->hw_break_list;
1180 LOG_DEBUG("type=%d, addr=%08" PRIx32, wp->rw, wp->address);
1181 if (!wp->set) {
1182 LOG_WARNING("watchpoint not set");
1183 return ERROR_OK;
1184 }
1185
1186 int wp_num = wp->set - 1;
1187 if ((wp_num < 0) || (wp_num >= x86_32->num_hw_bpoints)) {
1188 LOG_DEBUG("Invalid FP Comparator number in watchpoint");
1189 return ERROR_OK;
1190 }
1191 if (unset_debug_regs(t, wp_num) != ERROR_OK)
1192 return ERROR_FAIL;
1193
1194 debug_reg_list[wp_num].used = 0;
1195 debug_reg_list[wp_num].bp_value = 0;
1196 wp->set = 0;
1197
1198 LOG_USER("'%s' watchpoint %d removed from 0x%08" PRIx32 " with length %d (hwreg=%d)",
1199 wp->rw == WPT_READ ? "read" : wp->rw == WPT_WRITE ?
1200 "write" : wp->rw == WPT_ACCESS ? "access" : "?",
1201 wp->unique_id, wp->address, wp->length, wp_num);
1202
1203 return ERROR_OK;
1204 }
1205
1206 static int read_hw_reg_to_cache(struct target *t, int num)
1207 {
1208 uint32_t reg_value;
1209 struct x86_32_common *x86_32 = target_to_x86_32(t);
1210
1211 if (check_not_halted(t))
1212 return ERROR_TARGET_NOT_HALTED;
1213 if ((num < 0) || (num >= x86_32->get_num_user_regs(t)))
1214 return ERROR_COMMAND_SYNTAX_ERROR;
1215 if (x86_32->read_hw_reg(t, num, &reg_value, 1) != ERROR_OK) {
1216 LOG_ERROR("%s fail for %s", x86_32->cache->reg_list[num].name, __func__);
1217 return ERROR_FAIL;
1218 }
1219 LOG_DEBUG("reg %s value 0x%08" PRIx32,
1220 x86_32->cache->reg_list[num].name, reg_value);
1221 return ERROR_OK;
1222 }
1223
1224 static int write_hw_reg_from_cache(struct target *t, int num)
1225 {
1226 struct x86_32_common *x86_32 = target_to_x86_32(t);
1227 if (check_not_halted(t))
1228 return ERROR_TARGET_NOT_HALTED;
1229 if ((num < 0) || (num >= x86_32->get_num_user_regs(t)))
1230 return ERROR_COMMAND_SYNTAX_ERROR;
1231 if (x86_32->write_hw_reg(t, num, 0, 1) != ERROR_OK) {
1232 LOG_ERROR("%s fail for %s", x86_32->cache->reg_list[num].name, __func__);
1233 return ERROR_FAIL;
1234 }
1235 LOG_DEBUG("reg %s value 0x%08" PRIx32, x86_32->cache->reg_list[num].name,
1236 buf_get_u32(x86_32->cache->reg_list[num].value, 0, 32));
1237 return ERROR_OK;
1238 }
1239
1240 /* x86 32 commands */
/* Pretty-print a buffer of I/O-port read results, 32 bytes per output
 * line, each line prefixed with its starting address. size selects the
 * per-value width (1, 2 or 4 bytes); values are decoded with the
 * target's endianness.
 */
static void handle_iod_output(struct command_context *cmd_ctx,
		struct target *target, uint32_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	const unsigned line_bytecnt = 32;
	unsigned per_line = line_bytecnt / size;
	char line[line_bytecnt * 4 + 1];
	unsigned used = 0;
	const char *fmt;

	switch (size) {
	case 4:
		fmt = "%8.8x ";
		break;
	case 2:
		fmt = "%4.4x ";
		break;
	case 1:
		fmt = "%2.2x ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("%s invalid memory read size: %u", __func__, size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		/* start a fresh line with its address prefix */
		if (i % per_line == 0)
			used += snprintf(line + used, sizeof(line) - used,
					"0x%8.8x: ", (unsigned)(address + (i*size)));

		const uint8_t *src = buffer + i * size;
		uint32_t value = 0;
		switch (size) {
		case 4:
			value = target_buffer_get_u32(target, src);
			break;
		case 2:
			value = target_buffer_get_u16(target, src);
			break;
		case 1:
			value = *src;
		}
		used += snprintf(line + used, sizeof(line) - used, fmt, value);

		/* flush at the end of a full line or at the last value */
		if ((i % per_line == per_line - 1) || (i == count - 1)) {
			command_print(cmd_ctx, "%s", line);
			used = 0;
		}
	}
}
1298
1299 COMMAND_HANDLER(handle_iod_command)
1300 {
1301 if (CMD_ARGC != 1)
1302 return ERROR_COMMAND_SYNTAX_ERROR;
1303
1304 uint32_t address;
1305 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
1306 if (address > 0xffff) {
1307 LOG_ERROR("%s IA-32 I/O space is 2^16, %08" PRIx32 " exceeds max", __func__, address);
1308 return ERROR_COMMAND_SYNTAX_ERROR;
1309 }
1310
1311 unsigned size = 0;
1312 switch (CMD_NAME[2]) {
1313 case 'w':
1314 size = 4;
1315 break;
1316 case 'h':
1317 size = 2;
1318 break;
1319 case 'b':
1320 size = 1;
1321 break;
1322 default:
1323 return ERROR_COMMAND_SYNTAX_ERROR;
1324 }
1325 unsigned count = 1;
1326 uint8_t *buffer = calloc(count, size);
1327 struct target *target = get_current_target(CMD_CTX);
1328 int retval = x86_32_common_read_io(target, address, size, buffer);
1329 if (ERROR_OK == retval)
1330 handle_iod_output(CMD_CTX, target, address, size, count, buffer);
1331 free(buffer);
1332 return retval;
1333 }
1334
1335 static int target_fill_io(struct target *target,
1336 uint32_t address,
1337 unsigned data_size,
1338 /* value */
1339 uint32_t b)
1340 {
1341 LOG_DEBUG("address=%08X, data_size=%d, b=%08X",
1342 address, data_size, b);
1343 uint8_t target_buf[data_size];
1344 switch (data_size) {
1345 case 4:
1346 target_buffer_set_u32(target, target_buf, b);
1347 break;
1348 case 2:
1349 target_buffer_set_u16(target, target_buf, b);
1350 break;
1351 case 1:
1352 target_buf[0] = (b & 0x0ff);
1353 break;
1354 default:
1355 exit(-1);
1356 }
1357 return x86_32_common_write_io(target, address, data_size, target_buf);
1358 }
1359
1360 COMMAND_HANDLER(handle_iow_command)
1361 {
1362 if (CMD_ARGC != 2)
1363 return ERROR_COMMAND_SYNTAX_ERROR;
1364 uint32_t address;
1365 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
1366 uint32_t value;
1367 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
1368 struct target *target = get_current_target(CMD_CTX);
1369
1370 unsigned wordsize;
1371 switch (CMD_NAME[2]) {
1372 case 'w':
1373 wordsize = 4;
1374 break;
1375 case 'h':
1376 wordsize = 2;
1377 break;
1378 case 'b':
1379 wordsize = 1;
1380 break;
1381 default:
1382 return ERROR_COMMAND_SYNTAX_ERROR;
1383 }
1384 return target_fill_io(target, address, wordsize, value);
1385 }
1386
/* EXEC-mode commands registered under the "x86_32" prefix:
 * iw{w,h,b} writes and id{w,h,b} reads a word/halfword/byte I/O port;
 * both handlers derive the access width from the command name's third
 * letter (see handle_iow_command / handle_iod_command).
 */
static const struct command_registration x86_32_exec_command_handlers[] = {
	{
		.name = "iww",
		.mode = COMMAND_EXEC,
		.handler = handle_iow_command,
		.help = "write I/O port word",
		.usage = "port data[word]",
	},
	{
		.name = "iwh",
		.mode = COMMAND_EXEC,
		.handler = handle_iow_command,
		.help = "write I/O port halfword",
		.usage = "port data[halfword]",
	},
	{
		.name = "iwb",
		.mode = COMMAND_EXEC,
		.handler = handle_iow_command,
		.help = "write I/O port byte",
		.usage = "port data[byte]",
	},
	{
		.name = "idw",
		.mode = COMMAND_EXEC,
		.handler = handle_iod_command,
		.help = "display I/O port word",
		.usage = "port",
	},
	{
		.name = "idh",
		.mode = COMMAND_EXEC,
		.handler = handle_iod_command,
		.help = "display I/O port halfword",
		.usage = "port",
	},
	{
		.name = "idb",
		.mode = COMMAND_EXEC,
		.handler = handle_iod_command,
		.help = "display I/O port byte",
		.usage = "port",
	},

	COMMAND_REGISTRATION_DONE
};
1433
/* Public command table exported to x86 32-bit targets: registers the
 * "x86_32" command group and chains in the I/O port sub-commands above.
 */
const struct command_registration x86_32_command_handlers[] = {
	{
		.name = "x86_32",
		.mode = COMMAND_ANY,
		.help = "x86_32 target commands",
		.usage = "",
		.chain = x86_32_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};