4acd4275470b89aad1f473717191e86596227484
[openocd.git] / src / target / riscv / riscv-013.c
1 /*
2 * Support for RISC-V, debug version 0.13, which is currently (2/4/17) the
3 * latest draft.
4 */
5
6 #include <assert.h>
7 #include <stdlib.h>
8 #include <time.h>
9
10 #ifdef HAVE_CONFIG_H
11 #include "config.h"
12 #endif
13
14 #include "target/target.h"
15 #include "target/algorithm.h"
16 #include "target/target_type.h"
17 #include "log.h"
18 #include "jtag/jtag.h"
19 #include "target/register.h"
20 #include "target/breakpoints.h"
21 #include "helper/time_support.h"
22 #include "helper/list.h"
23 #include "riscv.h"
24 #include "debug_defines.h"
25 #include "rtos/rtos.h"
26 #include "program.h"
27 #include "asm.h"
28 #include "batch.h"
29
30 #define DMI_DATA1 (DMI_DATA0 + 1)
31 #define DMI_PROGBUF1 (DMI_PROGBUF0 + 1)
32
33 static int riscv013_on_step_or_resume(struct target *target, bool step);
34 static int riscv013_step_or_resume_current_hart(struct target *target, bool step);
35 static void riscv013_clear_abstract_error(struct target *target);
36
37 /* Implementations of the functions in riscv_info_t. */
38 static int riscv013_get_register(struct target *target,
39 riscv_reg_t *value, int hid, int rid);
40 static int riscv013_set_register(struct target *target, int hartid, int regid, uint64_t value);
41 static int riscv013_select_current_hart(struct target *target);
42 static int riscv013_halt_current_hart(struct target *target);
43 static int riscv013_resume_current_hart(struct target *target);
44 static int riscv013_step_current_hart(struct target *target);
45 static int riscv013_on_halt(struct target *target);
46 static int riscv013_on_step(struct target *target);
47 static int riscv013_on_resume(struct target *target);
48 static bool riscv013_is_halted(struct target *target);
49 static enum riscv_halt_reason riscv013_halt_reason(struct target *target);
50 static int riscv013_write_debug_buffer(struct target *target, unsigned index,
51 riscv_insn_t d);
52 static riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned
53 index);
54 static int riscv013_execute_debug_buffer(struct target *target);
55 static void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d);
56 static void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a);
57 static int riscv013_dmi_write_u64_bits(struct target *target);
58 static void riscv013_fill_dmi_nop_u64(struct target *target, char *buf);
59 static int register_read(struct target *target, uint64_t *value, uint32_t number);
60 static int register_read_direct(struct target *target, uint64_t *value, uint32_t number);
61 static int register_write_direct(struct target *target, unsigned number,
62 uint64_t value);
63 static int read_memory(struct target *target, target_addr_t address,
64 uint32_t size, uint32_t count, uint8_t *buffer);
65 static int write_memory(struct target *target, target_addr_t address,
66 uint32_t size, uint32_t count, const uint8_t *buffer);
67
68 /**
69 * Since almost everything can be accomplish by scanning the dbus register, all
70 * functions here assume dbus is already selected. The exception are functions
71 * called directly by OpenOCD, which can't assume anything about what's
72 * currently in IR. They should set IR to dbus explicitly.
73 */
74
/* Extract the (contiguous) bit field selected by `mask` from `reg`.
 * `(mask) & ~((mask) << 1)` isolates the lowest set bit of the mask, so the
 * division shifts the field down to bit 0 without needing a shift count. */
#define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
/* Return `reg` with the field selected by `mask` replaced by `val` (same
 * lowest-set-bit trick; the multiply shifts `val` up into position). */
#define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))

/* Element count of a true array (do not use on a pointer — it would silently
 * compute the wrong value). */
#define DIM(x) (sizeof(x)/sizeof(*x))

/* dcsr.cause values reported by the hart when it enters debug mode. */
#define CSR_DCSR_CAUSE_SWBP 1
#define CSR_DCSR_CAUSE_TRIGGER 2
#define CSR_DCSR_CAUSE_DEBUGINT 3
#define CSR_DCSR_CAUSE_STEP 4
#define CSR_DCSR_CAUSE_HALT 5

/* Declare a local `r` pointing at this target's 0.13-specific info struct. */
#define RISCV013_INFO(r) riscv013_info_t *r = get_info(target)
87
88 /*** JTAG registers. ***/
89
/* Operation codes scanned into the DTM's DMI register (see dmi_scan()). */
typedef enum {
	DMI_OP_NOP = 0,
	DMI_OP_READ = 1,
	DMI_OP_WRITE = 2
} dmi_op_t;
/* Status codes scanned back out in the op field of a DMI access. */
typedef enum {
	DMI_STATUS_SUCCESS = 0,
	DMI_STATUS_FAILED = 2,
	DMI_STATUS_BUSY = 3
} dmi_status_t;

/* Generic tri-state result code. NOTE(review): not referenced in this chunk;
 * consumers are elsewhere in the file. */
typedef enum {
	RE_OK,
	RE_FAIL,
	RE_AGAIN
} riscv_error_t;

/* Scratch slots. NOTE(review): not referenced in this chunk; consumers are
 * elsewhere in the file. */
typedef enum slot {
	SLOT0,
	SLOT1,
	SLOT_LAST,
} slot_t;
112
113 /*** Debug Bus registers. ***/
114
/* abstractcs.cmderr values: why the last abstract command failed.
 * See wait_for_idle() for the matching human-readable strings. */
#define CMDERR_NONE 0
#define CMDERR_BUSY 1
#define CMDERR_NOT_SUPPORTED 2
#define CMDERR_EXCEPTION 3
#define CMDERR_HALT_RESUME 4
#define CMDERR_OTHER 7
121
122 /*** Info about the core being debugged. ***/
123
/* Description of a hardware watchpoint/breakpoint trigger to set up on the
 * hart. NOTE(review): only the definition is visible in this chunk; the
 * trigger-programming code lives elsewhere in the file. */
struct trigger {
	uint64_t address;
	uint32_t length;
	uint64_t mask;
	uint64_t value;
	/* Which accesses fire the trigger. */
	bool read, write, execute;
	int unique_id;
};

/* Tri-state for lazily-probed capabilities: MAYBE until we've checked. */
typedef enum {
	YNM_MAYBE,
	YNM_YES,
	YNM_NO
} yes_no_maybe_t;

/* Per-Debug-Module state, shared by every target behind the same TAP.
 * Linked into the global dm_list; see get_dm(). */
typedef struct {
	struct list_head list;
	int abs_chain_position;
	/* Indicates we already reset this DM, so don't need to do it again. */
	bool was_reset;
	/* Targets that are connected to this DM. */
	struct list_head target_list;
	/* The currently selected hartid on this DM. */
	int current_hartid;
} dm013_info_t;

/* One entry in a dm013_info_t's target_list. */
typedef struct {
	struct list_head list;
	struct target *target;
} target_list_t;
154
/* Per-target state specific to debug spec 0.13; reached through
 * riscv_info_t.version_specific (see get_info()). */
typedef struct {
	/* Number of address bits in the dbus register. */
	unsigned abits;
	/* Number of abstract command data registers. */
	unsigned datacount;
	/* Number of words in the Program Buffer. */
	unsigned progbufsize;

	/* We cache the read-only bits of sbcs here. */
	uint32_t sbcs;

	/* Whether the program buffer can be written; YNM_MAYBE until
	 * examine_progbuf() has run. */
	yes_no_maybe_t progbuf_writable;
	/* We only need the address so that we know the alignment of the buffer. */
	riscv_addr_t progbuf_address;

	/* Number of run-test/idle cycles the target requests we do after each dbus
	 * access. */
	unsigned int dtmcontrol_idle;

	/* This value is incremented every time a dbus access comes back as "busy".
	 * It's used to determine how many run-test/idle cycles to feed the target
	 * in between accesses. */
	unsigned int dmi_busy_delay;

	/* Number of run-test/idle cycles to add between consecutive bus master
	 * reads/writes respectively. */
	unsigned int bus_master_write_delay, bus_master_read_delay;

	/* This value is increased every time we tried to execute two commands
	 * consecutively, and the second one failed because the previous hadn't
	 * completed yet. It's used to add extra run-test/idle cycles after
	 * starting a command, so we don't have to waste time checking for busy to
	 * go low. */
	unsigned int ac_busy_delay;

	/* NOTE(review): consumed outside this chunk; semantics not visible here. */
	bool need_strict_step;

	/* Start optimistic and latch to false the first time the DM reports
	 * "not supported" for the corresponding abstract access; see
	 * register_read_abstract()/register_write_abstract(). */
	bool abstract_read_csr_supported;
	bool abstract_write_csr_supported;
	bool abstract_read_fpr_supported;
	bool abstract_write_fpr_supported;

	/* When a function returns some error due to a failure indicated by the
	 * target in cmderr, the caller can look here to see what that error was.
	 * (Compare with errno.) */
	uint8_t cmderr;

	/* Some fields from hartinfo. */
	uint8_t datasize;
	uint8_t dataaccess;
	int16_t dataaddr;

	/* The width of the hartsel field. */
	unsigned hartsellen;

	/* DM that provides access to this target. */
	dm013_info_t *dm;
} riscv013_info_t;

/* Global list of every Debug Module we've encountered; see get_dm(). */
LIST_HEAD(dm_list);
215
216 static riscv013_info_t *get_info(const struct target *target)
217 {
218 riscv_info_t *info = (riscv_info_t *) target->arch_info;
219 return (riscv013_info_t *) info->version_specific;
220 }
221
222 /**
223 * Return the DM structure for this target. If there isn't one, find it in the
224 * global list of DMs. If it's not in there, then create one and initialize it
225 * to 0.
226 */
227 static dm013_info_t *get_dm(struct target *target)
228 {
229 RISCV013_INFO(info);
230 if (info->dm)
231 return info->dm;
232
233 int abs_chain_position = target->tap->abs_chain_position;
234
235 dm013_info_t *entry;
236 dm013_info_t *dm = NULL;
237 list_for_each_entry(entry, &dm_list, list) {
238 if (entry->abs_chain_position == abs_chain_position) {
239 dm = entry;
240 break;
241 }
242 }
243
244 if (!dm) {
245 dm = calloc(1, sizeof(dm013_info_t));
246 dm->abs_chain_position = abs_chain_position;
247 dm->current_hartid = -1;
248 INIT_LIST_HEAD(&dm->target_list);
249 list_add(&dm->list, &dm_list);
250 }
251
252 info->dm = dm;
253 target_list_t *target_entry;
254 list_for_each_entry(target_entry, &dm->target_list, list) {
255 if (target_entry->target == target)
256 return dm;
257 }
258 target_entry = calloc(1, sizeof(*target_entry));
259 target_entry->target = target;
260 list_add(&target_entry->list, &dm->target_list);
261
262 return dm;
263 }
264
265 static uint32_t set_hartsel(uint32_t initial, uint32_t index)
266 {
267 initial &= ~DMI_DMCONTROL_HARTSELLO;
268 initial &= ~DMI_DMCONTROL_HARTSELHI;
269
270 uint32_t index_lo = index & ((1 << DMI_DMCONTROL_HARTSELLO_LENGTH) - 1);
271 initial |= index_lo << DMI_DMCONTROL_HARTSELLO_OFFSET;
272 uint32_t index_hi = index >> DMI_DMCONTROL_HARTSELLO_LENGTH;
273 assert(index_hi < 1 << DMI_DMCONTROL_HARTSELHI_LENGTH);
274 initial |= index_hi << DMI_DMCONTROL_HARTSELHI_OFFSET;
275
276 return initial;
277 }
278
279 static void decode_dmi(char *text, unsigned address, unsigned data)
280 {
281 static const struct {
282 unsigned address;
283 uint64_t mask;
284 const char *name;
285 } description[] = {
286 { DMI_DMCONTROL, DMI_DMCONTROL_HALTREQ, "haltreq" },
287 { DMI_DMCONTROL, DMI_DMCONTROL_RESUMEREQ, "resumereq" },
288 { DMI_DMCONTROL, DMI_DMCONTROL_HARTRESET, "hartreset" },
289 { DMI_DMCONTROL, DMI_DMCONTROL_HASEL, "hasel" },
290 { DMI_DMCONTROL, DMI_DMCONTROL_HARTSELHI, "hartselhi" },
291 { DMI_DMCONTROL, DMI_DMCONTROL_HARTSELLO, "hartsello" },
292 { DMI_DMCONTROL, DMI_DMCONTROL_NDMRESET, "ndmreset" },
293 { DMI_DMCONTROL, DMI_DMCONTROL_DMACTIVE, "dmactive" },
294 { DMI_DMCONTROL, DMI_DMCONTROL_ACKHAVERESET, "ackhavereset" },
295
296 { DMI_DMSTATUS, DMI_DMSTATUS_IMPEBREAK, "impebreak" },
297 { DMI_DMSTATUS, DMI_DMSTATUS_ALLHAVERESET, "allhavereset" },
298 { DMI_DMSTATUS, DMI_DMSTATUS_ANYHAVERESET, "anyhavereset" },
299 { DMI_DMSTATUS, DMI_DMSTATUS_ALLRESUMEACK, "allresumeack" },
300 { DMI_DMSTATUS, DMI_DMSTATUS_ANYRESUMEACK, "anyresumeack" },
301 { DMI_DMSTATUS, DMI_DMSTATUS_ALLNONEXISTENT, "allnonexistent" },
302 { DMI_DMSTATUS, DMI_DMSTATUS_ANYNONEXISTENT, "anynonexistent" },
303 { DMI_DMSTATUS, DMI_DMSTATUS_ALLUNAVAIL, "allunavail" },
304 { DMI_DMSTATUS, DMI_DMSTATUS_ANYUNAVAIL, "anyunavail" },
305 { DMI_DMSTATUS, DMI_DMSTATUS_ALLRUNNING, "allrunning" },
306 { DMI_DMSTATUS, DMI_DMSTATUS_ANYRUNNING, "anyrunning" },
307 { DMI_DMSTATUS, DMI_DMSTATUS_ALLHALTED, "allhalted" },
308 { DMI_DMSTATUS, DMI_DMSTATUS_ANYHALTED, "anyhalted" },
309 { DMI_DMSTATUS, DMI_DMSTATUS_AUTHENTICATED, "authenticated" },
310 { DMI_DMSTATUS, DMI_DMSTATUS_AUTHBUSY, "authbusy" },
311 { DMI_DMSTATUS, DMI_DMSTATUS_DEVTREEVALID, "devtreevalid" },
312 { DMI_DMSTATUS, DMI_DMSTATUS_VERSION, "version" },
313
314 { DMI_ABSTRACTCS, DMI_ABSTRACTCS_PROGBUFSIZE, "progbufsize" },
315 { DMI_ABSTRACTCS, DMI_ABSTRACTCS_BUSY, "busy" },
316 { DMI_ABSTRACTCS, DMI_ABSTRACTCS_CMDERR, "cmderr" },
317 { DMI_ABSTRACTCS, DMI_ABSTRACTCS_DATACOUNT, "datacount" },
318
319 { DMI_COMMAND, DMI_COMMAND_CMDTYPE, "cmdtype" },
320
321 { DMI_SBCS, DMI_SBCS_SBREADONADDR, "sbreadonaddr" },
322 { DMI_SBCS, DMI_SBCS_SBACCESS, "sbaccess" },
323 { DMI_SBCS, DMI_SBCS_SBAUTOINCREMENT, "sbautoincrement" },
324 { DMI_SBCS, DMI_SBCS_SBREADONDATA, "sbreadondata" },
325 { DMI_SBCS, DMI_SBCS_SBERROR, "sberror" },
326 { DMI_SBCS, DMI_SBCS_SBASIZE, "sbasize" },
327 { DMI_SBCS, DMI_SBCS_SBACCESS128, "sbaccess128" },
328 { DMI_SBCS, DMI_SBCS_SBACCESS64, "sbaccess64" },
329 { DMI_SBCS, DMI_SBCS_SBACCESS32, "sbaccess32" },
330 { DMI_SBCS, DMI_SBCS_SBACCESS16, "sbaccess16" },
331 { DMI_SBCS, DMI_SBCS_SBACCESS8, "sbaccess8" },
332 };
333
334 text[0] = 0;
335 for (unsigned i = 0; i < DIM(description); i++) {
336 if (description[i].address == address) {
337 uint64_t mask = description[i].mask;
338 unsigned value = get_field(data, mask);
339 if (value) {
340 if (i > 0)
341 *(text++) = ' ';
342 if (mask & (mask >> 1)) {
343 /* If the field is more than 1 bit wide. */
344 sprintf(text, "%s=%d", description[i].name, value);
345 } else {
346 strcpy(text, description[i].name);
347 }
348 text += strlen(text);
349 }
350 }
351 }
352 }
353
354 static void dump_field(const struct scan_field *field)
355 {
356 static const char * const op_string[] = {"-", "r", "w", "?"};
357 static const char * const status_string[] = {"+", "?", "F", "b"};
358
359 if (debug_level < LOG_LVL_DEBUG)
360 return;
361
362 uint64_t out = buf_get_u64(field->out_value, 0, field->num_bits);
363 unsigned int out_op = get_field(out, DTM_DMI_OP);
364 unsigned int out_data = get_field(out, DTM_DMI_DATA);
365 unsigned int out_address = out >> DTM_DMI_ADDRESS_OFFSET;
366
367 uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
368 unsigned int in_op = get_field(in, DTM_DMI_OP);
369 unsigned int in_data = get_field(in, DTM_DMI_DATA);
370 unsigned int in_address = in >> DTM_DMI_ADDRESS_OFFSET;
371
372 log_printf_lf(LOG_LVL_DEBUG,
373 __FILE__, __LINE__, "scan",
374 "%db %s %08x @%02x -> %s %08x @%02x",
375 field->num_bits,
376 op_string[out_op], out_data, out_address,
377 status_string[in_op], in_data, in_address);
378
379 char out_text[500];
380 char in_text[500];
381 decode_dmi(out_text, out_address, out_data);
382 decode_dmi(in_text, in_address, in_data);
383 if (in_text[0] || out_text[0]) {
384 log_printf_lf(LOG_LVL_DEBUG, __FILE__, __LINE__, "scan", "%s -> %s",
385 out_text, in_text);
386 }
387 }
388
389 /*** Utility functions. ***/
390
391 static void select_dmi(struct target *target)
392 {
393 static uint8_t ir_dmi[1] = {DTM_DMI};
394 struct scan_field field = {
395 .num_bits = target->tap->ir_length,
396 .out_value = ir_dmi,
397 .in_value = NULL,
398 .check_value = NULL,
399 .check_mask = NULL
400 };
401
402 jtag_add_ir_scan(target->tap, &field, TAP_IDLE);
403 }
404
/*
 * Scan `out` through the DTM's dtmcs (DTM control/status) register and
 * return the 32 bits scanned back out. Leaves DMI selected in IR afterwards.
 *
 * NOTE(review): on a JTAG queue failure this returns the OpenOCD error code
 * cast to uint32_t, which callers cannot reliably distinguish from real scan
 * data — confirm whether any caller cares.
 */
static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
{
	struct scan_field field;
	uint8_t in_value[4];
	uint8_t out_value[4];

	buf_set_u32(out_value, 0, 32, out);

	jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);

	field.num_bits = 32;
	field.out_value = out_value;
	field.in_value = in_value;
	jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);

	/* Always return to dmi. */
	select_dmi(target);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("failed jtag scan: %d", retval);
		return retval;
	}

	uint32_t in = buf_get_u32(field.in_value, 0, 32);
	LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);

	return in;
}
434
435 static void increase_dmi_busy_delay(struct target *target)
436 {
437 riscv013_info_t *info = get_info(target);
438 info->dmi_busy_delay += info->dmi_busy_delay / 10 + 1;
439 LOG_DEBUG("dtmcontrol_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
440 info->dtmcontrol_idle, info->dmi_busy_delay,
441 info->ac_busy_delay);
442
443 dtmcontrol_scan(target, DTM_DTMCS_DMIRESET);
444 }
445
446 /**
447 * exec: If this is set, assume the scan results in an execution, so more
448 * run-test/idle cycles may be required.
449 */
450 static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
451 uint32_t *data_in, dmi_op_t op, uint32_t address_out, uint32_t data_out,
452 bool exec)
453 {
454 riscv013_info_t *info = get_info(target);
455 uint8_t in[8] = {0};
456 uint8_t out[8];
457 struct scan_field field = {
458 .num_bits = info->abits + DTM_DMI_OP_LENGTH + DTM_DMI_DATA_LENGTH,
459 .out_value = out,
460 .in_value = in
461 };
462
463 assert(info->abits != 0);
464
465 buf_set_u32(out, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, op);
466 buf_set_u32(out, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, data_out);
467 buf_set_u32(out, DTM_DMI_ADDRESS_OFFSET, info->abits, address_out);
468
469 /* Assume dbus is already selected. */
470 jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
471
472 int idle_count = info->dmi_busy_delay;
473 if (exec)
474 idle_count += info->ac_busy_delay;
475
476 if (idle_count)
477 jtag_add_runtest(idle_count, TAP_IDLE);
478
479 int retval = jtag_execute_queue();
480 if (retval != ERROR_OK) {
481 LOG_ERROR("dmi_scan failed jtag scan");
482 return DMI_STATUS_FAILED;
483 }
484
485 if (data_in)
486 *data_in = buf_get_u32(in, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);
487
488 if (address_in)
489 *address_in = buf_get_u32(in, DTM_DMI_ADDRESS_OFFSET, info->abits);
490
491 dump_field(&field);
492
493 return buf_get_u32(in, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
494 }
495
496 static int dmi_op_timeout(struct target *target, uint32_t *data_in, int dmi_op,
497 uint32_t address, uint32_t data_out, int timeout_sec)
498 {
499 select_dmi(target);
500
501 dmi_status_t status;
502 uint32_t address_in;
503
504 const char *op_name;
505 switch (dmi_op) {
506 case DMI_OP_NOP:
507 op_name = "nop";
508 break;
509 case DMI_OP_READ:
510 op_name = "read";
511 break;
512 case DMI_OP_WRITE:
513 op_name = "write";
514 break;
515 default:
516 LOG_ERROR("Invalid DMI operation: %d", dmi_op);
517 return ERROR_FAIL;
518 }
519
520 time_t start = time(NULL);
521 /* This first loop performs the request. Note that if for some reason this
522 * stays busy, it is actually due to the previous access. */
523 while (1) {
524 status = dmi_scan(target, NULL, NULL, dmi_op, address, data_out,
525 false);
526 if (status == DMI_STATUS_BUSY) {
527 increase_dmi_busy_delay(target);
528 } else if (status == DMI_STATUS_SUCCESS) {
529 break;
530 } else {
531 LOG_ERROR("failed %s at 0x%x, status=%d", op_name, address, status);
532 return ERROR_FAIL;
533 }
534 if (time(NULL) - start > timeout_sec)
535 return ERROR_TIMEOUT_REACHED;
536 }
537
538 if (status != DMI_STATUS_SUCCESS) {
539 LOG_ERROR("Failed %s at 0x%x; status=%d", op_name, address, status);
540 return ERROR_FAIL;
541 }
542
543 /* This second loop ensures the request succeeded, and gets back data.
544 * Note that NOP can result in a 'busy' result as well, but that would be
545 * noticed on the next DMI access we do. */
546 while (1) {
547 status = dmi_scan(target, &address_in, data_in, DMI_OP_NOP, address, 0,
548 false);
549 if (status == DMI_STATUS_BUSY) {
550 increase_dmi_busy_delay(target);
551 } else if (status == DMI_STATUS_SUCCESS) {
552 break;
553 } else {
554 LOG_ERROR("failed %s (NOP) at 0x%x, status=%d", op_name, address,
555 status);
556 return ERROR_FAIL;
557 }
558 if (time(NULL) - start > timeout_sec)
559 return ERROR_TIMEOUT_REACHED;
560 }
561
562 if (status != DMI_STATUS_SUCCESS) {
563 if (status == DMI_STATUS_FAILED || !data_in) {
564 LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name, address,
565 status);
566 } else {
567 LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
568 op_name, address, *data_in, status);
569 }
570 return ERROR_FAIL;
571 }
572
573 return ERROR_OK;
574 }
575
576 static int dmi_op(struct target *target, uint32_t *data_in, int dmi_op,
577 uint32_t address, uint32_t data_out)
578 {
579 int result = dmi_op_timeout(target, data_in, dmi_op, address, data_out,
580 riscv_command_timeout_sec);
581 if (result == ERROR_TIMEOUT_REACHED) {
582 LOG_ERROR("DMI operation didn't complete in %d seconds. The target is "
583 "either really slow or broken. You could increase the "
584 "timeout with riscv set_command_timeout_sec.",
585 riscv_command_timeout_sec);
586 return ERROR_FAIL;
587 }
588 return result;
589 }
590
/* Read the DM register at `address` into *value. Returns an OpenOCD error
 * code. */
static int dmi_read(struct target *target, uint32_t *value, uint32_t address)
{
	return dmi_op(target, value, DMI_OP_READ, address, 0);
}
595
/* Write `value` to the DM register at `address`. Returns an OpenOCD error
 * code. */
static int dmi_write(struct target *target, uint32_t address, uint32_t value)
{
	return dmi_op(target, NULL, DMI_OP_WRITE, address, value);
}
600
601 int dmstatus_read_timeout(struct target *target, uint32_t *dmstatus,
602 bool authenticated, unsigned timeout_sec)
603 {
604 int result = dmi_op_timeout(target, dmstatus, DMI_OP_READ, DMI_DMSTATUS, 0,
605 timeout_sec);
606 if (result != ERROR_OK)
607 return result;
608 if (authenticated && !get_field(*dmstatus, DMI_DMSTATUS_AUTHENTICATED)) {
609 LOG_ERROR("Debugger is not authenticated to target Debug Module. "
610 "(dmstatus=0x%x). Use `riscv authdata_read` and "
611 "`riscv authdata_write` commands to authenticate.", *dmstatus);
612 return ERROR_FAIL;
613 }
614 return ERROR_OK;
615 }
616
/* dmstatus_read_timeout() with the global command timeout. */
int dmstatus_read(struct target *target, uint32_t *dmstatus,
		bool authenticated)
{
	return dmstatus_read_timeout(target, dmstatus, authenticated,
			riscv_command_timeout_sec);
}
623
624 static void increase_ac_busy_delay(struct target *target)
625 {
626 riscv013_info_t *info = get_info(target);
627 info->ac_busy_delay += info->ac_busy_delay / 10 + 1;
628 LOG_DEBUG("dtmcontrol_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
629 info->dtmcontrol_idle, info->dmi_busy_delay,
630 info->ac_busy_delay);
631 }
632
633 uint32_t abstract_register_size(unsigned width)
634 {
635 switch (width) {
636 case 32:
637 return set_field(0, AC_ACCESS_REGISTER_SIZE, 2);
638 case 64:
639 return set_field(0, AC_ACCESS_REGISTER_SIZE, 3);
640 break;
641 case 128:
642 return set_field(0, AC_ACCESS_REGISTER_SIZE, 4);
643 break;
644 default:
645 LOG_ERROR("Unsupported register width: %d", width);
646 return 0;
647 }
648 }
649
650 static int wait_for_idle(struct target *target, uint32_t *abstractcs)
651 {
652 RISCV013_INFO(info);
653 time_t start = time(NULL);
654 while (1) {
655 if (dmi_read(target, abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
656 return ERROR_FAIL;
657
658 if (get_field(*abstractcs, DMI_ABSTRACTCS_BUSY) == 0)
659 return ERROR_OK;
660
661 if (time(NULL) - start > riscv_command_timeout_sec) {
662 info->cmderr = get_field(*abstractcs, DMI_ABSTRACTCS_CMDERR);
663 if (info->cmderr != CMDERR_NONE) {
664 const char *errors[8] = {
665 "none",
666 "busy",
667 "not supported",
668 "exception",
669 "halt/resume",
670 "reserved",
671 "reserved",
672 "other" };
673
674 LOG_ERROR("Abstract command ended in error '%s' (abstractcs=0x%x)",
675 errors[info->cmderr], *abstractcs);
676 }
677
678 LOG_ERROR("Timed out after %ds waiting for busy to go low (abstractcs=0x%x). "
679 "Increase the timeout with riscv set_command_timeout_sec.",
680 riscv_command_timeout_sec,
681 *abstractcs);
682 return ERROR_FAIL;
683 }
684 }
685 }
686
687 static int execute_abstract_command(struct target *target, uint32_t command)
688 {
689 RISCV013_INFO(info);
690 LOG_DEBUG("command=0x%x", command);
691 dmi_write(target, DMI_COMMAND, command);
692
693 uint32_t abstractcs = 0;
694 wait_for_idle(target, &abstractcs);
695
696 info->cmderr = get_field(abstractcs, DMI_ABSTRACTCS_CMDERR);
697 if (info->cmderr != 0) {
698 LOG_DEBUG("command 0x%x failed; abstractcs=0x%x", command, abstractcs);
699 /* Clear the error. */
700 dmi_write(target, DMI_ABSTRACTCS, set_field(0, DMI_ABSTRACTCS_CMDERR,
701 info->cmderr));
702 return ERROR_FAIL;
703 }
704
705 return ERROR_OK;
706 }
707
/**
 * Read abstract command argument `index` out of the data registers.
 *
 * @param index 0-based argument number; arguments are packed size_bits wide.
 * @param size_bits 32 or 64; anything else logs an error and returns ~0.
 *
 * NOTE(review): the dmi_read() results are unchecked, so on a DMI failure
 * the returned value may be partly garbage — the riscv_reg_t return type
 * leaves no way to report the error without an interface change.
 */
static riscv_reg_t read_abstract_arg(struct target *target, unsigned index,
		unsigned size_bits)
{
	riscv_reg_t value = 0;
	uint32_t v;
	/* Each argument occupies size_bits/32 consecutive data registers. */
	unsigned offset = index * size_bits / 32;
	switch (size_bits) {
		default:
			LOG_ERROR("Unsupported size: %d", size_bits);
			return ~0;
		case 64:
			/* High word is in the next data register up. */
			dmi_read(target, &v, DMI_DATA0 + offset + 1);
			value |= ((uint64_t) v) << 32;
			/* falls through */
		case 32:
			dmi_read(target, &v, DMI_DATA0 + offset);
			value |= v;
	}
	return value;
}
728
729 static int write_abstract_arg(struct target *target, unsigned index,
730 riscv_reg_t value, unsigned size_bits)
731 {
732 unsigned offset = index * size_bits / 32;
733 switch (size_bits) {
734 default:
735 LOG_ERROR("Unsupported size: %d", size_bits);
736 return ERROR_FAIL;
737 case 64:
738 dmi_write(target, DMI_DATA0 + offset + 1, value >> 32);
739 /* falls through */
740 case 32:
741 dmi_write(target, DMI_DATA0 + offset, value);
742 }
743 return ERROR_OK;
744 }
745
746 /**
747 * @size in bits
748 */
749 static uint32_t access_register_command(uint32_t number, unsigned size,
750 uint32_t flags)
751 {
752 uint32_t command = set_field(0, DMI_COMMAND_CMDTYPE, 0);
753 switch (size) {
754 case 32:
755 command = set_field(command, AC_ACCESS_REGISTER_SIZE, 2);
756 break;
757 case 64:
758 command = set_field(command, AC_ACCESS_REGISTER_SIZE, 3);
759 break;
760 default:
761 assert(0);
762 }
763
764 if (number <= GDB_REGNO_XPR31) {
765 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
766 0x1000 + number - GDB_REGNO_ZERO);
767 } else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
768 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
769 0x1020 + number - GDB_REGNO_FPR0);
770 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
771 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
772 number - GDB_REGNO_CSR0);
773 } else {
774 assert(0);
775 }
776
777 command |= flags;
778
779 return command;
780 }
781
782 static int register_read_abstract(struct target *target, uint64_t *value,
783 uint32_t number, unsigned size)
784 {
785 RISCV013_INFO(info);
786
787 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
788 !info->abstract_read_fpr_supported)
789 return ERROR_FAIL;
790 if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
791 !info->abstract_read_csr_supported)
792 return ERROR_FAIL;
793
794 uint32_t command = access_register_command(number, size,
795 AC_ACCESS_REGISTER_TRANSFER);
796
797 int result = execute_abstract_command(target, command);
798 if (result != ERROR_OK) {
799 if (info->cmderr == CMDERR_NOT_SUPPORTED) {
800 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
801 info->abstract_read_fpr_supported = false;
802 LOG_INFO("Disabling abstract command reads from FPRs.");
803 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
804 info->abstract_read_csr_supported = false;
805 LOG_INFO("Disabling abstract command reads from CSRs.");
806 }
807 }
808 return result;
809 }
810
811 if (value)
812 *value = read_abstract_arg(target, 0, size);
813
814 return ERROR_OK;
815 }
816
817 static int register_write_abstract(struct target *target, uint32_t number,
818 uint64_t value, unsigned size)
819 {
820 RISCV013_INFO(info);
821
822 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
823 !info->abstract_write_fpr_supported)
824 return ERROR_FAIL;
825 if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
826 !info->abstract_write_csr_supported)
827 return ERROR_FAIL;
828
829 uint32_t command = access_register_command(number, size,
830 AC_ACCESS_REGISTER_TRANSFER |
831 AC_ACCESS_REGISTER_WRITE);
832
833 if (write_abstract_arg(target, 0, value, size) != ERROR_OK)
834 return ERROR_FAIL;
835
836 int result = execute_abstract_command(target, command);
837 if (result != ERROR_OK) {
838 if (info->cmderr == CMDERR_NOT_SUPPORTED) {
839 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
840 info->abstract_write_fpr_supported = false;
841 LOG_INFO("Disabling abstract command writes to FPRs.");
842 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
843 info->abstract_write_csr_supported = false;
844 LOG_INFO("Disabling abstract command writes to CSRs.");
845 }
846 }
847 return result;
848 }
849
850 return ERROR_OK;
851 }
852
/*
 * Determine whether the program buffer can be written through progbuf0 and
 * read back, caching the answer in info->progbuf_writable (and the buffer's
 * hart-visible address in info->progbuf_address). Idempotent: returns
 * immediately once the answer is known. Clobbers and restores s0.
 */
static int examine_progbuf(struct target *target)
{
	riscv013_info_t *info = get_info(target);

	if (info->progbuf_writable != YNM_MAYBE)
		return ERROR_OK;

	/* Figure out if progbuf is writable. */

	if (info->progbufsize < 1) {
		info->progbuf_writable = YNM_NO;
		LOG_INFO("No program buffer present.");
		return ERROR_OK;
	}

	/* Save s0 so it can be restored after the probe. */
	uint64_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* Execute auipc from the program buffer; s0 ends up holding an address
	 * derived from the buffer's own PC, which is read out below. */
	struct riscv_program program;
	riscv_program_init(&program, target);
	riscv_program_insert(&program, auipc(S0));
	if (riscv_program_exec(&program, target) != ERROR_OK)
		return ERROR_FAIL;

	/* NOTE(review): passes &info->progbuf_address (riscv_addr_t) where the
	 * prototype takes uint64_t* — relies on riscv_addr_t being uint64_t. */
	if (register_read_direct(target, &info->progbuf_address, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* Try to store s0 into the first word of the program buffer itself. */
	riscv_program_init(&program, target);
	riscv_program_insert(&program, sw(S0, S0, 0));
	int result = riscv_program_exec(&program, target);

	/* Restore s0 before acting on the result. */
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	if (result != ERROR_OK) {
		/* This program might have failed if the program buffer is not
		 * writable. */
		info->progbuf_writable = YNM_NO;
		return ERROR_OK;
	}

	/* If the store took effect, reading progbuf0 through the DM shows the
	 * address value we just stored. */
	uint32_t written;
	if (dmi_read(target, &written, DMI_PROGBUF0) != ERROR_OK)
		return ERROR_FAIL;
	if (written == (uint32_t) info->progbuf_address) {
		LOG_INFO("progbuf is writable at 0x%" PRIx64,
				info->progbuf_address);
		info->progbuf_writable = YNM_YES;

	} else {
		LOG_INFO("progbuf is not writeable at 0x%" PRIx64,
				info->progbuf_address);
		info->progbuf_writable = YNM_NO;
	}

	return ERROR_OK;
}
911
/* Where a piece of scratch memory lives, from the debugger's point of view:
 * the abstract-command data registers, the program buffer, or target RAM. */
typedef enum {
	SPACE_DMI_DATA,
	SPACE_DMI_PROGBUF,
	SPACE_DMI_RAM
} memory_space_t;

/* A reserved piece of scratch memory; see scratch_reserve()/scratch_release()
 * and the scratch_read64()/scratch_write64() accessors. */
typedef struct {
	/* How can the debugger access this memory? */
	memory_space_t memory_space;
	/* Memory address to access the scratch memory from the hart. */
	riscv_addr_t hart_address;
	/* Memory address to access the scratch memory from the debugger. */
	riscv_addr_t debug_address;
	/* Non-NULL only for SPACE_DMI_RAM: the allocated working area. */
	struct working_area *area;
} scratch_mem_t;
927
928 /**
929 * Find some scratch memory to be used with the given program.
930 */
931 static int scratch_reserve(struct target *target,
932 scratch_mem_t *scratch,
933 struct riscv_program *program,
934 unsigned size_bytes)
935 {
936 riscv_addr_t alignment = 1;
937 while (alignment < size_bytes)
938 alignment *= 2;
939
940 scratch->area = NULL;
941
942 riscv013_info_t *info = get_info(target);
943
944 if (info->dataaccess == 1) {
945 /* Sign extend dataaddr. */
946 scratch->hart_address = info->dataaddr;
947 if (info->dataaddr & (1<<11))
948 scratch->hart_address |= 0xfffffffffffff000ULL;
949 /* Align. */
950 scratch->hart_address = (scratch->hart_address + alignment - 1) & ~(alignment - 1);
951
952 if ((size_bytes + scratch->hart_address - info->dataaddr + 3) / 4 >=
953 info->datasize) {
954 scratch->memory_space = SPACE_DMI_DATA;
955 scratch->debug_address = (scratch->hart_address - info->dataaddr) / 4;
956 return ERROR_OK;
957 }
958 }
959
960 if (examine_progbuf(target) != ERROR_OK)
961 return ERROR_FAIL;
962
963 /* Allow for ebreak at the end of the program. */
964 unsigned program_size = (program->instruction_count + 1) * 4;
965 scratch->hart_address = (info->progbuf_address + program_size + alignment - 1) &
966 ~(alignment - 1);
967 if ((size_bytes + scratch->hart_address - info->progbuf_address + 3) / 4 >=
968 info->progbufsize) {
969 scratch->memory_space = SPACE_DMI_PROGBUF;
970 scratch->debug_address = (scratch->hart_address - info->progbuf_address) / 4;
971 return ERROR_OK;
972 }
973
974 if (target_alloc_working_area(target, size_bytes + alignment - 1,
975 &scratch->area) == ERROR_OK) {
976 scratch->hart_address = (scratch->area->address + alignment - 1) &
977 ~(alignment - 1);
978 scratch->memory_space = SPACE_DMI_RAM;
979 scratch->debug_address = scratch->hart_address;
980 return ERROR_OK;
981 }
982
983 LOG_ERROR("Couldn't find %d bytes of scratch RAM to use. Please configure "
984 "a work area with 'configure -work-area-phys'.", size_bytes);
985 return ERROR_FAIL;
986 }
987
988 static int scratch_release(struct target *target,
989 scratch_mem_t *scratch)
990 {
991 if (scratch->area)
992 return target_free_working_area(target, scratch->area);
993
994 return ERROR_OK;
995 }
996
997 static int scratch_read64(struct target *target, scratch_mem_t *scratch,
998 uint64_t *value)
999 {
1000 uint32_t v;
1001 switch (scratch->memory_space) {
1002 case SPACE_DMI_DATA:
1003 if (dmi_read(target, &v, DMI_DATA0 + scratch->debug_address) != ERROR_OK)
1004 return ERROR_FAIL;
1005 *value = v;
1006 if (dmi_read(target, &v, DMI_DATA1 + scratch->debug_address) != ERROR_OK)
1007 return ERROR_FAIL;
1008 *value |= ((uint64_t) v) << 32;
1009 break;
1010 case SPACE_DMI_PROGBUF:
1011 if (dmi_read(target, &v, DMI_PROGBUF0 + scratch->debug_address) != ERROR_OK)
1012 return ERROR_FAIL;
1013 *value = v;
1014 if (dmi_read(target, &v, DMI_PROGBUF1 + scratch->debug_address) != ERROR_OK)
1015 return ERROR_FAIL;
1016 *value |= ((uint64_t) v) << 32;
1017 break;
1018 case SPACE_DMI_RAM:
1019 {
1020 uint8_t buffer[8];
1021 if (read_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
1022 return ERROR_FAIL;
1023 *value = buffer[0] |
1024 (((uint64_t) buffer[1]) << 8) |
1025 (((uint64_t) buffer[2]) << 16) |
1026 (((uint64_t) buffer[3]) << 24) |
1027 (((uint64_t) buffer[4]) << 32) |
1028 (((uint64_t) buffer[5]) << 40) |
1029 (((uint64_t) buffer[6]) << 48) |
1030 (((uint64_t) buffer[7]) << 56);
1031 }
1032 break;
1033 }
1034 return ERROR_OK;
1035 }
1036
1037 static int scratch_write64(struct target *target, scratch_mem_t *scratch,
1038 uint64_t value)
1039 {
1040 switch (scratch->memory_space) {
1041 case SPACE_DMI_DATA:
1042 dmi_write(target, DMI_DATA0 + scratch->debug_address, value);
1043 dmi_write(target, DMI_DATA1 + scratch->debug_address, value >> 32);
1044 break;
1045 case SPACE_DMI_PROGBUF:
1046 dmi_write(target, DMI_PROGBUF0 + scratch->debug_address, value);
1047 dmi_write(target, DMI_PROGBUF1 + scratch->debug_address, value >> 32);
1048 break;
1049 case SPACE_DMI_RAM:
1050 {
1051 uint8_t buffer[8] = {
1052 value,
1053 value >> 8,
1054 value >> 16,
1055 value >> 24,
1056 value >> 32,
1057 value >> 40,
1058 value >> 48,
1059 value >> 56
1060 };
1061 if (write_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
1062 return ERROR_FAIL;
1063 }
1064 break;
1065 }
1066 return ERROR_OK;
1067 }
1068
1069 /** Return register size in bits. */
1070 static unsigned register_size(struct target *target, unsigned number)
1071 {
1072 /* If reg_cache hasn't been initialized yet, make a guess. We need this for
1073 * when this function is called during examine(). */
1074 if (target->reg_cache)
1075 return target->reg_cache->reg_list[number].size;
1076 else
1077 return riscv_xlen(target);
1078 }
1079
/**
 * Immediately write the new value to the requested register. This mechanism
 * bypasses any caches.
 *
 * Tries an abstract register-access command first. If that fails and the
 * program buffer is usable while the hart is halted, falls back to executing
 * a small program that moves the value from S0 into the destination register.
 * The reg_cache entry (if any) is updated on success.
 */
static int register_write_direct(struct target *target, unsigned number,
		uint64_t value)
{
	RISCV013_INFO(info);
	RISCV_INFO(r);

	LOG_DEBUG("[%d] reg[0x%x] <- 0x%" PRIx64, riscv_current_hartid(target),
			number, value);

	/* Cheap path: abstract register-access command. */
	int result = register_write_abstract(target, number, value,
			register_size(target, number));
	if (result == ERROR_OK && target->reg_cache) {
		struct reg *reg = &target->reg_cache->reg_list[number];
		buf_set_u64(reg->value, 0, reg->size, value);
		reg->valid = true;
	}
	/* The program-buffer fallback needs at least 2 slots (counting the
	 * implicit ebreak, if implemented) and a halted hart. */
	if (result == ERROR_OK || info->progbufsize + r->impebreak < 2 ||
			!riscv_is_halted(target))
		return result;

	struct riscv_program program;
	riscv_program_init(&program, target);

	/* S0 is used as a staging register; save it so it can be restored. */
	uint64_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	scratch_mem_t scratch;
	bool use_scratch = false;
	if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
			riscv_supports_extension(target, riscv_current_hartid(target), 'D') &&
			riscv_xlen(target) < 64) {
		/* There are no instructions to move all the bits from a register, so
		 * we need to use some scratch RAM. */
		use_scratch = true;
		/* fld will load the 64-bit value from [S0 + 0] into the FPR. */
		riscv_program_insert(&program, fld(number - GDB_REGNO_FPR0, S0, 0));

		if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
			return ERROR_FAIL;

		/* Recursive call: point S0 at the scratch area... */
		if (register_write_direct(target, GDB_REGNO_S0, scratch.hart_address)
				!= ERROR_OK) {
			scratch_release(target, &scratch);
			return ERROR_FAIL;
		}

		/* ...and park the 64-bit value there for the fld to pick up. */
		if (scratch_write64(target, &scratch, value) != ERROR_OK) {
			scratch_release(target, &scratch);
			return ERROR_FAIL;
		}

	} else {
		/* Stage the value in S0, then move it to the destination. */
		if (register_write_direct(target, GDB_REGNO_S0, value) != ERROR_OK)
			return ERROR_FAIL;

		if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
			if (riscv_supports_extension(target, riscv_current_hartid(target), 'D'))
				riscv_program_insert(&program, fmv_d_x(number - GDB_REGNO_FPR0, S0));
			else
				riscv_program_insert(&program, fmv_w_x(number - GDB_REGNO_FPR0, S0));
		} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
			riscv_program_csrw(&program, S0, number);
		} else {
			LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
			return ERROR_FAIL;
		}
	}

	int exec_out = riscv_program_exec(&program, target);
	/* Don't message on error. Probably the register doesn't exist. */
	if (exec_out == ERROR_OK && target->reg_cache) {
		struct reg *reg = &target->reg_cache->reg_list[number];
		buf_set_u64(reg->value, 0, reg->size, value);
		reg->valid = true;
	}

	if (use_scratch)
		scratch_release(target, &scratch);

	/* Restore S0. */
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	return exec_out;
}
1169
1170 /** Return the cached value, or read from the target if necessary. */
1171 static int register_read(struct target *target, uint64_t *value, uint32_t number)
1172 {
1173 if (number == GDB_REGNO_ZERO) {
1174 *value = 0;
1175 return ERROR_OK;
1176 }
1177 if (target->reg_cache &&
1178 (number <= GDB_REGNO_XPR31 ||
1179 (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31))) {
1180 /* Only check the cache for registers that we know won't spontaneously
1181 * change. */
1182 struct reg *reg = &target->reg_cache->reg_list[number];
1183 if (reg && reg->valid) {
1184 *value = buf_get_u64(reg->value, 0, reg->size);
1185 return ERROR_OK;
1186 }
1187 }
1188 int result = register_read_direct(target, value, number);
1189 if (result != ERROR_OK)
1190 return ERROR_FAIL;
1191 if (target->reg_cache) {
1192 struct reg *reg = &target->reg_cache->reg_list[number];
1193 buf_set_u64(reg->value, 0, reg->size, *value);
1194 reg->valid = true;
1195 }
1196 return ERROR_OK;
1197 }
1198
/** Actually read registers from the target right now.
 *
 * Tries an abstract register-access command first. For FPRs/CSRs, if that
 * fails and the program buffer is usable, falls back to executing a small
 * program that moves the register into S0 (or scratch RAM for D-extension
 * FPRs on RV32). S0 and, when touched, mstatus are restored afterwards.
 */
static int register_read_direct(struct target *target, uint64_t *value, uint32_t number)
{
	RISCV013_INFO(info);
	RISCV_INFO(r);

	/* Cheap path: abstract register-access command. */
	int result = register_read_abstract(target, value, number,
			register_size(target, number));

	/* Program-buffer fallback: needs at least 2 slots (counting the
	 * implicit ebreak) and only applies to non-GPR registers. */
	if (result != ERROR_OK &&
			info->progbufsize + r->impebreak >= 2 &&
			number > GDB_REGNO_XPR31) {
		struct riscv_program program;
		riscv_program_init(&program, target);

		scratch_mem_t scratch;
		bool use_scratch = false;

		/* S0 is used as a staging register; save it for later restore. */
		uint64_t s0;
		if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
			return ERROR_FAIL;

		/* Write program to move data into s0. */

		uint64_t mstatus;
		if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
			/* FP accesses trap while mstatus.FS is Off; temporarily
			 * enable it if needed (restored below). */
			if (register_read(target, &mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
				return ERROR_FAIL;
			if ((mstatus & MSTATUS_FS) == 0)
				if (register_write_direct(target, GDB_REGNO_MSTATUS,
							set_field(mstatus, MSTATUS_FS, 1)) != ERROR_OK)
					return ERROR_FAIL;

			if (riscv_supports_extension(target, riscv_current_hartid(target), 'D')
					&& riscv_xlen(target) < 64) {
				/* There are no instructions to move all the bits from a
				 * register, so we need to use some scratch RAM. */
				riscv_program_insert(&program, fsd(number - GDB_REGNO_FPR0, S0,
							0));

				if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
					return ERROR_FAIL;
				use_scratch = true;

				/* Point S0 at the scratch area the fsd stores to. */
				if (register_write_direct(target, GDB_REGNO_S0,
							scratch.hart_address) != ERROR_OK) {
					scratch_release(target, &scratch);
					return ERROR_FAIL;
				}
			} else if (riscv_supports_extension(target,
						riscv_current_hartid(target), 'D')) {
				riscv_program_insert(&program, fmv_x_d(S0, number - GDB_REGNO_FPR0));
			} else {
				riscv_program_insert(&program, fmv_x_w(S0, number - GDB_REGNO_FPR0));
			}
		} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
			riscv_program_csrr(&program, S0, number);
		} else {
			LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
			return ERROR_FAIL;
		}

		/* Execute program. */
		result = riscv_program_exec(&program, target);
		/* Don't message on error. Probably the register doesn't exist. */

		if (use_scratch) {
			/* The fsd stored the 64-bit value into scratch RAM. */
			result = scratch_read64(target, &scratch, value);
			scratch_release(target, &scratch);
			if (result != ERROR_OK)
				return result;
		} else {
			/* Read S0 */
			if (register_read_direct(target, value, GDB_REGNO_S0) != ERROR_OK)
				return ERROR_FAIL;
		}

		/* Restore mstatus if we flipped FS on above. (mstatus is only
		 * initialized on the FPR path, which is the same guard used here.) */
		if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
				(mstatus & MSTATUS_FS) == 0)
			if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK)
				return ERROR_FAIL;

		/* Restore S0. */
		if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
			return ERROR_FAIL;
	}

	if (result == ERROR_OK) {
		LOG_DEBUG("[%d] reg[0x%x] = 0x%" PRIx64, riscv_current_hartid(target),
				number, *value);
	}

	return result;
}
1293
1294 int wait_for_authbusy(struct target *target, uint32_t *dmstatus)
1295 {
1296 time_t start = time(NULL);
1297 while (1) {
1298 uint32_t value;
1299 if (dmstatus_read(target, &value, false) != ERROR_OK)
1300 return ERROR_FAIL;
1301 if (dmstatus)
1302 *dmstatus = value;
1303 if (!get_field(value, DMI_DMSTATUS_AUTHBUSY))
1304 break;
1305 if (time(NULL) - start > riscv_command_timeout_sec) {
1306 LOG_ERROR("Timed out after %ds waiting for authbusy to go low (dmstatus=0x%x). "
1307 "Increase the timeout with riscv set_command_timeout_sec.",
1308 riscv_command_timeout_sec,
1309 value);
1310 return ERROR_FAIL;
1311 }
1312 }
1313
1314 return ERROR_OK;
1315 }
1316
1317 /*** OpenOCD target functions. ***/
1318
1319 static void deinit_target(struct target *target)
1320 {
1321 LOG_DEBUG("riscv_deinit_target()");
1322 riscv_info_t *info = (riscv_info_t *) target->arch_info;
1323 free(info->version_specific);
1324 info->version_specific = NULL;
1325 }
1326
/* Probe the DTM and Debug Module, enumerate harts, determine each hart's
 * XLEN and misa, and initialize the register cache. Called once per target
 * at startup and again after successful authentication. */
static int examine(struct target *target)
{
	/* Don't need to select dbus, since the first thing we do is read dtmcontrol. */

	uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
	LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
	LOG_DEBUG(" dmireset=%d", get_field(dtmcontrol, DTM_DTMCS_DMIRESET));
	LOG_DEBUG(" idle=%d", get_field(dtmcontrol, DTM_DTMCS_IDLE));
	LOG_DEBUG(" dmistat=%d", get_field(dtmcontrol, DTM_DTMCS_DMISTAT));
	LOG_DEBUG(" abits=%d", get_field(dtmcontrol, DTM_DTMCS_ABITS));
	LOG_DEBUG(" version=%d", get_field(dtmcontrol, DTM_DTMCS_VERSION));
	if (dtmcontrol == 0) {
		LOG_ERROR("dtmcontrol is 0. Check JTAG connectivity/board power.");
		return ERROR_FAIL;
	}
	/* DTM version 1 corresponds to debug spec 0.13. */
	if (get_field(dtmcontrol, DTM_DTMCS_VERSION) != 1) {
		LOG_ERROR("Unsupported DTM version %d. (dtmcontrol=0x%x)",
				get_field(dtmcontrol, DTM_DTMCS_VERSION), dtmcontrol);
		return ERROR_FAIL;
	}

	riscv013_info_t *info = get_info(target);
	/* Cache DMI address width and required idle cycles for later scans. */
	info->abits = get_field(dtmcontrol, DTM_DTMCS_ABITS);
	info->dtmcontrol_idle = get_field(dtmcontrol, DTM_DTMCS_IDLE);

	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, false) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("dmstatus:  0x%08x", dmstatus);
	if (get_field(dmstatus, DMI_DMSTATUS_VERSION) != 2) {
		LOG_ERROR("OpenOCD only supports Debug Module version 2, not %d "
				"(dmstatus=0x%x)", get_field(dmstatus, DMI_DMSTATUS_VERSION), dmstatus);
		return ERROR_FAIL;
	}

	/* Reset the Debug Module (once per DM, which may be shared by
	 * several targets). */
	dm013_info_t *dm = get_dm(target);
	if (!dm->was_reset) {
		dmi_write(target, DMI_DMCONTROL, 0);
		dmi_write(target, DMI_DMCONTROL, DMI_DMCONTROL_DMACTIVE);
		dm->was_reset = true;
	}

	/* Write all-ones to hartsel to discover how many bits are implemented
	 * (WARL behavior: unimplemented bits read back as 0). */
	dmi_write(target, DMI_DMCONTROL, DMI_DMCONTROL_HARTSELLO |
			DMI_DMCONTROL_HARTSELHI | DMI_DMCONTROL_DMACTIVE);
	uint32_t dmcontrol;
	if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
		return ERROR_FAIL;

	if (!get_field(dmcontrol, DMI_DMCONTROL_DMACTIVE)) {
		LOG_ERROR("Debug Module did not become active. dmcontrol=0x%x",
				dmcontrol);
		return ERROR_FAIL;
	}

	/* Count the low-order ones that stuck to get hartsellen. */
	uint32_t hartsel =
		(get_field(dmcontrol, DMI_DMCONTROL_HARTSELHI) <<
		 DMI_DMCONTROL_HARTSELLO_LENGTH) |
		get_field(dmcontrol, DMI_DMCONTROL_HARTSELLO);
	info->hartsellen = 0;
	while (hartsel & 1) {
		info->hartsellen++;
		hartsel >>= 1;
	}
	LOG_DEBUG("hartsellen=%d", info->hartsellen);

	uint32_t hartinfo;
	if (dmi_read(target, &hartinfo, DMI_HARTINFO) != ERROR_OK)
		return ERROR_FAIL;

	/* Location/size of the memory-mapped shadow of the data registers,
	 * used later by scratch_reserve(). */
	info->datasize = get_field(hartinfo, DMI_HARTINFO_DATASIZE);
	info->dataaccess = get_field(hartinfo, DMI_HARTINFO_DATAACCESS);
	info->dataaddr = get_field(hartinfo, DMI_HARTINFO_DATAADDR);

	if (!get_field(dmstatus, DMI_DMSTATUS_AUTHENTICATED)) {
		LOG_ERROR("Debugger is not authenticated to target Debug Module. "
				"(dmstatus=0x%x). Use `riscv authdata_read` and "
				"`riscv authdata_write` commands to authenticate.", dmstatus);
		/* If we return ERROR_FAIL here, then in a multicore setup the next
		 * core won't be examined, which means we won't set up the
		 * authentication commands for them, which means the config script
		 * needs to be a lot more complex. */
		return ERROR_OK;
	}

	if (dmi_read(target, &info->sbcs, DMI_SBCS) != ERROR_OK)
		return ERROR_FAIL;

	/* Check that abstract data registers are accessible. */
	uint32_t abstractcs;
	if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
		return ERROR_FAIL;
	info->datacount = get_field(abstractcs, DMI_ABSTRACTCS_DATACOUNT);
	info->progbufsize = get_field(abstractcs, DMI_ABSTRACTCS_PROGBUFSIZE);

	LOG_INFO("datacount=%d progbufsize=%d", info->datacount, info->progbufsize);

	RISCV_INFO(r);
	r->impebreak = get_field(dmstatus, DMI_DMSTATUS_IMPEBREAK);

	/* Program-buffer execution needs room for at least one instruction
	 * plus ebreak (which may be implicit). */
	if (info->progbufsize + r->impebreak < 2) {
		LOG_WARNING("We won't be able to execute fence instructions on this "
				"target. Memory may not always appear consistent. "
				"(progbufsize=%d, impebreak=%d)", info->progbufsize,
				r->impebreak);
	}

	/* Before doing anything else we must first enumerate the harts. */

	/* Don't call any riscv_* functions until after we've counted the number of
	 * cores and initialized registers. */
	for (int i = 0; i < MIN(RISCV_MAX_HARTS, 1 << info->hartsellen); ++i) {
		if (!riscv_rtos_enabled(target) && i != target->coreid)
			continue;

		r->current_hartid = i;
		if (riscv013_select_current_hart(target) != ERROR_OK)
			return ERROR_FAIL;

		uint32_t s;
		if (dmstatus_read(target, &s, true) != ERROR_OK)
			return ERROR_FAIL;
		/* Selecting a nonexistent hart means we've run past the end. */
		if (get_field(s, DMI_DMSTATUS_ANYNONEXISTENT))
			break;
		r->hart_count = i + 1;

		if (get_field(s, DMI_DMSTATUS_ANYHAVERESET))
			dmi_write(target, DMI_DMCONTROL,
					set_hartsel(DMI_DMCONTROL_DMACTIVE | DMI_DMCONTROL_ACKHAVERESET, i));

		/* The hart must be halted to probe XLEN and misa below. */
		if (!riscv_is_halted(target)) {
			if (riscv013_halt_current_hart(target) != ERROR_OK) {
				LOG_ERROR("Fatal: Hart %d failed to halt during examine()", i);
				return ERROR_FAIL;
			}
		}

		/* Without knowing anything else we can at least mess with the
		 * program buffer. */
		r->debug_buffer_size[i] = info->progbufsize;

		/* Probe XLEN: a 64-bit abstract read succeeds only on RV64+. */
		int result = register_read_abstract(target, NULL, GDB_REGNO_S0, 64);
		if (result == ERROR_OK)
			r->xlen[i] = 64;
		else
			r->xlen[i] = 32;

		/* Nonzero return (ERROR_OK is 0) means the read failed. */
		if (register_read(target, &r->misa[i], GDB_REGNO_MISA)) {
			LOG_ERROR("Fatal: Failed to read MISA from hart %d.", i);
			return ERROR_FAIL;
		}

		/* Now init registers based on what we discovered. */
		if (riscv_init_registers(target) != ERROR_OK)
			return ERROR_FAIL;

		/* Display this as early as possible to help people who are using
		 * really slow simulators. */
		LOG_DEBUG(" hart %d: XLEN=%d, misa=0x%" PRIx64, i, r->xlen[i],
				r->misa[i]);
	}

	LOG_DEBUG("Enumerated %d harts", r->hart_count);

	if (r->hart_count == 0) {
		LOG_ERROR("No harts found!");
		return ERROR_FAIL;
	}

	/* Resumes all the harts, so the debugger can later pause them. */
	/* TODO: Only do this if the harts were halted to start with. */
	riscv_resume_all_harts(target);
	target->state = TARGET_RUNNING;

	target_set_examined(target);

	/* Some regression suites rely on seeing 'Examined RISC-V core' to know
	 * when they can connect with gdb/telnet.
	 * We will need to update those suites if we want to change that text. */
	LOG_INFO("Examined RISC-V core; found %d harts",
			riscv_count_harts(target));
	for (int i = 0; i < riscv_count_harts(target); ++i) {
		if (riscv_hart_enabled(target, i)) {
			LOG_INFO(" hart %d: XLEN=%d, misa=0x%" PRIx64, i, r->xlen[i],
					r->misa[i]);
		} else {
			LOG_INFO(" hart %d: currently disabled", i);
		}
	}
	return ERROR_OK;
}
1518
1519 int riscv013_authdata_read(struct target *target, uint32_t *value)
1520 {
1521 if (wait_for_authbusy(target, NULL) != ERROR_OK)
1522 return ERROR_FAIL;
1523
1524 return dmi_read(target, value, DMI_AUTHDATA);
1525 }
1526
1527 int riscv013_authdata_write(struct target *target, uint32_t value)
1528 {
1529 uint32_t before, after;
1530 if (wait_for_authbusy(target, &before) != ERROR_OK)
1531 return ERROR_FAIL;
1532
1533 dmi_write(target, DMI_AUTHDATA, value);
1534
1535 if (wait_for_authbusy(target, &after) != ERROR_OK)
1536 return ERROR_FAIL;
1537
1538 if (!get_field(before, DMI_DMSTATUS_AUTHENTICATED) &&
1539 get_field(after, DMI_DMSTATUS_AUTHENTICATED)) {
1540 LOG_INFO("authdata_write resulted in successful authentication");
1541 int result = ERROR_OK;
1542 dm013_info_t *dm = get_dm(target);
1543 target_list_t *entry;
1544 list_for_each_entry(entry, &dm->target_list, list) {
1545 if (examine(entry->target) != ERROR_OK)
1546 result = ERROR_FAIL;
1547 }
1548 return result;
1549 }
1550
1551 return ERROR_OK;
1552 }
1553
1554 static int init_target(struct command_context *cmd_ctx,
1555 struct target *target)
1556 {
1557 LOG_DEBUG("init");
1558 riscv_info_t *generic_info = (riscv_info_t *) target->arch_info;
1559
1560 generic_info->get_register = &riscv013_get_register;
1561 generic_info->set_register = &riscv013_set_register;
1562 generic_info->select_current_hart = &riscv013_select_current_hart;
1563 generic_info->is_halted = &riscv013_is_halted;
1564 generic_info->halt_current_hart = &riscv013_halt_current_hart;
1565 generic_info->resume_current_hart = &riscv013_resume_current_hart;
1566 generic_info->step_current_hart = &riscv013_step_current_hart;
1567 generic_info->on_halt = &riscv013_on_halt;
1568 generic_info->on_resume = &riscv013_on_resume;
1569 generic_info->on_step = &riscv013_on_step;
1570 generic_info->halt_reason = &riscv013_halt_reason;
1571 generic_info->read_debug_buffer = &riscv013_read_debug_buffer;
1572 generic_info->write_debug_buffer = &riscv013_write_debug_buffer;
1573 generic_info->execute_debug_buffer = &riscv013_execute_debug_buffer;
1574 generic_info->fill_dmi_write_u64 = &riscv013_fill_dmi_write_u64;
1575 generic_info->fill_dmi_read_u64 = &riscv013_fill_dmi_read_u64;
1576 generic_info->fill_dmi_nop_u64 = &riscv013_fill_dmi_nop_u64;
1577 generic_info->dmi_write_u64_bits = &riscv013_dmi_write_u64_bits;
1578 generic_info->authdata_read = &riscv013_authdata_read;
1579 generic_info->authdata_write = &riscv013_authdata_write;
1580 generic_info->dmi_read = &dmi_read;
1581 generic_info->dmi_write = &dmi_write;
1582 generic_info->version_specific = calloc(1, sizeof(riscv013_info_t));
1583 if (!generic_info->version_specific)
1584 return ERROR_FAIL;
1585 riscv013_info_t *info = get_info(target);
1586
1587 info->progbufsize = -1;
1588
1589 info->dmi_busy_delay = 0;
1590 info->bus_master_read_delay = 0;
1591 info->bus_master_write_delay = 0;
1592 info->ac_busy_delay = 0;
1593
1594 /* Assume all these abstract commands are supported until we learn
1595 * otherwise.
1596 * TODO: The spec allows eg. one CSR to be able to be accessed abstractly
1597 * while another one isn't. We don't track that this closely here, but in
1598 * the future we probably should. */
1599 info->abstract_read_csr_supported = true;
1600 info->abstract_write_csr_supported = true;
1601 info->abstract_read_fpr_supported = true;
1602 info->abstract_write_fpr_supported = true;
1603
1604 return ERROR_OK;
1605 }
1606
/* Assert ndmreset, optionally requesting a post-reset halt (reset_halt).
 * With an RTOS configured, every enabled hart is reset; otherwise only the
 * current hart. */
static int assert_reset(struct target *target)
{
	RISCV_INFO(r);

	select_dmi(target);

	/* Every dmcontrol write below must keep dmactive set, or the DM
	 * would reset itself. */
	uint32_t control_base = set_field(0, DMI_DMCONTROL_DMACTIVE, 1);

	if (target->rtos) {
		/* There's only one target, and OpenOCD thinks each hart is a thread.
		 * We must reset them all. */

		/* TODO: Try to use hasel in dmcontrol */

		/* Set haltreq for each hart. */
		uint32_t control = control_base;
		for (int i = 0; i < riscv_count_harts(target); ++i) {
			if (!riscv_hart_enabled(target, i))
				continue;

			/* haltreq makes the hart halt as soon as it leaves reset. */
			control = set_hartsel(control_base, i);
			control = set_field(control, DMI_DMCONTROL_HALTREQ,
					target->reset_halt ? 1 : 0);
			dmi_write(target, DMI_DMCONTROL, control);
		}
		/* Assert ndmreset */
		control = set_field(control, DMI_DMCONTROL_NDMRESET, 1);
		dmi_write(target, DMI_DMCONTROL, control);

	} else {
		/* Reset just this hart. */
		uint32_t control = set_hartsel(control_base, r->current_hartid);
		control = set_field(control, DMI_DMCONTROL_HALTREQ,
				target->reset_halt ? 1 : 0);
		control = set_field(control, DMI_DMCONTROL_NDMRESET, 1);
		dmi_write(target, DMI_DMCONTROL, control);
	}

	/* ndmreset stays asserted until deassert_reset() clears it. */
	target->state = TARGET_RESET;

	return ERROR_OK;
}
1649
1650 static int deassert_reset(struct target *target)
1651 {
1652 RISCV_INFO(r);
1653 RISCV013_INFO(info);
1654 select_dmi(target);
1655
1656 /* Clear the reset, but make sure haltreq is still set */
1657 uint32_t control = 0;
1658 control = set_field(control, DMI_DMCONTROL_HALTREQ, target->reset_halt ? 1 : 0);
1659 control = set_field(control, DMI_DMCONTROL_DMACTIVE, 1);
1660 dmi_write(target, DMI_DMCONTROL,
1661 set_hartsel(control, r->current_hartid));
1662
1663 uint32_t dmstatus;
1664 int dmi_busy_delay = info->dmi_busy_delay;
1665 time_t start = time(NULL);
1666
1667 for (int i = 0; i < riscv_count_harts(target); ++i) {
1668 int index = i;
1669 if (target->rtos) {
1670 if (!riscv_hart_enabled(target, index))
1671 continue;
1672 dmi_write(target, DMI_DMCONTROL,
1673 set_hartsel(control, index));
1674 } else {
1675 index = r->current_hartid;
1676 }
1677
1678 char *operation;
1679 uint32_t expected_field;
1680 if (target->reset_halt) {
1681 operation = "halt";
1682 expected_field = DMI_DMSTATUS_ALLHALTED;
1683 } else {
1684 operation = "run";
1685 expected_field = DMI_DMSTATUS_ALLRUNNING;
1686 }
1687 LOG_DEBUG("Waiting for hart %d to %s out of reset.", index, operation);
1688 while (1) {
1689 int result = dmstatus_read_timeout(target, &dmstatus, true,
1690 riscv_reset_timeout_sec);
1691 if (result == ERROR_TIMEOUT_REACHED)
1692 LOG_ERROR("Hart %d didn't complete a DMI read coming out of "
1693 "reset in %ds; Increase the timeout with riscv "
1694 "set_reset_timeout_sec.",
1695 index, riscv_reset_timeout_sec);
1696 if (result != ERROR_OK)
1697 return result;
1698 if (get_field(dmstatus, expected_field))
1699 break;
1700 if (time(NULL) - start > riscv_reset_timeout_sec) {
1701 LOG_ERROR("Hart %d didn't %s coming out of reset in %ds; "
1702 "dmstatus=0x%x; "
1703 "Increase the timeout with riscv set_reset_timeout_sec.",
1704 index, operation, riscv_reset_timeout_sec, dmstatus);
1705 return ERROR_FAIL;
1706 }
1707 }
1708 target->state = TARGET_HALTED;
1709
1710 if (get_field(dmstatus, DMI_DMSTATUS_ALLHAVERESET)) {
1711 /* Ack reset. */
1712 dmi_write(target, DMI_DMCONTROL,
1713 set_hartsel(control, index) |
1714 DMI_DMCONTROL_ACKHAVERESET);
1715 }
1716
1717 if (!target->rtos)
1718 break;
1719 }
1720 info->dmi_busy_delay = dmi_busy_delay;
1721 return ERROR_OK;
1722 }
1723
/**
 * Serialize @value into @buffer, little-endian.
 * @size in bytes; must be 1, 2, 4, or 8.
 */
static void write_to_buf(uint8_t *buffer, uint64_t value, unsigned size)
{
	/* Same valid sizes as the original switch; anything else is a bug. */
	assert(size == 1 || size == 2 || size == 4 || size == 8);
	/* Byte i holds bits [8*i, 8*i+8). */
	for (unsigned i = 0; i < size; i++)
		buffer[i] = (uint8_t) (value >> (8 * i));
}
1750
1751 static int execute_fence(struct target *target)
1752 {
1753 struct riscv_program program;
1754 riscv_program_init(&program, target);
1755 riscv_program_fence(&program);
1756 int result = riscv_program_exec(&program, target);
1757 if (result != ERROR_OK)
1758 LOG_ERROR("Unable to execute fence");
1759 return result;
1760 }
1761
1762 static void log_memory_access(target_addr_t address, uint64_t value,
1763 unsigned size_bytes, bool read)
1764 {
1765 if (debug_level < LOG_LVL_DEBUG)
1766 return;
1767
1768 char fmt[80];
1769 sprintf(fmt, "M[0x%" TARGET_PRIxADDR "] %ss 0x%%0%d" PRIx64,
1770 address, read ? "read" : "write", size_bytes * 2);
1771 value &= (((uint64_t) 0x1) << (size_bytes * 8)) - 1;
1772 LOG_DEBUG(fmt, value);
1773 }
1774
1775 /* Read the relevant sbdata regs depending on size, and put the results into
1776 * buffer. */
1777 static int read_memory_bus_word(struct target *target, target_addr_t address,
1778 uint32_t size, uint8_t *buffer)
1779 {
1780 uint32_t value;
1781 if (size > 12) {
1782 if (dmi_read(target, &value, DMI_SBDATA3) != ERROR_OK)
1783 return ERROR_FAIL;
1784 write_to_buf(buffer + 12, value, 4);
1785 log_memory_access(address + 12, value, 4, true);
1786 }
1787 if (size > 8) {
1788 if (dmi_read(target, &value, DMI_SBDATA2) != ERROR_OK)
1789 return ERROR_FAIL;
1790 write_to_buf(buffer + 8, value, 4);
1791 log_memory_access(address + 8, value, 4, true);
1792 }
1793 if (size > 4) {
1794 if (dmi_read(target, &value, DMI_SBDATA1) != ERROR_OK)
1795 return ERROR_FAIL;
1796 write_to_buf(buffer + 4, value, 4);
1797 log_memory_access(address + 4, value, 4, true);
1798 }
1799 if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
1800 return ERROR_FAIL;
1801 write_to_buf(buffer, value, MIN(size, 4));
1802 log_memory_access(address, value, MIN(size, 4), true);
1803 return ERROR_OK;
1804 }
1805
1806 static uint32_t sb_sbaccess(unsigned size_bytes)
1807 {
1808 switch (size_bytes) {
1809 case 1:
1810 return set_field(0, DMI_SBCS_SBACCESS, 0);
1811 case 2:
1812 return set_field(0, DMI_SBCS_SBACCESS, 1);
1813 case 4:
1814 return set_field(0, DMI_SBCS_SBACCESS, 2);
1815 case 8:
1816 return set_field(0, DMI_SBCS_SBACCESS, 3);
1817 case 16:
1818 return set_field(0, DMI_SBCS_SBACCESS, 4);
1819 }
1820 assert(0);
1821 return 0; /* Make mingw happy. */
1822 }
1823
1824 static target_addr_t sb_read_address(struct target *target)
1825 {
1826 RISCV013_INFO(info);
1827 unsigned sbasize = get_field(info->sbcs, DMI_SBCS_SBASIZE);
1828 target_addr_t address = 0;
1829 uint32_t v;
1830 if (sbasize > 32) {
1831 #if BUILD_TARGET64
1832 dmi_read(target, &v, DMI_SBADDRESS1);
1833 address |= v;
1834 address <<= 32;
1835 #endif
1836 }
1837 dmi_read(target, &v, DMI_SBADDRESS0);
1838 address |= v;
1839 return address;
1840 }
1841
1842 static int sb_write_address(struct target *target, target_addr_t address)
1843 {
1844 RISCV013_INFO(info);
1845 unsigned sbasize = get_field(info->sbcs, DMI_SBCS_SBASIZE);
1846 /* There currently is no support for >64-bit addresses in OpenOCD. */
1847 if (sbasize > 96)
1848 dmi_write(target, DMI_SBADDRESS3, 0);
1849 if (sbasize > 64)
1850 dmi_write(target, DMI_SBADDRESS2, 0);
1851 if (sbasize > 32)
1852 #if BUILD_TARGET64
1853 dmi_write(target, DMI_SBADDRESS1, address >> 32);
1854 #else
1855 dmi_write(target, DMI_SBADDRESS1, 0);
1856 #endif
1857 return dmi_write(target, DMI_SBADDRESS0, address);
1858 }
1859
1860 static int read_sbcs_nonbusy(struct target *target, uint32_t *sbcs)
1861 {
1862 time_t start = time(NULL);
1863 while (1) {
1864 if (dmi_read(target, sbcs, DMI_SBCS) != ERROR_OK)
1865 return ERROR_FAIL;
1866 if (!get_field(*sbcs, DMI_SBCS_SBBUSY))
1867 return ERROR_OK;
1868 if (time(NULL) - start > riscv_command_timeout_sec) {
1869 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
1870 "Increase the timeout with riscv set_command_timeout_sec.",
1871 riscv_command_timeout_sec, *sbcs);
1872 return ERROR_FAIL;
1873 }
1874 }
1875 }
1876
/* Read memory over the pre-ratification (v0) system bus interface, which
 * used sbsingleread/sbautoread bits that were dropped from the final 0.13
 * spec. Single accesses and autoincrementing block reads are handled
 * separately. */
static int read_memory_bus_v0(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
			TARGET_PRIxADDR, size, count, address);
	uint8_t *t_buffer = buffer;
	riscv_addr_t cur_addr = address;
	riscv_addr_t fin_addr = address + (count * size);
	uint32_t access = 0;

	/* v0-only sbcs bits, absent from the ratified debug_defines.h. */
	const int DMI_SBCS_SBSINGLEREAD_OFFSET = 20;
	const uint32_t DMI_SBCS_SBSINGLEREAD = (0x1U << DMI_SBCS_SBSINGLEREAD_OFFSET);

	const int DMI_SBCS_SBAUTOREAD_OFFSET = 15;
	const uint32_t DMI_SBCS_SBAUTOREAD = (0x1U << DMI_SBCS_SBAUTOREAD_OFFSET);

	/* ww favorise one off reading if there is an issue */
	if (count == 1) {
		for (uint32_t i = 0; i < count; i++) {
			if (dmi_read(target, &access, DMI_SBCS) != ERROR_OK)
				return ERROR_FAIL;
			dmi_write(target, DMI_SBADDRESS0, cur_addr);
			/* size/2 matching the bit access of the spec 0.13 */
			access = set_field(access, DMI_SBCS_SBACCESS, size/2);
			/* sbsingleread triggers exactly one bus read at sbaddress. */
			access = set_field(access, DMI_SBCS_SBSINGLEREAD, 1);
			LOG_DEBUG("\r\nread_memory: sab: access:  0x%08x", access);
			dmi_write(target, DMI_SBCS, access);
			/* 3) read */
			uint32_t value;
			if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("\r\nread_memory: sab: value:  0x%08x", value);
			write_to_buf(t_buffer, value, size);
			t_buffer += size;
			cur_addr += size;
		}
		return ERROR_OK;
	}

	/* has to be the same size if we want to read a block */
	LOG_DEBUG("reading block until final address 0x%" PRIx64, fin_addr);
	if (dmi_read(target, &access, DMI_SBCS) != ERROR_OK)
		return ERROR_FAIL;
	/* set current address */
	dmi_write(target, DMI_SBADDRESS0, cur_addr);
	/* 2) write sbaccess=2, sbsingleread,sbautoread,sbautoincrement
	 * size/2 matching the bit access of the spec 0.13 */
	access = set_field(access, DMI_SBCS_SBACCESS, size/2);
	access = set_field(access, DMI_SBCS_SBAUTOREAD, 1);
	access = set_field(access, DMI_SBCS_SBSINGLEREAD, 1);
	access = set_field(access, DMI_SBCS_SBAUTOINCREMENT, 1);
	LOG_DEBUG("\r\naccess:  0x%08x", access);
	dmi_write(target, DMI_SBCS, access);

	/* With autoread+autoincrement, each sbdata0 read also kicks off the
	 * bus read of the next word. */
	while (cur_addr < fin_addr) {
		LOG_DEBUG("\r\nsab:autoincrement: \r\n size: %d\tcount:%d\taddress: 0x%08"
				PRIx64, size, count, cur_addr);
		/* read */
		uint32_t value;
		if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
			return ERROR_FAIL;
		write_to_buf(t_buffer, value, size);
		cur_addr += size;
		t_buffer += size;

		/* if we are reaching last address, we must clear autoread */
		if (cur_addr == fin_addr && count != 1) {
			dmi_write(target, DMI_SBCS, 0);
			if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			write_to_buf(t_buffer, value, size);
		}
	}

	return ERROR_OK;
}
1953
/**
 * Read the requested memory using the system bus interface (sbcs version 1).
 *
 * Programs SBCS for auto-increment reads, writes the start address (which
 * triggers the first bus read), then drains SBDATA0 once per word.  On a
 * sbbusyerror the loop re-reads the stalled address from SBADDRESS0, bumps
 * the read delay, and retries from there.
 *
 * @param address start address; @param size bytes per access (must be one the
 * DM advertises in sbcs, checked by the caller read_memory());
 * @param count number of accesses; @param buffer receives count*size bytes.
 * @return ERROR_OK, or ERROR_FAIL on JTAG failure or a non-busy sberror.
 */
static int read_memory_bus_v1(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	RISCV013_INFO(info);
	target_addr_t next_address = address;
	target_addr_t end_address = address + count * size;

	while (next_address < end_address) {
		/* sbreadonaddr: writing the address below kicks off the first read.
		 * sbreadondata is only wanted while more than one word remains. */
		uint32_t sbcs = set_field(0, DMI_SBCS_SBREADONADDR, 1);
		sbcs |= sb_sbaccess(size);
		sbcs = set_field(sbcs, DMI_SBCS_SBAUTOINCREMENT, 1);
		sbcs = set_field(sbcs, DMI_SBCS_SBREADONDATA, count > 1);
		dmi_write(target, DMI_SBCS, sbcs);

		/* This address write will trigger the first read. */
		sb_write_address(target, next_address);

		if (info->bus_master_read_delay) {
			jtag_add_runtest(info->bus_master_read_delay, TAP_IDLE);
			if (jtag_execute_queue() != ERROR_OK) {
				LOG_ERROR("Failed to scan idle sequence");
				return ERROR_FAIL;
			}
		}

		/* Each SBDATA0 read below triggers the next bus read (sbreadondata),
		 * so stop one short; the last word is fetched after clearing it.
		 * NOTE(review): read_memory_bus_word() results are ignored here —
		 * a failed word read is only caught indirectly; consider checking. */
		for (uint32_t i = (next_address - address) / size; i < count - 1; i++) {
			read_memory_bus_word(target, address + i * size, size,
					buffer + i * size);
		}

		/* Last word: disable read-on-data first so this final SBDATA0 read
		 * does not start another bus access past the end of the range. */
		sbcs = set_field(sbcs, DMI_SBCS_SBREADONDATA, 0);
		dmi_write(target, DMI_SBCS, sbcs);

		read_memory_bus_word(target, address + (count - 1) * size, size,
				buffer + (count - 1) * size);

		if (read_sbcs_nonbusy(target, &sbcs) != ERROR_OK)
			return ERROR_FAIL;

		if (get_field(sbcs, DMI_SBCS_SBBUSYERROR)) {
			/* We read while the target was busy. Slow down and try again. */
			dmi_write(target, DMI_SBCS, DMI_SBCS_SBBUSYERROR);
			next_address = sb_read_address(target);
			info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
			continue;
		}

		unsigned error = get_field(sbcs, DMI_SBCS_SBERROR);
		if (error == 0) {
			next_address = end_address;
		} else {
			/* Some error indicating the bus access failed, but not because of
			 * something we did wrong. */
			dmi_write(target, DMI_SBCS, DMI_SBCS_SBERROR);
			return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}
2017
/**
 * Read the requested memory, taking care to execute every read exactly once,
 * even if cmderr=busy is encountered.
 *
 * Strategy: load a (load; addi s0) loop into the program buffer, point s0 at
 * the start address, then use abstractauto on data0 so that every DMI read of
 * DMI_DATA0 re-runs the program.  Reads are batched for speed; a busy response
 * is recovered by re-reading s0 and resuming from there.
 *
 * s0/s1 are saved on entry and restored on every exit path.
 *
 * @param size 1, 2 or 4 bytes per element; @param count element count.
 * @return ERROR_OK on success; ERROR_FAIL otherwise.
 */
static int read_memory_progbuf(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	RISCV013_INFO(info);

	int result = ERROR_OK;

	LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
			size, address);

	select_dmi(target);

	/* s0 holds the next address to write to
	 * s1 holds the next data value to write
	 */
	uint64_t s0, s1;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;
	if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
		return ERROR_FAIL;

	if (execute_fence(target) != ERROR_OK)
		return ERROR_FAIL;

	/* Write the program (load, increment) */
	struct riscv_program program;
	riscv_program_init(&program, target);
	switch (size) {
		case 1:
			riscv_program_lbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
			break;
		case 2:
			riscv_program_lhr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
			break;
		case 4:
			riscv_program_lwr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
			break;
		default:
			LOG_ERROR("Unsupported size: %d", size);
			return ERROR_FAIL;
	}
	riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, size);

	if (riscv_program_ebreak(&program) != ERROR_OK)
		return ERROR_FAIL;
	riscv_program_write(&program);

	/* Write address to S0, and execute buffer. */
	result = register_write_direct(target, GDB_REGNO_S0, address);
	if (result != ERROR_OK)
		goto error;
	uint32_t command = access_register_command(GDB_REGNO_S1, riscv_xlen(target),
			AC_ACCESS_REGISTER_TRANSFER |
			AC_ACCESS_REGISTER_POSTEXEC);
	result = execute_abstract_command(target, command);
	if (result != ERROR_OK)
		goto error;

	/* First read has just triggered. Result is in s1. */

	/* From here on, every DMI read of DATA0 re-executes the program buffer. */
	dmi_write(target, DMI_ABSTRACTAUTO,
			1 << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);

	/* read_addr is the next address that the hart will read from, which is the
	 * value in s0. */
	riscv_addr_t read_addr = address + size;
	/* The next address that we need to receive data for. */
	riscv_addr_t receive_addr = address;
	riscv_addr_t fin_addr = address + (count * size);
	/* The first batched DATA0 read returns the word already in flight, so the
	 * very first batch entry is discarded once (skip = 1). */
	unsigned skip = 1;
	while (read_addr < fin_addr) {
		LOG_DEBUG("read_addr=0x%" PRIx64 ", receive_addr=0x%" PRIx64
				", fin_addr=0x%" PRIx64, read_addr, receive_addr, fin_addr);
		/* The pipeline looks like this:
		 * memory -> s1 -> dm_data0 -> debugger
		 * It advances every time the debugger reads dmdata0.
		 * So at any time the debugger has just read mem[s0 - 3*size],
		 * dm_data0 contains mem[s0 - 2*size]
		 * s1 contains mem[s0-size] */

		LOG_DEBUG("creating burst to read from 0x%" PRIx64
				" up to 0x%" PRIx64, read_addr, fin_addr);
		assert(read_addr >= address && read_addr < fin_addr);
		struct riscv_batch *batch = riscv_batch_alloc(target, 32,
				info->dmi_busy_delay + info->ac_busy_delay);

		size_t reads = 0;
		for (riscv_addr_t addr = read_addr; addr < fin_addr; addr += size) {
			riscv_batch_add_dmi_read(batch, DMI_DATA0);

			reads++;
			if (riscv_batch_full(batch))
				break;
		}

		riscv_batch_run(batch);

		/* Wait for the target to finish performing the last abstract command,
		 * and update our copy of cmderr.
		 * NOTE(review): the two early returns below leak 'batch' and skip the
		 * ABSTRACTAUTO clear and s0/s1 restore done at 'error:'; consider
		 * 'riscv_batch_free(batch); result = ERROR_FAIL; goto error;'. */
		uint32_t abstractcs;
		if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
			return ERROR_FAIL;
		while (get_field(abstractcs, DMI_ABSTRACTCS_BUSY))
			if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
				return ERROR_FAIL;
		info->cmderr = get_field(abstractcs, DMI_ABSTRACTCS_CMDERR);

		unsigned cmderr = info->cmderr;
		riscv_addr_t next_read_addr;
		uint32_t dmi_data0 = -1;
		switch (info->cmderr) {
		case CMDERR_NONE:
			LOG_DEBUG("successful (partial?) memory read");
			next_read_addr = read_addr + reads * size;
			break;
		case CMDERR_BUSY:
			LOG_DEBUG("memory read resulted in busy response");

			/*
			 * If you want to exercise this code path, apply the following patch to spike:
--- a/riscv/debug_module.cc
+++ b/riscv/debug_module.cc
@@ -1,3 +1,5 @@
+#include <unistd.h>
+
 #include <cassert>

 #include "debug_module.h"
@@ -398,6 +400,15 @@ bool debug_module_t::perform_abstract_command()
       // Since the next instruction is what we will use, just use nother NOP
       // to get there.
       write32(debug_abstract, 1, addi(ZERO, ZERO, 0));
+
+      if (abstractauto.autoexecdata &&
+          program_buffer[0] == 0x83 &&
+          program_buffer[1] == 0x24 &&
+          program_buffer[2] == 0x04 &&
+          program_buffer[3] == 0 &&
+          rand() < RAND_MAX / 10) {
+        usleep(1000000);
+      }
     } else {
       write32(debug_abstract, 1, ebreak());
     }
			 */
			increase_ac_busy_delay(target);
			riscv013_clear_abstract_error(target);

			dmi_write(target, DMI_ABSTRACTAUTO, 0);

			/* This is definitely a good version of the value that we
			 * attempted to read when we discovered that the target was
			 * busy. */
			if (dmi_read(target, &dmi_data0, DMI_DATA0) != ERROR_OK) {
				riscv_batch_free(batch);
				goto error;
			}

			/* Clobbers DMI_DATA0. */
			result = register_read_direct(target, &next_read_addr,
					GDB_REGNO_S0);
			if (result != ERROR_OK) {
				riscv_batch_free(batch);
				goto error;
			}
			/* Restore the command, and execute it.
			 * Now DMI_DATA0 contains the next value just as it would if no
			 * error had occurred. */
			dmi_write(target, DMI_COMMAND, command);

			dmi_write(target, DMI_ABSTRACTAUTO,
					1 << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
			break;
		default:
			LOG_ERROR("error when reading memory, abstractcs=0x%08lx", (long)abstractcs);
			riscv013_clear_abstract_error(target);
			riscv_batch_free(batch);
			result = ERROR_FAIL;
			goto error;
		}

		/* Now read whatever we got out of the batch. */
		for (size_t i = 0; i < reads; i++) {
			if (read_addr >= next_read_addr)
				break;

			read_addr += size;

			if (skip > 0) {
				skip--;
				continue;
			}

			riscv_addr_t offset = receive_addr - address;
			uint64_t dmi_out = riscv_batch_get_dmi_read(batch, i);
			uint32_t value = get_field(dmi_out, DTM_DMI_DATA);
			write_to_buf(buffer + offset, value, size);
			log_memory_access(receive_addr, value, size, true);

			receive_addr += size;
		}
		riscv_batch_free(batch);

		/* On busy recovery, the word captured from DATA0 above is the one the
		 * batch failed to deliver; account for it here. */
		if (cmderr == CMDERR_BUSY) {
			riscv_addr_t offset = receive_addr - address;
			write_to_buf(buffer + offset, dmi_data0, size);
			log_memory_access(receive_addr, dmi_data0, size, true);
			read_addr += size;
			receive_addr += size;
		}
	}

	dmi_write(target, DMI_ABSTRACTAUTO, 0);

	if (count > 1) {
		/* Read the penultimate word. */
		uint32_t value;
		if (dmi_read(target, &value, DMI_DATA0) != ERROR_OK)
			goto error;
		write_to_buf(buffer + receive_addr - address, value, size);
		log_memory_access(receive_addr, value, size, true);
		receive_addr += size;
	}

	/* Read the last word. */
	uint64_t value;
	result = register_read_direct(target, &value, GDB_REGNO_S1);
	if (result != ERROR_OK)
		goto error;
	write_to_buf(buffer + receive_addr - address, value, size);
	log_memory_access(receive_addr, value, size, true);

	riscv_set_register(target, GDB_REGNO_S0, s0);
	riscv_set_register(target, GDB_REGNO_S1, s1);
	return ERROR_OK;

error:
	dmi_write(target, DMI_ABSTRACTAUTO, 0);

	riscv_set_register(target, GDB_REGNO_S0, s0);
	riscv_set_register(target, GDB_REGNO_S1, s1);
	return result;
}
2265
2266 static int read_memory(struct target *target, target_addr_t address,
2267 uint32_t size, uint32_t count, uint8_t *buffer)
2268 {
2269 RISCV013_INFO(info);
2270 if (info->progbufsize >= 2 && !riscv_prefer_sba)
2271 return read_memory_progbuf(target, address, size, count, buffer);
2272
2273 if ((get_field(info->sbcs, DMI_SBCS_SBACCESS8) && size == 1) ||
2274 (get_field(info->sbcs, DMI_SBCS_SBACCESS16) && size == 2) ||
2275 (get_field(info->sbcs, DMI_SBCS_SBACCESS32) && size == 4) ||
2276 (get_field(info->sbcs, DMI_SBCS_SBACCESS64) && size == 8) ||
2277 (get_field(info->sbcs, DMI_SBCS_SBACCESS128) && size == 16)) {
2278 if (get_field(info->sbcs, DMI_SBCS_SBVERSION) == 0)
2279 return read_memory_bus_v0(target, address, size, count, buffer);
2280 else if (get_field(info->sbcs, DMI_SBCS_SBVERSION) == 1)
2281 return read_memory_bus_v1(target, address, size, count, buffer);
2282 }
2283
2284 if (info->progbufsize >= 2)
2285 return read_memory_progbuf(target, address, size, count, buffer);
2286
2287 LOG_ERROR("Don't know how to read memory on this target.");
2288 return ERROR_FAIL;
2289 }
2290
2291 static int write_memory_bus_v0(struct target *target, target_addr_t address,
2292 uint32_t size, uint32_t count, const uint8_t *buffer)
2293 {
2294 /*1) write sbaddress: for singlewrite and autoincrement, we need to write the address once*/
2295 LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
2296 TARGET_PRIxADDR, size, count, address);
2297 dmi_write(target, DMI_SBADDRESS0, address);
2298 int64_t value = 0;
2299 int64_t access = 0;
2300 riscv_addr_t offset = 0;
2301 riscv_addr_t t_addr = 0;
2302 const uint8_t *t_buffer = buffer + offset;
2303
2304 /* B.8 Writing Memory, single write check if we write in one go */
2305 if (count == 1) { /* count is in bytes here */
2306 /* check the size */
2307 switch (size) {
2308 case 1:
2309 value = t_buffer[0];
2310 break;
2311 case 2:
2312 value = t_buffer[0]
2313 | ((uint32_t) t_buffer[1] << 8);
2314 break;
2315 case 4:
2316 value = t_buffer[0]
2317 | ((uint32_t) t_buffer[1] << 8)
2318 | ((uint32_t) t_buffer[2] << 16)
2319 | ((uint32_t) t_buffer[3] << 24);
2320 break;
2321 default:
2322 LOG_ERROR("unsupported access size: %d", size);
2323 return ERROR_FAIL;
2324 }
2325
2326 access = 0;
2327 access = set_field(access, DMI_SBCS_SBACCESS, size/2);
2328 dmi_write(target, DMI_SBCS, access);
2329 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
2330 LOG_DEBUG("\r\nwrite_memory:SAB: ONE OFF: value 0x%08" PRIx64, value);
2331 dmi_write(target, DMI_SBDATA0, value);
2332 return ERROR_OK;
2333 }
2334
2335 /*B.8 Writing Memory, using autoincrement*/
2336
2337 access = 0;
2338 access = set_field(access, DMI_SBCS_SBACCESS, size/2);
2339 access = set_field(access, DMI_SBCS_SBAUTOINCREMENT, 1);
2340 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
2341 dmi_write(target, DMI_SBCS, access);
2342
2343 /*2)set the value according to the size required and write*/
2344 for (riscv_addr_t i = 0; i < count; ++i) {
2345 offset = size*i;
2346 /* for monitoring only */
2347 t_addr = address + offset;
2348 t_buffer = buffer + offset;
2349
2350 switch (size) {
2351 case 1:
2352 value = t_buffer[0];
2353 break;
2354 case 2:
2355 value = t_buffer[0]
2356 | ((uint32_t) t_buffer[1] << 8);
2357 break;
2358 case 4:
2359 value = t_buffer[0]
2360 | ((uint32_t) t_buffer[1] << 8)
2361 | ((uint32_t) t_buffer[2] << 16)
2362 | ((uint32_t) t_buffer[3] << 24);
2363 break;
2364 default:
2365 LOG_ERROR("unsupported access size: %d", size);
2366 return ERROR_FAIL;
2367 }
2368 LOG_DEBUG("SAB:autoincrement: expected address: 0x%08x value: 0x%08x"
2369 PRIx64, (uint32_t)t_addr, (uint32_t)value);
2370 dmi_write(target, DMI_SBDATA0, value);
2371 }
2372 /*reset the autoincrement when finished (something weird is happening if this is not done at the end*/
2373 access = set_field(access, DMI_SBCS_SBAUTOINCREMENT, 0);
2374 dmi_write(target, DMI_SBCS, access);
2375
2376 return ERROR_OK;
2377 }
2378
/**
 * Write memory through the system bus interface (sbcs version 1).
 *
 * Programs sbautoincrement once, writes the start address, then streams each
 * word into SBDATA0 (with SBDATA1..3 first for accesses wider than 32 bits;
 * the SBDATA0 write is what triggers the bus access).  On sbbusyerror the
 * stalled address is re-read from SBADDRESS0 and the burst restarts there
 * with an increased write delay.
 *
 * @param size bytes per access (up to 16); @param count number of accesses.
 * @return ERROR_OK, or ERROR_FAIL on JTAG failure or a non-busy sberror.
 */
static int write_memory_bus_v1(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	RISCV013_INFO(info);
	uint32_t sbcs = sb_sbaccess(size);
	sbcs = set_field(sbcs, DMI_SBCS_SBAUTOINCREMENT, 1);
	dmi_write(target, DMI_SBCS, sbcs);

	target_addr_t next_address = address;
	target_addr_t end_address = address + count * size;

	sb_write_address(target, next_address);
	while (next_address < end_address) {
		for (uint32_t i = (next_address - address) / size; i < count; i++) {
			const uint8_t *p = buffer + i * size;
			/* Upper data words must be in place before SBDATA0 fires the
			 * actual bus write. */
			if (size > 12)
				dmi_write(target, DMI_SBDATA3,
						((uint32_t) p[12]) |
						(((uint32_t) p[13]) << 8) |
						(((uint32_t) p[14]) << 16) |
						(((uint32_t) p[15]) << 24));
			if (size > 8)
				dmi_write(target, DMI_SBDATA2,
						((uint32_t) p[8]) |
						(((uint32_t) p[9]) << 8) |
						(((uint32_t) p[10]) << 16) |
						(((uint32_t) p[11]) << 24));
			if (size > 4)
				dmi_write(target, DMI_SBDATA1,
						((uint32_t) p[4]) |
						(((uint32_t) p[5]) << 8) |
						(((uint32_t) p[6]) << 16) |
						(((uint32_t) p[7]) << 24));
			uint32_t value = p[0];
			if (size > 2) {
				value |= ((uint32_t) p[2]) << 16;
				value |= ((uint32_t) p[3]) << 24;
			}
			if (size > 1)
				value |= ((uint32_t) p[1]) << 8;
			dmi_write(target, DMI_SBDATA0, value);

			log_memory_access(address + i * size, value, size, false);

			if (info->bus_master_write_delay) {
				jtag_add_runtest(info->bus_master_write_delay, TAP_IDLE);
				if (jtag_execute_queue() != ERROR_OK) {
					LOG_ERROR("Failed to scan idle sequence");
					return ERROR_FAIL;
				}
			}
		}

		if (read_sbcs_nonbusy(target, &sbcs) != ERROR_OK)
			return ERROR_FAIL;

		if (get_field(sbcs, DMI_SBCS_SBBUSYERROR)) {
			/* We wrote while the target was busy. Slow down and try again. */
			dmi_write(target, DMI_SBCS, DMI_SBCS_SBBUSYERROR);
			next_address = sb_read_address(target);
			info->bus_master_write_delay += info->bus_master_write_delay / 10 + 1;
			continue;
		}

		unsigned error = get_field(sbcs, DMI_SBCS_SBERROR);
		if (error == 0) {
			next_address = end_address;
		} else {
			/* Some error indicating the bus access failed, but not because of
			 * something we did wrong. */
			dmi_write(target, DMI_SBCS, DMI_SBCS_SBERROR);
			return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}
2456
2457 static int write_memory_progbuf(struct target *target, target_addr_t address,
2458 uint32_t size, uint32_t count, const uint8_t *buffer)
2459 {
2460 RISCV013_INFO(info);
2461
2462 LOG_DEBUG("writing %d words of %d bytes to 0x%08lx", count, size, (long)address);
2463
2464 select_dmi(target);
2465
2466 /* s0 holds the next address to write to
2467 * s1 holds the next data value to write
2468 */
2469
2470 int result = ERROR_OK;
2471 uint64_t s0, s1;
2472 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
2473 return ERROR_FAIL;
2474 if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
2475 return ERROR_FAIL;
2476
2477 /* Write the program (store, increment) */
2478 struct riscv_program program;
2479 riscv_program_init(&program, target);
2480
2481 switch (size) {
2482 case 1:
2483 riscv_program_sbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
2484 break;
2485 case 2:
2486 riscv_program_shr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
2487 break;
2488 case 4:
2489 riscv_program_swr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
2490 break;
2491 default:
2492 LOG_ERROR("Unsupported size: %d", size);
2493 result = ERROR_FAIL;
2494 goto error;
2495 }
2496
2497 riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, size);
2498
2499 result = riscv_program_ebreak(&program);
2500 if (result != ERROR_OK)
2501 goto error;
2502 riscv_program_write(&program);
2503
2504 riscv_addr_t cur_addr = address;
2505 riscv_addr_t fin_addr = address + (count * size);
2506 bool setup_needed = true;
2507 LOG_DEBUG("writing until final address 0x%016" PRIx64, fin_addr);
2508 while (cur_addr < fin_addr) {
2509 LOG_DEBUG("transferring burst starting at address 0x%016" PRIx64,
2510 cur_addr);
2511
2512 struct riscv_batch *batch = riscv_batch_alloc(
2513 target,
2514 32,
2515 info->dmi_busy_delay + info->ac_busy_delay);
2516
2517 /* To write another word, we put it in S1 and execute the program. */
2518 unsigned start = (cur_addr - address) / size;
2519 for (unsigned i = start; i < count; ++i) {
2520 unsigned offset = size*i;
2521 const uint8_t *t_buffer = buffer + offset;
2522
2523 uint32_t value;
2524 switch (size) {
2525 case 1:
2526 value = t_buffer[0];
2527 break;
2528 case 2:
2529 value = t_buffer[0]
2530 | ((uint32_t) t_buffer[1] << 8);
2531 break;
2532 case 4:
2533 value = t_buffer[0]
2534 | ((uint32_t) t_buffer[1] << 8)
2535 | ((uint32_t) t_buffer[2] << 16)
2536 | ((uint32_t) t_buffer[3] << 24);
2537 break;
2538 default:
2539 LOG_ERROR("unsupported access size: %d", size);
2540 riscv_batch_free(batch);
2541 result = ERROR_FAIL;
2542 goto error;
2543 }
2544
2545 log_memory_access(address + offset, value, size, false);
2546 cur_addr += size;
2547
2548 if (setup_needed) {
2549 result = register_write_direct(target, GDB_REGNO_S0,
2550 address + offset);
2551 if (result != ERROR_OK) {
2552 riscv_batch_free(batch);
2553 goto error;
2554 }
2555
2556 /* Write value. */
2557 dmi_write(target, DMI_DATA0, value);
2558
2559 /* Write and execute command that moves value into S1 and
2560 * executes program buffer. */
2561 uint32_t command = access_register_command(GDB_REGNO_S1, 32,
2562 AC_ACCESS_REGISTER_POSTEXEC |
2563 AC_ACCESS_REGISTER_TRANSFER |
2564 AC_ACCESS_REGISTER_WRITE);
2565 result = execute_abstract_command(target, command);
2566 if (result != ERROR_OK) {
2567 riscv_batch_free(batch);
2568 goto error;
2569 }
2570
2571 /* Turn on autoexec */
2572 dmi_write(target, DMI_ABSTRACTAUTO,
2573 1 << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
2574
2575 setup_needed = false;
2576 } else {
2577 riscv_batch_add_dmi_write(batch, DMI_DATA0, value);
2578 if (riscv_batch_full(batch))
2579 break;
2580 }
2581 }
2582
2583 result = riscv_batch_run(batch);
2584 riscv_batch_free(batch);
2585 if (result != ERROR_OK)
2586 goto error;
2587
2588 /* Note that if the scan resulted in a Busy DMI response, it
2589 * is this read to abstractcs that will cause the dmi_busy_delay
2590 * to be incremented if necessary. */
2591
2592 uint32_t abstractcs;
2593 if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
2594 goto error;
2595 while (get_field(abstractcs, DMI_ABSTRACTCS_BUSY))
2596 if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
2597 return ERROR_FAIL;
2598 info->cmderr = get_field(abstractcs, DMI_ABSTRACTCS_CMDERR);
2599 switch (info->cmderr) {
2600 case CMDERR_NONE:
2601 LOG_DEBUG("successful (partial?) memory write");
2602 break;
2603 case CMDERR_BUSY:
2604 LOG_DEBUG("memory write resulted in busy response");
2605 riscv013_clear_abstract_error(target);
2606 increase_ac_busy_delay(target);
2607
2608 dmi_write(target, DMI_ABSTRACTAUTO, 0);
2609 result = register_read_direct(target, &cur_addr, GDB_REGNO_S0);
2610 if (result != ERROR_OK)
2611 goto error;
2612 setup_needed = true;
2613 break;
2614
2615 default:
2616 LOG_ERROR("error when writing memory, abstractcs=0x%08lx", (long)abstractcs);
2617 riscv013_clear_abstract_error(target);
2618 result = ERROR_FAIL;
2619 goto error;
2620 }
2621 }
2622
2623 error:
2624 dmi_write(target, DMI_ABSTRACTAUTO, 0);
2625
2626 if (register_write_direct(target, GDB_REGNO_S1, s1) != ERROR_OK)
2627 return ERROR_FAIL;
2628 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
2629 return ERROR_FAIL;
2630
2631 if (execute_fence(target) != ERROR_OK)
2632 return ERROR_FAIL;
2633
2634 return result;
2635 }
2636
2637 static int write_memory(struct target *target, target_addr_t address,
2638 uint32_t size, uint32_t count, const uint8_t *buffer)
2639 {
2640 RISCV013_INFO(info);
2641 if (info->progbufsize >= 2 && !riscv_prefer_sba)
2642 return write_memory_progbuf(target, address, size, count, buffer);
2643
2644 if ((get_field(info->sbcs, DMI_SBCS_SBACCESS8) && size == 1) ||
2645 (get_field(info->sbcs, DMI_SBCS_SBACCESS16) && size == 2) ||
2646 (get_field(info->sbcs, DMI_SBCS_SBACCESS32) && size == 4) ||
2647 (get_field(info->sbcs, DMI_SBCS_SBACCESS64) && size == 8) ||
2648 (get_field(info->sbcs, DMI_SBCS_SBACCESS128) && size == 16)) {
2649 if (get_field(info->sbcs, DMI_SBCS_SBVERSION) == 0)
2650 return write_memory_bus_v0(target, address, size, count, buffer);
2651 else if (get_field(info->sbcs, DMI_SBCS_SBVERSION) == 1)
2652 return write_memory_bus_v1(target, address, size, count, buffer);
2653 }
2654
2655 if (info->progbufsize >= 2)
2656 return write_memory_progbuf(target, address, size, count, buffer);
2657
2658 LOG_ERROR("Don't know how to write memory on this target.");
2659 return ERROR_FAIL;
2660 }
2661
2662 static int arch_state(struct target *target)
2663 {
2664 return ERROR_OK;
2665 }
2666
/* OpenOCD target method table for RISC-V debug spec 0.13. Run control is
 * routed through the generic riscv.c layer (riscv_openocd_*); memory access
 * dispatches between progbuf and system bus (see read_memory/write_memory). */
struct target_type riscv013_target = {
	.name = "riscv",

	/* Lifecycle. */
	.init_target = init_target,
	.deinit_target = deinit_target,
	.examine = examine,

	/* Run control (shared RISC-V layer). */
	.poll = &riscv_openocd_poll,
	.halt = &riscv_openocd_halt,
	.resume = &riscv_openocd_resume,
	.step = &riscv_openocd_step,

	.assert_reset = assert_reset,
	.deassert_reset = deassert_reset,

	/* Memory access. */
	.read_memory = read_memory,
	.write_memory = write_memory,

	.arch_state = arch_state,
};
2687
2688 /*** 0.13-specific implementations of various RISC-V helper functions. ***/
2689 static int riscv013_get_register(struct target *target,
2690 riscv_reg_t *value, int hid, int rid)
2691 {
2692 LOG_DEBUG("reading register %s on hart %d", gdb_regno_name(rid), hid);
2693
2694 riscv_set_current_hartid(target, hid);
2695
2696 int result = ERROR_OK;
2697 if (rid == GDB_REGNO_PC) {
2698 result = register_read(target, value, GDB_REGNO_DPC);
2699 LOG_DEBUG("read PC from DPC: 0x%016" PRIx64, *value);
2700 } else if (rid == GDB_REGNO_PRIV) {
2701 uint64_t dcsr;
2702 result = register_read(target, &dcsr, GDB_REGNO_DCSR);
2703 *value = get_field(dcsr, CSR_DCSR_PRV);
2704 } else {
2705 result = register_read(target, value, rid);
2706 if (result != ERROR_OK)
2707 *value = -1;
2708 }
2709
2710 return result;
2711 }
2712
2713 static int riscv013_set_register(struct target *target, int hid, int rid, uint64_t value)
2714 {
2715 LOG_DEBUG("writing 0x%" PRIx64 " to register %s on hart %d", value,
2716 gdb_regno_name(rid), hid);
2717
2718 riscv_set_current_hartid(target, hid);
2719
2720 if (rid <= GDB_REGNO_XPR31) {
2721 return register_write_direct(target, rid, value);
2722 } else if (rid == GDB_REGNO_PC) {
2723 LOG_DEBUG("writing PC to DPC: 0x%016" PRIx64, value);
2724 register_write_direct(target, GDB_REGNO_DPC, value);
2725 uint64_t actual_value;
2726 register_read_direct(target, &actual_value, GDB_REGNO_DPC);
2727 LOG_DEBUG(" actual DPC written: 0x%016" PRIx64, actual_value);
2728 if (value != actual_value) {
2729 LOG_ERROR("Written PC (0x%" PRIx64 ") does not match read back "
2730 "value (0x%" PRIx64 ")", value, actual_value);
2731 return ERROR_FAIL;
2732 }
2733 } else if (rid == GDB_REGNO_PRIV) {
2734 uint64_t dcsr;
2735 register_read(target, &dcsr, GDB_REGNO_DCSR);
2736 dcsr = set_field(dcsr, CSR_DCSR_PRV, value);
2737 return register_write_direct(target, GDB_REGNO_DCSR, dcsr);
2738 } else {
2739 return register_write_direct(target, rid, value);
2740 }
2741
2742 return ERROR_OK;
2743 }
2744
2745 static int riscv013_select_current_hart(struct target *target)
2746 {
2747 RISCV_INFO(r);
2748
2749 dm013_info_t *dm = get_dm(target);
2750 if (r->current_hartid == dm->current_hartid)
2751 return ERROR_OK;
2752
2753 uint32_t dmcontrol;
2754 /* TODO: can't we just "dmcontrol = DMI_DMACTIVE"? */
2755 if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
2756 return ERROR_FAIL;
2757 dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
2758 int result = dmi_write(target, DMI_DMCONTROL, dmcontrol);
2759 dm->current_hartid = r->current_hartid;
2760 return result;
2761 }
2762
2763 static int riscv013_halt_current_hart(struct target *target)
2764 {
2765 RISCV_INFO(r);
2766 LOG_DEBUG("halting hart %d", r->current_hartid);
2767 if (riscv_is_halted(target))
2768 LOG_ERROR("Hart %d is already halted!", r->current_hartid);
2769
2770 /* Issue the halt command, and then wait for the current hart to halt. */
2771 uint32_t dmcontrol;
2772 if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
2773 return ERROR_FAIL;
2774 dmcontrol = set_field(dmcontrol, DMI_DMCONTROL_HALTREQ, 1);
2775 dmi_write(target, DMI_DMCONTROL, dmcontrol);
2776 for (size_t i = 0; i < 256; ++i)
2777 if (riscv_is_halted(target))
2778 break;
2779
2780 if (!riscv_is_halted(target)) {
2781 uint32_t dmstatus;
2782 if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
2783 return ERROR_FAIL;
2784 if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
2785 return ERROR_FAIL;
2786
2787 LOG_ERROR("unable to halt hart %d", r->current_hartid);
2788 LOG_ERROR(" dmcontrol=0x%08x", dmcontrol);
2789 LOG_ERROR(" dmstatus =0x%08x", dmstatus);
2790 return ERROR_FAIL;
2791 }
2792
2793 dmcontrol = set_field(dmcontrol, DMI_DMCONTROL_HALTREQ, 0);
2794 dmi_write(target, DMI_DMCONTROL, dmcontrol);
2795
2796 return ERROR_OK;
2797 }
2798
/* Resume is the non-stepping flavor of step-or-resume. */
static int riscv013_resume_current_hart(struct target *target)
{
	const bool step = false;
	return riscv013_step_or_resume_current_hart(target, step);
}
2803
/* Single-step is the stepping flavor of step-or-resume. */
static int riscv013_step_current_hart(struct target *target)
{
	const bool step = true;
	return riscv013_step_or_resume_current_hart(target, step);
}
2808
/* Pre-resume hook: configure DCSR without the step bit. */
static int riscv013_on_resume(struct target *target)
{
	const bool step = false;
	return riscv013_on_step_or_resume(target, step);
}
2813
/* Pre-step hook: configure DCSR with the step bit set. */
static int riscv013_on_step(struct target *target)
{
	const bool step = true;
	return riscv013_on_step_or_resume(target, step);
}
2818
2819 static int riscv013_on_halt(struct target *target)
2820 {
2821 return ERROR_OK;
2822 }
2823
/**
 * Return true iff the currently selected hart is halted, based on
 * dmstatus.allhalted.  Also reports unavailable/nonexistent harts and, on an
 * unexpected reset (anyhavereset), acknowledges it and re-requests a halt if
 * we believed the target was halted.
 */
static bool riscv013_is_halted(struct target *target)
{
	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
		return false;
	if (get_field(dmstatus, DMI_DMSTATUS_ANYUNAVAIL))
		LOG_ERROR("Hart %d is unavailable.", riscv_current_hartid(target));
	if (get_field(dmstatus, DMI_DMSTATUS_ANYNONEXISTENT))
		LOG_ERROR("Hart %d doesn't exist.", riscv_current_hartid(target));
	if (get_field(dmstatus, DMI_DMSTATUS_ANYHAVERESET)) {
		int hartid = riscv_current_hartid(target);
		LOG_INFO("Hart %d unexpectedly reset!", hartid);
		/* TODO: Can we make this more obvious to eg. a gdb user? */
		uint32_t dmcontrol = DMI_DMCONTROL_DMACTIVE |
			DMI_DMCONTROL_ACKHAVERESET;
		dmcontrol = set_hartsel(dmcontrol, hartid);
		/* If we had been halted when we reset, request another halt. If we
		 * ended up running out of reset, then the user will (hopefully) get a
		 * message that a reset happened, that the target is running, and then
		 * that it is halted again once the request goes through.
		 */
		if (target->state == TARGET_HALTED)
			dmcontrol |= DMI_DMCONTROL_HALTREQ;
		dmi_write(target, DMI_DMCONTROL, dmcontrol);
	}
	return get_field(dmstatus, DMI_DMSTATUS_ALLHALTED);
}
2851
2852 static enum riscv_halt_reason riscv013_halt_reason(struct target *target)
2853 {
2854 riscv_reg_t dcsr;
2855 int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
2856 if (result != ERROR_OK)
2857 return RISCV_HALT_UNKNOWN;
2858
2859 switch (get_field(dcsr, CSR_DCSR_CAUSE)) {
2860 case CSR_DCSR_CAUSE_SWBP:
2861 return RISCV_HALT_BREAKPOINT;
2862 case CSR_DCSR_CAUSE_TRIGGER:
2863 /* We could get here before triggers are enumerated if a trigger was
2864 * already set when we connected. Force enumeration now, which has the
2865 * side effect of clearing any triggers we did not set. */
2866 riscv_enumerate_triggers(target);
2867 return RISCV_HALT_TRIGGER;
2868 case CSR_DCSR_CAUSE_STEP:
2869 return RISCV_HALT_SINGLESTEP;
2870 case CSR_DCSR_CAUSE_DEBUGINT:
2871 case CSR_DCSR_CAUSE_HALT:
2872 return RISCV_HALT_INTERRUPT;
2873 }
2874
2875 LOG_ERROR("Unknown DCSR cause field: %x", (int)get_field(dcsr, CSR_DCSR_CAUSE));
2876 LOG_ERROR(" dcsr=0x%016lx", (long)dcsr);
2877 return RISCV_HALT_UNKNOWN;
2878 }
2879
2880 int riscv013_write_debug_buffer(struct target *target, unsigned index, riscv_insn_t data)
2881 {
2882 return dmi_write(target, DMI_PROGBUF0 + index, data);
2883 }
2884
2885 riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned index)
2886 {
2887 uint32_t value;
2888 dmi_read(target, &value, DMI_PROGBUF0 + index);
2889 return value;
2890 }
2891
2892 int riscv013_execute_debug_buffer(struct target *target)
2893 {
2894 uint32_t run_program = 0;
2895 run_program = set_field(run_program, AC_ACCESS_REGISTER_SIZE, 2);
2896 run_program = set_field(run_program, AC_ACCESS_REGISTER_POSTEXEC, 1);
2897 run_program = set_field(run_program, AC_ACCESS_REGISTER_TRANSFER, 0);
2898 run_program = set_field(run_program, AC_ACCESS_REGISTER_REGNO, 0x1000);
2899
2900 return execute_abstract_command(target, run_program);
2901 }
2902
2903 void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d)
2904 {
2905 RISCV013_INFO(info);
2906 buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_WRITE);
2907 buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, d);
2908 buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
2909 }
2910
2911 void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a)
2912 {
2913 RISCV013_INFO(info);
2914 buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_READ);
2915 buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
2916 buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
2917 }
2918
2919 void riscv013_fill_dmi_nop_u64(struct target *target, char *buf)
2920 {
2921 RISCV013_INFO(info);
2922 buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_NOP);
2923 buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
2924 buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, 0);
2925 }
2926
2927 int riscv013_dmi_write_u64_bits(struct target *target)
2928 {
2929 RISCV013_INFO(info);
2930 return info->abits + DTM_DMI_DATA_LENGTH + DTM_DMI_OP_LENGTH;
2931 }
2932
2933 static int maybe_execute_fence_i(struct target *target)
2934 {
2935 RISCV013_INFO(info);
2936 RISCV_INFO(r);
2937 if (info->progbufsize + r->impebreak >= 2) {
2938 struct riscv_program program;
2939 riscv_program_init(&program, target);
2940 if (riscv_program_fence_i(&program) != ERROR_OK)
2941 return ERROR_FAIL;
2942 if (riscv_program_exec(&program, target) != ERROR_OK) {
2943 LOG_ERROR("Failed to execute fence.i");
2944 return ERROR_FAIL;
2945 }
2946 }
2947 return ERROR_OK;
2948 }
2949
2950 /* Helper Functions. */
2951 static int riscv013_on_step_or_resume(struct target *target, bool step)
2952 {
2953 if (maybe_execute_fence_i(target) != ERROR_OK)
2954 return ERROR_FAIL;
2955
2956 /* We want to twiddle some bits in the debug CSR so debugging works. */
2957 riscv_reg_t dcsr;
2958 int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
2959 if (result != ERROR_OK)
2960 return result;
2961 dcsr = set_field(dcsr, CSR_DCSR_STEP, step);
2962 dcsr = set_field(dcsr, CSR_DCSR_EBREAKM, 1);
2963 dcsr = set_field(dcsr, CSR_DCSR_EBREAKS, 1);
2964 dcsr = set_field(dcsr, CSR_DCSR_EBREAKU, 1);
2965 return riscv_set_register(target, GDB_REGNO_DCSR, dcsr);
2966 }
2967
/* Resume (or single-step, when 'step' is set) the currently-selected hart by
 * asserting dmcontrol.resumereq, then poll dmstatus until the hart
 * acknowledges.  dcsr.step is assumed to already be configured by
 * riscv013_on_step_or_resume().  Returns ERROR_OK / ERROR_FAIL. */
static int riscv013_step_or_resume_current_hart(struct target *target, bool step)
{
	RISCV_INFO(r);
	LOG_DEBUG("resuming hart %d (for step?=%d)", r->current_hartid, step);
	if (!riscv_is_halted(target)) {
		LOG_ERROR("Hart %d is not halted!", r->current_hartid);
		return ERROR_FAIL;
	}

	/* Sync instruction fetch with any debugger memory writes before the
	 * hart starts running (best effort; skipped on tiny progbufs). */
	if (maybe_execute_fence_i(target) != ERROR_OK)
		return ERROR_FAIL;

	/* Issue the resume command, and then wait for the current hart to resume. */
	uint32_t dmcontrol = DMI_DMCONTROL_DMACTIVE;
	dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
	dmi_write(target, DMI_DMCONTROL, dmcontrol | DMI_DMCONTROL_RESUMEREQ);

	uint32_t dmstatus;
	for (size_t i = 0; i < 256; ++i) {
		usleep(10);
		if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
			return ERROR_FAIL;
		if (get_field(dmstatus, DMI_DMSTATUS_ALLRESUMEACK) == 0)
			continue;
		/* For a single step, resumeack alone is not enough: wait until
		 * the hart has halted again after executing one instruction. */
		if (step && get_field(dmstatus, DMI_DMSTATUS_ALLHALTED) == 0)
			continue;

		/* Success: deassert resumereq now that it was acknowledged. */
		dmi_write(target, DMI_DMCONTROL, dmcontrol);
		return ERROR_OK;
	}

	/* Timed out; dump dmcontrol/dmstatus to aid diagnosis. */
	LOG_ERROR("unable to resume hart %d", r->current_hartid);
	if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
		return ERROR_FAIL;
	LOG_ERROR(" dmcontrol=0x%08x", dmcontrol);
	if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
		return ERROR_FAIL;
	LOG_ERROR(" dmstatus =0x%08x", dmstatus);

	if (step) {
		/* A failed step leaves the hart running; halt it so the
		 * debugger's state stays consistent, and report OK since the
		 * hart ends up halted as the caller expects after a step. */
		LOG_ERROR(" was stepping, halting");
		riscv013_halt_current_hart(target);
		return ERROR_OK;
	}

	return ERROR_FAIL;
}
3015
3016 void riscv013_clear_abstract_error(struct target *target)
3017 {
3018 /* Wait for busy to go away. */
3019 time_t start = time(NULL);
3020 uint32_t abstractcs;
3021 dmi_read(target, &abstractcs, DMI_ABSTRACTCS);
3022 while (get_field(abstractcs, DMI_ABSTRACTCS_BUSY)) {
3023 dmi_read(target, &abstractcs, DMI_ABSTRACTCS);
3024
3025 if (time(NULL) - start > riscv_command_timeout_sec) {
3026 LOG_ERROR("abstractcs.busy is not going low after %d seconds "
3027 "(abstractcs=0x%x). The target is either really slow or "
3028 "broken. You could increase the timeout with riscv "
3029 "set_command_timeout_sec.",
3030 riscv_command_timeout_sec, abstractcs);
3031 break;
3032 }
3033 }
3034 /* Clear the error status. */
3035 dmi_write(target, DMI_ABSTRACTCS, abstractcs & DMI_ABSTRACTCS_CMDERR);
3036 }