Lots of RISC-V improvements.
[openocd.git] / src / target / riscv / riscv-013.c
1 /*
2 * Support for RISC-V, debug version 0.13, which is currently (2/4/17) the
3 * latest draft.
4 */
5
6 #include <assert.h>
7 #include <stdlib.h>
8 #include <time.h>
9
10 #ifdef HAVE_CONFIG_H
11 #include "config.h"
12 #endif
13
14 #include "target/target.h"
15 #include "target/algorithm.h"
16 #include "target/target_type.h"
17 #include "log.h"
18 #include "jtag/jtag.h"
19 #include "target/register.h"
20 #include "target/breakpoints.h"
21 #include "helper/time_support.h"
22 #include "helper/list.h"
23 #include "riscv.h"
24 #include "debug_defines.h"
25 #include "rtos/rtos.h"
26 #include "program.h"
27 #include "asm.h"
28 #include "batch.h"
29
30 #define DMI_DATA1 (DMI_DATA0 + 1)
31 #define DMI_PROGBUF1 (DMI_PROGBUF0 + 1)
32
33 static int riscv013_on_step_or_resume(struct target *target, bool step);
34 static int riscv013_step_or_resume_current_hart(struct target *target, bool step);
35 static void riscv013_clear_abstract_error(struct target *target);
36
37 /* Implementations of the functions in riscv_info_t. */
38 static int riscv013_get_register(struct target *target,
39 riscv_reg_t *value, int hid, int rid);
40 static int riscv013_set_register(struct target *target, int hartid, int regid, uint64_t value);
41 static int riscv013_select_current_hart(struct target *target);
42 static int riscv013_halt_current_hart(struct target *target);
43 static int riscv013_resume_current_hart(struct target *target);
44 static int riscv013_step_current_hart(struct target *target);
45 static int riscv013_on_halt(struct target *target);
46 static int riscv013_on_step(struct target *target);
47 static int riscv013_on_resume(struct target *target);
48 static bool riscv013_is_halted(struct target *target);
49 static enum riscv_halt_reason riscv013_halt_reason(struct target *target);
50 static int riscv013_write_debug_buffer(struct target *target, unsigned index,
51 riscv_insn_t d);
52 static riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned
53 index);
54 static int riscv013_execute_debug_buffer(struct target *target);
55 static void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d);
56 static void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a);
57 static int riscv013_dmi_write_u64_bits(struct target *target);
58 static void riscv013_fill_dmi_nop_u64(struct target *target, char *buf);
59 static int register_read(struct target *target, uint64_t *value, uint32_t number);
60 static int register_read_direct(struct target *target, uint64_t *value, uint32_t number);
61 static int register_write_direct(struct target *target, unsigned number,
62 uint64_t value);
63 static int read_memory(struct target *target, target_addr_t address,
64 uint32_t size, uint32_t count, uint8_t *buffer);
65 static int write_memory(struct target *target, target_addr_t address,
66 uint32_t size, uint32_t count, const uint8_t *buffer);
67 static int riscv013_test_sba_config_reg(struct target *target, target_addr_t legal_address,
68 uint32_t num_words, target_addr_t illegal_address, bool run_sbbusyerror_test);
69 void write_memory_sba_simple(struct target *target, target_addr_t addr, uint32_t* write_data,
70 uint32_t write_size, uint32_t sbcs);
71 void read_memory_sba_simple(struct target *target, target_addr_t addr,
72 uint32_t *rd_buf, uint32_t read_size, uint32_t sbcs);
73 static int riscv013_test_compliance(struct target *target);
74
75 /**
76 * Since almost everything can be accomplished by scanning the dbus register, all
77 * functions here assume dbus is already selected. The exceptions are functions
78 * called directly by OpenOCD, which can't assume anything about what's
79 * currently in IR. They should set IR to dbus explicitly.
80 */
81
82 #define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
83 #define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
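/* Worked example, assuming the mask covers a single contiguous bit field:
 * with mask = 0x0000ff00, (mask & ~(mask << 1)) isolates the mask's lowest
 * set bit (0x100), so get_field(0x1234abcd, 0x0000ff00) == 0xab and
 * set_field(0, 0x0000ff00, 0x12) == 0x1200. */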
84
85 #define DIM(x) (sizeof(x)/sizeof(*x))
86
87 #define CSR_DCSR_CAUSE_SWBP 1
88 #define CSR_DCSR_CAUSE_TRIGGER 2
89 #define CSR_DCSR_CAUSE_DEBUGINT 3
90 #define CSR_DCSR_CAUSE_STEP 4
91 #define CSR_DCSR_CAUSE_HALT 5
92
93 #define RISCV013_INFO(r) riscv013_info_t *r = get_info(target)
94
95 /*** JTAG registers. ***/
96
97 typedef enum {
98 DMI_OP_NOP = 0,
99 DMI_OP_READ = 1,
100 DMI_OP_WRITE = 2
101 } dmi_op_t;
102 typedef enum {
103 DMI_STATUS_SUCCESS = 0,
104 DMI_STATUS_FAILED = 2,
105 DMI_STATUS_BUSY = 3
106 } dmi_status_t;
107
108 typedef enum {
109 RE_OK,
110 RE_FAIL,
111 RE_AGAIN
112 } riscv_error_t;
113
114 typedef enum slot {
115 SLOT0,
116 SLOT1,
117 SLOT_LAST,
118 } slot_t;
119
120 /*** Debug Bus registers. ***/
121
122 #define CMDERR_NONE 0
123 #define CMDERR_BUSY 1
124 #define CMDERR_NOT_SUPPORTED 2
125 #define CMDERR_EXCEPTION 3
126 #define CMDERR_HALT_RESUME 4
127 #define CMDERR_OTHER 7
128
129 /*** Info about the core being debugged. ***/
130
131 struct trigger {
132 uint64_t address;
133 uint32_t length;
134 uint64_t mask;
135 uint64_t value;
136 bool read, write, execute;
137 int unique_id;
138 };
139
140 typedef enum {
141 YNM_MAYBE,
142 YNM_YES,
143 YNM_NO
144 } yes_no_maybe_t;
145
146 typedef struct {
147 struct list_head list;
148 int abs_chain_position;
149 /* Indicates we already reset this DM, so we don't need to do it again. */
150 bool was_reset;
151 /* Targets that are connected to this DM. */
152 struct list_head target_list;
153 /* The currently selected hartid on this DM. */
154 int current_hartid;
155 } dm013_info_t;
156
157 typedef struct {
158 struct list_head list;
159 struct target *target;
160 } target_list_t;
161
162 typedef struct {
163 /* Number of address bits in the dbus register. */
164 unsigned abits;
165 /* Number of abstract command data registers. */
166 unsigned datacount;
167 /* Number of words in the Program Buffer. */
168 unsigned progbufsize;
169
170 /* We cache the read-only bits of sbcs here. */
171 uint32_t sbcs;
172
173 yes_no_maybe_t progbuf_writable;
174 /* We only need the address so that we know the alignment of the buffer. */
175 riscv_addr_t progbuf_address;
176
177 /* Number of run-test/idle cycles the target requests we do after each dbus
178 * access. */
179 unsigned int dtmcs_idle;
180
181 /* This value is incremented every time a dbus access comes back as "busy".
182 * It's used to determine how many run-test/idle cycles to feed the target
183 * in between accesses. */
184 unsigned int dmi_busy_delay;
185
186 /* Number of run-test/idle cycles to add between consecutive bus master
187 * reads/writes respectively. */
188 unsigned int bus_master_write_delay, bus_master_read_delay;
189
190 /* This value is increased every time we try to execute two commands
191 * consecutively and the second one fails because the previous one hadn't
192 * completed yet. It's used to add extra run-test/idle cycles after
193 * starting a command, so we don't have to waste time checking for busy to
194 * go low. */
195 unsigned int ac_busy_delay;
196
197 bool abstract_read_csr_supported;
198 bool abstract_write_csr_supported;
199 bool abstract_read_fpr_supported;
200 bool abstract_write_fpr_supported;
201
202 /* When a function returns some error due to a failure indicated by the
203 * target in cmderr, the caller can look here to see what that error was.
204 * (Compare with errno.) */
205 uint8_t cmderr;
206
207 /* Some fields from hartinfo. */
208 uint8_t datasize;
209 uint8_t dataaccess;
210 int16_t dataaddr;
211
212 /* The width of the hartsel field. */
213 unsigned hartsellen;
214
215 /* DM that provides access to this target. */
216 dm013_info_t *dm;
217 } riscv013_info_t;
218
219 LIST_HEAD(dm_list);
220
221 static riscv013_info_t *get_info(const struct target *target)
222 {
223 riscv_info_t *info = (riscv_info_t *) target->arch_info;
224 return (riscv013_info_t *) info->version_specific;
225 }
226
227 /**
228 * Return the DM structure for this target. If there isn't one, find it in the
229 * global list of DMs. If it's not in there, then create one and initialize it
230 * to 0.
231 */
232 static dm013_info_t *get_dm(struct target *target)
233 {
234 RISCV013_INFO(info);
235 if (info->dm)
236 return info->dm;
237
238 int abs_chain_position = target->tap->abs_chain_position;
239
240 dm013_info_t *entry;
241 dm013_info_t *dm = NULL;
242 list_for_each_entry(entry, &dm_list, list) {
243 if (entry->abs_chain_position == abs_chain_position) {
244 dm = entry;
245 break;
246 }
247 }
248
249 if (!dm) {
250 dm = calloc(1, sizeof(dm013_info_t));
251 dm->abs_chain_position = abs_chain_position;
252 dm->current_hartid = -1;
253 INIT_LIST_HEAD(&dm->target_list);
254 list_add(&dm->list, &dm_list);
255 }
256
257 info->dm = dm;
258 target_list_t *target_entry;
259 list_for_each_entry(target_entry, &dm->target_list, list) {
260 if (target_entry->target == target)
261 return dm;
262 }
263 target_entry = calloc(1, sizeof(*target_entry));
264 target_entry->target = target;
265 list_add(&target_entry->list, &dm->target_list);
266
267 return dm;
268 }
269
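/** Return `initial` with the hartsello/hartselhi fields of dmcontrol
 * replaced so that they select hart `index`; all other bits are left
 * untouched. */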
270 static uint32_t set_hartsel(uint32_t initial, uint32_t index)
271 {
272 initial &= ~DMI_DMCONTROL_HARTSELLO;
273 initial &= ~DMI_DMCONTROL_HARTSELHI;
274
275 uint32_t index_lo = index & ((1 << DMI_DMCONTROL_HARTSELLO_LENGTH) - 1);
276 initial |= index_lo << DMI_DMCONTROL_HARTSELLO_OFFSET;
277 uint32_t index_hi = index >> DMI_DMCONTROL_HARTSELLO_LENGTH;
278 assert(index_hi < 1 << DMI_DMCONTROL_HARTSELHI_LENGTH);
279 initial |= index_hi << DMI_DMCONTROL_HARTSELHI_OFFSET;
280
281 return initial;
282 }
283
284 static void decode_dmi(char *text, unsigned address, unsigned data)
285 {
286 static const struct {
287 unsigned address;
288 uint64_t mask;
289 const char *name;
290 } description[] = {
291 { DMI_DMCONTROL, DMI_DMCONTROL_HALTREQ, "haltreq" },
292 { DMI_DMCONTROL, DMI_DMCONTROL_RESUMEREQ, "resumereq" },
293 { DMI_DMCONTROL, DMI_DMCONTROL_HARTRESET, "hartreset" },
294 { DMI_DMCONTROL, DMI_DMCONTROL_HASEL, "hasel" },
295 { DMI_DMCONTROL, DMI_DMCONTROL_HARTSELHI, "hartselhi" },
296 { DMI_DMCONTROL, DMI_DMCONTROL_HARTSELLO, "hartsello" },
297 { DMI_DMCONTROL, DMI_DMCONTROL_NDMRESET, "ndmreset" },
298 { DMI_DMCONTROL, DMI_DMCONTROL_DMACTIVE, "dmactive" },
299 { DMI_DMCONTROL, DMI_DMCONTROL_ACKHAVERESET, "ackhavereset" },
300
301 { DMI_DMSTATUS, DMI_DMSTATUS_IMPEBREAK, "impebreak" },
302 { DMI_DMSTATUS, DMI_DMSTATUS_ALLHAVERESET, "allhavereset" },
303 { DMI_DMSTATUS, DMI_DMSTATUS_ANYHAVERESET, "anyhavereset" },
304 { DMI_DMSTATUS, DMI_DMSTATUS_ALLRESUMEACK, "allresumeack" },
305 { DMI_DMSTATUS, DMI_DMSTATUS_ANYRESUMEACK, "anyresumeack" },
306 { DMI_DMSTATUS, DMI_DMSTATUS_ALLNONEXISTENT, "allnonexistent" },
307 { DMI_DMSTATUS, DMI_DMSTATUS_ANYNONEXISTENT, "anynonexistent" },
308 { DMI_DMSTATUS, DMI_DMSTATUS_ALLUNAVAIL, "allunavail" },
309 { DMI_DMSTATUS, DMI_DMSTATUS_ANYUNAVAIL, "anyunavail" },
310 { DMI_DMSTATUS, DMI_DMSTATUS_ALLRUNNING, "allrunning" },
311 { DMI_DMSTATUS, DMI_DMSTATUS_ANYRUNNING, "anyrunning" },
312 { DMI_DMSTATUS, DMI_DMSTATUS_ALLHALTED, "allhalted" },
313 { DMI_DMSTATUS, DMI_DMSTATUS_ANYHALTED, "anyhalted" },
314 { DMI_DMSTATUS, DMI_DMSTATUS_AUTHENTICATED, "authenticated" },
315 { DMI_DMSTATUS, DMI_DMSTATUS_AUTHBUSY, "authbusy" },
316 { DMI_DMSTATUS, DMI_DMSTATUS_DEVTREEVALID, "devtreevalid" },
317 { DMI_DMSTATUS, DMI_DMSTATUS_VERSION, "version" },
318
319 { DMI_ABSTRACTCS, DMI_ABSTRACTCS_PROGBUFSIZE, "progbufsize" },
320 { DMI_ABSTRACTCS, DMI_ABSTRACTCS_BUSY, "busy" },
321 { DMI_ABSTRACTCS, DMI_ABSTRACTCS_CMDERR, "cmderr" },
322 { DMI_ABSTRACTCS, DMI_ABSTRACTCS_DATACOUNT, "datacount" },
323
324 { DMI_COMMAND, DMI_COMMAND_CMDTYPE, "cmdtype" },
325
326 { DMI_SBCS, DMI_SBCS_SBREADONADDR, "sbreadonaddr" },
327 { DMI_SBCS, DMI_SBCS_SBACCESS, "sbaccess" },
328 { DMI_SBCS, DMI_SBCS_SBAUTOINCREMENT, "sbautoincrement" },
329 { DMI_SBCS, DMI_SBCS_SBREADONDATA, "sbreadondata" },
330 { DMI_SBCS, DMI_SBCS_SBERROR, "sberror" },
331 { DMI_SBCS, DMI_SBCS_SBASIZE, "sbasize" },
332 { DMI_SBCS, DMI_SBCS_SBACCESS128, "sbaccess128" },
333 { DMI_SBCS, DMI_SBCS_SBACCESS64, "sbaccess64" },
334 { DMI_SBCS, DMI_SBCS_SBACCESS32, "sbaccess32" },
335 { DMI_SBCS, DMI_SBCS_SBACCESS16, "sbaccess16" },
336 { DMI_SBCS, DMI_SBCS_SBACCESS8, "sbaccess8" },
337 };
338
339 text[0] = 0;
340 for (unsigned i = 0; i < DIM(description); i++) {
341 if (description[i].address == address) {
342 uint64_t mask = description[i].mask;
343 unsigned value = get_field(data, mask);
344 if (value) {
345 if (i > 0)
346 *(text++) = ' ';
347 if (mask & (mask >> 1)) {
348 /* If the field is more than 1 bit wide. */
349 sprintf(text, "%s=%d", description[i].name, value);
350 } else {
351 strcpy(text, description[i].name);
352 }
353 text += strlen(text);
354 }
355 }
356 }
357 }
358
359 static void dump_field(int idle, const struct scan_field *field)
360 {
361 static const char * const op_string[] = {"-", "r", "w", "?"};
362 static const char * const status_string[] = {"+", "?", "F", "b"};
363
364 if (debug_level < LOG_LVL_DEBUG)
365 return;
366
367 uint64_t out = buf_get_u64(field->out_value, 0, field->num_bits);
368 unsigned int out_op = get_field(out, DTM_DMI_OP);
369 unsigned int out_data = get_field(out, DTM_DMI_DATA);
370 unsigned int out_address = out >> DTM_DMI_ADDRESS_OFFSET;
371
372 uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
373 unsigned int in_op = get_field(in, DTM_DMI_OP);
374 unsigned int in_data = get_field(in, DTM_DMI_DATA);
375 unsigned int in_address = in >> DTM_DMI_ADDRESS_OFFSET;
376
377 log_printf_lf(LOG_LVL_DEBUG,
378 __FILE__, __LINE__, "scan",
379 "%db %di %s %08x @%02x -> %s %08x @%02x",
380 field->num_bits, idle,
381 op_string[out_op], out_data, out_address,
382 status_string[in_op], in_data, in_address);
383
384 char out_text[500];
385 char in_text[500];
386 decode_dmi(out_text, out_address, out_data);
387 decode_dmi(in_text, in_address, in_data);
388 if (in_text[0] || out_text[0]) {
389 log_printf_lf(LOG_LVL_DEBUG, __FILE__, __LINE__, "scan", "%s -> %s",
390 out_text, in_text);
391 }
392 }
393
394 /*** Utility functions. ***/
395
396 static void select_dmi(struct target *target)
397 {
398 jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
399 }
400
401 static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
402 {
403 struct scan_field field;
404 uint8_t in_value[4];
405 uint8_t out_value[4];
406
407 buf_set_u32(out_value, 0, 32, out);
408
409 jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);
410
411 field.num_bits = 32;
412 field.out_value = out_value;
413 field.in_value = in_value;
414 jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
415
416 /* Always return to dmi. */
417 select_dmi(target);
418
419 int retval = jtag_execute_queue();
420 if (retval != ERROR_OK) {
421 LOG_ERROR("failed jtag scan: %d", retval);
422 return retval;
423 }
424
425 uint32_t in = buf_get_u32(field.in_value, 0, 32);
426 LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);
427
428 return in;
429 }
430
431 static void increase_dmi_busy_delay(struct target *target)
432 {
433 riscv013_info_t *info = get_info(target);
434 info->dmi_busy_delay += info->dmi_busy_delay / 10 + 1;
435 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
436 info->dtmcs_idle, info->dmi_busy_delay,
437 info->ac_busy_delay);
438
439 dtmcontrol_scan(target, DTM_DTMCS_DMIRESET);
440 }
441
442 /**
443 * exec: If this is set, assume the scan results in an execution, so more
444 * run-test/idle cycles may be required.
445 */
446 static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
447 uint32_t *data_in, dmi_op_t op, uint32_t address_out, uint32_t data_out,
448 bool exec)
449 {
450 riscv013_info_t *info = get_info(target);
451 RISCV_INFO(r);
452 unsigned num_bits = info->abits + DTM_DMI_OP_LENGTH + DTM_DMI_DATA_LENGTH;
453 size_t num_bytes = (num_bits + 7) / 8;
454 uint8_t in[num_bytes];
455 uint8_t out[num_bytes];
456 struct scan_field field = {
457 .num_bits = num_bits,
458 .out_value = out,
459 .in_value = in
460 };
461
462 if (r->reset_delays_wait >= 0) {
463 r->reset_delays_wait--;
464 if (r->reset_delays_wait < 0) {
465 info->dmi_busy_delay = 0;
466 info->ac_busy_delay = 0;
467 }
468 }
469
470 memset(in, 0, num_bytes);
471
472 assert(info->abits != 0);
473
474 buf_set_u32(out, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, op);
475 buf_set_u32(out, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, data_out);
476 buf_set_u32(out, DTM_DMI_ADDRESS_OFFSET, info->abits, address_out);
477
478 /* Assume dbus is already selected. */
479 jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
480
481 int idle_count = info->dmi_busy_delay;
482 if (exec)
483 idle_count += info->ac_busy_delay;
484
485 if (idle_count)
486 jtag_add_runtest(idle_count, TAP_IDLE);
487
488 int retval = jtag_execute_queue();
489 if (retval != ERROR_OK) {
490 LOG_ERROR("dmi_scan failed jtag scan");
491 return DMI_STATUS_FAILED;
492 }
493
494 if (data_in)
495 *data_in = buf_get_u32(in, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);
496
497 if (address_in)
498 *address_in = buf_get_u32(in, DTM_DMI_ADDRESS_OFFSET, info->abits);
499
500 dump_field(idle_count, &field);
501
502 return buf_get_u32(in, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
503 }
504
505 /* If dmi_busy_encountered is non-NULL, this function will use it to tell the
506 * caller whether DMI was ever busy during this call. */
507 static int dmi_op_timeout(struct target *target, uint32_t *data_in,
508 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
509 uint32_t data_out, int timeout_sec, bool exec)
510 {
511 select_dmi(target);
512
513 dmi_status_t status;
514 uint32_t address_in;
515
516 if (dmi_busy_encountered)
517 *dmi_busy_encountered = false;
518
519 const char *op_name;
520 switch (dmi_op) {
521 case DMI_OP_NOP:
522 op_name = "nop";
523 break;
524 case DMI_OP_READ:
525 op_name = "read";
526 break;
527 case DMI_OP_WRITE:
528 op_name = "write";
529 break;
530 default:
531 LOG_ERROR("Invalid DMI operation: %d", dmi_op);
532 return ERROR_FAIL;
533 }
534
535 time_t start = time(NULL);
536 /* This first loop performs the request. Note that if for some reason this
537 * stays busy, it is actually due to the previous access. */
538 while (1) {
539 status = dmi_scan(target, NULL, NULL, dmi_op, address, data_out,
540 exec);
541 if (status == DMI_STATUS_BUSY) {
542 increase_dmi_busy_delay(target);
543 if (dmi_busy_encountered)
544 *dmi_busy_encountered = true;
545 } else if (status == DMI_STATUS_SUCCESS) {
546 break;
547 } else {
548 LOG_ERROR("failed %s at 0x%x, status=%d", op_name, address, status);
549 return ERROR_FAIL;
550 }
551 if (time(NULL) - start > timeout_sec)
552 return ERROR_TIMEOUT_REACHED;
553 }
554
555 if (status != DMI_STATUS_SUCCESS) {
556 LOG_ERROR("Failed %s at 0x%x; status=%d", op_name, address, status);
557 return ERROR_FAIL;
558 }
559
560 /* This second loop ensures the request succeeded, and gets back data.
561 * Note that NOP can result in a 'busy' result as well, but that would be
562 * noticed on the next DMI access we do. */
563 while (1) {
564 status = dmi_scan(target, &address_in, data_in, DMI_OP_NOP, address, 0,
565 false);
566 if (status == DMI_STATUS_BUSY) {
567 increase_dmi_busy_delay(target);
568 } else if (status == DMI_STATUS_SUCCESS) {
569 break;
570 } else {
571 LOG_ERROR("failed %s (NOP) at 0x%x, status=%d", op_name, address,
572 status);
573 return ERROR_FAIL;
574 }
575 if (time(NULL) - start > timeout_sec)
576 return ERROR_TIMEOUT_REACHED;
577 }
578
579 if (status != DMI_STATUS_SUCCESS) {
580 if (status == DMI_STATUS_FAILED || !data_in) {
581 LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name, address,
582 status);
583 } else {
584 LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
585 op_name, address, *data_in, status);
586 }
587 return ERROR_FAIL;
588 }
589
590 return ERROR_OK;
591 }
592
593 static int dmi_op(struct target *target, uint32_t *data_in,
594 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
595 uint32_t data_out, bool exec)
596 {
597 int result = dmi_op_timeout(target, data_in, dmi_busy_encountered, dmi_op,
598 address, data_out, riscv_command_timeout_sec, exec);
599 if (result == ERROR_TIMEOUT_REACHED) {
600 LOG_ERROR("DMI operation didn't complete in %d seconds. The target is "
601 "either really slow or broken. You could increase the "
602 "timeout with riscv set_command_timeout_sec.",
603 riscv_command_timeout_sec);
604 return ERROR_FAIL;
605 }
606 return result;
607 }
608
609 static int dmi_read(struct target *target, uint32_t *value, uint32_t address)
610 {
611 return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, false);
612 }
613
614 static int dmi_read_exec(struct target *target, uint32_t *value, uint32_t address)
615 {
616 return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, true);
617 }
618
619 static int dmi_write(struct target *target, uint32_t address, uint32_t value)
620 {
621 return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, false);
622 }
623
624 static int dmi_write_exec(struct target *target, uint32_t address, uint32_t value)
625 {
626 return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, true);
627 }
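/* Usage sketch (illustrative only): a read-modify-write of a DM register
 * built on the helpers above:
 *
 *     uint32_t dmcontrol;
 *     if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
 *         return ERROR_FAIL;
 *     dmcontrol = set_field(dmcontrol, DMI_DMCONTROL_NDMRESET, 1);
 *     if (dmi_write(target, DMI_DMCONTROL, dmcontrol) != ERROR_OK)
 *         return ERROR_FAIL;
 *
 * The *_exec variants are for accesses that may start an abstract command
 * or program buffer execution, so extra run-test/idle cycles may be
 * inserted. */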
628
629 int dmstatus_read_timeout(struct target *target, uint32_t *dmstatus,
630 bool authenticated, unsigned timeout_sec)
631 {
632 int result = dmi_op_timeout(target, dmstatus, NULL, DMI_OP_READ,
633 DMI_DMSTATUS, 0, timeout_sec, false);
634 if (result != ERROR_OK)
635 return result;
636 if (authenticated && !get_field(*dmstatus, DMI_DMSTATUS_AUTHENTICATED)) {
637 LOG_ERROR("Debugger is not authenticated to target Debug Module. "
638 "(dmstatus=0x%x). Use `riscv authdata_read` and "
639 "`riscv authdata_write` commands to authenticate.", *dmstatus);
640 return ERROR_FAIL;
641 }
642 return ERROR_OK;
643 }
644
645 int dmstatus_read(struct target *target, uint32_t *dmstatus,
646 bool authenticated)
647 {
648 return dmstatus_read_timeout(target, dmstatus, authenticated,
649 riscv_command_timeout_sec);
650 }
651
652 static void increase_ac_busy_delay(struct target *target)
653 {
654 riscv013_info_t *info = get_info(target);
655 info->ac_busy_delay += info->ac_busy_delay / 10 + 1;
656 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
657 info->dtmcs_idle, info->dmi_busy_delay,
658 info->ac_busy_delay);
659 }
660
661 uint32_t abstract_register_size(unsigned width)
662 {
663 switch (width) {
664 case 32:
665 return set_field(0, AC_ACCESS_REGISTER_SIZE, 2);
666 case 64:
667 return set_field(0, AC_ACCESS_REGISTER_SIZE, 3);
668 break;
669 case 128:
670 return set_field(0, AC_ACCESS_REGISTER_SIZE, 4);
671 break;
672 default:
673 LOG_ERROR("Unsupported register width: %d", width);
674 return 0;
675 }
676 }
677
678 static int wait_for_idle(struct target *target, uint32_t *abstractcs)
679 {
680 RISCV013_INFO(info);
681 time_t start = time(NULL);
682 while (1) {
683 if (dmi_read(target, abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
684 return ERROR_FAIL;
685
686 if (get_field(*abstractcs, DMI_ABSTRACTCS_BUSY) == 0)
687 return ERROR_OK;
688
689 if (time(NULL) - start > riscv_command_timeout_sec) {
690 info->cmderr = get_field(*abstractcs, DMI_ABSTRACTCS_CMDERR);
691 if (info->cmderr != CMDERR_NONE) {
692 const char *errors[8] = {
693 "none",
694 "busy",
695 "not supported",
696 "exception",
697 "halt/resume",
698 "reserved",
699 "reserved",
700 "other" };
701
702 LOG_ERROR("Abstract command ended in error '%s' (abstractcs=0x%x)",
703 errors[info->cmderr], *abstractcs);
704 }
705
706 LOG_ERROR("Timed out after %ds waiting for busy to go low (abstractcs=0x%x). "
707 "Increase the timeout with riscv set_command_timeout_sec.",
708 riscv_command_timeout_sec,
709 *abstractcs);
710 return ERROR_FAIL;
711 }
712 }
713 }
714
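/* Write an abstract command to COMMAND, wait for it to complete, and record
 * abstractcs.cmderr in info->cmderr; on failure the sticky cmderr field is
 * cleared in the DM before returning ERROR_FAIL. */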
715 static int execute_abstract_command(struct target *target, uint32_t command)
716 {
717 RISCV013_INFO(info);
718 if (debug_level >= LOG_LVL_DEBUG) {
719 switch (get_field(command, DMI_COMMAND_CMDTYPE)) {
720 case 0:
721 LOG_DEBUG("command=0x%x; access register, size=%d, postexec=%d, "
722 "transfer=%d, write=%d, regno=0x%x",
723 command,
724 8 << get_field(command, AC_ACCESS_REGISTER_SIZE),
725 get_field(command, AC_ACCESS_REGISTER_POSTEXEC),
726 get_field(command, AC_ACCESS_REGISTER_TRANSFER),
727 get_field(command, AC_ACCESS_REGISTER_WRITE),
728 get_field(command, AC_ACCESS_REGISTER_REGNO));
729 break;
730 default:
731 LOG_DEBUG("command=0x%x", command);
732 break;
733 }
734 }
735
736 dmi_write_exec(target, DMI_COMMAND, command);
737
738 uint32_t abstractcs = 0;
739 wait_for_idle(target, &abstractcs);
740
741 info->cmderr = get_field(abstractcs, DMI_ABSTRACTCS_CMDERR);
742 if (info->cmderr != 0) {
743 LOG_DEBUG("command 0x%x failed; abstractcs=0x%x", command, abstractcs);
744 /* Clear the error. */
745 dmi_write(target, DMI_ABSTRACTCS, set_field(0, DMI_ABSTRACTCS_CMDERR,
746 info->cmderr));
747 return ERROR_FAIL;
748 }
749
750 return ERROR_OK;
751 }
752
753 static riscv_reg_t read_abstract_arg(struct target *target, unsigned index,
754 unsigned size_bits)
755 {
756 riscv_reg_t value = 0;
757 uint32_t v;
758 unsigned offset = index * size_bits / 32;
759 switch (size_bits) {
760 default:
761 LOG_ERROR("Unsupported size: %d", size_bits);
762 return ~0;
763 case 64:
764 dmi_read(target, &v, DMI_DATA0 + offset + 1);
765 value |= ((uint64_t) v) << 32;
766 /* falls through */
767 case 32:
768 dmi_read(target, &v, DMI_DATA0 + offset);
769 value |= v;
770 }
771 return value;
772 }
773
774 static int write_abstract_arg(struct target *target, unsigned index,
775 riscv_reg_t value, unsigned size_bits)
776 {
777 unsigned offset = index * size_bits / 32;
778 switch (size_bits) {
779 default:
780 LOG_ERROR("Unsupported size: %d", size_bits);
781 return ERROR_FAIL;
782 case 64:
783 dmi_write(target, DMI_DATA0 + offset + 1, value >> 32);
784 /* falls through */
785 case 32:
786 dmi_write(target, DMI_DATA0 + offset, value);
787 }
788 return ERROR_OK;
789 }
790
791 /**
792 * @param size Register size in bits.
793 */
794 static uint32_t access_register_command(struct target *target, uint32_t number,
795 unsigned size, uint32_t flags)
796 {
797 uint32_t command = set_field(0, DMI_COMMAND_CMDTYPE, 0);
798 switch (size) {
799 case 32:
800 command = set_field(command, AC_ACCESS_REGISTER_SIZE, 2);
801 break;
802 case 64:
803 command = set_field(command, AC_ACCESS_REGISTER_SIZE, 3);
804 break;
805 default:
806 assert(0);
807 }
808
809 if (number <= GDB_REGNO_XPR31) {
810 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
811 0x1000 + number - GDB_REGNO_ZERO);
812 } else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
813 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
814 0x1020 + number - GDB_REGNO_FPR0);
815 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
816 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
817 number - GDB_REGNO_CSR0);
818 } else if (number >= GDB_REGNO_COUNT) {
819 /* Custom register. */
820 assert(target->reg_cache->reg_list[number].arch_info);
821 riscv_reg_info_t *reg_info = target->reg_cache->reg_list[number].arch_info;
822 assert(reg_info);
823 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
824 0xc000 + reg_info->custom_number);
825 }
826
827 command |= flags;
828
829 return command;
830 }
831
832 static int register_read_abstract(struct target *target, uint64_t *value,
833 uint32_t number, unsigned size)
834 {
835 RISCV013_INFO(info);
836
837 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
838 !info->abstract_read_fpr_supported)
839 return ERROR_FAIL;
840 if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
841 !info->abstract_read_csr_supported)
842 return ERROR_FAIL;
843
844 uint32_t command = access_register_command(target, number, size,
845 AC_ACCESS_REGISTER_TRANSFER);
846
847 int result = execute_abstract_command(target, command);
848 if (result != ERROR_OK) {
849 if (info->cmderr == CMDERR_NOT_SUPPORTED) {
850 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
851 info->abstract_read_fpr_supported = false;
852 LOG_INFO("Disabling abstract command reads from FPRs.");
853 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
854 info->abstract_read_csr_supported = false;
855 LOG_INFO("Disabling abstract command reads from CSRs.");
856 }
857 }
858 return result;
859 }
860
861 if (value)
862 *value = read_abstract_arg(target, 0, size);
863
864 return ERROR_OK;
865 }
866
867 static int register_write_abstract(struct target *target, uint32_t number,
868 uint64_t value, unsigned size)
869 {
870 RISCV013_INFO(info);
871
872 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
873 !info->abstract_write_fpr_supported)
874 return ERROR_FAIL;
875 if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
876 !info->abstract_write_csr_supported)
877 return ERROR_FAIL;
878
879 uint32_t command = access_register_command(target, number, size,
880 AC_ACCESS_REGISTER_TRANSFER |
881 AC_ACCESS_REGISTER_WRITE);
882
883 if (write_abstract_arg(target, 0, value, size) != ERROR_OK)
884 return ERROR_FAIL;
885
886 int result = execute_abstract_command(target, command);
887 if (result != ERROR_OK) {
888 if (info->cmderr == CMDERR_NOT_SUPPORTED) {
889 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
890 info->abstract_write_fpr_supported = false;
891 LOG_INFO("Disabling abstract command writes to FPRs.");
892 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
893 info->abstract_write_csr_supported = false;
894 LOG_INFO("Disabling abstract command writes to CSRs.");
895 }
896 }
897 return result;
898 }
899
900 return ERROR_OK;
901 }
902
903 static int examine_progbuf(struct target *target)
904 {
905 riscv013_info_t *info = get_info(target);
906
907 if (info->progbuf_writable != YNM_MAYBE)
908 return ERROR_OK;
909
910 /* Figure out if progbuf is writable. */
911
912 if (info->progbufsize < 1) {
913 info->progbuf_writable = YNM_NO;
914 LOG_INFO("No program buffer present.");
915 return ERROR_OK;
916 }
917
918 uint64_t s0;
919 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
920 return ERROR_FAIL;
921
922 struct riscv_program program;
923 riscv_program_init(&program, target);
924 riscv_program_insert(&program, auipc(S0));
925 if (riscv_program_exec(&program, target) != ERROR_OK)
926 return ERROR_FAIL;
927
928 if (register_read_direct(target, &info->progbuf_address, GDB_REGNO_S0) != ERROR_OK)
929 return ERROR_FAIL;
930
931 riscv_program_init(&program, target);
932 riscv_program_insert(&program, sw(S0, S0, 0));
933 int result = riscv_program_exec(&program, target);
934
935 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
936 return ERROR_FAIL;
937
938 if (result != ERROR_OK) {
939 /* This program might have failed if the program buffer is not
940 * writable. */
941 info->progbuf_writable = YNM_NO;
942 return ERROR_OK;
943 }
944
945 uint32_t written;
946 if (dmi_read(target, &written, DMI_PROGBUF0) != ERROR_OK)
947 return ERROR_FAIL;
948 if (written == (uint32_t) info->progbuf_address) {
949 LOG_INFO("progbuf is writable at 0x%" PRIx64,
950 info->progbuf_address);
951 info->progbuf_writable = YNM_YES;
952
953 } else {
954 LOG_INFO("progbuf is not writeable at 0x%" PRIx64,
955 info->progbuf_address);
956 info->progbuf_writable = YNM_NO;
957 }
958
959 return ERROR_OK;
960 }
961
962 typedef enum {
963 SPACE_DMI_DATA,
964 SPACE_DMI_PROGBUF,
965 SPACE_DMI_RAM
966 } memory_space_t;
967
968 typedef struct {
969 /* How can the debugger access this memory? */
970 memory_space_t memory_space;
971 /* Memory address to access the scratch memory from the hart. */
972 riscv_addr_t hart_address;
973 /* Memory address to access the scratch memory from the debugger. */
974 riscv_addr_t debug_address;
975 struct working_area *area;
976 } scratch_mem_t;
977
978 /**
979 * Find some scratch memory to be used with the given program. Preference order: abstract command data words, then spare Program Buffer words, then a configured work area.
980 */
981 static int scratch_reserve(struct target *target,
982 scratch_mem_t *scratch,
983 struct riscv_program *program,
984 unsigned size_bytes)
985 {
986 riscv_addr_t alignment = 1;
987 while (alignment < size_bytes)
988 alignment *= 2;
989
990 scratch->area = NULL;
991
992 riscv013_info_t *info = get_info(target);
993
994 if (info->dataaccess == 1) {
995 /* Sign extend dataaddr. */
996 scratch->hart_address = info->dataaddr;
997 if (info->dataaddr & (1<<11))
998 scratch->hart_address |= 0xfffffffffffff000ULL;
999 /* Align. */
1000 scratch->hart_address = (scratch->hart_address + alignment - 1) & ~(alignment - 1);
1001
1002 if ((size_bytes + scratch->hart_address - info->dataaddr + 3) / 4 <=
1003 info->datasize) {
1004 scratch->memory_space = SPACE_DMI_DATA;
1005 scratch->debug_address = (scratch->hart_address - info->dataaddr) / 4;
1006 return ERROR_OK;
1007 }
1008 }
1009
1010 if (examine_progbuf(target) != ERROR_OK)
1011 return ERROR_FAIL;
1012
1013 /* Allow for ebreak at the end of the program. */
1014 unsigned program_size = (program->instruction_count + 1) * 4;
1015 scratch->hart_address = (info->progbuf_address + program_size + alignment - 1) &
1016 ~(alignment - 1);
1017 if ((size_bytes + scratch->hart_address - info->progbuf_address + 3) / 4 <=
1018 info->progbufsize) {
1019 scratch->memory_space = SPACE_DMI_PROGBUF;
1020 scratch->debug_address = (scratch->hart_address - info->progbuf_address) / 4;
1021 return ERROR_OK;
1022 }
1023
1024 if (target_alloc_working_area(target, size_bytes + alignment - 1,
1025 &scratch->area) == ERROR_OK) {
1026 scratch->hart_address = (scratch->area->address + alignment - 1) &
1027 ~(alignment - 1);
1028 scratch->memory_space = SPACE_DMI_RAM;
1029 scratch->debug_address = scratch->hart_address;
1030 return ERROR_OK;
1031 }
1032
1033 LOG_ERROR("Couldn't find %d bytes of scratch RAM to use. Please configure "
1034 "a work area with 'configure -work-area-phys'.", size_bytes);
1035 return ERROR_FAIL;
1036 }
1037
1038 static int scratch_release(struct target *target,
1039 scratch_mem_t *scratch)
1040 {
1041 if (scratch->area)
1042 return target_free_working_area(target, scratch->area);
1043
1044 return ERROR_OK;
1045 }
1046
1047 static int scratch_read64(struct target *target, scratch_mem_t *scratch,
1048 uint64_t *value)
1049 {
1050 uint32_t v;
1051 switch (scratch->memory_space) {
1052 case SPACE_DMI_DATA:
1053 if (dmi_read(target, &v, DMI_DATA0 + scratch->debug_address) != ERROR_OK)
1054 return ERROR_FAIL;
1055 *value = v;
1056 if (dmi_read(target, &v, DMI_DATA1 + scratch->debug_address) != ERROR_OK)
1057 return ERROR_FAIL;
1058 *value |= ((uint64_t) v) << 32;
1059 break;
1060 case SPACE_DMI_PROGBUF:
1061 if (dmi_read(target, &v, DMI_PROGBUF0 + scratch->debug_address) != ERROR_OK)
1062 return ERROR_FAIL;
1063 *value = v;
1064 if (dmi_read(target, &v, DMI_PROGBUF1 + scratch->debug_address) != ERROR_OK)
1065 return ERROR_FAIL;
1066 *value |= ((uint64_t) v) << 32;
1067 break;
1068 case SPACE_DMI_RAM:
1069 {
1070 uint8_t buffer[8];
1071 if (read_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
1072 return ERROR_FAIL;
1073 *value = buffer[0] |
1074 (((uint64_t) buffer[1]) << 8) |
1075 (((uint64_t) buffer[2]) << 16) |
1076 (((uint64_t) buffer[3]) << 24) |
1077 (((uint64_t) buffer[4]) << 32) |
1078 (((uint64_t) buffer[5]) << 40) |
1079 (((uint64_t) buffer[6]) << 48) |
1080 (((uint64_t) buffer[7]) << 56);
1081 }
1082 break;
1083 }
1084 return ERROR_OK;
1085 }
1086
1087 static int scratch_write64(struct target *target, scratch_mem_t *scratch,
1088 uint64_t value)
1089 {
1090 switch (scratch->memory_space) {
1091 case SPACE_DMI_DATA:
1092 dmi_write(target, DMI_DATA0 + scratch->debug_address, value);
1093 dmi_write(target, DMI_DATA1 + scratch->debug_address, value >> 32);
1094 break;
1095 case SPACE_DMI_PROGBUF:
1096 dmi_write(target, DMI_PROGBUF0 + scratch->debug_address, value);
1097 dmi_write(target, DMI_PROGBUF1 + scratch->debug_address, value >> 32);
1098 break;
1099 case SPACE_DMI_RAM:
1100 {
1101 uint8_t buffer[8] = {
1102 value,
1103 value >> 8,
1104 value >> 16,
1105 value >> 24,
1106 value >> 32,
1107 value >> 40,
1108 value >> 48,
1109 value >> 56
1110 };
1111 if (write_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
1112 return ERROR_FAIL;
1113 }
1114 break;
1115 }
1116 return ERROR_OK;
1117 }
1118
1119 /** Return register size in bits. */
1120 static unsigned register_size(struct target *target, unsigned number)
1121 {
1122 /* If reg_cache hasn't been initialized yet, make a guess. We need this for
1123 * when this function is called during examine(). */
1124 if (target->reg_cache)
1125 return target->reg_cache->reg_list[number].size;
1126 else
1127 return riscv_xlen(target);
1128 }
1129
1130 /**
1131 * Immediately write the new value to the requested register. This mechanism
1132 * bypasses any caches.
1133 */
1134 static int register_write_direct(struct target *target, unsigned number,
1135 uint64_t value)
1136 {
1137 RISCV013_INFO(info);
1138 RISCV_INFO(r);
1139
1140 LOG_DEBUG("{%d} reg[0x%x] <- 0x%" PRIx64, riscv_current_hartid(target),
1141 number, value);
1142
1143 int result = register_write_abstract(target, number, value,
1144 register_size(target, number));
1145 if (result == ERROR_OK && target->reg_cache) {
1146 struct reg *reg = &target->reg_cache->reg_list[number];
1147 buf_set_u64(reg->value, 0, reg->size, value);
1148 }
1149 if (result == ERROR_OK || info->progbufsize + r->impebreak < 2 ||
1150 !riscv_is_halted(target))
1151 return result;
1152
1153 struct riscv_program program;
1154 riscv_program_init(&program, target);
1155
1156 uint64_t s0;
1157 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1158 return ERROR_FAIL;
1159
1160 scratch_mem_t scratch;
1161 bool use_scratch = false;
1162 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
1163 riscv_supports_extension(target, riscv_current_hartid(target), 'D') &&
1164 riscv_xlen(target) < 64) {
1165 /* There are no instructions to move all the bits from a register, so
1166 * we need to use some scratch RAM. */
1167 use_scratch = true;
1168 riscv_program_insert(&program, fld(number - GDB_REGNO_FPR0, S0, 0));
1169
1170 if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
1171 return ERROR_FAIL;
1172
1173 if (register_write_direct(target, GDB_REGNO_S0, scratch.hart_address)
1174 != ERROR_OK) {
1175 scratch_release(target, &scratch);
1176 return ERROR_FAIL;
1177 }
1178
1179 if (scratch_write64(target, &scratch, value) != ERROR_OK) {
1180 scratch_release(target, &scratch);
1181 return ERROR_FAIL;
1182 }
1183
1184 } else {
1185 if (register_write_direct(target, GDB_REGNO_S0, value) != ERROR_OK)
1186 return ERROR_FAIL;
1187
1188 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
1189 if (riscv_supports_extension(target, riscv_current_hartid(target), 'D'))
1190 riscv_program_insert(&program, fmv_d_x(number - GDB_REGNO_FPR0, S0));
1191 else
1192 riscv_program_insert(&program, fmv_w_x(number - GDB_REGNO_FPR0, S0));
1193 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
1194 riscv_program_csrw(&program, S0, number);
1195 } else {
1196 LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
1197 return ERROR_FAIL;
1198 }
1199 }
1200
1201 int exec_out = riscv_program_exec(&program, target);
1202 /* Don't message on error. Probably the register doesn't exist. */
1203 if (exec_out == ERROR_OK && target->reg_cache) {
1204 struct reg *reg = &target->reg_cache->reg_list[number];
1205 buf_set_u64(reg->value, 0, reg->size, value);
1206 }
1207
1208 if (use_scratch)
1209 scratch_release(target, &scratch);
1210
1211 /* Restore S0. */
1212 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1213 return ERROR_FAIL;
1214
1215 return exec_out;
1216 }
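/* Usage sketch (values illustrative): writing a CSR with
 * register_write_direct() and reading it back via register_read():
 *
 *     if (register_write_direct(target, GDB_REGNO_MSTATUS, new_mstatus) != ERROR_OK)
 *         return ERROR_FAIL;
 *     uint64_t readback;
 *     if (register_read(target, &readback, GDB_REGNO_MSTATUS) != ERROR_OK)
 *         return ERROR_FAIL;
 *
 * The write is tried as an abstract command first and falls back to a
 * program buffer sequence (csrw via s0) when that fails. */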
1217
1218 /** Return the cached value, or read from the target if necessary. */
1219 static int register_read(struct target *target, uint64_t *value, uint32_t number)
1220 {
1221 if (number == GDB_REGNO_ZERO) {
1222 *value = 0;
1223 return ERROR_OK;
1224 }
1225 int result = register_read_direct(target, value, number);
1226 if (result != ERROR_OK)
1227 return ERROR_FAIL;
1228 if (target->reg_cache) {
1229 struct reg *reg = &target->reg_cache->reg_list[number];
1230 buf_set_u64(reg->value, 0, reg->size, *value);
1231 }
1232 return ERROR_OK;
1233 }
1234
1235 /** Actually read registers from the target right now. */
1236 static int register_read_direct(struct target *target, uint64_t *value, uint32_t number)
1237 {
1238 RISCV013_INFO(info);
1239 RISCV_INFO(r);
1240
1241 int result = register_read_abstract(target, value, number,
1242 register_size(target, number));
1243
1244 if (result != ERROR_OK &&
1245 info->progbufsize + r->impebreak >= 2 &&
1246 number > GDB_REGNO_XPR31) {
1247 struct riscv_program program;
1248 riscv_program_init(&program, target);
1249
1250 scratch_mem_t scratch;
1251 bool use_scratch = false;
1252
1253 uint64_t s0;
1254 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1255 return ERROR_FAIL;
1256
1257 /* Write program to move data into s0. */
1258
1259 uint64_t mstatus;
1260 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
1261 if (register_read(target, &mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
1262 return ERROR_FAIL;
1263 if ((mstatus & MSTATUS_FS) == 0)
1264 if (register_write_direct(target, GDB_REGNO_MSTATUS,
1265 set_field(mstatus, MSTATUS_FS, 1)) != ERROR_OK)
1266 return ERROR_FAIL;
1267
1268 if (riscv_supports_extension(target, riscv_current_hartid(target), 'D')
1269 && riscv_xlen(target) < 64) {
1270 /* There are no instructions to move all the bits from a
1271 * register, so we need to use some scratch RAM. */
1272 riscv_program_insert(&program, fsd(number - GDB_REGNO_FPR0, S0,
1273 0));
1274
1275 if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
1276 return ERROR_FAIL;
1277 use_scratch = true;
1278
1279 if (register_write_direct(target, GDB_REGNO_S0,
1280 scratch.hart_address) != ERROR_OK) {
1281 scratch_release(target, &scratch);
1282 return ERROR_FAIL;
1283 }
1284 } else if (riscv_supports_extension(target,
1285 riscv_current_hartid(target), 'D')) {
1286 riscv_program_insert(&program, fmv_x_d(S0, number - GDB_REGNO_FPR0));
1287 } else {
1288 riscv_program_insert(&program, fmv_x_w(S0, number - GDB_REGNO_FPR0));
1289 }
1290 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
1291 riscv_program_csrr(&program, S0, number);
1292 } else {
1293 LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
1294 return ERROR_FAIL;
1295 }
1296
1297 /* Execute program. */
1298 result = riscv_program_exec(&program, target);
1299 /* Don't message on error. Probably the register doesn't exist. */
1300
1301 if (use_scratch) {
1302 result = scratch_read64(target, &scratch, value);
1303 scratch_release(target, &scratch);
1304 if (result != ERROR_OK)
1305 return result;
1306 } else {
1307 /* Read S0 */
1308 if (register_read_direct(target, value, GDB_REGNO_S0) != ERROR_OK)
1309 return ERROR_FAIL;
1310 }
1311
1312 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
1313 (mstatus & MSTATUS_FS) == 0)
1314 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK)
1315 return ERROR_FAIL;
1316
1317 /* Restore S0. */
1318 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1319 return ERROR_FAIL;
1320 }
1321
1322 if (result == ERROR_OK) {
1323 LOG_DEBUG("{%d} reg[0x%x] = 0x%" PRIx64, riscv_current_hartid(target),
1324 number, *value);
1325 }
1326
1327 return result;
1328 }
1329
1330 int wait_for_authbusy(struct target *target, uint32_t *dmstatus)
1331 {
1332 time_t start = time(NULL);
1333 while (1) {
1334 uint32_t value;
1335 if (dmstatus_read(target, &value, false) != ERROR_OK)
1336 return ERROR_FAIL;
1337 if (dmstatus)
1338 *dmstatus = value;
1339 if (!get_field(value, DMI_DMSTATUS_AUTHBUSY))
1340 break;
1341 if (time(NULL) - start > riscv_command_timeout_sec) {
1342 LOG_ERROR("Timed out after %ds waiting for authbusy to go low (dmstatus=0x%x). "
1343 "Increase the timeout with riscv set_command_timeout_sec.",
1344 riscv_command_timeout_sec,
1345 value);
1346 return ERROR_FAIL;
1347 }
1348 }
1349
1350 return ERROR_OK;
1351 }
1352
1353 /*** OpenOCD target functions. ***/
1354
1355 static void deinit_target(struct target *target)
1356 {
1357 LOG_DEBUG("riscv_deinit_target()");
1358 riscv_info_t *info = (riscv_info_t *) target->arch_info;
1359 free(info->version_specific);
1360 /* TODO: free register arch_info */
1361 info->version_specific = NULL;
1362 }
1363
1364 static int examine(struct target *target)
1365 {
1366 /* Don't need to select dbus, since the first thing we do is read dtmcontrol. */
1367
1368 uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
1369 LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
1370 LOG_DEBUG(" dmireset=%d", get_field(dtmcontrol, DTM_DTMCS_DMIRESET));
1371 LOG_DEBUG(" idle=%d", get_field(dtmcontrol, DTM_DTMCS_IDLE));
1372 LOG_DEBUG(" dmistat=%d", get_field(dtmcontrol, DTM_DTMCS_DMISTAT));
1373 LOG_DEBUG(" abits=%d", get_field(dtmcontrol, DTM_DTMCS_ABITS));
1374 LOG_DEBUG(" version=%d", get_field(dtmcontrol, DTM_DTMCS_VERSION));
1375 if (dtmcontrol == 0) {
1376 LOG_ERROR("dtmcontrol is 0. Check JTAG connectivity/board power.");
1377 return ERROR_FAIL;
1378 }
1379 if (get_field(dtmcontrol, DTM_DTMCS_VERSION) != 1) {
1380 LOG_ERROR("Unsupported DTM version %d. (dtmcontrol=0x%x)",
1381 get_field(dtmcontrol, DTM_DTMCS_VERSION), dtmcontrol);
1382 return ERROR_FAIL;
1383 }
1384
1385 riscv013_info_t *info = get_info(target);
1386 info->abits = get_field(dtmcontrol, DTM_DTMCS_ABITS);
1387 info->dtmcs_idle = get_field(dtmcontrol, DTM_DTMCS_IDLE);
1388
1389 /* Reset the Debug Module. */
1390 dm013_info_t *dm = get_dm(target);
1391 if (!dm->was_reset) {
1392 dmi_write(target, DMI_DMCONTROL, 0);
1393 dmi_write(target, DMI_DMCONTROL, DMI_DMCONTROL_DMACTIVE);
1394 dm->was_reset = true;
1395 }
1396
1397 dmi_write(target, DMI_DMCONTROL, DMI_DMCONTROL_HARTSELLO |
1398 DMI_DMCONTROL_HARTSELHI | DMI_DMCONTROL_DMACTIVE);
1399 uint32_t dmcontrol;
1400 if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
1401 return ERROR_FAIL;
1402
1403 if (!get_field(dmcontrol, DMI_DMCONTROL_DMACTIVE)) {
1404 LOG_ERROR("Debug Module did not become active. dmcontrol=0x%x",
1405 dmcontrol);
1406 return ERROR_FAIL;
1407 }
1408
1409 uint32_t dmstatus;
1410 if (dmstatus_read(target, &dmstatus, false) != ERROR_OK)
1411 return ERROR_FAIL;
1412 LOG_DEBUG("dmstatus: 0x%08x", dmstatus);
1413 if (get_field(dmstatus, DMI_DMSTATUS_VERSION) != 2) {
1414 LOG_ERROR("OpenOCD only supports Debug Module version 2, not %d "
1415 "(dmstatus=0x%x)", get_field(dmstatus, DMI_DMSTATUS_VERSION), dmstatus);
1416 return ERROR_FAIL;
1417 }
1418
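/* dmcontrol was written above with every hartsel bit set; hartsellen is
 * simply how many of those bits the DM kept (read back as 1). */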
1419 uint32_t hartsel =
1420 (get_field(dmcontrol, DMI_DMCONTROL_HARTSELHI) <<
1421 DMI_DMCONTROL_HARTSELLO_LENGTH) |
1422 get_field(dmcontrol, DMI_DMCONTROL_HARTSELLO);
1423 info->hartsellen = 0;
1424 while (hartsel & 1) {
1425 info->hartsellen++;
1426 hartsel >>= 1;
1427 }
1428 LOG_DEBUG("hartsellen=%d", info->hartsellen);
1429
1430 uint32_t hartinfo;
1431 if (dmi_read(target, &hartinfo, DMI_HARTINFO) != ERROR_OK)
1432 return ERROR_FAIL;
1433
1434 info->datasize = get_field(hartinfo, DMI_HARTINFO_DATASIZE);
1435 info->dataaccess = get_field(hartinfo, DMI_HARTINFO_DATAACCESS);
1436 info->dataaddr = get_field(hartinfo, DMI_HARTINFO_DATAADDR);
1437
1438 if (!get_field(dmstatus, DMI_DMSTATUS_AUTHENTICATED)) {
1439 LOG_ERROR("Debugger is not authenticated to target Debug Module. "
1440 "(dmstatus=0x%x). Use `riscv authdata_read` and "
1441 "`riscv authdata_write` commands to authenticate.", dmstatus);
1442 /* If we return ERROR_FAIL here, then in a multicore setup the next
1443 * core won't be examined, which means we won't set up the
1444 * authentication commands for them, which means the config script
1445 * needs to be a lot more complex. */
1446 return ERROR_OK;
1447 }
1448
1449 if (dmi_read(target, &info->sbcs, DMI_SBCS) != ERROR_OK)
1450 return ERROR_FAIL;
1451
1452 /* Check that abstract data registers are accessible. */
1453 uint32_t abstractcs;
1454 if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
1455 return ERROR_FAIL;
1456 info->datacount = get_field(abstractcs, DMI_ABSTRACTCS_DATACOUNT);
1457 info->progbufsize = get_field(abstractcs, DMI_ABSTRACTCS_PROGBUFSIZE);
1458
1459 LOG_INFO("datacount=%d progbufsize=%d", info->datacount, info->progbufsize);
1460
1461 RISCV_INFO(r);
1462 r->impebreak = get_field(dmstatus, DMI_DMSTATUS_IMPEBREAK);
1463
1464 if (info->progbufsize + r->impebreak < 2) {
1465 LOG_WARNING("We won't be able to execute fence instructions on this "
1466 "target. Memory may not always appear consistent. "
1467 "(progbufsize=%d, impebreak=%d)", info->progbufsize,
1468 r->impebreak);
1469 }
1470
1471 /* Before doing anything else we must first enumerate the harts. */
1472
1473 /* Don't call any riscv_* functions until after we've counted the number of
1474 * cores and initialized registers. */
1475 for (int i = 0; i < MIN(RISCV_MAX_HARTS, 1 << info->hartsellen); ++i) {
1476 if (!riscv_rtos_enabled(target) && i != target->coreid)
1477 continue;
1478
1479 r->current_hartid = i;
1480 if (riscv013_select_current_hart(target) != ERROR_OK)
1481 return ERROR_FAIL;
1482
1483 uint32_t s;
1484 if (dmstatus_read(target, &s, true) != ERROR_OK)
1485 return ERROR_FAIL;
1486 if (get_field(s, DMI_DMSTATUS_ANYNONEXISTENT))
1487 break;
1488 r->hart_count = i + 1;
1489
1490 if (get_field(s, DMI_DMSTATUS_ANYHAVERESET))
1491 dmi_write(target, DMI_DMCONTROL,
1492 set_hartsel(DMI_DMCONTROL_DMACTIVE | DMI_DMCONTROL_ACKHAVERESET, i));
1493
1494 bool halted = riscv_is_halted(target);
1495 if (!halted) {
1496 if (riscv013_halt_current_hart(target) != ERROR_OK) {
1497 LOG_ERROR("Fatal: Hart %d failed to halt during examine()", i);
1498 return ERROR_FAIL;
1499 }
1500 }
1501
1502 /* Without knowing anything else we can at least mess with the
1503 * program buffer. */
1504 r->debug_buffer_size[i] = info->progbufsize;
1505
1506 int result = register_read_abstract(target, NULL, GDB_REGNO_S0, 64);
1507 if (result == ERROR_OK)
1508 r->xlen[i] = 64;
1509 else
1510 r->xlen[i] = 32;
1511
1512 if (register_read(target, &r->misa[i], GDB_REGNO_MISA) != ERROR_OK) {
1513 LOG_ERROR("Fatal: Failed to read MISA from hart %d.", i);
1514 return ERROR_FAIL;
1515 }
1516
1517 /* Now init registers based on what we discovered. */
1518 if (riscv_init_registers(target) != ERROR_OK)
1519 return ERROR_FAIL;
1520
1521 /* Display this as early as possible to help people who are using
1522 * really slow simulators. */
1523 LOG_DEBUG(" hart %d: XLEN=%d, misa=0x%" PRIx64, i, r->xlen[i],
1524 r->misa[i]);
1525
1526 if (!halted)
1527 riscv013_resume_current_hart(target);
1528 }
1529
1530 LOG_DEBUG("Enumerated %d harts", r->hart_count);
1531
1532 if (r->hart_count == 0) {
1533 LOG_ERROR("No harts found!");
1534 return ERROR_FAIL;
1535 }
1536
1537 target_set_examined(target);
1538
1539 /* Some regression suites rely on seeing 'Examined RISC-V core' to know
1540 * when they can connect with gdb/telnet.
1541 * We will need to update those suites if we want to change that text. */
1542 LOG_INFO("Examined RISC-V core; found %d harts",
1543 riscv_count_harts(target));
1544 for (int i = 0; i < riscv_count_harts(target); ++i) {
1545 if (riscv_hart_enabled(target, i)) {
1546 LOG_INFO(" hart %d: XLEN=%d, misa=0x%" PRIx64, i, r->xlen[i],
1547 r->misa[i]);
1548 } else {
1549 LOG_INFO(" hart %d: currently disabled", i);
1550 }
1551 }
1552 return ERROR_OK;
1553 }
1554
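/* Authentication helpers, exposed as the `riscv authdata_read` and
 * `riscv authdata_write` commands. The usual flow (the exact protocol is
 * implementation-defined by the DM) is: read a challenge with
 * authdata_read, compute the response on the host, then write it back with
 * authdata_write. A write that results in successful authentication
 * re-examines every target behind this DM. */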
1555 int riscv013_authdata_read(struct target *target, uint32_t *value)
1556 {
1557 if (wait_for_authbusy(target, NULL) != ERROR_OK)
1558 return ERROR_FAIL;
1559
1560 return dmi_read(target, value, DMI_AUTHDATA);
1561 }
1562
1563 int riscv013_authdata_write(struct target *target, uint32_t value)
1564 {
1565 uint32_t before, after;
1566 if (wait_for_authbusy(target, &before) != ERROR_OK)
1567 return ERROR_FAIL;
1568
1569 dmi_write(target, DMI_AUTHDATA, value);
1570
1571 if (wait_for_authbusy(target, &after) != ERROR_OK)
1572 return ERROR_FAIL;
1573
1574 if (!get_field(before, DMI_DMSTATUS_AUTHENTICATED) &&
1575 get_field(after, DMI_DMSTATUS_AUTHENTICATED)) {
1576 LOG_INFO("authdata_write resulted in successful authentication");
1577 int result = ERROR_OK;
1578 dm013_info_t *dm = get_dm(target);
1579 target_list_t *entry;
1580 list_for_each_entry(entry, &dm->target_list, list) {
1581 if (examine(entry->target) != ERROR_OK)
1582 result = ERROR_FAIL;
1583 }
1584 return result;
1585 }
1586
1587 return ERROR_OK;
1588 }
1589
1590 static int init_target(struct command_context *cmd_ctx,
1591 struct target *target)
1592 {
1593 LOG_DEBUG("init");
1594 riscv_info_t *generic_info = (riscv_info_t *) target->arch_info;
1595
1596 generic_info->get_register = &riscv013_get_register;
1597 generic_info->set_register = &riscv013_set_register;
1598 generic_info->select_current_hart = &riscv013_select_current_hart;
1599 generic_info->is_halted = &riscv013_is_halted;
1600 generic_info->halt_current_hart = &riscv013_halt_current_hart;
1601 generic_info->resume_current_hart = &riscv013_resume_current_hart;
1602 generic_info->step_current_hart = &riscv013_step_current_hart;
1603 generic_info->on_halt = &riscv013_on_halt;
1604 generic_info->on_resume = &riscv013_on_resume;
1605 generic_info->on_step = &riscv013_on_step;
1606 generic_info->halt_reason = &riscv013_halt_reason;
1607 generic_info->read_debug_buffer = &riscv013_read_debug_buffer;
1608 generic_info->write_debug_buffer = &riscv013_write_debug_buffer;
1609 generic_info->execute_debug_buffer = &riscv013_execute_debug_buffer;
1610 generic_info->fill_dmi_write_u64 = &riscv013_fill_dmi_write_u64;
1611 generic_info->fill_dmi_read_u64 = &riscv013_fill_dmi_read_u64;
1612 generic_info->fill_dmi_nop_u64 = &riscv013_fill_dmi_nop_u64;
1613 generic_info->dmi_write_u64_bits = &riscv013_dmi_write_u64_bits;
1614 generic_info->authdata_read = &riscv013_authdata_read;
1615 generic_info->authdata_write = &riscv013_authdata_write;
1616 generic_info->dmi_read = &dmi_read;
1617 generic_info->dmi_write = &dmi_write;
1618 generic_info->test_sba_config_reg = &riscv013_test_sba_config_reg;
1619 generic_info->test_compliance = &riscv013_test_compliance;
1620 generic_info->version_specific = calloc(1, sizeof(riscv013_info_t));
1621 if (!generic_info->version_specific)
1622 return ERROR_FAIL;
1623 riscv013_info_t *info = get_info(target);
1624
1625 info->progbufsize = -1;
1626
1627 info->dmi_busy_delay = 0;
1628 info->bus_master_read_delay = 0;
1629 info->bus_master_write_delay = 0;
1630 info->ac_busy_delay = 0;
1631
1632 /* Assume all these abstract commands are supported until we learn
1633 * otherwise.
1634 * TODO: The spec allows, e.g., one CSR to be accessible abstractly
1635 * while another one isn't. We don't track it that closely here, but in
1636 * the future we probably should. */
1637 info->abstract_read_csr_supported = true;
1638 info->abstract_write_csr_supported = true;
1639 info->abstract_read_fpr_supported = true;
1640 info->abstract_write_fpr_supported = true;
1641
1642 return ERROR_OK;
1643 }
1644
1645 static int assert_reset(struct target *target)
1646 {
1647 RISCV_INFO(r);
1648
1649 select_dmi(target);
1650
1651 uint32_t control_base = set_field(0, DMI_DMCONTROL_DMACTIVE, 1);
1652
1653 if (target->rtos) {
1654 /* There's only one target, and OpenOCD thinks each hart is a thread.
1655 * We must reset them all. */
1656
1657 /* TODO: Try to use hasel in dmcontrol */
1658
1659 /* Set haltreq for each hart. */
1660 uint32_t control = control_base;
1661 for (int i = 0; i < riscv_count_harts(target); ++i) {
1662 if (!riscv_hart_enabled(target, i))
1663 continue;
1664
1665 control = set_hartsel(control_base, i);
1666 control = set_field(control, DMI_DMCONTROL_HALTREQ,
1667 target->reset_halt ? 1 : 0);
1668 dmi_write(target, DMI_DMCONTROL, control);
1669 }
1670 /* Assert ndmreset */
1671 control = set_field(control, DMI_DMCONTROL_NDMRESET, 1);
1672 dmi_write(target, DMI_DMCONTROL, control);
1673
1674 } else {
1675 /* Reset just this hart. */
1676 uint32_t control = set_hartsel(control_base, r->current_hartid);
1677 control = set_field(control, DMI_DMCONTROL_HALTREQ,
1678 target->reset_halt ? 1 : 0);
1679 control = set_field(control, DMI_DMCONTROL_NDMRESET, 1);
1680 dmi_write(target, DMI_DMCONTROL, control);
1681 }
1682
1683 target->state = TARGET_RESET;
1684
1685 return ERROR_OK;
1686 }
1687
1688 static int deassert_reset(struct target *target)
1689 {
1690 RISCV_INFO(r);
1691 RISCV013_INFO(info);
1692 select_dmi(target);
1693
1694 /* Clear the reset, but make sure haltreq is still set */
1695 uint32_t control = 0;
1696 control = set_field(control, DMI_DMCONTROL_HALTREQ, target->reset_halt ? 1 : 0);
1697 control = set_field(control, DMI_DMCONTROL_DMACTIVE, 1);
1698 dmi_write(target, DMI_DMCONTROL,
1699 set_hartsel(control, r->current_hartid));
1700
1701 uint32_t dmstatus;
1702 int dmi_busy_delay = info->dmi_busy_delay;
1703 time_t start = time(NULL);
1704
1705 for (int i = 0; i < riscv_count_harts(target); ++i) {
1706 int index = i;
1707 if (target->rtos) {
1708 if (!riscv_hart_enabled(target, index))
1709 continue;
1710 dmi_write(target, DMI_DMCONTROL,
1711 set_hartsel(control, index));
1712 } else {
1713 index = r->current_hartid;
1714 }
1715
1716 char *operation;
1717 uint32_t expected_field;
1718 if (target->reset_halt) {
1719 operation = "halt";
1720 expected_field = DMI_DMSTATUS_ALLHALTED;
1721 } else {
1722 operation = "run";
1723 expected_field = DMI_DMSTATUS_ALLRUNNING;
1724 }
1725 LOG_DEBUG("Waiting for hart %d to %s out of reset.", index, operation);
1726 while (1) {
1727 int result = dmstatus_read_timeout(target, &dmstatus, true,
1728 riscv_reset_timeout_sec);
1729 if (result == ERROR_TIMEOUT_REACHED)
1730 LOG_ERROR("Hart %d didn't complete a DMI read coming out of "
1731 "reset in %ds; Increase the timeout with riscv "
1732 "set_reset_timeout_sec.",
1733 index, riscv_reset_timeout_sec);
1734 if (result != ERROR_OK)
1735 return result;
1736 if (get_field(dmstatus, expected_field))
1737 break;
1738 if (time(NULL) - start > riscv_reset_timeout_sec) {
1739 LOG_ERROR("Hart %d didn't %s coming out of reset in %ds; "
1740 "dmstatus=0x%x; "
1741 "Increase the timeout with riscv set_reset_timeout_sec.",
1742 index, operation, riscv_reset_timeout_sec, dmstatus);
1743 return ERROR_FAIL;
1744 }
1745 }
1746 target->state = target->reset_halt ? TARGET_HALTED : TARGET_RUNNING;
1747
1748 if (get_field(dmstatus, DMI_DMSTATUS_ALLHAVERESET)) {
1749 /* Ack reset. */
1750 dmi_write(target, DMI_DMCONTROL,
1751 set_hartsel(control, index) |
1752 DMI_DMCONTROL_ACKHAVERESET);
1753 }
1754
1755 if (!target->rtos)
1756 break;
1757 }
1758 info->dmi_busy_delay = dmi_busy_delay;
1759 return ERROR_OK;
1760 }
1761
1762 /**
1763 * @param size in bytes
1764 */
1765 static void write_to_buf(uint8_t *buffer, uint64_t value, unsigned size)
1766 {
1767 switch (size) {
1768 case 8:
1769 buffer[7] = value >> 56;
1770 buffer[6] = value >> 48;
1771 buffer[5] = value >> 40;
1772 buffer[4] = value >> 32;
1773 /* falls through */
1774 case 4:
1775 buffer[3] = value >> 24;
1776 buffer[2] = value >> 16;
1777 /* falls through */
1778 case 2:
1779 buffer[1] = value >> 8;
1780 /* falls through */
1781 case 1:
1782 buffer[0] = value;
1783 break;
1784 default:
1785 assert(false);
1786 }
1787 }
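/* For illustration (not in the original source): write_to_buf(buf, 0x11223344, 4)
 * stores buf[0]=0x44, buf[1]=0x33, buf[2]=0x22, buf[3]=0x11, i.e. values are
 * always laid out little-endian regardless of host byte order. */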
1788
1789 static int execute_fence(struct target *target)
1790 {
1791 int old_hartid = riscv_current_hartid(target);
1792
1793 /* FIXME: For non-coherent systems we need to flush the caches right
1794 * here, but there's no ISA-defined way of doing that. */
1795 {
1796 struct riscv_program program;
1797 riscv_program_init(&program, target);
1798 riscv_program_fence_i(&program);
1799 riscv_program_fence(&program);
1800 int result = riscv_program_exec(&program, target);
1801 if (result != ERROR_OK)
1802 LOG_DEBUG("Unable to execute pre-fence");
1803 }
1804
1805 for (int i = 0; i < riscv_count_harts(target); ++i) {
1806 if (!riscv_hart_enabled(target, i))
1807 continue;
1808
1809 riscv_set_current_hartid(target, i);
1810
1811 struct riscv_program program;
1812 riscv_program_init(&program, target);
1813 riscv_program_fence_i(&program);
1814 riscv_program_fence(&program);
1815 int result = riscv_program_exec(&program, target);
1816 if (result != ERROR_OK)
1817 LOG_DEBUG("Unable to execute fence on hart %d", i);
1818 }
1819
1820 riscv_set_current_hartid(target, old_hartid);
1821
1822 return ERROR_OK;
1823 }
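/* execute_fence() runs fence.i and fence on every enabled hart so that
 * memory written by the debugger becomes visible to instruction fetches and
 * is ordered with respect to other accesses; per the FIXME above this is
 * still not sufficient for non-coherent systems. */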
1824
1825 static void log_memory_access(target_addr_t address, uint64_t value,
1826 unsigned size_bytes, bool read)
1827 {
1828 if (debug_level < LOG_LVL_DEBUG)
1829 return;
1830
1831 char fmt[80];
1832 sprintf(fmt, "M[0x%" TARGET_PRIxADDR "] %ss 0x%%0%d" PRIx64,
1833 address, read ? "read" : "write", size_bytes * 2);
1834 value &= (((uint64_t) 0x1) << (size_bytes * 8)) - 1;
1835 LOG_DEBUG(fmt, value);
1836 }
1837
1838 /* Read the relevant sbdata regs depending on size, and put the results into
1839 * buffer. */
1840 static int read_memory_bus_word(struct target *target, target_addr_t address,
1841 uint32_t size, uint8_t *buffer)
1842 {
1843 uint32_t value;
1844 if (size > 12) {
1845 if (dmi_read(target, &value, DMI_SBDATA3) != ERROR_OK)
1846 return ERROR_FAIL;
1847 write_to_buf(buffer + 12, value, 4);
1848 log_memory_access(address + 12, value, 4, true);
1849 }
1850 if (size > 8) {
1851 if (dmi_read(target, &value, DMI_SBDATA2) != ERROR_OK)
1852 return ERROR_FAIL;
1853 write_to_buf(buffer + 8, value, 4);
1854 log_memory_access(address + 8, value, 4, true);
1855 }
1856 if (size > 4) {
1857 if (dmi_read(target, &value, DMI_SBDATA1) != ERROR_OK)
1858 return ERROR_FAIL;
1859 write_to_buf(buffer + 4, value, 4);
1860 log_memory_access(address + 4, value, 4, true);
1861 }
1862 if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
1863 return ERROR_FAIL;
1864 write_to_buf(buffer, value, MIN(size, 4));
1865 log_memory_access(address, value, MIN(size, 4), true);
1866 return ERROR_OK;
1867 }
1868
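/* Map an access size in bytes to the sbaccess encoding used in sbcs
 * (0: 8-bit, 1: 16-bit, 2: 32-bit, 3: 64-bit, 4: 128-bit). */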
1869 static uint32_t sb_sbaccess(unsigned size_bytes)
1870 {
1871 switch (size_bytes) {
1872 case 1:
1873 return set_field(0, DMI_SBCS_SBACCESS, 0);
1874 case 2:
1875 return set_field(0, DMI_SBCS_SBACCESS, 1);
1876 case 4:
1877 return set_field(0, DMI_SBCS_SBACCESS, 2);
1878 case 8:
1879 return set_field(0, DMI_SBCS_SBACCESS, 3);
1880 case 16:
1881 return set_field(0, DMI_SBCS_SBACCESS, 4);
1882 }
1883 assert(0);
1884 return 0; /* Make mingw happy. */
1885 }
1886
1887 static target_addr_t sb_read_address(struct target *target)
1888 {
1889 RISCV013_INFO(info);
1890 unsigned sbasize = get_field(info->sbcs, DMI_SBCS_SBASIZE);
1891 target_addr_t address = 0;
1892 uint32_t v;
1893 if (sbasize > 32) {
1894 #if BUILD_TARGET64
1895 dmi_read(target, &v, DMI_SBADDRESS1);
1896 address |= v;
1897 address <<= 32;
1898 #endif
1899 }
1900 dmi_read(target, &v, DMI_SBADDRESS0);
1901 address |= v;
1902 return address;
1903 }
1904
1905 static int sb_write_address(struct target *target, target_addr_t address)
1906 {
1907 RISCV013_INFO(info);
1908 unsigned sbasize = get_field(info->sbcs, DMI_SBCS_SBASIZE);
1909 /* There currently is no support for >64-bit addresses in OpenOCD. */
1910 if (sbasize > 96)
1911 dmi_write(target, DMI_SBADDRESS3, 0);
1912 if (sbasize > 64)
1913 dmi_write(target, DMI_SBADDRESS2, 0);
1914 if (sbasize > 32)
1915 #if BUILD_TARGET64
1916 dmi_write(target, DMI_SBADDRESS1, address >> 32);
1917 #else
1918 dmi_write(target, DMI_SBADDRESS1, 0);
1919 #endif
1920 return dmi_write(target, DMI_SBADDRESS0, address);
1921 }
1922
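/* Poll sbcs until sbbusy is clear; give up after riscv_command_timeout_sec. */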
1923 static int read_sbcs_nonbusy(struct target *target, uint32_t *sbcs)
1924 {
1925 time_t start = time(NULL);
1926 while (1) {
1927 if (dmi_read(target, sbcs, DMI_SBCS) != ERROR_OK)
1928 return ERROR_FAIL;
1929 if (!get_field(*sbcs, DMI_SBCS_SBBUSY))
1930 return ERROR_OK;
1931 if (time(NULL) - start > riscv_command_timeout_sec) {
1932 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
1933 "Increase the timeout with riscv set_command_timeout_sec.",
1934 riscv_command_timeout_sec, *sbcs);
1935 return ERROR_FAIL;
1936 }
1937 }
1938 }
1939
1940 static int read_memory_bus_v0(struct target *target, target_addr_t address,
1941 uint32_t size, uint32_t count, uint8_t *buffer)
1942 {
1943 LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
1944 TARGET_PRIxADDR, size, count, address);
1945 uint8_t *t_buffer = buffer;
1946 riscv_addr_t cur_addr = address;
1947 riscv_addr_t fin_addr = address + (count * size);
1948 uint32_t access = 0;
1949
1950 const int DMI_SBCS_SBSINGLEREAD_OFFSET = 20;
1951 const uint32_t DMI_SBCS_SBSINGLEREAD = (0x1U << DMI_SBCS_SBSINGLEREAD_OFFSET);
1952
1953 const int DMI_SBCS_SBAUTOREAD_OFFSET = 15;
1954 const uint32_t DMI_SBCS_SBAUTOREAD = (0x1U << DMI_SBCS_SBAUTOREAD_OFFSET);
1955
1956 /* We favor single (one-off) reads if there is an issue. */
1957 if (count == 1) {
1958 for (uint32_t i = 0; i < count; i++) {
1959 if (dmi_read(target, &access, DMI_SBCS) != ERROR_OK)
1960 return ERROR_FAIL;
1961 dmi_write(target, DMI_SBADDRESS0, cur_addr);
1962 /* size/2 maps to the sbaccess width encoding of the 0.13 spec */
1963 access = set_field(access, DMI_SBCS_SBACCESS, size/2);
1964 access = set_field(access, DMI_SBCS_SBSINGLEREAD, 1);
1965 LOG_DEBUG("\r\nread_memory: sab: access: 0x%08x", access);
1966 dmi_write(target, DMI_SBCS, access);
1967 /* 3) read */
1968 uint32_t value;
1969 if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
1970 return ERROR_FAIL;
1971 LOG_DEBUG("\r\nread_memory: sab: value: 0x%08x", value);
1972 write_to_buf(t_buffer, value, size);
1973 t_buffer += size;
1974 cur_addr += size;
1975 }
1976 return ERROR_OK;
1977 }
1978
1979 /* Accesses must all be the same size when reading a block. */
1980 LOG_DEBUG("reading block until final address 0x%" PRIx64, fin_addr);
1981 if (dmi_read(target, &access, DMI_SBCS) != ERROR_OK)
1982 return ERROR_FAIL;
1983 /* set current address */
1984 dmi_write(target, DMI_SBADDRESS0, cur_addr);
1985 /* 2) write sbaccess, sbsingleread, sbautoread, sbautoincrement;
1986 * size/2 maps to the sbaccess width encoding of the 0.13 spec */
1987 access = set_field(access, DMI_SBCS_SBACCESS, size/2);
1988 access = set_field(access, DMI_SBCS_SBAUTOREAD, 1);
1989 access = set_field(access, DMI_SBCS_SBSINGLEREAD, 1);
1990 access = set_field(access, DMI_SBCS_SBAUTOINCREMENT, 1);
1991 LOG_DEBUG("\r\naccess: 0x%08x", access);
1992 dmi_write(target, DMI_SBCS, access);
1993
1994 while (cur_addr < fin_addr) {
1995 LOG_DEBUG("\r\nsab:autoincrement: \r\n size: %d\tcount:%d\taddress: 0x%08"
1996 PRIx64, size, count, cur_addr);
1997 /* read */
1998 uint32_t value;
1999 if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
2000 return ERROR_FAIL;
2001 write_to_buf(t_buffer, value, size);
2002 cur_addr += size;
2003 t_buffer += size;
2004
2005 /* When we reach the last address we must clear autoread. */
2006 if (cur_addr == fin_addr && count != 1) {
2007 dmi_write(target, DMI_SBCS, 0);
2008 if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
2009 return ERROR_FAIL;
2010 write_to_buf(t_buffer, value, size);
2011 }
2012 }
2013
2014 return ERROR_OK;
2015 }
2016
2017 /**
2018 * Read the requested memory using the system bus interface.
2019 */
2020 static int read_memory_bus_v1(struct target *target, target_addr_t address,
2021 uint32_t size, uint32_t count, uint8_t *buffer)
2022 {
2023 RISCV013_INFO(info);
2024 target_addr_t next_address = address;
2025 target_addr_t end_address = address + count * size;
2026
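/* How this works with sbversion=1: with sbreadonaddr set, writing the
 * address below triggers the first bus read; with sbreadondata set, every
 * read of sbdata0 triggers the next one while sbautoincrement advances the
 * address. The final word is only fetched after sbreadondata has been
 * cleared, so no bus access is made past the requested range. */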
2027 while (next_address < end_address) {
2028 uint32_t sbcs = set_field(0, DMI_SBCS_SBREADONADDR, 1);
2029 sbcs |= sb_sbaccess(size);
2030 sbcs = set_field(sbcs, DMI_SBCS_SBAUTOINCREMENT, 1);
2031 sbcs = set_field(sbcs, DMI_SBCS_SBREADONDATA, count > 1);
2032 dmi_write(target, DMI_SBCS, sbcs);
2033
2034 /* This address write will trigger the first read. */
2035 sb_write_address(target, next_address);
2036
2037 if (info->bus_master_read_delay) {
2038 jtag_add_runtest(info->bus_master_read_delay, TAP_IDLE);
2039 if (jtag_execute_queue() != ERROR_OK) {
2040 LOG_ERROR("Failed to scan idle sequence");
2041 return ERROR_FAIL;
2042 }
2043 }
2044
2045 for (uint32_t i = (next_address - address) / size; i < count - 1; i++) {
2046 read_memory_bus_word(target, address + i * size, size,
2047 buffer + i * size);
2048 }
2049
2050 sbcs = set_field(sbcs, DMI_SBCS_SBREADONDATA, 0);
2051 dmi_write(target, DMI_SBCS, sbcs);
2052
2053 read_memory_bus_word(target, address + (count - 1) * size, size,
2054 buffer + (count - 1) * size);
2055
2056 if (read_sbcs_nonbusy(target, &sbcs) != ERROR_OK)
2057 return ERROR_FAIL;
2058
2059 if (get_field(sbcs, DMI_SBCS_SBBUSYERROR)) {
2060 /* We read while the target was busy. Slow down and try again. */
2061 dmi_write(target, DMI_SBCS, DMI_SBCS_SBBUSYERROR);
2062 next_address = sb_read_address(target);
2063 info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
2064 continue;
2065 }
2066
2067 unsigned error = get_field(sbcs, DMI_SBCS_SBERROR);
2068 if (error == 0) {
2069 next_address = end_address;
2070 } else {
2071 /* Some error indicating the bus access failed, but not because of
2072 * something we did wrong. */
2073 dmi_write(target, DMI_SBCS, DMI_SBCS_SBERROR);
2074 return ERROR_FAIL;
2075 }
2076 }
2077
2078 return ERROR_OK;
2079 }
2080
2081 static int batch_run(const struct target *target, struct riscv_batch *batch)
2082 {
2083 RISCV013_INFO(info);
2084 RISCV_INFO(r);
2085 if (r->reset_delays_wait >= 0) {
2086 r->reset_delays_wait -= batch->used_scans;
2087 if (r->reset_delays_wait <= 0) {
2088 batch->idle_count = 0;
2089 info->dmi_busy_delay = 0;
2090 info->ac_busy_delay = 0;
2091 }
2092 }
2093 return riscv_batch_run(batch);
2094 }
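/* reset_delays_wait counts down across scans; once it reaches zero the
 * learned idle/busy delays are cleared again, which exercises the code
 * paths that recover from dmi/abstract-command busy responses. */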
2095
2096 /**
2097 * Read the requested memory, taking care to execute every read exactly once,
2098 * even if cmderr=busy is encountered.
2099 */
2100 static int read_memory_progbuf_inner(struct target *target, target_addr_t address,
2101 uint32_t size, uint32_t count, uint8_t *buffer)
2102 {
2103 RISCV013_INFO(info);
2104
2105 int result = ERROR_OK;
2106
2107 /* Write address to S0, and execute buffer. */
2108 result = register_write_direct(target, GDB_REGNO_S0, address);
2109 if (result != ERROR_OK)
2110 goto error;
2111 uint32_t command = access_register_command(target, GDB_REGNO_S1,
2112 riscv_xlen(target),
2113 AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
2114 if (execute_abstract_command(target, command) != ERROR_OK)
2115 return ERROR_FAIL;
2116
2117 /* First read has just triggered. Result is in s1. */
2118
2119 if (count == 1) {
2120 uint64_t value;
2121 if (register_read_direct(target, &value, GDB_REGNO_S1) != ERROR_OK)
2122 return ERROR_FAIL;
2123 write_to_buf(buffer, value, size);
2124 log_memory_access(address, value, size, true);
2125 return ERROR_OK;
2126 }
2127
2128 if (dmi_write(target, DMI_ABSTRACTAUTO,
2129 1 << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET) != ERROR_OK)
2130 goto error;
2131 /* Read garbage from dmi_data0, which triggers another execution of the
2132 * program. Now dmi_data0 contains the first good result, and s1 the next
2133 * memory value. */
2134 if (dmi_read_exec(target, NULL, DMI_DATA0) != ERROR_OK)
2135 goto error;
2136
2137 /* read_addr is the next address that the hart will read from, which is the
2138 * value in s0. */
2139 riscv_addr_t read_addr = address + 2 * size;
2140 riscv_addr_t fin_addr = address + (count * size);
2141 while (read_addr < fin_addr) {
2142 LOG_DEBUG("read_addr=0x%" PRIx64 ", fin_addr=0x%" PRIx64, read_addr,
2143 fin_addr);
2144 /* The pipeline looks like this:
2145 * memory -> s1 -> dm_data0 -> debugger
2146 * Right now:
2147 * s0 contains read_addr
2148 * s1 contains mem[read_addr-size]
2149 * dm_data0 contains mem[read_addr-size*2]
2150 */
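/* Example (values for illustration only): with size=4 and address=0x1000,
 * on the first iteration s0=0x1008, s1=mem[0x1004] and dm_data0=mem[0x1000];
 * each further read of data0 hands the oldest value to the debugger and
 * triggers the hart to load the next one. */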
2151
2152 LOG_DEBUG("creating burst to read from 0x%" PRIx64
2153 " up to 0x%" PRIx64, read_addr, fin_addr);
2154 assert(read_addr >= address && read_addr < fin_addr);
2155 struct riscv_batch *batch = riscv_batch_alloc(target, 32,
2156 info->dmi_busy_delay + info->ac_busy_delay);
2157
2158 size_t reads = 0;
2159 for (riscv_addr_t addr = read_addr; addr < fin_addr; addr += size) {
2160 riscv_batch_add_dmi_read(batch, DMI_DATA0);
2161
2162 reads++;
2163 if (riscv_batch_full(batch))
2164 break;
2165 }
2166
2167 batch_run(target, batch);
2168
2169 /* Wait for the target to finish performing the last abstract command,
2170 * and update our copy of cmderr. If we see that DMI is busy here,
2171 * dmi_busy_delay will be incremented. */
2172 uint32_t abstractcs;
2173 if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
2174 return ERROR_FAIL;
2175 while (get_field(abstractcs, DMI_ABSTRACTCS_BUSY))
2176 if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
2177 return ERROR_FAIL;
2178 info->cmderr = get_field(abstractcs, DMI_ABSTRACTCS_CMDERR);
2179
2180 riscv_addr_t next_read_addr;
2181 unsigned ignore_last = 0;
2182 switch (info->cmderr) {
2183 case CMDERR_NONE:
2184 LOG_DEBUG("successful (partial?) memory read");
2185 next_read_addr = read_addr + reads * size;
2186 break;
2187 case CMDERR_BUSY:
2188 LOG_DEBUG("memory read resulted in busy response");
2189
2190 increase_ac_busy_delay(target);
2191 riscv013_clear_abstract_error(target);
2192
2193 dmi_write(target, DMI_ABSTRACTAUTO, 0);
2194
2195 uint32_t dmi_data0;
2196 /* This is definitely a good version of the value that we
2197 * attempted to read when we discovered that the target was
2198 * busy. */
2199 if (dmi_read(target, &dmi_data0, DMI_DATA0) != ERROR_OK) {
2200 riscv_batch_free(batch);
2201 goto error;
2202 }
2203
2204 /* See how far we got, clobbering dmi_data0. */
2205 result = register_read_direct(target, &next_read_addr,
2206 GDB_REGNO_S0);
2207 if (result != ERROR_OK) {
2208 riscv_batch_free(batch);
2209 goto error;
2210 }
2211 write_to_buf(buffer + next_read_addr - 2 * size - address, dmi_data0, size);
2212 log_memory_access(next_read_addr - 2 * size, dmi_data0, size, true);
2213
2214 /* Restore the command, and execute it.
2215 * Now DMI_DATA0 contains the next value just as it would if no
2216 * error had occurred. */
2217 dmi_write_exec(target, DMI_COMMAND, command);
2218 next_read_addr += size;
2219
2220 dmi_write(target, DMI_ABSTRACTAUTO,
2221 1 << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
2222
2223 ignore_last = 1;
2224
2225 break;
2226 default:
2227 LOG_DEBUG("error when reading memory, abstractcs=0x%08lx", (long)abstractcs);
2228 riscv013_clear_abstract_error(target);
2229 riscv_batch_free(batch);
2230 result = ERROR_FAIL;
2231 goto error;
2232 }
2233
2234 /* Now read whatever we got out of the batch. */
2235 dmi_status_t status = DMI_STATUS_SUCCESS;
2236 for (size_t i = 0; i < reads; i++) {
2237 riscv_addr_t receive_addr = read_addr + (i-2) * size;
2238 assert(receive_addr < address + size * count);
2239 if (receive_addr < address)
2240 continue;
2241 if (receive_addr > next_read_addr - (3 + ignore_last) * size)
2242 break;
2243
2244 uint64_t dmi_out = riscv_batch_get_dmi_read(batch, i);
2245 status = get_field(dmi_out, DTM_DMI_OP);
2246 if (status != DMI_STATUS_SUCCESS) {
2247 /* If we're here because of busy count, dmi_busy_delay will
2248 * already have been increased and busy state will have been
2249 * cleared in dmi_read(). */
2250 /* In at least some implementations, we issue a read, and then
2251 * can get busy back when we try to scan out the read result,
2252 * and the actual read value is lost forever. Since this is
2253 * rare in any case, we return error here and rely on our
2254 * caller to reread the entire block. */
2255 LOG_WARNING("Batch memory read encountered DMI error %d. "
2256 "Falling back on slower reads.", status);
2257 riscv_batch_free(batch);
2258 result = ERROR_FAIL;
2259 goto error;
2260 }
2261 uint32_t value = get_field(dmi_out, DTM_DMI_DATA);
2262 riscv_addr_t offset = receive_addr - address;
2263 write_to_buf(buffer + offset, value, size);
2264 log_memory_access(receive_addr, value, size, true);
2265
2266 receive_addr += size;
2267 }
2268
2269 read_addr = next_read_addr;
2270
2271 riscv_batch_free(batch);
2272 }
2273
2274 dmi_write(target, DMI_ABSTRACTAUTO, 0);
2275
2276 if (count > 1) {
2277 /* Read the penultimate word. */
2278 uint32_t value;
2279 if (dmi_read(target, &value, DMI_DATA0) != ERROR_OK)
2280 return ERROR_FAIL;
2281 write_to_buf(buffer + size * (count-2), value, size);
2282 log_memory_access(address + size * (count-2), value, size, true);
2283 }
2284
2285 /* Read the last word. */
2286 uint64_t value;
2287 result = register_read_direct(target, &value, GDB_REGNO_S1);
2288 if (result != ERROR_OK)
2289 goto error;
2290 write_to_buf(buffer + size * (count-1), value, size);
2291 log_memory_access(address + size * (count-1), value, size, true);
2292
2293 return ERROR_OK;
2294
2295 error:
2296 dmi_write(target, DMI_ABSTRACTAUTO, 0);
2297
2298 return result;
2299 }
2300
2301 /**
2302 * Read the requested memory, silently handling memory access errors.
2303 */
2304 static int read_memory_progbuf(struct target *target, target_addr_t address,
2305 uint32_t size, uint32_t count, uint8_t *buffer)
2306 {
2307 int result = ERROR_OK;
2308
2309 LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
2310 size, address);
2311
2312 select_dmi(target);
2313
2314 memset(buffer, 0, count*size);
2315
2316 /* s0 holds the next address to write to
2317 * s1 holds the next data value to write
2318 */
2319 uint64_t s0, s1;
2320 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
2321 return ERROR_FAIL;
2322 if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
2323 return ERROR_FAIL;
2324
2325 if (execute_fence(target) != ERROR_OK)
2326 return ERROR_FAIL;
2327
2328 /* Write the program (load, increment) */
2329 struct riscv_program program;
2330 riscv_program_init(&program, target);
2331 switch (size) {
2332 case 1:
2333 riscv_program_lbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
2334 break;
2335 case 2:
2336 riscv_program_lhr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
2337 break;
2338 case 4:
2339 riscv_program_lwr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
2340 break;
2341 default:
2342 LOG_ERROR("Unsupported size: %d", size);
2343 return ERROR_FAIL;
2344 }
2345 riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, size);
2346
2347 if (riscv_program_ebreak(&program) != ERROR_OK)
2348 return ERROR_FAIL;
2349 riscv_program_write(&program);
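/* The program buffer now holds: l{b|h|w} s1, 0(s0); addi s0, s0, size;
 * ebreak. Each execution reads one element into s1 and advances the
 * address held in s0. */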
2350
2351 result = read_memory_progbuf_inner(target, address, size, count, buffer);
2352
2353 if (result != ERROR_OK) {
2354 /* The full read did not succeed, so we will try to read each word individually. */
2355 /* This will not be fast, but reading outside actual memory is a special case anyway. */
2356 /* It will make the toolchain happier, especially Eclipse Memory View as it reads ahead. */
2357 target_addr_t address_i = address;
2358 uint32_t size_i = size;
2359 uint32_t count_i = 1;
2360 uint8_t *buffer_i = buffer;
2361
2362 for (uint32_t i = 0; i < count; i++, address_i += size_i, buffer_i += size_i) {
2363 /* TODO: This is much slower than it needs to be because we end up
2364 * writing the address to read for every word we read. */
2365 result = read_memory_progbuf_inner(target, address_i, size_i, count_i, buffer_i);
2366
2367 /* The read of a single word failed, so we will just return 0 for that instead */
2368 if (result != ERROR_OK) {
2369 LOG_DEBUG("error reading single word of %d bytes from 0x%" TARGET_PRIxADDR,
2370 size_i, address_i);
2371
2372 uint64_t value_i = 0;
2373 write_to_buf(buffer_i, value_i, size_i);
2374 }
2375 }
2376 result = ERROR_OK;
2377 }
2378
2379 riscv_set_register(target, GDB_REGNO_S0, s0);
2380 riscv_set_register(target, GDB_REGNO_S1, s1);
2381 return result;
2382 }
2383
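/* Dispatch a memory read: use the program buffer unless the user prefers
 * system bus access, fall back to the system bus when it supports the
 * requested access size (per sbcs), and finally back to the program buffer. */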
2384 static int read_memory(struct target *target, target_addr_t address,
2385 uint32_t size, uint32_t count, uint8_t *buffer)
2386 {
2387 RISCV013_INFO(info);
2388 if (info->progbufsize >= 2 && !riscv_prefer_sba)
2389 return read_memory_progbuf(target, address, size, count, buffer);
2390
2391 if ((get_field(info->sbcs, DMI_SBCS_SBACCESS8) && size == 1) ||
2392 (get_field(info->sbcs, DMI_SBCS_SBACCESS16) && size == 2) ||
2393 (get_field(info->sbcs, DMI_SBCS_SBACCESS32) && size == 4) ||
2394 (get_field(info->sbcs, DMI_SBCS_SBACCESS64) && size == 8) ||
2395 (get_field(info->sbcs, DMI_SBCS_SBACCESS128) && size == 16)) {
2396 if (get_field(info->sbcs, DMI_SBCS_SBVERSION) == 0)
2397 return read_memory_bus_v0(target, address, size, count, buffer);
2398 else if (get_field(info->sbcs, DMI_SBCS_SBVERSION) == 1)
2399 return read_memory_bus_v1(target, address, size, count, buffer);
2400 }
2401
2402 if (info->progbufsize >= 2)
2403 return read_memory_progbuf(target, address, size, count, buffer);
2404
2405 LOG_ERROR("Don't know how to read memory on this target.");
2406 return ERROR_FAIL;
2407 }
2408
2409 static int write_memory_bus_v0(struct target *target, target_addr_t address,
2410 uint32_t size, uint32_t count, const uint8_t *buffer)
2411 {
2412 /* 1) Write sbaddress: for single write and autoincrement, we need to write the address once. */
2413 LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
2414 TARGET_PRIxADDR, size, count, address);
2415 dmi_write(target, DMI_SBADDRESS0, address);
2416 int64_t value = 0;
2417 int64_t access = 0;
2418 riscv_addr_t offset = 0;
2419 riscv_addr_t t_addr = 0;
2420 const uint8_t *t_buffer = buffer + offset;
2421
2422 /* B.8 Writing Memory, single write: check if we can write in one go */
2423 if (count == 1) { /* count is the number of accesses of the given size */
2424 /* check the size */
2425 switch (size) {
2426 case 1:
2427 value = t_buffer[0];
2428 break;
2429 case 2:
2430 value = t_buffer[0]
2431 | ((uint32_t) t_buffer[1] << 8);
2432 break;
2433 case 4:
2434 value = t_buffer[0]
2435 | ((uint32_t) t_buffer[1] << 8)
2436 | ((uint32_t) t_buffer[2] << 16)
2437 | ((uint32_t) t_buffer[3] << 24);
2438 break;
2439 default:
2440 LOG_ERROR("unsupported access size: %d", size);
2441 return ERROR_FAIL;
2442 }
2443
2444 access = 0;
2445 access = set_field(access, DMI_SBCS_SBACCESS, size/2);
2446 dmi_write(target, DMI_SBCS, access);
2447 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
2448 LOG_DEBUG("\r\nwrite_memory:SAB: ONE OFF: value 0x%08" PRIx64, value);
2449 dmi_write(target, DMI_SBDATA0, value);
2450 return ERROR_OK;
2451 }
2452
2453 /* B.8 Writing Memory, using autoincrement */
2454
2455 access = 0;
2456 access = set_field(access, DMI_SBCS_SBACCESS, size/2);
2457 access = set_field(access, DMI_SBCS_SBAUTOINCREMENT, 1);
2458 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
2459 dmi_write(target, DMI_SBCS, access);
2460
2461 /* 2) Set the value according to the required size and write it. */
2462 for (riscv_addr_t i = 0; i < count; ++i) {
2463 offset = size*i;
2464 /* for monitoring only */
2465 t_addr = address + offset;
2466 t_buffer = buffer + offset;
2467
2468 switch (size) {
2469 case 1:
2470 value = t_buffer[0];
2471 break;
2472 case 2:
2473 value = t_buffer[0]
2474 | ((uint32_t) t_buffer[1] << 8);
2475 break;
2476 case 4:
2477 value = t_buffer[0]
2478 | ((uint32_t) t_buffer[1] << 8)
2479 | ((uint32_t) t_buffer[2] << 16)
2480 | ((uint32_t) t_buffer[3] << 24);
2481 break;
2482 default:
2483 LOG_ERROR("unsupported access size: %d", size);
2484 return ERROR_FAIL;
2485 }
2486 LOG_DEBUG("SAB:autoincrement: expected address: 0x%08x value: 0x%08x"
2487 PRIx64, (uint32_t)t_addr, (uint32_t)value);
2488 dmi_write(target, DMI_SBDATA0, value);
2489 }
2490 /* Reset the autoincrement when finished (something weird happens if this is not done at the end). */
2491 access = set_field(access, DMI_SBCS_SBAUTOINCREMENT, 0);
2492 dmi_write(target, DMI_SBCS, access);
2493
2494 return ERROR_OK;
2495 }
2496
2497 static int write_memory_bus_v1(struct target *target, target_addr_t address,
2498 uint32_t size, uint32_t count, const uint8_t *buffer)
2499 {
2500 RISCV013_INFO(info);
2501 uint32_t sbcs = sb_sbaccess(size);
2502 sbcs = set_field(sbcs, DMI_SBCS_SBAUTOINCREMENT, 1);
2503 dmi_write(target, DMI_SBCS, sbcs);
2504
2505 target_addr_t next_address = address;
2506 target_addr_t end_address = address + count * size;
2507
2508 sb_write_address(target, next_address);
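/* With sbautoincrement set, each completed write to sbdata0 advances the
 * address, so within a burst only the data registers need to be written.
 * On sbbusyerror we re-read sbaddress below to see how far the burst got
 * and retry from there with a larger delay. */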
2509 while (next_address < end_address) {
2510 for (uint32_t i = (next_address - address) / size; i < count; i++) {
2511 const uint8_t *p = buffer + i * size;
2512 if (size > 12)
2513 dmi_write(target, DMI_SBDATA3,
2514 ((uint32_t) p[12]) |
2515 (((uint32_t) p[13]) << 8) |
2516 (((uint32_t) p[14]) << 16) |
2517 (((uint32_t) p[15]) << 24));
2518 if (size > 8)
2519 dmi_write(target, DMI_SBDATA2,
2520 ((uint32_t) p[8]) |
2521 (((uint32_t) p[9]) << 8) |
2522 (((uint32_t) p[10]) << 16) |
2523 (((uint32_t) p[11]) << 24));
2524 if (size > 4)
2525 dmi_write(target, DMI_SBDATA1,
2526 ((uint32_t) p[4]) |
2527 (((uint32_t) p[5]) << 8) |
2528 (((uint32_t) p[6]) << 16) |
2529 (((uint32_t) p[7]) << 24));
2530 uint32_t value = p[0];
2531 if (size > 2) {
2532 value |= ((uint32_t) p[2]) << 16;
2533 value |= ((uint32_t) p[3]) << 24;
2534 }
2535 if (size > 1)
2536 value |= ((uint32_t) p[1]) << 8;
2537 dmi_write(target, DMI_SBDATA0, value);
2538
2539 log_memory_access(address + i * size, value, size, false);
2540
2541 if (info->bus_master_write_delay) {
2542 jtag_add_runtest(info->bus_master_write_delay, TAP_IDLE);
2543 if (jtag_execute_queue() != ERROR_OK) {
2544 LOG_ERROR("Failed to scan idle sequence");
2545 return ERROR_FAIL;
2546 }
2547 }
2548 }
2549
2550 if (read_sbcs_nonbusy(target, &sbcs) != ERROR_OK)
2551 return ERROR_FAIL;
2552
2553 if (get_field(sbcs, DMI_SBCS_SBBUSYERROR)) {
2554 /* We wrote while the target was busy. Slow down and try again. */
2555 dmi_write(target, DMI_SBCS, DMI_SBCS_SBBUSYERROR);
2556 next_address = sb_read_address(target);
2557 info->bus_master_write_delay += info->bus_master_write_delay / 10 + 1;
2558 continue;
2559 }
2560
2561 unsigned error = get_field(sbcs, DMI_SBCS_SBERROR);
2562 if (error == 0) {
2563 next_address = end_address;
2564 } else {
2565 /* Some error indicating the bus access failed, but not because of
2566 * something we did wrong. */
2567 dmi_write(target, DMI_SBCS, DMI_SBCS_SBERROR);
2568 return ERROR_FAIL;
2569 }
2570 }
2571
2572 return ERROR_OK;
2573 }
2574
2575 static int write_memory_progbuf(struct target *target, target_addr_t address,
2576 uint32_t size, uint32_t count, const uint8_t *buffer)
2577 {
2578 RISCV013_INFO(info);
2579
2580 LOG_DEBUG("writing %d words of %d bytes to 0x%08lx", count, size, (long)address);
2581
2582 select_dmi(target);
2583
2584 /* s0 holds the next address to write to
2585 * s1 holds the next data value to write
2586 */
2587
2588 int result = ERROR_OK;
2589 uint64_t s0, s1;
2590 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
2591 return ERROR_FAIL;
2592 if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
2593 return ERROR_FAIL;
2594
2595 /* Write the program (store, increment) */
2596 struct riscv_program program;
2597 riscv_program_init(&program, target);
2598
2599 switch (size) {
2600 case 1:
2601 riscv_program_sbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
2602 break;
2603 case 2:
2604 riscv_program_shr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
2605 break;
2606 case 4:
2607 riscv_program_swr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
2608 break;
2609 default:
2610 LOG_ERROR("Unsupported size: %d", size);
2611 result = ERROR_FAIL;
2612 goto error;
2613 }
2614
2615 riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, size);
2616
2617 result = riscv_program_ebreak(&program);
2618 if (result != ERROR_OK)
2619 goto error;
2620 riscv_program_write(&program);
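/* The program buffer now holds: s{b|h|w} s1, 0(s0); addi s0, s0, size;
 * ebreak. The first element is written via an explicit abstract command
 * below; once autoexecdata is on, every write to data0 stores one more
 * element and advances s0. */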
2621
2622 riscv_addr_t cur_addr = address;
2623 riscv_addr_t fin_addr = address + (count * size);
2624 bool setup_needed = true;
2625 LOG_DEBUG("writing until final address 0x%016" PRIx64, fin_addr);
2626 while (cur_addr < fin_addr) {
2627 LOG_DEBUG("transferring burst starting at address 0x%016" PRIx64,
2628 cur_addr);
2629
2630 struct riscv_batch *batch = riscv_batch_alloc(
2631 target,
2632 32,
2633 info->dmi_busy_delay + info->ac_busy_delay);
2634
2635 /* To write another word, we put it in S1 and execute the program. */
2636 unsigned start = (cur_addr - address) / size;
2637 for (unsigned i = start; i < count; ++i) {
2638 unsigned offset = size*i;
2639 const uint8_t *t_buffer = buffer + offset;
2640
2641 uint32_t value;
2642 switch (size) {
2643 case 1:
2644 value = t_buffer[0];
2645 break;
2646 case 2:
2647 value = t_buffer[0]
2648 | ((uint32_t) t_buffer[1] << 8);
2649 break;
2650 case 4:
2651 value = t_buffer[0]
2652 | ((uint32_t) t_buffer[1] << 8)
2653 | ((uint32_t) t_buffer[2] << 16)
2654 | ((uint32_t) t_buffer[3] << 24);
2655 break;
2656 default:
2657 LOG_ERROR("unsupported access size: %d", size);
2658 riscv_batch_free(batch);
2659 result = ERROR_FAIL;
2660 goto error;
2661 }
2662
2663 log_memory_access(address + offset, value, size, false);
2664 cur_addr += size;
2665
2666 if (setup_needed) {
2667 result = register_write_direct(target, GDB_REGNO_S0,
2668 address + offset);
2669 if (result != ERROR_OK) {
2670 riscv_batch_free(batch);
2671 goto error;
2672 }
2673
2674 /* Write value. */
2675 dmi_write(target, DMI_DATA0, value);
2676
2677 /* Write and execute command that moves value into S1 and
2678 * executes program buffer. */
2679 uint32_t command = access_register_command(target,
2680 GDB_REGNO_S1, 32,
2681 AC_ACCESS_REGISTER_POSTEXEC |
2682 AC_ACCESS_REGISTER_TRANSFER |
2683 AC_ACCESS_REGISTER_WRITE);
2684 result = execute_abstract_command(target, command);
2685 if (result != ERROR_OK) {
2686 riscv_batch_free(batch);
2687 goto error;
2688 }
2689
2690 /* Turn on autoexec */
2691 dmi_write(target, DMI_ABSTRACTAUTO,
2692 1 << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
2693
2694 setup_needed = false;
2695 } else {
2696 riscv_batch_add_dmi_write(batch, DMI_DATA0, value);
2697 if (riscv_batch_full(batch))
2698 break;
2699 }
2700 }
2701
2702 result = batch_run(target, batch);
2703 riscv_batch_free(batch);
2704 if (result != ERROR_OK)
2705 goto error;
2706
2707 /* Note that if the scan resulted in a Busy DMI response, it
2708 * is this read to abstractcs that will cause the dmi_busy_delay
2709 * to be incremented if necessary. */
2710
2711 uint32_t abstractcs;
2712 bool dmi_busy_encountered;
2713 if (dmi_op(target, &abstractcs, &dmi_busy_encountered, DMI_OP_READ,
2714 DMI_ABSTRACTCS, 0, false) != ERROR_OK)
2715 goto error;
2716 while (get_field(abstractcs, DMI_ABSTRACTCS_BUSY))
2717 if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
2718 return ERROR_FAIL;
2719 info->cmderr = get_field(abstractcs, DMI_ABSTRACTCS_CMDERR);
2720 if (info->cmderr == CMDERR_NONE && !dmi_busy_encountered) {
2721 LOG_DEBUG("successful (partial?) memory write");
2722 } else if (info->cmderr == CMDERR_BUSY || dmi_busy_encountered) {
2723 if (info->cmderr == CMDERR_BUSY)
2724 LOG_DEBUG("Memory write resulted in abstract command busy response.");
2725 else if (dmi_busy_encountered)
2726 LOG_DEBUG("Memory write resulted in DMI busy response.");
2727 riscv013_clear_abstract_error(target);
2728 increase_ac_busy_delay(target);
2729
2730 dmi_write(target, DMI_ABSTRACTAUTO, 0);
2731 result = register_read_direct(target, &cur_addr, GDB_REGNO_S0);
2732 if (result != ERROR_OK)
2733 goto error;
2734 setup_needed = true;
2735 } else {
2736 LOG_ERROR("error when writing memory, abstractcs=0x%08lx", (long)abstractcs);
2737 riscv013_clear_abstract_error(target);
2738 result = ERROR_FAIL;
2739 goto error;
2740 }
2741 }
2742
2743 error:
2744 dmi_write(target, DMI_ABSTRACTAUTO, 0);
2745
2746 if (register_write_direct(target, GDB_REGNO_S1, s1) != ERROR_OK)
2747 return ERROR_FAIL;
2748 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
2749 return ERROR_FAIL;
2750
2751 if (execute_fence(target) != ERROR_OK)
2752 return ERROR_FAIL;
2753
2754 return result;
2755 }
2756
2757 static int write_memory(struct target *target, target_addr_t address,
2758 uint32_t size, uint32_t count, const uint8_t *buffer)
2759 {
2760 RISCV013_INFO(info);
2761 if (info->progbufsize >= 2 && !riscv_prefer_sba)
2762 return write_memory_progbuf(target, address, size, count, buffer);
2763
2764 if ((get_field(info->sbcs, DMI_SBCS_SBACCESS8) && size == 1) ||
2765 (get_field(info->sbcs, DMI_SBCS_SBACCESS16) && size == 2) ||
2766 (get_field(info->sbcs, DMI_SBCS_SBACCESS32) && size == 4) ||
2767 (get_field(info->sbcs, DMI_SBCS_SBACCESS64) && size == 8) ||
2768 (get_field(info->sbcs, DMI_SBCS_SBACCESS128) && size == 16)) {
2769 if (get_field(info->sbcs, DMI_SBCS_SBVERSION) == 0)
2770 return write_memory_bus_v0(target, address, size, count, buffer);
2771 else if (get_field(info->sbcs, DMI_SBCS_SBVERSION) == 1)
2772 return write_memory_bus_v1(target, address, size, count, buffer);
2773 }
2774
2775 if (info->progbufsize >= 2)
2776 return write_memory_progbuf(target, address, size, count, buffer);
2777
2778 LOG_ERROR("Don't know how to write memory on this target.");
2779 return ERROR_FAIL;
2780 }
2781
2782 static int arch_state(struct target *target)
2783 {
2784 return ERROR_OK;
2785 }
2786
2787 struct target_type riscv013_target = {
2788 .name = "riscv",
2789
2790 .init_target = init_target,
2791 .deinit_target = deinit_target,
2792 .examine = examine,
2793
2794 .poll = &riscv_openocd_poll,
2795 .halt = &riscv_openocd_halt,
2796 .resume = &riscv_openocd_resume,
2797 .step = &riscv_openocd_step,
2798
2799 .assert_reset = assert_reset,
2800 .deassert_reset = deassert_reset,
2801
2802 .read_memory = read_memory,
2803 .write_memory = write_memory,
2804
2805 .arch_state = arch_state,
2806 };
2807
2808 /*** 0.13-specific implementations of various RISC-V helper functions. ***/
2809 static int riscv013_get_register(struct target *target,
2810 riscv_reg_t *value, int hid, int rid)
2811 {
2812 LOG_DEBUG("reading register %s on hart %d", gdb_regno_name(rid), hid);
2813
2814 riscv_set_current_hartid(target, hid);
2815
2816 int result = ERROR_OK;
2817 if (rid == GDB_REGNO_PC) {
2818 result = register_read(target, value, GDB_REGNO_DPC);
2819 LOG_DEBUG("read PC from DPC: 0x%" PRIx64, *value);
2820 } else if (rid == GDB_REGNO_PRIV) {
2821 uint64_t dcsr;
2822 result = register_read(target, &dcsr, GDB_REGNO_DCSR);
2823 *value = get_field(dcsr, CSR_DCSR_PRV);
2824 } else {
2825 result = register_read(target, value, rid);
2826 if (result != ERROR_OK)
2827 *value = -1;
2828 }
2829
2830 return result;
2831 }
2832
2833 static int riscv013_set_register(struct target *target, int hid, int rid, uint64_t value)
2834 {
2835 LOG_DEBUG("writing 0x%" PRIx64 " to register %s on hart %d", value,
2836 gdb_regno_name(rid), hid);
2837
2838 riscv_set_current_hartid(target, hid);
2839
2840 if (rid <= GDB_REGNO_XPR31) {
2841 return register_write_direct(target, rid, value);
2842 } else if (rid == GDB_REGNO_PC) {
2843 LOG_DEBUG("writing PC to DPC: 0x%" PRIx64, value);
2844 register_write_direct(target, GDB_REGNO_DPC, value);
2845 uint64_t actual_value;
2846 register_read_direct(target, &actual_value, GDB_REGNO_DPC);
2847 LOG_DEBUG(" actual DPC written: 0x%016" PRIx64, actual_value);
2848 if (value != actual_value) {
2849 LOG_ERROR("Written PC (0x%" PRIx64 ") does not match read back "
2850 "value (0x%" PRIx64 ")", value, actual_value);
2851 return ERROR_FAIL;
2852 }
2853 } else if (rid == GDB_REGNO_PRIV) {
2854 uint64_t dcsr;
2855 register_read(target, &dcsr, GDB_REGNO_DCSR);
2856 dcsr = set_field(dcsr, CSR_DCSR_PRV, value);
2857 return register_write_direct(target, GDB_REGNO_DCSR, dcsr);
2858 } else {
2859 return register_write_direct(target, rid, value);
2860 }
2861
2862 return ERROR_OK;
2863 }
2864
2865 static int riscv013_select_current_hart(struct target *target)
2866 {
2867 RISCV_INFO(r);
2868
2869 dm013_info_t *dm = get_dm(target);
2870 if (r->current_hartid == dm->current_hartid)
2871 return ERROR_OK;
2872
2873 uint32_t dmcontrol;
2874 /* TODO: can't we just "dmcontrol = DMI_DMACTIVE"? */
2875 if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
2876 return ERROR_FAIL;
2877 dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
2878 int result = dmi_write(target, DMI_DMCONTROL, dmcontrol);
2879 dm->current_hartid = r->current_hartid;
2880 return result;
2881 }
2882
2883 static int riscv013_halt_current_hart(struct target *target)
2884 {
2885 RISCV_INFO(r);
2886 LOG_DEBUG("halting hart %d", r->current_hartid);
2887 if (riscv_is_halted(target))
2888 LOG_ERROR("Hart %d is already halted!", r->current_hartid);
2889
2890 /* Issue the halt command, and then wait for the current hart to halt. */
2891 uint32_t dmcontrol;
2892 if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
2893 return ERROR_FAIL;
2894 dmcontrol = set_field(dmcontrol, DMI_DMCONTROL_HALTREQ, 1);
2895 dmi_write(target, DMI_DMCONTROL, dmcontrol);
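/* Poll for the halt to take effect, bounded to 256 checks so we fail with
 * the diagnostics below instead of hanging forever. */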
2896 for (size_t i = 0; i < 256; ++i)
2897 if (riscv_is_halted(target))
2898 break;
2899
2900 if (!riscv_is_halted(target)) {
2901 uint32_t dmstatus;
2902 if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
2903 return ERROR_FAIL;
2904 if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
2905 return ERROR_FAIL;
2906
2907 LOG_ERROR("unable to halt hart %d", r->current_hartid);
2908 LOG_ERROR(" dmcontrol=0x%08x", dmcontrol);
2909 LOG_ERROR(" dmstatus =0x%08x", dmstatus);
2910 return ERROR_FAIL;
2911 }
2912
2913 dmcontrol = set_field(dmcontrol, DMI_DMCONTROL_HALTREQ, 0);
2914 dmi_write(target, DMI_DMCONTROL, dmcontrol);
2915
2916 return ERROR_OK;
2917 }
2918
2919 static int riscv013_resume_current_hart(struct target *target)
2920 {
2921 return riscv013_step_or_resume_current_hart(target, false);
2922 }
2923
2924 static int riscv013_step_current_hart(struct target *target)
2925 {
2926 return riscv013_step_or_resume_current_hart(target, true);
2927 }
2928
2929 static int riscv013_on_resume(struct target *target)
2930 {
2931 return riscv013_on_step_or_resume(target, false);
2932 }
2933
2934 static int riscv013_on_step(struct target *target)
2935 {
2936 return riscv013_on_step_or_resume(target, true);
2937 }
2938
2939 static int riscv013_on_halt(struct target *target)
2940 {
2941 return ERROR_OK;
2942 }
2943
2944 static bool riscv013_is_halted(struct target *target)
2945 {
2946 uint32_t dmstatus;
2947 if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
2948 return false;
2949 if (get_field(dmstatus, DMI_DMSTATUS_ANYUNAVAIL))
2950 LOG_ERROR("Hart %d is unavailable.", riscv_current_hartid(target));
2951 if (get_field(dmstatus, DMI_DMSTATUS_ANYNONEXISTENT))
2952 LOG_ERROR("Hart %d doesn't exist.", riscv_current_hartid(target));
2953 if (get_field(dmstatus, DMI_DMSTATUS_ANYHAVERESET)) {
2954 int hartid = riscv_current_hartid(target);