openocd: fix doxygen parameters of functions
[openocd.git] / src / target / riscv / riscv-013.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /*
4 * Support for RISC-V, debug version 0.13, which is currently (2/4/17) the
5 * latest draft.
6 */
7
8 #include <assert.h>
9 #include <stdlib.h>
10 #include <time.h>
11
12 #ifdef HAVE_CONFIG_H
13 #include "config.h"
14 #endif
15
16 #include "target/target.h"
17 #include "target/algorithm.h"
18 #include "target/target_type.h"
19 #include "log.h"
20 #include "jtag/jtag.h"
21 #include "target/register.h"
22 #include "target/breakpoints.h"
23 #include "helper/time_support.h"
24 #include "helper/list.h"
25 #include "riscv.h"
26 #include "debug_defines.h"
27 #include "rtos/rtos.h"
28 #include "program.h"
29 #include "asm.h"
30 #include "batch.h"
31
32 #define DM_DATA1 (DM_DATA0 + 1)
33 #define DM_PROGBUF1 (DM_PROGBUF0 + 1)
34
35 static int riscv013_on_step_or_resume(struct target *target, bool step);
36 static int riscv013_step_or_resume_current_hart(struct target *target,
37 bool step, bool use_hasel);
38 static void riscv013_clear_abstract_error(struct target *target);
39
40 /* Implementations of the functions in riscv_info_t. */
41 static int riscv013_get_register(struct target *target,
42 riscv_reg_t *value, int hid, int rid);
43 static int riscv013_set_register(struct target *target, int hartid, int regid, uint64_t value);
44 static int riscv013_select_current_hart(struct target *target);
45 static int riscv013_halt_prep(struct target *target);
46 static int riscv013_halt_go(struct target *target);
47 static int riscv013_resume_go(struct target *target);
48 static int riscv013_step_current_hart(struct target *target);
49 static int riscv013_on_halt(struct target *target);
50 static int riscv013_on_step(struct target *target);
51 static int riscv013_resume_prep(struct target *target);
52 static bool riscv013_is_halted(struct target *target);
53 static enum riscv_halt_reason riscv013_halt_reason(struct target *target);
54 static int riscv013_write_debug_buffer(struct target *target, unsigned index,
55 riscv_insn_t d);
56 static riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned
57 index);
58 static int riscv013_execute_debug_buffer(struct target *target);
59 static void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d);
60 static void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a);
61 static int riscv013_dmi_write_u64_bits(struct target *target);
62 static void riscv013_fill_dmi_nop_u64(struct target *target, char *buf);
63 static int register_read(struct target *target, uint64_t *value, uint32_t number);
64 static int register_read_direct(struct target *target, uint64_t *value, uint32_t number);
65 static int register_write_direct(struct target *target, unsigned number,
66 uint64_t value);
67 static int read_memory(struct target *target, target_addr_t address,
68 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment);
69 static int write_memory(struct target *target, target_addr_t address,
70 uint32_t size, uint32_t count, const uint8_t *buffer);
71 static int riscv013_test_sba_config_reg(struct target *target, target_addr_t legal_address,
72 uint32_t num_words, target_addr_t illegal_address, bool run_sbbusyerror_test);
73 void write_memory_sba_simple(struct target *target, target_addr_t addr, uint32_t *write_data,
74 uint32_t write_size, uint32_t sbcs);
75 void read_memory_sba_simple(struct target *target, target_addr_t addr,
76 uint32_t *rd_buf, uint32_t read_size, uint32_t sbcs);
77 static int riscv013_test_compliance(struct target *target);
78
79 /**
80 * Since almost everything can be accomplished by scanning the dbus register, all
81 * functions here assume dbus is already selected. The exceptions are functions
82 * called directly by OpenOCD, which can't assume anything about what's
83 * currently in IR. They should set IR to dbus explicitly.
84 */
85
86 #define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
87 #define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
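/* A minimal worked example of the two macros above (illustrative values only):
 * ((mask) & ~((mask) << 1)) isolates the lowest set bit of the mask, which is
 * the field's scale factor. With mask = 0x00000f00 that scale is 0x100, so:
 *
 *   get_field(0x00005a00, 0x00000f00)  == 0xa
 *   set_field(0, 0x00000f00, 0xa)      == 0x00000a00
 */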
88
89 #define DIM(x) (sizeof(x)/sizeof(*x))
90
91 #define CSR_DCSR_CAUSE_SWBP 1
92 #define CSR_DCSR_CAUSE_TRIGGER 2
93 #define CSR_DCSR_CAUSE_DEBUGINT 3
94 #define CSR_DCSR_CAUSE_STEP 4
95 #define CSR_DCSR_CAUSE_HALT 5
96 #define CSR_DCSR_CAUSE_GROUP 6
97
98 #define RISCV013_INFO(r) riscv013_info_t *r = get_info(target)
99
100 /*** JTAG registers. ***/
101
102 typedef enum {
103 DMI_OP_NOP = 0,
104 DMI_OP_READ = 1,
105 DMI_OP_WRITE = 2
106 } dmi_op_t;
107 typedef enum {
108 DMI_STATUS_SUCCESS = 0,
109 DMI_STATUS_FAILED = 2,
110 DMI_STATUS_BUSY = 3
111 } dmi_status_t;
112
113 typedef enum slot {
114 SLOT0,
115 SLOT1,
116 SLOT_LAST,
117 } slot_t;
118
119 /*** Debug Bus registers. ***/
120
121 #define CMDERR_NONE 0
122 #define CMDERR_BUSY 1
123 #define CMDERR_NOT_SUPPORTED 2
124 #define CMDERR_EXCEPTION 3
125 #define CMDERR_HALT_RESUME 4
126 #define CMDERR_OTHER 7
127
128 /*** Info about the core being debugged. ***/
129
130 struct trigger {
131 uint64_t address;
132 uint32_t length;
133 uint64_t mask;
134 uint64_t value;
135 bool read, write, execute;
136 int unique_id;
137 };
138
139 typedef enum {
140 YNM_MAYBE,
141 YNM_YES,
142 YNM_NO
143 } yes_no_maybe_t;
144
145 typedef struct {
146 struct list_head list;
147 int abs_chain_position;
148
149 /* The number of harts connected to this DM. */
150 int hart_count;
151 /* Indicates we already reset this DM, so don't need to do it again. */
152 bool was_reset;
153 /* Targets that are connected to this DM. */
154 struct list_head target_list;
155 /* The currently selected hartid on this DM. */
156 int current_hartid;
157 bool hasel_supported;
158
159 /* The program buffer stores executable code. 0 is an illegal instruction,
160 * so we use 0 to mean the cached value is invalid. */
161 uint32_t progbuf_cache[16];
162 } dm013_info_t;
163
164 typedef struct {
165 struct list_head list;
166 struct target *target;
167 } target_list_t;
168
169 typedef struct {
170 /* The index used to address this hart in its DM. */
171 unsigned index;
172 /* Number of address bits in the dbus register. */
173 unsigned abits;
174 /* Number of abstract command data registers. */
175 unsigned datacount;
176 /* Number of words in the Program Buffer. */
177 unsigned progbufsize;
178
179 /* We cache the read-only bits of sbcs here. */
180 uint32_t sbcs;
181
182 yes_no_maybe_t progbuf_writable;
183 /* We only need the address so that we know the alignment of the buffer. */
184 riscv_addr_t progbuf_address;
185
186 /* Number of run-test/idle cycles the target requests we do after each dbus
187 * access. */
188 unsigned int dtmcs_idle;
189
190 /* This value is incremented every time a dbus access comes back as "busy".
191 * It's used to determine how many run-test/idle cycles to feed the target
192 * in between accesses. */
193 unsigned int dmi_busy_delay;
194
195 /* Number of run-test/idle cycles to add between consecutive bus master
196 * reads/writes respectively. */
197 unsigned int bus_master_write_delay, bus_master_read_delay;
198
199 /* This value is increased every time we try to execute two commands
200 * consecutively and the second one fails because the first hasn't
201 * completed yet. It's used to add extra run-test/idle cycles after
202 * starting a command, so we don't have to waste time checking for busy to
203 * go low. */
204 unsigned int ac_busy_delay;
205
206 bool abstract_read_csr_supported;
207 bool abstract_write_csr_supported;
208 bool abstract_read_fpr_supported;
209 bool abstract_write_fpr_supported;
210
211 /* When a function returns some error due to a failure indicated by the
212 * target in cmderr, the caller can look here to see what that error was.
213 * (Compare with errno.) */
214 uint8_t cmderr;
215
216 /* Some fields from hartinfo. */
217 uint8_t datasize;
218 uint8_t dataaccess;
219 int16_t dataaddr;
220
221 /* The width of the hartsel field. */
222 unsigned hartsellen;
223
224 /* DM that provides access to this target. */
225 dm013_info_t *dm;
226 } riscv013_info_t;
227
228 LIST_HEAD(dm_list);
229
230 static riscv013_info_t *get_info(const struct target *target)
231 {
232 riscv_info_t *info = (riscv_info_t *) target->arch_info;
233 return (riscv013_info_t *) info->version_specific;
234 }
235
236 /**
237 * Return the DM structure for this target. If there isn't one, find it in the
238 * global list of DMs. If it's not in there, then create one and initialize it
239 * to 0.
240 */
241 dm013_info_t *get_dm(struct target *target)
242 {
243 RISCV013_INFO(info);
244 if (info->dm)
245 return info->dm;
246
247 int abs_chain_position = target->tap->abs_chain_position;
248
249 dm013_info_t *entry;
250 dm013_info_t *dm = NULL;
251 list_for_each_entry(entry, &dm_list, list) {
252 if (entry->abs_chain_position == abs_chain_position) {
253 dm = entry;
254 break;
255 }
256 }
257
258 if (!dm) {
259 LOG_DEBUG("[%d] Allocating new DM", target->coreid);
260 dm = calloc(1, sizeof(dm013_info_t));
261 if (!dm)
262 return NULL;
263 dm->abs_chain_position = abs_chain_position;
264 dm->current_hartid = -1;
265 dm->hart_count = -1;
266 INIT_LIST_HEAD(&dm->target_list);
267 list_add(&dm->list, &dm_list);
268 }
269
270 info->dm = dm;
271 target_list_t *target_entry;
272 list_for_each_entry(target_entry, &dm->target_list, list) {
273 if (target_entry->target == target)
274 return dm;
275 }
276 target_entry = calloc(1, sizeof(*target_entry));
277 if (!target_entry) {
278 info->dm = NULL;
279 return NULL;
280 }
281 target_entry->target = target;
282 list_add(&target_entry->list, &dm->target_list);
283
284 return dm;
285 }
286
287 static uint32_t set_hartsel(uint32_t initial, uint32_t index)
288 {
289 initial &= ~DM_DMCONTROL_HARTSELLO;
290 initial &= ~DM_DMCONTROL_HARTSELHI;
291
292 uint32_t index_lo = index & ((1 << DM_DMCONTROL_HARTSELLO_LENGTH) - 1);
293 initial |= index_lo << DM_DMCONTROL_HARTSELLO_OFFSET;
294 uint32_t index_hi = index >> DM_DMCONTROL_HARTSELLO_LENGTH;
295 assert(index_hi < 1 << DM_DMCONTROL_HARTSELHI_LENGTH);
296 initial |= index_hi << DM_DMCONTROL_HARTSELHI_OFFSET;
297
298 return initial;
299 }
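/* Illustrative example, assuming the 10-bit hartsello field of the 0.13 spec:
 * set_hartsel(DM_DMCONTROL_DMACTIVE, 0x42a) keeps dmactive set, places the low
 * ten bits (0x2a) in hartsello and the remaining bit (0x1) in hartselhi. */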
300
301 static void decode_dmi(char *text, unsigned address, unsigned data)
302 {
303 static const struct {
304 unsigned address;
305 uint64_t mask;
306 const char *name;
307 } description[] = {
308 { DM_DMCONTROL, DM_DMCONTROL_HALTREQ, "haltreq" },
309 { DM_DMCONTROL, DM_DMCONTROL_RESUMEREQ, "resumereq" },
310 { DM_DMCONTROL, DM_DMCONTROL_HARTRESET, "hartreset" },
311 { DM_DMCONTROL, DM_DMCONTROL_HASEL, "hasel" },
312 { DM_DMCONTROL, DM_DMCONTROL_HARTSELHI, "hartselhi" },
313 { DM_DMCONTROL, DM_DMCONTROL_HARTSELLO, "hartsello" },
314 { DM_DMCONTROL, DM_DMCONTROL_NDMRESET, "ndmreset" },
315 { DM_DMCONTROL, DM_DMCONTROL_DMACTIVE, "dmactive" },
316 { DM_DMCONTROL, DM_DMCONTROL_ACKHAVERESET, "ackhavereset" },
317
318 { DM_DMSTATUS, DM_DMSTATUS_IMPEBREAK, "impebreak" },
319 { DM_DMSTATUS, DM_DMSTATUS_ALLHAVERESET, "allhavereset" },
320 { DM_DMSTATUS, DM_DMSTATUS_ANYHAVERESET, "anyhavereset" },
321 { DM_DMSTATUS, DM_DMSTATUS_ALLRESUMEACK, "allresumeack" },
322 { DM_DMSTATUS, DM_DMSTATUS_ANYRESUMEACK, "anyresumeack" },
323 { DM_DMSTATUS, DM_DMSTATUS_ALLNONEXISTENT, "allnonexistent" },
324 { DM_DMSTATUS, DM_DMSTATUS_ANYNONEXISTENT, "anynonexistent" },
325 { DM_DMSTATUS, DM_DMSTATUS_ALLUNAVAIL, "allunavail" },
326 { DM_DMSTATUS, DM_DMSTATUS_ANYUNAVAIL, "anyunavail" },
327 { DM_DMSTATUS, DM_DMSTATUS_ALLRUNNING, "allrunning" },
328 { DM_DMSTATUS, DM_DMSTATUS_ANYRUNNING, "anyrunning" },
329 { DM_DMSTATUS, DM_DMSTATUS_ALLHALTED, "allhalted" },
330 { DM_DMSTATUS, DM_DMSTATUS_ANYHALTED, "anyhalted" },
331 { DM_DMSTATUS, DM_DMSTATUS_AUTHENTICATED, "authenticated" },
332 { DM_DMSTATUS, DM_DMSTATUS_AUTHBUSY, "authbusy" },
333 { DM_DMSTATUS, DM_DMSTATUS_HASRESETHALTREQ, "hasresethaltreq" },
334 { DM_DMSTATUS, DM_DMSTATUS_CONFSTRPTRVALID, "confstrptrvalid" },
335 { DM_DMSTATUS, DM_DMSTATUS_VERSION, "version" },
336
337 { DM_ABSTRACTCS, DM_ABSTRACTCS_PROGBUFSIZE, "progbufsize" },
338 { DM_ABSTRACTCS, DM_ABSTRACTCS_BUSY, "busy" },
339 { DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR, "cmderr" },
340 { DM_ABSTRACTCS, DM_ABSTRACTCS_DATACOUNT, "datacount" },
341
342 { DM_COMMAND, DM_COMMAND_CMDTYPE, "cmdtype" },
343
344 { DM_SBCS, DM_SBCS_SBVERSION, "sbversion" },
345 { DM_SBCS, DM_SBCS_SBBUSYERROR, "sbbusyerror" },
346 { DM_SBCS, DM_SBCS_SBBUSY, "sbbusy" },
347 { DM_SBCS, DM_SBCS_SBREADONADDR, "sbreadonaddr" },
348 { DM_SBCS, DM_SBCS_SBACCESS, "sbaccess" },
349 { DM_SBCS, DM_SBCS_SBAUTOINCREMENT, "sbautoincrement" },
350 { DM_SBCS, DM_SBCS_SBREADONDATA, "sbreadondata" },
351 { DM_SBCS, DM_SBCS_SBERROR, "sberror" },
352 { DM_SBCS, DM_SBCS_SBASIZE, "sbasize" },
353 { DM_SBCS, DM_SBCS_SBACCESS128, "sbaccess128" },
354 { DM_SBCS, DM_SBCS_SBACCESS64, "sbaccess64" },
355 { DM_SBCS, DM_SBCS_SBACCESS32, "sbaccess32" },
356 { DM_SBCS, DM_SBCS_SBACCESS16, "sbaccess16" },
357 { DM_SBCS, DM_SBCS_SBACCESS8, "sbaccess8" },
358 };
359
360 text[0] = 0;
361 for (unsigned i = 0; i < DIM(description); i++) {
362 if (description[i].address == address) {
363 uint64_t mask = description[i].mask;
364 unsigned value = get_field(data, mask);
365 if (value) {
366 if (i > 0)
367 *(text++) = ' ';
368 if (mask & (mask >> 1)) {
369 /* If the field is more than 1 bit wide. */
370 sprintf(text, "%s=%d", description[i].name, value);
371 } else {
372 strcpy(text, description[i].name);
373 }
374 text += strlen(text);
375 }
376 }
377 }
378 }
379
380 static void dump_field(int idle, const struct scan_field *field)
381 {
382 static const char * const op_string[] = {"-", "r", "w", "?"};
383 static const char * const status_string[] = {"+", "?", "F", "b"};
384
385 if (debug_level < LOG_LVL_DEBUG)
386 return;
387
388 uint64_t out = buf_get_u64(field->out_value, 0, field->num_bits);
389 unsigned int out_op = get_field(out, DTM_DMI_OP);
390 unsigned int out_data = get_field(out, DTM_DMI_DATA);
391 unsigned int out_address = out >> DTM_DMI_ADDRESS_OFFSET;
392
393 uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
394 unsigned int in_op = get_field(in, DTM_DMI_OP);
395 unsigned int in_data = get_field(in, DTM_DMI_DATA);
396 unsigned int in_address = in >> DTM_DMI_ADDRESS_OFFSET;
397
398 log_printf_lf(LOG_LVL_DEBUG,
399 __FILE__, __LINE__, "scan",
400 "%db %s %08x @%02x -> %s %08x @%02x; %di",
401 field->num_bits, op_string[out_op], out_data, out_address,
402 status_string[in_op], in_data, in_address, idle);
403
404 char out_text[500];
405 char in_text[500];
406 decode_dmi(out_text, out_address, out_data);
407 decode_dmi(in_text, in_address, in_data);
408 if (in_text[0] || out_text[0]) {
409 log_printf_lf(LOG_LVL_DEBUG, __FILE__, __LINE__, "scan", "%s -> %s",
410 out_text, in_text);
411 }
412 }
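/* Sketch of the resulting debug output, with made-up values: a 41-bit scan
 * (abits=7) writing 0x1 to address 0x10 (dmcontrol) that completes
 * successfully would be logged roughly as:
 *
 *   41b w 00000001 @10 -> + 00000000 @10; 0i
 *   dmactive ->
 */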
413
414 /*** Utility functions. ***/
415
416 static void select_dmi(struct target *target)
417 {
418 if (bscan_tunnel_ir_width != 0) {
419 select_dmi_via_bscan(target);
420 return;
421 }
422 jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
423 }
424
425 static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
426 {
427 struct scan_field field;
428 uint8_t in_value[4];
429 uint8_t out_value[4] = { 0 };
430
431 if (bscan_tunnel_ir_width != 0)
432 return dtmcontrol_scan_via_bscan(target, out);
433
434 buf_set_u32(out_value, 0, 32, out);
435
436 jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);
437
438 field.num_bits = 32;
439 field.out_value = out_value;
440 field.in_value = in_value;
441 jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
442
443 /* Always return to dmi. */
444 select_dmi(target);
445
446 int retval = jtag_execute_queue();
447 if (retval != ERROR_OK) {
448 LOG_ERROR("failed jtag scan: %d", retval);
449 return retval;
450 }
451
452 uint32_t in = buf_get_u32(field.in_value, 0, 32);
453 LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);
454
455 return in;
456 }
457
458 static void increase_dmi_busy_delay(struct target *target)
459 {
460 riscv013_info_t *info = get_info(target);
461 info->dmi_busy_delay += info->dmi_busy_delay / 10 + 1;
462 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
463 info->dtmcs_idle, info->dmi_busy_delay,
464 info->ac_busy_delay);
465
466 dtmcontrol_scan(target, DTM_DTMCS_DMIRESET);
467 }
468
469 /**
470 * @param exec If this is set, assume the scan results in an execution, so more
471 * run-test/idle cycles may be required.
472 */
473 static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
474 uint32_t *data_in, dmi_op_t op, uint32_t address_out, uint32_t data_out,
475 bool exec)
476 {
477 riscv013_info_t *info = get_info(target);
478 RISCV_INFO(r);
479 unsigned num_bits = info->abits + DTM_DMI_OP_LENGTH + DTM_DMI_DATA_LENGTH;
480 size_t num_bytes = (num_bits + 7) / 8;
481 uint8_t in[num_bytes];
482 uint8_t out[num_bytes];
483 struct scan_field field = {
484 .num_bits = num_bits,
485 .out_value = out,
486 .in_value = in
487 };
488 riscv_bscan_tunneled_scan_context_t bscan_ctxt;
489
490 if (r->reset_delays_wait >= 0) {
491 r->reset_delays_wait--;
492 if (r->reset_delays_wait < 0) {
493 info->dmi_busy_delay = 0;
494 info->ac_busy_delay = 0;
495 }
496 }
497
498 memset(in, 0, num_bytes);
499 memset(out, 0, num_bytes);
500
501 assert(info->abits != 0);
502
503 buf_set_u32(out, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, op);
504 buf_set_u32(out, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, data_out);
505 buf_set_u32(out, DTM_DMI_ADDRESS_OFFSET, info->abits, address_out);
506
507 /* I wanted to place this code in a different function, but the way JTAG command
508 queueing works in the jtag handling functions, the scan fields either have to be
509 heap allocated, global/static, or else they need to stay on the stack until
510 the jtag_execute_queue() call. Heap or static fields don't seem the best fit
511 here, and stack-based field values declared in a subsidiary function call
512 wouldn't survive until the queue is executed. */
513 if (bscan_tunnel_ir_width != 0) {
514 riscv_add_bscan_tunneled_scan(target, &field, &bscan_ctxt);
515 } else {
516 /* Assume dbus is already selected. */
517 jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
518 }
519
520 int idle_count = info->dmi_busy_delay;
521 if (exec)
522 idle_count += info->ac_busy_delay;
523
524 if (idle_count)
525 jtag_add_runtest(idle_count, TAP_IDLE);
526
527 int retval = jtag_execute_queue();
528 if (retval != ERROR_OK) {
529 LOG_ERROR("dmi_scan failed jtag scan");
530 if (data_in)
531 *data_in = ~0;
532 return DMI_STATUS_FAILED;
533 }
534
535 if (bscan_tunnel_ir_width != 0) {
536 /* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
537 buffer_shr(in, num_bytes, 1);
538 }
539
540 if (data_in)
541 *data_in = buf_get_u32(in, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);
542
543 if (address_in)
544 *address_in = buf_get_u32(in, DTM_DMI_ADDRESS_OFFSET, info->abits);
545 dump_field(idle_count, &field);
546 return buf_get_u32(in, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
547 }
548
549 /**
550 * @param target The target to run the DMI operation on.
551 * @param data_in The data we received from the target.
552 * @param dmi_busy_encountered
553 * If non-NULL, will be updated to reflect whether DMI busy was
554 * encountered while executing this operation or not.
555 * @param dmi_op The operation to perform (read/write/nop).
556 * @param address The address argument to that operation.
557 * @param data_out The data to send to the target.
558 * @param timeout_sec Give up and return ERROR_TIMEOUT_REACHED after this many seconds.
559 * @param exec When true, this scan will execute something, so extra RTI
560 * cycles may be added.
561 * @param ensure_success
562 * Scan a nop after the requested operation, ensuring the
563 * DMI operation succeeded.
564 */
565 static int dmi_op_timeout(struct target *target, uint32_t *data_in,
566 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
567 uint32_t data_out, int timeout_sec, bool exec, bool ensure_success)
568 {
569 select_dmi(target);
570
571 dmi_status_t status;
572 uint32_t address_in;
573
574 if (dmi_busy_encountered)
575 *dmi_busy_encountered = false;
576
577 const char *op_name;
578 switch (dmi_op) {
579 case DMI_OP_NOP:
580 op_name = "nop";
581 break;
582 case DMI_OP_READ:
583 op_name = "read";
584 break;
585 case DMI_OP_WRITE:
586 op_name = "write";
587 break;
588 default:
589 LOG_ERROR("Invalid DMI operation: %d", dmi_op);
590 return ERROR_FAIL;
591 }
592
593 time_t start = time(NULL);
594 /* This first loop performs the request. Note that if for some reason this
595 * stays busy, it is actually due to the previous access. */
596 while (1) {
597 status = dmi_scan(target, NULL, NULL, dmi_op, address, data_out,
598 exec);
599 if (status == DMI_STATUS_BUSY) {
600 increase_dmi_busy_delay(target);
601 if (dmi_busy_encountered)
602 *dmi_busy_encountered = true;
603 } else if (status == DMI_STATUS_SUCCESS) {
604 break;
605 } else {
606 LOG_ERROR("failed %s at 0x%x, status=%d", op_name, address, status);
607 return ERROR_FAIL;
608 }
609 if (time(NULL) - start > timeout_sec)
610 return ERROR_TIMEOUT_REACHED;
611 }
612
613 if (status != DMI_STATUS_SUCCESS) {
614 LOG_ERROR("Failed %s at 0x%x; status=%d", op_name, address, status);
615 return ERROR_FAIL;
616 }
617
618 if (ensure_success) {
619 /* This second loop ensures the request succeeded, and gets back data.
620 * Note that NOP can result in a 'busy' result as well, but that would be
621 * noticed on the next DMI access we do. */
622 while (1) {
623 status = dmi_scan(target, &address_in, data_in, DMI_OP_NOP, address, 0,
624 false);
625 if (status == DMI_STATUS_BUSY) {
626 increase_dmi_busy_delay(target);
627 if (dmi_busy_encountered)
628 *dmi_busy_encountered = true;
629 } else if (status == DMI_STATUS_SUCCESS) {
630 break;
631 } else {
632 if (data_in) {
633 LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
634 op_name, address, *data_in, status);
635 } else {
636 LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name, address,
637 status);
638 }
639 return ERROR_FAIL;
640 }
641 if (time(NULL) - start > timeout_sec)
642 return ERROR_TIMEOUT_REACHED;
643 }
644 }
645
646 return ERROR_OK;
647 }
648
649 static int dmi_op(struct target *target, uint32_t *data_in,
650 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
651 uint32_t data_out, bool exec, bool ensure_success)
652 {
653 int result = dmi_op_timeout(target, data_in, dmi_busy_encountered, dmi_op,
654 address, data_out, riscv_command_timeout_sec, exec, ensure_success);
655 if (result == ERROR_TIMEOUT_REACHED) {
656 LOG_ERROR("DMI operation didn't complete in %d seconds. The target is "
657 "either really slow or broken. You could increase the "
658 "timeout with riscv set_command_timeout_sec.",
659 riscv_command_timeout_sec);
660 return ERROR_FAIL;
661 }
662 return result;
663 }
664
665 static int dmi_read(struct target *target, uint32_t *value, uint32_t address)
666 {
667 return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, false, true);
668 }
669
670 static int dmi_read_exec(struct target *target, uint32_t *value, uint32_t address)
671 {
672 return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, true, true);
673 }
674
675 static int dmi_write(struct target *target, uint32_t address, uint32_t value)
676 {
677 return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, false, true);
678 }
679
680 static int dmi_write_exec(struct target *target, uint32_t address,
681 uint32_t value, bool ensure_success)
682 {
683 return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, true, ensure_success);
684 }
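/* Usage sketch for the wrappers above (illustrative; mirrors how the rest of
 * this file uses them):
 *
 *   uint32_t dmstatus;
 *   if (dmi_read(target, &dmstatus, DM_DMSTATUS) != ERROR_OK)
 *       return ERROR_FAIL;
 *   if (dmi_write(target, DM_DMCONTROL,
 *           DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_HALTREQ) != ERROR_OK)
 *       return ERROR_FAIL;
 */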
685
686 int dmstatus_read_timeout(struct target *target, uint32_t *dmstatus,
687 bool authenticated, unsigned timeout_sec)
688 {
689 int result = dmi_op_timeout(target, dmstatus, NULL, DMI_OP_READ,
690 DM_DMSTATUS, 0, timeout_sec, false, true);
691 if (result != ERROR_OK)
692 return result;
693 int dmstatus_version = get_field(*dmstatus, DM_DMSTATUS_VERSION);
694 if (dmstatus_version != 2 && dmstatus_version != 3) {
695 LOG_ERROR("OpenOCD only supports Debug Module version 2 (0.13) and 3 (0.14), not "
696 "%d (dmstatus=0x%x). This error might be caused by a JTAG "
697 "signal issue. Try reducing the JTAG clock speed.",
698 get_field(*dmstatus, DM_DMSTATUS_VERSION), *dmstatus);
699 } else if (authenticated && !get_field(*dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
700 LOG_ERROR("Debugger is not authenticated to target Debug Module. "
701 "(dmstatus=0x%x). Use `riscv authdata_read` and "
702 "`riscv authdata_write` commands to authenticate.", *dmstatus);
703 return ERROR_FAIL;
704 }
705 return ERROR_OK;
706 }
707
708 int dmstatus_read(struct target *target, uint32_t *dmstatus,
709 bool authenticated)
710 {
711 return dmstatus_read_timeout(target, dmstatus, authenticated,
712 riscv_command_timeout_sec);
713 }
714
715 static void increase_ac_busy_delay(struct target *target)
716 {
717 riscv013_info_t *info = get_info(target);
718 info->ac_busy_delay += info->ac_busy_delay / 10 + 1;
719 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
720 info->dtmcs_idle, info->dmi_busy_delay,
721 info->ac_busy_delay);
722 }
723
724 uint32_t abstract_register_size(unsigned width)
725 {
726 switch (width) {
727 case 32:
728 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 2);
729 case 64:
730 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 3);
731 case 128:
732 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 4);
733 default:
734 LOG_ERROR("Unsupported register width: %d", width);
735 return 0;
736 }
737 }
738
739 static int wait_for_idle(struct target *target, uint32_t *abstractcs)
740 {
741 RISCV013_INFO(info);
742 time_t start = time(NULL);
743 while (1) {
744 if (dmi_read(target, abstractcs, DM_ABSTRACTCS) != ERROR_OK)
745 return ERROR_FAIL;
746
747 if (get_field(*abstractcs, DM_ABSTRACTCS_BUSY) == 0)
748 return ERROR_OK;
749
750 if (time(NULL) - start > riscv_command_timeout_sec) {
751 info->cmderr = get_field(*abstractcs, DM_ABSTRACTCS_CMDERR);
752 if (info->cmderr != CMDERR_NONE) {
753 const char *errors[8] = {
754 "none",
755 "busy",
756 "not supported",
757 "exception",
758 "halt/resume",
759 "reserved",
760 "reserved",
761 "other" };
762
763 LOG_ERROR("Abstract command ended in error '%s' (abstractcs=0x%x)",
764 errors[info->cmderr], *abstractcs);
765 }
766
767 LOG_ERROR("Timed out after %ds waiting for busy to go low (abstractcs=0x%x). "
768 "Increase the timeout with riscv set_command_timeout_sec.",
769 riscv_command_timeout_sec,
770 *abstractcs);
771 return ERROR_FAIL;
772 }
773 }
774 }
775
776 static int execute_abstract_command(struct target *target, uint32_t command)
777 {
778 RISCV013_INFO(info);
779 if (debug_level >= LOG_LVL_DEBUG) {
780 switch (get_field(command, DM_COMMAND_CMDTYPE)) {
781 case 0:
782 LOG_DEBUG("command=0x%x; access register, size=%d, postexec=%d, "
783 "transfer=%d, write=%d, regno=0x%x",
784 command,
785 8 << get_field(command, AC_ACCESS_REGISTER_AARSIZE),
786 get_field(command, AC_ACCESS_REGISTER_POSTEXEC),
787 get_field(command, AC_ACCESS_REGISTER_TRANSFER),
788 get_field(command, AC_ACCESS_REGISTER_WRITE),
789 get_field(command, AC_ACCESS_REGISTER_REGNO));
790 break;
791 default:
792 LOG_DEBUG("command=0x%x", command);
793 break;
794 }
795 }
796
797 if (dmi_write_exec(target, DM_COMMAND, command, false) != ERROR_OK)
798 return ERROR_FAIL;
799
800 uint32_t abstractcs = 0;
801 int result = wait_for_idle(target, &abstractcs);
802
803 info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
804 if (info->cmderr != 0 || result != ERROR_OK) {
805 LOG_DEBUG("command 0x%x failed; abstractcs=0x%x", command, abstractcs);
806 /* Clear the error. */
807 dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
808 return ERROR_FAIL;
809 }
810
811 return ERROR_OK;
812 }
813
814 static riscv_reg_t read_abstract_arg(struct target *target, unsigned index,
815 unsigned size_bits)
816 {
817 riscv_reg_t value = 0;
818 uint32_t v;
819 unsigned offset = index * size_bits / 32;
820 switch (size_bits) {
821 default:
822 LOG_ERROR("Unsupported size: %d bits", size_bits);
823 return ~0;
824 case 64:
825 dmi_read(target, &v, DM_DATA0 + offset + 1);
826 value |= ((uint64_t) v) << 32;
827 /* falls through */
828 case 32:
829 dmi_read(target, &v, DM_DATA0 + offset);
830 value |= v;
831 }
832 return value;
833 }
834
835 static int write_abstract_arg(struct target *target, unsigned index,
836 riscv_reg_t value, unsigned size_bits)
837 {
838 unsigned offset = index * size_bits / 32;
839 switch (size_bits) {
840 default:
841 LOG_ERROR("Unsupported size: %d bits", size_bits);
842 return ERROR_FAIL;
843 case 64:
844 dmi_write(target, DM_DATA0 + offset + 1, value >> 32);
845 /* falls through */
846 case 32:
847 dmi_write(target, DM_DATA0 + offset, value);
848 }
849 return ERROR_OK;
850 }
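/* Illustrative example: for a 64-bit argument 0,
 * write_abstract_arg(target, 0, 0x1122334455667788, 64) stores 0x55667788 in
 * DM_DATA0 and 0x11223344 in DM_DATA1; read_abstract_arg() reassembles the
 * value from the same two registers. */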
851
852 /**
853 * @param size Size in bits.
854 */
855 static uint32_t access_register_command(struct target *target, uint32_t number,
856 unsigned size, uint32_t flags)
857 {
858 uint32_t command = set_field(0, DM_COMMAND_CMDTYPE, 0);
859 switch (size) {
860 case 32:
861 command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 2);
862 break;
863 case 64:
864 command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 3);
865 break;
866 default:
867 LOG_ERROR("%d-bit register %s not supported.", size,
868 gdb_regno_name(number));
869 assert(0);
870 }
871
872 if (number <= GDB_REGNO_XPR31) {
873 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
874 0x1000 + number - GDB_REGNO_ZERO);
875 } else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
876 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
877 0x1020 + number - GDB_REGNO_FPR0);
878 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
879 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
880 number - GDB_REGNO_CSR0);
881 } else if (number >= GDB_REGNO_COUNT) {
882 /* Custom register. */
883 assert(target->reg_cache->reg_list[number].arch_info);
884 riscv_reg_info_t *reg_info = target->reg_cache->reg_list[number].arch_info;
885 assert(reg_info);
886 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
887 0xc000 + reg_info->custom_number);
888 } else {
889 assert(0);
890 }
891
892 command |= flags;
893
894 return command;
895 }
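/* Illustrative example, assuming GDB_REGNO_S0 maps to x8:
 * access_register_command(target, GDB_REGNO_S0, 64, AC_ACCESS_REGISTER_TRANSFER)
 * returns an Access Register command with cmdtype=0, aarsize=3 (64-bit),
 * transfer=1 and regno=0x1008. */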
896
897 static int register_read_abstract(struct target *target, uint64_t *value,
898 uint32_t number, unsigned size)
899 {
900 RISCV013_INFO(info);
901
902 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
903 !info->abstract_read_fpr_supported)
904 return ERROR_FAIL;
905 if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
906 !info->abstract_read_csr_supported)
907 return ERROR_FAIL;
908 /* The spec doesn't define abstract register numbers for vector registers. */
909 if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31)
910 return ERROR_FAIL;
911
912 uint32_t command = access_register_command(target, number, size,
913 AC_ACCESS_REGISTER_TRANSFER);
914
915 int result = execute_abstract_command(target, command);
916 if (result != ERROR_OK) {
917 if (info->cmderr == CMDERR_NOT_SUPPORTED) {
918 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
919 info->abstract_read_fpr_supported = false;
920 LOG_INFO("Disabling abstract command reads from FPRs.");
921 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
922 info->abstract_read_csr_supported = false;
923 LOG_INFO("Disabling abstract command reads from CSRs.");
924 }
925 }
926 return result;
927 }
928
929 if (value)
930 *value = read_abstract_arg(target, 0, size);
931
932 return ERROR_OK;
933 }
934
935 static int register_write_abstract(struct target *target, uint32_t number,
936 uint64_t value, unsigned size)
937 {
938 RISCV013_INFO(info);
939
940 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
941 !info->abstract_write_fpr_supported)
942 return ERROR_FAIL;
943 if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
944 !info->abstract_write_csr_supported)
945 return ERROR_FAIL;
946
947 uint32_t command = access_register_command(target, number, size,
948 AC_ACCESS_REGISTER_TRANSFER |
949 AC_ACCESS_REGISTER_WRITE);
950
951 if (write_abstract_arg(target, 0, value, size) != ERROR_OK)
952 return ERROR_FAIL;
953
954 int result = execute_abstract_command(target, command);
955 if (result != ERROR_OK) {
956 if (info->cmderr == CMDERR_NOT_SUPPORTED) {
957 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
958 info->abstract_write_fpr_supported = false;
959 LOG_INFO("Disabling abstract command writes to FPRs.");
960 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
961 info->abstract_write_csr_supported = false;
962 LOG_INFO("Disabling abstract command writes to CSRs.");
963 }
964 }
965 return result;
966 }
967
968 return ERROR_OK;
969 }
970
971 /*
972 * Sets the AAMSIZE field of a memory access abstract command based on
973 * the width (bits).
974 */
975 static uint32_t abstract_memory_size(unsigned width)
976 {
977 switch (width) {
978 case 8:
979 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 0);
980 case 16:
981 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 1);
982 case 32:
983 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 2);
984 case 64:
985 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 3);
986 case 128:
987 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 4);
988 default:
989 LOG_ERROR("Unsupported memory width: %d", width);
990 return 0;
991 }
992 }
993
994 /*
995 * Creates a memory access abstract command.
996 */
997 static uint32_t access_memory_command(struct target *target, bool virtual,
998 unsigned width, bool postincrement, bool write)
999 {
1000 uint32_t command = set_field(0, AC_ACCESS_MEMORY_CMDTYPE, 2);
1001 command = set_field(command, AC_ACCESS_MEMORY_AAMVIRTUAL, virtual);
1002 command |= abstract_memory_size(width);
1003 command = set_field(command, AC_ACCESS_MEMORY_AAMPOSTINCREMENT,
1004 postincrement);
1005 command = set_field(command, AC_ACCESS_MEMORY_WRITE, write);
1006
1007 return command;
1008 }
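/* Illustrative example: access_memory_command(target, false, 32, false, true)
 * builds an Access Memory command (cmdtype=2) for a single 32-bit physical
 * write (aamsize=2) with no post-increment. */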
1009
1010 static int examine_progbuf(struct target *target)
1011 {
1012 riscv013_info_t *info = get_info(target);
1013
1014 if (info->progbuf_writable != YNM_MAYBE)
1015 return ERROR_OK;
1016
1017 /* Figure out if progbuf is writable. */
1018
1019 if (info->progbufsize < 1) {
1020 info->progbuf_writable = YNM_NO;
1021 LOG_INFO("No program buffer present.");
1022 return ERROR_OK;
1023 }
1024
1025 uint64_t s0;
1026 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1027 return ERROR_FAIL;
1028
1029 struct riscv_program program;
1030 riscv_program_init(&program, target);
1031 riscv_program_insert(&program, auipc(S0));
1032 if (riscv_program_exec(&program, target) != ERROR_OK)
1033 return ERROR_FAIL;
1034
1035 if (register_read_direct(target, &info->progbuf_address, GDB_REGNO_S0) != ERROR_OK)
1036 return ERROR_FAIL;
1037
1038 riscv_program_init(&program, target);
1039 riscv_program_insert(&program, sw(S0, S0, 0));
1040 int result = riscv_program_exec(&program, target);
1041
1042 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1043 return ERROR_FAIL;
1044
1045 if (result != ERROR_OK) {
1046 /* This program might have failed if the program buffer is not
1047 * writable. */
1048 info->progbuf_writable = YNM_NO;
1049 return ERROR_OK;
1050 }
1051
1052 uint32_t written;
1053 if (dmi_read(target, &written, DM_PROGBUF0) != ERROR_OK)
1054 return ERROR_FAIL;
1055 if (written == (uint32_t) info->progbuf_address) {
1056 LOG_INFO("progbuf is writable at 0x%" PRIx64,
1057 info->progbuf_address);
1058 info->progbuf_writable = YNM_YES;
1059
1060 } else {
1061 LOG_INFO("progbuf is not writable at 0x%" PRIx64,
1062 info->progbuf_address);
1063 info->progbuf_writable = YNM_NO;
1064 }
1065
1066 return ERROR_OK;
1067 }
1068
1069 static int is_fpu_reg(uint32_t gdb_regno)
1070 {
1071 return (gdb_regno >= GDB_REGNO_FPR0 && gdb_regno <= GDB_REGNO_FPR31) ||
1072 (gdb_regno == GDB_REGNO_CSR0 + CSR_FFLAGS) ||
1073 (gdb_regno == GDB_REGNO_CSR0 + CSR_FRM) ||
1074 (gdb_regno == GDB_REGNO_CSR0 + CSR_FCSR);
1075 }
1076
1077 static int is_vector_reg(uint32_t gdb_regno)
1078 {
1079 return (gdb_regno >= GDB_REGNO_V0 && gdb_regno <= GDB_REGNO_V31) ||
1080 gdb_regno == GDB_REGNO_VSTART ||
1081 gdb_regno == GDB_REGNO_VXSAT ||
1082 gdb_regno == GDB_REGNO_VXRM ||
1083 gdb_regno == GDB_REGNO_VL ||
1084 gdb_regno == GDB_REGNO_VTYPE ||
1085 gdb_regno == GDB_REGNO_VLENB;
1086 }
1087
1088 static int prep_for_register_access(struct target *target, uint64_t *mstatus,
1089 int regno)
1090 {
1091 if (is_fpu_reg(regno) || is_vector_reg(regno)) {
1092 if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
1093 return ERROR_FAIL;
1094 if (is_fpu_reg(regno) && (*mstatus & MSTATUS_FS) == 0) {
1095 if (register_write_direct(target, GDB_REGNO_MSTATUS,
1096 set_field(*mstatus, MSTATUS_FS, 1)) != ERROR_OK)
1097 return ERROR_FAIL;
1098 } else if (is_vector_reg(regno) && (*mstatus & MSTATUS_VS) == 0) {
1099 if (register_write_direct(target, GDB_REGNO_MSTATUS,
1100 set_field(*mstatus, MSTATUS_VS, 1)) != ERROR_OK)
1101 return ERROR_FAIL;
1102 }
1103 } else {
1104 *mstatus = 0;
1105 }
1106 return ERROR_OK;
1107 }
1108
1109 static int cleanup_after_register_access(struct target *target,
1110 uint64_t mstatus, int regno)
1111 {
1112 if ((is_fpu_reg(regno) && (mstatus & MSTATUS_FS) == 0) ||
1113 (is_vector_reg(regno) && (mstatus & MSTATUS_VS) == 0))
1114 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK)
1115 return ERROR_FAIL;
1116 return ERROR_OK;
1117 }
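/* The two helpers above are used as a pair around any register access that may
 * touch FPR or vector state, e.g. (sketch, error paths elided):
 *
 *   uint64_t mstatus;
 *   if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
 *       return ERROR_FAIL;
 *   ... access the register ...
 *   if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
 *       return ERROR_FAIL;
 */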
1118
1119 typedef enum {
1120 SPACE_DM_DATA,
1121 SPACE_DMI_PROGBUF,
1122 SPACE_DMI_RAM
1123 } memory_space_t;
1124
1125 typedef struct {
1126 /* How can the debugger access this memory? */
1127 memory_space_t memory_space;
1128 /* Memory address to access the scratch memory from the hart. */
1129 riscv_addr_t hart_address;
1130 /* Memory address to access the scratch memory from the debugger. */
1131 riscv_addr_t debug_address;
1132 struct working_area *area;
1133 } scratch_mem_t;
1134
1135 /**
1136 * Find some scratch memory to be used with the given program.
1137 */
1138 static int scratch_reserve(struct target *target,
1139 scratch_mem_t *scratch,
1140 struct riscv_program *program,
1141 unsigned size_bytes)
1142 {
1143 riscv_addr_t alignment = 1;
1144 while (alignment < size_bytes)
1145 alignment *= 2;
1146
1147 scratch->area = NULL;
1148
1149 riscv013_info_t *info = get_info(target);
1150
1151 /* Option 1: See if data# registers can be used as the scratch memory */
1152 if (info->dataaccess == 1) {
1153 /* Sign extend dataaddr. */
1154 scratch->hart_address = info->dataaddr;
1155 if (info->dataaddr & (1<<11))
1156 scratch->hart_address |= 0xfffffffffffff000ULL;
1157 /* Align. */
1158 scratch->hart_address = (scratch->hart_address + alignment - 1) & ~(alignment - 1);
1159
1160 if ((size_bytes + scratch->hart_address - info->dataaddr + 3) / 4 >=
1161 info->datasize) {
1162 scratch->memory_space = SPACE_DM_DATA;
1163 scratch->debug_address = (scratch->hart_address - info->dataaddr) / 4;
1164 return ERROR_OK;
1165 }
1166 }
1167
1168 /* Option 2: See if progbuf can be used as the scratch memory */
1169 if (examine_progbuf(target) != ERROR_OK)
1170 return ERROR_FAIL;
1171
1172 /* Allow for ebreak at the end of the program. */
1173 unsigned program_size = (program->instruction_count + 1) * 4;
1174 scratch->hart_address = (info->progbuf_address + program_size + alignment - 1) &
1175 ~(alignment - 1);
1176 if ((info->progbuf_writable == YNM_YES) &&
1177 ((size_bytes + scratch->hart_address - info->progbuf_address + 3) / 4 >=
1178 info->progbufsize)) {
1179 scratch->memory_space = SPACE_DMI_PROGBUF;
1180 scratch->debug_address = (scratch->hart_address - info->progbuf_address) / 4;
1181 return ERROR_OK;
1182 }
1183
1184 /* Option 3: User-configured memory area as scratch RAM */
1185 if (target_alloc_working_area(target, size_bytes + alignment - 1,
1186 &scratch->area) == ERROR_OK) {
1187 scratch->hart_address = (scratch->area->address + alignment - 1) &
1188 ~(alignment - 1);
1189 scratch->memory_space = SPACE_DMI_RAM;
1190 scratch->debug_address = scratch->hart_address;
1191 return ERROR_OK;
1192 }
1193
1194 LOG_ERROR("Couldn't find %d bytes of scratch RAM to use. Please configure "
1195 "a work area with 'configure -work-area-phys'.", size_bytes);
1196 return ERROR_FAIL;
1197 }
1198
1199 static int scratch_release(struct target *target,
1200 scratch_mem_t *scratch)
1201 {
1202 if (scratch->area)
1203 return target_free_working_area(target, scratch->area);
1204
1205 return ERROR_OK;
1206 }
1207
1208 static int scratch_read64(struct target *target, scratch_mem_t *scratch,
1209 uint64_t *value)
1210 {
1211 uint32_t v;
1212 switch (scratch->memory_space) {
1213 case SPACE_DM_DATA:
1214 if (dmi_read(target, &v, DM_DATA0 + scratch->debug_address) != ERROR_OK)
1215 return ERROR_FAIL;
1216 *value = v;
1217 if (dmi_read(target, &v, DM_DATA1 + scratch->debug_address) != ERROR_OK)
1218 return ERROR_FAIL;
1219 *value |= ((uint64_t) v) << 32;
1220 break;
1221 case SPACE_DMI_PROGBUF:
1222 if (dmi_read(target, &v, DM_PROGBUF0 + scratch->debug_address) != ERROR_OK)
1223 return ERROR_FAIL;
1224 *value = v;
1225 if (dmi_read(target, &v, DM_PROGBUF1 + scratch->debug_address) != ERROR_OK)
1226 return ERROR_FAIL;
1227 *value |= ((uint64_t) v) << 32;
1228 break;
1229 case SPACE_DMI_RAM:
1230 {
1231 uint8_t buffer[8] = {0};
1232 if (read_memory(target, scratch->debug_address, 4, 2, buffer, 4) != ERROR_OK)
1233 return ERROR_FAIL;
1234 *value = buffer[0] |
1235 (((uint64_t) buffer[1]) << 8) |
1236 (((uint64_t) buffer[2]) << 16) |
1237 (((uint64_t) buffer[3]) << 24) |
1238 (((uint64_t) buffer[4]) << 32) |
1239 (((uint64_t) buffer[5]) << 40) |
1240 (((uint64_t) buffer[6]) << 48) |
1241 (((uint64_t) buffer[7]) << 56);
1242 }
1243 break;
1244 }
1245 return ERROR_OK;
1246 }
1247
1248 static int scratch_write64(struct target *target, scratch_mem_t *scratch,
1249 uint64_t value)
1250 {
1251 switch (scratch->memory_space) {
1252 case SPACE_DM_DATA:
1253 dmi_write(target, DM_DATA0 + scratch->debug_address, value);
1254 dmi_write(target, DM_DATA1 + scratch->debug_address, value >> 32);
1255 break;
1256 case SPACE_DMI_PROGBUF:
1257 dmi_write(target, DM_PROGBUF0 + scratch->debug_address, value);
1258 dmi_write(target, DM_PROGBUF1 + scratch->debug_address, value >> 32);
1259 break;
1260 case SPACE_DMI_RAM:
1261 {
1262 uint8_t buffer[8] = {
1263 value,
1264 value >> 8,
1265 value >> 16,
1266 value >> 24,
1267 value >> 32,
1268 value >> 40,
1269 value >> 48,
1270 value >> 56
1271 };
1272 if (write_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
1273 return ERROR_FAIL;
1274 }
1275 break;
1276 }
1277 return ERROR_OK;
1278 }
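/* Usage sketch for the scratch helpers above (illustrative; this mirrors the
 * FPR handling in register_write_direct() below):
 *
 *   scratch_mem_t scratch;
 *   if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
 *       return ERROR_FAIL;
 *   if (scratch_write64(target, &scratch, value) != ERROR_OK) {
 *       scratch_release(target, &scratch);
 *       return ERROR_FAIL;
 *   }
 *   ...
 *   scratch_release(target, &scratch);
 */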
1279
1280 /** Return register size in bits. */
1281 static unsigned register_size(struct target *target, unsigned number)
1282 {
1283 /* If reg_cache hasn't been initialized yet, make a guess. We need this for
1284 * when this function is called during examine(). */
1285 if (target->reg_cache)
1286 return target->reg_cache->reg_list[number].size;
1287 else
1288 return riscv_xlen(target);
1289 }
1290
1291 static bool has_sufficient_progbuf(struct target *target, unsigned size)
1292 {
1293 RISCV013_INFO(info);
1294 RISCV_INFO(r);
1295
1296 return info->progbufsize + r->impebreak >= size;
1297 }
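/* Example: a DM advertising progbufsize=1 together with impebreak=1 can still
 * run a one-instruction program followed by the implicit ebreak, so
 * has_sufficient_progbuf(target, 2) returns true for it. */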
1298
1299 /**
1300 * Immediately write the new value to the requested register. This mechanism
1301 * bypasses any caches.
1302 */
1303 static int register_write_direct(struct target *target, unsigned number,
1304 uint64_t value)
1305 {
1306 LOG_DEBUG("{%d} %s <- 0x%" PRIx64, riscv_current_hartid(target),
1307 gdb_regno_name(number), value);
1308
1309 int result = register_write_abstract(target, number, value,
1310 register_size(target, number));
1311 if (result == ERROR_OK || !has_sufficient_progbuf(target, 2) ||
1312 !riscv_is_halted(target))
1313 return result;
1314
1315 struct riscv_program program;
1316 riscv_program_init(&program, target);
1317
1318 uint64_t s0;
1319 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1320 return ERROR_FAIL;
1321
1322 uint64_t mstatus;
1323 if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
1324 return ERROR_FAIL;
1325
1326 scratch_mem_t scratch;
1327 bool use_scratch = false;
1328 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
1329 riscv_supports_extension(target, riscv_current_hartid(target), 'D') &&
1330 riscv_xlen(target) < 64) {
1331 /* There are no instructions to move all the bits from a register, so
1332 * we need to use some scratch RAM. */
1333 use_scratch = true;
1334 riscv_program_insert(&program, fld(number - GDB_REGNO_FPR0, S0, 0));
1335
1336 if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
1337 return ERROR_FAIL;
1338
1339 if (register_write_direct(target, GDB_REGNO_S0, scratch.hart_address)
1340 != ERROR_OK) {
1341 scratch_release(target, &scratch);
1342 return ERROR_FAIL;
1343 }
1344
1345 if (scratch_write64(target, &scratch, value) != ERROR_OK) {
1346 scratch_release(target, &scratch);
1347 return ERROR_FAIL;
1348 }
1349
1350 } else if (number == GDB_REGNO_VTYPE) {
1351 riscv_program_insert(&program, csrr(S0, CSR_VL));
1352 riscv_program_insert(&program, vsetvli(ZERO, S0, value));
1353
1354 } else {
1355 if (register_write_direct(target, GDB_REGNO_S0, value) != ERROR_OK)
1356 return ERROR_FAIL;
1357
1358 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
1359 if (riscv_supports_extension(target, riscv_current_hartid(target), 'D'))
1360 riscv_program_insert(&program, fmv_d_x(number - GDB_REGNO_FPR0, S0));
1361 else
1362 riscv_program_insert(&program, fmv_w_x(number - GDB_REGNO_FPR0, S0));
1363 } else if (number == GDB_REGNO_VL) {
1364 /* "The XLEN-bit-wide read-only vl CSR can only be updated by the
1365 * vsetvli and vsetvl instructions, and the fault-only-first vector
1366 * load instruction variants." */
1367 riscv_reg_t vtype;
1368 if (register_read(target, &vtype, GDB_REGNO_VTYPE) != ERROR_OK)
1369 return ERROR_FAIL;
1370 if (riscv_program_insert(&program, vsetvli(ZERO, S0, vtype)) != ERROR_OK)
1371 return ERROR_FAIL;
1372 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
1373 riscv_program_csrw(&program, S0, number);
1374 } else {
1375 LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
1376 return ERROR_FAIL;
1377 }
1378 }
1379
1380 int exec_out = riscv_program_exec(&program, target);
1381 /* Don't message on error. Probably the register doesn't exist. */
1382 if (exec_out == ERROR_OK && target->reg_cache) {
1383 struct reg *reg = &target->reg_cache->reg_list[number];
1384 buf_set_u64(reg->value, 0, reg->size, value);
1385 }
1386
1387 if (use_scratch)
1388 scratch_release(target, &scratch);
1389
1390 if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
1391 return ERROR_FAIL;
1392
1393 /* Restore S0. */
1394 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1395 return ERROR_FAIL;
1396
1397 return exec_out;
1398 }
1399
1400 /** Read a register from the target and update the register cache. */
1401 static int register_read(struct target *target, uint64_t *value, uint32_t number)
1402 {
1403 if (number == GDB_REGNO_ZERO) {
1404 *value = 0;
1405 return ERROR_OK;
1406 }
1407 int result = register_read_direct(target, value, number);
1408 if (result != ERROR_OK)
1409 return ERROR_FAIL;
1410 if (target->reg_cache) {
1411 struct reg *reg = &target->reg_cache->reg_list[number];
1412 buf_set_u64(reg->value, 0, reg->size, *value);
1413 }
1414 return ERROR_OK;
1415 }
1416
1417 /** Actually read the register from the target right now. */
1418 static int register_read_direct(struct target *target, uint64_t *value, uint32_t number)
1419 {
1420 int result = register_read_abstract(target, value, number,
1421 register_size(target, number));
1422
1423 if (result != ERROR_OK &&
1424 has_sufficient_progbuf(target, 2) &&
1425 number > GDB_REGNO_XPR31) {
1426 struct riscv_program program;
1427 riscv_program_init(&program, target);
1428
1429 scratch_mem_t scratch;
1430 bool use_scratch = false;
1431
1432 riscv_reg_t s0;
1433 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1434 return ERROR_FAIL;
1435
1436 /* Write program to move data into s0. */
1437
1438 uint64_t mstatus;
1439 if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
1440 return ERROR_FAIL;
1441
1442 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
1443 if (riscv_supports_extension(target, riscv_current_hartid(target), 'D')
1444 && riscv_xlen(target) < 64) {
1445 /* There are no instructions to move all the bits from a
1446 * register, so we need to use some scratch RAM. */
1447 riscv_program_insert(&program, fsd(number - GDB_REGNO_FPR0, S0,
1448 0));
1449
1450 if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
1451 return ERROR_FAIL;
1452 use_scratch = true;
1453
1454 if (register_write_direct(target, GDB_REGNO_S0,
1455 scratch.hart_address) != ERROR_OK) {
1456 scratch_release(target, &scratch);
1457 return ERROR_FAIL;
1458 }
1459 } else if (riscv_supports_extension(target,
1460 riscv_current_hartid(target), 'D')) {
1461 riscv_program_insert(&program, fmv_x_d(S0, number - GDB_REGNO_FPR0));
1462 } else {
1463 riscv_program_insert(&program, fmv_x_w(S0, number - GDB_REGNO_FPR0));
1464 }
1465 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
1466 riscv_program_csrr(&program, S0, number);
1467 } else {
1468 LOG_ERROR("Unsupported register: %s", gdb_regno_name(number));
1469 return ERROR_FAIL;
1470 }
1471
1472 /* Execute program. */
1473 result = riscv_program_exec(&program, target);
1474 /* Don't message on error. Probably the register doesn't exist. */
1475
1476 if (use_scratch) {
1477 result = scratch_read64(target, &scratch, value);
1478 scratch_release(target, &scratch);
1479 if (result != ERROR_OK)
1480 return result;
1481 } else {
1482 /* Read S0 */
1483 if (register_read_direct(target, value, GDB_REGNO_S0) != ERROR_OK)
1484 return ERROR_FAIL;
1485 }
1486
1487 if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
1488 return ERROR_FAIL;
1489
1490 /* Restore S0. */
1491 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1492 return ERROR_FAIL;
1493 }
1494
1495 if (result == ERROR_OK) {
1496 LOG_DEBUG("{%d} %s = 0x%" PRIx64, riscv_current_hartid(target),
1497 gdb_regno_name(number), *value);
1498 }
1499
1500 return result;
1501 }
1502
1503 int wait_for_authbusy(struct target *target, uint32_t *dmstatus)
1504 {
1505 time_t start = time(NULL);
1506 while (1) {
1507 uint32_t value;
1508 if (dmstatus_read(target, &value, false) != ERROR_OK)
1509 return ERROR_FAIL;
1510 if (dmstatus)
1511 *dmstatus = value;
1512 if (!get_field(value, DM_DMSTATUS_AUTHBUSY))
1513 break;
1514 if (time(NULL) - start > riscv_command_timeout_sec) {
1515 LOG_ERROR("Timed out after %ds waiting for authbusy to go low (dmstatus=0x%x). "
1516 "Increase the timeout with riscv set_command_timeout_sec.",
1517 riscv_command_timeout_sec,
1518 value);
1519 return ERROR_FAIL;
1520 }
1521 }
1522
1523 return ERROR_OK;
1524 }
1525
1526 /*** OpenOCD target functions. ***/
1527
1528 static void deinit_target(struct target *target)
1529 {
1530 LOG_DEBUG("riscv_deinit_target()");
1531 riscv_info_t *info = (riscv_info_t *) target->arch_info;
1532 free(info->version_specific);
1533 /* TODO: free register arch_info */
1534 info->version_specific = NULL;
1535 }
1536
1537 static int set_haltgroup(struct target *target, bool *supported)
1538 {
1539 uint32_t write = set_field(DM_DMCS2_HGWRITE, DM_DMCS2_GROUP, target->smp);
1540 if (dmi_write(target, DM_DMCS2, write) != ERROR_OK)
1541 return ERROR_FAIL;
1542 uint32_t read;
1543 if (dmi_read(target, &read, DM_DMCS2) != ERROR_OK)
1544 return ERROR_FAIL;
1545 *supported = get_field(read, DM_DMCS2_GROUP) == (unsigned)target->smp;
1546 return ERROR_OK;
1547 }
1548
1549 static int discover_vlenb(struct target *target, int hartid)
1550 {
1551 RISCV_INFO(r);
1552 riscv_reg_t vlenb;
1553
1554 if (register_read(target, &vlenb, GDB_REGNO_VLENB) != ERROR_OK) {
1555 LOG_WARNING("Couldn't read vlenb for %s; vector register access won't work.",
1556 target_name(target));
1557 r->vlenb[hartid] = 0;
1558 return ERROR_OK;
1559 }
1560 r->vlenb[hartid] = vlenb;
1561
1562 LOG_INFO("hart %d: Vector support with vlenb=%d", hartid, r->vlenb[hartid]);
1563
1564 return ERROR_OK;
1565 }
1566
1567 static int examine(struct target *target)
1568 {
1569 /* Don't need to select dbus, since the first thing we do is read dtmcontrol. */
1570
1571 uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
1572 LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
1573 LOG_DEBUG(" dmireset=%d", get_field(dtmcontrol, DTM_DTMCS_DMIRESET));
1574 LOG_DEBUG(" idle=%d", get_field(dtmcontrol, DTM_DTMCS_IDLE));
1575 LOG_DEBUG(" dmistat=%d", get_field(dtmcontrol, DTM_DTMCS_DMISTAT));
1576 LOG_DEBUG(" abits=%d", get_field(dtmcontrol, DTM_DTMCS_ABITS));
1577 LOG_DEBUG(" version=%d", get_field(dtmcontrol, DTM_DTMCS_VERSION));
1578 if (dtmcontrol == 0) {
1579 LOG_ERROR("dtmcontrol is 0. Check JTAG connectivity/board power.");
1580 return ERROR_FAIL;
1581 }
1582 if (get_field(dtmcontrol, DTM_DTMCS_VERSION) != 1) {
1583 LOG_ERROR("Unsupported DTM version %d. (dtmcontrol=0x%x)",
1584 get_field(dtmcontrol, DTM_DTMCS_VERSION), dtmcontrol);
1585 return ERROR_FAIL;
1586 }
1587
1588 riscv013_info_t *info = get_info(target);
1589 /* TODO: This won't be true if there are multiple DMs. */
1590 info->index = target->coreid;
1591 info->abits = get_field(dtmcontrol, DTM_DTMCS_ABITS);
1592 info->dtmcs_idle = get_field(dtmcontrol, DTM_DTMCS_IDLE);
1593
1594 /* Reset the Debug Module. */
1595 dm013_info_t *dm = get_dm(target);
1596 if (!dm)
1597 return ERROR_FAIL;
1598 if (!dm->was_reset) {
1599 dmi_write(target, DM_DMCONTROL, 0);
1600 dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_DMACTIVE);
1601 dm->was_reset = true;
1602 }
1603
1604 dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_HARTSELLO |
1605 DM_DMCONTROL_HARTSELHI | DM_DMCONTROL_DMACTIVE |
1606 DM_DMCONTROL_HASEL);
1607 uint32_t dmcontrol;
1608 if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
1609 return ERROR_FAIL;
1610
1611 if (!get_field(dmcontrol, DM_DMCONTROL_DMACTIVE)) {
1612 LOG_ERROR("Debug Module did not become active. dmcontrol=0x%x",
1613 dmcontrol);
1614 return ERROR_FAIL;
1615 }
1616
1617 dm->hasel_supported = get_field(dmcontrol, DM_DMCONTROL_HASEL);
1618
1619 uint32_t dmstatus;
1620 if (dmstatus_read(target, &dmstatus, false) != ERROR_OK)
1621 return ERROR_FAIL;
1622 LOG_DEBUG("dmstatus: 0x%08x", dmstatus);
1623 int dmstatus_version = get_field(dmstatus, DM_DMSTATUS_VERSION);
1624 if (dmstatus_version != 2 && dmstatus_version != 3) {
1625 /* Error was already printed out in dmstatus_read(). */
1626 return ERROR_FAIL;
1627 }
1628
1629 uint32_t hartsel =
1630 (get_field(dmcontrol, DM_DMCONTROL_HARTSELHI) <<
1631 DM_DMCONTROL_HARTSELLO_LENGTH) |
1632 get_field(dmcontrol, DM_DMCONTROL_HARTSELLO);
1633 info->hartsellen = 0;
1634 while (hartsel & 1) {
1635 info->hartsellen++;
1636 hartsel >>= 1;
1637 }
1638 LOG_DEBUG("hartsellen=%d", info->hartsellen);
1639
1640 uint32_t hartinfo;
1641 if (dmi_read(target, &hartinfo, DM_HARTINFO) != ERROR_OK)
1642 return ERROR_FAIL;
1643
1644 info->datasize = get_field(hartinfo, DM_HARTINFO_DATASIZE);
1645 info->dataaccess = get_field(hartinfo, DM_HARTINFO_DATAACCESS);
1646 info->dataaddr = get_field(hartinfo, DM_HARTINFO_DATAADDR);
1647
1648 if (!get_field(dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
1649 LOG_ERROR("Debugger is not authenticated to target Debug Module. "
1650 "(dmstatus=0x%x). Use `riscv authdata_read` and "
1651 "`riscv authdata_write` commands to authenticate.", dmstatus);
1652 /* If we return ERROR_FAIL here, then in a multicore setup the next
1653 * core won't be examined, which means we won't set up the
1654 * authentication commands for them, which means the config script
1655 * needs to be a lot more complex. */
1656 return ERROR_OK;
1657 }
1658
1659 if (dmi_read(target, &info->sbcs, DM_SBCS) != ERROR_OK)
1660 return ERROR_FAIL;
1661
1662 /* Check that abstract data registers are accessible. */
1663 uint32_t abstractcs;
1664 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
1665 return ERROR_FAIL;
1666 info->datacount = get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT);
1667 info->progbufsize = get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE);
1668
1669 LOG_INFO("datacount=%d progbufsize=%d", info->datacount, info->progbufsize);
1670
1671 RISCV_INFO(r);
1672 r->impebreak = get_field(dmstatus, DM_DMSTATUS_IMPEBREAK);
1673
1674 if (!has_sufficient_progbuf(target, 2)) {
1675 LOG_WARNING("We won't be able to execute fence instructions on this "
1676 "target. Memory may not always appear consistent. "
1677 "(progbufsize=%d, impebreak=%d)", info->progbufsize,
1678 r->impebreak);
1679 }
1680
1681 if (info->progbufsize < 4 && riscv_enable_virtual) {
1682 LOG_ERROR("set_enable_virtual is not available on this target. It "
1683 "requires a program buffer size of at least 4. (progbufsize=%d) "
1684 "Use `riscv set_enable_virtual off` to continue."
1685 , info->progbufsize);
1686 }
1687
1688 /* Before doing anything else we must first enumerate the harts. */
1689 if (dm->hart_count < 0) {
1690 for (int i = 0; i < MIN(RISCV_MAX_HARTS, 1 << info->hartsellen); ++i) {
1691 r->current_hartid = i;
1692 if (riscv013_select_current_hart(target) != ERROR_OK)
1693 return ERROR_FAIL;
1694
1695 uint32_t s;
1696 if (dmstatus_read(target, &s, true) != ERROR_OK)
1697 return ERROR_FAIL;
1698 if (get_field(s, DM_DMSTATUS_ANYNONEXISTENT))
1699 break;
1700 dm->hart_count = i + 1;
1701
1702 if (get_field(s, DM_DMSTATUS_ANYHAVERESET))
1703 dmi_write(target, DM_DMCONTROL,
1704 set_hartsel(DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_ACKHAVERESET, i));
1705 }
1706
1707 LOG_DEBUG("Detected %d harts.", dm->hart_count);
1708 }
1709
1710 if (dm->hart_count == 0) {
1711 LOG_ERROR("No harts found!");
1712 return ERROR_FAIL;
1713 }
1714
1715 /* Don't call any riscv_* functions until after we've counted the number of
1716 * cores and initialized registers. */
1717 for (int i = 0; i < dm->hart_count; ++i) {
1718 if (!riscv_rtos_enabled(target) && i != target->coreid)
1719 continue;
1720
1721 r->current_hartid = i;
1722 if (riscv013_select_current_hart(target) != ERROR_OK)
1723 return ERROR_FAIL;
1724
1725 bool halted = riscv_is_halted(target);
1726 if (!halted) {
1727 if (riscv013_halt_go(target) != ERROR_OK) {
1728 LOG_ERROR("Fatal: Hart %d failed to halt during examine()", i);
1729 return ERROR_FAIL;
1730 }
1731 }
1732
1733 /* Without knowing anything else we can at least mess with the
1734 * program buffer. */
1735 r->debug_buffer_size[i] = info->progbufsize;
1736
1737 int result = register_read_abstract(target, NULL, GDB_REGNO_S0, 64);
1738 if (result == ERROR_OK)
1739 r->xlen[i] = 64;
1740 else
1741 r->xlen[i] = 32;
1742
1743 if (register_read(target, &r->misa[i], GDB_REGNO_MISA) != ERROR_OK) {
1744 LOG_ERROR("Fatal: Failed to read MISA from hart %d.", i);
1745 return ERROR_FAIL;
1746 }
1747
1748 if (riscv_supports_extension(target, i, 'V')) {
1749 if (discover_vlenb(target, i) != ERROR_OK)
1750 return ERROR_FAIL;
1751 }
1752
1753 /* Now init registers based on what we discovered. */
1754 if (riscv_init_registers(target) != ERROR_OK)
1755 return ERROR_FAIL;
1756
1757 /* Display this as early as possible to help people who are using
1758 * really slow simulators. */
1759 LOG_DEBUG(" hart %d: XLEN=%d, misa=0x%" PRIx64, i, r->xlen[i],
1760 r->misa[i]);
1761
1762 if (!halted)
1763 riscv013_step_or_resume_current_hart(target, false, false);
1764 }
1765
1766 target_set_examined(target);
1767
1768 if (target->smp) {
1769 bool haltgroup_supported;
1770 if (set_haltgroup(target, &haltgroup_supported) != ERROR_OK)
1771 return ERROR_FAIL;
1772 if (haltgroup_supported)
1773 LOG_INFO("Core %d made part of halt group %d.", target->coreid,
1774 target->smp);
1775 else
1776 LOG_INFO("Core %d could not be made part of halt group %d.",
1777 target->coreid, target->smp);
1778 }
1779
1780 /* Some regression suites rely on seeing 'Examined RISC-V core' to know
1781 * when they can connect with gdb/telnet.
1782 * We will need to update those suites if we want to change that text. */
1783 LOG_INFO("Examined RISC-V core; found %d harts",
1784 riscv_count_harts(target));
1785 for (int i = 0; i < riscv_count_harts(target); ++i) {
1786 if (riscv_hart_enabled(target, i)) {
1787 LOG_INFO(" hart %d: XLEN=%d, misa=0x%" PRIx64, i, r->xlen[i],
1788 r->misa[i]);
1789 } else {
1790 LOG_INFO(" hart %d: currently disabled", i);
1791 }
1792 }
1793 return ERROR_OK;
1794 }
1795
1796 int riscv013_authdata_read(struct target *target, uint32_t *value)
1797 {
1798 if (wait_for_authbusy(target, NULL) != ERROR_OK)
1799 return ERROR_FAIL;
1800
1801 return dmi_read(target, value, DM_AUTHDATA);
1802 }
1803
1804 int riscv013_authdata_write(struct target *target, uint32_t value)
1805 {
1806 uint32_t before, after;
1807 if (wait_for_authbusy(target, &before) != ERROR_OK)
1808 return ERROR_FAIL;
1809
1810 dmi_write(target, DM_AUTHDATA, value);
1811
1812 if (wait_for_authbusy(target, &after) != ERROR_OK)
1813 return ERROR_FAIL;
1814
1815 if (!get_field(before, DM_DMSTATUS_AUTHENTICATED) &&
1816 get_field(after, DM_DMSTATUS_AUTHENTICATED)) {
1817 LOG_INFO("authdata_write resulted in successful authentication");
1818 int result = ERROR_OK;
1819 dm013_info_t *dm = get_dm(target);
1820 if (!dm)
1821 return ERROR_FAIL;
1822 target_list_t *entry;
1823 list_for_each_entry(entry, &dm->target_list, list) {
1824 if (examine(entry->target) != ERROR_OK)
1825 result = ERROR_FAIL;
1826 }
1827 return result;
1828 }
1829
1830 return ERROR_OK;
1831 }
1832
1833 static int riscv013_hart_count(struct target *target)
1834 {
1835 dm013_info_t *dm = get_dm(target);
1836 assert(dm);
1837 return dm->hart_count;
1838 }
1839
1840 static unsigned riscv013_data_bits(struct target *target)
1841 {
1842 RISCV013_INFO(info);
1843 /* TODO: Once there is a spec for discovering abstract commands, we can
1844 * take those into account as well. For now we assume abstract commands
1845 * support XLEN-wide accesses. */
1846 if (has_sufficient_progbuf(target, 3) && !riscv_prefer_sba)
1847 return riscv_xlen(target);
1848
1849 if (get_field(info->sbcs, DM_SBCS_SBACCESS128))
1850 return 128;
1851 if (get_field(info->sbcs, DM_SBCS_SBACCESS64))
1852 return 64;
1853 if (get_field(info->sbcs, DM_SBCS_SBACCESS32))
1854 return 32;
1855 if (get_field(info->sbcs, DM_SBCS_SBACCESS16))
1856 return 16;
1857 if (get_field(info->sbcs, DM_SBCS_SBACCESS8))
1858 return 8;
1859
1860 return riscv_xlen(target);
1861 }
1862
1863 static int prep_for_vector_access(struct target *target, uint64_t *vtype,
1864 uint64_t *vl, unsigned *debug_vl)
1865 {
1866 RISCV_INFO(r);
1867 /* TODO: this continuous save/restore is terrible for performance. */
1868 /* Write vtype and vl. */
1869 unsigned encoded_vsew;
1870 switch (riscv_xlen(target)) {
1871 case 32:
1872 encoded_vsew = 2;
1873 break;
1874 case 64:
1875 encoded_vsew = 3;
1876 break;
1877 default:
1878 LOG_ERROR("Unsupported xlen: %d", riscv_xlen(target));
1879 return ERROR_FAIL;
1880 }
1881
1882 /* Save vtype and vl. */
1883 if (register_read(target, vtype, GDB_REGNO_VTYPE) != ERROR_OK)
1884 return ERROR_FAIL;
1885 if (register_read(target, vl, GDB_REGNO_VL) != ERROR_OK)
1886 return ERROR_FAIL;
1887
1888 if (register_write_direct(target, GDB_REGNO_VTYPE, encoded_vsew << 3) != ERROR_OK)
1889 return ERROR_FAIL;
1890 *debug_vl = DIV_ROUND_UP(r->vlenb[r->current_hartid] * 8,
1891 riscv_xlen(target));
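/* Illustrative example: with VLEN=256 the hart reports vlenb=32, so on an
 * RV64 hart debug_vl = DIV_ROUND_UP(32 * 8, 64) = 4, i.e. each vector
 * register is transferred as four 64-bit chunks. */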
1892 if (register_write_direct(target, GDB_REGNO_VL, *debug_vl) != ERROR_OK)
1893 return ERROR_FAIL;
1894
1895 return ERROR_OK;
1896 }
1897
1898 static int cleanup_after_vector_access(struct target *target, uint64_t vtype,
1899 uint64_t vl)
1900 {
1901 /* Restore vtype and vl. */
1902 if (register_write_direct(target, GDB_REGNO_VTYPE, vtype) != ERROR_OK)
1903 return ERROR_FAIL;
1904 if (register_write_direct(target, GDB_REGNO_VL, vl) != ERROR_OK)
1905 return ERROR_FAIL;
1906 return ERROR_OK;
1907 }
1908
1909 static int riscv013_get_register_buf(struct target *target,
1910 uint8_t *value, int regno)
1911 {
1912 assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);
1913
1914 riscv_reg_t s0;
1915 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1916 return ERROR_FAIL;
1917
1918 uint64_t mstatus;
1919 if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
1920 return ERROR_FAIL;
1921
1922 uint64_t vtype, vl;
1923 unsigned debug_vl;
1924 if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
1925 return ERROR_FAIL;
1926
1927 unsigned vnum = regno - GDB_REGNO_V0;
1928 unsigned xlen = riscv_xlen(target);
1929
1930 struct riscv_program program;
1931 riscv_program_init(&program, target);
1932 riscv_program_insert(&program, vmv_x_s(S0, vnum));
1933 riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));
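/* Each execution of this two-instruction program copies element 0 of the
 * vector register into S0 (vmv.x.s) and slides the register down by one
 * element with S0 as the fill value (vslide1down.vx), so after debug_vl
 * iterations the register should be back to its original contents. */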
1934
1935 int result = ERROR_OK;
1936 for (unsigned i = 0; i < debug_vl; i++) {
1937 /* Executing the program might result in an exception if there is some
1938 * issue with the vector implementation/instructions we're using. If that
1939 * happens, attempt to restore as usual. We may have clobbered the
1940 * vector register we tried to read already.
1941 * For other failures, we just return error because things are probably
1942 * so messed up that attempting to restore isn't going to help. */
1943 result = riscv_program_exec(&program, target);
1944 if (result == ERROR_OK) {
1945 uint64_t v;
1946 if (register_read_direct(target, &v, GDB_REGNO_S0) != ERROR_OK)
1947 return ERROR_FAIL;
1948 buf_set_u64(value, xlen * i, xlen, v);
1949 } else {
1950 break;
1951 }
1952 }
1953
1954 if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
1955 return ERROR_FAIL;
1956
1957 if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
1958 return ERROR_FAIL;
1959 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1960 return ERROR_FAIL;
1961
1962 return result;
1963 }
1964
1965 static int riscv013_set_register_buf(struct target *target,
1966 int regno, const uint8_t *value)
1967 {
1968 assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);
1969
1970 riscv_reg_t s0;
1971 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1972 return ERROR_FAIL;
1973
1974 uint64_t mstatus;
1975 if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
1976 return ERROR_FAIL;
1977
1978 uint64_t vtype, vl;
1979 unsigned debug_vl;
1980 if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
1981 return ERROR_FAIL;
1982
1983 unsigned vnum = regno - GDB_REGNO_V0;
1984 unsigned xlen = riscv_xlen(target);
1985
1986 struct riscv_program program;
1987 riscv_program_init(&program, target);
1988 riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));
1989 int result = ERROR_OK;
1990 for (unsigned i = 0; i < debug_vl; i++) {
1991 if (register_write_direct(target, GDB_REGNO_S0,
1992 buf_get_u64(value, xlen * i, xlen)) != ERROR_OK)
1993 return ERROR_FAIL;
1994 result = riscv_program_exec(&program, target);
1995 if (result != ERROR_OK)
1996 break;
1997 }
1998
1999 if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
2000 return ERROR_FAIL;
2001
2002 if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
2003 return ERROR_FAIL;
2004 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
2005 return ERROR_FAIL;
2006
2007 return result;
2008 }
2009
2010 static int init_target(struct command_context *cmd_ctx,
2011 struct target *target)
2012 {
2013 LOG_DEBUG("init");
2014 riscv_info_t *generic_info = (riscv_info_t *) target->arch_info;
2015
2016 generic_info->get_register = &riscv013_get_register;
2017 generic_info->set_register = &riscv013_set_register;
2018 generic_info->get_register_buf = &riscv013_get_register_buf;
2019 generic_info->set_register_buf = &riscv013_set_register_buf;
2020 generic_info->select_current_hart = &riscv013_select_current_hart;
2021 generic_info->is_halted = &riscv013_is_halted;
2022 generic_info->resume_go = &riscv013_resume_go;
2023 generic_info->step_current_hart = &riscv013_step_current_hart;
2024 generic_info->on_halt = &riscv013_on_halt;
2025 generic_info->resume_prep = &riscv013_resume_prep;
2026 generic_info->halt_prep = &riscv013_halt_prep;
2027 generic_info->halt_go = &riscv013_halt_go;
2028 generic_info->on_step = &riscv013_on_step;
2029 generic_info->halt_reason = &riscv013_halt_reason;
2030 generic_info->read_debug_buffer = &riscv013_read_debug_buffer;
2031 generic_info->write_debug_buffer = &riscv013_write_debug_buffer;
2032 generic_info->execute_debug_buffer = &riscv013_execute_debug_buffer;
2033 generic_info->fill_dmi_write_u64 = &riscv013_fill_dmi_write_u64;
2034 generic_info->fill_dmi_read_u64 = &riscv013_fill_dmi_read_u64;
2035 generic_info->fill_dmi_nop_u64 = &riscv013_fill_dmi_nop_u64;
2036 generic_info->dmi_write_u64_bits = &riscv013_dmi_write_u64_bits;
2037 generic_info->authdata_read = &riscv013_authdata_read;
2038 generic_info->authdata_write = &riscv013_authdata_write;
2039 generic_info->dmi_read = &dmi_read;
2040 generic_info->dmi_write = &dmi_write;
2041 generic_info->read_memory = read_memory;
2042 generic_info->test_sba_config_reg = &riscv013_test_sba_config_reg;
2043 generic_info->test_compliance = &riscv013_test_compliance;
2044 generic_info->hart_count = &riscv013_hart_count;
2045 generic_info->data_bits = &riscv013_data_bits;
2046 generic_info->version_specific = calloc(1, sizeof(riscv013_info_t));
2047 if (!generic_info->version_specific)
2048 return ERROR_FAIL;
2049 riscv013_info_t *info = get_info(target);
2050
2051 info->progbufsize = -1;
2052
2053 info->dmi_busy_delay = 0;
2054 info->bus_master_read_delay = 0;
2055 info->bus_master_write_delay = 0;
2056 info->ac_busy_delay = 0;
2057
2058 /* Assume all these abstract commands are supported until we learn
2059 * otherwise.
2060 * TODO: The spec allows e.g. one CSR to be accessible abstractly
2061 * while another one isn't. We don't track that closely here, but in
2062 * the future we probably should. */
2063 info->abstract_read_csr_supported = true;
2064 info->abstract_write_csr_supported = true;
2065 info->abstract_read_fpr_supported = true;
2066 info->abstract_write_fpr_supported = true;
2067
2068 return ERROR_OK;
2069 }
2070
2071 static int assert_reset(struct target *target)
2072 {
2073 RISCV_INFO(r);
2074
2075 select_dmi(target);
2076
2077 uint32_t control_base = set_field(0, DM_DMCONTROL_DMACTIVE, 1);
2078
2079 if (target->rtos) {
2080 /* There's only one target, and OpenOCD thinks each hart is a thread.
2081 * We must reset them all. */
2082
2083 /* TODO: Try to use hasel in dmcontrol */
2084
2085 /* Set haltreq for each hart. */
2086 uint32_t control = control_base;
2087 for (int i = 0; i < riscv_count_harts(target); ++i) {
2088 if (!riscv_hart_enabled(target, i))
2089 continue;
2090
2091 control = set_hartsel(control_base, i);
2092 control = set_field(control, DM_DMCONTROL_HALTREQ,
2093 target->reset_halt ? 1 : 0);
2094 dmi_write(target, DM_DMCONTROL, control);
2095 }
2096 /* Assert ndmreset */
2097 control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
2098 dmi_write(target, DM_DMCONTROL, control);
2099
2100 } else {
2101 /* Reset just this hart. */
2102 uint32_t control = set_hartsel(control_base, r->current_hartid);
2103 control = set_field(control, DM_DMCONTROL_HALTREQ,
2104 target->reset_halt ? 1 : 0);
2105 control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
2106 dmi_write(target, DM_DMCONTROL, control);
2107 }
2108
2109 target->state = TARGET_RESET;
2110
2111 dm013_info_t *dm = get_dm(target);
2112 if (!dm)
2113 return ERROR_FAIL;
2114
2115 /* The DM might have gotten reset if OpenOCD called us in some reset that
2116 * involves SRST being toggled. So clear our cache which may be out of
2117 * date. */
2118 memset(dm->progbuf_cache, 0, sizeof(dm->progbuf_cache));
2119
2120 return ERROR_OK;
2121 }
2122
2123 static int deassert_reset(struct target *target)
2124 {
2125 RISCV_INFO(r);
2126 RISCV013_INFO(info);
2127 select_dmi(target);
2128
2129 /* Clear the reset, but make sure haltreq is still set */
2130 uint32_t control = 0;
2131 control = set_field(control, DM_DMCONTROL_HALTREQ, target->reset_halt ? 1 : 0);
2132 control = set_field(control, DM_DMCONTROL_DMACTIVE, 1);
2133 dmi_write(target, DM_DMCONTROL,
2134 set_hartsel(control, r->current_hartid));
2135
2136 uint32_t dmstatus;
2137 int dmi_busy_delay = info->dmi_busy_delay;
2138 time_t start = time(NULL);
2139
2140 for (int i = 0; i < riscv_count_harts(target); ++i) {
2141 int index = i;
2142 if (target->rtos) {
2143 if (!riscv_hart_enabled(target, index))
2144 continue;
2145 dmi_write(target, DM_DMCONTROL,
2146 set_hartsel(control, index));
2147 } else {
2148 index = r->current_hartid;
2149 }
2150
2151 const char *operation;
2152 uint32_t expected_field;
2153 if (target->reset_halt) {
2154 operation = "halt";
2155 expected_field = DM_DMSTATUS_ALLHALTED;
2156 } else {
2157 operation = "run";
2158 expected_field = DM_DMSTATUS_ALLRUNNING;
2159 }
2160 LOG_DEBUG("Waiting for hart %d to %s out of reset.", index, operation);
2161 while (1) {
2162 int result = dmstatus_read_timeout(target, &dmstatus, true,
2163 riscv_reset_timeout_sec);
2164 if (result == ERROR_TIMEOUT_REACHED)
2165 LOG_ERROR("Hart %d didn't complete a DMI read coming out of "
2166 "reset in %ds; Increase the timeout with riscv "
2167 "set_reset_timeout_sec.",
2168 index, riscv_reset_timeout_sec);
2169 if (result != ERROR_OK)
2170 return result;
2171 if (get_field(dmstatus, expected_field))
2172 break;
2173 if (time(NULL) - start > riscv_reset_timeout_sec) {
2174 LOG_ERROR("Hart %d didn't %s coming out of reset in %ds; "
2175 "dmstatus=0x%x; "
2176 "Increase the timeout with riscv set_reset_timeout_sec.",
2177 index, operation, riscv_reset_timeout_sec, dmstatus);
2178 return ERROR_FAIL;
2179 }
2180 }
2181 target->state = TARGET_HALTED;
2182
2183 if (get_field(dmstatus, DM_DMSTATUS_ALLHAVERESET)) {
2184 /* Ack reset. */
2185 dmi_write(target, DM_DMCONTROL,
2186 set_hartsel(control, index) |
2187 DM_DMCONTROL_ACKHAVERESET);
2188 }
2189
2190 if (!target->rtos)
2191 break;
2192 }
2193 info->dmi_busy_delay = dmi_busy_delay;
2194 return ERROR_OK;
2195 }
2196
2197 static int execute_fence(struct target *target)
2198 {
2199 int old_hartid = riscv_current_hartid(target);
2200
2201 /* FIXME: For non-coherent systems we need to flush the caches right
2202 * here, but there's no ISA-defined way of doing that. */
2203 {
2204 struct riscv_program program;
2205 riscv_program_init(&program, target);
2206 riscv_program_fence_i(&program);
2207 riscv_program_fence(&program);
2208 int result = riscv_program_exec(&program, target);
2209 if (result != ERROR_OK)
2210 LOG_DEBUG("Unable to execute pre-fence");
2211 }
2212
2213 for (int i = 0; i < riscv_count_harts(target); ++i) {
2214 if (!riscv_hart_enabled(target, i))
2215 continue;
2216
2217 if (i == old_hartid)
2218 /* Fence already executed for this hart */
2219 continue;
2220
2221 riscv_set_current_hartid(target, i);
2222
2223 struct riscv_program program;
2224 riscv_program_init(&program, target);
2225 riscv_program_fence_i(&program);
2226 riscv_program_fence(&program);
2227 int result = riscv_program_exec(&program, target);
2228 if (result != ERROR_OK)
2229 LOG_DEBUG("Unable to execute fence on hart %d", i);
2230 }
2231
2232 riscv_set_current_hartid(target, old_hartid);
2233
2234 return ERROR_OK;
2235 }
2236
2237 static void log_memory_access(target_addr_t address, uint64_t value,
2238 unsigned size_bytes, bool read)
2239 {
2240 if (debug_level < LOG_LVL_DEBUG)
2241 return;
2242
2243 char fmt[80];
2244 sprintf(fmt, "M[0x%" TARGET_PRIxADDR "] %ss 0x%%0%d" PRIx64,
2245 address, read ? "read" : "write", size_bytes * 2);
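/* For a 4-byte read at (made-up address) 0x80001000 this builds a format
 * string along the lines of "M[0x80001000] reads 0x%08"PRIx64, so the value
 * below is printed zero-padded to the access width. */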
2246 switch (size_bytes) {
2247 case 1:
2248 value &= 0xff;
2249 break;
2250 case 2:
2251 value &= 0xffff;
2252 break;
2253 case 4:
2254 value &= 0xffffffffUL;
2255 break;
2256 case 8:
2257 break;
2258 default:
2259 assert(false);
2260 }
2261 LOG_DEBUG(fmt, value);
2262 }
2263
2264 /* Read the relevant sbdata regs depending on size, and put the results into
2265 * buffer. */
2266 static int read_memory_bus_word(struct target *target, target_addr_t address,
2267 uint32_t size, uint8_t *buffer)
2268 {
2269 uint32_t value;
2270 int result;
2271 static int sbdata[4] = { DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3 };
2272 assert(size <= 16);
2273 for (int i = (size - 1) / 4; i >= 0; i--) {
2274 result = dmi_op(target, &value, NULL, DMI_OP_READ, sbdata[i], 0, false, true);
2275 if (result != ERROR_OK)
2276 return result;
2277 buf_set_u32(buffer + i * 4, 0, 8 * MIN(size, 4), value);
2278 log_memory_access(address + i * 4, value, MIN(size, 4), true);
2279 }
2280 return ERROR_OK;
2281 }
2282
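/* Encode an access size in bytes into the sbaccess field, which holds log2 of
 * the size: 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3, 16 -> 4. */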
2283 static uint32_t sb_sbaccess(unsigned size_bytes)
2284 {
2285 switch (size_bytes) {
2286 case 1:
2287 return set_field(0, DM_SBCS_SBACCESS, 0);
2288 case 2:
2289 return set_field(0, DM_SBCS_SBACCESS, 1);
2290 case 4:
2291 return set_field(0, DM_SBCS_SBACCESS, 2);
2292 case 8:
2293 return set_field(0, DM_SBCS_SBACCESS, 3);
2294 case 16:
2295 return set_field(0, DM_SBCS_SBACCESS, 4);
2296 }
2297 assert(0);
2298 return 0; /* Make mingw happy. */
2299 }
2300
2301 static target_addr_t sb_read_address(struct target *target)
2302 {
2303 RISCV013_INFO(info);
2304 unsigned sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2305 target_addr_t address = 0;
2306 uint32_t v;
2307 if (sbasize > 32) {
2308 dmi_read(target, &v, DM_SBADDRESS1);
2309 address |= v;
2310 address <<= 32;
2311 }
2312 dmi_read(target, &v, DM_SBADDRESS0);
2313 address |= v;
2314 return address;
2315 }
2316
2317 static int sb_write_address(struct target *target, target_addr_t address)
2318 {
2319 RISCV013_INFO(info);
2320 unsigned sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2321 /* There currently is no support for >64-bit addresses in OpenOCD. */
2322 if (sbasize > 96)
2323 dmi_write(target, DM_SBADDRESS3, 0);
2324 if (sbasize > 64)
2325 dmi_write(target, DM_SBADDRESS2, 0);
2326 if (sbasize > 32)
2327 dmi_write(target, DM_SBADDRESS1, address >> 32);
2328 return dmi_write(target, DM_SBADDRESS0, address);
2329 }
2330
2331 static int read_sbcs_nonbusy(struct target *target, uint32_t *sbcs)
2332 {
2333 time_t start = time(NULL);
2334 while (1) {
2335 if (dmi_read(target, sbcs, DM_SBCS) != ERROR_OK)
2336 return ERROR_FAIL;
2337 if (!get_field(*sbcs, DM_SBCS_SBBUSY))
2338 return ERROR_OK;
2339 if (time(NULL) - start > riscv_command_timeout_sec) {
2340 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
2341 "Increase the timeout with riscv set_command_timeout_sec.",
2342 riscv_command_timeout_sec, *sbcs);
2343 return ERROR_FAIL;
2344 }
2345 }
2346 }
2347
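/* When `riscv set_enable_virtual` is on and there is enough program buffer,
 * temporarily set mstatus.MPRV and point mstatus.MPP at the privilege level
 * recorded in dcsr.prv (unless the hart was already in M-mode), so that
 * loads/stores issued from the program buffer are translated and
 * permission-checked at the hart's original privilege level. Callers restore
 * the saved mstatus afterwards. */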
2348 static int modify_privilege(struct target *target, uint64_t *mstatus, uint64_t *mstatus_old)
2349 {
2350 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5)) {
2351 /* Read DCSR */
2352 uint64_t dcsr;
2353 if (register_read(target, &dcsr, GDB_REGNO_DCSR) != ERROR_OK)
2354 return ERROR_FAIL;
2355
2356 /* Read and save MSTATUS */
2357 if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
2358 return ERROR_FAIL;
2359 *mstatus_old = *mstatus;
2360
2361 /* If we come from m-mode with mprv set, we want to keep mpp */
2362 if (get_field(dcsr, DCSR_PRV) < 3) {
2363 /* MPP = PRIV */
2364 *mstatus = set_field(*mstatus, MSTATUS_MPP, get_field(dcsr, DCSR_PRV));
2365
2366 /* MPRV = 1 */
2367 *mstatus = set_field(*mstatus, MSTATUS_MPRV, 1);
2368
2369 /* Write MSTATUS */
2370 if (*mstatus != *mstatus_old)
2371 if (register_write_direct(target, GDB_REGNO_MSTATUS, *mstatus) != ERROR_OK)
2372 return ERROR_FAIL;
2373 }
2374 }
2375
2376 return ERROR_OK;
2377 }
2378
2379 static int read_memory_bus_v0(struct target *target, target_addr_t address,
2380 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
2381 {
2382 if (size != increment) {
2383 LOG_ERROR("sba v0 reads only support size==increment");
2384 return ERROR_NOT_IMPLEMENTED;
2385 }
2386
2387 LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
2388 TARGET_PRIxADDR, size, count, address);
2389 uint8_t *t_buffer = buffer;
2390 riscv_addr_t cur_addr = address;
2391 riscv_addr_t fin_addr = address + (count * size);
2392 uint32_t access = 0;
2393
2394 const int DM_SBCS_SBSINGLEREAD_OFFSET = 20;
2395 const uint32_t DM_SBCS_SBSINGLEREAD = (0x1U << DM_SBCS_SBSINGLEREAD_OFFSET);
2396
2397 const int DM_SBCS_SBAUTOREAD_OFFSET = 15;
2398 const uint32_t DM_SBCS_SBAUTOREAD = (0x1U << DM_SBCS_SBAUTOREAD_OFFSET);
2399
2400 /* We favor one-off reads here, in case there is an issue. */
2401 if (count == 1) {
2402 for (uint32_t i = 0; i < count; i++) {
2403 if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
2404 return ERROR_FAIL;
2405 dmi_write(target, DM_SBADDRESS0, cur_addr);
2406 /* sbaccess = size/2 matches the access-size encoding in the 0.13 spec */
2407 access = set_field(access, DM_SBCS_SBACCESS, size/2);
2408 access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
2409 LOG_DEBUG("\r\nread_memory: sab: access: 0x%08x", access);
2410 dmi_write(target, DM_SBCS, access);
2411 /* 3) read */
2412 uint32_t value;
2413 if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
2414 return ERROR_FAIL;
2415 LOG_DEBUG("\r\nread_memory: sab: value: 0x%08x", value);
2416 buf_set_u32(t_buffer, 0, 8 * size, value);
2417 t_buffer += size;
2418 cur_addr += size;
2419 }
2420 return ERROR_OK;
2421 }
2422
2423 /* has to be the same size if we want to read a block */
2424 LOG_DEBUG("reading block until final address 0x%" PRIx64, fin_addr);
2425 if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
2426 return ERROR_FAIL;
2427 /* set current address */
2428 dmi_write(target, DM_SBADDRESS0, cur_addr);
2429 /* 2) write sbaccess=size/2, sbsingleread, sbautoread, sbautoincrement;
2430 * size/2 matches the access-size encoding in the 0.13 spec */
2431 access = set_field(access, DM_SBCS_SBACCESS, size/2);
2432 access = set_field(access, DM_SBCS_SBAUTOREAD, 1);
2433 access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
2434 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
2435 LOG_DEBUG("\r\naccess: 0x%08x", access);
2436 dmi_write(target, DM_SBCS, access);
2437
2438 while (cur_addr < fin_addr) {
2439 LOG_DEBUG("\r\nsab:autoincrement: \r\n size: %d\tcount:%d\taddress: 0x%08"
2440 PRIx64, size, count, cur_addr);
2441 /* read */
2442 uint32_t value;
2443 if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
2444 return ERROR_FAIL;
2445 buf_set_u32(t_buffer, 0, 8 * size, value);
2446 cur_addr += size;
2447 t_buffer += size;
2448
2449 /* once we have reached the last address, we must clear autoread */
2450 if (cur_addr == fin_addr && count != 1) {
2451 dmi_write(target, DM_SBCS, 0);
2452 if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
2453 return ERROR_FAIL;
2454 buf_set_u32(t_buffer, 0, 8 * size, value);
2455 }
2456 }
2457
2458 return ERROR_OK;
2459 }
2460
2461 /**
2462 * Read the requested memory using the system bus interface.
2463 */
2464 static int read_memory_bus_v1(struct target *target, target_addr_t address,
2465 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
2466 {
2467 if (increment != size && increment != 0) {
2468 LOG_ERROR("sba v1 reads only support increment of size or 0");
2469 return ERROR_NOT_IMPLEMENTED;
2470 }
2471
2472 RISCV013_INFO(info);
2473 target_addr_t next_address = address;
2474 target_addr_t end_address = address + count * size;
2475
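/* Rough flow of a v1 system bus read burst: program sbcs with sbreadonaddr
 * (plus sbreadondata/sbautoincrement for multi-word reads), write the start
 * address, which triggers the first bus read, then each DMI read of sbdata0
 * both returns a word and triggers the next bus read. The final word is read
 * only after sbreadondata has been cleared, so no extra bus access occurs. */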
2476 while (next_address < end_address) {
2477 uint32_t sbcs_write = set_field(0, DM_SBCS_SBREADONADDR, 1);
2478 sbcs_write |= sb_sbaccess(size);
2479 if (increment == size)
2480 sbcs_write = set_field(sbcs_write, DM_SBCS_SBAUTOINCREMENT, 1);
2481 if (count > 1)
2482 sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, count > 1);
2483 if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
2484 return ERROR_FAIL;
2485
2486 /* This address write will trigger the first read. */
2487 if (sb_write_address(target, next_address) != ERROR_OK)
2488 return ERROR_FAIL;
2489
2490 if (info->bus_master_read_delay) {
2491 jtag_add_runtest(info->bus_master_read_delay, TAP_IDLE);
2492 if (jtag_execute_queue() != ERROR_OK) {
2493 LOG_ERROR("Failed to scan idle sequence");
2494 return ERROR_FAIL;
2495 }
2496 }
2497
2498 /* First value has been read, and is waiting for us to issue a DMI read
2499 * to get it. */
2500
2501 static int sbdata[4] = {DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3};
2502 assert(size <= 16);
2503 target_addr_t next_read = address - 1;
2504 for (uint32_t i = (next_address - address) / size; i < count - 1; i++) {
2505 for (int j = (size - 1) / 4; j >= 0; j--) {
2506 uint32_t value;
2507 unsigned attempt = 0;
2508 while (1) {
2509 if (attempt++ > 100) {
2510 LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
2511 next_read);
2512 return ERROR_FAIL;
2513 }
2514 dmi_status_t status = dmi_scan(target, NULL, &value,
2515 DMI_OP_READ, sbdata[j], 0, false);
2516 if (status == DMI_STATUS_BUSY)
2517 increase_dmi_busy_delay(target);
2518 else if (status == DMI_STATUS_SUCCESS)
2519 break;
2520 else
2521 return ERROR_FAIL;
2522 }
2523 if (next_read != address - 1) {
2524 buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
2525 log_memory_access(next_read, value, MIN(size, 4), true);
2526 }
2527 next_read = address + i * size + j * 4;
2528 }
2529 }
2530
2531 uint32_t sbcs_read = 0;
2532 if (count > 1) {
2533 uint32_t value;
2534 unsigned attempt = 0;
2535 while (1) {
2536 if (attempt++ > 100) {
2537 LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
2538 next_read);
2539 return ERROR_FAIL;
2540 }
2541 dmi_status_t status = dmi_scan(target, NULL, &value, DMI_OP_NOP, 0, 0, false);
2542 if (status == DMI_STATUS_BUSY)
2543 increase_dmi_busy_delay(target);
2544 else if (status == DMI_STATUS_SUCCESS)
2545 break;
2546 else
2547 return ERROR_FAIL;
2548 }
2549 buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
2550 log_memory_access(next_read, value, MIN(size, 4), true);
2551
2552 /* "Writes to sbcs while sbbusy is high result in undefined behavior.
2553 * A debugger must not write to sbcs until it reads sbbusy as 0." */
2554 if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
2555 return ERROR_FAIL;
2556
2557 sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, 0);
2558 if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
2559 return ERROR_FAIL;
2560 }
2561
2562 /* Read the last word, after we disabled sbreadondata if necessary. */
2563 if (!get_field(sbcs_read, DM_SBCS_SBERROR) &&
2564 !get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
2565 if (read_memory_bus_word(target, address + (count - 1) * size, size,
2566 buffer + (count - 1) * size) != ERROR_OK)
2567 return ERROR_FAIL;
2568
2569 if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
2570 return ERROR_FAIL;
2571 }
2572
2573 if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
2574 /* We read while the target was busy. Slow down and try again. */
2575 if (dmi_write(target, DM_SBCS, DM_SBCS_SBBUSYERROR) != ERROR_OK)
2576 return ERROR_FAIL;
2577 next_address = sb_read_address(target);
2578 info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
2579 continue;
2580 }
2581
2582 unsigned error = get_field(sbcs_read, DM_SBCS_SBERROR);
2583 if (error == 0) {
2584 next_address = end_address;
2585 } else {
2586 /* Some error indicating the bus access failed, but not because of
2587 * something we did wrong. */
2588 if (dmi_write(target, DM_SBCS, DM_SBCS_SBERROR) != ERROR_OK)
2589 return ERROR_FAIL;
2590 return ERROR_FAIL;
2591 }
2592 }
2593
2594 return ERROR_OK;
2595 }
2596
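/* Run a batch. If the generic layer armed a reset_delays countdown (the
 * `riscv reset_delays` test command), clear the learned DMI/abstract-command
 * busy delays once enough scans have elapsed, so the busy/retry paths get
 * exercised again from that point on. */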
2597 static int batch_run(const struct target *target, struct riscv_batch *batch)
2598 {
2599 RISCV013_INFO(info);
2600 RISCV_INFO(r);
2601 if (r->reset_delays_wait >= 0) {
2602 r->reset_delays_wait -= batch->used_scans;
2603 if (r->reset_delays_wait <= 0) {
2604 batch->idle_count = 0;
2605 info->dmi_busy_delay = 0;
2606 info->ac_busy_delay = 0;
2607 }
2608 }
2609 return riscv_batch_run(batch);
2610 }
2611
2612 /*
2613 * Performs a memory read using memory access abstract commands. The read sizes
2614 * supported are 1, 2, 4, and 8 bytes, despite the spec also defining a
2615 * 16-byte aamsize in the memory access abstract command.
2616 */
2617 static int read_memory_abstract(struct target *target, target_addr_t address,
2618 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
2619 {
2620 if (size != increment) {
2621 LOG_ERROR("abstract command reads only support size==increment");
2622 return ERROR_NOT_IMPLEMENTED;
2623 }
2624
2625 int result = ERROR_OK;
2626
2627 LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
2628 size, address);
2629
2630 memset(buffer, 0, count * size);
2631
2632 /* Convert the size (bytes) to width (bits) */
2633 unsigned width = size << 3;
2634 if (width > 64) {
2635 /* TODO: Add 128b support if it's ever used. Involves modifying
2636 read/write_abstract_arg() to work on two 64b values. */
2637 LOG_ERROR("Unsupported size: %d bits", size);
2638 return ERROR_FAIL;
2639 }
2640
2641 /* Create the command (physical address, postincrement, read) */
2642 uint32_t command = access_memory_command(target, false, width, true, false);
2643
2644 /* Execute the reads */
2645 uint8_t *p = buffer;
2646 bool updateaddr = true;
2647 unsigned width32 = (width + 31) / 32 * 32;
2648 for (uint32_t c = 0; c < count; c++) {
2649 /* Only update the address initially and let postincrement update it */
2650 if (updateaddr) {
2651 /* Set arg1 to the address: address + c * size */
2652 result = write_abstract_arg(target, 1, address, riscv_xlen(target));
2653 if (result != ERROR_OK) {
2654 LOG_ERROR("Failed to write arg1 during read_memory_abstract().");
2655 return result;
2656 }
2657 }
2658
2659 /* Execute the command */
2660 result = execute_abstract_command(target, command);
2661 if (result != ERROR_OK) {
2662 LOG_ERROR("Failed to execute command read_memory_abstract().");
2663 return result;
2664 }
2665
2666 /* Copy arg0 to buffer (rounded width up to nearest 32) */
2667 riscv_reg_t value = read_abstract_arg(target, 0, width32);
2668 buf_set_u64(p, 0, 8 * size, value);
2669
2670 updateaddr = false;
2671 p += size;
2672 }
2673
2674 return result;
2675 }
2676
2677 /*
2678 * Performs a memory write using memory access abstract commands. The write
2679 * sizes supported are 1, 2, 4, and 8 bytes, despite the spec also defining
2680 * a 16-byte aamsize in the memory access abstract command.
2681 */
2682 static int write_memory_abstract(struct target *target, target_addr_t address,
2683 uint32_t size, uint32_t count, const uint8_t *buffer)
2684 {
2685 int result = ERROR_OK;
2686
2687 LOG_DEBUG("writing %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
2688 size, address);
2689
2690 /* Convert the size (bytes) to width (bits) */
2691 unsigned width = size << 3;
2692 if (width > 64) {
2693 /* TODO: Add 128b support if it's ever used. Involves modifying
2694 read/write_abstract_arg() to work on two 64b values. */
2695 LOG_ERROR("Unsupported size: %d bits", width);
2696 return ERROR_FAIL;
2697 }
2698
2699 /* Create the command (physical address, postincrement, write) */
2700 uint32_t command = access_memory_command(target, false, width, true, true);
2701
2702 /* Execute the writes */
2703 const uint8_t *p = buffer;
2704 bool updateaddr = true;
2705 for (uint32_t c = 0; c < count; c++) {
2706 /* Move data to arg0 */
2707 riscv_reg_t value = buf_get_u64(p, 0, 8 * size);
2708 result = write_abstract_arg(target, 0, value, riscv_xlen(target));
2709 if (result != ERROR_OK) {
2710 LOG_ERROR("Failed to write arg0 during write_memory_abstract().");
2711 return result;
2712 }
2713
2714 /* Only update the address initially and let postincrement update it */
2715 if (updateaddr) {
2716 /* Set arg1 to the address: address + c * size */
2717 result = write_abstract_arg(target, 1, address, riscv_xlen(target));
2718 if (result != ERROR_OK) {
2719 LOG_ERROR("Failed to write arg1 during write_memory_abstract().");
2720 return result;
2721 }
2722 }
2723
2724 /* Execute the command */
2725 result = execute_abstract_command(target, command);
2726 if (result != ERROR_OK) {
2727 LOG_ERROR("Failed to execute command write_memory_abstract().");
2728 return result;
2729 }
2730
2731 updateaddr = false;
2732 p += size;
2733 }
2734
2735 return result;
2736 }
2737
2738 /**
2739 * Read the requested memory, taking care to execute every read exactly once,
2740 * even if cmderr=busy is encountered.
2741 */
2742 static int read_memory_progbuf_inner(struct target *target, target_addr_t address,
2743 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
2744 {
2745 RISCV013_INFO(info);
2746
2747 int result = ERROR_OK;
2748
2749 /* Write address to S0. */
2750 result = register_write_direct(target, GDB_REGNO_S0, address);
2751 if (result != ERROR_OK)
2752 return result;
2753
2754 if (increment == 0 &&
2755 register_write_direct(target, GDB_REGNO_S2, 0) != ERROR_OK)
2756 return ERROR_FAIL;
2757
2758 uint32_t command = access_register_command(target, GDB_REGNO_S1,
2759 riscv_xlen(target),
2760 AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
2761 if (execute_abstract_command(target, command) != ERROR_OK)
2762 return ERROR_FAIL;
2763
2764 /* First read has just triggered. Result is in s1. */
2765 if (count == 1) {
2766 uint64_t value;
2767 if (register_read_direct(target, &value, GDB_REGNO_S1) != ERROR_OK)
2768 return ERROR_FAIL;
2769 buf_set_u64(buffer, 0, 8 * size, value);
2770 log_memory_access(address, value, size, true);
2771 return ERROR_OK;
2772 }
2773
2774 if (dmi_write(target, DM_ABSTRACTAUTO,
2775 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET) != ERROR_OK)
2776 goto error;
2777 /* Read garbage from dmi_data0, which triggers another execution of the
2778 * program. Now dmi_data0 contains the first good result, and s1 the next
2779 * memory value. */
2780 if (dmi_read_exec(target, NULL, DM_DATA0) != ERROR_OK)
2781 goto error;
2782
2783 /* read_addr is the next address that the hart will read from, which is the
2784 * value in s0. */
2785 unsigned index = 2;
2786 while (index < count) {
2787 riscv_addr_t read_addr = address + index * increment;
2788 LOG_DEBUG("i=%d, count=%d, read_addr=0x%" PRIx64, index, count, read_addr);
2789 /* The pipeline looks like this:
2790 * memory -> s1 -> dm_data0 -> debugger
2791 * Right now:
2792 * s0 contains read_addr
2793 * s1 contains mem[read_addr-size]
2794 * dm_data0 contains mem[read_addr-size*2]
2795 */
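/* Concrete example (illustrative values only): reading 4-byte words from
 * 0x1000 with increment=4, at index==2 we have s0=0x1008, s1=mem[0x1004]
 * and dm_data0=mem[0x1000]; the batch below drains dm_data0 while
 * abstractauto keeps refilling the pipeline. */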
2796
2797 struct riscv_batch *batch = riscv_batch_alloc(target, 32,
2798 info->dmi_busy_delay + info->ac_busy_delay);
2799 if (!batch)
2800 return ERROR_FAIL;
2801
2802 unsigned reads = 0;
2803 for (unsigned j = index; j < count; j++) {
2804 if (size > 4)
2805 riscv_batch_add_dmi_read(batch, DM_DATA1);
2806 riscv_batch_add_dmi_read(batch, DM_DATA0);
2807
2808 reads++;
2809 if (riscv_batch_full(batch))
2810 break;
2811 }
2812
2813 batch_run(target, batch);
2814
2815 /* Wait for the target to finish performing the last abstract command,
2816 * and update our copy of cmderr. If we see that DMI is busy here,
2817 * dmi_busy_delay will be incremented. */
2818 uint32_t abstractcs;
2819 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
2820 return ERROR_FAIL;
2821 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
2822 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
2823 return ERROR_FAIL;
2824 info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
2825
2826 unsigned next_index;
2827 unsigned ignore_last = 0;
2828 switch (info->cmderr) {
2829 case CMDERR_NONE:
2830 LOG_DEBUG("successful (partial?) memory read");
2831 next_index = index + reads;
2832 break;
2833 case CMDERR_BUSY:
2834 LOG_DEBUG("memory read resulted in busy response");
2835
2836 increase_ac_busy_delay(target);
2837 riscv013_clear_abstract_error(target);
2838
2839 dmi_write(target, DM_ABSTRACTAUTO, 0);
2840
2841 uint32_t dmi_data0, dmi_data1 = 0;
2842 /* This is definitely a good version of the value that we
2843 * attempted to read when we discovered that the target was
2844 * busy. */
2845 if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK) {
2846 riscv_batch_free(batch);
2847 goto error;
2848 }
2849 if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK) {
2850 riscv_batch_free(batch);
2851 goto error;
2852 }
2853
2854 /* See how far we got, clobbering dmi_data0. */
2855 if (increment == 0) {
2856 uint64_t counter;
2857 result = register_read_direct(target, &counter, GDB_REGNO_S2);
2858 next_index = counter;
2859 } else {
2860 uint64_t next_read_addr;
2861 result = register_read_direct(target, &next_read_addr,
2862 GDB_REGNO_S0);
2863 next_index = (next_read_addr - address) / increment;
2864 }
2865 if (result != ERROR_OK) {
2866 riscv_batch_free(batch);
2867 goto error;
2868 }
2869
2870 uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
2871 buf_set_u64(buffer + (next_index - 2) * size, 0, 8 * size, value64);
2872 log_memory_access(address + (next_index - 2) * size, value64, size, true);
2873
2874 /* Restore the command, and execute it.
2875 * Now DM_DATA0 contains the next value just as it would if no
2876 * error had occurred. */
2877 dmi_write_exec(target, DM_COMMAND, command, true);
2878 next_index++;
2879
2880 dmi_write(target, DM_ABSTRACTAUTO,
2881 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
2882
2883 ignore_last = 1;
2884
2885 break;
2886 default:
2887 LOG_DEBUG("error when reading memory, abstractcs=0x%08lx", (long)abstractcs);
2888 riscv013_clear_abstract_error(target);
2889 riscv_batch_free(batch);
2890 result = ERROR_FAIL;
2891 goto error;
2892 }
2893
2894 /* Now read whatever we got out of the batch. */
2895 dmi_status_t status = DMI_STATUS_SUCCESS;
2896 unsigned read = 0;
2897 assert(index >= 2);
2898 for (unsigned j = index - 2; j < index + reads; j++) {
2899 assert(j < count);
2900 LOG_DEBUG("index=%d, reads=%d, next_index=%d, ignore_last=%d, j=%d",
2901 index, reads, next_index, ignore_last, j);
2902 if (j + 3 + ignore_last > next_index)
2903 break;
2904
2905 status = riscv_batch_get_dmi_read_op(batch, read);
2906 uint64_t value = riscv_batch_get_dmi_read_data(batch, read);
2907 read++;
2908 if (status != DMI_STATUS_SUCCESS) {
2909 /* If we're here because of busy count, dmi_busy_delay will
2910 * already have been increased and busy state will have been
2911 * cleared in dmi_read(). */
2912 /* In at least some implementations, we issue a read, and then
2913 * can get busy back when we try to scan out the read result,
2914 * and the actual read value is lost forever. Since this is
2915 * rare in any case, we return error here and rely on our
2916 * caller to reread the entire block. */
2917 LOG_WARNING("Batch memory read encountered DMI error %d. "
2918 "Falling back on slower reads.", status);
2919 riscv_batch_free(batch);
2920 result = ERROR_FAIL;
2921 goto error;
2922 }
2923 if (size > 4) {
2924 status = riscv_batch_get_dmi_read_op(batch, read);
2925 if (status != DMI_STATUS_SUCCESS) {
2926 LOG_WARNING("Batch memory read encountered DMI error %d. "
2927 "Falling back on slower reads.", status);
2928 riscv_batch_free(batch);
2929 result = ERROR_FAIL;
2930 goto error;
2931 }
2932 value <<= 32;
2933 value |= riscv_batch_get_dmi_read_data(batch, read);
2934 read++;
2935 }
2936 riscv_addr_t offset = j * size;
2937 buf_set_u64(buffer + offset, 0, 8 * size, value);
2938 log_memory_access(address + j * increment, value, size, true);
2939 }
2940
2941 index = next_index;
2942
2943 riscv_batch_free(batch);
2944 }
2945
2946 dmi_write(target, DM_ABSTRACTAUTO, 0);
2947
2948 if (count > 1) {
2949 /* Read the penultimate word. */
2950 uint32_t dmi_data0, dmi_data1 = 0;
2951 if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK)
2952 return ERROR_FAIL;
2953 if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK)
2954 return ERROR_FAIL;
2955 uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
2956 buf_set_u64(buffer + size * (count - 2), 0, 8 * size, value64);
2957 log_memory_access(address + size * (count - 2), value64, size, true);
2958 }
2959
2960 /* Read the last word. */
2961 uint64_t value;
2962 result = register_read_direct(target, &value, GDB_REGNO_S1);
2963 if (result != ERROR_OK)
2964 goto error;
2965 buf_set_u64(buffer + size * (count-1), 0, 8 * size, value);
2966 log_memory_access(address + size * (count-1), value, size, true);
2967
2968 return ERROR_OK;
2969
2970 error:
2971 dmi_write(target, DM_ABSTRACTAUTO, 0);
2972
2973 return result;
2974 }
2975
2976 /* Only need to save/restore one GPR to read a single word, and the progbuf
2977 * program doesn't need to increment. */
2978 static int read_memory_progbuf_one(struct target *target, target_addr_t address,
2979 uint32_t size, uint8_t *buffer)
2980 {
2981 uint64_t mstatus = 0;
2982 uint64_t mstatus_old = 0;
2983 if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
2984 return ERROR_FAIL;
2985
2986 uint64_t s0;
2987
2988 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
2989 return ERROR_FAIL;
2990
2991 /* Write the program (load, increment) */
2992 struct riscv_program program;
2993 riscv_program_init(&program, target);
2994 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
2995 riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
2996 switch (size) {
2997 case 1:
2998 riscv_program_lbr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
2999 break;
3000 case 2:
3001 riscv_program_lhr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3002 break;
3003 case 4:
3004 riscv_program_lwr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3005 break;
3006 case 8:
3007 riscv_program_ldr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3008 break;
3009 default:
3010 LOG_ERROR("Unsupported size: %d", size);
3011 return ERROR_FAIL;
3012 }
3013 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3014 riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3015
3016 if (riscv_program_ebreak(&program) != ERROR_OK)
3017 return ERROR_FAIL;
3018 if (riscv_program_write(&program) != ERROR_OK)
3019 return ERROR_FAIL;
3020
3021 /* Write address to S0, and execute buffer. */
3022 if (write_abstract_arg(target, 0, address, riscv_xlen(target)) != ERROR_OK)
3023 return ERROR_FAIL;
3024 uint32_t command = access_register_command(target, GDB_REGNO_S0,
3025 riscv_xlen(target), AC_ACCESS_REGISTER_WRITE |
3026 AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
3027 if (execute_abstract_command(target, command) != ERROR_OK)
3028 return ERROR_FAIL;
3029
3030 uint64_t value;
3031 if (register_read(target, &value, GDB_REGNO_S0) != ERROR_OK)
3032 return ERROR_FAIL;
3033 buf_set_u64(buffer, 0, 8 * size, value);
3034 log_memory_access(address, value, size, true);
3035
3036 if (riscv_set_register(target, GDB_REGNO_S0, s0) != ERROR_OK)
3037 return ERROR_FAIL;
3038
3039 /* Restore MSTATUS */
3040 if (mstatus != mstatus_old)
3041 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old) != ERROR_OK)
3042 return ERROR_FAIL;
3043
3044 return ERROR_OK;
3045 }
3046
3047 /**
3048 * Read the requested memory, silently handling memory access errors.
3049 */
3050 static int read_memory_progbuf(struct target *target, target_addr_t address,
3051 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3052 {
3053 if (riscv_xlen(target) < size * 8) {
3054 LOG_ERROR("XLEN (%d) is too short for %d-bit memory read.",
3055 riscv_xlen(target), size * 8);
3056 return ERROR_FAIL;
3057 }
3058
3059 int result = ERROR_OK;
3060
3061 LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
3062 size, address);
3063
3064 select_dmi(target);
3065
3066 memset(buffer, 0, count*size);
3067
3068 if (execute_fence(target) != ERROR_OK)
3069 return ERROR_FAIL;
3070
3071 if (count == 1)
3072 return read_memory_progbuf_one(target, address, size, buffer);
3073
3074 uint64_t mstatus = 0;
3075 uint64_t mstatus_old = 0;
3076 if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
3077 return ERROR_FAIL;
3078
3079 /* s0 holds the next address to write to
3080 * s1 holds the next data value to write
3081 * s2 is a counter in case increment is 0
3082 */
3083 uint64_t s0, s1, s2;
3084 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
3085 return ERROR_FAIL;
3086 if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
3087 return ERROR_FAIL;
3088 if (increment == 0 && register_read(target, &s2, GDB_REGNO_S2) != ERROR_OK)
3089 return ERROR_FAIL;
3090
3091 /* Write the program (load, increment) */
3092 struct riscv_program program;
3093 riscv_program_init(&program, target);
3094 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3095 riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3096
3097 switch (size) {
3098 case 1:
3099 riscv_program_lbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3100 break;
3101 case 2:
3102 riscv_program_lhr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3103 break;
3104 case 4:
3105 riscv_program_lwr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3106 break;
3107 case 8:
3108 riscv_program_ldr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3109 break;
3110 default:
3111 LOG_ERROR("Unsupported size: %d", size);
3112 return ERROR_FAIL;
3113 }
3114
3115 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3116 riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3117 if (increment == 0)
3118 riscv_program_addi(&program, GDB_REGNO_S2, GDB_REGNO_S2, 1);
3119 else
3120 riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, increment);
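/* For a 4-byte read with a non-zero increment the program buffer now holds,
 * in effect, "lw s1, 0(s0); addi s0, s0, <increment>" (plus the optional
 * dcsr.mprven toggles); the ebreak is appended just below, and
 * read_memory_progbuf_inner() re-runs this program via abstractauto to
 * stream out consecutive words. */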
3121
3122 if (riscv_program_ebreak(&program) != ERROR_OK)
3123 return ERROR_FAIL;
3124 if (riscv_program_write(&program) != ERROR_OK)
3125 return ERROR_FAIL;
3126
3127 result = read_memory_progbuf_inner(target, address, size, count, buffer, increment);
3128
3129 if (result != ERROR_OK) {
3130 /* The full read did not succeed, so we will try to read each word individually. */
3131 /* This will not be fast, but reading outside actual memory is a special case anyway. */
3132 /* It will make the toolchain happier, especially Eclipse Memory View as it reads ahead. */
3133 target_addr_t address_i = address;
3134 uint32_t count_i = 1;
3135 uint8_t *buffer_i = buffer;
3136
3137 for (uint32_t i = 0; i < count; i++, address_i += increment, buffer_i += size) {
3138 keep_alive();
3139 /* TODO: This is much slower than it needs to be because we end up
3140 * writing the address to read for every word we read. */
3141 result = read_memory_progbuf_inner(target, address_i, size, count_i, buffer_i, increment);
3142
3143 /* The read of a single word failed, so we will just return 0 for that instead */
3144 if (result != ERROR_OK) {
3145 LOG_DEBUG("error reading single word of %d bytes from 0x%" TARGET_PRIxADDR,
3146 size, address_i);
3147
3148 buf_set_u64(buffer_i, 0, 8 * size, 0);
3149 }
3150 }
3151 result = ERROR_OK;
3152 }
3153
3154 riscv_set_register(target, GDB_REGNO_S0, s0);
3155 riscv_set_register(target, GDB_REGNO_S1, s1);
3156 if (increment == 0)
3157 riscv_set_register(target, GDB_REGNO_S2, s2);
3158
3159 /* Restore MSTATUS */
3160 if (mstatus != mstatus_old)
3161 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old) != ERROR_OK)
3162 return ERROR_FAIL;
3163
3164 return result;
3165 }
3166
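/* Dispatch a memory read to the best available mechanism: the program buffer
 * when it is large enough (unless the user asked to prefer system bus access),
 * then the system bus if it supports this access size, then the program
 * buffer again, and finally abstract access-memory commands as a last
 * resort. */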
3167 static int read_memory(struct target *target, target_addr_t address,
3168 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3169 {
3170 if (count == 0)
3171 return ERROR_OK;
3172
3173 RISCV013_INFO(info);
3174 if (has_sufficient_progbuf(target, 3) && !riscv_prefer_sba)
3175 return read_memory_progbuf(target, address, size, count, buffer,
3176 increment);
3177
3178 if ((get_field(info->sbcs, DM_SBCS_SBACCESS8) && size == 1) ||
3179 (get_field(info->sbcs, DM_SBCS_SBACCESS16) && size == 2) ||
3180 (get_field(info->sbcs, DM_SBCS_SBACCESS32) && size == 4) ||
3181 (get_field(info->sbcs, DM_SBCS_SBACCESS64) && size == 8) ||
3182 (get_field(info->sbcs, DM_SBCS_SBACCESS128) && size == 16)) {
3183 if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
3184 return read_memory_bus_v0(target, address, size, count, buffer,
3185 increment);
3186 else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
3187 return read_memory_bus_v1(target, address, size, count, buffer,
3188 increment);
3189 }
3190
3191 if (has_sufficient_progbuf(target, 3))
3192 return read_memory_progbuf(target, address, size, count, buffer,
3193 increment);
3194
3195 return read_memory_abstract(target, address, size, count, buffer,
3196 increment);
3197 }
3198
3199 static int write_memory_bus_v0(struct target *target, target_addr_t address,
3200 uint32_t size, uint32_t count, const uint8_t *buffer)
3201 {
3202 /* 1) Write sbaddress: for single write and autoincrement, we need to write the address once. */
3203 LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
3204 TARGET_PRIxADDR, size, count, address);
3205 dmi_write(target, DM_SBADDRESS0, address);
3206 int64_t value = 0;
3207 int64_t access = 0;
3208 riscv_addr_t offset = 0;
3209 riscv_addr_t t_addr = 0;
3210 const uint8_t *t_buffer = buffer + offset;
3211
3212 /* B.8 Writing Memory, single write: check if we can write it in one go. */
3213 if (count == 1) { /* a single element of 'size' bytes */
3214 value = buf_get_u64(t_buffer, 0, 8 * size);
3215
3216 access = 0;
3217 access = set_field(access, DM_SBCS_SBACCESS, size/2);
3218 dmi_write(target, DM_SBCS, access);
3219 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
3220 LOG_DEBUG("\r\nwrite_memory:SAB: ONE OFF: value 0x%08" PRIx64, value);
3221 dmi_write(target, DM_SBDATA0, value);
3222 return ERROR_OK;
3223 }
3224
3225 /* B.8 Writing Memory, using autoincrement. */
3226
3227 access = 0;
3228 access = set_field(access, DM_SBCS_SBACCESS, size/2);
3229 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
3230 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
3231 dmi_write(target, DM_SBCS, access);
3232
3233 /* 2) Set the value according to the size required and write. */
3234 for (riscv_addr_t i = 0; i < count; ++i) {
3235 offset = size*i;
3236 /* for monitoring only */
3237 t_addr = address + offset;
3238 t_buffer = buffer + offset;
3239
3240 value = buf_get_u64(t_buffer, 0, 8 * size);
3241 LOG_DEBUG("SAB:autoincrement: expected address: 0x%08x value: 0x%08x"
3242 PRIx64, (uint32_t)t_addr, (uint32_t)value);
3243 dmi_write(target, DM_SBDATA0, value);
3244 }
3245 /* Reset the autoincrement when finished (something weird happens if this is not done at the end). */
3246 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 0);
3247 dmi_write(target, DM_SBCS, access);
3248
3249 return ERROR_OK;
3250 }
3251
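/* Write memory through the v1 system bus interface: sbcs is set up for
 * autoincrement, the start address is written once, and sbdata3..sbdata0 are
 * then pushed for each element in DMI batches (the write to sbdata0 is what
 * triggers the bus write). On sbbusyerror or a busy DMI, the code below backs
 * off and resumes from the address the DM reports. */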
3252 static int write_memory_bus_v1(struct target *target, target_addr_t address,
3253 uint32_t size, uint32_t count, const uint8_t *buffer)
3254 {
3255 RISCV013_INFO(info);
3256 uint32_t sbcs = sb_sbaccess(size);
3257 sbcs = set_field(sbcs, DM_SBCS_SBAUTOINCREMENT, 1);
3258 dmi_write(target, DM_SBCS, sbcs);
3259
3260 target_addr_t next_address = address;
3261 target_addr_t end_address = address + count * size;
3262
3263 int result;
3264
3265 sb_write_address(target, next_address);
3266 while (next_address < end_address) {
3267 LOG_DEBUG("transferring burst starting at address 0x%" TARGET_PRIxADDR,
3268 next_address);
3269
3270 struct riscv_batch *batch = riscv_batch_alloc(
3271 target,
3272 32,
3273 info->dmi_busy_delay + info->bus_master_write_delay);
3274 if (!batch)
3275 return ERROR_FAIL;
3276
3277 for (uint32_t i = (next_address - address) / size; i < count; i++) {
3278 const uint8_t *p = buffer + i * size;
3279
3280 if (riscv_batch_available_scans(batch) < (size + 3) / 4)
3281 break;
3282
3283 if (size > 12)
3284 riscv_batch_add_dmi_write(batch, DM_SBDATA3,
3285 ((uint32_t) p[12]) |
3286 (((uint32_t) p[13]) << 8) |
3287 (((uint32_t) p[14]) << 16) |
3288 (((uint32_t) p[15]) << 24));
3289
3290 if (size > 8)
3291 riscv_batch_add_dmi_write(batch, DM_SBDATA2,
3292 ((uint32_t) p[8]) |
3293 (((uint32_t) p[9]) << 8) |
3294 (((uint32_t) p[10]) << 16) |
3295 (((uint32_t) p[11]) << 24));
3296 if (size > 4)
3297 riscv_batch_add_dmi_write(batch, DM_SBDATA1,
3298 ((uint32_t) p[4]) |
3299 (((uint32_t) p[5]) << 8) |
3300 (((uint32_t) p[6]) << 16) |
3301 (((uint32_t) p[7]) << 24));
3302 uint32_t value = p[0];
3303 if (size > 2) {
3304 value |= ((uint32_t) p[2]) << 16;
3305 value |= ((uint32_t) p[3]) << 24;
3306 }
3307 if (size > 1)
3308 value |= ((uint32_t) p[1]) << 8;
3309 riscv_batch_add_dmi_write(batch, DM_SBDATA0, value);
3310
3311 log_memory_access(address + i * size, value, size, false);
3312 next_address += size;
3313 }
3314
3315 result = batch_run(target, batch);
3316 riscv_batch_free(batch);
3317 if (result != ERROR_OK)
3318 return result;
3319
3320 bool dmi_busy_encountered;
3321 if (dmi_op(target, &sbcs, &dmi_busy_encountered, DMI_OP_READ,
3322 DM_SBCS, 0, false, false) != ERROR_OK)
3323 return ERROR_FAIL;
3324
3325 time_t start = time(NULL);
3326 bool dmi_busy = dmi_busy_encountered;
3327 while (get_field(sbcs, DM_SBCS_SBBUSY) || dmi_busy) {
3328 if (time(NULL) - start > riscv_command_timeout_sec) {
3329 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
3330 "Increase the timeout with riscv set_command_timeout_sec.",
3331 riscv_command_timeout_sec, sbcs);
3332 return ERROR_FAIL;
3333 }
3334
3335 if (dmi_op(target, &sbcs, &dmi_busy, DMI_OP_READ,
3336 DM_SBCS, 0, false, true) != ERROR_OK)
3337 return ERROR_FAIL;
3338 }
3339
3340 if (get_field(sbcs, DM_SBCS_SBBUSYERROR)) {
3341 /* We wrote while the target was busy. Slow down and try again. */
3342 dmi_write(target, DM_SBCS, DM_SBCS_SBBUSYERROR);
3343 info->bus_master_write_delay += info->bus_master_write_delay / 10 + 1;
3344 }
3345
3346 if (get_field(sbcs, DM_SBCS_SBBUSYERROR) || dmi_busy_encountered) {
3347 next_address = sb_read_address(target);
3348 if (next_address < address) {
3349 /* This should never happen, probably buggy hardware. */
3350 LOG_DEBUG("unexpected system bus address 0x%" TARGET_PRIxADDR,
3351 next_address);
3352 return ERROR_FAIL;
3353 }
3354
3355 continue;
3356 }
3357
3358 unsigned error = get_field(sbcs, DM_SBCS_SBERROR);
3359 if (error != 0) {
3360 /* Some error indicating the bus access failed, but not because of
3361 * something we did wrong. */
3362 dmi_write(target, DM_SBCS, DM_SBCS_SBERROR);
3363 return ERROR_FAIL;
3364 }
3365 }
3366
3367 return ERROR_OK;
3368 }
3369
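/* Write memory using the program buffer: a store/increment loop is loaded
 * into progbuf, s0 tracks the target address and s1 carries each data word,
 * and abstractauto re-runs the program for every write to data0/data1.
 * s0, s1 and mstatus are restored before returning. */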
3370 static int write_memory_progbuf(struct target *target, target_addr_t address,
3371 uint32_t size, uint32_t count, const uint8_t *buffer)
3372 {
3373 RISCV013_INFO(info);
3374
3375 if (riscv_xlen(target) < size * 8) {
3376 LOG_ERROR("XLEN (%d) is too short for %d-bit memory write.",
3377 riscv_xlen(target), size * 8);
3378 return ERROR_FAIL;
3379 }
3380
3381 LOG_DEBUG("writing %d words of %d bytes to 0x%08lx", count, size, (long)address);
3382
3383 select_dmi(target);
3384
3385 uint64_t mstatus = 0;
3386 uint64_t mstatus_old = 0;
3387 if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
3388 return ERROR_FAIL;
3389
3390 /* s0 holds the next address to write to
3391 * s1 holds the next data value to write
3392 */
3393
3394 int result = ERROR_OK;
3395 uint64_t s0, s1;
3396 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
3397 return ERROR_FAIL;
3398 if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
3399 return ERROR_FAIL;
3400
3401 /* Write the program (store, increment) */
3402 struct riscv_program program;
3403 riscv_program_init(&program, target);
3404 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3405 riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3406
3407 switch (size) {
3408 case 1:
3409 riscv_program_sbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3410 break;
3411 case 2:
3412 riscv_program_shr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3413 break;
3414 case 4:
3415 riscv_program_swr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3416 break;
3417 case 8:
3418 riscv_program_sdr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3419 break;
3420 default:
3421 LOG_ERROR("write_memory_progbuf(): Unsupported size: %d", size);
3422 result = ERROR_FAIL;
3423 goto error;
3424 }
3425
3426 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3427 riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3428 riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, size);
3429
3430 result = riscv_program_ebreak(&program);
3431 if (result != ERROR_OK)
3432 goto error;
3433 riscv_program_write(&program);
3434
3435 riscv_addr_t cur_addr = address;
3436 riscv_addr_t fin_addr = address + (count * size);
3437 bool setup_needed = true;
3438 LOG_DEBUG("writing until final address 0x%016" PRIx64, fin_addr);
3439 while (cur_addr < fin_addr) {
3440 LOG_DEBUG("transferring burst starting at address 0x%016" PRIx64,
3441 cur_addr);
3442
3443 struct riscv_batch *batch = riscv_batch_alloc(
3444 target,
3445 32,
3446 info->dmi_busy_delay + info->ac_busy_delay);
3447 if (!batch) {
3448 result = ERROR_FAIL;
goto error;
}
3449
3450 /* To write another word, we put it in S1 and execute the program. */
3451 unsigned start = (cur_addr - address) / size;
3452 for (unsigned i = start; i < count; ++i) {
3453 unsigned offset = size*i;
3454 const uint8_t *t_buffer = buffer + offset;
3455
3456 uint64_t value = buf_get_u64(t_buffer, 0, 8 * size);
3457
3458 log_memory_access(address + offset, value, size, false);
3459 cur_addr += size;
3460
3461 if (setup_needed) {
3462 result = register_write_direct(target, GDB_REGNO_S0,
3463 address + offset);
3464 if (result != ERROR_OK) {
3465 riscv_batch_free(batch);
3466 goto error;
3467 }
3468
3469 /* Write value. */
3470 if (size > 4)
3471 dmi_write(target, DM_DATA1, value >> 32);
3472 dmi_write(target, DM_DATA0, value);
3473
3474 /* Write and execute command that moves value into S1 and
3475 * executes program buffer. */
3476 uint32_t command = access_register_command(target,
3477 GDB_REGNO_S1, riscv_xlen(target),
3478 AC_ACCESS_REGISTER_POSTEXEC |
3479 AC_ACCESS_REGISTER_TRANSFER |
3480 AC_ACCESS_REGISTER_WRITE);
3481 result = execute_abstract_command(target, command);
3482 if (result != ERROR_OK) {
3483 riscv_batch_free(batch);
3484 goto error;
3485 }
3486
3487 /* Turn on autoexec */
3488 dmi_write(target, DM_ABSTRACTAUTO,
3489 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
3490
3491 setup_needed = false;
3492 } else {
3493 if (size > 4)
3494 riscv_batch_add_dmi_write(batch, DM_DATA1, value >> 32);
3495 riscv_batch_add_dmi_write(batch, DM_DATA0, value);
3496 if (riscv_batch_full(batch))
3497 break;
3498 }
3499 }
3500
3501 result = batch_run(target, batch);
3502 riscv_batch_free(batch);
3503 if (result != ERROR_OK)
3504 goto error;
3505
3506 /* Note that if the scan resulted in a Busy DMI response, it
3507 * is this read of abstractcs that will cause the dmi_busy_delay
3508 * to be incremented if necessary. */
3509
3510 uint32_t abstractcs;
3511 bool dmi_busy_encountered;
3512 result = dmi_op(target, &abstractcs, &dmi_busy_encountered,
3513 DMI_OP_READ, DM_ABSTRACTCS, 0, false, true);
3514 if (result != ERROR_OK)
3515 goto error;
3516 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
3517 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
3518 return ERROR_FAIL;
3519 info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
3520 if (info->cmderr == CMDERR_NONE && !dmi_busy_encountered) {
3521 LOG_DEBUG("successful (partial?) memory write");
3522 } else if (info->cmderr == CMDERR_BUSY || dmi_busy_encountered) {
3523 if (info->cmderr == CMDERR_BUSY)
3524 LOG_DEBUG("Memory write resulted in abstract command busy response.");
3525 else if (dmi_busy_encountered)
3526 LOG_DEBUG("Memory write resulted in DMI busy response.");
3527 riscv013_clear_abstract_error(target);
3528 increase_ac_busy_delay(target);
3529
3530 dmi_write(target, DM_ABSTRACTAUTO, 0);
3531 result = register_read_direct(target, &cur_addr, GDB_REGNO_S0);
3532 if (result != ERROR_OK)
3533 goto error;
3534 setup_needed = true;
3535 } else {
3536 LOG_ERROR("error when writing memory, abstractcs=0x%08lx", (long)abstractcs);
3537 riscv013_clear_abstract_error(target);
3538 result = ERROR_FAIL;
3539 goto error;
3540 }
3541 }
3542
3543 error:
3544 dmi_write(target, DM_ABSTRACTAUTO, 0);
3545
3546 if (register_write_direct(target, GDB_REGNO_S1, s1) != ERROR_OK)
3547 return ERROR_FAIL;
3548 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
3549 return ERROR_FAIL;
3550
3551 /* Restore MSTATUS */
3552 if (mstatus != mstatus_old)
3553 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
3554 return ERROR_FAIL;
3555
3556 if (execute_fence(target) != ERROR_OK)
3557 return ERROR_FAIL;
3558
3559 return result;
3560 }
3561
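/* Dispatch a memory write to the preferred access mechanism: program buffer
 * by default, system bus when preferred or when the program buffer is too
 * small, and abstract access as a last resort. */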
3562 static int write_memory(struct target *target, target_addr_t address,
3563 uint32_t size, uint32_t count, const uint8_t *buffer)
3564 {
3565 RISCV013_INFO(info);
3566
3567 if (has_sufficient_progbuf(target, 3) && !riscv_prefer_sba)
3568 return write_memory_progbuf(target, address, size, count, buffer);
3569
3570 if ((get_field(info->sbcs, DM_SBCS_SBACCESS8) && size == 1) ||
3571 (get_field(info->sbcs, DM_SBCS_SBACCESS16) && size == 2) ||
3572 (get_field(info->sbcs, DM_SBCS_SBACCESS32) && size == 4) ||
3573 (get_field(info->sbcs, DM_SBCS_SBACCESS64) && size == 8) ||
3574 (get_field(info->sbcs, DM_SBCS_SBACCESS128) && size == 16)) {
3575 if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
3576 return write_memory_bus_v0(target, address, size, count, buffer);
3577 else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
3578 return write_memory_bus_v1(target, address, size, count, buffer);
3579 }
3580
3581 if (has_sufficient_progbuf(target, 3))
3582 return write_memory_progbuf(target, address, size, count, buffer);
3583
3584 return write_memory_abstract(target, address, size, count, buffer);
3585 }
3586
3587 static int arch_state(struct target *target)
3588 {
3589 return ERROR_OK;
3590 }
3591
3592 struct target_type riscv013_target = {
3593 .name = "riscv",
3594
3595 .init_target = init_target,
3596 .deinit_target = deinit_target,
3597 .examine = examine,
3598
3599 .poll = &riscv_openocd_poll,
3600 .halt = &riscv_halt,
3601 .step = &riscv_openocd_step,
3602
3603 .assert_reset = assert_reset,
3604 .deassert_reset = deassert_reset,
3605
3606 .write_memory = write_memory,
3607
3608 .arch_state = arch_state,
3609 };
3610
3611 /*** 0.13-specific implementations of various RISC-V helper functions. ***/
3612 static int riscv013_get_register(struct target *target,
3613 riscv_reg_t *value, int hid, int rid)
3614 {
3615 LOG_DEBUG("[%d] reading register %s on hart %d", target->coreid,
3616 gdb_regno_name(rid), hid);
3617
3618 riscv_set_current_hartid(target, hid);
3619
3620 int result = ERROR_OK;
3621 if (rid == GDB_REGNO_PC) {
3622 /* TODO: move this into riscv.c. */
3623 result = register_read(target, value, GDB_REGNO_DPC);
3624 LOG_DEBUG("[%d] read PC from DPC: 0x%" PRIx64, target->coreid, *value);
3625 } else if (rid == GDB_REGNO_PRIV) {
3626 uint64_t dcsr;
3627 /* TODO: move this into riscv.c. */
3628 result = register_read(target, &dcsr, GDB_REGNO_DCSR);
3629 *value = get_field(dcsr, CSR_DCSR_PRV);
3630 } else {
3631 result = register_read(target, value, rid);
3632 if (result != ERROR_OK)
3633 *value = -1;
3634 }
3635
3636 return result;
3637 }
3638
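/* Write a register on the given hart. PC writes go through DPC and are read
 * back to confirm the value stuck; PRIV writes update dcsr.prv. */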
3639 static int riscv013_set_register(struct target *target, int hid, int rid, uint64_t value)
3640 {
3641 LOG_DEBUG("[%d] writing 0x%" PRIx64 " to register %s on hart %d",
3642 target->coreid, value, gdb_regno_name(rid), hid);
3643
3644 riscv_set_current_hartid(target, hid);
3645
3646 if (rid <= GDB_REGNO_XPR31) {
3647 return register_write_direct(target, rid, value);
3648 } else if (rid == GDB_REGNO_PC) {
3649 LOG_DEBUG("[%d] writing PC to DPC: 0x%" PRIx64, target->coreid, value);
3650 register_write_direct(target, GDB_REGNO_DPC, value);
3651 uint64_t actual_value;
3652 register_read_direct(target, &actual_value, GDB_REGNO_DPC);
3653 LOG_DEBUG("[%d] actual DPC written: 0x%016" PRIx64, target->coreid, actual_value);
3654 if (value != actual_value) {
3655 LOG_ERROR("Written PC (0x%" PRIx64 ") does not match read back "
3656 "value (0x%" PRIx64 ")", value, actual_value);
3657 return ERROR_FAIL;
3658 }
3659 } else if (rid == GDB_REGNO_PRIV) {
3660 uint64_t dcsr;
3661 register_read(target, &dcsr, GDB_REGNO_DCSR);
3662 dcsr = set_field(dcsr, CSR_DCSR_PRV, value);
3663 return register_write_direct(target, GDB_REGNO_DCSR, dcsr);
3664 } else {
3665 return register_write_direct(target, rid, value);
3666 }
3667
3668 return ERROR_OK;
3669 }
3670
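/* Point hartsel in dmcontrol at the current hart, if it is not already
 * selected on this DM. */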
3671 static int riscv013_select_current_hart(struct target *target)
3672 {
3673 RISCV_INFO(r);
3674
3675 dm013_info_t *dm = get_dm(target);
3676 if (!dm)
3677 return ERROR_FAIL;
3678 if (r->current_hartid == dm->current_hartid)
3679 return ERROR_OK;
3680
3681 uint32_t dmcontrol;
3682 /* TODO: can't we just "dmcontrol = DMI_DMACTIVE"? */
3683 if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
3684 return ERROR_FAIL;
3685 dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
3686 int result = dmi_write(target, DM_DMCONTROL, dmcontrol);
3687 dm->current_hartid = r->current_hartid;
3688 return result;
3689 }
3690
3691 /* Select all harts that were prepped and that are selectable, clearing the
3692 * prepped flag on the harts that actually were selected. */
3693 static int select_prepped_harts(struct target *target, bool *use_hasel)
3694 {
3695 dm013_info_t *dm = get_dm(target);
3696 if (!dm)
3697 return ERROR_FAIL;
3698 if (!dm->hasel_supported) {
3699 RISCV_INFO(r);
3700 r->prepped = false;
3701 *use_hasel = false;
3702 return ERROR_OK;
3703 }
3704
3705 assert(dm->hart_count);
3706 unsigned hawindow_count = (dm->hart_count + 31) / 32;
3707 uint32_t hawindow[hawindow_count];
3708
3709 memset(hawindow, 0, sizeof(uint32_t) * hawindow_count);
3710
3711 target_list_t *entry;
3712 unsigned total_selected = 0;
3713 list_for_each_entry(entry, &dm->target_list, list) {
3714 struct target *t = entry->target;
3715 riscv_info_t *r = riscv_info(t);
3716 riscv013_info_t *info = get_info(t);
3717 unsigned index = info->index;
3718 LOG_DEBUG("index=%d, coreid=%d, prepped=%d", index, t->coreid, r->prepped);
3719 r->selected = r->prepped;
3720 if (r->prepped) {
3721 hawindow[index / 32] |= 1 << (index % 32);
3722 r->prepped = false;
3723 total_selected++;
3724 }
3725 index++;
3726 }
3727
3728 /* Don't use hasel if we only need to talk to one hart. */
3729 if (total_selected <= 1) {
3730 *use_hasel = false;
3731 return ERROR_OK;
3732 }
3733
3734 for (unsigned i = 0; i < hawindow_count; i++) {
3735 if (dmi_write(target, DM_HAWINDOWSEL, i) != ERROR_OK)
3736 return ERROR_FAIL;
3737 if (dmi_write(target, DM_HAWINDOW, hawindow[i]) != ERROR_OK)
3738 return ERROR_FAIL;
3739 }
3740
3741 *use_hasel = true;
3742 return ERROR_OK;
3743 }
3744
3745 static int riscv013_halt_prep(struct target *target)
3746 {
3747 return ERROR_OK;
3748 }
3749
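/* Issue a halt request (optionally to all prepped harts via hasel) and wait
 * for the selected hart(s) to report halted. */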
3750 static int riscv013_halt_go(struct target *target)
3751 {
3752 bool use_hasel = false;
3753 if (!riscv_rtos_enabled(target)) {
3754 if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
3755 return ERROR_FAIL;
3756 }
3757
3758 RISCV_INFO(r);
3759 LOG_DEBUG("halting hart %d", r->current_hartid);
3760
3761 /* Issue the halt command, and then wait for the current hart to halt. */
3762 uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_HALTREQ;
3763 if (use_hasel)
3764 dmcontrol |= DM_DMCONTROL_HASEL;
3765 dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
3766 dmi_write(target, DM_DMCONTROL, dmcontrol);
3767 for (size_t i = 0; i < 256; ++i)
3768 if (riscv_is_halted(target))
3769 break;
3770
3771 if (!riscv_is_halted(target)) {
3772 uint32_t dmstatus;
3773 if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
3774 return ERROR_FAIL;
3775 if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
3776 return ERROR_FAIL;
3777
3778 LOG_ERROR("unable to halt hart %d", r->current_hartid);
3779 LOG_ERROR(" dmcontrol=0x%08x", dmcontrol);
3780 LOG_ERROR(" dmstatus =0x%08x", dmstatus);
3781 return ERROR_FAIL;
3782 }
3783
3784 dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HALTREQ, 0);
3785 dmi_write(target, DM_DMCONTROL, dmcontrol);
3786
3787 if (use_hasel) {
3788 target_list_t *entry;
3789 dm013_info_t *dm = get_dm(target);
3790 if (!dm)
3791 return ERROR_FAIL;
3792 list_for_each_entry(entry, &dm->target_list, list) {
3793 struct target *t = entry->target;
3794 t->state = TARGET_HALTED;
3795 if (t->debug_reason == DBG_REASON_NOTHALTED)
3796 t->debug_reason = DBG_REASON_DBGRQ;
3797 }
3798 }
3799 /* The "else" case is handled in halt_go(). */
3800
3801 return ERROR_OK;
3802 }
3803
3804 static int riscv013_resume_go(struct target *target)
3805 {
3806 bool use_hasel = false;
3807 if (!riscv_rtos_enabled(target)) {
3808 if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
3809 return ERROR_FAIL;
3810 }
3811
3812 return riscv013_step_or_resume_current_hart(target, false, use_hasel);
3813 }
3814
3815 static int riscv013_step_current_hart(struct target *target)
3816 {
3817 return riscv013_step_or_resume_current_hart(target, true, false);
3818 }
3819
3820 static int riscv013_resume_prep(struct target *target)
3821 {
3822 return riscv013_on_step_or_resume(target, false);
3823 }
3824
3825 static int riscv013_on_step(struct target *target)
3826 {
3827 return riscv013_on_step_or_resume(target, true);
3828 }
3829
3830 static int riscv013_on_halt(struct target *target)
3831 {
3832 return ERROR_OK;
3833 }
3834
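/* Return true if the current hart is halted. Also reports unavailable and
 * nonexistent harts, and acknowledges (and optionally re-requests a halt
 * after) an unexpected reset seen in dmstatus. */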
3835 static bool riscv013_is_halted(struct target *target)
3836 {
3837 uint32_t dmstatus;
3838 if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
3839 return false;
3840 if (get_field(dmstatus, DM_DMSTATUS_ANYUNAVAIL))
3841 LOG_ERROR("Hart %d is unavailable.", riscv_current_hartid(target));
3842 if (get_field(dmstatus, DM_DMSTATUS_ANYNONEXISTENT))
3843 LOG_ERROR("Hart %d doesn't exist.", riscv_current_hartid(target));
3844 if (get_field(dmstatus, DM_DMSTATUS_ANYHAVERESET)) {
3845 int hartid = riscv_current_hartid(target);
3846 LOG_INFO("Hart %d unexpectedly reset!", hartid);
3847 /* TODO: Can we make this more obvious to eg. a gdb user? */
3848 uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE |
3849 DM_DMCONTROL_ACKHAVERESET;
3850 dmcontrol = set_hartsel(dmcontrol, hartid);
3851 /* If we had been halted when we reset, request another halt. If we
3852 * ended up running out of reset, then the user will (hopefully) get a
3853 * message that a reset happened, that the target is running, and then
3854 * that it is halted again once the request goes through.
3855 */
3856 if (target->state == TARGET_HALTED)
3857 dmcontrol |= DM_DMCONTROL_HALTREQ;
3858 dmi_write(target, DM_DMCONTROL, dmcontrol);
3859 }
3860 return get_field(dmstatus, DM_DMSTATUS_ALLHALTED);
3861 }
3862
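/* Translate dcsr.cause into a riscv_halt_reason for the current hart. */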
3863 static enum riscv_halt_reason riscv013_halt_reason(struct target *target)
3864 {
3865 riscv_reg_t dcsr;
3866 int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
3867 if (result != ERROR_OK)
3868 return RISCV_HALT_UNKNOWN;
3869
3870 switch (get_field(dcsr, CSR_DCSR_CAUSE)) {
3871 case CSR_DCSR_CAUSE_SWBP:
3872 return RISCV_HALT_BREAKPOINT;
3873 case CSR_DCSR_CAUSE_TRIGGER:
3874 /* We could get here before triggers are enumerated if a trigger was
3875 * already set when we connected. Force enumeration now, which has the
3876 * side effect of clearing any triggers we did not set. */
3877 riscv_enumerate_triggers(target);
3878 LOG_DEBUG("{%d} halted because of trigger", target->coreid);
3879 return RISCV_HALT_TRIGGER;
3880 case CSR_DCSR_CAUSE_STEP:
3881 return RISCV_HALT_SINGLESTEP;
3882 case CSR_DCSR_CAUSE_DEBUGINT:
3883 case CSR_DCSR_CAUSE_HALT:
3884 return RISCV_HALT_INTERRUPT;
3885 case CSR_DCSR_CAUSE_GROUP:
3886 return RISCV_HALT_GROUP;
3887 }
3888
3889 LOG_ERROR("Unknown DCSR cause field: %x", (int)get_field(dcsr, CSR_DCSR_CAUSE));
3890 LOG_ERROR(" dcsr=0x%016lx", (long)dcsr);
3891 return RISCV_HALT_UNKNOWN;
3892 }
3893
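/* Write one instruction word into the program buffer, skipping the DMI write
 * when the cached value already matches. */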
3894 int riscv013_write_debug_buffer(struct target *target, unsigned index, riscv_insn_t data)
3895 {
3896 dm013_info_t *dm = get_dm(target);
3897 if (!dm)
3898 return ERROR_FAIL;
3899 if (dm->progbuf_cache[index] != data) {
3900 if (dmi_write(target, DM_PROGBUF0 + index, data) != ERROR_OK)
3901 return ERROR_FAIL;
3902 dm->progbuf_cache[index] = data;
3903 } else {
3904 LOG_DEBUG("cache hit for 0x%" PRIx32 " @%d", data, index);
3905 }
3906 return ERROR_OK;
3907 }
3908
3909 riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned index)
3910 {
3911 uint32_t value;
3912 dmi_read(target, &value, DM_PROGBUF0 + index);
3913 return value;
3914 }
3915
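/* Run the program buffer by issuing an Access Register command with postexec
 * set and transfer cleared. */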
3916 int riscv013_execute_debug_buffer(struct target *target)
3917 {
3918 uint32_t run_program = 0;
3919 run_program = set_field(run_program, AC_ACCESS_REGISTER_AARSIZE, 2);
3920 run_program = set_field(run_program, AC_ACCESS_REGISTER_POSTEXEC, 1);
3921 run_program = set_field(run_program, AC_ACCESS_REGISTER_TRANSFER, 0);
3922 run_program = set_field(run_program, AC_ACCESS_REGISTER_REGNO, 0x1000);
3923
3924 return execute_abstract_command(target, run_program);
3925 }
3926
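/* Fill a raw DMI scan buffer with a write of value d to DM address a. */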
3927 void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d)
3928 {
3929 RISCV013_INFO(info);
3930 buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_WRITE);
3931 buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, d);
3932 buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
3933 }
3934
3935 void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a)
3936 {
3937 RISCV013_INFO(info);
3938 buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_READ);
3939 buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
3940 buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
3941 }
3942
3943 void riscv013_fill_dmi_nop_u64(struct target *target, char *buf)
3944 {
3945 RISCV013_INFO(info);
3946 buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_NOP);
3947 buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
3948 buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, 0);
3949 }
3950
3951 /* Helper function for riscv013_test_sba_config_reg */
3952 static int get_max_sbaccess(struct target *target)
3953 {
3954 RISCV013_INFO(info);
3955
3956 uint32_t sbaccess128 = get_field(info->sbcs, DM_SBCS_SBACCESS128);
3957 uint32_t sbaccess64 = get_field(info->sbcs, DM_SBCS_SBACCESS64);
3958 uint32_t sbaccess32 = get_field(info->sbcs, DM_SBCS_SBACCESS32);
3959 uint32_t sbaccess16 = get_field(info->sbcs, DM_SBCS_SBACCESS16);
3960 uint32_t sbaccess8 = get_field(info->sbcs, DM_SBCS_SBACCESS8);
3961
3962 if (sbaccess128)
3963 return 4;
3964 else if (sbaccess64)
3965 return 3;
3966 else if (sbaccess32)
3967 return 2;
3968 else if (sbaccess16)
3969 return 1;
3970 else if (sbaccess8)
3971 return 0;
3972 else
3973 return -1;
3974 }
3975
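/* Helper function for riscv013_test_sba_config_reg: number of sbdata
 * registers implied by the widest supported system bus access size. */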
3976 static uint32_t get_num_sbdata_regs(struct target *target)
3977 {
3978 RISCV013_INFO(info);
3979
3980 uint32_t sbaccess128 = get_field(info->sbcs, DM_SBCS_SBACCESS128);
3981 uint32_t sbaccess64 = get_field(info->sbcs, DM_SBCS_SBACCESS64);
3982 uint32_t sbaccess32 = get_field(info->sbcs, DM_SBCS_SBACCESS32);
3983
3984 if (sbaccess128)
3985 return 4;
3986 else if (sbaccess64)
3987 return 2;
3988 else if (sbaccess32)
3989 return 1;
3990 else
3991 return 0;
3992 }
3993
3994 static int riscv013_test_sba_config_reg(struct target *target,
3995 target_addr_t legal_address, uint32_t num_words,
3996 target_addr_t illegal_address, bool run_sbbusyerror_test)
3997 {
3998 LOG_INFO("Testing System Bus Access as defined by RISC-V Debug Spec v0.13");
3999
4000 uint32_t tests_failed = 0;
4001
4002 uint32_t rd_val;
4003 uint32_t sbcs_orig;
4004 dmi_read(target, &sbcs_orig, DM_SBCS);
4005
4006 uint32_t sbcs = sbcs_orig;
4007 bool test_passed;
4008
4009 int max_sbaccess = get_max_sbaccess(target);
4010
4011 if (max_sbaccess == -1) {
4012 LOG_ERROR("System Bus Access not supported in this config.");
4013 return ERROR_FAIL;
4014 }
4015
4016 if (get_field(sbcs, DM_SBCS_SBVERSION) != 1) {
4017 LOG_ERROR("System Bus Access unsupported SBVERSION (%d). Only version 1 is supported.",
4018 get_field(sbcs, DM_SBCS_SBVERSION));
4019 return ERROR_FAIL;
4020 }
4021
4022 uint32_t num_sbdata_regs = get_num_sbdata_regs(target);
4023 assert(num_sbdata_regs);
4024
4025 uint32_t rd_buf[num_sbdata_regs];
4026
4027 /* Test 1: Simple write/read test */
4028 test_passed = true;
4029 sbcs = set_field(sbcs_orig, DM_SBCS_SBAUTOINCREMENT, 0);
4030 dmi_write(target, DM_SBCS, sbcs);
4031
4032 uint32_t test_patterns[4] = {0xdeadbeef, 0xfeedbabe, 0x12345678, 0x08675309};
4033 for (uint32_t sbaccess = 0; sbaccess <= (uint32_t)max_sbaccess; sbaccess++) {
4034 sbcs = set_field(sbcs, DM_SBCS_SBACCESS, sbaccess);
4035 dmi_write(target, DM_SBCS, sbcs);
4036
4037 uint32_t compare_mask = (sbaccess == 0) ? 0xff : (sbaccess == 1) ? 0xffff : 0xffffffff;
4038
4039 for (uint32_t i = 0; i < num_words; i++) {
4040 uint32_t addr = legal_address + (i << sbaccess);
4041 uint32_t wr_data[num_sbdata_regs];
4042 for (uint32_t j = 0; j < num_sbdata_regs; j++)
4043 wr_data[j] = test_patterns[j] + i;
4044 write_memory_sba_simple(target, addr, wr_data, num_sbdata_regs, sbcs);
4045 }
4046
4047 for (uint32_t i = 0; i < num_words; i++) {
4048 uint32_t addr = legal_address + (i << sbaccess);
4049 read_memory_sba_simple(target, addr, rd_buf, num_sbdata_regs, sbcs);
4050 for (uint32_t j = 0; j < num_sbdata_regs; j++) {
4051 if (((test_patterns[j]+i)&compare_mask) != (rd_buf[j]&compare_mask)) {
4052 LOG_ERROR("System Bus Access Test 1: Error reading non-autoincremented address %x,"
4053 "expected val = %x, read val = %x", addr, test_patterns[j]+i, rd_buf[j]);
4054 test_passed = false;
4055 tests_failed++;
4056 }
4057 }
4058 }
4059 }
4060 if (test_passed)
4061 LOG_INFO("System Bus Access Test 1: Simple write/read test PASSED.");
4062
4063 /* Test 2: Address autoincrement test */
4064 target_addr_t curr_addr;
4065 target_addr_t prev_addr;
4066 test_passed = true;
4067 sbcs = set_field(sbcs_orig, DM_SBCS_SBAUTOINCREMENT, 1);
4068 dmi_write(target, DM_SBCS, sbcs);
4069
4070 for (uint32_t sbaccess = 0; sbaccess <= (uint32_t)max_sbaccess; sbaccess++) {
4071 sbcs = set_field(sbcs, DM_SBCS_SBACCESS, sbaccess);
4072 dmi_write(target, DM_SBCS, sbcs);
4073
4074 dmi_write(target, DM_SBADDRESS0, legal_address);
4075 read_sbcs_nonbusy(target, &sbcs);
4076 curr_addr = legal_address;
4077 for (uint32_t i = 0; i < num_words; i++) {
4078 prev_addr = curr_addr;
4079 read_sbcs_nonbusy(target, &sbcs);
4080 curr_addr = sb_read_address(target);
4081 if ((curr_addr - prev_addr != (uint32_t)(1 << sbaccess)) && (i != 0)) {
4082 LOG_ERROR("System Bus Access Test 2: Error with address auto-increment, sbaccess = %x.", sbaccess);
4083 test_passed = false;
4084 tests_failed++;
4085 }
4086 dmi_write(target, DM_SBDATA0, i);
4087 }
4088
4089 read_sbcs_nonbusy(target, &sbcs);
4090
4091 dmi_write(target, DM_SBADDRESS0, legal_address);
4092
4093 uint32_t val;
4094 sbcs = set_field(sbcs, DM_SBCS_SBREADONDATA, 1);
4095 dmi_write(target, DM_SBCS, sbcs);
4096 dmi_read(target, &val, DM_SBDATA0); /* Dummy read to trigger first system bus read */
4097 curr_addr = legal_address;
4098 for (uint32_t i = 0; i < num_words; i++) {
4099 prev_addr = curr_addr;
4100 read_sbcs_nonbusy(target, &sbcs);
4101 curr_addr = sb_read_address(target);
4102 if ((curr_addr - prev_addr != (uint32_t)(1 << sbaccess)) && (i != 0)) {
4103 LOG_ERROR("System Bus Access Test 2: Error with address auto-increment, sbaccess = %x", sbaccess);
4104 test_passed = false;
4105 tests_failed++;
4106 }
4107 dmi_read(target, &val, DM_SBDATA0);
4108 read_sbcs_nonbusy(target, &sbcs);
4109 if (i != val) {
4110 LOG_ERROR("System Bus Access Test 2: Error reading auto-incremented address,"
4111 "expected val = %x, read val = %x.", i, val);
4112 test_passed = false;
4113 tests_failed++;
4114 }
4115 }
4116 }
4117 if (test_passed)
4118 LOG_INFO("System Bus Access Test 2: Address auto-increment test PASSED.");
4119
4120 /* Test 3: Read from illegal address */
4121 read_memory_sba_simple(target, illegal_address, rd_buf, 1, sbcs_orig);
4122
4123 dmi_read(target, &rd_val, DM_SBCS);
4124 if (get_field(rd_val, DM_SBCS_SBERROR) == 2) {
4125 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 2);
4126 dmi_write(target, DM_SBCS, sbcs);
4127 dmi_read(target, &rd_val, DM_SBCS);
4128 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4129 LOG_INFO("System Bus Access Test 3: Illegal address read test PASSED.");
4130 else
4131 LOG_ERROR("System Bus Access Test 3: Illegal address read test FAILED, unable to clear to 0.");
4132 } else {
4133 LOG_ERROR("System Bus Access Test 3: Illegal address read test FAILED, unable to set error code.");
4134 }
4135
4136 /* Test 4: Write to illegal address */
4137 write_memory_sba_simple(target, illegal_address, test_patterns, 1, sbcs_orig);
4138
4139 dmi_read(target, &rd_val, DM_SBCS);
4140 if (get_field(rd_val, DM_SBCS_SBERROR) == 2) {
4141 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 2);
4142 dmi_write(target, DM_SBCS, sbcs);
4143 dmi_read(target, &rd_val, DM_SBCS);
4144 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4145 LOG_INFO("System Bus Access Test 4: Illegal address write test PASSED.");
4146 else {
4147 LOG_ERROR("System Bus Access Test 4: Illegal address write test FAILED, unable to clear to 0.");
4148 tests_failed++;
4149 }
4150 } else {
4151 LOG_ERROR("System Bus Access Test 4: Illegal address write test FAILED, unable to set error code.");
4152 tests_failed++;
4153 }
4154
4155 /* Test 5: Write with unsupported sbaccess size */
4156 uint32_t sbaccess128 = get_field(sbcs_orig, DM_SBCS_SBACCESS128);
4157
4158 if (sbaccess128) {
4159 LOG_INFO("System Bus Access Test 5: SBCS sbaccess error test PASSED, all sbaccess sizes supported.");
4160 } else {
4161 sbcs = set_field(sbcs_orig, DM_SBCS_SBACCESS, 4);
4162
4163 write_memory_sba_simple(target, legal_address, test_patterns, 1, sbcs);
4164
4165 dmi_read(target, &rd_val, DM_SBCS);
4166 if (get_field(rd_val, DM_SBCS_SBERROR) == 4) {
4167 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 4);
4168 dmi_write(target, DM_SBCS, sbcs);
4169 dmi_read(target, &rd_val, DM_SBCS);
4170 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4171 LOG_INFO("System Bus Access Test 5: SBCS sbaccess error test PASSED.");
4172 else {
4173 LOG_ERROR("System Bus Access Test 5: SBCS sbaccess error test FAILED, unable to clear to 0.");
4174 tests_failed++;
4175 }
4176 } else {
4177 LOG_ERROR("System Bus Access Test 5: SBCS sbaccess error test FAILED, unable to set error code.");
4178 tests_failed++;
4179 }
4180 }
4181
4182 /* Test 6: Write to misaligned address */
4183 sbcs = set_field(sbcs_orig, DM_SBCS_SBACCESS, 1);
4184
4185 write_memory_sba_simple(target, legal_address+1, test_patterns, 1, sbcs);
4186
4187 dmi_read(target, &rd_val, DM_SBCS);
4188 if (get_field(rd_val, DM_SBCS_SBERROR) == 3) {
4189 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 3);
4190 dmi_write(target, DM_SBCS, sbcs);
4191 dmi_read(target, &rd_val, DM_SBCS);
4192 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4193 LOG_INFO("System Bus Access Test 6: SBCS address alignment error test PASSED");
4194 else {
4195 LOG_ERROR("System Bus Access Test 6: SBCS address alignment error test FAILED, unable to clear to 0.");
4196 tests_failed++;
4197 }
4198 } else {
4199 LOG_ERROR("System Bus Access Test 6: SBCS address alignment error test FAILED, unable to set error code.");
4200 tests_failed++;
4201 }
4202
4203 /* Test 7: Set sbbusyerror, only run this case in simulation as it is likely
4204 * impossible to hit otherwise */
4205 if (run_sbbusyerror_test) {
4206 sbcs = set_field(sbcs_orig, DM_SBCS_SBREADONADDR, 1);
4207 dmi_write(target, DM_SBCS, sbcs);
4208
4209 for (int i = 0; i < 16; i++)
4210 dmi_write(target, DM_SBDATA0, 0xdeadbeef);
4211
4212 for (int i = 0; i < 16; i++)
4213 dmi_write(target, DM_SBADDRESS0, legal_address);
4214
4215 dmi_read(target, &rd_val, DM_SBCS);
4216 if (get_field(rd_val, DM_SBCS_SBBUSYERROR)) {
4217 sbcs = set_field(sbcs_orig, DM_SBCS_SBBUSYERROR, 1);
4218 dmi_write(target, DM_SBCS, sbcs);
4219 dmi_read(target, &rd_val, DM_SBCS);
4220 if (get_field(rd_val, DM_SBCS_SBBUSYERROR) == 0)
4221 LOG_INFO("System Bus Access Test 7: SBCS sbbusyerror test PASSED.");
4222 else {
4223 LOG_ERROR("System Bus Access Test 7: SBCS sbbusyerror test FAILED, unable to clear to 0.");
4224 tests_failed++;
4225 }
4226 } else {
4227 LOG_ERROR("System Bus Access Test 7: SBCS sbbusyerror test FAILED, unable to set error code.");
4228 tests_failed++;
4229 }
4230 }
4231
4232 if (tests_failed == 0) {
4233 LOG_INFO("ALL TESTS PASSED");
4234 return ERROR_OK;
4235 } else {
4236 LOG_ERROR("%d TESTS FAILED", tests_failed);
4237 return ERROR_FAIL;
4238 }
4239
4240 }
4241
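/* Perform a single system bus write without triggering a read-on-address:
 * program the sbaddress registers, then write the sbdata registers from
 * highest to lowest so that the final sbdata0 write triggers the bus access. */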
4242 void write_memory_sba_simple(struct target *target, target_addr_t addr,
4243 uint32_t *write_data, uint32_t write_size, uint32_t sbcs)
4244 {
4245 RISCV013_INFO(info);
4246
4247 uint32_t rd_sbcs;
4248 uint32_t masked_addr;
4249
4250 uint32_t sba_size = get_field(info->sbcs, DM_SBCS_SBASIZE);
4251
4252 read_sbcs_nonbusy(target, &rd_sbcs);
4253
4254 uint32_t sbcs_no_readonaddr = set_field(sbcs, DM_SBCS_SBREADONADDR, 0);
4255 dmi_write(target, DM_SBCS, sbcs_no_readonaddr);
4256
4257 for (uint32_t i = 0; i < sba_size/32; i++) {
4258 masked_addr = (addr >> 32*i) & 0xffffffff;
4259
4260 if (i != 3)
4261 dmi_write(target, DM_SBADDRESS0+i, masked_addr);
4262 else
4263 dmi_write(target, DM_SBADDRESS3, masked_addr);
4264 }
4265
4266 /* Write the SBDATA registers starting with the highest-numbered one, since
4267 * the write to SBDATA0 triggers the bus write */
4268 for (int i = write_size-1; i >= 0; i--)
4269 dmi_write(target, DM_SBDATA0+i, write_data[i]);
4270 }
4271
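/* Perform a single system bus read: with sbreadonaddr set, the final write to
 * the lowest address register triggers the read, after which the sbdata
 * registers are collected into rd_buf. */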
4272 void read_memory_sba_simple(struct target *target, target_addr_t addr,
4273 uint32_t *rd_buf, uint32_t read_size, uint32_t sbcs)
4274 {
4275 RISCV013_INFO(info);
4276
4277 uint32_t rd_sbcs;
4278 uint32_t masked_addr;
4279
4280 uint32_t sba_size = get_field(info->sbcs, DM_SBCS_SBASIZE);
4281
4282 read_sbcs_nonbusy(target, &rd_sbcs);
4283
4284 uint32_t sbcs_readonaddr = set_field(sbcs, DM_SBCS_SBREADONADDR, 1);
4285 dmi_write(target, DM_SBCS, sbcs_readonaddr);
4286
4287 /* Write the address registers from highest to lowest, so the final write to
* SBADDRESS0 triggers the system bus read */
4288 for (int i = sba_size/32-1; i >= 0; i--) {
4289 masked_addr = (addr >> 32*i) & 0xffffffff;
4290
4291 if (i != 3)
4292 dmi_write(target, DM_SBADDRESS0+i, masked_addr);
4293 else
4294 dmi_write(target, DM_SBADDRESS3, masked_addr);
4295 }
4296
4297 read_sbcs_nonbusy(target, &rd_sbcs);
4298
4299 for (uint32_t i = 0; i < read_size; i++)
4300 dmi_read(target, &(rd_buf[i]), DM_SBDATA0+i);
4301 }
4302
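/* Total number of bits in a DMI scan: address + data + op. */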
4303 int riscv013_dmi_write_u64_bits(struct target *target)
4304 {
4305 RISCV013_INFO(info);
4306 return info->abits + DTM_DMI_DATA_LENGTH + DTM_DMI_OP_LENGTH;
4307 }
4308
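/* Execute a fence/fence.i only when the program buffer is large enough. */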
4309 static int maybe_execute_fence_i(struct target *target)
4310 {
4311 if (has_sufficient_progbuf(target, 3))
4312 return execute_fence(target);
4313 return ERROR_OK;
4314 }
4315
4316 /* Helper Functions. */
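/* Configure dcsr before a step or resume: set the step bit as requested and
 * route ebreak to Debug Mode per the riscv_ebreakm/s/u settings. */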
4317 static int riscv013_on_step_or_resume(struct target *target, bool step)
4318 {
4319 if (maybe_execute_fence_i(target) != ERROR_OK)
4320 return ERROR_FAIL;
4321
4322 /* We want to twiddle some bits in the debug CSR so debugging works. */
4323 riscv_reg_t dcsr;
4324 int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
4325 if (result != ERROR_OK)
4326 return result;
4327 dcsr = set_field(dcsr, CSR_DCSR_STEP, step);
4328 dcsr = set_field(dcsr, CSR_DCSR_EBREAKM, riscv_ebreakm);
4329 dcsr = set_field(dcsr, CSR_DCSR_EBREAKS, riscv_ebreaks);
4330 dcsr = set_field(dcsr, CSR_DCSR_EBREAKU, riscv_ebreaku);
4331 return riscv_set_register(target, GDB_REGNO_DCSR, dcsr);
4332 }
4333
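/* Set resumereq (optionally with hasel) and poll dmstatus until the hart
 * acknowledges the resume; when stepping, also wait for it to halt again. */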
4334 static int riscv013_step_or_resume_current_hart(struct target *target,
4335 bool step, bool use_hasel)
4336 {
4337 RISCV_INFO(r);
4338 LOG_DEBUG("resuming hart %d (for step?=%d)", r->current_hartid, step);
4339 if (!riscv_is_halted(target)) {
4340 LOG_ERROR("Hart %d is not halted!", r->current_hartid);
4341 return ERROR_FAIL;
4342 }
4343
4344 /* Issue the resume command, and then wait for the current hart to resume. */
4345 uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_RESUMEREQ;
4346 if (use_hasel)
4347 dmcontrol |= DM_DMCONTROL_HASEL;
4348 dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
4349 dmi_write(target, DM_DMCONTROL, dmcontrol);
4350
4351 dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HASEL, 0);
4352 dmcontrol = set_field(dmcontrol, DM_DMCONTROL_RESUMEREQ, 0);
4353
4354 uint32_t dmstatus;
4355 for (size_t i = 0; i < 256; ++i) {
4356 usleep(10);
4357 if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
4358 return ERROR_FAIL;
4359 if (get_field(dmstatus, DM_DMSTATUS_ALLRESUMEACK) == 0)
4360 continue;
4361 if (step && get_field(dmstatus, DM_DMSTATUS_ALLHALTED) == 0)
4362 continue;
4363
4364 dmi_write(target, DM_DMCONTROL, dmcontrol);
4365 return ERROR_OK;
4366 }
4367
4368 dmi_write(target, DM_DMCONTROL, dmcontrol);
4369
4370 LOG_ERROR("unable to resume hart %d", r->current_hartid);
4371 if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
4372 return ERROR_FAIL;
4373 LOG_ERROR(" dmstatus =0x%08x", dmstatus);
4374
4375 if (step) {
4376 LOG_ERROR(" was stepping, halting");
4377 riscv_halt(target);
4378 return ERROR_OK;
4379 }
4380
4381 return ERROR_FAIL;
4382 }
4383
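/* Wait for any abstract command to finish (bounded by the command timeout)
 * and then clear cmderr in abstractcs. */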
4384 void riscv013_clear_abstract_error(struct target *target)
4385 {
4386 /* Wait for busy to go away. */
4387 time_t start = time(NULL);
4388 uint32_t abstractcs;
4389 dmi_read(target, &abstractcs, DM_ABSTRACTCS);
4390 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY)) {
4391 dmi_read(target, &abstractcs, DM_ABSTRACTCS);
4392
4393 if (time(NULL) - start > riscv_command_timeout_sec) {
4394 LOG_ERROR("abstractcs.busy is not going low after %d seconds "
4395 "(abstractcs=0x%x). The target is either really slow or "
4396 "broken. You could increase the timeout with riscv "
4397 "set_command_timeout_sec.",
4398 riscv_command_timeout_sec, abstractcs);
4399 break;
4400 }
4401 }
4402 /* Clear the error status. */
4403 dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
4404 }
4405
4406 #ifdef _WIN32
4407 #define FILE_SEP '\\'
4408 #else
4409 #define FILE_SEP '/'
4410 #endif
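/* Helpers for riscv013_test_compliance(): COMPLIANCE_TEST logs each check,
 * counts it, and asserts that it passed. */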
4411 #define COMPLIANCE_TEST(b, message) \
4412 { \
4413 const char *last_sep = strrchr(__FILE__, FILE_SEP); \
4414 const char *fname = (last_sep == NULL ? __FILE__ : last_sep + 1); \
4415 LOG_INFO("Executing test %d (%s:%d): %s", total_tests, fname, __LINE__, message); \
4416 int pass = 0; \
4417 if (b) { \
4418 pass = 1; \
4419 passed_tests++; \
4420 } \
4421 LOG_INFO(" %s", (pass) ? "PASSED" : "FAILED"); \
4422 assert(pass); \
4423 total_tests++; \
4424 }
4425
4426 #define COMPLIANCE_MUST_PASS(b) COMPLIANCE_TEST(ERROR_OK == (b), "Regular calls must return ERROR_OK")
4427
4428 #define COMPLIANCE_READ(target, addr, value) COMPLIANCE_MUST_PASS(dmi_read(target, addr, value))
4429 #define COMPLIANCE_WRITE(target, addr, value) COMPLIANCE_MUST_PASS(dmi_write(target, addr, value))
4430
4431 #define COMPLIANCE_CHECK_RO(target, addr) \
4432 { \
4433 uint32_t orig; \
4434 uint32_t inverse; \
4435 COMPLIANCE_READ(target, &orig, addr); \
4436 COMPLIANCE_WRITE(target, addr, ~orig); \
4437 COMPLIANCE_READ(target, &inverse, addr); \
4438 COMPLIANCE_TEST(orig == inverse, "Register must be read-only"); \
4439 }
4440
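/* Best-effort compliance test against the 0.13 debug spec. Exercises the DM
 * registers, abstract commands, single stepping, ndmreset and dmactive, and
 * reports the number of failing checks. */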
4441 int riscv013_test_compliance(struct target *target)
4442 {
4443 LOG_INFO("Basic compliance test against RISC-V Debug Spec v0.13");
4444 LOG_INFO("This test is not complete, and not well supported.");
4445 LOG_INFO("Your core might pass this test without being compliant.");
4446 LOG_INFO("Your core might fail this test while being compliant.");
4447 LOG_INFO("Use your judgment, and please contribute improvements.");
4448
4449 if (!riscv_rtos_enabled(target)) {
4450 LOG_ERROR("Please run with -rtos riscv to run compliance test.");
4451 return ERROR_FAIL;
4452 }
4453
4454 if (!target_was_examined(target)) {
4455 LOG_ERROR("Cannot run compliance test, because target has not yet "
4456 "been examined, or the examination failed.\n");
4457 return ERROR_FAIL;
4458 }
4459
4460 int total_tests = 0;
4461 int passed_tests = 0;
4462
4463 uint32_t dmcontrol_orig = DM_DMCONTROL_DMACTIVE;
4464 uint32_t dmcontrol;
4465 uint32_t testvar;
4466 uint32_t testvar_read;
4467 riscv_reg_t value;
4468 RISCV013_INFO(info);
4469
4470 /* All the bits of HARTSEL are covered by the examine sequence. */
4471
4472 /* hartreset */
4473 /* This field is optional. Either we can read and write it to 1/0,
4474 or it is tied to 0. This check doesn't really do anything, but
4475 it does attempt to set the bit to 1 and then back to 0, which needs to
4476 work if it's implemented. */
4477 COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HARTRESET, 1));
4478 COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HARTRESET, 0));
4479 COMPLIANCE_READ(target, &dmcontrol, DM_DMCONTROL);
4480 COMPLIANCE_TEST((get_field(dmcontrol, DM_DMCONTROL_HARTRESET) == 0),
4481 "DMCONTROL.hartreset can be 0 or RW.");
4482
4483 /* hasel */
4484 COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HASEL, 1));
4485 COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HASEL, 0));
4486 COMPLIANCE_READ(target, &dmcontrol, DM_DMCONTROL);
4487 COMPLIANCE_TEST((get_field(dmcontrol, DM_DMCONTROL_HASEL) == 0),
4488 "DMCONTROL.hasel can be 0 or RW.");
4489 /* TODO: test that hamask registers exist if hasel does. */
4490
4491 /* haltreq */
4492 COMPLIANCE_MUST_PASS(riscv_halt(target));
4493 /* This bit is not actually readable according to the spec, so nothing to check.*/
4494
4495 /* DMSTATUS */
4496 COMPLIANCE_CHECK_RO(target, DM_DMSTATUS);
4497
4498 /* resumereq */
4499 /* This bit is not actually readable according to the spec, so nothing to check.*/
4500 COMPLIANCE_MUST_PASS(riscv_resume(target, true, 0, false, false, false));
4501
4502 /* Halt all harts again so the test can continue.*/
4503 COMPLIANCE_MUST_PASS(riscv_halt(target));
4504
4505 /* HARTINFO: Read-Only. This is per-hart, so need to adjust hartsel. */
4506 uint32_t hartinfo;
4507 COMPLIANCE_READ(target, &hartinfo, DM_HARTINFO);
4508 for (int hartsel = 0; hartsel < riscv_count_harts(target); hartsel++) {
4509 COMPLIANCE_MUST_PASS(riscv_set_current_hartid(target, hartsel));
4510
4511 COMPLIANCE_CHECK_RO(target, DM_HARTINFO);
4512
4513 /* $dscratch CSRs */
4514 uint32_t nscratch = get_field(hartinfo, DM_HARTINFO_NSCRATCH);
4515 for (unsigned int d = 0; d < nscratch; d++) {
4516 riscv_reg_t testval, testval_read;
4517 /* Because DSCRATCH0 is not guaranteed to last across PB executions, need to put
4518 this all into one PB execution, which may not be possible on all implementations. */
4519 if (info->progbufsize >= 5) {
4520 for (testval = 0x0011223300112233;
4521 testval != 0xDEAD;
4522 testval = testval == 0x0011223300112233 ? ~testval : 0xDEAD) {
4523 COMPLIANCE_TEST(register_write_direct(target, GDB_REGNO_S0, testval) == ERROR_OK,
4524 "Need to be able to write S0 in order to test DSCRATCH0.");
4525 struct riscv_program program32;
4526 riscv_program_init(&program32, target);
4527 riscv_program_csrw(&program32, GDB_REGNO_S0, GDB_REGNO_DSCRATCH0 + d);
4528 riscv_program_csrr(&program32, GDB_REGNO_S1, GDB_REGNO_DSCRATCH0 + d);
4529 riscv_program_fence(&program32);
4530 riscv_program_ebreak(&program32);
4531 COMPLIANCE_TEST(riscv_program_exec(&program32, target) == ERROR_OK,
4532 "Accessing DSCRATCH0 with program buffer should succeed.");
4533 COMPLIANCE_TEST(register_read_direct(target, &testval_read, GDB_REGNO_S1) == ERROR_OK,
4534 "Need to be able to read S1 in order to test DSCRATCH0.");
4535 if (riscv_xlen(target) > 32) {
4536 COMPLIANCE_TEST(testval == testval_read,
4537 "All DSCRATCH0 registers in HARTINFO must be R/W.");
4538 } else {
4539 COMPLIANCE_TEST(testval_read == (testval & 0xFFFFFFFF),
4540 "All DSCRATCH0 registers in HARTINFO must be R/W.");
4541 }
4542 }
4543 }
4544 }
4545 /* TODO: dataaccess */
4546 if (get_field(hartinfo, DM_HARTINFO_DATAACCESS)) {
4547 /* TODO: Shadowed in memory map. */
4548 /* TODO: datasize */
4549 /* TODO: dataaddr */
4550 } else {
4551 /* TODO: Shadowed in CSRs. */
4552 /* TODO: datasize */
4553 /* TODO: dataaddr */
4554 }
4555
4556 }
4557
4558 /* HALTSUM -- TODO: More than 32 harts. Would need to loop over this to set hartsel */
4559 /* TODO: HALTSUM2, HALTSUM3 */
4560 /* HALTSUM0 */
4561 uint32_t expected_haltsum0 = 0;
4562 for (int i = 0; i < MIN(riscv_count_harts(target), 32); i++)
4563 expected_haltsum0 |= (1 << i);
4564
4565 COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM0);
4566 COMPLIANCE_TEST(testvar_read == expected_haltsum0,
4567 "HALTSUM0 should report summary of up to 32 halted harts");
4568
4569 COMPLIANCE_WRITE(target, DM_HALTSUM0, 0xffffffff);
4570 COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM0);
4571 COMPLIANCE_TEST(testvar_read == expected_haltsum0, "HALTSUM0 should be R/O");
4572
4573 COMPLIANCE_WRITE(target, DM_HALTSUM0, 0x0);
4574 COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM0);
4575 COMPLIANCE_TEST(testvar_read == expected_haltsum0, "HALTSUM0 should be R/O");
4576
4577 /* HALTSUM1 */
4578 uint32_t expected_haltsum1 = 0;
4579 for (int i = 0; i < MIN(riscv_count_harts(target), 1024); i += 32)
4580 expected_haltsum1 |= (1 << (i/32));
4581
4582 COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM1);
4583 COMPLIANCE_TEST(testvar_read == expected_haltsum1,
4584 "HALTSUM1 should report summary of up to 1024 halted harts");
4585
4586 COMPLIANCE_WRITE(target, DM_HALTSUM1, 0xffffffff);
4587 COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM1);
4588 COMPLIANCE_TEST(testvar_read == expected_haltsum1, "HALTSUM1 should be R/O");
4589
4590 COMPLIANCE_WRITE(target, DM_HALTSUM1, 0x0);
4591 COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM1);
4592 COMPLIANCE_TEST(testvar_read == expected_haltsum1, "HALTSUM1 should be R/O");
4593
4594 /* TODO: HAWINDOWSEL */
4595
4596 /* TODO: HAWINDOW */
4597
4598 /* ABSTRACTCS */
4599
4600 uint32_t abstractcs;
4601 COMPLIANCE_READ(target, &abstractcs, DM_ABSTRACTCS);
4602
4603 /* Check that all reported Data Words are really R/W */
4604 for (int invert = 0; invert < 2; invert++) {
4605 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
4606 testvar = (i + 1) * 0x11111111;
4607 if (invert)
4608 testvar = ~testvar;
4609 COMPLIANCE_WRITE(target, DM_DATA0 + i, testvar);
4610 }
4611 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
4612 testvar = (i + 1) * 0x11111111;
4613 if (invert)
4614 testvar = ~testvar;
4615 COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
4616 COMPLIANCE_TEST(testvar_read == testvar, "All reported DATA words must be R/W");
4617 }
4618 }
4619
4620 /* Check that all reported ProgBuf words are really R/W */
4621 for (int invert = 0; invert < 2; invert++) {
4622 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
4623 testvar = (i + 1) * 0x11111111;
4624 if (invert)
4625 testvar = ~testvar;
4626 COMPLIANCE_WRITE(target, DM_PROGBUF0 + i, testvar);
4627 }
4628 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
4629 testvar = (i + 1) * 0x11111111;
4630 if (invert)
4631 testvar = ~testvar;
4632 COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
4633 COMPLIANCE_TEST(testvar_read == testvar, "All reported PROGBUF words must be R/W");
4634 }
4635 }
4636
4637 /* TODO: Cause and clear all error types */
4638
4639 /* COMMAND
4640 According to the spec, this register is write-only, so we can't really check the read result.
4641 At any rate, this value is not a legal command and should cause an error. */
4642 COMPLIANCE_WRITE(target, DM_COMMAND, 0xAAAAAAAA);
4643 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
4644 COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == CMDERR_NOT_SUPPORTED,
4645 "Illegal COMMAND should result in UNSUPPORTED");
4646 COMPLIANCE_WRITE(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
4647
4648 COMPLIANCE_WRITE(target, DM_COMMAND, 0x55555555);
4649 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
4650 COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == CMDERR_NOT_SUPPORTED,
4651 "Illegal COMMAND should result in UNSUPPORTED");
4652 COMPLIANCE_WRITE(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
4653
4654 /* Basic Abstract Commands */
4655 for (unsigned int i = 1; i < 32; i = i << 1) {
4656 riscv_reg_t testval = i | ((i + 1ULL) << 32);
4657 riscv_reg_t testval_read;
4658 COMPLIANCE_TEST(ERROR_OK == register_write_direct(target, GDB_REGNO_ZERO + i, testval),
4659 "GPR Writes should be supported.");
4660 COMPLIANCE_MUST_PASS(write_abstract_arg(target, 0, 0xDEADBEEFDEADBEEF, 64));
4661 COMPLIANCE_TEST(ERROR_OK == register_read_direct(target, &testval_read, GDB_REGNO_ZERO + i),
4662 "GPR Reads should be supported.");
4663 if (riscv_xlen(target) > 32) {
4664 /* Dummy comment to satisfy linter, since removing the branches here doesn't actually compile. */
4665 COMPLIANCE_TEST(testval == testval_read, "GPR Reads and writes should be supported.");
4666 } else {
4667 /* Dummy comment to satisfy linter, since removing the branches here doesn't actually compile. */
4668 COMPLIANCE_TEST((testval & 0xFFFFFFFF) == testval_read, "GPR Reads and writes should be supported.");
4669 }
4670 }
4671
4672 /* ABSTRACTAUTO
4673 See which bits are actually writable */
4674 COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0xFFFFFFFF);
4675 uint32_t abstractauto;
4676 uint32_t busy;
4677 COMPLIANCE_READ(target, &abstractauto, DM_ABSTRACTAUTO);
4678 COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0x0);
4679 if (abstractauto > 0) {
4680 /* This mechanism only works when you have a reasonably sized progbuf, which is not
4681 a true compliance requirement. */
4682 if (info->progbufsize >= 3) {
4683
4684 testvar = 0;
4685 COMPLIANCE_TEST(ERROR_OK == register_write_direct(target, GDB_REGNO_S0, 0),
4686 "Need to be able to write S0 to test ABSTRACTAUTO");
4687 struct riscv_program program;
4688 COMPLIANCE_MUST_PASS(riscv_program_init(&program, target));
4689 /* This is also testing that WFI() is a NOP during debug mode. */
4690 COMPLIANCE_MUST_PASS(riscv_program_insert(&program, wfi()));
4691 COMPLIANCE_MUST_PASS(riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, 1));
4692 COMPLIANCE_MUST_PASS(riscv_program_ebreak(&program));
4693 COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0x0);
4694 COMPLIANCE_MUST_PASS(riscv_program_exec(&program, target));
4695 testvar++;
4696 COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0xFFFFFFFF);
4697 COMPLIANCE_READ(target, &abstractauto, DM_ABSTRACTAUTO);
4698 uint32_t autoexec_data = get_field(abstractauto, DM_ABSTRACTAUTO_AUTOEXECDATA);
4699 uint32_t autoexec_progbuf = get_field(abstractauto, DM_ABSTRACTAUTO_AUTOEXECPROGBUF);
4700 for (unsigned int i = 0; i < 12; i++) {
4701 COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
4702 do {
4703 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
4704 busy = get_field(testvar_read, DM_ABSTRACTCS_BUSY);
4705 } while (busy);
4706 if (autoexec_data & (1 << i)) {
4707 COMPLIANCE_TEST(i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT),
4708 "AUTOEXEC may be writable up to DATACOUNT bits.");
4709 testvar++;
4710 }
4711 }
4712 for (unsigned int i = 0; i < 16; i++) {
4713 COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
4714 do {
4715 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
4716 busy = get_field(testvar_read, DM_ABSTRACTCS_BUSY);
4717 } while (busy);
4718 if (autoexec_progbuf & (1 << i)) {
4719 COMPLIANCE_TEST(i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE),
4720 "AUTOEXEC may be writable up to PROGBUFSIZE bits.");
4721 testvar++;
4722 }
4723 }
4724
4725 COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0);
4726 COMPLIANCE_TEST(ERROR_OK == register_read_direct(target, &value, GDB_REGNO_S0),
4727 "Need to be able to read S0 to test ABSTRACTAUTO");
4728
4729 COMPLIANCE_TEST(testvar == value,
4730 "ABSTRACTAUTO should cause COMMAND to run the expected number of times.");
4731 }
4732 }
4733
4734 /* Single-Step each hart. */
4735 for (int hartsel = 0; hartsel < riscv_count_harts(target); hartsel++) {
4736 COMPLIANCE_MUST_PASS(riscv_set_current_hartid(target, hartsel));
4737 COMPLIANCE_MUST_PASS(riscv013_on_step(target));
4738 COMPLIANCE_MUST_PASS(riscv013_step_current_hart(target));
4739 COMPLIANCE_TEST(riscv_halt_reason(target, hartsel) == RISCV_HALT_SINGLESTEP,
4740 "Single Step should result in SINGLESTEP");
4741 }
4742
4743 /* Core Register Tests */
4744 uint64_t bogus_dpc = 0xdeadbeef;
4745 for (int hartsel = 0; hartsel < riscv_count_harts(target); hartsel++) {
4746 COMPLIANCE_MUST_PASS(riscv_set_current_hartid(target, hartsel));
4747
4748 /* DCSR Tests */
4749 COMPLIANCE_MUST_PASS(register_write_direct(target, GDB_REGNO_DCSR, 0x0));
4750 COMPLIANCE_MUST_PASS(register_read_direct(target, &value, GDB_REGNO_DCSR));
4751 COMPLIANCE_TEST(value != 0, "Not all bits in DCSR are writable by Debugger");
4752 COMPLIANCE_MUST_PASS(register_write_direct(target, GDB_REGNO_DCSR, 0xFFFFFFFF));
4753 COMPLIANCE_MUST_PASS(register_read_direct(target, &value, GDB_REGNO_DCSR));
4754 COMPLIANCE_TEST(value != 0, "At least some bits in DCSR must be 1");
4755
4756 /* DPC. Note that DPC is sign-extended. */
4757 riscv_reg_t dpcmask = 0xFFFFFFFCUL;
4758 riscv_reg_t dpc;
4759
4760 if (riscv_xlen(target) > 32)
4761 dpcmask |= (0xFFFFFFFFULL << 32);
4762
4763 if (riscv_supports_extension(target, riscv_current_hartid(target), 'C'))
4764 dpcmask |= 0x2;
4765
4766 COMPLIANCE_MUST_PASS(register_write_direct(target, GDB_REGNO_DPC, dpcmask));
4767 COMPLIANCE_MUST_PASS(register_read_direct(target, &dpc, GDB_REGNO_DPC));
4768 COMPLIANCE_TEST(dpcmask == dpc,
4769 "DPC must be sign-extended to XLEN and writable to all-1s (except the least significant bits)");
4770 COMPLIANCE_MUST_PASS(register_write_direct(target, GDB_REGNO_DPC, 0));
4771 COMPLIANCE_MUST_PASS(register_read_direct(target, &dpc, GDB_REGNO_DPC));
4772 COMPLIANCE_TEST(dpc == 0, "DPC must be writable to 0.");
4773 if (hartsel == 0)
4774 bogus_dpc = dpc; /* For a later test step */
4775 }
4776
4777 /* NDMRESET
4778 Asserting non-debug module reset should not reset Debug Module state.
4779 But it should reset Hart State, e.g. DPC should get a different value.
4780 Also make sure that DCSR reports cause of 'HALT' even though previously we single-stepped.
4781 */
4782
4783 /* Write some registers. They should not be impacted by ndmreset. */
4784 COMPLIANCE_WRITE(target, DM_COMMAND, 0xFFFFFFFF);
4785
4786 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
4787 testvar = (i + 1) * 0x11111111;
4788 COMPLIANCE_WRITE(target, DM_PROGBUF0 + i, testvar);
4789 }
4790
4791 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
4792 testvar = (i + 1) * 0x11111111;
4793 COMPLIANCE_WRITE(target, DM_DATA0 + i, testvar);
4794 }
4795
4796 COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0xFFFFFFFF);
4797 COMPLIANCE_READ(target, &abstractauto, DM_ABSTRACTAUTO);
4798
4799 /* Pulse reset. */
4800 target->reset_halt = true;
4801 COMPLIANCE_MUST_PASS(riscv_set_current_hartid(target, 0));
4802 COMPLIANCE_TEST(ERROR_OK == assert_reset(target), "Must be able to assert NDMRESET");
4803 COMPLIANCE_TEST(ERROR_OK == deassert_reset(target), "Must be able to deassert NDMRESET");
4804
4805 /* Verify that most stuff is not affected by ndmreset. */
4806 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
4807 COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == CMDERR_NOT_SUPPORTED,
4808 "NDMRESET should not affect DM_ABSTRACTCS");
4809 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTAUTO);
4810 COMPLIANCE_TEST(testvar_read == abstractauto, "NDMRESET should not affect DM_ABSTRACTAUTO");
4811
4812 /* Clean up to avoid future test failures */
4813 COMPLIANCE_WRITE(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
4814 COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0);
4815
4816 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
4817 testvar = (i + 1) * 0x11111111;
4818 COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
4819 COMPLIANCE_TEST(testvar_read == testvar, "PROGBUF words must not be affected by NDMRESET");
4820 }
4821
4822 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
4823 testvar = (i + 1) * 0x11111111;
4824 COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
4825 COMPLIANCE_TEST(testvar_read == testvar, "DATA words must not be affected by NDMRESET");
4826 }
4827
4828 /* Verify that DPC *is* affected by ndmreset. Since we don't know what it *should* be,
4829 just verify that at least it's not the bogus value anymore. */
4830
4831 COMPLIANCE_TEST(bogus_dpc != 0xdeadbeef, "BOGUS DPC should have been set somehow (bug in compliance test)");
4832 COMPLIANCE_MUST_PASS(register_read_direct(target, &value, GDB_REGNO_DPC));
4833 COMPLIANCE_TEST(bogus_dpc != value, "NDMRESET should move DPC to reset value.");
4834
4835 COMPLIANCE_TEST(riscv_halt_reason(target, 0) == RISCV_HALT_INTERRUPT,
4836 "After NDMRESET halt, DCSR should report cause of halt");
4837
4838 /* DMACTIVE -- deasserting DMACTIVE should reset all the above values. */
4839
4840 /* Toggle dmactive */
4841 COMPLIANCE_WRITE(target, DM_DMCONTROL, 0);
4842 COMPLIANCE_WRITE(target, DM_DMCONTROL, DM_DMCONTROL_DMACTIVE);
4843 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
4844 COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == 0, "ABSTRACTCS.cmderr should reset to 0");
4845 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTAUTO);
4846 COMPLIANCE_TEST(testvar_read == 0, "ABSTRACTAUTO should reset to 0");
4847
4848 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
4849 COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
4850 COMPLIANCE_TEST(testvar_read == 0, "PROGBUF words should reset to 0");
4851 }
4852
4853 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
4854 COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
4855 COMPLIANCE_TEST(testvar_read == 0, "DATA words should reset to 0");
4856 }
4857
4858 /*
4859 * TODO:
4860 * DCSR.cause priorities
4861 * DCSR.stoptime/stopcycle
4862 * DCSR.stepie
4863 * DCSR.ebreak
4864 * DCSR.prv
4865 */
4866
4867 /* Halt every hart for any follow-up tests*/
4868 COMPLIANCE_MUST_PASS(riscv_halt(target));
4869
4870 uint32_t failed_tests = total_tests - passed_tests;
4871 if (total_tests == passed_tests) {
4872 LOG_INFO("ALL TESTS PASSED\n");
4873 return ERROR_OK;
4874 } else {
4875 LOG_INFO("%d TESTS FAILED\n", failed_tests);
4876 return ERROR_FAIL;
4877 }
4878 }
