src/target/riscv/riscv-013.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /*
4 * Support for RISC-V, debug version 0.13, which is currently (2/4/17) the
5 * latest draft.
6 */
7
8 #include <assert.h>
9 #include <stdlib.h>
10 #include <time.h>
11
12 #ifdef HAVE_CONFIG_H
13 #include "config.h"
14 #endif
15
16 #include "target/target.h"
17 #include "target/algorithm.h"
18 #include "target/target_type.h"
19 #include "log.h"
20 #include "jtag/jtag.h"
21 #include "target/register.h"
22 #include "target/breakpoints.h"
23 #include "helper/time_support.h"
24 #include "helper/list.h"
25 #include "riscv.h"
26 #include "debug_defines.h"
27 #include "rtos/rtos.h"
28 #include "program.h"
29 #include "asm.h"
30 #include "batch.h"
31
32 #define DM_DATA1 (DM_DATA0 + 1)
33 #define DM_PROGBUF1 (DM_PROGBUF0 + 1)
34
35 static int riscv013_on_step_or_resume(struct target *target, bool step);
36 static int riscv013_step_or_resume_current_hart(struct target *target,
37 bool step, bool use_hasel);
38 static void riscv013_clear_abstract_error(struct target *target);
39
40 /* Implementations of the functions in riscv_info_t. */
41 static int riscv013_get_register(struct target *target,
42 riscv_reg_t *value, int hid, int rid);
43 static int riscv013_set_register(struct target *target, int hartid, int regid, uint64_t value);
44 static int riscv013_select_current_hart(struct target *target);
45 static int riscv013_halt_prep(struct target *target);
46 static int riscv013_halt_go(struct target *target);
47 static int riscv013_resume_go(struct target *target);
48 static int riscv013_step_current_hart(struct target *target);
49 static int riscv013_on_halt(struct target *target);
50 static int riscv013_on_step(struct target *target);
51 static int riscv013_resume_prep(struct target *target);
52 static bool riscv013_is_halted(struct target *target);
53 static enum riscv_halt_reason riscv013_halt_reason(struct target *target);
54 static int riscv013_write_debug_buffer(struct target *target, unsigned index,
55 riscv_insn_t d);
56 static riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned
57 index);
58 static int riscv013_execute_debug_buffer(struct target *target);
59 static void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d);
60 static void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a);
61 static int riscv013_dmi_write_u64_bits(struct target *target);
62 static void riscv013_fill_dmi_nop_u64(struct target *target, char *buf);
63 static int register_read(struct target *target, uint64_t *value, uint32_t number);
64 static int register_read_direct(struct target *target, uint64_t *value, uint32_t number);
65 static int register_write_direct(struct target *target, unsigned number,
66 uint64_t value);
67 static int read_memory(struct target *target, target_addr_t address,
68 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment);
69 static int write_memory(struct target *target, target_addr_t address,
70 uint32_t size, uint32_t count, const uint8_t *buffer);
71 static int riscv013_test_sba_config_reg(struct target *target, target_addr_t legal_address,
72 uint32_t num_words, target_addr_t illegal_address, bool run_sbbusyerror_test);
73 void write_memory_sba_simple(struct target *target, target_addr_t addr, uint32_t *write_data,
74 uint32_t write_size, uint32_t sbcs);
75 void read_memory_sba_simple(struct target *target, target_addr_t addr,
76 uint32_t *rd_buf, uint32_t read_size, uint32_t sbcs);
77 static int riscv013_test_compliance(struct target *target);
78
79 /**
80 * Since almost everything can be accomplished by scanning the dbus register, all
81 * functions here assume dbus is already selected. The exceptions are functions
82 * called directly by OpenOCD, which can't assume anything about what's
83 * currently in IR. They should set IR to dbus explicitly.
84 */
85
86 #define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
87 #define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
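/* Illustrative use of the field helpers above (a sketch, not code that is
 * called anywhere): pull a multi-bit field out of a register value and write
 * a new value back into it.
 *
 *   unsigned cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
 *   abstractcs = set_field(abstractcs, DM_ABSTRACTCS_CMDERR, CMDERR_NONE);
 */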
88
89 #define DIM(x) (sizeof(x)/sizeof(*x))
90
91 #define CSR_DCSR_CAUSE_SWBP 1
92 #define CSR_DCSR_CAUSE_TRIGGER 2
93 #define CSR_DCSR_CAUSE_DEBUGINT 3
94 #define CSR_DCSR_CAUSE_STEP 4
95 #define CSR_DCSR_CAUSE_HALT 5
96 #define CSR_DCSR_CAUSE_GROUP 6
97
98 #define RISCV013_INFO(r) riscv013_info_t *r = get_info(target)
99
100 /*** JTAG registers. ***/
101
102 typedef enum {
103 DMI_OP_NOP = 0,
104 DMI_OP_READ = 1,
105 DMI_OP_WRITE = 2
106 } dmi_op_t;
107 typedef enum {
108 DMI_STATUS_SUCCESS = 0,
109 DMI_STATUS_FAILED = 2,
110 DMI_STATUS_BUSY = 3
111 } dmi_status_t;
112
113 typedef enum slot {
114 SLOT0,
115 SLOT1,
116 SLOT_LAST,
117 } slot_t;
118
119 /*** Debug Bus registers. ***/
120
121 #define CMDERR_NONE 0
122 #define CMDERR_BUSY 1
123 #define CMDERR_NOT_SUPPORTED 2
124 #define CMDERR_EXCEPTION 3
125 #define CMDERR_HALT_RESUME 4
126 #define CMDERR_OTHER 7
127
128 /*** Info about the core being debugged. ***/
129
130 struct trigger {
131 uint64_t address;
132 uint32_t length;
133 uint64_t mask;
134 uint64_t value;
135 bool read, write, execute;
136 int unique_id;
137 };
138
139 typedef enum {
140 YNM_MAYBE,
141 YNM_YES,
142 YNM_NO
143 } yes_no_maybe_t;
144
145 typedef struct {
146 struct list_head list;
147 int abs_chain_position;
148
149 /* The number of harts connected to this DM. */
150 int hart_count;
152 /* Indicates we already reset this DM, so we don't need to do it again. */
152 bool was_reset;
153 /* Targets that are connected to this DM. */
154 struct list_head target_list;
155 /* The currently selected hartid on this DM. */
156 int current_hartid;
157 bool hasel_supported;
158
159 /* The program buffer stores executable code. 0 is an illegal instruction,
160 * so we use 0 to mean the cached value is invalid. */
161 uint32_t progbuf_cache[16];
162 } dm013_info_t;
163
164 typedef struct {
165 struct list_head list;
166 struct target *target;
167 } target_list_t;
168
169 typedef struct {
170 /* The index used to address this hart in its DM. */
171 unsigned index;
172 /* Number of address bits in the dbus register. */
173 unsigned abits;
174 /* Number of abstract command data registers. */
175 unsigned datacount;
176 /* Number of words in the Program Buffer. */
177 unsigned progbufsize;
178
179 /* We cache the read-only bits of sbcs here. */
180 uint32_t sbcs;
181
182 yes_no_maybe_t progbuf_writable;
183 /* We only need the address so that we know the alignment of the buffer. */
184 riscv_addr_t progbuf_address;
185
186 /* Number of run-test/idle cycles the target requests we do after each dbus
187 * access. */
188 unsigned int dtmcs_idle;
189
190 /* This value is incremented every time a dbus access comes back as "busy".
191 * It's used to determine how many run-test/idle cycles to feed the target
192 * in between accesses. */
193 unsigned int dmi_busy_delay;
194
195 /* Number of run-test/idle cycles to add between consecutive bus master
196 * reads/writes respectively. */
197 unsigned int bus_master_write_delay, bus_master_read_delay;
198
199 /* This value is increased every time we try to execute two commands
200 * consecutively and the second one fails because the previous one hasn't
201 * completed yet. It's used to add extra run-test/idle cycles after
202 * starting a command, so we don't have to waste time checking for busy to
203 * go low. */
204 unsigned int ac_busy_delay;
205
206 bool abstract_read_csr_supported;
207 bool abstract_write_csr_supported;
208 bool abstract_read_fpr_supported;
209 bool abstract_write_fpr_supported;
210
211 /* When a function returns some error due to a failure indicated by the
212 * target in cmderr, the caller can look here to see what that error was.
213 * (Compare with errno.) */
214 uint8_t cmderr;
215
216 /* Some fields from hartinfo. */
217 uint8_t datasize;
218 uint8_t dataaccess;
219 int16_t dataaddr;
220
221 /* The width of the hartsel field. */
222 unsigned hartsellen;
223
224 /* DM that provides access to this target. */
225 dm013_info_t *dm;
226 } riscv013_info_t;
227
228 LIST_HEAD(dm_list);
229
230 static riscv013_info_t *get_info(const struct target *target)
231 {
232 riscv_info_t *info = (riscv_info_t *) target->arch_info;
233 return (riscv013_info_t *) info->version_specific;
234 }
235
236 /**
237 * Return the DM structure for this target. If the target doesn't have one yet,
238 * look it up in the global list of DMs; if it's not there either, create one
239 * and initialize it to 0.
240 */
241 dm013_info_t *get_dm(struct target *target)
242 {
243 RISCV013_INFO(info);
244 if (info->dm)
245 return info->dm;
246
247 int abs_chain_position = target->tap->abs_chain_position;
248
249 dm013_info_t *entry;
250 dm013_info_t *dm = NULL;
251 list_for_each_entry(entry, &dm_list, list) {
252 if (entry->abs_chain_position == abs_chain_position) {
253 dm = entry;
254 break;
255 }
256 }
257
258 if (!dm) {
259 LOG_DEBUG("[%d] Allocating new DM", target->coreid);
260 dm = calloc(1, sizeof(dm013_info_t));
261 if (!dm)
262 return NULL;
263 dm->abs_chain_position = abs_chain_position;
264 dm->current_hartid = -1;
265 dm->hart_count = -1;
266 INIT_LIST_HEAD(&dm->target_list);
267 list_add(&dm->list, &dm_list);
268 }
269
270 info->dm = dm;
271 target_list_t *target_entry;
272 list_for_each_entry(target_entry, &dm->target_list, list) {
273 if (target_entry->target == target)
274 return dm;
275 }
276 target_entry = calloc(1, sizeof(*target_entry));
277 if (!target_entry) {
278 info->dm = NULL;
279 return NULL;
280 }
281 target_entry->target = target;
282 list_add(&target_entry->list, &dm->target_list);
283
284 return dm;
285 }
286
287 static uint32_t set_hartsel(uint32_t initial, uint32_t index)
288 {
289 initial &= ~DM_DMCONTROL_HARTSELLO;
290 initial &= ~DM_DMCONTROL_HARTSELHI;
291
292 uint32_t index_lo = index & ((1 << DM_DMCONTROL_HARTSELLO_LENGTH) - 1);
293 initial |= index_lo << DM_DMCONTROL_HARTSELLO_OFFSET;
294 uint32_t index_hi = index >> DM_DMCONTROL_HARTSELLO_LENGTH;
295 assert(index_hi < 1 << DM_DMCONTROL_HARTSELHI_LENGTH);
296 initial |= index_hi << DM_DMCONTROL_HARTSELHI_OFFSET;
297
298 return initial;
299 }
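/* Worked example for set_hartsel() (illustrative only, assuming the usual
 * 0.13 layout where hartsello holds the low 10 bits of the index): for
 * index 0x403, index_lo = 0x003 and index_hi = 0x1, and the two values land
 * in the HARTSELLO and HARTSELHI fields of dmcontrol respectively. */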
300
301 static void decode_dmi(char *text, unsigned address, unsigned data)
302 {
303 static const struct {
304 unsigned address;
305 uint64_t mask;
306 const char *name;
307 } description[] = {
308 { DM_DMCONTROL, DM_DMCONTROL_HALTREQ, "haltreq" },
309 { DM_DMCONTROL, DM_DMCONTROL_RESUMEREQ, "resumereq" },
310 { DM_DMCONTROL, DM_DMCONTROL_HARTRESET, "hartreset" },
311 { DM_DMCONTROL, DM_DMCONTROL_HASEL, "hasel" },
312 { DM_DMCONTROL, DM_DMCONTROL_HARTSELHI, "hartselhi" },
313 { DM_DMCONTROL, DM_DMCONTROL_HARTSELLO, "hartsello" },
314 { DM_DMCONTROL, DM_DMCONTROL_NDMRESET, "ndmreset" },
315 { DM_DMCONTROL, DM_DMCONTROL_DMACTIVE, "dmactive" },
316 { DM_DMCONTROL, DM_DMCONTROL_ACKHAVERESET, "ackhavereset" },
317
318 { DM_DMSTATUS, DM_DMSTATUS_IMPEBREAK, "impebreak" },
319 { DM_DMSTATUS, DM_DMSTATUS_ALLHAVERESET, "allhavereset" },
320 { DM_DMSTATUS, DM_DMSTATUS_ANYHAVERESET, "anyhavereset" },
321 { DM_DMSTATUS, DM_DMSTATUS_ALLRESUMEACK, "allresumeack" },
322 { DM_DMSTATUS, DM_DMSTATUS_ANYRESUMEACK, "anyresumeack" },
323 { DM_DMSTATUS, DM_DMSTATUS_ALLNONEXISTENT, "allnonexistent" },
324 { DM_DMSTATUS, DM_DMSTATUS_ANYNONEXISTENT, "anynonexistent" },
325 { DM_DMSTATUS, DM_DMSTATUS_ALLUNAVAIL, "allunavail" },
326 { DM_DMSTATUS, DM_DMSTATUS_ANYUNAVAIL, "anyunavail" },
327 { DM_DMSTATUS, DM_DMSTATUS_ALLRUNNING, "allrunning" },
328 { DM_DMSTATUS, DM_DMSTATUS_ANYRUNNING, "anyrunning" },
329 { DM_DMSTATUS, DM_DMSTATUS_ALLHALTED, "allhalted" },
330 { DM_DMSTATUS, DM_DMSTATUS_ANYHALTED, "anyhalted" },
331 { DM_DMSTATUS, DM_DMSTATUS_AUTHENTICATED, "authenticated" },
332 { DM_DMSTATUS, DM_DMSTATUS_AUTHBUSY, "authbusy" },
333 { DM_DMSTATUS, DM_DMSTATUS_HASRESETHALTREQ, "hasresethaltreq" },
334 { DM_DMSTATUS, DM_DMSTATUS_CONFSTRPTRVALID, "confstrptrvalid" },
335 { DM_DMSTATUS, DM_DMSTATUS_VERSION, "version" },
336
337 { DM_ABSTRACTCS, DM_ABSTRACTCS_PROGBUFSIZE, "progbufsize" },
338 { DM_ABSTRACTCS, DM_ABSTRACTCS_BUSY, "busy" },
339 { DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR, "cmderr" },
340 { DM_ABSTRACTCS, DM_ABSTRACTCS_DATACOUNT, "datacount" },
341
342 { DM_COMMAND, DM_COMMAND_CMDTYPE, "cmdtype" },
343
344 { DM_SBCS, DM_SBCS_SBVERSION, "sbversion" },
345 { DM_SBCS, DM_SBCS_SBBUSYERROR, "sbbusyerror" },
346 { DM_SBCS, DM_SBCS_SBBUSY, "sbbusy" },
347 { DM_SBCS, DM_SBCS_SBREADONADDR, "sbreadonaddr" },
348 { DM_SBCS, DM_SBCS_SBACCESS, "sbaccess" },
349 { DM_SBCS, DM_SBCS_SBAUTOINCREMENT, "sbautoincrement" },
350 { DM_SBCS, DM_SBCS_SBREADONDATA, "sbreadondata" },
351 { DM_SBCS, DM_SBCS_SBERROR, "sberror" },
352 { DM_SBCS, DM_SBCS_SBASIZE, "sbasize" },
353 { DM_SBCS, DM_SBCS_SBACCESS128, "sbaccess128" },
354 { DM_SBCS, DM_SBCS_SBACCESS64, "sbaccess64" },
355 { DM_SBCS, DM_SBCS_SBACCESS32, "sbaccess32" },
356 { DM_SBCS, DM_SBCS_SBACCESS16, "sbaccess16" },
357 { DM_SBCS, DM_SBCS_SBACCESS8, "sbaccess8" },
358 };
359
360 text[0] = 0;
361 for (unsigned i = 0; i < DIM(description); i++) {
362 if (description[i].address == address) {
363 uint64_t mask = description[i].mask;
364 unsigned value = get_field(data, mask);
365 if (value) {
366 if (i > 0)
367 *(text++) = ' ';
368 if (mask & (mask >> 1)) {
369 /* If the field is more than 1 bit wide. */
370 sprintf(text, "%s=%d", description[i].name, value);
371 } else {
372 strcpy(text, description[i].name);
373 }
374 text += strlen(text);
375 }
376 }
377 }
378 }
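/* Example of what decode_dmi() produces (a sketch, assuming the standard
 * 0.13 bit positions): for address DM_DMCONTROL and data 0x80000001 the
 * decoded text is "haltreq dmactive", since only those two fields are set. */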
379
380 static void dump_field(int idle, const struct scan_field *field)
381 {
382 static const char * const op_string[] = {"-", "r", "w", "?"};
383 static const char * const status_string[] = {"+", "?", "F", "b"};
384
385 if (debug_level < LOG_LVL_DEBUG)
386 return;
387
388 uint64_t out = buf_get_u64(field->out_value, 0, field->num_bits);
389 unsigned int out_op = get_field(out, DTM_DMI_OP);
390 unsigned int out_data = get_field(out, DTM_DMI_DATA);
391 unsigned int out_address = out >> DTM_DMI_ADDRESS_OFFSET;
392
393 uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
394 unsigned int in_op = get_field(in, DTM_DMI_OP);
395 unsigned int in_data = get_field(in, DTM_DMI_DATA);
396 unsigned int in_address = in >> DTM_DMI_ADDRESS_OFFSET;
397
398 log_printf_lf(LOG_LVL_DEBUG,
399 __FILE__, __LINE__, "scan",
400 "%db %s %08x @%02x -> %s %08x @%02x; %di",
401 field->num_bits, op_string[out_op], out_data, out_address,
402 status_string[in_op], in_data, in_address, idle);
403
404 char out_text[500];
405 char in_text[500];
406 decode_dmi(out_text, out_address, out_data);
407 decode_dmi(in_text, in_address, in_data);
408 if (in_text[0] || out_text[0]) {
409 log_printf_lf(LOG_LVL_DEBUG, __FILE__, __LINE__, "scan", "%s -> %s",
410 out_text, in_text);
411 }
412 }
413
414 /*** Utility functions. ***/
415
416 static void select_dmi(struct target *target)
417 {
418 if (bscan_tunnel_ir_width != 0) {
419 select_dmi_via_bscan(target);
420 return;
421 }
422 jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
423 }
424
425 static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
426 {
427 struct scan_field field;
428 uint8_t in_value[4];
429 uint8_t out_value[4] = { 0 };
430
431 if (bscan_tunnel_ir_width != 0)
432 return dtmcontrol_scan_via_bscan(target, out);
433
434 buf_set_u32(out_value, 0, 32, out);
435
436 jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);
437
438 field.num_bits = 32;
439 field.out_value = out_value;
440 field.in_value = in_value;
441 jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
442
443 /* Always return to dmi. */
444 select_dmi(target);
445
446 int retval = jtag_execute_queue();
447 if (retval != ERROR_OK) {
448 LOG_ERROR("failed jtag scan: %d", retval);
449 return retval;
450 }
451
452 uint32_t in = buf_get_u32(field.in_value, 0, 32);
453 LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);
454
455 return in;
456 }
457
458 static void increase_dmi_busy_delay(struct target *target)
459 {
460 riscv013_info_t *info = get_info(target);
461 info->dmi_busy_delay += info->dmi_busy_delay / 10 + 1;
462 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
463 info->dtmcs_idle, info->dmi_busy_delay,
464 info->ac_busy_delay);
465
466 dtmcontrol_scan(target, DTM_DTMCS_DMIRESET);
467 }
468
469 /**
470 * exec: If this is set, assume the scan results in an execution, so more
471 * run-test/idle cycles may be required.
472 */
473 static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
474 uint32_t *data_in, dmi_op_t op, uint32_t address_out, uint32_t data_out,
475 bool exec)
476 {
477 riscv013_info_t *info = get_info(target);
478 RISCV_INFO(r);
479 unsigned num_bits = info->abits + DTM_DMI_OP_LENGTH + DTM_DMI_DATA_LENGTH;
480 size_t num_bytes = (num_bits + 7) / 8;
481 uint8_t in[num_bytes];
482 uint8_t out[num_bytes];
483 struct scan_field field = {
484 .num_bits = num_bits,
485 .out_value = out,
486 .in_value = in
487 };
488 riscv_bscan_tunneled_scan_context_t bscan_ctxt;
489
490 if (r->reset_delays_wait >= 0) {
491 r->reset_delays_wait--;
492 if (r->reset_delays_wait < 0) {
493 info->dmi_busy_delay = 0;
494 info->ac_busy_delay = 0;
495 }
496 }
497
498 memset(in, 0, num_bytes);
499 memset(out, 0, num_bytes);
500
501 assert(info->abits != 0);
502
503 buf_set_u32(out, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, op);
504 buf_set_u32(out, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, data_out);
505 buf_set_u32(out, DTM_DMI_ADDRESS_OFFSET, info->abits, address_out);
506
507 /* I wanted to place this code in a different function, but because of the way JTAG
508 command queueing works in the jtag handling functions, the scan fields either have to
509 be heap allocated, global/static, or else they need to stay on the stack until
510 the jtag_execute_queue() call. Heap or static fields don't seem the best fit in this
511 case. Declaring stack-based field values in a subsidiary function call wouldn't
512 work. */
513 if (bscan_tunnel_ir_width != 0) {
514 riscv_add_bscan_tunneled_scan(target, &field, &bscan_ctxt);
515 } else {
516 /* Assume dbus is already selected. */
517 jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
518 }
519
520 int idle_count = info->dmi_busy_delay;
521 if (exec)
522 idle_count += info->ac_busy_delay;
523
524 if (idle_count)
525 jtag_add_runtest(idle_count, TAP_IDLE);
526
527 int retval = jtag_execute_queue();
528 if (retval != ERROR_OK) {
529 LOG_ERROR("dmi_scan failed jtag scan");
530 if (data_in)
531 *data_in = ~0;
532 return DMI_STATUS_FAILED;
533 }
534
535 if (bscan_tunnel_ir_width != 0) {
536 /* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
537 buffer_shr(in, num_bytes, 1);
538 }
539
540 if (data_in)
541 *data_in = buf_get_u32(in, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);
542
543 if (address_in)
544 *address_in = buf_get_u32(in, DTM_DMI_ADDRESS_OFFSET, info->abits);
545 dump_field(idle_count, &field);
546 return buf_get_u32(in, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
547 }
548
549 /**
550 * @param data_in The data we received from the target.
551 * @param dmi_op The operation to perform (read/write/nop).
552 * @param dmi_busy_encountered
553 * If non-NULL, will be updated to reflect whether DMI busy was
554 * encountered while executing this operation or not.
555 * @param address The address argument to that operation.
556 * @param data_out The data to send to the target.
557 * @param exec When true, this scan will execute something, so extra RTI
558 * cycles may be added.
559 * @param ensure_success
560 * Scan a nop after the requested operation, ensuring the
561 * DMI operation succeeded.
562 */
563 static int dmi_op_timeout(struct target *target, uint32_t *data_in,
564 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
565 uint32_t data_out, int timeout_sec, bool exec, bool ensure_success)
566 {
567 select_dmi(target);
568
569 dmi_status_t status;
570 uint32_t address_in;
571
572 if (dmi_busy_encountered)
573 *dmi_busy_encountered = false;
574
575 const char *op_name;
576 switch (dmi_op) {
577 case DMI_OP_NOP:
578 op_name = "nop";
579 break;
580 case DMI_OP_READ:
581 op_name = "read";
582 break;
583 case DMI_OP_WRITE:
584 op_name = "write";
585 break;
586 default:
587 LOG_ERROR("Invalid DMI operation: %d", dmi_op);
588 return ERROR_FAIL;
589 }
590
591 time_t start = time(NULL);
592 /* This first loop performs the request. Note that if for some reason this
593 * stays busy, it is actually due to the previous access. */
594 while (1) {
595 status = dmi_scan(target, NULL, NULL, dmi_op, address, data_out,
596 exec);
597 if (status == DMI_STATUS_BUSY) {
598 increase_dmi_busy_delay(target);
599 if (dmi_busy_encountered)
600 *dmi_busy_encountered = true;
601 } else if (status == DMI_STATUS_SUCCESS) {
602 break;
603 } else {
604 LOG_ERROR("failed %s at 0x%x, status=%d", op_name, address, status);
605 return ERROR_FAIL;
606 }
607 if (time(NULL) - start > timeout_sec)
608 return ERROR_TIMEOUT_REACHED;
609 }
610
611 if (status != DMI_STATUS_SUCCESS) {
612 LOG_ERROR("Failed %s at 0x%x; status=%d", op_name, address, status);
613 return ERROR_FAIL;
614 }
615
616 if (ensure_success) {
617 /* This second loop ensures the request succeeded, and gets back data.
618 * Note that NOP can result in a 'busy' result as well, but that would be
619 * noticed on the next DMI access we do. */
620 while (1) {
621 status = dmi_scan(target, &address_in, data_in, DMI_OP_NOP, address, 0,
622 false);
623 if (status == DMI_STATUS_BUSY) {
624 increase_dmi_busy_delay(target);
625 if (dmi_busy_encountered)
626 *dmi_busy_encountered = true;
627 } else if (status == DMI_STATUS_SUCCESS) {
628 break;
629 } else {
630 if (data_in) {
631 LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
632 op_name, address, *data_in, status);
633 } else {
634 LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name, address,
635 status);
636 }
637 return ERROR_FAIL;
638 }
639 if (time(NULL) - start > timeout_sec)
640 return ERROR_TIMEOUT_REACHED;
641 }
642 }
643
644 return ERROR_OK;
645 }
646
647 static int dmi_op(struct target *target, uint32_t *data_in,
648 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
649 uint32_t data_out, bool exec, bool ensure_success)
650 {
651 int result = dmi_op_timeout(target, data_in, dmi_busy_encountered, dmi_op,
652 address, data_out, riscv_command_timeout_sec, exec, ensure_success);
653 if (result == ERROR_TIMEOUT_REACHED) {
654 LOG_ERROR("DMI operation didn't complete in %d seconds. The target is "
655 "either really slow or broken. You could increase the "
656 "timeout with riscv set_command_timeout_sec.",
657 riscv_command_timeout_sec);
658 return ERROR_FAIL;
659 }
660 return result;
661 }
662
663 static int dmi_read(struct target *target, uint32_t *value, uint32_t address)
664 {
665 return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, false, true);
666 }
667
668 static int dmi_read_exec(struct target *target, uint32_t *value, uint32_t address)
669 {
670 return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, true, true);
671 }
672
673 static int dmi_write(struct target *target, uint32_t address, uint32_t value)
674 {
675 return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, false, true);
676 }
677
678 static int dmi_write_exec(struct target *target, uint32_t address,
679 uint32_t value, bool ensure_success)
680 {
681 return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, true, ensure_success);
682 }
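/* Typical use of the dmi_read()/dmi_write() wrappers above (illustrative
 * sketch only, not part of any particular flow): read-modify-write of
 * dmcontrol to request a halt.
 *
 *   uint32_t dmcontrol;
 *   if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
 *       return ERROR_FAIL;
 *   dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HALTREQ, 1);
 *   if (dmi_write(target, DM_DMCONTROL, dmcontrol) != ERROR_OK)
 *       return ERROR_FAIL;
 */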
683
684 int dmstatus_read_timeout(struct target *target, uint32_t *dmstatus,
685 bool authenticated, unsigned timeout_sec)
686 {
687 int result = dmi_op_timeout(target, dmstatus, NULL, DMI_OP_READ,
688 DM_DMSTATUS, 0, timeout_sec, false, true);
689 if (result != ERROR_OK)
690 return result;
691 int dmstatus_version = get_field(*dmstatus, DM_DMSTATUS_VERSION);
692 if (dmstatus_version != 2 && dmstatus_version != 3) {
693 LOG_ERROR("OpenOCD only supports Debug Module version 2 (0.13) and 3 (0.14), not "
694 "%d (dmstatus=0x%x). This error might be caused by a JTAG "
695 "signal issue. Try reducing the JTAG clock speed.",
696 get_field(*dmstatus, DM_DMSTATUS_VERSION), *dmstatus);
697 } else if (authenticated && !get_field(*dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
698 LOG_ERROR("Debugger is not authenticated to target Debug Module. "
699 "(dmstatus=0x%x). Use `riscv authdata_read` and "
700 "`riscv authdata_write` commands to authenticate.", *dmstatus);
701 return ERROR_FAIL;
702 }
703 return ERROR_OK;
704 }
705
706 int dmstatus_read(struct target *target, uint32_t *dmstatus,
707 bool authenticated)
708 {
709 return dmstatus_read_timeout(target, dmstatus, authenticated,
710 riscv_command_timeout_sec);
711 }
712
713 static void increase_ac_busy_delay(struct target *target)
714 {
715 riscv013_info_t *info = get_info(target);
716 info->ac_busy_delay += info->ac_busy_delay / 10 + 1;
717 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
718 info->dtmcs_idle, info->dmi_busy_delay,
719 info->ac_busy_delay);
720 }
721
722 uint32_t abstract_register_size(unsigned width)
723 {
724 switch (width) {
725 case 32:
726 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 2);
727 case 64:
728 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 3);
729 case 128:
730 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 4);
731 default:
732 LOG_ERROR("Unsupported register width: %d", width);
733 return 0;
734 }
735 }
736
737 static int wait_for_idle(struct target *target, uint32_t *abstractcs)
738 {
739 RISCV013_INFO(info);
740 time_t start = time(NULL);
741 while (1) {
742 if (dmi_read(target, abstractcs, DM_ABSTRACTCS) != ERROR_OK)
743 return ERROR_FAIL;
744
745 if (get_field(*abstractcs, DM_ABSTRACTCS_BUSY) == 0)
746 return ERROR_OK;
747
748 if (time(NULL) - start > riscv_command_timeout_sec) {
749 info->cmderr = get_field(*abstractcs, DM_ABSTRACTCS_CMDERR);
750 if (info->cmderr != CMDERR_NONE) {
751 const char *errors[8] = {
752 "none",
753 "busy",
754 "not supported",
755 "exception",
756 "halt/resume",
757 "reserved",
758 "reserved",
759 "other" };
760
761 LOG_ERROR("Abstract command ended in error '%s' (abstractcs=0x%x)",
762 errors[info->cmderr], *abstractcs);
763 }
764
765 LOG_ERROR("Timed out after %ds waiting for busy to go low (abstractcs=0x%x). "
766 "Increase the timeout with riscv set_command_timeout_sec.",
767 riscv_command_timeout_sec,
768 *abstractcs);
769 return ERROR_FAIL;
770 }
771 }
772 }
773
774 static int execute_abstract_command(struct target *target, uint32_t command)
775 {
776 RISCV013_INFO(info);
777 if (debug_level >= LOG_LVL_DEBUG) {
778 switch (get_field(command, DM_COMMAND_CMDTYPE)) {
779 case 0:
780 LOG_DEBUG("command=0x%x; access register, size=%d, postexec=%d, "
781 "transfer=%d, write=%d, regno=0x%x",
782 command,
783 8 << get_field(command, AC_ACCESS_REGISTER_AARSIZE),
784 get_field(command, AC_ACCESS_REGISTER_POSTEXEC),
785 get_field(command, AC_ACCESS_REGISTER_TRANSFER),
786 get_field(command, AC_ACCESS_REGISTER_WRITE),
787 get_field(command, AC_ACCESS_REGISTER_REGNO));
788 break;
789 default:
790 LOG_DEBUG("command=0x%x", command);
791 break;
792 }
793 }
794
795 if (dmi_write_exec(target, DM_COMMAND, command, false) != ERROR_OK)
796 return ERROR_FAIL;
797
798 uint32_t abstractcs = 0;
799 int result = wait_for_idle(target, &abstractcs);
800
801 info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
802 if (info->cmderr != 0 || result != ERROR_OK) {
803 LOG_DEBUG("command 0x%x failed; abstractcs=0x%x", command, abstractcs);
804 /* Clear the error. */
805 dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
806 return ERROR_FAIL;
807 }
808
809 return ERROR_OK;
810 }
811
812 static riscv_reg_t read_abstract_arg(struct target *target, unsigned index,
813 unsigned size_bits)
814 {
815 riscv_reg_t value = 0;
816 uint32_t v;
817 unsigned offset = index * size_bits / 32;
818 switch (size_bits) {
819 default:
820 LOG_ERROR("Unsupported size: %d bits", size_bits);
821 return ~0;
822 case 64:
823 dmi_read(target, &v, DM_DATA0 + offset + 1);
824 value |= ((uint64_t) v) << 32;
825 /* falls through */
826 case 32:
827 dmi_read(target, &v, DM_DATA0 + offset);
828 value |= v;
829 }
830 return value;
831 }
832
833 static int write_abstract_arg(struct target *target, unsigned index,
834 riscv_reg_t value, unsigned size_bits)
835 {
836 unsigned offset = index * size_bits / 32;
837 switch (size_bits) {
838 default:
839 LOG_ERROR("Unsupported size: %d bits", size_bits);
840 return ERROR_FAIL;
841 case 64:
842 dmi_write(target, DM_DATA0 + offset + 1, value >> 32);
843 /* falls through */
844 case 32:
845 dmi_write(target, DM_DATA0 + offset, value);
846 }
847 return ERROR_OK;
848 }
849
850 /**
851 * @param size Size of the access, in bits.
852 */
853 static uint32_t access_register_command(struct target *target, uint32_t number,
854 unsigned size, uint32_t flags)
855 {
856 uint32_t command = set_field(0, DM_COMMAND_CMDTYPE, 0);
857 switch (size) {
858 case 32:
859 command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 2);
860 break;
861 case 64:
862 command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 3);
863 break;
864 default:
865 LOG_ERROR("%d-bit register %s not supported.", size,
866 gdb_regno_name(number));
867 assert(0);
868 }
869
870 if (number <= GDB_REGNO_XPR31) {
871 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
872 0x1000 + number - GDB_REGNO_ZERO);
873 } else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
874 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
875 0x1020 + number - GDB_REGNO_FPR0);
876 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
877 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
878 number - GDB_REGNO_CSR0);
879 } else if (number >= GDB_REGNO_COUNT) {
880 /* Custom register. */
881 assert(target->reg_cache->reg_list[number].arch_info);
882 riscv_reg_info_t *reg_info = target->reg_cache->reg_list[number].arch_info;
883 assert(reg_info);
884 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
885 0xc000 + reg_info->custom_number);
886 } else {
887 assert(0);
888 }
889
890 command |= flags;
891
892 return command;
893 }
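/* Sketch of how an abstract register access is put together with the helper
 * above (illustrative only; the real flows live in register_read_abstract()
 * and register_write_abstract() below): read misa into an abstract argument.
 *
 *   riscv_reg_t misa = 0;
 *   uint32_t command = access_register_command(target, GDB_REGNO_MISA,
 *           riscv_xlen(target), AC_ACCESS_REGISTER_TRANSFER);
 *   if (execute_abstract_command(target, command) == ERROR_OK)
 *       misa = read_abstract_arg(target, 0, riscv_xlen(target));
 */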
894
895 static int register_read_abstract(struct target *target, uint64_t *value,
896 uint32_t number, unsigned size)
897 {
898 RISCV013_INFO(info);
899
900 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
901 !info->abstract_read_fpr_supported)
902 return ERROR_FAIL;
903 if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
904 !info->abstract_read_csr_supported)
905 return ERROR_FAIL;
906 /* The spec doesn't define abstract register numbers for vector registers. */
907 if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31)
908 return ERROR_FAIL;
909
910 uint32_t command = access_register_command(target, number, size,
911 AC_ACCESS_REGISTER_TRANSFER);
912
913 int result = execute_abstract_command(target, command);
914 if (result != ERROR_OK) {
915 if (info->cmderr == CMDERR_NOT_SUPPORTED) {
916 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
917 info->abstract_read_fpr_supported = false;
918 LOG_INFO("Disabling abstract command reads from FPRs.");
919 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
920 info->abstract_read_csr_supported = false;
921 LOG_INFO("Disabling abstract command reads from CSRs.");
922 }
923 }
924 return result;
925 }
926
927 if (value)
928 *value = read_abstract_arg(target, 0, size);
929
930 return ERROR_OK;
931 }
932
933 static int register_write_abstract(struct target *target, uint32_t number,
934 uint64_t value, unsigned size)
935 {
936 RISCV013_INFO(info);
937
938 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
939 !info->abstract_write_fpr_supported)
940 return ERROR_FAIL;
941 if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
942 !info->abstract_write_csr_supported)
943 return ERROR_FAIL;
944
945 uint32_t command = access_register_command(target, number, size,
946 AC_ACCESS_REGISTER_TRANSFER |
947 AC_ACCESS_REGISTER_WRITE);
948
949 if (write_abstract_arg(target, 0, value, size) != ERROR_OK)
950 return ERROR_FAIL;
951
952 int result = execute_abstract_command(target, command);
953 if (result != ERROR_OK) {
954 if (info->cmderr == CMDERR_NOT_SUPPORTED) {
955 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
956 info->abstract_write_fpr_supported = false;
957 LOG_INFO("Disabling abstract command writes to FPRs.");
958 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
959 info->abstract_write_csr_supported = false;
960 LOG_INFO("Disabling abstract command writes to CSRs.");
961 }
962 }
963 return result;
964 }
965
966 return ERROR_OK;
967 }
968
969 /*
970 * Sets the AAMSIZE field of a memory access abstract command based on
971 * the width (bits).
972 */
973 static uint32_t abstract_memory_size(unsigned width)
974 {
975 switch (width) {
976 case 8:
977 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 0);
978 case 16:
979 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 1);
980 case 32:
981 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 2);
982 case 64:
983 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 3);
984 case 128:
985 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 4);
986 default:
987 LOG_ERROR("Unsupported memory width: %d", width);
988 return 0;
989 }
990 }
991
992 /*
993 * Creates a memory access abstract command.
994 */
995 static uint32_t access_memory_command(struct target *target, bool virtual,
996 unsigned width, bool postincrement, bool write)
997 {
998 uint32_t command = set_field(0, AC_ACCESS_MEMORY_CMDTYPE, 2);
999 command = set_field(command, AC_ACCESS_MEMORY_AAMVIRTUAL, virtual);
1000 command |= abstract_memory_size(width);
1001 command = set_field(command, AC_ACCESS_MEMORY_AAMPOSTINCREMENT,
1002 postincrement);
1003 command = set_field(command, AC_ACCESS_MEMORY_WRITE, write);
1004
1005 return command;
1006 }
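/* Illustrative sketch: the command word for a 32-bit physical-memory write
 * with post-increment would be built as
 *
 *   uint32_t command = access_memory_command(target, false, 32, true, true);
 *
 * with the target address and the write data placed in the abstract command
 * argument registers (see write_abstract_arg()) before it is executed. */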
1007
1008 static int examine_progbuf(struct target *target)
1009 {
1010 riscv013_info_t *info = get_info(target);
1011
1012 if (info->progbuf_writable != YNM_MAYBE)
1013 return ERROR_OK;
1014
1015 /* Figure out if progbuf is writable. */
1016
1017 if (info->progbufsize < 1) {
1018 info->progbuf_writable = YNM_NO;
1019 LOG_INFO("No program buffer present.");
1020 return ERROR_OK;
1021 }
1022
1023 uint64_t s0;
1024 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1025 return ERROR_FAIL;
1026
1027 struct riscv_program program;
1028 riscv_program_init(&program, target);
1029 riscv_program_insert(&program, auipc(S0));
1030 if (riscv_program_exec(&program, target) != ERROR_OK)
1031 return ERROR_FAIL;
1032
1033 if (register_read_direct(target, &info->progbuf_address, GDB_REGNO_S0) != ERROR_OK)
1034 return ERROR_FAIL;
1035
1036 riscv_program_init(&program, target);
1037 riscv_program_insert(&program, sw(S0, S0, 0));
1038 int result = riscv_program_exec(&program, target);
1039
1040 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1041 return ERROR_FAIL;
1042
1043 if (result != ERROR_OK) {
1044 /* This program might have failed if the program buffer is not
1045 * writable. */
1046 info->progbuf_writable = YNM_NO;
1047 return ERROR_OK;
1048 }
1049
1050 uint32_t written;
1051 if (dmi_read(target, &written, DM_PROGBUF0) != ERROR_OK)
1052 return ERROR_FAIL;
1053 if (written == (uint32_t) info->progbuf_address) {
1054 LOG_INFO("progbuf is writable at 0x%" PRIx64,
1055 info->progbuf_address);
1056 info->progbuf_writable = YNM_YES;
1057
1058 } else {
1059 LOG_INFO("progbuf is not writable at 0x%" PRIx64,
1060 info->progbuf_address);
1061 info->progbuf_writable = YNM_NO;
1062 }
1063
1064 return ERROR_OK;
1065 }
1066
1067 static int is_fpu_reg(uint32_t gdb_regno)
1068 {
1069 return (gdb_regno >= GDB_REGNO_FPR0 && gdb_regno <= GDB_REGNO_FPR31) ||
1070 (gdb_regno == GDB_REGNO_CSR0 + CSR_FFLAGS) ||
1071 (gdb_regno == GDB_REGNO_CSR0 + CSR_FRM) ||
1072 (gdb_regno == GDB_REGNO_CSR0 + CSR_FCSR);
1073 }
1074
1075 static int is_vector_reg(uint32_t gdb_regno)
1076 {
1077 return (gdb_regno >= GDB_REGNO_V0 && gdb_regno <= GDB_REGNO_V31) ||
1078 gdb_regno == GDB_REGNO_VSTART ||
1079 gdb_regno == GDB_REGNO_VXSAT ||
1080 gdb_regno == GDB_REGNO_VXRM ||
1081 gdb_regno == GDB_REGNO_VL ||
1082 gdb_regno == GDB_REGNO_VTYPE ||
1083 gdb_regno == GDB_REGNO_VLENB;
1084 }
1085
1086 static int prep_for_register_access(struct target *target, uint64_t *mstatus,
1087 int regno)
1088 {
1089 if (is_fpu_reg(regno) || is_vector_reg(regno)) {
1090 if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
1091 return ERROR_FAIL;
1092 if (is_fpu_reg(regno) && (*mstatus & MSTATUS_FS) == 0) {
1093 if (register_write_direct(target, GDB_REGNO_MSTATUS,
1094 set_field(*mstatus, MSTATUS_FS, 1)) != ERROR_OK)
1095 return ERROR_FAIL;
1096 } else if (is_vector_reg(regno) && (*mstatus & MSTATUS_VS) == 0) {
1097 if (register_write_direct(target, GDB_REGNO_MSTATUS,
1098 set_field(*mstatus, MSTATUS_VS, 1)) != ERROR_OK)
1099 return ERROR_FAIL;
1100 }
1101 } else {
1102 *mstatus = 0;
1103 }
1104 return ERROR_OK;
1105 }
1106
1107 static int cleanup_after_register_access(struct target *target,
1108 uint64_t mstatus, int regno)
1109 {
1110 if ((is_fpu_reg(regno) && (mstatus & MSTATUS_FS) == 0) ||
1111 (is_vector_reg(regno) && (mstatus & MSTATUS_VS) == 0))
1112 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK)
1113 return ERROR_FAIL;
1114 return ERROR_OK;
1115 }
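/* The two helpers above are meant to bracket an FPU/vector register access,
 * e.g. (sketch):
 *
 *   uint64_t mstatus;
 *   if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
 *       return ERROR_FAIL;
 *   ...access the register, e.g. through the program buffer...
 *   if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
 *       return ERROR_FAIL;
 */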
1116
1117 typedef enum {
1118 SPACE_DM_DATA,
1119 SPACE_DMI_PROGBUF,
1120 SPACE_DMI_RAM
1121 } memory_space_t;
1122
1123 typedef struct {
1124 /* How can the debugger access this memory? */
1125 memory_space_t memory_space;
1126 /* Memory address to access the scratch memory from the hart. */
1127 riscv_addr_t hart_address;
1128 /* Memory address to access the scratch memory from the debugger. */
1129 riscv_addr_t debug_address;
1130 struct working_area *area;
1131 } scratch_mem_t;
1132
1133 /**
1134 * Find some scratch memory to be used with the given program.
1135 */
1136 static int scratch_reserve(struct target *target,
1137 scratch_mem_t *scratch,
1138 struct riscv_program *program,
1139 unsigned size_bytes)
1140 {
1141 riscv_addr_t alignment = 1;
1142 while (alignment < size_bytes)
1143 alignment *= 2;
1144
1145 scratch->area = NULL;
1146
1147 riscv013_info_t *info = get_info(target);
1148
1149 /* Option 1: See if data# registers can be used as the scratch memory */
1150 if (info->dataaccess == 1) {
1151 /* Sign extend dataaddr. */
1152 scratch->hart_address = info->dataaddr;
1153 if (info->dataaddr & (1<<11))
1154 scratch->hart_address |= 0xfffffffffffff000ULL;
1155 /* Align. */
1156 scratch->hart_address = (scratch->hart_address + alignment - 1) & ~(alignment - 1);
1157
1158 if ((size_bytes + scratch->hart_address - info->dataaddr + 3) / 4 <=
1159 info->datasize) {
1160 scratch->memory_space = SPACE_DM_DATA;
1161 scratch->debug_address = (scratch->hart_address - info->dataaddr) / 4;
1162 return ERROR_OK;
1163 }
1164 }
1165
1166 /* Option 2: See if progbuf can be used as the scratch memory */
1167 if (examine_progbuf(target) != ERROR_OK)
1168 return ERROR_FAIL;
1169
1170 /* Allow for ebreak at the end of the program. */
1171 unsigned program_size = (program->instruction_count + 1) * 4;
1172 scratch->hart_address = (info->progbuf_address + program_size + alignment - 1) &
1173 ~(alignment - 1);
1174 if ((info->progbuf_writable == YNM_YES) &&
1175 ((size_bytes + scratch->hart_address - info->progbuf_address + 3) / 4 <=
1176 info->progbufsize)) {
1177 scratch->memory_space = SPACE_DMI_PROGBUF;
1178 scratch->debug_address = (scratch->hart_address - info->progbuf_address) / 4;
1179 return ERROR_OK;
1180 }
1181
1182 /* Option 3: User-configured memory area as scratch RAM */
1183 if (target_alloc_working_area(target, size_bytes + alignment - 1,
1184 &scratch->area) == ERROR_OK) {
1185 scratch->hart_address = (scratch->area->address + alignment - 1) &
1186 ~(alignment - 1);
1187 scratch->memory_space = SPACE_DMI_RAM;
1188 scratch->debug_address = scratch->hart_address;
1189 return ERROR_OK;
1190 }
1191
1192 LOG_ERROR("Couldn't find %d bytes of scratch RAM to use. Please configure "
1193 "a work area with 'configure -work-area-phys'.", size_bytes);
1194 return ERROR_FAIL;
1195 }
1196
1197 static int scratch_release(struct target *target,
1198 scratch_mem_t *scratch)
1199 {
1200 if (scratch->area)
1201 return target_free_working_area(target, scratch->area);
1202
1203 return ERROR_OK;
1204 }
1205
1206 static int scratch_read64(struct target *target, scratch_mem_t *scratch,
1207 uint64_t *value)
1208 {
1209 uint32_t v;
1210 switch (scratch->memory_space) {
1211 case SPACE_DM_DATA:
1212 if (dmi_read(target, &v, DM_DATA0 + scratch->debug_address) != ERROR_OK)
1213 return ERROR_FAIL;
1214 *value = v;
1215 if (dmi_read(target, &v, DM_DATA1 + scratch->debug_address) != ERROR_OK)
1216 return ERROR_FAIL;
1217 *value |= ((uint64_t) v) << 32;
1218 break;
1219 case SPACE_DMI_PROGBUF:
1220 if (dmi_read(target, &v, DM_PROGBUF0 + scratch->debug_address) != ERROR_OK)
1221 return ERROR_FAIL;
1222 *value = v;
1223 if (dmi_read(target, &v, DM_PROGBUF1 + scratch->debug_address) != ERROR_OK)
1224 return ERROR_FAIL;
1225 *value |= ((uint64_t) v) << 32;
1226 break;
1227 case SPACE_DMI_RAM:
1228 {
1229 uint8_t buffer[8] = {0};
1230 if (read_memory(target, scratch->debug_address, 4, 2, buffer, 4) != ERROR_OK)
1231 return ERROR_FAIL;
1232 *value = buffer[0] |
1233 (((uint64_t) buffer[1]) << 8) |
1234 (((uint64_t) buffer[2]) << 16) |
1235 (((uint64_t) buffer[3]) << 24) |
1236 (((uint64_t) buffer[4]) << 32) |
1237 (((uint64_t) buffer[5]) << 40) |
1238 (((uint64_t) buffer[6]) << 48) |
1239 (((uint64_t) buffer[7]) << 56);
1240 }
1241 break;
1242 }
1243 return ERROR_OK;
1244 }
1245
1246 static int scratch_write64(struct target *target, scratch_mem_t *scratch,
1247 uint64_t value)
1248 {
1249 switch (scratch->memory_space) {
1250 case SPACE_DM_DATA:
1251 dmi_write(target, DM_DATA0 + scratch->debug_address, value);
1252 dmi_write(target, DM_DATA1 + scratch->debug_address, value >> 32);
1253 break;
1254 case SPACE_DMI_PROGBUF:
1255 dmi_write(target, DM_PROGBUF0 + scratch->debug_address, value);
1256 dmi_write(target, DM_PROGBUF1 + scratch->debug_address, value >> 32);
1257 break;
1258 case SPACE_DMI_RAM:
1259 {
1260 uint8_t buffer[8] = {
1261 value,
1262 value >> 8,
1263 value >> 16,
1264 value >> 24,
1265 value >> 32,
1266 value >> 40,
1267 value >> 48,
1268 value >> 56
1269 };
1270 if (write_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
1271 return ERROR_FAIL;
1272 }
1273 break;
1274 }
1275 return ERROR_OK;
1276 }
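/* Typical life cycle of a scratch area (illustrative sketch): reserve it for
 * a program, point a register at scratch.hart_address, run the program, read
 * the result back, and release it.
 *
 *   scratch_mem_t scratch;
 *   if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
 *       return ERROR_FAIL;
 *   ...run the program...
 *   uint64_t value;
 *   int result = scratch_read64(target, &scratch, &value);
 *   scratch_release(target, &scratch);
 */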
1277
1278 /** Return register size in bits. */
1279 static unsigned register_size(struct target *target, unsigned number)
1280 {
1281 /* If reg_cache hasn't been initialized yet, make a guess. We need this for
1282 * when this function is called during examine(). */
1283 if (target->reg_cache)
1284 return target->reg_cache->reg_list[number].size;
1285 else
1286 return riscv_xlen(target);
1287 }
1288
1289 static bool has_sufficient_progbuf(struct target *target, unsigned size)
1290 {
1291 RISCV013_INFO(info);
1292 RISCV_INFO(r);
1293
1294 return info->progbufsize + r->impebreak >= size;
1295 }
1296
1297 /**
1298 * Immediately write the new value to the requested register. This mechanism
1299 * bypasses any caches.
1300 */
1301 static int register_write_direct(struct target *target, unsigned number,
1302 uint64_t value)
1303 {
1304 LOG_DEBUG("{%d} %s <- 0x%" PRIx64, riscv_current_hartid(target),
1305 gdb_regno_name(number), value);
1306
1307 int result = register_write_abstract(target, number, value,
1308 register_size(target, number));
1309 if (result == ERROR_OK || !has_sufficient_progbuf(target, 2) ||
1310 !riscv_is_halted(target))
1311 return result;
1312
1313 struct riscv_program program;
1314 riscv_program_init(&program, target);
1315
1316 uint64_t s0;
1317 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1318 return ERROR_FAIL;
1319
1320 uint64_t mstatus;
1321 if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
1322 return ERROR_FAIL;
1323
1324 scratch_mem_t scratch;
1325 bool use_scratch = false;
1326 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
1327 riscv_supports_extension(target, riscv_current_hartid(target), 'D') &&
1328 riscv_xlen(target) < 64) {
1329 /* There are no instructions to move all the bits from a register, so
1330 * we need to use some scratch RAM. */
1331 use_scratch = true;
1332 riscv_program_insert(&program, fld(number - GDB_REGNO_FPR0, S0, 0));
1333
1334 if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
1335 return ERROR_FAIL;
1336
1337 if (register_write_direct(target, GDB_REGNO_S0, scratch.hart_address)
1338 != ERROR_OK) {
1339 scratch_release(target, &scratch);
1340 return ERROR_FAIL;
1341 }
1342
1343 if (scratch_write64(target, &scratch, value) != ERROR_OK) {
1344 scratch_release(target, &scratch);
1345 return ERROR_FAIL;
1346 }
1347
1348 } else if (number == GDB_REGNO_VTYPE) {
1349 riscv_program_insert(&program, csrr(S0, CSR_VL));
1350 riscv_program_insert(&program, vsetvli(ZERO, S0, value));
1351
1352 } else {
1353 if (register_write_direct(target, GDB_REGNO_S0, value) != ERROR_OK)
1354 return ERROR_FAIL;
1355
1356 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
1357 if (riscv_supports_extension(target, riscv_current_hartid(target), 'D'))
1358 riscv_program_insert(&program, fmv_d_x(number - GDB_REGNO_FPR0, S0));
1359 else
1360 riscv_program_insert(&program, fmv_w_x(number - GDB_REGNO_FPR0, S0));
1361 } else if (number == GDB_REGNO_VL) {
1362 /* "The XLEN-bit-wide read-only vl CSR can only be updated by the
1363 * vsetvli and vsetvl instructions, and the fault-only-first vector
1364 * load instruction variants." */
1365 riscv_reg_t vtype;
1366 if (register_read(target, &vtype, GDB_REGNO_VTYPE) != ERROR_OK)
1367 return ERROR_FAIL;
1368 if (riscv_program_insert(&program, vsetvli(ZERO, S0, vtype)) != ERROR_OK)
1369 return ERROR_FAIL;
1370 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
1371 riscv_program_csrw(&program, S0, number);
1372 } else {
1373 LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
1374 return ERROR_FAIL;
1375 }
1376 }
1377
1378 int exec_out = riscv_program_exec(&program, target);
1379 /* Don't message on error. Probably the register doesn't exist. */
1380 if (exec_out == ERROR_OK && target->reg_cache) {
1381 struct reg *reg = &target->reg_cache->reg_list[number];
1382 buf_set_u64(reg->value, 0, reg->size, value);
1383 }
1384
1385 if (use_scratch)
1386 scratch_release(target, &scratch);
1387
1388 if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
1389 return ERROR_FAIL;
1390
1391 /* Restore S0. */
1392 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1393 return ERROR_FAIL;
1394
1395 return exec_out;
1396 }
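/* Example use of register_write_direct() (a sketch; new_pc is just a
 * placeholder value): update dpc before resuming, bypassing the cache.
 *
 *   if (register_write_direct(target, GDB_REGNO_DPC, new_pc) != ERROR_OK)
 *       return ERROR_FAIL;
 */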
1397
1398 /** Return the cached value, or read from the target if necessary. */
1399 static int register_read(struct target *target, uint64_t *value, uint32_t number)
1400 {
1401 if (number == GDB_REGNO_ZERO) {
1402 *value = 0;
1403 return ERROR_OK;
1404 }
1405 int result = register_read_direct(target, value, number);
1406 if (result != ERROR_OK)
1407 return ERROR_FAIL;
1408 if (target->reg_cache) {
1409 struct reg *reg = &target->reg_cache->reg_list[number];
1410 buf_set_u64(reg->value, 0, reg->size, *value);
1411 }
1412 return ERROR_OK;
1413 }
1414
1415 /** Actually read registers from the target right now. */
1416 static int register_read_direct(struct target *target, uint64_t *value, uint32_t number)
1417 {
1418 int result = register_read_abstract(target, value, number,
1419 register_size(target, number));
1420
1421 if (result != ERROR_OK &&
1422 has_sufficient_progbuf(target, 2) &&
1423 number > GDB_REGNO_XPR31) {
1424 struct riscv_program program;
1425 riscv_program_init(&program, target);
1426
1427 scratch_mem_t scratch;
1428 bool use_scratch = false;
1429
1430 riscv_reg_t s0;
1431 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1432 return ERROR_FAIL;
1433
1434 /* Write program to move data into s0. */
1435
1436 uint64_t mstatus;
1437 if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
1438 return ERROR_FAIL;
1439
1440 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
1441 if (riscv_supports_extension(target, riscv_current_hartid(target), 'D')
1442 && riscv_xlen(target) < 64) {
1443 /* There are no instructions to move all the bits from a
1444 * register, so we need to use some scratch RAM. */
1445 riscv_program_insert(&program, fsd(number - GDB_REGNO_FPR0, S0,
1446 0));
1447
1448 if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
1449 return ERROR_FAIL;
1450 use_scratch = true;
1451
1452 if (register_write_direct(target, GDB_REGNO_S0,
1453 scratch.hart_address) != ERROR_OK) {
1454 scratch_release(target, &scratch);
1455 return ERROR_FAIL;
1456 }
1457 } else if (riscv_supports_extension(target,
1458 riscv_current_hartid(target), 'D')) {
1459 riscv_program_insert(&program, fmv_x_d(S0, number - GDB_REGNO_FPR0));
1460 } else {
1461 riscv_program_insert(&program, fmv_x_w(S0, number - GDB_REGNO_FPR0));
1462 }
1463 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
1464 riscv_program_csrr(&program, S0, number);
1465 } else {
1466 LOG_ERROR("Unsupported register: %s", gdb_regno_name(number));
1467 return ERROR_FAIL;
1468 }
1469
1470 /* Execute program. */
1471 result = riscv_program_exec(&program, target);
1472 /* Don't message on error. Probably the register doesn't exist. */
1473
1474 if (use_scratch) {
1475 result = scratch_read64(target, &scratch, value);
1476 scratch_release(target, &scratch);
1477 if (result != ERROR_OK)
1478 return result;
1479 } else {
1480 /* Read S0 */
1481 if (register_read_direct(target, value, GDB_REGNO_S0) != ERROR_OK)
1482 return ERROR_FAIL;
1483 }
1484
1485 if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
1486 return ERROR_FAIL;
1487
1488 /* Restore S0. */
1489 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1490 return ERROR_FAIL;
1491 }
1492
1493 if (result == ERROR_OK) {
1494 LOG_DEBUG("{%d} %s = 0x%" PRIx64, riscv_current_hartid(target),
1495 gdb_regno_name(number), *value);
1496 }
1497
1498 return result;
1499 }
1500
1501 int wait_for_authbusy(struct target *target, uint32_t *dmstatus)
1502 {
1503 time_t start = time(NULL);
1504 while (1) {
1505 uint32_t value;
1506 if (dmstatus_read(target, &value, false) != ERROR_OK)
1507 return ERROR_FAIL;
1508 if (dmstatus)
1509 *dmstatus = value;
1510 if (!get_field(value, DM_DMSTATUS_AUTHBUSY))
1511 break;
1512 if (time(NULL) - start > riscv_command_timeout_sec) {
1513 LOG_ERROR("Timed out after %ds waiting for authbusy to go low (dmstatus=0x%x). "
1514 "Increase the timeout with riscv set_command_timeout_sec.",
1515 riscv_command_timeout_sec,
1516 value);
1517 return ERROR_FAIL;
1518 }
1519 }
1520
1521 return ERROR_OK;
1522 }
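/* Sketch of how wait_for_authbusy() fits into an authentication exchange
 * (illustrative only; "response" is a placeholder and the actual protocol is
 * implementation defined):
 *
 *   if (wait_for_authbusy(target, NULL) != ERROR_OK)
 *       return ERROR_FAIL;
 *   dmi_write(target, DM_AUTHDATA, response);
 *   if (wait_for_authbusy(target, NULL) != ERROR_OK)
 *       return ERROR_FAIL;
 */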
1523
1524 /*** OpenOCD target functions. ***/
1525
1526 static void deinit_target(struct target *target)
1527 {
1528 LOG_DEBUG("riscv_deinit_target()");
1529 riscv_info_t *info = (riscv_info_t *) target->arch_info;
1530 free(info->version_specific);
1531 /* TODO: free register arch_info */
1532 info->version_specific = NULL;
1533 }
1534
1535 static int set_haltgroup(struct target *target, bool *supported)
1536 {
1537 uint32_t write = set_field(DM_DMCS2_HGWRITE, DM_DMCS2_GROUP, target->smp);
1538 if (dmi_write(target, DM_DMCS2, write) != ERROR_OK)
1539 return ERROR_FAIL;
1540 uint32_t read;
1541 if (dmi_read(target, &read, DM_DMCS2) != ERROR_OK)
1542 return ERROR_FAIL;
1543 *supported = get_field(read, DM_DMCS2_GROUP) == (unsigned)target->smp;
1544 return ERROR_OK;
1545 }
1546
1547 static int discover_vlenb(struct target *target, int hartid)
1548 {
1549 RISCV_INFO(r);
1550 riscv_reg_t vlenb;
1551
1552 if (register_read(target, &vlenb, GDB_REGNO_VLENB) != ERROR_OK) {
1553 LOG_WARNING("Couldn't read vlenb for %s; vector register access won't work.",
1554 target_name(target));
1555 r->vlenb[hartid] = 0;
1556 return ERROR_OK;
1557 }
1558 r->vlenb[hartid] = vlenb;
1559
1560 LOG_INFO("hart %d: Vector support with vlenb=%d", hartid, r->vlenb[hartid]);
1561
1562 return ERROR_OK;
1563 }
1564
1565 static int examine(struct target *target)
1566 {
1567 /* Don't need to select dbus, since the first thing we do is read dtmcontrol. */
1568
1569 uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
1570 LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
1571 LOG_DEBUG(" dmireset=%d", get_field(dtmcontrol, DTM_DTMCS_DMIRESET));
1572 LOG_DEBUG(" idle=%d", get_field(dtmcontrol, DTM_DTMCS_IDLE));
1573 LOG_DEBUG(" dmistat=%d", get_field(dtmcontrol, DTM_DTMCS_DMISTAT));
1574 LOG_DEBUG(" abits=%d", get_field(dtmcontrol, DTM_DTMCS_ABITS));
1575 LOG_DEBUG(" version=%d", get_field(dtmcontrol, DTM_DTMCS_VERSION));
1576 if (dtmcontrol == 0) {
1577 LOG_ERROR("dtmcontrol is 0. Check JTAG connectivity/board power.");
1578 return ERROR_FAIL;
1579 }
1580 if (get_field(dtmcontrol, DTM_DTMCS_VERSION) != 1) {
1581 LOG_ERROR("Unsupported DTM version %d. (dtmcontrol=0x%x)",
1582 get_field(dtmcontrol, DTM_DTMCS_VERSION), dtmcontrol);
1583 return ERROR_FAIL;
1584 }
1585
1586 riscv013_info_t *info = get_info(target);
1587 /* TODO: This won't be true if there are multiple DMs. */
1588 info->index = target->coreid;
1589 info->abits = get_field(dtmcontrol, DTM_DTMCS_ABITS);
1590 info->dtmcs_idle = get_field(dtmcontrol, DTM_DTMCS_IDLE);
1591
1592 /* Reset the Debug Module. */
1593 dm013_info_t *dm = get_dm(target);
1594 if (!dm)
1595 return ERROR_FAIL;
1596 if (!dm->was_reset) {
1597 dmi_write(target, DM_DMCONTROL, 0);
1598 dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_DMACTIVE);
1599 dm->was_reset = true;
1600 }
1601
1602 dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_HARTSELLO |
1603 DM_DMCONTROL_HARTSELHI | DM_DMCONTROL_DMACTIVE |
1604 DM_DMCONTROL_HASEL);
1605 uint32_t dmcontrol;
1606 if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
1607 return ERROR_FAIL;
1608
1609 if (!get_field(dmcontrol, DM_DMCONTROL_DMACTIVE)) {
1610 LOG_ERROR("Debug Module did not become active. dmcontrol=0x%x",
1611 dmcontrol);
1612 return ERROR_FAIL;
1613 }
1614
1615 dm->hasel_supported = get_field(dmcontrol, DM_DMCONTROL_HASEL);
1616
1617 uint32_t dmstatus;
1618 if (dmstatus_read(target, &dmstatus, false) != ERROR_OK)
1619 return ERROR_FAIL;
1620 LOG_DEBUG("dmstatus: 0x%08x", dmstatus);
1621 int dmstatus_version = get_field(dmstatus, DM_DMSTATUS_VERSION);
1622 if (dmstatus_version != 2 && dmstatus_version != 3) {
1623 /* Error was already printed out in dmstatus_read(). */
1624 return ERROR_FAIL;
1625 }
1626
1627 uint32_t hartsel =
1628 (get_field(dmcontrol, DM_DMCONTROL_HARTSELHI) <<
1629 DM_DMCONTROL_HARTSELLO_LENGTH) |
1630 get_field(dmcontrol, DM_DMCONTROL_HARTSELLO);
1631 info->hartsellen = 0;
1632 while (hartsel & 1) {
1633 info->hartsellen++;
1634 hartsel >>= 1;
1635 }
1636 LOG_DEBUG("hartsellen=%d", info->hartsellen);
1637
1638 uint32_t hartinfo;
1639 if (dmi_read(target, &hartinfo, DM_HARTINFO) != ERROR_OK)
1640 return ERROR_FAIL;
1641
1642 info->datasize = get_field(hartinfo, DM_HARTINFO_DATASIZE);
1643 info->dataaccess = get_field(hartinfo, DM_HARTINFO_DATAACCESS);
1644 info->dataaddr = get_field(hartinfo, DM_HARTINFO_DATAADDR);
1645
1646 if (!get_field(dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
1647 LOG_ERROR("Debugger is not authenticated to target Debug Module. "
1648 "(dmstatus=0x%x). Use `riscv authdata_read` and "
1649 "`riscv authdata_write` commands to authenticate.", dmstatus);
1650 /* If we return ERROR_FAIL here, then in a multicore setup the next
1651 * core won't be examined, which means we won't set up the
1652 * authentication commands for them, which means the config script
1653 * needs to be a lot more complex. */
1654 return ERROR_OK;
1655 }
1656
1657 if (dmi_read(target, &info->sbcs, DM_SBCS) != ERROR_OK)
1658 return ERROR_FAIL;
1659
1660 /* Check that abstract data registers are accessible. */
1661 uint32_t abstractcs;
1662 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
1663 return ERROR_FAIL;
1664 info->datacount = get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT);
1665 info->progbufsize = get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE);
1666
1667 LOG_INFO("datacount=%d progbufsize=%d", info->datacount, info->progbufsize);
1668
1669 RISCV_INFO(r);
1670 r->impebreak = get_field(dmstatus, DM_DMSTATUS_IMPEBREAK);
1671
1672 if (!has_sufficient_progbuf(target, 2)) {
1673 LOG_WARNING("We won't be able to execute fence instructions on this "
1674 "target. Memory may not always appear consistent. "
1675 "(progbufsize=%d, impebreak=%d)", info->progbufsize,
1676 r->impebreak);
1677 }
1678
1679 if (info->progbufsize < 4 && riscv_enable_virtual) {
1680 LOG_ERROR("set_enable_virtual is not available on this target. It "
1681 "requires a program buffer size of at least 4. (progbufsize=%d) "
1682 "Use `riscv set_enable_virtual off` to continue."
1683 , info->progbufsize);
1684 }
1685
1686 /* Before doing anything else we must first enumerate the harts. */
1687 if (dm->hart_count < 0) {
1688 for (int i = 0; i < MIN(RISCV_MAX_HARTS, 1 << info->hartsellen); ++i) {
1689 r->current_hartid = i;
1690 if (riscv013_select_current_hart(target) != ERROR_OK)
1691 return ERROR_FAIL;
1692
1693 uint32_t s;
1694 if (dmstatus_read(target, &s, true) != ERROR_OK)
1695 return ERROR_FAIL;
1696 if (get_field(s, DM_DMSTATUS_ANYNONEXISTENT))
1697 break;
1698 dm->hart_count = i + 1;
1699
1700 if (get_field(s, DM_DMSTATUS_ANYHAVERESET))
1701 dmi_write(target, DM_DMCONTROL,
1702 set_hartsel(DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_ACKHAVERESET, i));
1703 }
1704
1705 LOG_DEBUG("Detected %d harts.", dm->hart_count);
1706 }
1707
1708 if (dm->hart_count == 0) {
1709 LOG_ERROR("No harts found!");
1710 return ERROR_FAIL;
1711 }
1712
1713 /* Don't call any riscv_* functions until after we've counted the number of
1714 * cores and initialized registers. */
1715 for (int i = 0; i < dm->hart_count; ++i) {
1716 if (!riscv_rtos_enabled(target) && i != target->coreid)
1717 continue;
1718
1719 r->current_hartid = i;
1720 if (riscv013_select_current_hart(target) != ERROR_OK)
1721 return ERROR_FAIL;
1722
1723 bool halted = riscv_is_halted(target);
1724 if (!halted) {
1725 if (riscv013_halt_go(target) != ERROR_OK) {
1726 LOG_ERROR("Fatal: Hart %d failed to halt during examine()", i);
1727 return ERROR_FAIL;
1728 }
1729 }
1730
1731 /* Without knowing anything else we can at least mess with the
1732 * program buffer. */
1733 r->debug_buffer_size[i] = info->progbufsize;
1734
1735 int result = register_read_abstract(target, NULL, GDB_REGNO_S0, 64);
1736 if (result == ERROR_OK)
1737 r->xlen[i] = 64;
1738 else
1739 r->xlen[i] = 32;
1740
1741 if (register_read(target, &r->misa[i], GDB_REGNO_MISA)) {
1742 LOG_ERROR("Fatal: Failed to read MISA from hart %d.", i);
1743 return ERROR_FAIL;
1744 }
1745
1746 if (riscv_supports_extension(target, i, 'V')) {
1747 if (discover_vlenb(target, i) != ERROR_OK)
1748 return ERROR_FAIL;
1749 }
1750
1751 /* Now init registers based on what we discovered. */
1752 if (riscv_init_registers(target) != ERROR_OK)
1753 return ERROR_FAIL;
1754
1755 /* Display this as early as possible to help people who are using
1756 * really slow simulators. */
1757 LOG_DEBUG(" hart %d: XLEN=%d, misa=0x%" PRIx64, i, r->xlen[i],
1758 r->misa[i]);
1759
1760 if (!halted)
1761 riscv013_step_or_resume_current_hart(target, false, false);
1762 }
1763
1764 target_set_examined(target);
1765
1766 if (target->smp) {
1767 bool haltgroup_supported;
1768 if (set_haltgroup(target, &haltgroup_supported) != ERROR_OK)
1769 return ERROR_FAIL;
1770 if (haltgroup_supported)
1771 LOG_INFO("Core %d made part of halt group %d.", target->coreid,
1772 target->smp);
1773 else
1774 LOG_INFO("Core %d could not be made part of halt group %d.",
1775 target->coreid, target->smp);
1776 }
1777
1778 /* Some regression suites rely on seeing 'Examined RISC-V core' to know
1779 * when they can connect with gdb/telnet.
1780 * We will need to update those suites if we want to change that text. */
1781 LOG_INFO("Examined RISC-V core; found %d harts",
1782 riscv_count_harts(target));
1783 for (int i = 0; i < riscv_count_harts(target); ++i) {
1784 if (riscv_hart_enabled(target, i)) {
1785 LOG_INFO(" hart %d: XLEN=%d, misa=0x%" PRIx64, i, r->xlen[i],
1786 r->misa[i]);
1787 } else {
1788 LOG_INFO(" hart %d: currently disabled", i);
1789 }
1790 }
1791 return ERROR_OK;
1792 }
1793
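/* Read the 32-bit authentication word from the Debug Module, first waiting
 * for any in-progress authentication operation (authbusy) to complete. */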
1794 int riscv013_authdata_read(struct target *target, uint32_t *value)
1795 {
1796 if (wait_for_authbusy(target, NULL) != ERROR_OK)
1797 return ERROR_FAIL;
1798
1799 return dmi_read(target, value, DM_AUTHDATA);
1800 }
1801
1802 int riscv013_authdata_write(struct target *target, uint32_t value)
1803 {
1804 uint32_t before, after;
1805 if (wait_for_authbusy(target, &before) != ERROR_OK)
1806 return ERROR_FAIL;
1807
1808 dmi_write(target, DM_AUTHDATA, value);
1809
1810 if (wait_for_authbusy(target, &after) != ERROR_OK)
1811 return ERROR_FAIL;
1812
1813 if (!get_field(before, DM_DMSTATUS_AUTHENTICATED) &&
1814 get_field(after, DM_DMSTATUS_AUTHENTICATED)) {
1815 LOG_INFO("authdata_write resulted in successful authentication");
1816 int result = ERROR_OK;
1817 dm013_info_t *dm = get_dm(target);
1818 if (!dm)
1819 return ERROR_FAIL;
1820 target_list_t *entry;
1821 list_for_each_entry(entry, &dm->target_list, list) {
1822 if (examine(entry->target) != ERROR_OK)
1823 result = ERROR_FAIL;
1824 }
1825 return result;
1826 }
1827
1828 return ERROR_OK;
1829 }
1830
1831 static int riscv013_hart_count(struct target *target)
1832 {
1833 dm013_info_t *dm = get_dm(target);
1834 assert(dm);
1835 return dm->hart_count;
1836 }
1837
1838 static unsigned riscv013_data_bits(struct target *target)
1839 {
1840 RISCV013_INFO(info);
1841 /* TODO: Once there is a spec for discovering abstract commands, we can
1842 * take those into account as well. For now we assume abstract commands
1843 * support XLEN-wide accesses. */
1844 if (has_sufficient_progbuf(target, 3) && !riscv_prefer_sba)
1845 return riscv_xlen(target);
1846
1847 if (get_field(info->sbcs, DM_SBCS_SBACCESS128))
1848 return 128;
1849 if (get_field(info->sbcs, DM_SBCS_SBACCESS64))
1850 return 64;
1851 if (get_field(info->sbcs, DM_SBCS_SBACCESS32))
1852 return 32;
1853 if (get_field(info->sbcs, DM_SBCS_SBACCESS16))
1854 return 16;
1855 if (get_field(info->sbcs, DM_SBCS_SBACCESS8))
1856 return 8;
1857
1858 return riscv_xlen(target);
1859 }
1860
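/* Save vtype/vl and reprogram them so that a vector register can be streamed
 * through an X register: SEW is set to XLEN and vl to vlenb*8/XLEN, i.e. the
 * number of XLEN-bit slices in one vector register (returned in *debug_vl). */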
1861 static int prep_for_vector_access(struct target *target, uint64_t *vtype,
1862 uint64_t *vl, unsigned *debug_vl)
1863 {
1864 RISCV_INFO(r);
1865 /* TODO: this continuous save/restore is terrible for performance. */
1866 /* Write vtype and vl. */
1867 unsigned encoded_vsew;
1868 switch (riscv_xlen(target)) {
1869 case 32:
1870 encoded_vsew = 2;
1871 break;
1872 case 64:
1873 encoded_vsew = 3;
1874 break;
1875 default:
1876 LOG_ERROR("Unsupported xlen: %d", riscv_xlen(target));
1877 return ERROR_FAIL;
1878 }
1879
1880 /* Save vtype and vl. */
1881 if (register_read(target, vtype, GDB_REGNO_VTYPE) != ERROR_OK)
1882 return ERROR_FAIL;
1883 if (register_read(target, vl, GDB_REGNO_VL) != ERROR_OK)
1884 return ERROR_FAIL;
1885
1886 if (register_write_direct(target, GDB_REGNO_VTYPE, encoded_vsew << 3) != ERROR_OK)
1887 return ERROR_FAIL;
1888 *debug_vl = DIV_ROUND_UP(r->vlenb[r->current_hartid] * 8,
1889 riscv_xlen(target));
1890 if (register_write_direct(target, GDB_REGNO_VL, *debug_vl) != ERROR_OK)
1891 return ERROR_FAIL;
1892
1893 return ERROR_OK;
1894 }
1895
1896 static int cleanup_after_vector_access(struct target *target, uint64_t vtype,
1897 uint64_t vl)
1898 {
1899 /* Restore vtype and vl. */
1900 if (register_write_direct(target, GDB_REGNO_VTYPE, vtype) != ERROR_OK)
1901 return ERROR_FAIL;
1902 if (register_write_direct(target, GDB_REGNO_VL, vl) != ERROR_OK)
1903 return ERROR_FAIL;
1904 return ERROR_OK;
1905 }
1906
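/* Read one vector register (V0..V31). Each iteration moves element 0 of the
 * register into S0 (vmv.x.s) and then rotates the register down one element
 * (vslide1down.vx with S0 as the scalar), so after debug_vl iterations every
 * XLEN-bit slice has passed through S0 and the register ends up holding its
 * original contents again. */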
1907 static int riscv013_get_register_buf(struct target *target,
1908 uint8_t *value, int regno)
1909 {
1910 assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);
1911
1912 riscv_reg_t s0;
1913 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1914 return ERROR_FAIL;
1915
1916 uint64_t mstatus;
1917 if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
1918 return ERROR_FAIL;
1919
1920 uint64_t vtype, vl;
1921 unsigned debug_vl;
1922 if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
1923 return ERROR_FAIL;
1924
1925 unsigned vnum = regno - GDB_REGNO_V0;
1926 unsigned xlen = riscv_xlen(target);
1927
1928 struct riscv_program program;
1929 riscv_program_init(&program, target);
1930 riscv_program_insert(&program, vmv_x_s(S0, vnum));
1931 riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));
1932
1933 int result = ERROR_OK;
1934 for (unsigned i = 0; i < debug_vl; i++) {
1935 /* Executing the program might result in an exception if there is some
1936 * issue with the vector implementation/instructions we're using. If that
1937 * happens, attempt to restore as usual. We may have clobbered the
1938 * vector register we tried to read already.
1939 * For other failures, we just return error because things are probably
1940 * so messed up that attempting to restore isn't going to help. */
1941 result = riscv_program_exec(&program, target);
1942 if (result == ERROR_OK) {
1943 uint64_t v;
1944 if (register_read_direct(target, &v, GDB_REGNO_S0) != ERROR_OK)
1945 return ERROR_FAIL;
1946 buf_set_u64(value, xlen * i, xlen, v);
1947 } else {
1948 break;
1949 }
1950 }
1951
1952 if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
1953 return ERROR_FAIL;
1954
1955 if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
1956 return ERROR_FAIL;
1957 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1958 return ERROR_FAIL;
1959
1960 return result;
1961 }
1962
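/* Write one vector register (V0..V31) by feeding it XLEN bits at a time:
 * each iteration loads the next slice of the buffer into S0 and executes
 * vslide1down.vx, which shifts the register down and inserts S0 at the top. */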
1963 static int riscv013_set_register_buf(struct target *target,
1964 int regno, const uint8_t *value)
1965 {
1966 assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);
1967
1968 riscv_reg_t s0;
1969 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1970 return ERROR_FAIL;
1971
1972 uint64_t mstatus;
1973 if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
1974 return ERROR_FAIL;
1975
1976 uint64_t vtype, vl;
1977 unsigned debug_vl;
1978 if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
1979 return ERROR_FAIL;
1980
1981 unsigned vnum = regno - GDB_REGNO_V0;
1982 unsigned xlen = riscv_xlen(target);
1983
1984 struct riscv_program program;
1985 riscv_program_init(&program, target);
1986 riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));
1987 int result = ERROR_OK;
1988 for (unsigned i = 0; i < debug_vl; i++) {
1989 if (register_write_direct(target, GDB_REGNO_S0,
1990 buf_get_u64(value, xlen * i, xlen)) != ERROR_OK)
1991 return ERROR_FAIL;
1992 result = riscv_program_exec(&program, target);
1993 if (result != ERROR_OK)
1994 break;
1995 }
1996
1997 if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
1998 return ERROR_FAIL;
1999
2000 if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
2001 return ERROR_FAIL;
2002 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
2003 return ERROR_FAIL;
2004
2005 return result;
2006 }
2007
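/* Fill in the 0.13-specific function pointers in the generic riscv_info_t and
 * allocate the per-target riscv013_info_t state. */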
2008 static int init_target(struct command_context *cmd_ctx,
2009 struct target *target)
2010 {
2011 LOG_DEBUG("init");
2012 riscv_info_t *generic_info = (riscv_info_t *) target->arch_info;
2013
2014 generic_info->get_register = &riscv013_get_register;
2015 generic_info->set_register = &riscv013_set_register;
2016 generic_info->get_register_buf = &riscv013_get_register_buf;
2017 generic_info->set_register_buf = &riscv013_set_register_buf;
2018 generic_info->select_current_hart = &riscv013_select_current_hart;
2019 generic_info->is_halted = &riscv013_is_halted;
2020 generic_info->resume_go = &riscv013_resume_go;
2021 generic_info->step_current_hart = &riscv013_step_current_hart;
2022 generic_info->on_halt = &riscv013_on_halt;
2023 generic_info->resume_prep = &riscv013_resume_prep;
2024 generic_info->halt_prep = &riscv013_halt_prep;
2025 generic_info->halt_go = &riscv013_halt_go;
2026 generic_info->on_step = &riscv013_on_step;
2027 generic_info->halt_reason = &riscv013_halt_reason;
2028 generic_info->read_debug_buffer = &riscv013_read_debug_buffer;
2029 generic_info->write_debug_buffer = &riscv013_write_debug_buffer;
2030 generic_info->execute_debug_buffer = &riscv013_execute_debug_buffer;
2031 generic_info->fill_dmi_write_u64 = &riscv013_fill_dmi_write_u64;
2032 generic_info->fill_dmi_read_u64 = &riscv013_fill_dmi_read_u64;
2033 generic_info->fill_dmi_nop_u64 = &riscv013_fill_dmi_nop_u64;
2034 generic_info->dmi_write_u64_bits = &riscv013_dmi_write_u64_bits;
2035 generic_info->authdata_read = &riscv013_authdata_read;
2036 generic_info->authdata_write = &riscv013_authdata_write;
2037 generic_info->dmi_read = &dmi_read;
2038 generic_info->dmi_write = &dmi_write;
2039 generic_info->read_memory = read_memory;
2040 generic_info->test_sba_config_reg = &riscv013_test_sba_config_reg;
2041 generic_info->test_compliance = &riscv013_test_compliance;
2042 generic_info->hart_count = &riscv013_hart_count;
2043 generic_info->data_bits = &riscv013_data_bits;
2044 generic_info->version_specific = calloc(1, sizeof(riscv013_info_t));
2045 if (!generic_info->version_specific)
2046 return ERROR_FAIL;
2047 riscv013_info_t *info = get_info(target);
2048
2049 info->progbufsize = -1;
2050
2051 info->dmi_busy_delay = 0;
2052 info->bus_master_read_delay = 0;
2053 info->bus_master_write_delay = 0;
2054 info->ac_busy_delay = 0;
2055
2056 /* Assume all these abstract commands are supported until we learn
2057 * otherwise.
2058 * TODO: The spec allows e.g. one CSR to be accessible abstractly
2059 * while another one isn't. We don't track that closely here, but in
2060 * the future we probably should. */
2061 info->abstract_read_csr_supported = true;
2062 info->abstract_write_csr_supported = true;
2063 info->abstract_read_fpr_supported = true;
2064 info->abstract_write_fpr_supported = true;
2065
2066 return ERROR_OK;
2067 }
2068
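/* Assert ndmreset. When OpenOCD treats the harts as RTOS threads, haltreq is
 * first set (or cleared) on every enabled hart according to reset_halt;
 * otherwise only the current hart is selected before ndmreset is set. */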
2069 static int assert_reset(struct target *target)
2070 {
2071 RISCV_INFO(r);
2072
2073 select_dmi(target);
2074
2075 uint32_t control_base = set_field(0, DM_DMCONTROL_DMACTIVE, 1);
2076
2077 if (target->rtos) {
2078 /* There's only one target, and OpenOCD thinks each hart is a thread.
2079 * We must reset them all. */
2080
2081 /* TODO: Try to use hasel in dmcontrol */
2082
2083 /* Set haltreq for each hart. */
2084 uint32_t control = control_base;
2085 for (int i = 0; i < riscv_count_harts(target); ++i) {
2086 if (!riscv_hart_enabled(target, i))
2087 continue;
2088
2089 control = set_hartsel(control_base, i);
2090 control = set_field(control, DM_DMCONTROL_HALTREQ,
2091 target->reset_halt ? 1 : 0);
2092 dmi_write(target, DM_DMCONTROL, control);
2093 }
2094 /* Assert ndmreset */
2095 control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
2096 dmi_write(target, DM_DMCONTROL, control);
2097
2098 } else {
2099 /* Reset just this hart. */
2100 uint32_t control = set_hartsel(control_base, r->current_hartid);
2101 control = set_field(control, DM_DMCONTROL_HALTREQ,
2102 target->reset_halt ? 1 : 0);
2103 control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
2104 dmi_write(target, DM_DMCONTROL, control);
2105 }
2106
2107 target->state = TARGET_RESET;
2108
2109 dm013_info_t *dm = get_dm(target);
2110 if (!dm)
2111 return ERROR_FAIL;
2112
2113 /* The DM might have gotten reset if OpenOCD called us as part of a reset
2114 * sequence that involves SRST being toggled, so clear our program buffer
2115 * cache, which may be out of date. */
2116 memset(dm->progbuf_cache, 0, sizeof(dm->progbuf_cache));
2117
2118 return ERROR_OK;
2119 }
2120
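/* Release ndmreset while keeping haltreq as requested, then wait for each
 * relevant hart to report allhalted (when reset_halt is set) or allrunning,
 * acking havereset along the way. */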
2121 static int deassert_reset(struct target *target)
2122 {
2123 RISCV_INFO(r);
2124 RISCV013_INFO(info);
2125 select_dmi(target);
2126
2127 /* Clear the reset, but make sure haltreq is still set */
2128 uint32_t control = 0;
2129 control = set_field(control, DM_DMCONTROL_HALTREQ, target->reset_halt ? 1 : 0);
2130 control = set_field(control, DM_DMCONTROL_DMACTIVE, 1);
2131 dmi_write(target, DM_DMCONTROL,
2132 set_hartsel(control, r->current_hartid));
2133
2134 uint32_t dmstatus;
2135 int dmi_busy_delay = info->dmi_busy_delay;
2136 time_t start = time(NULL);
2137
2138 for (int i = 0; i < riscv_count_harts(target); ++i) {
2139 int index = i;
2140 if (target->rtos) {
2141 if (!riscv_hart_enabled(target, index))
2142 continue;
2143 dmi_write(target, DM_DMCONTROL,
2144 set_hartsel(control, index));
2145 } else {
2146 index = r->current_hartid;
2147 }
2148
2149 char *operation;
2150 uint32_t expected_field;
2151 if (target->reset_halt) {
2152 operation = "halt";
2153 expected_field = DM_DMSTATUS_ALLHALTED;
2154 } else {
2155 operation = "run";
2156 expected_field = DM_DMSTATUS_ALLRUNNING;
2157 }
2158 LOG_DEBUG("Waiting for hart %d to %s out of reset.", index, operation);
2159 while (1) {
2160 int result = dmstatus_read_timeout(target, &dmstatus, true,
2161 riscv_reset_timeout_sec);
2162 if (result == ERROR_TIMEOUT_REACHED)
2163 LOG_ERROR("Hart %d didn't complete a DMI read coming out of "
2164 "reset in %ds; Increase the timeout with riscv "
2165 "set_reset_timeout_sec.",
2166 index, riscv_reset_timeout_sec);
2167 if (result != ERROR_OK)
2168 return result;
2169 if (get_field(dmstatus, expected_field))
2170 break;
2171 if (time(NULL) - start > riscv_reset_timeout_sec) {
2172 LOG_ERROR("Hart %d didn't %s coming out of reset in %ds; "
2173 "dmstatus=0x%x; "
2174 "Increase the timeout with riscv set_reset_timeout_sec.",
2175 index, operation, riscv_reset_timeout_sec, dmstatus);
2176 return ERROR_FAIL;
2177 }
2178 }
2179 target->state = TARGET_HALTED;
2180
2181 if (get_field(dmstatus, DM_DMSTATUS_ALLHAVERESET)) {
2182 /* Ack reset. */
2183 dmi_write(target, DM_DMCONTROL,
2184 set_hartsel(control, index) |
2185 DM_DMCONTROL_ACKHAVERESET);
2186 }
2187
2188 if (!target->rtos)
2189 break;
2190 }
2191 info->dmi_busy_delay = dmi_busy_delay;
2192 return ERROR_OK;
2193 }
2194
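/* Execute fence.i and fence on every enabled hart (starting with the current
 * one) so that instruction and data memory are consistent with the debugger's
 * subsequent accesses. A failure to execute the fence is only logged. */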
2195 static int execute_fence(struct target *target)
2196 {
2197 int old_hartid = riscv_current_hartid(target);
2198
2199 /* FIXME: For non-coherent systems we need to flush the caches right
2200 * here, but there's no ISA-defined way of doing that. */
2201 {
2202 struct riscv_program program;
2203 riscv_program_init(&program, target);
2204 riscv_program_fence_i(&program);
2205 riscv_program_fence(&program);
2206 int result = riscv_program_exec(&program, target);
2207 if (result != ERROR_OK)
2208 LOG_DEBUG("Unable to execute pre-fence");
2209 }
2210
2211 for (int i = 0; i < riscv_count_harts(target); ++i) {
2212 if (!riscv_hart_enabled(target, i))
2213 continue;
2214
2215 if (i == old_hartid)
2216 /* Fence already executed for this hart */
2217 continue;
2218
2219 riscv_set_current_hartid(target, i);
2220
2221 struct riscv_program program;
2222 riscv_program_init(&program, target);
2223 riscv_program_fence_i(&program);
2224 riscv_program_fence(&program);
2225 int result = riscv_program_exec(&program, target);
2226 if (result != ERROR_OK)
2227 LOG_DEBUG("Unable to execute fence on hart %d", i);
2228 }
2229
2230 riscv_set_current_hartid(target, old_hartid);
2231
2232 return ERROR_OK;
2233 }
2234
2235 static void log_memory_access(target_addr_t address, uint64_t value,
2236 unsigned size_bytes, bool read)
2237 {
2238 if (debug_level < LOG_LVL_DEBUG)
2239 return;
2240
2241 char fmt[80];
2242 sprintf(fmt, "M[0x%" TARGET_PRIxADDR "] %ss 0x%%0%d" PRIx64,
2243 address, read ? "read" : "write", size_bytes * 2);
2244 switch (size_bytes) {
2245 case 1:
2246 value &= 0xff;
2247 break;
2248 case 2:
2249 value &= 0xffff;
2250 break;
2251 case 4:
2252 value &= 0xffffffffUL;
2253 break;
2254 case 8:
2255 break;
2256 default:
2257 assert(false);
2258 }
2259 LOG_DEBUG(fmt, value);
2260 }
2261
2262 /* Read the relevant sbdata regs depending on size, and put the results into
2263 * buffer. */
2264 static int read_memory_bus_word(struct target *target, target_addr_t address,
2265 uint32_t size, uint8_t *buffer)
2266 {
2267 uint32_t value;
2268 int result;
2269 static int sbdata[4] = { DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3 };
2270 assert(size <= 16);
2271 for (int i = (size - 1) / 4; i >= 0; i--) {
2272 result = dmi_op(target, &value, NULL, DMI_OP_READ, sbdata[i], 0, false, true);
2273 if (result != ERROR_OK)
2274 return result;
2275 buf_set_u32(buffer + i * 4, 0, 8 * MIN(size, 4), value);
2276 log_memory_access(address + i * 4, value, MIN(size, 4), true);
2277 }
2278 return ERROR_OK;
2279 }
2280
2281 static uint32_t sb_sbaccess(unsigned size_bytes)
2282 {
2283 switch (size_bytes) {
2284 case 1:
2285 return set_field(0, DM_SBCS_SBACCESS, 0);
2286 case 2:
2287 return set_field(0, DM_SBCS_SBACCESS, 1);
2288 case 4:
2289 return set_field(0, DM_SBCS_SBACCESS, 2);
2290 case 8:
2291 return set_field(0, DM_SBCS_SBACCESS, 3);
2292 case 16:
2293 return set_field(0, DM_SBCS_SBACCESS, 4);
2294 }
2295 assert(0);
2296 return 0; /* Make mingw happy. */
2297 }
2298
2299 static target_addr_t sb_read_address(struct target *target)
2300 {
2301 RISCV013_INFO(info);
2302 unsigned sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2303 target_addr_t address = 0;
2304 uint32_t v;
2305 if (sbasize > 32) {
2306 dmi_read(target, &v, DM_SBADDRESS1);
2307 address |= v;
2308 address <<= 32;
2309 }
2310 dmi_read(target, &v, DM_SBADDRESS0);
2311 address |= v;
2312 return address;
2313 }
2314
2315 static int sb_write_address(struct target *target, target_addr_t address)
2316 {
2317 RISCV013_INFO(info);
2318 unsigned sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2319 /* There currently is no support for >64-bit addresses in OpenOCD. */
2320 if (sbasize > 96)
2321 dmi_write(target, DM_SBADDRESS3, 0);
2322 if (sbasize > 64)
2323 dmi_write(target, DM_SBADDRESS2, 0);
2324 if (sbasize > 32)
2325 dmi_write(target, DM_SBADDRESS1, address >> 32);
2326 return dmi_write(target, DM_SBADDRESS0, address);
2327 }
2328
2329 static int read_sbcs_nonbusy(struct target *target, uint32_t *sbcs)
2330 {
2331 time_t start = time(NULL);
2332 while (1) {
2333 if (dmi_read(target, sbcs, DM_SBCS) != ERROR_OK)
2334 return ERROR_FAIL;
2335 if (!get_field(*sbcs, DM_SBCS_SBBUSY))
2336 return ERROR_OK;
2337 if (time(NULL) - start > riscv_command_timeout_sec) {
2338 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
2339 "Increase the timeout with riscv set_command_timeout_sec.",
2340 riscv_command_timeout_sec, *sbcs);
2341 return ERROR_FAIL;
2342 }
2343 }
2344 }
2345
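/* If `riscv set_enable_virtual` is in effect and the program buffer is large
 * enough, make program-buffer loads/stores undergo the same translation as
 * the interrupted code: unless the hart was running in M-mode, copy dcsr.prv
 * into mstatus.MPP and set mstatus.MPRV. The original mstatus is returned in
 * *mstatus_old so the caller can restore it afterwards. */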
2346 static int modify_privilege(struct target *target, uint64_t *mstatus, uint64_t *mstatus_old)
2347 {
2348 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5)) {
2349 /* Read DCSR */
2350 uint64_t dcsr;
2351 if (register_read(target, &dcsr, GDB_REGNO_DCSR) != ERROR_OK)
2352 return ERROR_FAIL;
2353
2354 /* Read and save MSTATUS */
2355 if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
2356 return ERROR_FAIL;
2357 *mstatus_old = *mstatus;
2358
2359 /* If we come from m-mode with mprv set, we want to keep mpp */
2360 if (get_field(dcsr, DCSR_PRV) < 3) {
2361 /* MPP = PRIV */
2362 *mstatus = set_field(*mstatus, MSTATUS_MPP, get_field(dcsr, DCSR_PRV));
2363
2364 /* MPRV = 1 */
2365 *mstatus = set_field(*mstatus, MSTATUS_MPRV, 1);
2366
2367 /* Write MSTATUS */
2368 if (*mstatus != *mstatus_old)
2369 if (register_write_direct(target, GDB_REGNO_MSTATUS, *mstatus) != ERROR_OK)
2370 return ERROR_FAIL;
2371 }
2372 }
2373
2374 return ERROR_OK;
2375 }
2376
2377 static int read_memory_bus_v0(struct target *target, target_addr_t address,
2378 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
2379 {
2380 if (size != increment) {
2381 LOG_ERROR("sba v0 reads only support size==increment");
2382 return ERROR_NOT_IMPLEMENTED;
2383 }
2384
2385 LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
2386 TARGET_PRIxADDR, size, count, address);
2387 uint8_t *t_buffer = buffer;
2388 riscv_addr_t cur_addr = address;
2389 riscv_addr_t fin_addr = address + (count * size);
2390 uint32_t access = 0;
2391
2392 const int DM_SBCS_SBSINGLEREAD_OFFSET = 20;
2393 const uint32_t DM_SBCS_SBSINGLEREAD = (0x1U << DM_SBCS_SBSINGLEREAD_OFFSET);
2394
2395 const int DM_SBCS_SBAUTOREAD_OFFSET = 15;
2396 const uint32_t DM_SBCS_SBAUTOREAD = (0x1U << DM_SBCS_SBAUTOREAD_OFFSET);
2397
2398 /* We favor single one-off reads if there is an issue. */
2399 if (count == 1) {
2400 for (uint32_t i = 0; i < count; i++) {
2401 if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
2402 return ERROR_FAIL;
2403 dmi_write(target, DM_SBADDRESS0, cur_addr);
2404 /* size/2 matches the sbaccess encoding of spec 0.13 */
2405 access = set_field(access, DM_SBCS_SBACCESS, size/2);
2406 access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
2407 LOG_DEBUG("\r\nread_memory: sab: access: 0x%08x", access);
2408 dmi_write(target, DM_SBCS, access);
2409 /* 3) read */
2410 uint32_t value;
2411 if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
2412 return ERROR_FAIL;
2413 LOG_DEBUG("\r\nread_memory: sab: value: 0x%08x", value);
2414 buf_set_u32(t_buffer, 0, 8 * size, value);
2415 t_buffer += size;
2416 cur_addr += size;
2417 }
2418 return ERROR_OK;
2419 }
2420
2421 /* has to be the same size if we want to read a block */
2422 LOG_DEBUG("reading block until final address 0x%" PRIx64, fin_addr);
2423 if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
2424 return ERROR_FAIL;
2425 /* set current address */
2426 dmi_write(target, DM_SBADDRESS0, cur_addr);
2427 /* 2) write sbaccess=size/2, sbsingleread, sbautoread, sbautoincrement;
2428 * size/2 matches the sbaccess encoding of spec 0.13 */
2429 access = set_field(access, DM_SBCS_SBACCESS, size/2);
2430 access = set_field(access, DM_SBCS_SBAUTOREAD, 1);
2431 access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
2432 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
2433 LOG_DEBUG("\r\naccess: 0x%08x", access);
2434 dmi_write(target, DM_SBCS, access);
2435
2436 while (cur_addr < fin_addr) {
2437 LOG_DEBUG("\r\nsab:autoincrement: \r\n size: %d\tcount:%d\taddress: 0x%08"
2438 PRIx64, size, count, cur_addr);
2439 /* read */
2440 uint32_t value;
2441 if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
2442 return ERROR_FAIL;
2443 buf_set_u32(t_buffer, 0, 8 * size, value);
2444 cur_addr += size;
2445 t_buffer += size;
2446
2447 /* When we reach the last address, we must clear autoread. */
2448 if (cur_addr == fin_addr && count != 1) {
2449 dmi_write(target, DM_SBCS, 0);
2450 if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
2451 return ERROR_FAIL;
2452 buf_set_u32(t_buffer, 0, 8 * size, value);
2453 }
2454 }
2455
2456 return ERROR_OK;
2457 }
2458
2459 /**
2460 * Read the requested memory using the system bus interface.
2461 */
2462 static int read_memory_bus_v1(struct target *target, target_addr_t address,
2463 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
2464 {
2465 if (increment != size && increment != 0) {
2466 LOG_ERROR("sba v1 reads only support increment of size or 0");
2467 return ERROR_NOT_IMPLEMENTED;
2468 }
2469
2470 RISCV013_INFO(info);
2471 target_addr_t next_address = address;
2472 target_addr_t end_address = address + count * size;
2473
2474 while (next_address < end_address) {
2475 uint32_t sbcs_write = set_field(0, DM_SBCS_SBREADONADDR, 1);
2476 sbcs_write |= sb_sbaccess(size);
2477 if (increment == size)
2478 sbcs_write = set_field(sbcs_write, DM_SBCS_SBAUTOINCREMENT, 1);
2479 if (count > 1)
2480 sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, count > 1);
2481 if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
2482 return ERROR_FAIL;
2483
2484 /* This address write will trigger the first read. */
2485 if (sb_write_address(target, next_address) != ERROR_OK)
2486 return ERROR_FAIL;
2487
2488 if (info->bus_master_read_delay) {
2489 jtag_add_runtest(info->bus_master_read_delay, TAP_IDLE);
2490 if (jtag_execute_queue() != ERROR_OK) {
2491 LOG_ERROR("Failed to scan idle sequence");
2492 return ERROR_FAIL;
2493 }
2494 }
2495
2496 /* First value has been read, and is waiting for us to issue a DMI read
2497 * to get it. */
2498
2499 static int sbdata[4] = {DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3};
2500 assert(size <= 16);
2501 target_addr_t next_read = address - 1;
2502 for (uint32_t i = (next_address - address) / size; i < count - 1; i++) {
2503 for (int j = (size - 1) / 4; j >= 0; j--) {
2504 uint32_t value;
2505 unsigned attempt = 0;
2506 while (1) {
2507 if (attempt++ > 100) {
2508 LOG_ERROR("DMI keeps being busy while reading memory just past " TARGET_ADDR_FMT,
2509 next_read);
2510 return ERROR_FAIL;
2511 }
2512 dmi_status_t status = dmi_scan(target, NULL, &value,
2513 DMI_OP_READ, sbdata[j], 0, false);
2514 if (status == DMI_STATUS_BUSY)
2515 increase_dmi_busy_delay(target);
2516 else if (status == DMI_STATUS_SUCCESS)
2517 break;
2518 else
2519 return ERROR_FAIL;
2520 }
2521 if (next_read != address - 1) {
2522 buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
2523 log_memory_access(next_read, value, MIN(size, 4), true);
2524 }
2525 next_read = address + i * size + j * 4;
2526 }
2527 }
2528
2529 uint32_t sbcs_read = 0;
2530 if (count > 1) {
2531 uint32_t value;
2532 unsigned attempt = 0;
2533 while (1) {
2534 if (attempt++ > 100) {
2535 LOG_ERROR("DMI keeps being busy while reading memory just past " TARGET_ADDR_FMT,
2536 next_read);
2537 return ERROR_FAIL;
2538 }
2539 dmi_status_t status = dmi_scan(target, NULL, &value, DMI_OP_NOP, 0, 0, false);
2540 if (status == DMI_STATUS_BUSY)
2541 increase_dmi_busy_delay(target);
2542 else if (status == DMI_STATUS_SUCCESS)
2543 break;
2544 else
2545 return ERROR_FAIL;
2546 }
2547 buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
2548 log_memory_access(next_read, value, MIN(size, 4), true);
2549
2550 /* "Writes to sbcs while sbbusy is high result in undefined behavior.
2551 * A debugger must not write to sbcs until it reads sbbusy as 0." */
2552 if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
2553 return ERROR_FAIL;
2554
2555 sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, 0);
2556 if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
2557 return ERROR_FAIL;
2558 }
2559
2560 /* Read the last word, after we disabled sbreadondata if necessary. */
2561 if (!get_field(sbcs_read, DM_SBCS_SBERROR) &&
2562 !get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
2563 if (read_memory_bus_word(target, address + (count - 1) * size, size,
2564 buffer + (count - 1) * size) != ERROR_OK)
2565 return ERROR_FAIL;
2566
2567 if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
2568 return ERROR_FAIL;
2569 }
2570
2571 if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
2572 /* We read while the target was busy. Slow down and try again. */
2573 if (dmi_write(target, DM_SBCS, DM_SBCS_SBBUSYERROR) != ERROR_OK)
2574 return ERROR_FAIL;
2575 next_address = sb_read_address(target);
2576 info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
2577 continue;
2578 }
2579
2580 unsigned error = get_field(sbcs_read, DM_SBCS_SBERROR);
2581 if (error == 0) {
2582 next_address = end_address;
2583 } else {
2584 /* Some error indicating the bus access failed, but not because of
2585 * something we did wrong. */
2586 if (dmi_write(target, DM_SBCS, DM_SBCS_SBERROR) != ERROR_OK)
2587 return ERROR_FAIL;
2588 return ERROR_FAIL;
2589 }
2590 }
2591
2592 return ERROR_OK;
2593 }
2594
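/* Run a DMI batch. While r->reset_delays_wait is counting down, it is
 * decremented by the number of scans in the batch; once it reaches zero the
 * learned DMI and abstract-command busy delays are reset to zero. */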
2595 static int batch_run(const struct target *target, struct riscv_batch *batch)
2596 {
2597 RISCV013_INFO(info);
2598 RISCV_INFO(r);
2599 if (r->reset_delays_wait >= 0) {
2600 r->reset_delays_wait -= batch->used_scans;
2601 if (r->reset_delays_wait <= 0) {
2602 batch->idle_count = 0;
2603 info->dmi_busy_delay = 0;
2604 info->ac_busy_delay = 0;
2605 }
2606 }
2607 return riscv_batch_run(batch);
2608 }
2609
2610 /*
2611 * Performs a memory read using memory access abstract commands. The read sizes
2612 * supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16 byte
2613 * aamsize fields in the memory access abstract command.
2614 */
2615 static int read_memory_abstract(struct target *target, target_addr_t address,
2616 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
2617 {
2618 if (size != increment) {
2619 LOG_ERROR("abstract command reads only support size==increment");
2620 return ERROR_NOT_IMPLEMENTED;
2621 }
2622
2623 int result = ERROR_OK;
2624
2625 LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
2626 size, address);
2627
2628 memset(buffer, 0, count * size);
2629
2630 /* Convert the size (bytes) to width (bits) */
2631 unsigned width = size << 3;
2632 if (width > 64) {
2633 /* TODO: Add 128b support if it's ever used. Involves modifying
2634 read/write_abstract_arg() to work on two 64b values. */
2635 LOG_ERROR("Unsupported size: %d bits", width);
2636 return ERROR_FAIL;
2637 }
2638
2639 /* Create the command (physical address, postincrement, read) */
2640 uint32_t command = access_memory_command(target, false, width, true, false);
2641
2642 /* Execute the reads */
2643 uint8_t *p = buffer;
2644 bool updateaddr = true;
2645 unsigned width32 = (width + 31) / 32 * 32;
2646 for (uint32_t c = 0; c < count; c++) {
2647 /* Only update the address initially and let postincrement update it */
2648 if (updateaddr) {
2649 /* Set arg1 to the address: address + c * size */
2650 result = write_abstract_arg(target, 1, address, riscv_xlen(target));
2651 if (result != ERROR_OK) {
2652 LOG_ERROR("Failed to write arg1 during read_memory_abstract().");
2653 return result;
2654 }
2655 }
2656
2657 /* Execute the command */
2658 result = execute_abstract_command(target, command);
2659 if (result != ERROR_OK) {
2660 LOG_ERROR("Failed to execute command read_memory_abstract().");
2661 return result;
2662 }
2663
2664 /* Copy arg0 to buffer (rounded width up to nearest 32) */
2665 riscv_reg_t value = read_abstract_arg(target, 0, width32);
2666 buf_set_u64(p, 0, 8 * size, value);
2667
2668 updateaddr = false;
2669 p += size;
2670 }
2671
2672 return result;
2673 }
2674
2675 /*
2676 * Performs a memory write using memory access abstract commands. The write
2677 * sizes supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16
2678 * byte aamsize fields in the memory access abstract command.
2679 */
2680 static int write_memory_abstract(struct target *target, target_addr_t address,
2681 uint32_t size, uint32_t count, const uint8_t *buffer)
2682 {
2683 int result = ERROR_OK;
2684
2685 LOG_DEBUG("writing %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
2686 size, address);
2687
2688 /* Convert the size (bytes) to width (bits) */
2689 unsigned width = size << 3;
2690 if (width > 64) {
2691 /* TODO: Add 128b support if it's ever used. Involves modifying
2692 read/write_abstract_arg() to work on two 64b values. */
2693 LOG_ERROR("Unsupported size: %d bits", width);
2694 return ERROR_FAIL;
2695 }
2696
2697 /* Create the command (physical address, postincrement, write) */
2698 uint32_t command = access_memory_command(target, false, width, true, true);
2699
2700 /* Execute the writes */
2701 const uint8_t *p = buffer;
2702 bool updateaddr = true;
2703 for (uint32_t c = 0; c < count; c++) {
2704 /* Move data to arg0 */
2705 riscv_reg_t value = buf_get_u64(p, 0, 8 * size);
2706 result = write_abstract_arg(target, 0, value, riscv_xlen(target));
2707 if (result != ERROR_OK) {
2708 LOG_ERROR("Failed to write arg0 during write_memory_abstract().");
2709 return result;
2710 }
2711
2712 /* Only update the address initially and let postincrement update it */
2713 if (updateaddr) {
2714 /* Set arg1 to the address: address + c * size */
2715 result = write_abstract_arg(target, 1, address, riscv_xlen(target));
2716 if (result != ERROR_OK) {
2717 LOG_ERROR("Failed to write arg1 during write_memory_abstract().");
2718 return result;
2719 }
2720 }
2721
2722 /* Execute the command */
2723 result = execute_abstract_command(target, command);
2724 if (result != ERROR_OK) {
2725 LOG_ERROR("Failed to execute command write_memory_abstract().");
2726 return result;
2727 }
2728
2729 updateaddr = false;
2730 p += size;
2731 }
2732
2733 return result;
2734 }
2735
2736 /**
2737 * Read the requested memory, taking care to execute every read exactly once,
2738 * even if cmderr=busy is encountered.
2739 */
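/* The caller has already filled the program buffer with a load through
 * S0/S1. This function enables abstractauto so that every DMI read of DATA0
 * re-runs that program, keeping a one-deep pipeline of
 * memory -> S1 -> DATA0 -> debugger. When cmderr=busy is reported, the code
 * backs off, works out how far the hart actually got (from S0, or from the
 * S2 counter when increment is 0), and resumes from there. */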
2740 static int read_memory_progbuf_inner(struct target *target, target_addr_t address,
2741 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
2742 {
2743 RISCV013_INFO(info);
2744
2745 int result = ERROR_OK;
2746
2747 /* Write address to S0. */
2748 result = register_write_direct(target, GDB_REGNO_S0, address);
2749 if (result != ERROR_OK)
2750 return result;
2751
2752 if (increment == 0 &&
2753 register_write_direct(target, GDB_REGNO_S2, 0) != ERROR_OK)
2754 return ERROR_FAIL;
2755
2756 uint32_t command = access_register_command(target, GDB_REGNO_S1,
2757 riscv_xlen(target),
2758 AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
2759 if (execute_abstract_command(target, command) != ERROR_OK)
2760 return ERROR_FAIL;
2761
2762 /* First read has just triggered. Result is in s1. */
2763 if (count == 1) {
2764 uint64_t value;
2765 if (register_read_direct(target, &value, GDB_REGNO_S1) != ERROR_OK)
2766 return ERROR_FAIL;
2767 buf_set_u64(buffer, 0, 8 * size, value);
2768 log_memory_access(address, value, size, true);
2769 return ERROR_OK;
2770 }
2771
2772 if (dmi_write(target, DM_ABSTRACTAUTO,
2773 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET) != ERROR_OK)
2774 goto error;
2775 /* Read garbage from dmi_data0, which triggers another execution of the
2776 * program. Now dmi_data0 contains the first good result, and s1 the next
2777 * memory value. */
2778 if (dmi_read_exec(target, NULL, DM_DATA0) != ERROR_OK)
2779 goto error;
2780
2781 /* read_addr is the next address that the hart will read from, which is the
2782 * value in s0. */
2783 unsigned index = 2;
2784 while (index < count) {
2785 riscv_addr_t read_addr = address + index * increment;
2786 LOG_DEBUG("i=%d, count=%d, read_addr=0x%" PRIx64, index, count, read_addr);
2787 /* The pipeline looks like this:
2788 * memory -> s1 -> dm_data0 -> debugger
2789 * Right now:
2790 * s0 contains read_addr
2791 * s1 contains mem[read_addr-size]
2792 * dm_data0 contains mem[read_addr-size*2]
2793 */
2794
2795 struct riscv_batch *batch = riscv_batch_alloc(target, 32,
2796 info->dmi_busy_delay + info->ac_busy_delay);
2797 if (!batch)
2798 return ERROR_FAIL;
2799
2800 unsigned reads = 0;
2801 for (unsigned j = index; j < count; j++) {
2802 if (size > 4)
2803 riscv_batch_add_dmi_read(batch, DM_DATA1);
2804 riscv_batch_add_dmi_read(batch, DM_DATA0);
2805
2806 reads++;
2807 if (riscv_batch_full(batch))
2808 break;
2809 }
2810
2811 batch_run(target, batch);
2812
2813 /* Wait for the target to finish performing the last abstract command,
2814 * and update our copy of cmderr. If we see that DMI is busy here,
2815 * dmi_busy_delay will be incremented. */
2816 uint32_t abstractcs;
2817 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
2818 return ERROR_FAIL;
2819 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
2820 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
2821 return ERROR_FAIL;
2822 info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
2823
2824 unsigned next_index;
2825 unsigned ignore_last = 0;
2826 switch (info->cmderr) {
2827 case CMDERR_NONE:
2828 LOG_DEBUG("successful (partial?) memory read");
2829 next_index = index + reads;
2830 break;
2831 case CMDERR_BUSY:
2832 LOG_DEBUG("memory read resulted in busy response");
2833
2834 increase_ac_busy_delay(target);
2835 riscv013_clear_abstract_error(target);
2836
2837 dmi_write(target, DM_ABSTRACTAUTO, 0);
2838
2839 uint32_t dmi_data0, dmi_data1 = 0;
2840 /* This is definitely a good version of the value that we
2841 * attempted to read when we discovered that the target was
2842 * busy. */
2843 if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK) {
2844 riscv_batch_free(batch);
2845 goto error;
2846 }
2847 if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK) {
2848 riscv_batch_free(batch);
2849 goto error;
2850 }
2851
2852 /* See how far we got, clobbering dmi_data0. */
2853 if (increment == 0) {
2854 uint64_t counter;
2855 result = register_read_direct(target, &counter, GDB_REGNO_S2);
2856 next_index = counter;
2857 } else {
2858 uint64_t next_read_addr;
2859 result = register_read_direct(target, &next_read_addr,
2860 GDB_REGNO_S0);
2861 next_index = (next_read_addr - address) / increment;
2862 }
2863 if (result != ERROR_OK) {
2864 riscv_batch_free(batch);
2865 goto error;
2866 }
2867
2868 uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
2869 buf_set_u64(buffer + (next_index - 2) * size, 0, 8 * size, value64);
2870 log_memory_access(address + (next_index - 2) * size, value64, size, true);
2871
2872 /* Restore the command, and execute it.
2873 * Now DM_DATA0 contains the next value just as it would if no
2874 * error had occurred. */
2875 dmi_write_exec(target, DM_COMMAND, command, true);
2876 next_index++;
2877
2878 dmi_write(target, DM_ABSTRACTAUTO,
2879 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
2880
2881 ignore_last = 1;
2882
2883 break;
2884 default:
2885 LOG_DEBUG("error when reading memory, abstractcs=0x%08lx", (long)abstractcs);
2886 riscv013_clear_abstract_error(target);
2887 riscv_batch_free(batch);
2888 result = ERROR_FAIL;
2889 goto error;
2890 }
2891
2892 /* Now read whatever we got out of the batch. */
2893 dmi_status_t status = DMI_STATUS_SUCCESS;
2894 unsigned read = 0;
2895 assert(index >= 2);
2896 for (unsigned j = index - 2; j < index + reads; j++) {
2897 assert(j < count);
2898 LOG_DEBUG("index=%d, reads=%d, next_index=%d, ignore_last=%d, j=%d",
2899 index, reads, next_index, ignore_last, j);
2900 if (j + 3 + ignore_last > next_index)
2901 break;
2902
2903 status = riscv_batch_get_dmi_read_op(batch, read);
2904 uint64_t value = riscv_batch_get_dmi_read_data(batch, read);
2905 read++;
2906 if (status != DMI_STATUS_SUCCESS) {
2907 /* If we're here because of busy count, dmi_busy_delay will
2908 * already have been increased and busy state will have been
2909 * cleared in dmi_read(). */
2910 /* In at least some implementations, we issue a read, and then
2911 * can get busy back when we try to scan out the read result,
2912 * and the actual read value is lost forever. Since this is
2913 * rare in any case, we return error here and rely on our
2914 * caller to reread the entire block. */
2915 LOG_WARNING("Batch memory read encountered DMI error %d. "
2916 "Falling back on slower reads.", status);
2917 riscv_batch_free(batch);
2918 result = ERROR_FAIL;
2919 goto error;
2920 }
2921 if (size > 4) {
2922 status = riscv_batch_get_dmi_read_op(batch, read);
2923 if (status != DMI_STATUS_SUCCESS) {
2924 LOG_WARNING("Batch memory read encountered DMI error %d. "
2925 "Falling back on slower reads.", status);
2926 riscv_batch_free(batch);
2927 result = ERROR_FAIL;
2928 goto error;
2929 }
2930 value <<= 32;
2931 value |= riscv_batch_get_dmi_read_data(batch, read);
2932 read++;
2933 }
2934 riscv_addr_t offset = j * size;
2935 buf_set_u64(buffer + offset, 0, 8 * size, value);
2936 log_memory_access(address + j * increment, value, size, true);
2937 }
2938
2939 index = next_index;
2940
2941 riscv_batch_free(batch);
2942 }
2943
2944 dmi_write(target, DM_ABSTRACTAUTO, 0);
2945
2946 if (count > 1) {
2947 /* Read the penultimate word. */
2948 uint32_t dmi_data0, dmi_data1 = 0;
2949 if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK)
2950 return ERROR_FAIL;
2951 if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK)
2952 return ERROR_FAIL;
2953 uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
2954 buf_set_u64(buffer + size * (count - 2), 0, 8 * size, value64);
2955 log_memory_access(address + size * (count - 2), value64, size, true);
2956 }
2957
2958 /* Read the last word. */
2959 uint64_t value;
2960 result = register_read_direct(target, &value, GDB_REGNO_S1);
2961 if (result != ERROR_OK)
2962 goto error;
2963 buf_set_u64(buffer + size * (count-1), 0, 8 * size, value);
2964 log_memory_access(address + size * (count-1), value, size, true);
2965
2966 return ERROR_OK;
2967
2968 error:
2969 dmi_write(target, DM_ABSTRACTAUTO, 0);
2970
2971 return result;
2972 }
2973
2974 /* Only need to save/restore one GPR to read a single word, and the progbuf
2975 * program doesn't need to increment. */
2976 static int read_memory_progbuf_one(struct target *target, target_addr_t address,
2977 uint32_t size, uint8_t *buffer)
2978 {
2979 uint64_t mstatus = 0;
2980 uint64_t mstatus_old = 0;
2981 if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
2982 return ERROR_FAIL;
2983
2984 uint64_t s0;
2985
2986 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
2987 return ERROR_FAIL;
2988
2989 /* Write the program (load, increment) */
2990 struct riscv_program program;
2991 riscv_program_init(&program, target);
2992 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
2993 riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
2994 switch (size) {
2995 case 1:
2996 riscv_program_lbr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
2997 break;
2998 case 2:
2999 riscv_program_lhr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3000 break;
3001 case 4:
3002 riscv_program_lwr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3003 break;
3004 case 8:
3005 riscv_program_ldr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3006 break;
3007 default:
3008 LOG_ERROR("Unsupported size: %d", size);
3009 return ERROR_FAIL;
3010 }
3011 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3012 riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3013
3014 if (riscv_program_ebreak(&program) != ERROR_OK)
3015 return ERROR_FAIL;
3016 if (riscv_program_write(&program) != ERROR_OK)
3017 return ERROR_FAIL;
3018
3019 /* Write address to S0, and execute buffer. */
3020 if (write_abstract_arg(target, 0, address, riscv_xlen(target)) != ERROR_OK)
3021 return ERROR_FAIL;
3022 uint32_t command = access_register_command(target, GDB_REGNO_S0,
3023 riscv_xlen(target), AC_ACCESS_REGISTER_WRITE |
3024 AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
3025 if (execute_abstract_command(target, command) != ERROR_OK)
3026 return ERROR_FAIL;
3027
3028 uint64_t value;
3029 if (register_read(target, &value, GDB_REGNO_S0) != ERROR_OK)
3030 return ERROR_FAIL;
3031 buf_set_u64(buffer, 0, 8 * size, value);
3032 log_memory_access(address, value, size, true);
3033
3034 if (riscv_set_register(target, GDB_REGNO_S0, s0) != ERROR_OK)
3035 return ERROR_FAIL;
3036
3037 /* Restore MSTATUS */
3038 if (mstatus != mstatus_old)
3039 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
3040 return ERROR_FAIL;
3041
3042 return ERROR_OK;
3043 }
3044
3045 /**
3046 * Read the requested memory, silently handling memory access errors.
3047 */
3048 static int read_memory_progbuf(struct target *target, target_addr_t address,
3049 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3050 {
3051 if (riscv_xlen(target) < size * 8) {
3052 LOG_ERROR("XLEN (%d) is too short for %d-bit memory read.",
3053 riscv_xlen(target), size * 8);
3054 return ERROR_FAIL;
3055 }
3056
3057 int result = ERROR_OK;
3058
3059 LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
3060 size, address);
3061
3062 select_dmi(target);
3063
3064 memset(buffer, 0, count*size);
3065
3066 if (execute_fence(target) != ERROR_OK)
3067 return ERROR_FAIL;
3068
3069 if (count == 1)
3070 return read_memory_progbuf_one(target, address, size, buffer);
3071
3072 uint64_t mstatus = 0;
3073 uint64_t mstatus_old = 0;
3074 if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
3075 return ERROR_FAIL;
3076
3077 /* s0 holds the next address to write to
3078 * s1 holds the next data value to write
3079 * s2 is a counter in case increment is 0
3080 */
3081 uint64_t s0, s1, s2;
3082 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
3083 return ERROR_FAIL;
3084 if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
3085 return ERROR_FAIL;
3086 if (increment == 0 && register_read(target, &s2, GDB_REGNO_S2) != ERROR_OK)
3087 return ERROR_FAIL;
3088
3089 /* Write the program (load, increment) */
3090 struct riscv_program program;
3091 riscv_program_init(&program, target);
3092 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3093 riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3094
3095 switch (size) {
3096 case 1:
3097 riscv_program_lbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3098 break;
3099 case 2:
3100 riscv_program_lhr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3101 break;
3102 case 4:
3103 riscv_program_lwr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3104 break;
3105 case 8:
3106 riscv_program_ldr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3107 break;
3108 default:
3109 LOG_ERROR("Unsupported size: %d", size);
3110 return ERROR_FAIL;
3111 }
3112
3113 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3114 riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3115 if (increment == 0)
3116 riscv_program_addi(&program, GDB_REGNO_S2, GDB_REGNO_S2, 1);
3117 else
3118 riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, increment);
3119
3120 if (riscv_program_ebreak(&program) != ERROR_OK)
3121 return ERROR_FAIL;
3122 if (riscv_program_write(&program) != ERROR_OK)
3123 return ERROR_FAIL;
3124
3125 result = read_memory_progbuf_inner(target, address, size, count, buffer, increment);
3126
3127 if (result != ERROR_OK) {
3128 /* The full read did not succeed, so we will try to read each word individually. */
3129 /* This will not be fast, but reading outside actual memory is a special case anyway. */
3130 /* It will make the toolchain happier, especially Eclipse Memory View as it reads ahead. */
3131 target_addr_t address_i = address;
3132 uint32_t count_i = 1;
3133 uint8_t *buffer_i = buffer;
3134
3135 for (uint32_t i = 0; i < count; i++, address_i += increment, buffer_i += size) {
3136 keep_alive();
3137 /* TODO: This is much slower than it needs to be because we end up
3138 * writing the address to read for every word we read. */
3139 result = read_memory_progbuf_inner(target, address_i, size, count_i, buffer_i, increment);
3140
3141 /* The read of a single word failed, so we will just return 0 for that instead */
3142 if (result != ERROR_OK) {
3143 LOG_DEBUG("error reading single word of %d bytes from 0x%" TARGET_PRIxADDR,
3144 size, address_i);
3145
3146 buf_set_u64(buffer_i, 0, 8 * size, 0);
3147 }
3148 }
3149 result = ERROR_OK;
3150 }
3151
3152 riscv_set_register(target, GDB_REGNO_S0, s0);
3153 riscv_set_register(target, GDB_REGNO_S1, s1);
3154 if (increment == 0)
3155 riscv_set_register(target, GDB_REGNO_S2, s2);
3156
3157 /* Restore MSTATUS */
3158 if (mstatus != mstatus_old)
3159 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
3160 return ERROR_FAIL;
3161
3162 return result;
3163 }
3164
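/* Dispatch a memory read to the best available mechanism: the program buffer
 * (unless the user prefers system bus access), then the system bus if it
 * supports the requested access size (v0 or v1 protocol), then the program
 * buffer again, and finally abstract access-memory commands as a last resort. */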
3165 static int read_memory(struct target *target, target_addr_t address,
3166 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3167 {
3168 if (count == 0)
3169 return ERROR_OK;
3170
3171 RISCV013_INFO(info);
3172 if (has_sufficient_progbuf(target, 3) && !riscv_prefer_sba)
3173 return read_memory_progbuf(target, address, size, count, buffer,
3174 increment);
3175
3176 if ((get_field(info->sbcs, DM_SBCS_SBACCESS8) && size == 1) ||
3177 (get_field(info->sbcs, DM_SBCS_SBACCESS16) && size == 2) ||
3178 (get_field(info->sbcs, DM_SBCS_SBACCESS32) && size == 4) ||
3179 (get_field(info->sbcs, DM_SBCS_SBACCESS64) && size == 8) ||
3180 (get_field(info->sbcs, DM_SBCS_SBACCESS128) && size == 16)) {
3181 if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
3182 return read_memory_bus_v0(target, address, size, count, buffer,
3183 increment);
3184 else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
3185 return read_memory_bus_v1(target, address, size, count, buffer,
3186 increment);
3187 }
3188
3189 if (has_sufficient_progbuf(target, 3))
3190 return read_memory_progbuf(target, address, size, count, buffer,
3191 increment);
3192
3193 return read_memory_abstract(target, address, size, count, buffer,
3194 increment);
3195 }
3196
3197 static int write_memory_bus_v0(struct target *target, target_addr_t address,
3198 uint32_t size, uint32_t count, const uint8_t *buffer)
3199 {
3200 /* 1) Write sbaddress: for single write and autoincrement, we need to write the address once. */
3201 LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
3202 TARGET_PRIxADDR, size, count, address);
3203 dmi_write(target, DM_SBADDRESS0, address);
3204 int64_t value = 0;
3205 int64_t access = 0;
3206 riscv_addr_t offset = 0;
3207 riscv_addr_t t_addr = 0;
3208 const uint8_t *t_buffer = buffer + offset;
3209
3210 /* B.8 Writing Memory, single write: check if we can write in one go */
3211 if (count == 1) { /* count is the number of size-byte elements */
3212 value = buf_get_u64(t_buffer, 0, 8 * size);
3213
3214 access = 0;
3215 access = set_field(access, DM_SBCS_SBACCESS, size/2);
3216 dmi_write(target, DM_SBCS, access);
3217 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
3218 LOG_DEBUG("\r\nwrite_memory:SAB: ONE OFF: value 0x%08" PRIx64, value);
3219 dmi_write(target, DM_SBDATA0, value);
3220 return ERROR_OK;
3221 }
3222
3223 /* B.8 Writing Memory, using autoincrement */
3224
3225 access = 0;
3226 access = set_field(access, DM_SBCS_SBACCESS, size/2);
3227 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
3228 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
3229 dmi_write(target, DM_SBCS, access);
3230
3231 /* 2) Set the value according to the size required and write. */
3232 for (riscv_addr_t i = 0; i < count; ++i) {
3233 offset = size*i;
3234 /* for monitoring only */
3235 t_addr = address + offset;
3236 t_buffer = buffer + offset;
3237
3238 value = buf_get_u64(t_buffer, 0, 8 * size);
3239 LOG_DEBUG("SAB:autoincrement: expected address: 0x%08x value: 0x%08x",
3240 (uint32_t)t_addr, (uint32_t)value);
3241 dmi_write(target, DM_SBDATA0, value);
3242 }
3243 /* Reset the autoincrement when finished (something weird happens if this is not done at the end). */
3244 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 0);
3245 dmi_write(target, DM_SBCS, access);
3246
3247 return ERROR_OK;
3248 }
3249
3250 static int write_memory_bus_v1(struct target *target, target_addr_t address,
3251 uint32_t size, uint32_t count, const uint8_t *buffer)
3252 {
3253 RISCV013_INFO(info);
3254 uint32_t sbcs = sb_sbaccess(size);
3255 sbcs = set_field(sbcs, DM_SBCS_SBAUTOINCREMENT, 1);
3256 dmi_write(target, DM_SBCS, sbcs);
3257
3258 target_addr_t next_address = address;
3259 target_addr_t end_address = address + count * size;
3260
3261 int result;
3262
3263 sb_write_address(target, next_address);
3264 while (next_address < end_address) {
3265 LOG_DEBUG("transferring burst starting at address 0x%" TARGET_PRIxADDR,
3266 next_address);
3267
3268 struct riscv_batch *batch = riscv_batch_alloc(
3269 target,
3270 32,
3271 info->dmi_busy_delay + info->bus_master_write_delay);
3272 if (!batch)
3273 return ERROR_FAIL;
3274
3275 for (uint32_t i = (next_address - address) / size; i < count; i++) {
3276 const uint8_t *p = buffer + i * size;
3277
3278 if (riscv_batch_available_scans(batch) < (size + 3) / 4)
3279 break;
3280
3281 if (size > 12)
3282 riscv_batch_add_dmi_write(batch, DM_SBDATA3,
3283 ((uint32_t) p[12]) |
3284 (((uint32_t) p[13]) << 8) |
3285 (((uint32_t) p[14]) << 16) |
3286 (((uint32_t) p[15]) << 24));
3287
3288 if (size > 8)
3289 riscv_batch_add_dmi_write(batch, DM_SBDATA2,
3290 ((uint32_t) p[8]) |
3291 (((uint32_t) p[9]) << 8) |
3292 (((uint32_t) p[10]) << 16) |
3293 (((uint32_t) p[11]) << 24));
3294 if (size > 4)
3295 riscv_batch_add_dmi_write(batch, DM_SBDATA1,
3296 ((uint32_t) p[4]) |
3297 (((uint32_t) p[5]) << 8) |
3298 (((uint32_t) p[6]) << 16) |
3299 (((uint32_t) p[7]) << 24));
3300 uint32_t value = p[0];
3301 if (size > 2) {
3302 value |= ((uint32_t) p[2]) << 16;
3303 value |= ((uint32_t) p[3]) << 24;
3304 }
3305 if (size > 1)
3306 value |= ((uint32_t) p[1]) << 8;
3307 riscv_batch_add_dmi_write(batch, DM_SBDATA0, value);
3308
3309 log_memory_access(address + i * size, value, size, false);
3310 next_address += size;
3311 }
3312
3313 result = batch_run(target, batch);
3314 riscv_batch_free(batch);
3315 if (result != ERROR_OK)
3316 return result;
3317
3318 bool dmi_busy_encountered;
3319 if (dmi_op(target, &sbcs, &dmi_busy_encountered, DMI_OP_READ,
3320 DM_SBCS, 0, false, false) != ERROR_OK)
3321 return ERROR_FAIL;
3322
3323 time_t start = time(NULL);
3324 bool dmi_busy = dmi_busy_encountered;
3325 while (get_field(sbcs, DM_SBCS_SBBUSY) || dmi_busy) {
3326 if (time(NULL) - start > riscv_command_timeout_sec) {
3327 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
3328 "Increase the timeout with riscv set_command_timeout_sec.",
3329 riscv_command_timeout_sec, sbcs);
3330 return ERROR_FAIL;
3331 }
3332
3333 if (dmi_op(target, &sbcs, &dmi_busy, DMI_OP_READ,
3334 DM_SBCS, 0, false, true) != ERROR_OK)
3335 return ERROR_FAIL;
3336 }
3337
3338 if (get_field(sbcs, DM_SBCS_SBBUSYERROR)) {
3339 /* We wrote while the target was busy. Slow down and try again. */
3340 dmi_write(target, DM_SBCS, DM_SBCS_SBBUSYERROR);
3341 info->bus_master_write_delay += info->bus_master_write_delay / 10 + 1;
3342 }
3343
3344 if (get_field(sbcs, DM_SBCS_SBBUSYERROR) || dmi_busy_encountered) {
3345 next_address = sb_read_address(target);
3346 if (next_address < address) {
3347 /* This should never happen, probably buggy hardware. */
3348 LOG_DEBUG("unexpected system bus address 0x%" TARGET_PRIxADDR,
3349 next_address);
3350 return ERROR_FAIL;
3351 }
3352
3353 continue;
3354 }
3355
3356 unsigned error = get_field(sbcs, DM_SBCS_SBERROR);
3357 if (error != 0) {
3358 /* Some error indicating the bus access failed, but not because of
3359 * something we did wrong. */
3360 dmi_write(target, DM_SBCS, DM_SBCS_SBERROR);
3361 return ERROR_FAIL;
3362 }
3363 }
3364
3365 return ERROR_OK;
3366 }
3367
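/* Write to memory using the program buffer. A small program
* ("store s1 to (s0); s0 += size") is loaded once; the first word is
* transferred by writing DATA0/DATA1 and executing an abstract register
* write to s1 with postexec set. abstractauto is then enabled so every
* subsequent DATA0 write re-runs the command, which lets the remaining
* words be queued as plain DMI writes in a batch. On a busy response the
* current address is recovered from s0 and the loop restarts from there.
* s0, s1 and mstatus are restored before returning. */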
3368 static int write_memory_progbuf(struct target *target, target_addr_t address,
3369 uint32_t size, uint32_t count, const uint8_t *buffer)
3370 {
3371 RISCV013_INFO(info);
3372
3373 if (riscv_xlen(target) < size * 8) {
3374 LOG_ERROR("XLEN (%d) is too short for %d-bit memory write.",
3375 riscv_xlen(target), size * 8);
3376 return ERROR_FAIL;
3377 }
3378
3379 LOG_DEBUG("writing %d words of %d bytes to 0x%08lx", count, size, (long)address);
3380
3381 select_dmi(target);
3382
3383 uint64_t mstatus = 0;
3384 uint64_t mstatus_old = 0;
3385 if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
3386 return ERROR_FAIL;
3387
3388 /* s0 holds the next address to write to
3389 * s1 holds the next data value to write
3390 */
3391
3392 int result = ERROR_OK;
3393 uint64_t s0, s1;
3394 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
3395 return ERROR_FAIL;
3396 if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
3397 return ERROR_FAIL;
3398
3399 /* Write the program (store, increment) */
3400 struct riscv_program program;
3401 riscv_program_init(&program, target);
3402 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3403 riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3404
3405 switch (size) {
3406 case 1:
3407 riscv_program_sbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3408 break;
3409 case 2:
3410 riscv_program_shr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3411 break;
3412 case 4:
3413 riscv_program_swr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3414 break;
3415 case 8:
3416 riscv_program_sdr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3417 break;
3418 default:
3419 LOG_ERROR("write_memory_progbuf(): Unsupported size: %d", size);
3420 result = ERROR_FAIL;
3421 goto error;
3422 }
3423
3424 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3425 riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3426 riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, size);
3427
3428 result = riscv_program_ebreak(&program);
3429 if (result != ERROR_OK)
3430 goto error;
3431 riscv_program_write(&program);
3432
3433 riscv_addr_t cur_addr = address;
3434 riscv_addr_t fin_addr = address + (count * size);
3435 bool setup_needed = true;
3436 LOG_DEBUG("writing until final address 0x%016" PRIx64, fin_addr);
3437 while (cur_addr < fin_addr) {
3438 LOG_DEBUG("transferring burst starting at address 0x%016" PRIx64,
3439 cur_addr);
3440
3441 struct riscv_batch *batch = riscv_batch_alloc(
3442 target,
3443 32,
3444 info->dmi_busy_delay + info->ac_busy_delay);
3445 if (!batch) {
3446 result = ERROR_FAIL;
goto error;
}
3447
3448 /* To write another word, we put it in S1 and execute the program. */
3449 unsigned start = (cur_addr - address) / size;
3450 for (unsigned i = start; i < count; ++i) {
3451 unsigned offset = size*i;
3452 const uint8_t *t_buffer = buffer + offset;
3453
3454 uint64_t value = buf_get_u64(t_buffer, 0, 8 * size);
3455
3456 log_memory_access(address + offset, value, size, false);
3457 cur_addr += size;
3458
3459 if (setup_needed) {
3460 result = register_write_direct(target, GDB_REGNO_S0,
3461 address + offset);
3462 if (result != ERROR_OK) {
3463 riscv_batch_free(batch);
3464 goto error;
3465 }
3466
3467 /* Write value. */
3468 if (size > 4)
3469 dmi_write(target, DM_DATA1, value >> 32);
3470 dmi_write(target, DM_DATA0, value);
3471
3472 /* Write and execute command that moves value into S1 and
3473 * executes program buffer. */
3474 uint32_t command = access_register_command(target,
3475 GDB_REGNO_S1, riscv_xlen(target),
3476 AC_ACCESS_REGISTER_POSTEXEC |
3477 AC_ACCESS_REGISTER_TRANSFER |
3478 AC_ACCESS_REGISTER_WRITE);
3479 result = execute_abstract_command(target, command);
3480 if (result != ERROR_OK) {
3481 riscv_batch_free(batch);
3482 goto error;
3483 }
3484
3485 /* Turn on autoexec */
3486 dmi_write(target, DM_ABSTRACTAUTO,
3487 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
3488
3489 setup_needed = false;
3490 } else {
3491 if (size > 4)
3492 riscv_batch_add_dmi_write(batch, DM_DATA1, value >> 32);
3493 riscv_batch_add_dmi_write(batch, DM_DATA0, value);
3494 if (riscv_batch_full(batch))
3495 break;
3496 }
3497 }
3498
3499 result = batch_run(target, batch);
3500 riscv_batch_free(batch);
3501 if (result != ERROR_OK)
3502 goto error;
3503
3504 /* Note that if the scan resulted in a Busy DMI response, it
3505 * is this read to abstractcs that will cause the dmi_busy_delay
3506 * to be incremented if necessary. */
3507
3508 uint32_t abstractcs;
3509 bool dmi_busy_encountered;
3510 result = dmi_op(target, &abstractcs, &dmi_busy_encountered,
3511 DMI_OP_READ, DM_ABSTRACTCS, 0, false, true);
3512 if (result != ERROR_OK)
3513 goto error;
3514 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
3515 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
3516 return ERROR_FAIL;
3517 info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
3518 if (info->cmderr == CMDERR_NONE && !dmi_busy_encountered) {
3519 LOG_DEBUG("successful (partial?) memory write");
3520 } else if (info->cmderr == CMDERR_BUSY || dmi_busy_encountered) {
3521 if (info->cmderr == CMDERR_BUSY)
3522 LOG_DEBUG("Memory write resulted in abstract command busy response.");
3523 else if (dmi_busy_encountered)
3524 LOG_DEBUG("Memory write resulted in DMI busy response.");
3525 riscv013_clear_abstract_error(target);
3526 increase_ac_busy_delay(target);
3527
3528 dmi_write(target, DM_ABSTRACTAUTO, 0);
3529 result = register_read_direct(target, &cur_addr, GDB_REGNO_S0);
3530 if (result != ERROR_OK)
3531 goto error;
3532 setup_needed = true;
3533 } else {
3534 LOG_ERROR("error when writing memory, abstractcs=0x%08lx", (long)abstractcs);
3535 riscv013_clear_abstract_error(target);
3536 result = ERROR_FAIL;
3537 goto error;
3538 }
3539 }
3540
3541 error:
3542 dmi_write(target, DM_ABSTRACTAUTO, 0);
3543
3544 if (register_write_direct(target, GDB_REGNO_S1, s1) != ERROR_OK)
3545 return ERROR_FAIL;
3546 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
3547 return ERROR_FAIL;
3548
3549 /* Restore MSTATUS */
3550 if (mstatus != mstatus_old)
3551 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
3552 return ERROR_FAIL;
3553
3554 if (execute_fence(target) != ERROR_OK)
3555 return ERROR_FAIL;
3556
3557 return result;
3558 }
3559
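/* Pick a write mechanism: prefer the program buffer unless the user asked
* for system bus access (riscv_prefer_sba); fall back to the system bus if
* it supports this access size, then to the program buffer, and finally to
* plain abstract-command accesses. */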
3560 static int write_memory(struct target *target, target_addr_t address,
3561 uint32_t size, uint32_t count, const uint8_t *buffer)
3562 {
3563 RISCV013_INFO(info);
3564
3565 if (has_sufficient_progbuf(target, 3) && !riscv_prefer_sba)
3566 return write_memory_progbuf(target, address, size, count, buffer);
3567
3568 if ((get_field(info->sbcs, DM_SBCS_SBACCESS8) && size == 1) ||
3569 (get_field(info->sbcs, DM_SBCS_SBACCESS16) && size == 2) ||
3570 (get_field(info->sbcs, DM_SBCS_SBACCESS32) && size == 4) ||
3571 (get_field(info->sbcs, DM_SBCS_SBACCESS64) && size == 8) ||
3572 (get_field(info->sbcs, DM_SBCS_SBACCESS128) && size == 16)) {
3573 if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
3574 return write_memory_bus_v0(target, address, size, count, buffer);
3575 else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
3576 return write_memory_bus_v1(target, address, size, count, buffer);
3577 }
3578
3579 if (has_sufficient_progbuf(target, 3))
3580 return write_memory_progbuf(target, address, size, count, buffer);
3581
3582 return write_memory_abstract(target, address, size, count, buffer);
3583 }
3584
3585 static int arch_state(struct target *target)
3586 {
3587 return ERROR_OK;
3588 }
3589
3590 struct target_type riscv013_target = {
3591 .name = "riscv",
3592
3593 .init_target = init_target,
3594 .deinit_target = deinit_target,
3595 .examine = examine,
3596
3597 .poll = &riscv_openocd_poll,
3598 .halt = &riscv_halt,
3599 .step = &riscv_openocd_step,
3600
3601 .assert_reset = assert_reset,
3602 .deassert_reset = deassert_reset,
3603
3604 .write_memory = write_memory,
3605
3606 .arch_state = arch_state,
3607 };
3608
3609 /*** 0.13-specific implementations of various RISC-V helper functions. ***/
3610 static int riscv013_get_register(struct target *target,
3611 riscv_reg_t *value, int hid, int rid)
3612 {
3613 LOG_DEBUG("[%d] reading register %s on hart %d", target->coreid,
3614 gdb_regno_name(rid), hid);
3615
3616 riscv_set_current_hartid(target, hid);
3617
3618 int result = ERROR_OK;
3619 if (rid == GDB_REGNO_PC) {
3620 /* TODO: move this into riscv.c. */
3621 result = register_read(target, value, GDB_REGNO_DPC);
3622 LOG_DEBUG("[%d] read PC from DPC: 0x%" PRIx64, target->coreid, *value);
3623 } else if (rid == GDB_REGNO_PRIV) {
3624 uint64_t dcsr;
3625 /* TODO: move this into riscv.c. */
3626 result = register_read(target, &dcsr, GDB_REGNO_DCSR);
3627 *value = get_field(dcsr, CSR_DCSR_PRV);
3628 } else {
3629 result = register_read(target, value, rid);
3630 if (result != ERROR_OK)
3631 *value = -1;
3632 }
3633
3634 return result;
3635 }
3636
3637 static int riscv013_set_register(struct target *target, int hid, int rid, uint64_t value)
3638 {
3639 LOG_DEBUG("[%d] writing 0x%" PRIx64 " to register %s on hart %d",
3640 target->coreid, value, gdb_regno_name(rid), hid);
3641
3642 riscv_set_current_hartid(target, hid);
3643
3644 if (rid <= GDB_REGNO_XPR31) {
3645 return register_write_direct(target, rid, value);
3646 } else if (rid == GDB_REGNO_PC) {
3647 LOG_DEBUG("[%d] writing PC to DPC: 0x%" PRIx64, target->coreid, value);
3648 register_write_direct(target, GDB_REGNO_DPC, value);
3649 uint64_t actual_value;
3650 register_read_direct(target, &actual_value, GDB_REGNO_DPC);
3651 LOG_DEBUG("[%d] actual DPC written: 0x%016" PRIx64, target->coreid, actual_value);
3652 if (value != actual_value) {
3653 LOG_ERROR("Written PC (0x%" PRIx64 ") does not match read back "
3654 "value (0x%" PRIx64 ")", value, actual_value);
3655 return ERROR_FAIL;
3656 }
3657 } else if (rid == GDB_REGNO_PRIV) {
3658 uint64_t dcsr;
3659 register_read(target, &dcsr, GDB_REGNO_DCSR);
3660 dcsr = set_field(dcsr, CSR_DCSR_PRV, value);
3661 return register_write_direct(target, GDB_REGNO_DCSR, dcsr);
3662 } else {
3663 return register_write_direct(target, rid, value);
3664 }
3665
3666 return ERROR_OK;
3667 }
3668
3669 static int riscv013_select_current_hart(struct target *target)
3670 {
3671 RISCV_INFO(r);
3672
3673 dm013_info_t *dm = get_dm(target);
3674 if (!dm)
3675 return ERROR_FAIL;
3676 if (r->current_hartid == dm->current_hartid)
3677 return ERROR_OK;
3678
3679 uint32_t dmcontrol;
3680 /* TODO: can't we just "dmcontrol = DMI_DMACTIVE"? */
3681 if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
3682 return ERROR_FAIL;
3683 dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
3684 int result = dmi_write(target, DM_DMCONTROL, dmcontrol);
3685 dm->current_hartid = r->current_hartid;
3686 return result;
3687 }
3688
3689 /* Select all harts that were prepped and that are selectable, clearing the
3690 * prepped flag on the harts that actually were selected. */
3691 static int select_prepped_harts(struct target *target, bool *use_hasel)
3692 {
3693 dm013_info_t *dm = get_dm(target);
3694 if (!dm)
3695 return ERROR_FAIL;
3696 if (!dm->hasel_supported) {
3697 RISCV_INFO(r);
3698 r->prepped = false;
3699 *use_hasel = false;
3700 return ERROR_OK;
3701 }
3702
3703 assert(dm->hart_count);
3704 unsigned hawindow_count = (dm->hart_count + 31) / 32;
3705 uint32_t hawindow[hawindow_count];
3706
3707 memset(hawindow, 0, sizeof(uint32_t) * hawindow_count);
3708
3709 target_list_t *entry;
3710 unsigned total_selected = 0;
3711 list_for_each_entry(entry, &dm->target_list, list) {
3712 struct target *t = entry->target;
3713 riscv_info_t *r = riscv_info(t);
3714 riscv013_info_t *info = get_info(t);
3715 unsigned index = info->index;
3716 LOG_DEBUG("index=%d, coreid=%d, prepped=%d", index, t->coreid, r->prepped);
3717 r->selected = r->prepped;
3718 if (r->prepped) {
3719 hawindow[index / 32] |= 1 << (index % 32);
3720 r->prepped = false;
3721 total_selected++;
3722 }
3723 index++;
3724 }
3725
3726 /* Don't use hasel if we only need to talk to one hart. */
3727 if (total_selected <= 1) {
3728 *use_hasel = false;
3729 return ERROR_OK;
3730 }
3731
3732 for (unsigned i = 0; i < hawindow_count; i++) {
3733 if (dmi_write(target, DM_HAWINDOWSEL, i) != ERROR_OK)
3734 return ERROR_FAIL;
3735 if (dmi_write(target, DM_HAWINDOW, hawindow[i]) != ERROR_OK)
3736 return ERROR_FAIL;
3737 }
3738
3739 *use_hasel = true;
3740 return ERROR_OK;
3741 }
3742
3743 static int riscv013_halt_prep(struct target *target)
3744 {
3745 return ERROR_OK;
3746 }
3747
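/* Halt the currently selected hart(s): set haltreq in dmcontrol (with hasel
* when several prepped harts were selected), poll until the hart reports
* halted, then clear haltreq. When hasel was used, every hart in this DM is
* marked halted here; the single-hart case is handled by the caller. */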
3748 static int riscv013_halt_go(struct target *target)
3749 {
3750 bool use_hasel = false;
3751 if (!riscv_rtos_enabled(target)) {
3752 if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
3753 return ERROR_FAIL;
3754 }
3755
3756 RISCV_INFO(r);
3757 LOG_DEBUG("halting hart %d", r->current_hartid);
3758
3759 /* Issue the halt command, and then wait for the current hart to halt. */
3760 uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_HALTREQ;
3761 if (use_hasel)
3762 dmcontrol |= DM_DMCONTROL_HASEL;
3763 dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
3764 dmi_write(target, DM_DMCONTROL, dmcontrol);
3765 for (size_t i = 0; i < 256; ++i)
3766 if (riscv_is_halted(target))
3767 break;
3768
3769 if (!riscv_is_halted(target)) {
3770 uint32_t dmstatus;
3771 if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
3772 return ERROR_FAIL;
3773 if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
3774 return ERROR_FAIL;
3775
3776 LOG_ERROR("unable to halt hart %d", r->current_hartid);
3777 LOG_ERROR(" dmcontrol=0x%08x", dmcontrol);
3778 LOG_ERROR(" dmstatus =0x%08x", dmstatus);
3779 return ERROR_FAIL;
3780 }
3781
3782 dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HALTREQ, 0);
3783 dmi_write(target, DM_DMCONTROL, dmcontrol);
3784
3785 if (use_hasel) {
3786 target_list_t *entry;
3787 dm013_info_t *dm = get_dm(target);
3788 if (!dm)
3789 return ERROR_FAIL;
3790 list_for_each_entry(entry, &dm->target_list, list) {
3791 struct target *t = entry->target;
3792 t->state = TARGET_HALTED;
3793 if (t->debug_reason == DBG_REASON_NOTHALTED)
3794 t->debug_reason = DBG_REASON_DBGRQ;
3795 }
3796 }
3797 /* The "else" case is handled in halt_go(). */
3798
3799 return ERROR_OK;
3800 }
3801
3802 static int riscv013_resume_go(struct target *target)
3803 {
3804 bool use_hasel = false;
3805 if (!riscv_rtos_enabled(target)) {
3806 if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
3807 return ERROR_FAIL;
3808 }
3809
3810 return riscv013_step_or_resume_current_hart(target, false, use_hasel);
3811 }
3812
3813 static int riscv013_step_current_hart(struct target *target)
3814 {
3815 return riscv013_step_or_resume_current_hart(target, true, false);
3816 }
3817
3818 static int riscv013_resume_prep(struct target *target)
3819 {
3820 return riscv013_on_step_or_resume(target, false);
3821 }
3822
3823 static int riscv013_on_step(struct target *target)
3824 {
3825 return riscv013_on_step_or_resume(target, true);
3826 }
3827
3828 static int riscv013_on_halt(struct target *target)
3829 {
3830 return ERROR_OK;
3831 }
3832
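/* Report whether the current hart is halted, based on dmstatus. Also warn
* about unavailable or nonexistent harts, and acknowledge an unexpected
* reset (re-requesting a halt if we were halted before the reset). */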
3833 static bool riscv013_is_halted(struct target *target)
3834 {
3835 uint32_t dmstatus;
3836 if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
3837 return false;
3838 if (get_field(dmstatus, DM_DMSTATUS_ANYUNAVAIL))
3839 LOG_ERROR("Hart %d is unavailable.", riscv_current_hartid(target));
3840 if (get_field(dmstatus, DM_DMSTATUS_ANYNONEXISTENT))
3841 LOG_ERROR("Hart %d doesn't exist.", riscv_current_hartid(target));
3842 if (get_field(dmstatus, DM_DMSTATUS_ANYHAVERESET)) {
3843 int hartid = riscv_current_hartid(target);
3844 LOG_INFO("Hart %d unexpectedly reset!", hartid);
3845 /* TODO: Can we make this more obvious to eg. a gdb user? */
3846 uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE |
3847 DM_DMCONTROL_ACKHAVERESET;
3848 dmcontrol = set_hartsel(dmcontrol, hartid);
3849 /* If we had been halted when we reset, request another halt. If we
3850 * ended up running out of reset, then the user will (hopefully) get a
3851 * message that a reset happened, that the target is running, and then
3852 * that it is halted again once the request goes through.
3853 */
3854 if (target->state == TARGET_HALTED)
3855 dmcontrol |= DM_DMCONTROL_HALTREQ;
3856 dmi_write(target, DM_DMCONTROL, dmcontrol);
3857 }
3858 return get_field(dmstatus, DM_DMSTATUS_ALLHALTED);
3859 }
3860
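/* Translate dcsr.cause into a riscv_halt_reason. A trigger halt forces
* trigger enumeration first, since we may have connected after the trigger
* was already set up. */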
3861 static enum riscv_halt_reason riscv013_halt_reason(struct target *target)
3862 {
3863 riscv_reg_t dcsr;
3864 int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
3865 if (result != ERROR_OK)
3866 return RISCV_HALT_UNKNOWN;
3867
3868 switch (get_field(dcsr, CSR_DCSR_CAUSE)) {
3869 case CSR_DCSR_CAUSE_SWBP:
3870 return RISCV_HALT_BREAKPOINT;
3871 case CSR_DCSR_CAUSE_TRIGGER:
3872 /* We could get here before triggers are enumerated if a trigger was
3873 * already set when we connected. Force enumeration now, which has the
3874 * side effect of clearing any triggers we did not set. */
3875 riscv_enumerate_triggers(target);
3876 LOG_DEBUG("{%d} halted because of trigger", target->coreid);
3877 return RISCV_HALT_TRIGGER;
3878 case CSR_DCSR_CAUSE_STEP:
3879 return RISCV_HALT_SINGLESTEP;
3880 case CSR_DCSR_CAUSE_DEBUGINT:
3881 case CSR_DCSR_CAUSE_HALT:
3882 return RISCV_HALT_INTERRUPT;
3883 case CSR_DCSR_CAUSE_GROUP:
3884 return RISCV_HALT_GROUP;
3885 }
3886
3887 LOG_ERROR("Unknown DCSR cause field: %x", (int)get_field(dcsr, CSR_DCSR_CAUSE));
3888 LOG_ERROR(" dcsr=0x%016lx", (long)dcsr);
3889 return RISCV_HALT_UNKNOWN;
3890 }
3891
3892 int riscv013_write_debug_buffer(struct target *target, unsigned index, riscv_insn_t data)
3893 {
3894 dm013_info_t *dm = get_dm(target);
3895 if (!dm)
3896 return ERROR_FAIL;
3897 if (dm->progbuf_cache[index] != data) {
3898 if (dmi_write(target, DM_PROGBUF0 + index, data) != ERROR_OK)
3899 return ERROR_FAIL;
3900 dm->progbuf_cache[index] = data;
3901 } else {
3902 LOG_DEBUG("cache hit for 0x%" PRIx32 " @%d", data, index);
3903 }
3904 return ERROR_OK;
3905 }
3906
3907 riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned index)
3908 {
3909 uint32_t value;
3910 dmi_read(target, &value, DM_PROGBUF0 + index);
3911 return value;
3912 }
3913
3914 int riscv013_execute_debug_buffer(struct target *target)
3915 {
3916 uint32_t run_program = 0;
3917 run_program = set_field(run_program, AC_ACCESS_REGISTER_AARSIZE, 2);
3918 run_program = set_field(run_program, AC_ACCESS_REGISTER_POSTEXEC, 1);
3919 run_program = set_field(run_program, AC_ACCESS_REGISTER_TRANSFER, 0);
3920 run_program = set_field(run_program, AC_ACCESS_REGISTER_REGNO, 0x1000);
3921
3922 return execute_abstract_command(target, run_program);
3923 }
3924
3925 void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d)
3926 {
3927 RISCV013_INFO(info);
3928 buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_WRITE);
3929 buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, d);
3930 buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
3931 }
3932
3933 void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a)
3934 {
3935 RISCV013_INFO(info);
3936 buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_READ);
3937 buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
3938 buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
3939 }
3940
3941 void riscv013_fill_dmi_nop_u64(struct target *target, char *buf)
3942 {
3943 RISCV013_INFO(info);
3944 buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_NOP);
3945 buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
3946 buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, 0);
3947 }
3948
3949 /* Helper function for riscv013_test_sba_config_reg */
3950 static int get_max_sbaccess(struct target *target)
3951 {
3952 RISCV013_INFO(info);
3953
3954 uint32_t sbaccess128 = get_field(info->sbcs, DM_SBCS_SBACCESS128);
3955 uint32_t sbaccess64 = get_field(info->sbcs, DM_SBCS_SBACCESS64);
3956 uint32_t sbaccess32 = get_field(info->sbcs, DM_SBCS_SBACCESS32);
3957 uint32_t sbaccess16 = get_field(info->sbcs, DM_SBCS_SBACCESS16);
3958 uint32_t sbaccess8 = get_field(info->sbcs, DM_SBCS_SBACCESS8);
3959
3960 if (sbaccess128)
3961 return 4;
3962 else if (sbaccess64)
3963 return 3;
3964 else if (sbaccess32)
3965 return 2;
3966 else if (sbaccess16)
3967 return 1;
3968 else if (sbaccess8)
3969 return 0;
3970 else
3971 return -1;
3972 }
3973
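/* Number of 32-bit SBDATA registers used by the widest supported system bus
* access (4 for 128-bit, 2 for 64-bit, 1 for 32-bit, 0 if only narrower
* accesses are available). */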
3974 static uint32_t get_num_sbdata_regs(struct target *target)
3975 {
3976 RISCV013_INFO(info);
3977
3978 uint32_t sbaccess128 = get_field(info->sbcs, DM_SBCS_SBACCESS128);
3979 uint32_t sbaccess64 = get_field(info->sbcs, DM_SBCS_SBACCESS64);
3980 uint32_t sbaccess32 = get_field(info->sbcs, DM_SBCS_SBACCESS32);
3981
3982 if (sbaccess128)
3983 return 4;
3984 else if (sbaccess64)
3985 return 2;
3986 else if (sbaccess32)
3987 return 1;
3988 else
3989 return 0;
3990 }
3991
3992 static int riscv013_test_sba_config_reg(struct target *target,
3993 target_addr_t legal_address, uint32_t num_words,
3994 target_addr_t illegal_address, bool run_sbbusyerror_test)
3995 {
3996 LOG_INFO("Testing System Bus Access as defined by RISC-V Debug Spec v0.13");
3997
3998 uint32_t tests_failed = 0;
3999
4000 uint32_t rd_val;
4001 uint32_t sbcs_orig;
4002 dmi_read(target, &sbcs_orig, DM_SBCS);
4003
4004 uint32_t sbcs = sbcs_orig;
4005 bool test_passed;
4006
4007 int max_sbaccess = get_max_sbaccess(target);
4008
4009 if (max_sbaccess == -1) {
4010 LOG_ERROR("System Bus Access not supported in this config.");
4011 return ERROR_FAIL;
4012 }
4013
4014 if (get_field(sbcs, DM_SBCS_SBVERSION) != 1) {
4015 LOG_ERROR("System Bus Access unsupported SBVERSION (%d). Only version 1 is supported.",
4016 get_field(sbcs, DM_SBCS_SBVERSION));
4017 return ERROR_FAIL;
4018 }
4019
4020 uint32_t num_sbdata_regs = get_num_sbdata_regs(target);
4021 assert(num_sbdata_regs);
4022
4023 uint32_t rd_buf[num_sbdata_regs];
4024
4025 /* Test 1: Simple write/read test */
4026 test_passed = true;
4027 sbcs = set_field(sbcs_orig, DM_SBCS_SBAUTOINCREMENT, 0);
4028 dmi_write(target, DM_SBCS, sbcs);
4029
4030 uint32_t test_patterns[4] = {0xdeadbeef, 0xfeedbabe, 0x12345678, 0x08675309};
4031 for (uint32_t sbaccess = 0; sbaccess <= (uint32_t)max_sbaccess; sbaccess++) {
4032 sbcs = set_field(sbcs, DM_SBCS_SBACCESS, sbaccess);
4033 dmi_write(target, DM_SBCS, sbcs);
4034
4035 uint32_t compare_mask = (sbaccess == 0) ? 0xff : (sbaccess == 1) ? 0xffff : 0xffffffff;
4036
4037 for (uint32_t i = 0; i < num_words; i++) {
4038 uint32_t addr = legal_address + (i << sbaccess);
4039 uint32_t wr_data[num_sbdata_regs];
4040 for (uint32_t j = 0; j < num_sbdata_regs; j++)
4041 wr_data[j] = test_patterns[j] + i;
4042 write_memory_sba_simple(target, addr, wr_data, num_sbdata_regs, sbcs);
4043 }
4044
4045 for (uint32_t i = 0; i < num_words; i++) {
4046 uint32_t addr = legal_address + (i << sbaccess);
4047 read_memory_sba_simple(target, addr, rd_buf, num_sbdata_regs, sbcs);
4048 for (uint32_t j = 0; j < num_sbdata_regs; j++) {
4049 if (((test_patterns[j]+i)&compare_mask) != (rd_buf[j]&compare_mask)) {
4050 LOG_ERROR("System Bus Access Test 1: Error reading non-autoincremented address %x,"
4051 "expected val = %x, read val = %x", addr, test_patterns[j]+i, rd_buf[j]);
4052 test_passed = false;
4053 tests_failed++;
4054 }
4055 }
4056 }
4057 }
4058 if (test_passed)
4059 LOG_INFO("System Bus Access Test 1: Simple write/read test PASSED.");
4060
4061 /* Test 2: Address autoincrement test */
4062 target_addr_t curr_addr;
4063 target_addr_t prev_addr;
4064 test_passed = true;
4065 sbcs = set_field(sbcs_orig, DM_SBCS_SBAUTOINCREMENT, 1);
4066 dmi_write(target, DM_SBCS, sbcs);
4067
4068 for (uint32_t sbaccess = 0; sbaccess <= (uint32_t)max_sbaccess; sbaccess++) {
4069 sbcs = set_field(sbcs, DM_SBCS_SBACCESS, sbaccess);
4070 dmi_write(target, DM_SBCS, sbcs);
4071
4072 dmi_write(target, DM_SBADDRESS0, legal_address);
4073 read_sbcs_nonbusy(target, &sbcs);
4074 curr_addr = legal_address;
4075 for (uint32_t i = 0; i < num_words; i++) {
4076 prev_addr = curr_addr;
4077 read_sbcs_nonbusy(target, &sbcs);
4078 curr_addr = sb_read_address(target);
4079 if ((curr_addr - prev_addr != (uint32_t)(1 << sbaccess)) && (i != 0)) {
4080 LOG_ERROR("System Bus Access Test 2: Error with address auto-increment, sbaccess = %x.", sbaccess);
4081 test_passed = false;
4082 tests_failed++;
4083 }
4084 dmi_write(target, DM_SBDATA0, i);
4085 }
4086
4087 read_sbcs_nonbusy(target, &sbcs);
4088
4089 dmi_write(target, DM_SBADDRESS0, legal_address);
4090
4091 uint32_t val;
4092 sbcs = set_field(sbcs, DM_SBCS_SBREADONDATA, 1);
4093 dmi_write(target, DM_SBCS, sbcs);
4094 dmi_read(target, &val, DM_SBDATA0); /* Dummy read to trigger first system bus read */
4095 curr_addr = legal_address;
4096 for (uint32_t i = 0; i < num_words; i++) {
4097 prev_addr = curr_addr;
4098 read_sbcs_nonbusy(target, &sbcs);
4099 curr_addr = sb_read_address(target);
4100 if ((curr_addr - prev_addr != (uint32_t)(1 << sbaccess)) && (i != 0)) {
4101 LOG_ERROR("System Bus Access Test 2: Error with address auto-increment, sbaccess = %x", sbaccess);
4102 test_passed = false;
4103 tests_failed++;
4104 }
4105 dmi_read(target, &val, DM_SBDATA0);
4106 read_sbcs_nonbusy(target, &sbcs);
4107 if (i != val) {
4108 LOG_ERROR("System Bus Access Test 2: Error reading auto-incremented address,"
4109 "expected val = %x, read val = %x.", i, val);
4110 test_passed = false;
4111 tests_failed++;
4112 }
4113 }
4114 }
4115 if (test_passed)
4116 LOG_INFO("System Bus Access Test 2: Address auto-increment test PASSED.");
4117
4118 /* Test 3: Read from illegal address */
4119 read_memory_sba_simple(target, illegal_address, rd_buf, 1, sbcs_orig);
4120
4121 dmi_read(target, &rd_val, DM_SBCS);
4122 if (get_field(rd_val, DM_SBCS_SBERROR) == 2) {
4123 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 2);
4124 dmi_write(target, DM_SBCS, sbcs);
4125 dmi_read(target, &rd_val, DM_SBCS);
4126 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4127 LOG_INFO("System Bus Access Test 3: Illegal address read test PASSED.");
4128 else
4129 LOG_ERROR("System Bus Access Test 3: Illegal address read test FAILED, unable to clear to 0.");
4130 } else {
4131 LOG_ERROR("System Bus Access Test 3: Illegal address read test FAILED, unable to set error code.");
4132 }
4133
4134 /* Test 4: Write to illegal address */
4135 write_memory_sba_simple(target, illegal_address, test_patterns, 1, sbcs_orig);
4136
4137 dmi_read(target, &rd_val, DM_SBCS);
4138 if (get_field(rd_val, DM_SBCS_SBERROR) == 2) {
4139 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 2);
4140 dmi_write(target, DM_SBCS, sbcs);
4141 dmi_read(target, &rd_val, DM_SBCS);
4142 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4143 LOG_INFO("System Bus Access Test 4: Illegal address write test PASSED.");
4144 else {
4145 LOG_ERROR("System Bus Access Test 4: Illegal address write test FAILED, unable to clear to 0.");
4146 tests_failed++;
4147 }
4148 } else {
4149 LOG_ERROR("System Bus Access Test 4: Illegal address write test FAILED, unable to set error code.");
4150 tests_failed++;
4151 }
4152
4153 /* Test 5: Write with unsupported sbaccess size */
4154 uint32_t sbaccess128 = get_field(sbcs_orig, DM_SBCS_SBACCESS128);
4155
4156 if (sbaccess128) {
4157 LOG_INFO("System Bus Access Test 5: SBCS sbaccess error test PASSED, all sbaccess sizes supported.");
4158 } else {
4159 sbcs = set_field(sbcs_orig, DM_SBCS_SBACCESS, 4);
4160
4161 write_memory_sba_simple(target, legal_address, test_patterns, 1, sbcs);
4162
4163 dmi_read(target, &rd_val, DM_SBCS);
4164 if (get_field(rd_val, DM_SBCS_SBERROR) == 4) {
4165 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 4);
4166 dmi_write(target, DM_SBCS, sbcs);
4167 dmi_read(target, &rd_val, DM_SBCS);
4168 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4169 LOG_INFO("System Bus Access Test 5: SBCS sbaccess error test PASSED.");
4170 else {
4171 LOG_ERROR("System Bus Access Test 5: SBCS sbaccess error test FAILED, unable to clear to 0.");
4172 tests_failed++;
4173 }
4174 } else {
4175 LOG_ERROR("System Bus Access Test 5: SBCS sbaccess error test FAILED, unable to set error code.");
4176 tests_failed++;
4177 }
4178 }
4179
4180 /* Test 6: Write to misaligned address */
4181 sbcs = set_field(sbcs_orig, DM_SBCS_SBACCESS, 1);
4182
4183 write_memory_sba_simple(target, legal_address+1, test_patterns, 1, sbcs);
4184
4185 dmi_read(target, &rd_val, DM_SBCS);
4186 if (get_field(rd_val, DM_SBCS_SBERROR) == 3) {
4187 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 3);
4188 dmi_write(target, DM_SBCS, sbcs);
4189 dmi_read(target, &rd_val, DM_SBCS);
4190 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4191 LOG_INFO("System Bus Access Test 6: SBCS address alignment error test PASSED");
4192 else {
4193 LOG_ERROR("System Bus Access Test 6: SBCS address alignment error test FAILED, unable to clear to 0.");
4194 tests_failed++;
4195 }
4196 } else {
4197 LOG_ERROR("System Bus Access Test 6: SBCS address alignment error test FAILED, unable to set error code.");
4198 tests_failed++;
4199 }
4200
4201 /* Test 7: Set sbbusyerror, only run this case in simulation as it is likely
4202 * impossible to hit otherwise */
4203 if (run_sbbusyerror_test) {
4204 sbcs = set_field(sbcs_orig, DM_SBCS_SBREADONADDR, 1);
4205 dmi_write(target, DM_SBCS, sbcs);
4206
4207 for (int i = 0; i < 16; i++)
4208 dmi_write(target, DM_SBDATA0, 0xdeadbeef);
4209
4210 for (int i = 0; i < 16; i++)
4211 dmi_write(target, DM_SBADDRESS0, legal_address);
4212
4213 dmi_read(target, &rd_val, DM_SBCS);
4214 if (get_field(rd_val, DM_SBCS_SBBUSYERROR)) {
4215 sbcs = set_field(sbcs_orig, DM_SBCS_SBBUSYERROR, 1);
4216 dmi_write(target, DM_SBCS, sbcs);
4217 dmi_read(target, &rd_val, DM_SBCS);
4218 if (get_field(rd_val, DM_SBCS_SBBUSYERROR) == 0)
4219 LOG_INFO("System Bus Access Test 7: SBCS sbbusyerror test PASSED.");
4220 else {
4221 LOG_ERROR("System Bus Access Test 7: SBCS sbbusyerror test FAILED, unable to clear to 0.");
4222 tests_failed++;
4223 }
4224 } else {
4225 LOG_ERROR("System Bus Access Test 7: SBCS sbbusyerror test FAILED, unable to set error code.");
4226 tests_failed++;
4227 }
4228 }
4229
4230 if (tests_failed == 0) {
4231 LOG_INFO("ALL TESTS PASSED");
4232 return ERROR_OK;
4233 } else {
4234 LOG_ERROR("%d TESTS FAILED", tests_failed);
4235 return ERROR_FAIL;
4236 }
4237
4238 }
4239
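/* Perform a single system bus write of write_size 32-bit words at addr,
* without address autoincrement. The address registers are written first
* (with sbreadonaddr cleared), then the SBDATA registers from the highest
* index down to SBDATA0, since the write to SBDATA0 is what triggers the
* bus access. Used by the SBA configuration test above. */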
4240 void write_memory_sba_simple(struct target *target, target_addr_t addr,
4241 uint32_t *write_data, uint32_t write_size, uint32_t sbcs)
4242 {
4243 RISCV013_INFO(info);
4244
4245 uint32_t rd_sbcs;
4246 uint32_t masked_addr;
4247
4248 uint32_t sba_size = get_field(info->sbcs, DM_SBCS_SBASIZE);
4249
4250 read_sbcs_nonbusy(target, &rd_sbcs);
4251
4252 uint32_t sbcs_no_readonaddr = set_field(sbcs, DM_SBCS_SBREADONADDR, 0);
4253 dmi_write(target, DM_SBCS, sbcs_no_readonaddr);
4254
4255 for (uint32_t i = 0; i < sba_size/32; i++) {
4256 masked_addr = (addr >> 32*i) & 0xffffffff;
4257
4258 if (i != 3)
4259 dmi_write(target, DM_SBADDRESS0+i, masked_addr);
4260 else
4261 dmi_write(target, DM_SBADDRESS3, masked_addr);
4262 }
4263
4264 /* Write SBDATA registers starting with highest address, since write to
4265 * SBDATA0 triggers write */
4266 for (int i = write_size-1; i >= 0; i--)
4267 dmi_write(target, DM_SBDATA0+i, write_data[i]);
4268 }
4269
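/* Perform a single system bus read of read_size 32-bit words from addr,
* without autoincrement. sbreadonaddr is set so that writing the lowest
* address register (written last, from the highest index down) triggers the
* bus read; the results are then collected from the SBDATA registers.
* Used by the SBA configuration test above. */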
4270 void read_memory_sba_simple(struct target *target, target_addr_t addr,
4271 uint32_t *rd_buf, uint32_t read_size, uint32_t sbcs)
4272 {
4273 RISCV013_INFO(info);
4274
4275 uint32_t rd_sbcs;
4276 uint32_t masked_addr;
4277
4278 uint32_t sba_size = get_field(info->sbcs, DM_SBCS_SBASIZE);
4279
4280 read_sbcs_nonbusy(target, &rd_sbcs);
4281
4282 uint32_t sbcs_readonaddr = set_field(sbcs, DM_SBCS_SBREADONADDR, 1);
4283 dmi_write(target, DM_SBCS, sbcs_readonaddr);
4284
4285 /* Write addresses starting with highest address register */
4286 for (int i = sba_size/32-1; i >= 0; i--) {
4287 masked_addr = (addr >> 32*i) & 0xffffffff;
4288
4289 if (i != 3)
4290 dmi_write(target, DM_SBADDRESS0+i, masked_addr);
4291 else
4292 dmi_write(target, DM_SBADDRESS3, masked_addr);
4293 }
4294
4295 read_sbcs_nonbusy(target, &rd_sbcs);
4296
4297 for (uint32_t i = 0; i < read_size; i++)
4298 dmi_read(target, &(rd_buf[i]), DM_SBDATA0+i);
4299 }
4300
4301 int riscv013_dmi_write_u64_bits(struct target *target)
4302 {
4303 RISCV013_INFO(info);
4304 return info->abits + DTM_DMI_DATA_LENGTH + DTM_DMI_OP_LENGTH;
4305 }
4306
4307 static int maybe_execute_fence_i(struct target *target)
4308 {
4309 if (has_sufficient_progbuf(target, 3))
4310 return execute_fence(target);
4311 return ERROR_OK;
4312 }
4313
4314 /* Helper Functions. */
4315 static int riscv013_on_step_or_resume(struct target *target, bool step)
4316 {
4317 if (maybe_execute_fence_i(target) != ERROR_OK)
4318 return ERROR_FAIL;
4319
4320 /* We want to twiddle some bits in the debug CSR so debugging works. */
4321 riscv_reg_t dcsr;
4322 int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
4323 if (result != ERROR_OK)
4324 return result;
4325 dcsr = set_field(dcsr, CSR_DCSR_STEP, step);
4326 dcsr = set_field(dcsr, CSR_DCSR_EBREAKM, riscv_ebreakm);
4327 dcsr = set_field(dcsr, CSR_DCSR_EBREAKS, riscv_ebreaks);
4328 dcsr = set_field(dcsr, CSR_DCSR_EBREAKU, riscv_ebreaku);
4329 return riscv_set_register(target, GDB_REGNO_DCSR, dcsr);
4330 }
4331
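/* Resume (or single-step) the current hart: set resumereq in dmcontrol
* (plus hasel when several harts are selected), then poll dmstatus until
* allresumeack is set and, when stepping, until the hart has halted again.
* On success the request bits are cleared; on timeout an error is logged
* and, if we were stepping, the hart is halted again so the debugger stays
* in control. */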
4332 static int riscv013_step_or_resume_current_hart(struct target *target,
4333 bool step, bool use_hasel)
4334 {
4335 RISCV_INFO(r);
4336 LOG_DEBUG("resuming hart %d (for step?=%d)", r->current_hartid, step);
4337 if (!riscv_is_halted(target)) {
4338 LOG_ERROR("Hart %d is not halted!", r->current_hartid);
4339 return ERROR_FAIL;
4340 }
4341
4342 /* Issue the resume command, and then wait for the current hart to resume. */
4343 uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_RESUMEREQ;
4344 if (use_hasel)
4345 dmcontrol |= DM_DMCONTROL_HASEL;
4346 dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
4347 dmi_write(target, DM_DMCONTROL, dmcontrol);
4348
4349 dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HASEL, 0);
4350 dmcontrol = set_field(dmcontrol, DM_DMCONTROL_RESUMEREQ, 0);
4351
4352 uint32_t dmstatus;
4353 for (size_t i = 0; i < 256; ++i) {
4354 usleep(10);
4355 if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
4356 return ERROR_FAIL;
4357 if (get_field(dmstatus, DM_DMSTATUS_ALLRESUMEACK) == 0)
4358 continue;
4359 if (step && get_field(dmstatus, DM_DMSTATUS_ALLHALTED) == 0)
4360 continue;
4361
4362 dmi_write(target, DM_DMCONTROL, dmcontrol);
4363 return ERROR_OK;
4364 }
4365
4366 dmi_write(target, DM_DMCONTROL, dmcontrol);
4367
4368 LOG_ERROR("unable to resume hart %d", r->current_hartid);
4369 if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
4370 return ERROR_FAIL;
4371 LOG_ERROR(" dmstatus =0x%08x", dmstatus);
4372
4373 if (step) {
4374 LOG_ERROR(" was stepping, halting");
4375 riscv_halt(target);
4376 return ERROR_OK;
4377 }
4378
4379 return ERROR_FAIL;
4380 }
4381
4382 void riscv013_clear_abstract_error(struct target *target)
4383 {
4384 /* Wait for busy to go away. */
4385 time_t start = time(NULL);
4386 uint32_t abstractcs;
4387 dmi_read(target, &abstractcs, DM_ABSTRACTCS);
4388 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY)) {
4389 dmi_read(target, &abstractcs, DM_ABSTRACTCS);
4390
4391 if (time(NULL) - start > riscv_command_timeout_sec) {
4392 LOG_ERROR("abstractcs.busy is not going low after %d seconds "
4393 "(abstractcs=0x%x). The target is either really slow or "
4394 "broken. You could increase the timeout with riscv "
4395 "set_command_timeout_sec.",
4396 riscv_command_timeout_sec, abstractcs);
4397 break;
4398 }
4399 }
4400 /* Clear the error status. */
4401 dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
4402 }
4403
4404 #ifdef _WIN32
4405 #define FILE_SEP '\\'
4406 #else
4407 #define FILE_SEP '/'
4408 #endif
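/* Run one compliance check: log the test with its source location, count it,
* and record a pass when the condition holds. The assert() means a failing
* check aborts the whole compliance run; total_tests and passed_tests must
* be in scope at the expansion site. */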
4409 #define COMPLIANCE_TEST(b, message) \
4410 { \
4411 const char *last_sep = strrchr(__FILE__, FILE_SEP); \
4412 const char *fname = (last_sep == NULL ? __FILE__ : last_sep + 1); \
4413 LOG_INFO("Executing test %d (%s:%d): %s", total_tests, fname, __LINE__, message); \
4414 int pass = 0; \
4415 if (b) { \
4416 pass = 1; \
4417 passed_tests++; \
4418 } \
4419 LOG_INFO(" %s", (pass) ? "PASSED" : "FAILED"); \
4420 assert(pass); \
4421 total_tests++; \
4422 }
4423
4424 #define COMPLIANCE_MUST_PASS(b) COMPLIANCE_TEST(ERROR_OK == (b), "Regular calls must return ERROR_OK")
4425
4426 #define COMPLIANCE_READ(target, addr, value) COMPLIANCE_MUST_PASS(dmi_read(target, addr, value))
4427 #define COMPLIANCE_WRITE(target, addr, value) COMPLIANCE_MUST_PASS(dmi_write(target, addr, value))
4428
4429 #define COMPLIANCE_CHECK_RO(target, addr) \
4430 { \
4431 uint32_t orig; \
4432 uint32_t inverse; \
4433 COMPLIANCE_READ(target, &orig, addr); \
4434 COMPLIANCE_WRITE(target, addr, ~orig); \
4435 COMPLIANCE_READ(target, &inverse, addr); \
4436 COMPLIANCE_TEST(orig == inverse, "Register must be read-only"); \
4437 }
4438
4439 int riscv013_test_compliance(struct target *target)
4440 {
4441 LOG_INFO("Basic compliance test against RISC-V Debug Spec v0.13");
4442 LOG_INFO("This test is not complete, and not well supported.");
4443 LOG_INFO("Your core might pass this test without being compliant.");
4444 LOG_INFO("Your core might fail this test while being compliant.");
4445 LOG_INFO("Use your judgment, and please contribute improvements.");
4446
4447 if (!riscv_rtos_enabled(target)) {
4448 LOG_ERROR("Please run with -rtos riscv to run compliance test.");
4449 return ERROR_FAIL;
4450 }
4451
4452 if (!target_was_examined(target)) {
4453 LOG_ERROR("Cannot run compliance test, because target has not yet "
4454 "been examined, or the examination failed.\n");
4455 return ERROR_FAIL;
4456 }
4457
4458 int total_tests = 0;
4459 int passed_tests = 0;
4460
4461 uint32_t dmcontrol_orig = DM_DMCONTROL_DMACTIVE;
4462 uint32_t dmcontrol;
4463 uint32_t testvar;
4464 uint32_t testvar_read;
4465 riscv_reg_t value;
4466 RISCV013_INFO(info);
4467
4468 /* All the bits of HARTSEL are covered by the examine sequence. */
4469
4470 /* hartreset */
4471 /* This field is optional. Either we can read and write it to 1/0,
4472 or it is tied to 0. This check doesn't really do anything, but
4473 it does attempt to set the bit to 1 and then back to 0, which needs to
4474 work if it's implemented. */
4475 COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HARTRESET, 1));
4476 COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HARTRESET, 0));
4477 COMPLIANCE_READ(target, &dmcontrol, DM_DMCONTROL);
4478 COMPLIANCE_TEST((get_field(dmcontrol, DM_DMCONTROL_HARTRESET) == 0),
4479 "DMCONTROL.hartreset can be 0 or RW.");
4480
4481 /* hasel */
4482 COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HASEL, 1));
4483 COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HASEL, 0));
4484 COMPLIANCE_READ(target, &dmcontrol, DM_DMCONTROL);
4485 COMPLIANCE_TEST((get_field(dmcontrol, DM_DMCONTROL_HASEL) == 0),
4486 "DMCONTROL.hasel can be 0 or RW.");
4487 /* TODO: test that hamask registers exist if hasel does. */
4488
4489 /* haltreq */
4490 COMPLIANCE_MUST_PASS(riscv_halt(target));
4491 /* This bit is not actually readable according to the spec, so nothing to check.*/
4492
4493 /* DMSTATUS */
4494 COMPLIANCE_CHECK_RO(target, DM_DMSTATUS);
4495
4496 /* resumereq */
4497 /* This bit is not actually readable according to the spec, so nothing to check.*/
4498 COMPLIANCE_MUST_PASS(riscv_resume(target, true, 0, false, false, false));
4499
4500 /* Halt all harts again so the test can continue.*/
4501 COMPLIANCE_MUST_PASS(riscv_halt(target));
4502
4503 /* HARTINFO: Read-Only. This is per-hart, so need to adjust hartsel. */
4504 uint32_t hartinfo;
4505 COMPLIANCE_READ(target, &hartinfo, DM_HARTINFO);
4506 for (int hartsel = 0; hartsel < riscv_count_harts(target); hartsel++) {
4507 COMPLIANCE_MUST_PASS(riscv_set_current_hartid(target, hartsel));
4508
4509 COMPLIANCE_CHECK_RO(target, DM_HARTINFO);
4510
4511 /* $dscratch CSRs */
4512 uint32_t nscratch = get_field(hartinfo, DM_HARTINFO_NSCRATCH);
4513 for (unsigned int d = 0; d < nscratch; d++) {
4514 riscv_reg_t testval, testval_read;
4515 /* Because DSCRATCH0 is not guaranteed to last across PB executions, we need to put
4516 this all into one PB execution, which may not be possible on all implementations. */
4517 if (info->progbufsize >= 5) {
4518 for (testval = 0x0011223300112233;
4519 testval != 0xDEAD;
4520 testval = testval == 0x0011223300112233 ? ~testval : 0xDEAD) {
4521 COMPLIANCE_TEST(register_write_direct(target, GDB_REGNO_S0, testval) == ERROR_OK,
4522 "Need to be able to write S0 in order to test DSCRATCH0.");
4523 struct riscv_program program32;
4524 riscv_program_init(&program32, target);
4525 riscv_program_csrw(&program32, GDB_REGNO_S0, GDB_REGNO_DSCRATCH0 + d);
4526 riscv_program_csrr(&program32, GDB_REGNO_S1, GDB_REGNO_DSCRATCH0 + d);
4527 riscv_program_fence(&program32);
4528 riscv_program_ebreak(&program32);
4529 COMPLIANCE_TEST(riscv_program_exec(&program32, target) == ERROR_OK,
4530 "Accessing DSCRATCH0 with program buffer should succeed.");
4531 COMPLIANCE_TEST(register_read_direct(target, &testval_read, GDB_REGNO_S1) == ERROR_OK,
4532 "Need to be able to read S1 in order to test DSCRATCH0.");
4533 if (riscv_xlen(target) > 32) {
4534 COMPLIANCE_TEST(testval == testval_read,
4535 "All DSCRATCH0 registers in HARTINFO must be R/W.");
4536 } else {
4537 COMPLIANCE_TEST(testval_read == (testval & 0xFFFFFFFF),
4538 "All DSCRATCH0 registers in HARTINFO must be R/W.");
4539 }
4540 }
4541 }
4542 }
4543 /* TODO: dataaccess */
4544 if (get_field(hartinfo, DM_HARTINFO_DATAACCESS)) {
4545 /* TODO: Shadowed in memory map. */
4546 /* TODO: datasize */
4547 /* TODO: dataaddr */
4548 } else {
4549 /* TODO: Shadowed in CSRs. */
4550 /* TODO: datasize */
4551 /* TODO: dataaddr */
4552 }
4553
4554 }
4555
4556 /* HALTSUM -- TODO: More than 32 harts. Would need to loop over this to set hartsel */
4557 /* TODO: HALTSUM2, HALTSUM3 */
4558 /* HALTSUM0 */
4559 uint32_t expected_haltsum0 = 0;
4560 for (int i = 0; i < MIN(riscv_count_harts(target), 32); i++)
4561 expected_haltsum0 |= (1 << i);
4562
4563 COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM0);
4564 COMPLIANCE_TEST(testvar_read == expected_haltsum0,
4565 "HALTSUM0 should report summary of up to 32 halted harts");
4566
4567 COMPLIANCE_WRITE(target, DM_HALTSUM0, 0xffffffff);
4568 COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM0);
4569 COMPLIANCE_TEST(testvar_read == expected_haltsum0, "HALTSUM0 should be R/O");
4570
4571 COMPLIANCE_WRITE(target, DM_HALTSUM0, 0x0);
4572 COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM0);
4573 COMPLIANCE_TEST(testvar_read == expected_haltsum0, "HALTSUM0 should be R/O");
4574
4575 /* HALTSUM1 */
4576 uint32_t expected_haltsum1 = 0;
4577 for (int i = 0; i < MIN(riscv_count_harts(target), 1024); i += 32)
4578 expected_haltsum1 |= (1 << (i/32));
4579
4580 COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM1);
4581 COMPLIANCE_TEST(testvar_read == expected_haltsum1,
4582 "HALTSUM1 should report summary of up to 1024 halted harts");
4583
4584 COMPLIANCE_WRITE(target, DM_HALTSUM1, 0xffffffff);
4585 COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM1);
4586 COMPLIANCE_TEST(testvar_read == expected_haltsum1, "HALTSUM1 should be R/O");
4587
4588 COMPLIANCE_WRITE(target, DM_HALTSUM1, 0x0);
4589 COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM1);
4590 COMPLIANCE_TEST(testvar_read == expected_haltsum1, "HALTSUM1 should be R/O");
4591
4592 /* TODO: HAWINDOWSEL */
4593
4594 /* TODO: HAWINDOW */
4595
4596 /* ABSTRACTCS */
4597
4598 uint32_t abstractcs;
4599 COMPLIANCE_READ(target, &abstractcs, DM_ABSTRACTCS);
4600
4601 /* Check that all reported Data Words are really R/W */
4602 for (int invert = 0; invert < 2; invert++) {
4603 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
4604 testvar = (i + 1) * 0x11111111;
4605 if (invert)
4606 testvar = ~testvar;
4607 COMPLIANCE_WRITE(target, DM_DATA0 + i, testvar);
4608 }
4609 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
4610 testvar = (i + 1) * 0x11111111;
4611 if (invert)
4612 testvar = ~testvar;
4613 COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
4614 COMPLIANCE_TEST(testvar_read == testvar, "All reported DATA words must be R/W");
4615 }
4616 }
4617
4618 /* Check that all reported ProgBuf words are really R/W */
4619 for (int invert = 0; invert < 2; invert++) {
4620 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
4621 testvar = (i + 1) * 0x11111111;
4622 if (invert)
4623 testvar = ~testvar;
4624 COMPLIANCE_WRITE(target, DM_PROGBUF0 + i, testvar);
4625 }
4626 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
4627 testvar = (i + 1) * 0x11111111;
4628 if (invert)
4629 testvar = ~testvar;
4630 COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
4631 COMPLIANCE_TEST(testvar_read == testvar, "All reported PROGBUF words must be R/W");
4632 }
4633 }
4634
4635 /* TODO: Cause and clear all error types */
4636
4637 /* COMMAND
4638 According to the spec, this register is write-only, so we can't really check the read result.
4639 But at any rate, this is not legal and should cause an error. */
4640 COMPLIANCE_WRITE(target, DM_COMMAND, 0xAAAAAAAA);
4641 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
4642 COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == CMDERR_NOT_SUPPORTED,
4643 "Illegal COMMAND should result in UNSUPPORTED");
4644 COMPLIANCE_WRITE(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
4645
4646 COMPLIANCE_WRITE(target, DM_COMMAND, 0x55555555);
4647 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
4648 COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == CMDERR_NOT_SUPPORTED,
4649 "Illegal COMMAND should result in UNSUPPORTED");
4650 COMPLIANCE_WRITE(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
4651
4652 /* Basic Abstract Commands */
4653 for (unsigned int i = 1; i < 32; i = i << 1) {
4654 riscv_reg_t testval = i | ((i + 1ULL) << 32);
4655 riscv_reg_t testval_read;
4656 COMPLIANCE_TEST(ERROR_OK == register_write_direct(target, GDB_REGNO_ZERO + i, testval),
4657 "GPR Writes should be supported.");
4658 COMPLIANCE_MUST_PASS(write_abstract_arg(target, 0, 0xDEADBEEFDEADBEEF, 64));
4659 COMPLIANCE_TEST(ERROR_OK == register_read_direct(target, &testval_read, GDB_REGNO_ZERO + i),
4660 "GPR Reads should be supported.");
4661 if (riscv_xlen(target) > 32) {
4662 /* Dummy comment to satisfy linter, since removing the branches here doesn't actually compile. */
4663 COMPLIANCE_TEST(testval == testval_read, "GPR Reads and writes should be supported.");
4664 } else {
4665 /* Dummy comment to satisfy linter, since removing the branches here doesn't actually compile. */
4666 COMPLIANCE_TEST((testval & 0xFFFFFFFF) == testval_read, "GPR Reads and writes should be supported.");
4667 }
4668 }
4669
4670 /* ABSTRACTAUTO
4671 See which bits are actually writable */
4672 COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0xFFFFFFFF);
4673 uint32_t abstractauto;
4674 uint32_t busy;
4675 COMPLIANCE_READ(target, &abstractauto, DM_ABSTRACTAUTO);
4676 COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0x0);
4677 if (abstractauto > 0) {
4678 /* This mechanism only works when you have a reasonable sized progbuf, which is not
4679 a true compliance requirement. */
4680 if (info->progbufsize >= 3) {
4681
4682 testvar = 0;
4683 COMPLIANCE_TEST(ERROR_OK == register_write_direct(target, GDB_REGNO_S0, 0),
4684 "Need to be able to write S0 to test ABSTRACTAUTO");
4685 struct riscv_program program;
4686 COMPLIANCE_MUST_PASS(riscv_program_init(&program, target));
4687 /* This is also testing that WFI() is a NOP during debug mode. */
4688 COMPLIANCE_MUST_PASS(riscv_program_insert(&program, wfi()));
4689 COMPLIANCE_MUST_PASS(riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, 1));
4690 COMPLIANCE_MUST_PASS(riscv_program_ebreak(&program));
4691 COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0x0);
4692 COMPLIANCE_MUST_PASS(riscv_program_exec(&program, target));
4693 testvar++;
4694 COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0xFFFFFFFF);
4695 COMPLIANCE_READ(target, &abstractauto, DM_ABSTRACTAUTO);
4696 uint32_t autoexec_data = get_field(abstractauto, DM_ABSTRACTAUTO_AUTOEXECDATA);
4697 uint32_t autoexec_progbuf = get_field(abstractauto, DM_ABSTRACTAUTO_AUTOEXECPROGBUF);
4698 for (unsigned int i = 0; i < 12; i++) {
4699 COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
4700 do {
4701 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
4702 busy = get_field(testvar_read, DM_ABSTRACTCS_BUSY);
4703 } while (busy);
4704 if (autoexec_data & (1 << i)) {
4705 COMPLIANCE_TEST(i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT),
4706 "AUTOEXEC may be writable up to DATACOUNT bits.");
4707 testvar++;
4708 }
4709 }
4710 for (unsigned int i = 0; i < 16; i++) {
4711 COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
4712 do {
4713 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
4714 busy = get_field(testvar_read, DM_ABSTRACTCS_BUSY);
4715 } while (busy);
4716 if (autoexec_progbuf & (1 << i)) {
4717 COMPLIANCE_TEST(i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE),
4718 "AUTOEXEC may be writable up to PROGBUFSIZE bits.");
4719 testvar++;
4720 }
4721 }
4722
4723 COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0);
4724 COMPLIANCE_TEST(ERROR_OK == register_read_direct(target, &value, GDB_REGNO_S0),
4725 "Need to be able to read S0 to test ABSTRACTAUTO");
4726
4727 COMPLIANCE_TEST(testvar == value,
4728 "ABSTRACTAUTO should cause COMMAND to run the expected number of times.");
4729 }
4730 }
4731
4732 /* Single-Step each hart. */
4733 for (int hartsel = 0; hartsel < riscv_count_harts(target); hartsel++) {
4734 COMPLIANCE_MUST_PASS(riscv_set_current_hartid(target, hartsel));
4735 COMPLIANCE_MUST_PASS(riscv013_on_step(target));
4736 COMPLIANCE_MUST_PASS(riscv013_step_current_hart(target));
4737 COMPLIANCE_TEST(riscv_halt_reason(target, hartsel) == RISCV_HALT_SINGLESTEP,
4738 "Single Step should result in SINGLESTEP");
4739 }
4740
4741 /* Core Register Tests */
4742 uint64_t bogus_dpc = 0xdeadbeef;
4743 for (int hartsel = 0; hartsel < riscv_count_harts(target); hartsel++) {
4744 COMPLIANCE_MUST_PASS(riscv_set_current_hartid(target, hartsel));
4745
4746 /* DCSR Tests */
4747 COMPLIANCE_MUST_PASS(register_write_direct(target, GDB_REGNO_DCSR, 0x0));
4748 COMPLIANCE_MUST_PASS(register_read_direct(target, &value, GDB_REGNO_DCSR));
4749 COMPLIANCE_TEST(value != 0, "Not all bits in DCSR are writable by Debugger");
4750 COMPLIANCE_MUST_PASS(register_write_direct(target, GDB_REGNO_DCSR, 0xFFFFFFFF));
4751 COMPLIANCE_MUST_PASS(register_read_direct(target, &value, GDB_REGNO_DCSR));
4752 COMPLIANCE_TEST(value != 0, "At least some bits in DCSR must be 1");
4753
4754 /* DPC. Note that DPC is sign-extended. */
4755 riscv_reg_t dpcmask = 0xFFFFFFFCUL;
4756 riscv_reg_t dpc;
4757
4758 if (riscv_xlen(target) > 32)
4759 dpcmask |= (0xFFFFFFFFULL << 32);
4760
4761 if (riscv_supports_extension(target, riscv_current_hartid(target), 'C'))
4762 dpcmask |= 0x2;
4763
4764 COMPLIANCE_MUST_PASS(register_write_direct(target, GDB_REGNO_DPC, dpcmask));
4765 COMPLIANCE_MUST_PASS(register_read_direct(target, &dpc, GDB_REGNO_DPC));
4766 COMPLIANCE_TEST(dpcmask == dpc,
4767 "DPC must be sign-extended to XLEN and writable to all-1s (except the least significant bits)");
4768 COMPLIANCE_MUST_PASS(register_write_direct(target, GDB_REGNO_DPC, 0));
4769 COMPLIANCE_MUST_PASS(register_read_direct(target, &dpc, GDB_REGNO_DPC));
4770 COMPLIANCE_TEST(dpc == 0, "DPC must be writable to 0.");
4771 if (hartsel == 0)
4772 bogus_dpc = dpc; /* For a later test step */
4773 }
4774
4775 /* NDMRESET
4776 Asserting non-debug module reset should not reset Debug Module state.
4777 But it should reset Hart State, e.g. DPC should get a different value.
4778 Also make sure that DCSR reports cause of 'HALT' even though previously we single-stepped.
4779 */
4780
4781 /* Write some registers. They should not be impacted by ndmreset. */
4782 COMPLIANCE_WRITE(target, DM_COMMAND, 0xFFFFFFFF);
4783
4784 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
4785 testvar = (i + 1) * 0x11111111;
4786 COMPLIANCE_WRITE(target, DM_PROGBUF0 + i, testvar);
4787 }
4788
4789 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
4790 testvar = (i + 1) * 0x11111111;
4791 COMPLIANCE_WRITE(target, DM_DATA0 + i, testvar);
4792 }
4793
4794 COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0xFFFFFFFF);
4795 COMPLIANCE_READ(target, &abstractauto, DM_ABSTRACTAUTO);
4796
4797 /* Pulse reset. */
4798 target->reset_halt = true;
4799 COMPLIANCE_MUST_PASS(riscv_set_current_hartid(target, 0));
4800 COMPLIANCE_TEST(ERROR_OK == assert_reset(target), "Must be able to assert NDMRESET");
4801 COMPLIANCE_TEST(ERROR_OK == deassert_reset(target), "Must be able to deassert NDMRESET");
4802
4803 /* Verify that most stuff is not affected by ndmreset. */
4804 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
4805 COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == CMDERR_NOT_SUPPORTED,
4806 "NDMRESET should not affect DM_ABSTRACTCS");
4807 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTAUTO);
4808 COMPLIANCE_TEST(testvar_read == abstractauto, "NDMRESET should not affect DM_ABSTRACTAUTO");
4809
4810 /* Clean up to avoid future test failures */
4811 COMPLIANCE_WRITE(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
4812 COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0);
4813
4814 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
4815 testvar = (i + 1) * 0x11111111;
4816 COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
4817 COMPLIANCE_TEST(testvar_read == testvar, "PROGBUF words must not be affected by NDMRESET");
4818 }
4819
4820 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
4821 testvar = (i + 1) * 0x11111111;
4822 COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
4823 COMPLIANCE_TEST(testvar_read == testvar, "DATA words must not be affected by NDMRESET");
4824 }
4825
4826 /* Verify that DPC *is* affected by ndmreset. Since we don't know what it *should* be,
4827 just verify that at least it's not the bogus value anymore. */
4828
4829 COMPLIANCE_TEST(bogus_dpc != 0xdeadbeef, "BOGUS DPC should have been set somehow (bug in compliance test)");
4830 COMPLIANCE_MUST_PASS(register_read_direct(target, &value, GDB_REGNO_DPC));
4831 COMPLIANCE_TEST(bogus_dpc != value, "NDMRESET should move DPC to reset value.");
4832
4833 COMPLIANCE_TEST(riscv_halt_reason(target, 0) == RISCV_HALT_INTERRUPT,
4834 "After NDMRESET halt, DCSR should report cause of halt");
4835
4836 /* DMACTIVE -- deasserting DMACTIVE should reset all the above values. */
4837
4838 /* Toggle dmactive */
4839 COMPLIANCE_WRITE(target, DM_DMCONTROL, 0);
4840 COMPLIANCE_WRITE(target, DM_DMCONTROL, DM_DMCONTROL_DMACTIVE);
4841 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
4842 COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == 0, "ABSTRACTCS.cmderr should reset to 0");
4843 COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTAUTO);
4844 COMPLIANCE_TEST(testvar_read == 0, "ABSTRACTAUTO should reset to 0");
4845
4846 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
4847 COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
4848 COMPLIANCE_TEST(testvar_read == 0, "PROGBUF words should reset to 0");
4849 }
4850
4851 for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
4852 COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
4853 COMPLIANCE_TEST(testvar_read == 0, "DATA words should reset to 0");
4854 }
4855
4856 /*
4857 * TODO:
4858 * DCSR.cause priorities
4859 * DCSR.stoptime/stopcycle
4860 * DCSR.stepie
4861 * DCSR.ebreak
4862 * DCSR.prv
4863 */
4864
4865 /* Halt every hart for any follow-up tests*/
4866 COMPLIANCE_MUST_PASS(riscv_halt(target));
4867
4868 uint32_t failed_tests = total_tests - passed_tests;
4869 if (total_tests == passed_tests) {
4870 LOG_INFO("ALL TESTS PASSED\n");
4871 return ERROR_OK;
4872 } else {
4873 LOG_INFO("%d TESTS FAILED\n", failed_tests);
4874 return ERROR_FAIL;
4875 }
4876 }
