c4bd63aadf197f53b777b7928e9df3b637fcaf11
[openocd.git] / src / target / nds32.c
1 /***************************************************************************
2 * Copyright (C) 2013 Andes Technology *
3 * Hsiangkai Wang <hkwang@andestech.com> *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
19 ***************************************************************************/
20
21 #ifdef HAVE_CONFIG_H
22 #include "config.h"
23 #endif
24
25 #include <helper/log.h>
26 #include <helper/binarybuffer.h>
27 #include "nds32.h"
28 #include "nds32_aice.h"
29 #include "nds32_tlb.h"
30 #include "nds32_disassembler.h"
31
/* BREAK instruction opcodes used as software breakpoints; the in-memory
 * (byte-swapped) form is stored, the comment shows the opcode value. */
const int NDS32_BREAK_16 = 0x00EA; /* 0xEA00 */
const int NDS32_BREAK_32 = 0x0A000064; /* 0x6400000A */

/* Queue of user-configured EDM register operations to replay on the
 * target.  NOTE(review): the consumers are outside this chunk — confirm
 * when/where the queue is flushed. */
struct nds32_edm_operation nds32_edm_ops[NDS32_EDM_OPERATION_MAX_NUM];
uint32_t nds32_edm_ops_num;
37
/* Human-readable names for the debug-reason codes reported on halt;
 * the array index is the raw reason code. */
const char *nds32_debug_type_name[11] = {
	"SOFTWARE BREAK",
	"SOFTWARE BREAK_16",
	"HARDWARE BREAKPOINT",
	"DATA ADDR WATCHPOINT PRECISE",
	"DATA VALUE WATCHPOINT PRECISE",
	"DATA VALUE WATCHPOINT IMPRECISE",
	"DEBUG INTERRUPT",
	"HARDWARE SINGLE STEP",
	"DATA ADDR WATCHPOINT NEXT PRECISE",
	"DATA VALUE WATCHPOINT NEXT PRECISE",
	"LOAD STORE GLOBAL STOP",
};
51
/* Maps the 4-bit local-memory size field to a size in bytes.  Only the
 * first 11 encodings are defined; entries [11..15] are implicitly
 * zero-initialized (C aggregate initialization) and mean "no/unknown
 * local memory". */
static const int NDS32_LM_SIZE_TABLE[16] = {
	4 * 1024,
	8 * 1024,
	16 * 1024,
	32 * 1024,
	64 * 1024,
	128 * 1024,
	256 * 1024,
	512 * 1024,
	1024 * 1024,
	1 * 1024,
	2 * 1024,
};
65
/* Maps the 3-bit cache line-size field to a line size in bytes;
 * encoding 0 means "no cache line" (size 0). */
static const int NDS32_LINE_SIZE_TABLE[6] = {
	0,
	8,
	16,
	32,
	64,
	128,
};
74
/*
 * reg->type->get handler for 32-bit registers.
 *
 * Returns the cached value when reg->valid is set; otherwise reads the
 * register from the target through the AICE adapter.  A register that
 * is not present on this core (enable == false) yields
 * NDS32_REGISTER_DISABLE and ERROR_FAIL.  Registers of a switched-off
 * FPU/audio extension yield a fake value of 0 (see the comment above
 * nds32_check_extension below: reading them through DIM would raise an
 * exception on the target).
 */
static int nds32_get_core_reg(struct reg *reg)
{
	int retval;
	struct nds32_reg *reg_arch_info = reg->arch_info;
	struct target *target = reg_arch_info->target;
	struct nds32 *nds32 = target_to_nds32(target);
	struct aice_port_s *aice = target_to_aice(target);

	/* register access only works while the target is halted */
	if (target->state != TARGET_HALTED) {
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (reg->valid) {
		LOG_DEBUG("reading register(cached) %i(%s), value: 0x%8.8" PRIx32,
				reg_arch_info->num, reg->name, reg_arch_info->value);
		return ERROR_OK;
	}

	/* translate the OpenOCD register number into the target's numbering */
	int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);

	if (reg_arch_info->enable == false) {
		reg_arch_info->value = NDS32_REGISTER_DISABLE;
		retval = ERROR_FAIL;
	} else {
		if ((nds32->fpu_enable == false) &&
				(NDS32_REG_TYPE_FPU == nds32_reg_type(mapped_regnum))) {
			/* FPU disabled: report a fake 0 instead of touching DIM */
			reg_arch_info->value = 0;
			retval = ERROR_OK;
		} else if ((nds32->audio_enable == false) &&
				(NDS32_REG_TYPE_AUMR == nds32_reg_type(mapped_regnum))) {
			/* audio extension disabled: same fake 0 */
			reg_arch_info->value = 0;
			retval = ERROR_OK;
		} else {
			retval = aice_read_register(aice,
					mapped_regnum, &(reg_arch_info->value));
		}

		LOG_DEBUG("reading register %i(%s), value: 0x%8.8" PRIx32,
				reg_arch_info->num, reg->name, reg_arch_info->value);
	}

	/* only mark the cache valid when the read actually succeeded */
	if (retval == ERROR_OK) {
		reg->valid = true;
		reg->dirty = false;
	}

	return retval;
}
124
/*
 * reg->type->get handler for the 64-bit FPU double registers
 * ($fd0..$fd31).  Same policy as nds32_get_core_reg: cached value when
 * valid, fake 0 when the FPU is disabled, NDS32_REGISTER_DISABLE and
 * ERROR_FAIL when the register is not present on this core.
 */
static int nds32_get_core_reg_64(struct reg *reg)
{
	int retval;
	struct nds32_reg *reg_arch_info = reg->arch_info;
	struct target *target = reg_arch_info->target;
	struct nds32 *nds32 = target_to_nds32(target);
	struct aice_port_s *aice = target_to_aice(target);

	if (target->state != TARGET_HALTED) {
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (reg->valid)
		return ERROR_OK;

	if (reg_arch_info->enable == false) {
		reg_arch_info->value_64 = NDS32_REGISTER_DISABLE;
		retval = ERROR_FAIL;
	} else {
		if ((nds32->fpu_enable == false) &&
				((FD0 <= reg_arch_info->num) && (reg_arch_info->num <= FD31))) {
			/* FPU disabled: report a fake 0 instead of touching DIM */
			reg_arch_info->value_64 = 0;
			retval = ERROR_OK;
		} else {
			retval = aice_read_reg_64(aice, reg_arch_info->num,
					&(reg_arch_info->value_64));
		}
	}

	if (retval == ERROR_OK) {
		reg->valid = true;
		reg->dirty = false;
	}

	return retval;
}
162
163 static int nds32_update_psw(struct nds32 *nds32)
164 {
165 uint32_t value_ir0;
166 struct aice_port_s *aice = target_to_aice(nds32->target);
167
168 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
169
170 /* Save data memory endian */
171 if ((value_ir0 >> 5) & 0x1) {
172 nds32->data_endian = TARGET_BIG_ENDIAN;
173 aice_set_data_endian(aice, AICE_BIG_ENDIAN);
174 } else {
175 nds32->data_endian = TARGET_LITTLE_ENDIAN;
176 aice_set_data_endian(aice, AICE_LITTLE_ENDIAN);
177 }
178
179 /* Save translation status */
180 nds32->memory.address_translation = ((value_ir0 >> 7) & 0x1) ? true : false;
181
182 return ERROR_OK;
183 }
184
185 static int nds32_update_mmu_info(struct nds32 *nds32)
186 {
187 uint32_t value;
188
189 /* Update MMU control status */
190 nds32_get_mapped_reg(nds32, MR0, &value);
191 nds32->mmu_config.default_min_page_size = value & 0x1;
192 nds32->mmu_config.multiple_page_size_in_use = (value >> 10) & 0x1;
193
194 return ERROR_OK;
195 }
196
197 static int nds32_update_cache_info(struct nds32 *nds32)
198 {
199 uint32_t value;
200
201 if (ERROR_OK == nds32_get_mapped_reg(nds32, MR8, &value)) {
202 if (value & 0x1)
203 nds32->memory.icache.enable = true;
204 else
205 nds32->memory.icache.enable = false;
206
207 if (value & 0x2)
208 nds32->memory.dcache.enable = true;
209 else
210 nds32->memory.dcache.enable = false;
211 } else {
212 nds32->memory.icache.enable = false;
213 nds32->memory.dcache.enable = false;
214 }
215
216 return ERROR_OK;
217 }
218
219 static int nds32_update_lm_info(struct nds32 *nds32)
220 {
221 struct nds32_memory *memory = &(nds32->memory);
222 uint32_t value_mr6;
223 uint32_t value_mr7;
224
225 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
226 if (value_mr6 & 0x1)
227 memory->ilm_enable = true;
228 else
229 memory->ilm_enable = false;
230
231 if (memory->ilm_align_ver == 0) { /* 1MB aligned */
232 memory->ilm_start = value_mr6 & 0xFFF00000;
233 memory->ilm_end = memory->ilm_start + memory->ilm_size;
234 } else if (memory->ilm_align_ver == 1) { /* aligned to local memory size */
235 memory->ilm_start = value_mr6 & 0xFFFFFC00;
236 memory->ilm_end = memory->ilm_start + memory->ilm_size;
237 } else {
238 memory->ilm_start = -1;
239 memory->ilm_end = -1;
240 }
241
242 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
243 if (value_mr7 & 0x1)
244 memory->dlm_enable = true;
245 else
246 memory->dlm_enable = false;
247
248 if (memory->dlm_align_ver == 0) { /* 1MB aligned */
249 memory->dlm_start = value_mr7 & 0xFFF00000;
250 memory->dlm_end = memory->dlm_start + memory->dlm_size;
251 } else if (memory->dlm_align_ver == 1) { /* aligned to local memory size */
252 memory->dlm_start = value_mr7 & 0xFFFFFC00;
253 memory->dlm_end = memory->dlm_start + memory->dlm_size;
254 } else {
255 memory->dlm_start = -1;
256 memory->dlm_end = -1;
257 }
258
259 return ERROR_OK;
260 }
261
262 /**
263 * If fpu/audio is disabled, to access fpu/audio registers will cause
264 * exceptions. So, we need to check if fpu/audio is enabled or not as
265 * target is halted. If fpu/audio is disabled, as users access fpu/audio
266 * registers, OpenOCD will return fake value 0 instead of accessing
267 * registers through DIM.
268 */
269 static int nds32_check_extension(struct nds32 *nds32)
270 {
271 uint32_t value;
272
273 nds32_get_mapped_reg(nds32, FUCPR, &value);
274 if (value == NDS32_REGISTER_DISABLE) {
275 nds32->fpu_enable = false;
276 nds32->audio_enable = false;
277 return ERROR_OK;
278 }
279
280 if (value & 0x1)
281 nds32->fpu_enable = true;
282 else
283 nds32->fpu_enable = false;
284
285 if (value & 0x80000000)
286 nds32->audio_enable = true;
287 else
288 nds32->audio_enable = false;
289
290 return ERROR_OK;
291 }
292
/*
 * reg->type->set handler for 32-bit registers.
 *
 * Writes the value through the AICE adapter and then reads it back so
 * the cached value is correct for registers with write-1-to-clear
 * semantics.  Values that would raise an exception on the target are
 * silently ignored; writes to registers of a disabled FPU/audio
 * extension only force the cached value to 0.  If the written register
 * controls state that OpenOCD caches ($psw, MMU/cache/LM control,
 * $fucpr), that cached state is refreshed immediately.
 */
static int nds32_set_core_reg(struct reg *reg, uint8_t *buf)
{
	struct nds32_reg *reg_arch_info = reg->arch_info;
	struct target *target = reg_arch_info->target;
	struct nds32 *nds32 = target_to_nds32(target);
	struct aice_port_s *aice = target_to_aice(target);
	uint32_t value = buf_get_u32(buf, 0, 32);

	if (target->state != TARGET_HALTED) {
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* translate the OpenOCD register number into the target's numbering */
	int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);

	/* ignore values that will generate exception */
	if (nds32_reg_exception(mapped_regnum, value))
		return ERROR_OK;

	LOG_DEBUG("writing register %i(%s) with value 0x%8.8" PRIx32,
			reg_arch_info->num, reg->name, value);

	if ((nds32->fpu_enable == false) &&
			(NDS32_REG_TYPE_FPU == nds32_reg_type(mapped_regnum))) {

		buf_set_u32(reg->value, 0, 32, 0);
	} else if ((nds32->audio_enable == false) &&
			(NDS32_REG_TYPE_AUMR == nds32_reg_type(mapped_regnum))) {

		buf_set_u32(reg->value, 0, 32, 0);
	} else {
		/* reg->value aliases reg_arch_info->value (set up in
		 * nds32_build_reg_cache), so this stores the new value there */
		buf_set_u32(reg->value, 0, 32, value);
		aice_write_register(aice, mapped_regnum, reg_arch_info->value);

		/* After set value to registers, read the value from target
		 * to avoid W1C inconsistency. */
		aice_read_register(aice, mapped_regnum, &(reg_arch_info->value));
	}

	reg->valid = true;
	reg->dirty = false;

	/* update registers to take effect right now */
	if (IR0 == mapped_regnum) {
		nds32_update_psw(nds32);
	} else if (MR0 == mapped_regnum) {
		nds32_update_mmu_info(nds32);
	} else if ((MR6 == mapped_regnum) || (MR7 == mapped_regnum)) {
		/* update lm information */
		nds32_update_lm_info(nds32);
	} else if (MR8 == mapped_regnum) {
		nds32_update_cache_info(nds32);
	} else if (FUCPR == mapped_regnum) {
		/* update audio/fpu setting */
		nds32_check_extension(nds32);
	}

	return ERROR_OK;
}
352
353 static int nds32_set_core_reg_64(struct reg *reg, uint8_t *buf)
354 {
355 struct nds32_reg *reg_arch_info = reg->arch_info;
356 struct target *target = reg_arch_info->target;
357 struct nds32 *nds32 = target_to_nds32(target);
358 uint32_t low_part = buf_get_u32(buf, 0, 32);
359 uint32_t high_part = buf_get_u32(buf, 32, 32);
360
361 if (target->state != TARGET_HALTED) {
362 LOG_ERROR("Target not halted");
363 return ERROR_TARGET_NOT_HALTED;
364 }
365
366 if ((nds32->fpu_enable == false) &&
367 ((FD0 <= reg_arch_info->num) && (reg_arch_info->num <= FD31))) {
368
369 buf_set_u32(reg->value, 0, 32, 0);
370 buf_set_u32(reg->value, 32, 32, 0);
371
372 reg->valid = true;
373 reg->dirty = false;
374 } else {
375 buf_set_u32(reg->value, 0, 32, low_part);
376 buf_set_u32(reg->value, 32, 32, high_part);
377
378 reg->valid = true;
379 reg->dirty = true;
380 }
381
382 return ERROR_OK;
383 }
384
/* reg->type accessors for 32-bit registers */
static const struct reg_arch_type nds32_reg_access_type = {
	.get = nds32_get_core_reg,
	.set = nds32_set_core_reg,
};
389
/* reg->type accessors for 64-bit (FPU double) registers */
static const struct reg_arch_type nds32_reg_access_type_64 = {
	.get = nds32_get_core_reg_64,
	.set = nds32_set_core_reg_64,
};
394
395 static struct reg_cache *nds32_build_reg_cache(struct target *target,
396 struct nds32 *nds32)
397 {
398 struct reg_cache *cache = malloc(sizeof(struct reg_cache));
399 struct reg *reg_list = calloc(TOTAL_REG_NUM, sizeof(struct reg));
400 struct nds32_reg *reg_arch_info = calloc(TOTAL_REG_NUM, sizeof(struct nds32_reg));
401 int i;
402
403 if (!cache || !reg_list || !reg_arch_info) {
404 free(cache);
405 free(reg_list);
406 free(reg_arch_info);
407 return NULL;
408 }
409
410 cache->name = "Andes registers";
411 cache->next = NULL;
412 cache->reg_list = reg_list;
413 cache->num_regs = 0;
414
415 for (i = 0; i < TOTAL_REG_NUM; i++) {
416 reg_arch_info[i].num = i;
417 reg_arch_info[i].target = target;
418 reg_arch_info[i].nds32 = nds32;
419 reg_arch_info[i].enable = false;
420
421 reg_list[i].name = nds32_reg_simple_name(i);
422 reg_list[i].number = reg_arch_info[i].num;
423 reg_list[i].size = nds32_reg_size(i);
424 reg_list[i].arch_info = &reg_arch_info[i];
425
426 reg_list[i].reg_data_type = malloc(sizeof(struct reg_data_type));
427
428 if (FD0 <= reg_arch_info[i].num && reg_arch_info[i].num <= FD31) {
429 reg_list[i].value = &(reg_arch_info[i].value_64);
430 reg_list[i].type = &nds32_reg_access_type_64;
431
432 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_DOUBLE;
433 reg_list[i].reg_data_type->id = "ieee_double";
434 reg_list[i].group = "float";
435 } else {
436 reg_list[i].value = &(reg_arch_info[i].value);
437 reg_list[i].type = &nds32_reg_access_type;
438 reg_list[i].group = "general";
439
440 if ((FS0 <= reg_arch_info[i].num) && (reg_arch_info[i].num <= FS31)) {
441 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_SINGLE;
442 reg_list[i].reg_data_type->id = "ieee_single";
443 reg_list[i].group = "float";
444 } else if ((reg_arch_info[i].num == FPCSR) ||
445 (reg_arch_info[i].num == FPCFG)) {
446 reg_list[i].group = "float";
447 } else if ((reg_arch_info[i].num == R28) ||
448 (reg_arch_info[i].num == R29) ||
449 (reg_arch_info[i].num == R31)) {
450 reg_list[i].reg_data_type->type = REG_TYPE_DATA_PTR;
451 reg_list[i].reg_data_type->id = "data_ptr";
452 } else if ((reg_arch_info[i].num == R30) ||
453 (reg_arch_info[i].num == PC)) {
454 reg_list[i].reg_data_type->type = REG_TYPE_CODE_PTR;
455 reg_list[i].reg_data_type->id = "code_ptr";
456 } else {
457 reg_list[i].reg_data_type->type = REG_TYPE_UINT32;
458 reg_list[i].reg_data_type->id = "uint32";
459 }
460 }
461
462 if (R16 <= reg_arch_info[i].num && reg_arch_info[i].num <= R25)
463 reg_list[i].caller_save = true;
464 else
465 reg_list[i].caller_save = false;
466
467 reg_list[i].feature = malloc(sizeof(struct reg_feature));
468
469 if (R0 <= reg_arch_info[i].num && reg_arch_info[i].num <= IFC_LP)
470 reg_list[i].feature->name = "org.gnu.gdb.nds32.core";
471 else if (CR0 <= reg_arch_info[i].num && reg_arch_info[i].num <= SECUR0)
472 reg_list[i].feature->name = "org.gnu.gdb.nds32.system";
473 else if (D0L24 <= reg_arch_info[i].num && reg_arch_info[i].num <= CBE3)
474 reg_list[i].feature->name = "org.gnu.gdb.nds32.audio";
475 else if (FPCSR <= reg_arch_info[i].num && reg_arch_info[i].num <= FD31)
476 reg_list[i].feature->name = "org.gnu.gdb.nds32.fpu";
477
478 cache->num_regs++;
479 }
480
481 nds32->core_cache = cache;
482
483 return cache;
484 }
485
486 static int nds32_reg_cache_init(struct target *target, struct nds32 *nds32)
487 {
488 struct reg_cache *cache;
489
490 cache = nds32_build_reg_cache(target, nds32);
491 if (!cache)
492 return ERROR_FAIL;
493
494 *register_get_last_cache_p(&target->reg_cache) = cache;
495
496 return ERROR_OK;
497 }
498
499 static struct reg *nds32_reg_current(struct nds32 *nds32, unsigned regnum)
500 {
501 struct reg *r;
502
503 r = nds32->core_cache->reg_list + regnum;
504
505 return r;
506 }
507
/*
 * Refresh all target state OpenOCD caches after a halt: $pc/$psw, the
 * PSW-derived settings, MMU/cache/local-memory configuration, and the
 * FPU/audio extension status.
 */
int nds32_full_context(struct nds32 *nds32)
{
	uint32_t value, value_ir0;

	/* save $pc & $psw; the reads also pull both into the register cache */
	nds32_get_mapped_reg(nds32, PC, &value);
	nds32_get_mapped_reg(nds32, IR0, &value_ir0);

	nds32_update_psw(nds32);
	nds32_update_mmu_info(nds32);
	nds32_update_cache_info(nds32);
	nds32_update_lm_info(nds32);

	nds32_check_extension(nds32);

	return ERROR_OK;
}
525
526 /* get register value internally */
527 int nds32_get_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t *value)
528 {
529 struct reg_cache *reg_cache = nds32->core_cache;
530 struct reg *r;
531
532 if (regnum > reg_cache->num_regs)
533 return ERROR_FAIL;
534
535 r = nds32_reg_current(nds32, regnum);
536
537 if (ERROR_OK != r->type->get(r))
538 return ERROR_FAIL;
539
540 *value = buf_get_u32(r->value, 0, 32);
541
542 return ERROR_OK;
543 }
544
545 /** set register internally */
546 int nds32_set_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t value)
547 {
548 struct reg_cache *reg_cache = nds32->core_cache;
549 struct reg *r;
550 uint8_t set_value[4];
551
552 if (regnum > reg_cache->num_regs)
553 return ERROR_FAIL;
554
555 r = nds32_reg_current(nds32, regnum);
556
557 buf_set_u32(set_value, 0, 32, value);
558
559 return r->type->set(r, set_value);
560 }
561
562 /** get general register list */
563 static int nds32_get_general_reg_list(struct nds32 *nds32,
564 struct reg **reg_list[], int *reg_list_size)
565 {
566 struct reg *reg_current;
567 int i;
568 int current_idx;
569
570 /** freed in gdb_server.c */
571 *reg_list = malloc(sizeof(struct reg *) * (IFC_LP - R0 + 1));
572 current_idx = 0;
573
574 for (i = R0; i < IFC_LP + 1; i++) {
575 reg_current = nds32_reg_current(nds32, i);
576 if (((struct nds32_reg *)reg_current->arch_info)->enable) {
577 (*reg_list)[current_idx] = reg_current;
578 current_idx++;
579 }
580 }
581 *reg_list_size = current_idx;
582
583 return ERROR_OK;
584 }
585
586 /** get all register list */
587 static int nds32_get_all_reg_list(struct nds32 *nds32,
588 struct reg **reg_list[], int *reg_list_size)
589 {
590 struct reg_cache *reg_cache = nds32->core_cache;
591 struct reg *reg_current;
592 unsigned int i;
593
594 *reg_list_size = reg_cache->num_regs;
595
596 /** freed in gdb_server.c */
597 *reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));
598
599 for (i = 0; i < reg_cache->num_regs; i++) {
600 reg_current = nds32_reg_current(nds32, i);
601 reg_current->exist = ((struct nds32_reg *)
602 reg_current->arch_info)->enable;
603 (*reg_list)[i] = reg_current;
604 }
605
606 return ERROR_OK;
607 }
608
609 /** get all register list */
610 int nds32_get_gdb_reg_list(struct target *target,
611 struct reg **reg_list[], int *reg_list_size,
612 enum target_register_class reg_class)
613 {
614 struct nds32 *nds32 = target_to_nds32(target);
615
616 switch (reg_class) {
617 case REG_CLASS_ALL:
618 return nds32_get_all_reg_list(nds32, reg_list, reg_list_size);
619 case REG_CLASS_GENERAL:
620 return nds32_get_general_reg_list(nds32, reg_list, reg_list_size);
621 default:
622 return ERROR_FAIL;
623 }
624
625 return ERROR_FAIL;
626 }
627
/*
 * Choose which memory space (MEM/ILM/DLM) the AICE adapter should use
 * for an access covering [address, address + length), and clamp
 * *end_address so the caller can split a transfer that crosses a
 * local-memory boundary into homogeneous chunks.
 *
 * Mode selection is only performed in AUTO mode on EDMs that support
 * ACC_CTL and DALM; in every other case *end_address stays at
 * address + length and any applicable default mode is set.
 */
static int nds32_select_memory_mode(struct target *target, uint32_t address,
		uint32_t length, uint32_t *end_address)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct aice_port_s *aice = target_to_aice(target);
	struct nds32_memory *memory = &(nds32->memory);
	struct nds32_edm *edm = &(nds32->edm);
	uint32_t dlm_start, dlm_end;
	uint32_t ilm_start, ilm_end;
	uint32_t address_end = address + length;

	/* init end_address */
	*end_address = address_end;

	/* CPU-channel accesses go through the core, not the bus: no mode */
	if (NDS_MEMORY_ACC_CPU == memory->access_channel)
		return ERROR_OK;

	if (edm->access_control == false) {
		LOG_DEBUG("EDM does not support ACC_CTL");
		return ERROR_OK;
	}

	if (edm->direct_access_local_memory == false) {
		LOG_DEBUG("EDM does not support DALM");
		aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
		return ERROR_OK;
	}

	if (NDS_MEMORY_SELECT_AUTO != memory->mode) {
		LOG_DEBUG("Memory mode is not AUTO");
		return ERROR_OK;
	}

	/* set default mode */
	aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);

	if ((memory->ilm_base != 0) && (memory->ilm_enable == true)) {
		ilm_start = memory->ilm_start;
		ilm_end = memory->ilm_end;

		/* case 1, address < ilm_start */
		if (address < ilm_start) {
			if (ilm_start < address_end) {
				/* update end_address to split non-ILM from ILM */
				*end_address = ilm_start;
			}
			/* MEM mode */
			aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
		} else if ((ilm_start <= address) && (address < ilm_end)) {
			/* case 2, ilm_start <= address < ilm_end */
			if (ilm_end < address_end) {
				/* update end_address to split non-ILM from ILM */
				*end_address = ilm_end;
			}
			/* ILM mode */
			aice_memory_mode(aice, NDS_MEMORY_SELECT_ILM);
		} else { /* case 3, ilm_end <= address */
			/* MEM mode */
			aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
		}

		return ERROR_OK;
	} else {
		LOG_DEBUG("ILM is not enabled");
	}

	if ((memory->dlm_base != 0) && (memory->dlm_enable == true)) {
		dlm_start = memory->dlm_start;
		dlm_end = memory->dlm_end;

		/* case 1, address < dlm_start */
		if (address < dlm_start) {
			if (dlm_start < address_end) {
				/* update end_address to split non-DLM from DLM */
				*end_address = dlm_start;
			}
			/* MEM mode */
			aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
		} else if ((dlm_start <= address) && (address < dlm_end)) {
			/* case 2, dlm_start <= address < dlm_end */
			if (dlm_end < address_end) {
				/* update end_address to split non-DLM from DLM */
				*end_address = dlm_end;
			}
			/* DLM mode */
			aice_memory_mode(aice, NDS_MEMORY_SELECT_DLM);
		} else { /* case 3, dlm_end <= address */
			/* MEM mode */
			aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
		}

		return ERROR_OK;
	} else {
		LOG_DEBUG("DLM is not enabled");
	}

	return ERROR_OK;
}
726
/*
 * Read `size` bytes at `address` into `buffer`.  The transfer is split
 * into an unaligned head, a word-aligned middle (bulk transfer when
 * longer than 8 bytes), 2-byte chunks, and a single tail byte.  Each
 * chunk re-selects the memory mode, so a transfer crossing an ILM/DLM
 * boundary is split at that boundary (see nds32_select_memory_mode).
 */
int nds32_read_buffer(struct target *target, uint32_t address,
		uint32_t size, uint8_t *buffer)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_memory *memory = &(nds32->memory);

	/* CPU-channel access executes on the core, so it must be halted */
	if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
			(target->state != TARGET_HALTED)) {
		LOG_WARNING("target was not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	LOG_DEBUG("READ BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
			address,
			size);

	int retval = ERROR_OK;
	struct aice_port_s *aice = target_to_aice(target);
	uint32_t end_address;

	/* fast path: a single aligned halfword */
	if (((address % 2) == 0) && (size == 2)) {
		nds32_select_memory_mode(target, address, 2, &end_address);
		return aice_read_mem_unit(aice, address, 2, 1, buffer);
	}

	/* handle unaligned head bytes */
	if (address % 4) {
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		nds32_select_memory_mode(target, address, unaligned, &end_address);
		retval = aice_read_mem_unit(aice, address, 1, unaligned, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4) {
		int aligned = size - (size % 4);
		int read_len;

		do {
			/* end_address may clamp read_len at an ILM/DLM boundary */
			nds32_select_memory_mode(target, address, aligned, &end_address);

			read_len = end_address - address;

			if (read_len > 8)
				retval = aice_read_mem_bulk(aice, address, read_len, buffer);
			else
				retval = aice_read_mem_unit(aice, address, 4, read_len / 4, buffer);

			if (retval != ERROR_OK)
				return retval;

			buffer += read_len;
			address += read_len;
			size -= read_len;
			aligned -= read_len;

		} while (aligned != 0);
	}

	/*prevent byte access when possible (avoid AHB access limitations in some cases)*/
	if (size >= 2) {
		int aligned = size - (size % 2);
		nds32_select_memory_mode(target, address, aligned, &end_address);
		retval = aice_read_mem_unit(aice, address, 2, aligned / 2, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}
	/* handle tail reads of less than 2 bytes */
	if (size > 0) {
		nds32_select_memory_mode(target, address, size, &end_address);
		retval = aice_read_mem_unit(aice, address, 1, size, buffer);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
817
818 int nds32_read_memory(struct target *target, uint32_t address,
819 uint32_t size, uint32_t count, uint8_t *buffer)
820 {
821 struct aice_port_s *aice = target_to_aice(target);
822
823 return aice_read_mem_unit(aice, address, size, count, buffer);
824 }
825
826 int nds32_read_phys_memory(struct target *target, uint32_t address,
827 uint32_t size, uint32_t count, uint8_t *buffer)
828 {
829 struct aice_port_s *aice = target_to_aice(target);
830 struct nds32 *nds32 = target_to_nds32(target);
831 struct nds32_memory *memory = &(nds32->memory);
832 enum nds_memory_access orig_channel;
833 int result;
834
835 /* switch to BUS access mode to skip MMU */
836 orig_channel = memory->access_channel;
837 memory->access_channel = NDS_MEMORY_ACC_BUS;
838 aice_memory_access(aice, memory->access_channel);
839
840 /* The input address is physical address. No need to do address translation. */
841 result = aice_read_mem_unit(aice, address, size, count, buffer);
842
843 /* restore to origin access mode */
844 memory->access_channel = orig_channel;
845 aice_memory_access(aice, memory->access_channel);
846
847 return result;
848 }
849
/*
 * Write `size` bytes from `buffer` to `address`.  Mirrors
 * nds32_read_buffer: unaligned head, word-aligned middle (bulk when
 * longer than 8 bytes), byte tail; each chunk re-selects the memory
 * mode so a transfer crossing an ILM/DLM boundary is split there.
 */
int nds32_write_buffer(struct target *target, uint32_t address,
		uint32_t size, const uint8_t *buffer)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_memory *memory = &(nds32->memory);

	/* CPU-channel access executes on the core, so it must be halted */
	if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
			(target->state != TARGET_HALTED)) {
		LOG_WARNING("target was not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	LOG_DEBUG("WRITE BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
			address,
			size);

	struct aice_port_s *aice = target_to_aice(target);
	int retval = ERROR_OK;
	uint32_t end_address;

	/* fast path: a single aligned halfword */
	if (((address % 2) == 0) && (size == 2)) {
		nds32_select_memory_mode(target, address, 2, &end_address);
		return aice_write_mem_unit(aice, address, 2, 1, buffer);
	}

	/* handle unaligned head bytes */
	if (address % 4) {
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		nds32_select_memory_mode(target, address, unaligned, &end_address);
		retval = aice_write_mem_unit(aice, address, 1, unaligned, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4) {
		int aligned = size - (size % 4);
		int write_len;

		do {
			/* end_address may clamp write_len at an ILM/DLM boundary */
			nds32_select_memory_mode(target, address, aligned, &end_address);

			write_len = end_address - address;
			if (write_len > 8)
				retval = aice_write_mem_bulk(aice, address, write_len, buffer);
			else
				retval = aice_write_mem_unit(aice, address, 4, write_len / 4, buffer);
			if (retval != ERROR_OK)
				return retval;

			buffer += write_len;
			address += write_len;
			size -= write_len;
			aligned -= write_len;

		} while (aligned != 0);
	}

	/* handle tail writes of less than 4 bytes */
	if (size > 0) {
		nds32_select_memory_mode(target, address, size, &end_address);
		retval = aice_write_mem_unit(aice, address, 1, size, buffer);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
926
927 int nds32_write_memory(struct target *target, uint32_t address,
928 uint32_t size, uint32_t count, const uint8_t *buffer)
929 {
930 struct aice_port_s *aice = target_to_aice(target);
931
932 return aice_write_mem_unit(aice, address, size, count, buffer);
933 }
934
935 int nds32_write_phys_memory(struct target *target, uint32_t address,
936 uint32_t size, uint32_t count, const uint8_t *buffer)
937 {
938 struct aice_port_s *aice = target_to_aice(target);
939 struct nds32 *nds32 = target_to_nds32(target);
940 struct nds32_memory *memory = &(nds32->memory);
941 enum nds_memory_access orig_channel;
942 int result;
943
944 /* switch to BUS access mode to skip MMU */
945 orig_channel = memory->access_channel;
946 memory->access_channel = NDS_MEMORY_ACC_BUS;
947 aice_memory_access(aice, memory->access_channel);
948
949 /* The input address is physical address. No need to do address translation. */
950 result = aice_write_mem_unit(aice, address, size, count, buffer);
951
952 /* restore to origin access mode */
953 memory->access_channel = orig_channel;
954 aice_memory_access(aice, memory->access_channel);
955
956 return result;
957 }
958
959 int nds32_mmu(struct target *target, int *enabled)
960 {
961 if (target->state != TARGET_HALTED) {
962 LOG_ERROR("%s: target not halted", __func__);
963 return ERROR_TARGET_INVALID;
964 }
965
966 struct nds32 *nds32 = target_to_nds32(target);
967 struct nds32_memory *memory = &(nds32->memory);
968 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
969
970 if ((mmu_config->memory_protection == 2) && (memory->address_translation == true))
971 *enabled = 1;
972 else
973 *enabled = 0;
974
975 return ERROR_OK;
976 }
977
/*
 * Print the halt state (halt reason, $psw, $pc) to the user and mirror
 * the pc value into the "pc" pseudo register of the target's cache.
 */
int nds32_arch_state(struct target *target)
{
	struct nds32 *nds32 = target_to_nds32(target);

	if (nds32->common_magic != NDS32_COMMON_MAGIC) {
		LOG_ERROR("BUG: called for a non-Andes target");
		return ERROR_FAIL;
	}

	uint32_t value_pc, value_psw;

	nds32_get_mapped_reg(nds32, PC, &value_pc);
	nds32_get_mapped_reg(nds32, IR0, &value_psw);

	LOG_USER("target halted due to %s\n"
			"psw: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
			debug_reason_name(target),
			value_psw,
			value_pc,
			nds32->virtual_hosting ? ", virtual hosting" : "");

	/* save pc value to pseudo register pc */
	struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
	buf_set_u32(reg->value, 0, 32, value_pc);

	return ERROR_OK;
}
1005
1006 static void nds32_init_must_have_registers(struct nds32 *nds32)
1007 {
1008 struct reg_cache *reg_cache = nds32->core_cache;
1009
1010 /** MUST have general registers */
1011 ((struct nds32_reg *)reg_cache->reg_list[R0].arch_info)->enable = true;
1012 ((struct nds32_reg *)reg_cache->reg_list[R1].arch_info)->enable = true;
1013 ((struct nds32_reg *)reg_cache->reg_list[R2].arch_info)->enable = true;
1014 ((struct nds32_reg *)reg_cache->reg_list[R3].arch_info)->enable = true;
1015 ((struct nds32_reg *)reg_cache->reg_list[R4].arch_info)->enable = true;
1016 ((struct nds32_reg *)reg_cache->reg_list[R5].arch_info)->enable = true;
1017 ((struct nds32_reg *)reg_cache->reg_list[R6].arch_info)->enable = true;
1018 ((struct nds32_reg *)reg_cache->reg_list[R7].arch_info)->enable = true;
1019 ((struct nds32_reg *)reg_cache->reg_list[R8].arch_info)->enable = true;
1020 ((struct nds32_reg *)reg_cache->reg_list[R9].arch_info)->enable = true;
1021 ((struct nds32_reg *)reg_cache->reg_list[R10].arch_info)->enable = true;
1022 ((struct nds32_reg *)reg_cache->reg_list[R15].arch_info)->enable = true;
1023 ((struct nds32_reg *)reg_cache->reg_list[R28].arch_info)->enable = true;
1024 ((struct nds32_reg *)reg_cache->reg_list[R29].arch_info)->enable = true;
1025 ((struct nds32_reg *)reg_cache->reg_list[R30].arch_info)->enable = true;
1026 ((struct nds32_reg *)reg_cache->reg_list[R31].arch_info)->enable = true;
1027 ((struct nds32_reg *)reg_cache->reg_list[PC].arch_info)->enable = true;
1028
1029 /** MUST have configuration system registers */
1030 ((struct nds32_reg *)reg_cache->reg_list[CR0].arch_info)->enable = true;
1031 ((struct nds32_reg *)reg_cache->reg_list[CR1].arch_info)->enable = true;
1032 ((struct nds32_reg *)reg_cache->reg_list[CR2].arch_info)->enable = true;
1033 ((struct nds32_reg *)reg_cache->reg_list[CR3].arch_info)->enable = true;
1034 ((struct nds32_reg *)reg_cache->reg_list[CR4].arch_info)->enable = true;
1035
1036 /** MUST have interrupt system registers */
1037 ((struct nds32_reg *)reg_cache->reg_list[IR0].arch_info)->enable = true;
1038 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = true;
1039 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = true;
1040 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = true;
1041 ((struct nds32_reg *)reg_cache->reg_list[IR6].arch_info)->enable = true;
1042 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = true;
1043 ((struct nds32_reg *)reg_cache->reg_list[IR11].arch_info)->enable = true;
1044 ((struct nds32_reg *)reg_cache->reg_list[IR14].arch_info)->enable = true;
1045 ((struct nds32_reg *)reg_cache->reg_list[IR15].arch_info)->enable = true;
1046
1047 /** MUST have MMU system registers */
1048 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = true;
1049
1050 /** MUST have EDM system registers */
1051 ((struct nds32_reg *)reg_cache->reg_list[DR40].arch_info)->enable = true;
1052 ((struct nds32_reg *)reg_cache->reg_list[DR42].arch_info)->enable = true;
1053 }
1054
1055 static int nds32_init_memory_config(struct nds32 *nds32)
1056 {
1057 uint32_t value_cr1; /* ICM_CFG */
1058 uint32_t value_cr2; /* DCM_CFG */
1059 struct nds32_memory *memory = &(nds32->memory);
1060
1061 /* read $cr1 to init instruction memory information */
1062 nds32_get_mapped_reg(nds32, CR1, &value_cr1);
1063 memory->icache.set = value_cr1 & 0x7;
1064 memory->icache.way = (value_cr1 >> 3) & 0x7;
1065 memory->icache.line_size = (value_cr1 >> 6) & 0x7;
1066 memory->icache.lock_support = (value_cr1 >> 9) & 0x1;
1067
1068 memory->ilm_base = (value_cr1 >> 10) & 0x7;
1069 memory->ilm_align_ver = (value_cr1 >> 13) & 0x3;
1070
1071 /* read $cr2 to init data memory information */
1072 nds32_get_mapped_reg(nds32, CR2, &value_cr2);
1073 memory->dcache.set = value_cr2 & 0x7;
1074 memory->dcache.way = (value_cr2 >> 3) & 0x7;
1075 memory->dcache.line_size = (value_cr2 >> 6) & 0x7;
1076 memory->dcache.lock_support = (value_cr2 >> 9) & 0x1;
1077
1078 memory->dlm_base = (value_cr2 >> 10) & 0x7;
1079 memory->dlm_align_ver = (value_cr2 >> 13) & 0x3;
1080
1081 return ERROR_OK;
1082 }
1083
1084 static void nds32_init_config(struct nds32 *nds32)
1085 {
1086 uint32_t value_cr0;
1087 uint32_t value_cr3;
1088 uint32_t value_cr4;
1089 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1090 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1091 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1092
1093 nds32_get_mapped_reg(nds32, CR0, &value_cr0);
1094 nds32_get_mapped_reg(nds32, CR3, &value_cr3);
1095 nds32_get_mapped_reg(nds32, CR4, &value_cr4);
1096
1097 /* config cpu version */
1098 cpu_version->performance_extension = value_cr0 & 0x1;
1099 cpu_version->_16bit_extension = (value_cr0 >> 1) & 0x1;
1100 cpu_version->performance_extension_2 = (value_cr0 >> 2) & 0x1;
1101 cpu_version->cop_fpu_extension = (value_cr0 >> 3) & 0x1;
1102 cpu_version->string_extension = (value_cr0 >> 4) & 0x1;
1103 cpu_version->revision = (value_cr0 >> 16) & 0xFF;
1104 cpu_version->cpu_id_family = (value_cr0 >> 24) & 0xF;
1105 cpu_version->cpu_id_version = (value_cr0 >> 28) & 0xF;
1106
1107 /* config MMU */
1108 mmu_config->memory_protection = value_cr3 & 0x3;
1109 mmu_config->memory_protection_version = (value_cr3 >> 2) & 0x1F;
1110 mmu_config->fully_associative_tlb = (value_cr3 >> 7) & 0x1;
1111 if (mmu_config->fully_associative_tlb) {
1112 mmu_config->tlb_size = (value_cr3 >> 8) & 0x7F;
1113 } else {
1114 mmu_config->tlb_ways = (value_cr3 >> 8) & 0x7;
1115 mmu_config->tlb_sets = (value_cr3 >> 11) & 0x7;
1116 }
1117 mmu_config->_8k_page_support = (value_cr3 >> 15) & 0x1;
1118 mmu_config->extra_page_size_support = (value_cr3 >> 16) & 0xFF;
1119 mmu_config->tlb_lock = (value_cr3 >> 24) & 0x1;
1120 mmu_config->hardware_page_table_walker = (value_cr3 >> 25) & 0x1;
1121 mmu_config->default_endian = (value_cr3 >> 26) & 0x1;
1122 mmu_config->partition_num = (value_cr3 >> 27) & 0x1;
1123 mmu_config->invisible_tlb = (value_cr3 >> 28) & 0x1;
1124 mmu_config->vlpt = (value_cr3 >> 29) & 0x1;
1125 mmu_config->ntme = (value_cr3 >> 30) & 0x1;
1126 mmu_config->drde = (value_cr3 >> 31) & 0x1;
1127
1128 /* config misc */
1129 misc_config->edm = value_cr4 & 0x1;
1130 misc_config->local_memory_dma = (value_cr4 >> 1) & 0x1;
1131 misc_config->performance_monitor = (value_cr4 >> 2) & 0x1;
1132 misc_config->high_speed_memory_port = (value_cr4 >> 3) & 0x1;
1133 misc_config->debug_tracer = (value_cr4 >> 4) & 0x1;
1134 misc_config->div_instruction = (value_cr4 >> 5) & 0x1;
1135 misc_config->mac_instruction = (value_cr4 >> 6) & 0x1;
1136 misc_config->audio_isa = (value_cr4 >> 7) & 0x3;
1137 misc_config->L2_cache = (value_cr4 >> 9) & 0x1;
1138 misc_config->reduce_register = (value_cr4 >> 10) & 0x1;
1139 misc_config->addr_24 = (value_cr4 >> 11) & 0x1;
1140 misc_config->interruption_level = (value_cr4 >> 12) & 0x1;
1141 misc_config->baseline_instruction = (value_cr4 >> 13) & 0x7;
1142 misc_config->no_dx_register = (value_cr4 >> 16) & 0x1;
1143 misc_config->implement_dependant_register = (value_cr4 >> 17) & 0x1;
1144 misc_config->implement_dependant_sr_encoding = (value_cr4 >> 18) & 0x1;
1145 misc_config->ifc = (value_cr4 >> 19) & 0x1;
1146 misc_config->mcu = (value_cr4 >> 20) & 0x1;
1147 misc_config->shadow = (value_cr4 >> 21) & 0x7;
1148 misc_config->ex9 = (value_cr4 >> 24) & 0x1;
1149
1150 nds32_init_memory_config(nds32);
1151 }
1152
1153 static int nds32_init_option_registers(struct nds32 *nds32)
1154 {
1155 struct reg_cache *reg_cache = nds32->core_cache;
1156 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1157 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1158 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1159 struct nds32_memory *memory_config = &(nds32->memory);
1160
1161 bool no_cr5;
1162 bool mr10_exist;
1163 bool no_racr0;
1164
1165 if (((cpu_version->cpu_id_family == 0xC) || (cpu_version->cpu_id_family == 0xD)) &&
1166 ((cpu_version->revision & 0xFC) == 0)) {
1167 no_cr5 = true;
1168 mr10_exist = true;
1169 no_racr0 = true;
1170 } else {
1171 no_cr5 = false;
1172 mr10_exist = false;
1173 no_racr0 = false;
1174 }
1175
1176 if (misc_config->reduce_register == false) {
1177 ((struct nds32_reg *)reg_cache->reg_list[R11].arch_info)->enable = true;
1178 ((struct nds32_reg *)reg_cache->reg_list[R12].arch_info)->enable = true;
1179 ((struct nds32_reg *)reg_cache->reg_list[R13].arch_info)->enable = true;
1180 ((struct nds32_reg *)reg_cache->reg_list[R14].arch_info)->enable = true;
1181 ((struct nds32_reg *)reg_cache->reg_list[R16].arch_info)->enable = true;
1182 ((struct nds32_reg *)reg_cache->reg_list[R17].arch_info)->enable = true;
1183 ((struct nds32_reg *)reg_cache->reg_list[R18].arch_info)->enable = true;
1184 ((struct nds32_reg *)reg_cache->reg_list[R19].arch_info)->enable = true;
1185 ((struct nds32_reg *)reg_cache->reg_list[R20].arch_info)->enable = true;
1186 ((struct nds32_reg *)reg_cache->reg_list[R21].arch_info)->enable = true;
1187 ((struct nds32_reg *)reg_cache->reg_list[R22].arch_info)->enable = true;
1188 ((struct nds32_reg *)reg_cache->reg_list[R23].arch_info)->enable = true;
1189 ((struct nds32_reg *)reg_cache->reg_list[R24].arch_info)->enable = true;
1190 ((struct nds32_reg *)reg_cache->reg_list[R25].arch_info)->enable = true;
1191 ((struct nds32_reg *)reg_cache->reg_list[R26].arch_info)->enable = true;
1192 ((struct nds32_reg *)reg_cache->reg_list[R27].arch_info)->enable = true;
1193 }
1194
1195 if (misc_config->no_dx_register == false) {
1196 ((struct nds32_reg *)reg_cache->reg_list[D0LO].arch_info)->enable = true;
1197 ((struct nds32_reg *)reg_cache->reg_list[D0HI].arch_info)->enable = true;
1198 ((struct nds32_reg *)reg_cache->reg_list[D1LO].arch_info)->enable = true;
1199 ((struct nds32_reg *)reg_cache->reg_list[D1HI].arch_info)->enable = true;
1200 }
1201
1202 if (misc_config->ex9)
1203 ((struct nds32_reg *)reg_cache->reg_list[ITB].arch_info)->enable = true;
1204
1205 if (no_cr5 == false)
1206 ((struct nds32_reg *)reg_cache->reg_list[CR5].arch_info)->enable = true;
1207
1208 if (cpu_version->cop_fpu_extension) {
1209 ((struct nds32_reg *)reg_cache->reg_list[CR6].arch_info)->enable = true;
1210 ((struct nds32_reg *)reg_cache->reg_list[FPCSR].arch_info)->enable = true;
1211 ((struct nds32_reg *)reg_cache->reg_list[FPCFG].arch_info)->enable = true;
1212 }
1213
1214 if (mmu_config->memory_protection == 1) {
1215 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1216 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = false;
1217 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = false;
1218 }
1219
1220 if (nds32->privilege_level != 0)
1221 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = false;
1222
1223 if (misc_config->mcu == true)
1224 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = false;
1225
1226 if (misc_config->interruption_level == false) {
1227 ((struct nds32_reg *)reg_cache->reg_list[IR2].arch_info)->enable = true;
1228 ((struct nds32_reg *)reg_cache->reg_list[IR5].arch_info)->enable = true;
1229 ((struct nds32_reg *)reg_cache->reg_list[IR10].arch_info)->enable = true;
1230 ((struct nds32_reg *)reg_cache->reg_list[IR12].arch_info)->enable = true;
1231 ((struct nds32_reg *)reg_cache->reg_list[IR13].arch_info)->enable = true;
1232
1233 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1234 if (mmu_config->memory_protection != 1)
1235 ((struct nds32_reg *)reg_cache->reg_list[IR7].arch_info)->enable = true;
1236 }
1237
1238 if ((cpu_version->cpu_id_family == 0x9) ||
1239 (cpu_version->cpu_id_family == 0xA) ||
1240 (cpu_version->cpu_id_family == 0xC) ||
1241 (cpu_version->cpu_id_family == 0xD))
1242 ((struct nds32_reg *)reg_cache->reg_list[IR8].arch_info)->enable = true;
1243
1244 if (misc_config->shadow == 1) {
1245 ((struct nds32_reg *)reg_cache->reg_list[IR16].arch_info)->enable = true;
1246 ((struct nds32_reg *)reg_cache->reg_list[IR17].arch_info)->enable = true;
1247 }
1248
1249 if (misc_config->ifc)
1250 ((struct nds32_reg *)reg_cache->reg_list[IFC_LP].arch_info)->enable = true;
1251
1252 if (nds32->privilege_level != 0)
1253 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = false;
1254
1255 if (mmu_config->memory_protection == 1) {
1256 if (mmu_config->memory_protection_version == 24)
1257 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1258
1259 if (nds32->privilege_level == 0) {
1260 if ((mmu_config->memory_protection_version == 16) ||
1261 (mmu_config->memory_protection_version == 24)) {
1262 ((struct nds32_reg *)reg_cache->reg_list[MR11].arch_info)->enable = true;
1263 ((struct nds32_reg *)reg_cache->reg_list[SECUR0].arch_info)->enable = true;
1264 ((struct nds32_reg *)reg_cache->reg_list[IR20].arch_info)->enable = true;
1265 ((struct nds32_reg *)reg_cache->reg_list[IR22].arch_info)->enable = true;
1266 ((struct nds32_reg *)reg_cache->reg_list[IR24].arch_info)->enable = true;
1267 ((struct nds32_reg *)reg_cache->reg_list[IR30].arch_info)->enable = true;
1268
1269 if (misc_config->shadow == 1) {
1270 ((struct nds32_reg *)reg_cache->reg_list[IR21].arch_info)->enable = true;
1271 ((struct nds32_reg *)reg_cache->reg_list[IR23].arch_info)->enable = true;
1272 ((struct nds32_reg *)reg_cache->reg_list[IR25].arch_info)->enable = true;
1273 }
1274 }
1275 }
1276 } else if (mmu_config->memory_protection == 2) {
1277 ((struct nds32_reg *)reg_cache->reg_list[MR1].arch_info)->enable = true;
1278 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1279
1280 if ((cpu_version->cpu_id_family != 0xA) && (cpu_version->cpu_id_family != 0xC) &&
1281 (cpu_version->cpu_id_family != 0xD))
1282 ((struct nds32_reg *)reg_cache->reg_list[MR5].arch_info)->enable = true;
1283 }
1284
1285 if (mmu_config->memory_protection > 0) {
1286 ((struct nds32_reg *)reg_cache->reg_list[MR2].arch_info)->enable = true;
1287 ((struct nds32_reg *)reg_cache->reg_list[MR3].arch_info)->enable = true;
1288 }
1289
1290 if (memory_config->ilm_base != 0)
1291 if (nds32->privilege_level == 0)
1292 ((struct nds32_reg *)reg_cache->reg_list[MR6].arch_info)->enable = true;
1293
1294 if (memory_config->dlm_base != 0)
1295 if (nds32->privilege_level == 0)
1296 ((struct nds32_reg *)reg_cache->reg_list[MR7].arch_info)->enable = true;
1297
1298 if ((memory_config->icache.line_size != 0) && (memory_config->dcache.line_size != 0))
1299 ((struct nds32_reg *)reg_cache->reg_list[MR8].arch_info)->enable = true;
1300
1301 if (misc_config->high_speed_memory_port)
1302 ((struct nds32_reg *)reg_cache->reg_list[MR9].arch_info)->enable = true;
1303
1304 if (mr10_exist)
1305 ((struct nds32_reg *)reg_cache->reg_list[MR10].arch_info)->enable = true;
1306
1307 if (misc_config->edm) {
1308 int dr_reg_n = nds32->edm.breakpoint_num * 5;
1309
1310 for (int i = 0 ; i < dr_reg_n ; i++)
1311 ((struct nds32_reg *)reg_cache->reg_list[DR0 + i].arch_info)->enable = true;
1312
1313 ((struct nds32_reg *)reg_cache->reg_list[DR41].arch_info)->enable = true;
1314 ((struct nds32_reg *)reg_cache->reg_list[DR43].arch_info)->enable = true;
1315 ((struct nds32_reg *)reg_cache->reg_list[DR44].arch_info)->enable = true;
1316 ((struct nds32_reg *)reg_cache->reg_list[DR45].arch_info)->enable = true;
1317 }
1318
1319 if (misc_config->debug_tracer) {
1320 ((struct nds32_reg *)reg_cache->reg_list[DR46].arch_info)->enable = true;
1321 ((struct nds32_reg *)reg_cache->reg_list[DR47].arch_info)->enable = true;
1322 }
1323
1324 if (misc_config->performance_monitor) {
1325 ((struct nds32_reg *)reg_cache->reg_list[PFR0].arch_info)->enable = true;
1326 ((struct nds32_reg *)reg_cache->reg_list[PFR1].arch_info)->enable = true;
1327 ((struct nds32_reg *)reg_cache->reg_list[PFR2].arch_info)->enable = true;
1328 ((struct nds32_reg *)reg_cache->reg_list[PFR3].arch_info)->enable = true;
1329 }
1330
1331 if (misc_config->local_memory_dma) {
1332 ((struct nds32_reg *)reg_cache->reg_list[DMAR0].arch_info)->enable = true;
1333 ((struct nds32_reg *)reg_cache->reg_list[DMAR1].arch_info)->enable = true;
1334 ((struct nds32_reg *)reg_cache->reg_list[DMAR2].arch_info)->enable = true;
1335 ((struct nds32_reg *)reg_cache->reg_list[DMAR3].arch_info)->enable = true;
1336 ((struct nds32_reg *)reg_cache->reg_list[DMAR4].arch_info)->enable = true;
1337 ((struct nds32_reg *)reg_cache->reg_list[DMAR5].arch_info)->enable = true;
1338 ((struct nds32_reg *)reg_cache->reg_list[DMAR6].arch_info)->enable = true;
1339 ((struct nds32_reg *)reg_cache->reg_list[DMAR7].arch_info)->enable = true;
1340 ((struct nds32_reg *)reg_cache->reg_list[DMAR8].arch_info)->enable = true;
1341 ((struct nds32_reg *)reg_cache->reg_list[DMAR9].arch_info)->enable = true;
1342 ((struct nds32_reg *)reg_cache->reg_list[DMAR10].arch_info)->enable = true;
1343 }
1344
1345 if ((misc_config->local_memory_dma || misc_config->performance_monitor) &&
1346 (no_racr0 == false))
1347 ((struct nds32_reg *)reg_cache->reg_list[RACR].arch_info)->enable = true;
1348
1349 if (cpu_version->cop_fpu_extension || (misc_config->audio_isa != 0))
1350 ((struct nds32_reg *)reg_cache->reg_list[FUCPR].arch_info)->enable = true;
1351
1352 if (misc_config->audio_isa != 0) {
1353 if (misc_config->audio_isa > 1) {
1354 ((struct nds32_reg *)reg_cache->reg_list[D0L24].arch_info)->enable = true;
1355 ((struct nds32_reg *)reg_cache->reg_list[D1L24].arch_info)->enable = true;
1356 }
1357
1358 ((struct nds32_reg *)reg_cache->reg_list[I0].arch_info)->enable = true;
1359 ((struct nds32_reg *)reg_cache->reg_list[I1].arch_info)->enable = true;
1360 ((struct nds32_reg *)reg_cache->reg_list[I2].arch_info)->enable = true;
1361 ((struct nds32_reg *)reg_cache->reg_list[I3].arch_info)->enable = true;
1362 ((struct nds32_reg *)reg_cache->reg_list[I4].arch_info)->enable = true;
1363 ((struct nds32_reg *)reg_cache->reg_list[I5].arch_info)->enable = true;
1364 ((struct nds32_reg *)reg_cache->reg_list[I6].arch_info)->enable = true;
1365 ((struct nds32_reg *)reg_cache->reg_list[I7].arch_info)->enable = true;
1366 ((struct nds32_reg *)reg_cache->reg_list[M1].arch_info)->enable = true;
1367 ((struct nds32_reg *)reg_cache->reg_list[M2].arch_info)->enable = true;
1368 ((struct nds32_reg *)reg_cache->reg_list[M3].arch_info)->enable = true;
1369 ((struct nds32_reg *)reg_cache->reg_list[M5].arch_info)->enable = true;
1370 ((struct nds32_reg *)reg_cache->reg_list[M6].arch_info)->enable = true;
1371 ((struct nds32_reg *)reg_cache->reg_list[M7].arch_info)->enable = true;
1372 ((struct nds32_reg *)reg_cache->reg_list[MOD].arch_info)->enable = true;
1373 ((struct nds32_reg *)reg_cache->reg_list[LBE].arch_info)->enable = true;
1374 ((struct nds32_reg *)reg_cache->reg_list[LE].arch_info)->enable = true;
1375 ((struct nds32_reg *)reg_cache->reg_list[LC].arch_info)->enable = true;
1376 ((struct nds32_reg *)reg_cache->reg_list[ADM_VBASE].arch_info)->enable = true;
1377 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL0].arch_info)->enable = true;
1378 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL1].arch_info)->enable = true;
1379
1380 uint32_t value_mod;
1381 uint32_t fucpr_backup;
1382 /* enable fpu and get configuration */
1383 nds32_get_mapped_reg(nds32, FUCPR, &fucpr_backup);
1384 if ((fucpr_backup & 0x80000000) == 0)
1385 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup | 0x80000000);
1386 nds32_get_mapped_reg(nds32, MOD, &value_mod);
1387 /* restore origin fucpr value */
1388 if ((fucpr_backup & 0x80000000) == 0)
1389 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup);
1390
1391 if ((value_mod >> 6) & 0x1) {
1392 ((struct nds32_reg *)reg_cache->reg_list[CB_CTL].arch_info)->enable = true;
1393 ((struct nds32_reg *)reg_cache->reg_list[CBB0].arch_info)->enable = true;
1394 ((struct nds32_reg *)reg_cache->reg_list[CBB1].arch_info)->enable = true;
1395 ((struct nds32_reg *)reg_cache->reg_list[CBB2].arch_info)->enable = true;
1396 ((struct nds32_reg *)reg_cache->reg_list[CBB3].arch_info)->enable = true;
1397 ((struct nds32_reg *)reg_cache->reg_list[CBE0].arch_info)->enable = true;
1398 ((struct nds32_reg *)reg_cache->reg_list[CBE1].arch_info)->enable = true;
1399 ((struct nds32_reg *)reg_cache->reg_list[CBE2].arch_info)->enable = true;
1400 ((struct nds32_reg *)reg_cache->reg_list[CBE3].arch_info)->enable = true;
1401 }
1402 }
1403
1404 if ((cpu_version->cpu_id_family == 0x9) ||
1405 (cpu_version->cpu_id_family == 0xA) ||
1406 (cpu_version->cpu_id_family == 0xC)) {
1407
1408 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = true;
1409 ((struct nds32_reg *)reg_cache->reg_list[IDR1].arch_info)->enable = true;
1410
1411 if ((cpu_version->cpu_id_family == 0xC) && (cpu_version->revision == 0x0C))
1412 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = false;
1413 }
1414
1415 uint32_t ir3_value;
1416 uint32_t ivb_prog_pri_lvl;
1417 uint32_t ivb_ivic_ver;
1418
1419 nds32_get_mapped_reg(nds32, IR3, &ir3_value);
1420 ivb_prog_pri_lvl = ir3_value & 0x1;
1421 ivb_ivic_ver = (ir3_value >> 11) & 0x3;
1422
1423 if ((ivb_prog_pri_lvl == 1) || (ivb_ivic_ver >= 1)) {
1424 ((struct nds32_reg *)reg_cache->reg_list[IR18].arch_info)->enable = true;
1425 ((struct nds32_reg *)reg_cache->reg_list[IR19].arch_info)->enable = true;
1426 }
1427
1428 if (ivb_ivic_ver >= 1) {
1429 ((struct nds32_reg *)reg_cache->reg_list[IR26].arch_info)->enable = true;
1430 ((struct nds32_reg *)reg_cache->reg_list[IR27].arch_info)->enable = true;
1431 ((struct nds32_reg *)reg_cache->reg_list[IR28].arch_info)->enable = true;
1432 ((struct nds32_reg *)reg_cache->reg_list[IR29].arch_info)->enable = true;
1433 }
1434
1435 return ERROR_OK;
1436 }
1437
1438 int nds32_init_register_table(struct nds32 *nds32)
1439 {
1440 nds32_init_must_have_registers(nds32);
1441
1442 return ERROR_OK;
1443 }
1444
1445 int nds32_add_software_breakpoint(struct target *target,
1446 struct breakpoint *breakpoint)
1447 {
1448 uint32_t data;
1449 uint32_t check_data;
1450 uint32_t break_insn;
1451
1452 /* check the breakpoint size */
1453 target->type->read_buffer(target, breakpoint->address, 4, (uint8_t *)&data);
1454
1455 /* backup origin instruction
1456 * instruction is big-endian */
1457 if (*(char *)&data & 0x80) { /* 16-bits instruction */
1458 breakpoint->length = 2;
1459 break_insn = NDS32_BREAK_16;
1460 } else { /* 32-bits instruction */
1461 breakpoint->length = 4;
1462 break_insn = NDS32_BREAK_32;
1463 }
1464
1465 if (breakpoint->orig_instr != NULL)
1466 free(breakpoint->orig_instr);
1467
1468 breakpoint->orig_instr = malloc(breakpoint->length);
1469 memcpy(breakpoint->orig_instr, &data, breakpoint->length);
1470
1471 /* self-modified code */
1472 target->type->write_buffer(target, breakpoint->address, breakpoint->length, (const uint8_t *)&break_insn);
1473 /* write_back & invalidate dcache & invalidate icache */
1474 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1475
1476 /* read back to check */
1477 target->type->read_buffer(target, breakpoint->address, breakpoint->length, (uint8_t *)&check_data);
1478 if (memcmp(&check_data, &break_insn, breakpoint->length) == 0)
1479 return ERROR_OK;
1480
1481 return ERROR_FAIL;
1482 }
1483
1484 int nds32_remove_software_breakpoint(struct target *target,
1485 struct breakpoint *breakpoint)
1486 {
1487 uint32_t check_data;
1488 uint32_t break_insn;
1489
1490 if (breakpoint->length == 2)
1491 break_insn = NDS32_BREAK_16;
1492 else if (breakpoint->length == 4)
1493 break_insn = NDS32_BREAK_32;
1494 else
1495 return ERROR_FAIL;
1496
1497 target->type->read_buffer(target, breakpoint->address, breakpoint->length,
1498 (uint8_t *)&check_data);
1499
1500 /* break instruction is modified */
1501 if (memcmp(&check_data, &break_insn, breakpoint->length) != 0)
1502 return ERROR_FAIL;
1503
1504 /* self-modified code */
1505 target->type->write_buffer(target, breakpoint->address, breakpoint->length,
1506 breakpoint->orig_instr);
1507
1508 /* write_back & invalidate dcache & invalidate icache */
1509 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1510
1511 return ERROR_OK;
1512 }
1513
1514 /**
1515 * Restore the processor context on an Andes target. The full processor
1516 * context is analyzed to see if any of the registers are dirty on this end, but
1517 * have a valid new value. If this is the case, the processor is changed to the
1518 * appropriate mode and the new register values are written out to the
1519 * processor. If there happens to be a dirty register with an invalid value, an
1520 * error will be logged.
1521 *
1522 * @param target Pointer to the Andes target to have its context restored
1523 * @return Error status if the target is not halted.
1524 */
1525 int nds32_restore_context(struct target *target)
1526 {
1527 struct nds32 *nds32 = target_to_nds32(target);
1528 struct aice_port_s *aice = target_to_aice(target);
1529 struct reg_cache *reg_cache = nds32->core_cache;
1530 struct reg *reg;
1531 struct nds32_reg *reg_arch_info;
1532 unsigned int i;
1533
1534 LOG_DEBUG("-");
1535
1536 if (target->state != TARGET_HALTED) {
1537 LOG_WARNING("target not halted");
1538 return ERROR_TARGET_NOT_HALTED;
1539 }
1540
1541 /* check if there are dirty registers */
1542 for (i = 0; i < reg_cache->num_regs; i++) {
1543 reg = &(reg_cache->reg_list[i]);
1544 if (reg->dirty == true) {
1545 if (reg->valid == true) {
1546
1547 LOG_DEBUG("examining dirty reg: %s", reg->name);
1548 LOG_DEBUG("writing register %i "
1549 "with value 0x%8.8" PRIx32, i, buf_get_u32(reg->value, 0, 32));
1550
1551 reg_arch_info = reg->arch_info;
1552 if (FD0 <= reg_arch_info->num && reg_arch_info->num <= FD31)
1553 aice_write_reg_64(aice, reg_arch_info->num, reg_arch_info->value_64);
1554 else
1555 aice_write_register(aice, reg_arch_info->num, reg_arch_info->value);
1556 reg->valid = true;
1557 reg->dirty = false;
1558 }
1559 }
1560 }
1561
1562 return ERROR_OK;
1563 }
1564
1565 int nds32_edm_config(struct nds32 *nds32)
1566 {
1567 struct target *target = nds32->target;
1568 struct aice_port_s *aice = target_to_aice(target);
1569 uint32_t edm_cfg;
1570 uint32_t edm_ctl;
1571
1572 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CFG, &edm_cfg);
1573
1574 nds32->edm.version = (edm_cfg >> 16) & 0xFFFF;
1575 LOG_INFO("EDM version 0x%04" PRIx32, nds32->edm.version);
1576
1577 nds32->edm.breakpoint_num = (edm_cfg & 0x7) + 1;
1578
1579 if ((nds32->edm.version & 0x1000) || (0x60 <= nds32->edm.version))
1580 nds32->edm.access_control = true;
1581 else
1582 nds32->edm.access_control = false;
1583
1584 if ((edm_cfg >> 4) & 0x1)
1585 nds32->edm.direct_access_local_memory = true;
1586 else
1587 nds32->edm.direct_access_local_memory = false;
1588
1589 if (nds32->edm.version <= 0x20)
1590 nds32->edm.direct_access_local_memory = false;
1591
1592 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &edm_ctl);
1593 if (edm_ctl & (0x1 << 29))
1594 nds32->edm.support_max_stop = true;
1595 else
1596 nds32->edm.support_max_stop = false;
1597
1598 /* set passcode for secure MCU */
1599 nds32_login(nds32);
1600
1601 return ERROR_OK;
1602 }
1603
1604 int nds32_config(struct nds32 *nds32)
1605 {
1606 nds32_init_config(nds32);
1607
1608 /* init optional system registers according to config registers */
1609 nds32_init_option_registers(nds32);
1610
1611 /* get max interrupt level */
1612 if (nds32->misc_config.interruption_level)
1613 nds32->max_interrupt_level = 2;
1614 else
1615 nds32->max_interrupt_level = 3;
1616
1617 /* get ILM/DLM size from MR6/MR7 */
1618 uint32_t value_mr6, value_mr7;
1619 uint32_t size_index;
1620 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
1621 size_index = (value_mr6 >> 1) & 0xF;
1622 nds32->memory.ilm_size = NDS32_LM_SIZE_TABLE[size_index];
1623
1624 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
1625 size_index = (value_mr7 >> 1) & 0xF;
1626 nds32->memory.dlm_size = NDS32_LM_SIZE_TABLE[size_index];
1627
1628 return ERROR_OK;
1629 }
1630
1631 int nds32_init_arch_info(struct target *target, struct nds32 *nds32)
1632 {
1633 target->arch_info = nds32;
1634 nds32->target = target;
1635
1636 nds32->common_magic = NDS32_COMMON_MAGIC;
1637 nds32->init_arch_info_after_halted = false;
1638 nds32->auto_convert_hw_bp = true;
1639 nds32->global_stop = false;
1640 nds32->soft_reset_halt = false;
1641 nds32->edm_passcode = NULL;
1642 nds32->privilege_level = 0;
1643 nds32->boot_time = 1500;
1644 nds32->reset_halt_as_examine = false;
1645 nds32->keep_target_edm_ctl = false;
1646 nds32->word_access_mem = false;
1647 nds32->virtual_hosting = false;
1648 nds32->hit_syscall = false;
1649 nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
1650 nds32->virtual_hosting_errno = 0;
1651 nds32->virtual_hosting_ctrl_c = false;
1652
1653 nds32_reg_init();
1654
1655 if (ERROR_FAIL == nds32_reg_cache_init(target, nds32))
1656 return ERROR_FAIL;
1657
1658 if (ERROR_OK != nds32_init_register_table(nds32))
1659 return ERROR_FAIL;
1660
1661 return ERROR_OK;
1662 }
1663
1664 int nds32_virtual_to_physical(struct target *target, uint32_t address, uint32_t *physical)
1665 {
1666 struct nds32 *nds32 = target_to_nds32(target);
1667
1668 if (nds32->memory.address_translation == false) {
1669 *physical = address;
1670 return ERROR_OK;
1671 }
1672
1673 if (ERROR_OK == nds32_probe_tlb(nds32, address, physical))
1674 return ERROR_OK;
1675
1676 if (ERROR_OK == nds32_walk_page_table(nds32, address, physical))
1677 return ERROR_OK;
1678
1679 return ERROR_FAIL;
1680 }
1681
/**
 * Make self-modified code visible to the CPU: write back and invalidate
 * the D-cache over [address, address+length), then invalidate the
 * I-cache over the same range.
 *
 * The cache line_size field is an encoded exponent: line bytes appear to
 * be 1 << (line_size + 2), matching the shifts below — NDS32_LINE_SIZE_TABLE
 * presumably holds the same decoding (TODO confirm against the table at the
 * top of this file).
 *
 * @param target Target whose caches are synchronized
 * @param address Start of the modified region (virtual address)
 * @param length Number of bytes modified
 * @return ERROR_OK on success, or the first error from a cache operation
 */
int nds32_cache_sync(struct target *target, uint32_t address, uint32_t length)
{
	struct aice_port_s *aice = target_to_aice(target);
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_cache *dcache = &(nds32->memory.dcache);
	struct nds32_cache *icache = &(nds32->memory.icache);
	uint32_t dcache_line_size = NDS32_LINE_SIZE_TABLE[dcache->line_size];
	uint32_t icache_line_size = NDS32_LINE_SIZE_TABLE[icache->line_size];
	uint32_t cur_address;
	int result;
	uint32_t start_line, end_line;
	uint32_t cur_line;

	/* a line_size of 0 means no cache is present, so nothing to sync */
	if ((dcache->line_size != 0) && (dcache->enable == true)) {
		/* address / dcache_line_size */
		start_line = address >> (dcache->line_size + 2);
		/* (address + length - 1) / dcache_line_size */
		end_line = (address + length - 1) >> (dcache->line_size + 2);

		/* walk the range one cache line at a time */
		for (cur_address = address, cur_line = start_line ;
				cur_line <= end_line ;
				cur_address += dcache_line_size, cur_line++) {
			/* D$ write back */
			result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_WB, cur_address);
			if (result != ERROR_OK)
				return result;

			/* D$ invalidate */
			result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_INVAL, cur_address);
			if (result != ERROR_OK)
				return result;
		}
	}

	if ((icache->line_size != 0) && (icache->enable == true)) {
		/* address / icache_line_size */
		start_line = address >> (icache->line_size + 2);
		/* (address + length - 1) / icache_line_size */
		end_line = (address + length - 1) >> (icache->line_size + 2);

		for (cur_address = address, cur_line = start_line ;
				cur_line <= end_line ;
				cur_address += icache_line_size, cur_line++) {
			/* Because PSW.IT is turned off under debug exception, address MUST
			 * be physical address. L1I_VA_INVALIDATE uses PSW.IT to decide
			 * address translation or not. */
			uint32_t physical_addr;
			if (ERROR_FAIL == target->type->virt2phys(target, cur_address,
					&physical_addr))
				return ERROR_FAIL;

			/* I$ invalidate */
			result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1I_VA_INVAL, physical_addr);
			if (result != ERROR_OK)
				return result;
		}
	}

	return ERROR_OK;
}
1742
1743 uint32_t nds32_nextpc(struct nds32 *nds32, int current, uint32_t address)
1744 {
1745 if (!current)
1746 nds32_set_mapped_reg(nds32, PC, address);
1747 else
1748 nds32_get_mapped_reg(nds32, PC, &address);
1749
1750 return address;
1751 }
1752
1753 int nds32_step(struct target *target, int current,
1754 uint32_t address, int handle_breakpoints)
1755 {
1756 LOG_DEBUG("target->state: %s",
1757 target_state_name(target));
1758
1759 if (target->state != TARGET_HALTED) {
1760 LOG_WARNING("target was not halted");
1761 return ERROR_TARGET_NOT_HALTED;
1762 }
1763
1764 struct nds32 *nds32 = target_to_nds32(target);
1765
1766 address = nds32_nextpc(nds32, current, address);
1767
1768 LOG_DEBUG("STEP PC %08" PRIx32 "%s", address, !current ? "!" : "");
1769
1770 /** set DSSIM */
1771 uint32_t ir14_value;
1772 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1773 if (nds32->step_isr_enable)
1774 ir14_value |= (0x1 << 31);
1775 else
1776 ir14_value &= ~(0x1 << 31);
1777 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1778
1779 /* check hit_syscall before leave_debug_state() because
1780 * leave_debug_state() may clear hit_syscall flag */
1781 bool no_step = false;
1782 if (nds32->hit_syscall)
1783 /* step after hit_syscall should be ignored because
1784 * leave_debug_state will step implicitly to skip the
1785 * syscall */
1786 no_step = true;
1787
1788 /********* TODO: maybe create another function to handle this part */
1789 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
1790 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
1791
1792 if (no_step == false) {
1793 struct aice_port_s *aice = target_to_aice(target);
1794 if (ERROR_OK != aice_step(aice))
1795 return ERROR_FAIL;
1796 }
1797
1798 /* save state */
1799 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
1800 /********* TODO: maybe create another function to handle this part */
1801
1802 /* restore DSSIM */
1803 if (nds32->step_isr_enable) {
1804 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1805 ir14_value &= ~(0x1 << 31);
1806 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1807 }
1808
1809 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
1810
1811 return ERROR_OK;
1812 }
1813
1814 static int nds32_step_without_watchpoint(struct nds32 *nds32)
1815 {
1816 struct target *target = nds32->target;
1817
1818 if (target->state != TARGET_HALTED) {
1819 LOG_WARNING("target was not halted");
1820 return ERROR_TARGET_NOT_HALTED;
1821 }
1822
1823 /** set DSSIM */
1824 uint32_t ir14_value;
1825 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1826 if (nds32->step_isr_enable)
1827 ir14_value |= (0x1 << 31);
1828 else
1829 ir14_value &= ~(0x1 << 31);
1830 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1831
1832 /********* TODO: maybe create another function to handle this part */
1833 CHECK_RETVAL(nds32->leave_debug_state(nds32, false));
1834
1835 struct aice_port_s *aice = target_to_aice(target);
1836
1837 if (ERROR_OK != aice_step(aice))
1838 return ERROR_FAIL;
1839
1840 /* save state */
1841 CHECK_RETVAL(nds32->enter_debug_state(nds32, false));
1842 /********* TODO: maybe create another function to handle this part */
1843
1844 /* restore DSSIM */
1845 if (nds32->step_isr_enable) {
1846 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1847 ir14_value &= ~(0x1 << 31);
1848 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1849 }
1850
1851 return ERROR_OK;
1852 }
1853
1854 int nds32_target_state(struct nds32 *nds32, enum target_state *state)
1855 {
1856 struct aice_port_s *aice = target_to_aice(nds32->target);
1857 enum aice_target_state_s nds32_state;
1858
1859 if (aice_state(aice, &nds32_state) != ERROR_OK)
1860 return ERROR_FAIL;
1861
1862 switch (nds32_state) {
1863 case AICE_DISCONNECT:
1864 LOG_INFO("USB is disconnected");
1865 return ERROR_FAIL;
1866 case AICE_TARGET_DETACH:
1867 LOG_INFO("Target is disconnected");
1868 return ERROR_FAIL;
1869 case AICE_TARGET_UNKNOWN:
1870 *state = TARGET_UNKNOWN;
1871 break;
1872 case AICE_TARGET_RUNNING:
1873 *state = TARGET_RUNNING;
1874 break;
1875 case AICE_TARGET_HALTED:
1876 *state = TARGET_HALTED;
1877 break;
1878 case AICE_TARGET_RESET:
1879 *state = TARGET_RESET;
1880 break;
1881 case AICE_TARGET_DEBUG_RUNNING:
1882 *state = TARGET_DEBUG_RUNNING;
1883 break;
1884 default:
1885 return ERROR_FAIL;
1886 }
1887
1888 return ERROR_OK;
1889 }
1890
/* Classify why the core entered debug mode and record the result in
 * target->debug_reason.  For precise data watchpoints this also performs
 * a single step (without watchpoints) so the watched instruction is not
 * re-hit on resume. */
int nds32_examine_debug_reason(struct nds32 *nds32)
{
	uint32_t reason;
	struct target *target = nds32->target;

	/* a pending virtual-hosting syscall is reported as a breakpoint;
	 * check it first so the hardware reason is not consulted */
	if (nds32->hit_syscall == true) {
		LOG_DEBUG("Hit syscall breakpoint");
		target->debug_reason = DBG_REASON_BREAKPOINT;
		return ERROR_OK;
	}

	/* NOTE(review): return value is ignored and 'reason' is then used to
	 * index nds32_debug_type_name[11] below — presumably get_debug_reason
	 * always yields a value in range; confirm against its implementation */
	nds32->get_debug_reason(nds32, &reason);

	LOG_DEBUG("nds32 examines debug reason: %s", nds32_debug_type_name[reason]);

	/* Examine debug reason */
	switch (reason) {
	case NDS32_DEBUG_BREAK:
	case NDS32_DEBUG_BREAK_16:
	case NDS32_DEBUG_INST_BREAK:
		{
			uint32_t value_pc;
			uint32_t opcode;
			struct nds32_instruction instruction;

			/* decode the instruction at PC; failure to read or
			 * evaluate it is treated as a hard error */
			nds32_get_mapped_reg(nds32, PC, &value_pc);

			if (ERROR_OK != nds32_read_opcode(nds32, value_pc, &opcode))
				return ERROR_FAIL;
			if (ERROR_OK != nds32_evaluate_opcode(nds32, opcode, value_pc,
						&instruction))
				return ERROR_FAIL;

			target->debug_reason = DBG_REASON_BREAKPOINT;
		}
		break;
	case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_PRECISE:
	case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_PRECISE:
	case NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP: /* GLOBAL_STOP is precise exception */
		{
			int result;

			/* capture the faulting data address BEFORE stepping,
			 * since the step changes processor state */
			result = nds32->get_watched_address(nds32,
					&(nds32->watched_address), reason);
			/* do single step(without watchpoints) to skip the "watched" instruction */
			nds32_step_without_watchpoint(nds32);

			/* before single_step, save exception address */
			if (ERROR_OK != result)
				return ERROR_FAIL;

			target->debug_reason = DBG_REASON_WATCHPOINT;
		}
		break;
	case NDS32_DEBUG_DEBUG_INTERRUPT:
		target->debug_reason = DBG_REASON_DBGRQ;
		break;
	case NDS32_DEBUG_HARDWARE_SINGLE_STEP:
		target->debug_reason = DBG_REASON_SINGLESTEP;
		break;
	case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_IMPRECISE:
	case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE:
	case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE:
		/* imprecise/next-precise watchpoints stop after the access,
		 * so no compensating single step is needed here */
		if (ERROR_OK != nds32->get_watched_address(nds32,
					&(nds32->watched_address), reason))
			return ERROR_FAIL;

		target->debug_reason = DBG_REASON_WATCHPOINT;
		break;
	default:
		target->debug_reason = DBG_REASON_UNDEFINED;
		break;
	}

	return ERROR_OK;
}
1967
1968 int nds32_login(struct nds32 *nds32)
1969 {
1970 struct target *target = nds32->target;
1971 struct aice_port_s *aice = target_to_aice(target);
1972 uint32_t passcode_length;
1973 char command_sequence[129];
1974 char command_str[33];
1975 char code_str[9];
1976 uint32_t copy_length;
1977 uint32_t code;
1978 uint32_t i;
1979
1980 LOG_DEBUG("nds32_login");
1981
1982 if (nds32->edm_passcode != NULL) {
1983 /* convert EDM passcode to command sequences */
1984 passcode_length = strlen(nds32->edm_passcode);
1985 command_sequence[0] = '\0';
1986 for (i = 0; i < passcode_length; i += 8) {
1987 if (passcode_length - i < 8)
1988 copy_length = passcode_length - i;
1989 else
1990 copy_length = 8;
1991
1992 strncpy(code_str, nds32->edm_passcode + i, copy_length);
1993 code_str[copy_length] = '\0';
1994 code = strtoul(code_str, NULL, 16);
1995
1996 sprintf(command_str, "write_misc gen_port0 0x%x;", code);
1997 strcat(command_sequence, command_str);
1998 }
1999
2000 if (ERROR_OK != aice_program_edm(aice, command_sequence))
2001 return ERROR_FAIL;
2002
2003 /* get current privilege level */
2004 uint32_t value_edmsw;
2005 aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &value_edmsw);
2006 nds32->privilege_level = (value_edmsw >> 16) & 0x3;
2007 LOG_INFO("Current privilege level: %d", nds32->privilege_level);
2008 }
2009
2010 if (nds32_edm_ops_num > 0) {
2011 const char *reg_name;
2012 for (i = 0 ; i < nds32_edm_ops_num ; i++) {
2013 code = nds32_edm_ops[i].value;
2014 if (nds32_edm_ops[i].reg_no == 6)
2015 reg_name = "gen_port0";
2016 else if (nds32_edm_ops[i].reg_no == 7)
2017 reg_name = "gen_port1";
2018 else
2019 return ERROR_FAIL;
2020
2021 sprintf(command_str, "write_misc %s 0x%x;", reg_name, code);
2022 if (ERROR_OK != aice_program_edm(aice, command_str))
2023 return ERROR_FAIL;
2024 }
2025 }
2026
2027 return ERROR_OK;
2028 }
2029
2030 int nds32_halt(struct target *target)
2031 {
2032 struct nds32 *nds32 = target_to_nds32(target);
2033 struct aice_port_s *aice = target_to_aice(target);
2034 enum target_state state;
2035
2036 LOG_DEBUG("target->state: %s",
2037 target_state_name(target));
2038
2039 if (target->state == TARGET_HALTED) {
2040 LOG_DEBUG("target was already halted");
2041 return ERROR_OK;
2042 }
2043
2044 if (nds32_target_state(nds32, &state) != ERROR_OK)
2045 return ERROR_FAIL;
2046
2047 if (TARGET_HALTED != state)
2048 /* TODO: if state == TARGET_HALTED, check ETYPE is DBGI or not */
2049 if (ERROR_OK != aice_halt(aice))
2050 return ERROR_FAIL;
2051
2052 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
2053
2054 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
2055
2056 return ERROR_OK;
2057 }
2058
2059 /* poll current target status */
2060 int nds32_poll(struct target *target)
2061 {
2062 struct nds32 *nds32 = target_to_nds32(target);
2063 enum target_state state;
2064
2065 if (nds32_target_state(nds32, &state) != ERROR_OK)
2066 return ERROR_FAIL;
2067
2068 if (state == TARGET_HALTED) {
2069 if (target->state != TARGET_HALTED) {
2070 /* if false_hit, continue free_run */
2071 if (ERROR_OK != nds32->enter_debug_state(nds32, true)) {
2072 struct aice_port_s *aice = target_to_aice(target);
2073 aice_run(aice);
2074 return ERROR_OK;
2075 }
2076
2077 LOG_DEBUG("Change target state to TARGET_HALTED.");
2078
2079 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
2080 }
2081 } else if (state == TARGET_RESET) {
2082 if (target->state == TARGET_HALTED) {
2083 /* similar to assert srst */
2084 register_cache_invalidate(nds32->core_cache);
2085 target->state = TARGET_RESET;
2086
2087 /* TODO: deassert srst */
2088 } else if (target->state == TARGET_RUNNING) {
2089 /* reset as running */
2090 LOG_WARNING("<-- TARGET WARNING! The debug target has been reset. -->");
2091 }
2092 } else {
2093 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2094 LOG_DEBUG("Change target state to TARGET_RUNNING.");
2095 target->state = TARGET_RUNNING;
2096 target->debug_reason = DBG_REASON_NOTHALTED;
2097 }
2098 }
2099
2100 return ERROR_OK;
2101 }
2102
2103 int nds32_resume(struct target *target, int current,
2104 uint32_t address, int handle_breakpoints, int debug_execution)
2105 {
2106 LOG_DEBUG("current %d address %08x handle_breakpoints %d debug_execution %d",
2107 current, address, handle_breakpoints, debug_execution);
2108
2109 struct nds32 *nds32 = target_to_nds32(target);
2110
2111 if (target->state != TARGET_HALTED) {
2112 LOG_ERROR("Target not halted");
2113 return ERROR_TARGET_NOT_HALTED;
2114 }
2115
2116 address = nds32_nextpc(nds32, current, address);
2117
2118 LOG_DEBUG("RESUME PC %08" PRIx32 "%s", address, !current ? "!" : "");
2119
2120 if (!debug_execution)
2121 target_free_all_working_areas(target);
2122
2123 /* Disable HSS to avoid users misuse HSS */
2124 if (nds32_reach_max_interrupt_level(nds32) == false) {
2125 uint32_t value_ir0;
2126 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
2127 value_ir0 &= ~(0x1 << 11);
2128 nds32_set_mapped_reg(nds32, IR0, value_ir0);
2129 }
2130
2131 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
2132 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
2133
2134 if (nds32->virtual_hosting_ctrl_c == false) {
2135 struct aice_port_s *aice = target_to_aice(target);
2136 aice_run(aice);
2137 } else
2138 nds32->virtual_hosting_ctrl_c = false;
2139
2140 target->debug_reason = DBG_REASON_NOTHALTED;
2141 if (!debug_execution)
2142 target->state = TARGET_RUNNING;
2143 else
2144 target->state = TARGET_DEBUG_RUNNING;
2145
2146 LOG_DEBUG("target->state: %s",
2147 target_state_name(target));
2148
2149 return ERROR_OK;
2150 }
2151
2152 int nds32_assert_reset(struct target *target)
2153 {
2154 struct nds32 *nds32 = target_to_nds32(target);
2155 struct aice_port_s *aice = target_to_aice(target);
2156
2157 jtag_poll_set_enabled(true);
2158
2159 if (target->reset_halt) {
2160 if (nds32->soft_reset_halt)
2161 target->type->soft_reset_halt(target);
2162 else
2163 aice_assert_srst(aice, AICE_RESET_HOLD);
2164 } else {
2165 aice_assert_srst(aice, AICE_SRST);
2166 alive_sleep(nds32->boot_time);
2167 }
2168
2169 /* set passcode for secure MCU after core reset */
2170 nds32_login(nds32);
2171
2172 /* registers are now invalid */
2173 register_cache_invalidate(nds32->core_cache);
2174
2175 target->state = TARGET_RESET;
2176
2177 return ERROR_OK;
2178 }
2179
/* Saved copy of the target's EDM_CTL register, captured at gdb attach
 * when keep_target_edm_ctl is set and written back at gdb detach. */
static uint32_t nds32_backup_edm_ctl;
/* True while a gdb client is attached; guards attach/detach handling. */
static bool gdb_attached;
2182
2183 static int nds32_gdb_attach(struct nds32 *nds32)
2184 {
2185 LOG_DEBUG("nds32_gdb_attach");
2186
2187 if (gdb_attached == false) {
2188
2189 if (nds32->keep_target_edm_ctl) {
2190 /* backup target EDM_CTL */
2191 struct aice_port_s *aice = target_to_aice(nds32->target);
2192 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &nds32_backup_edm_ctl);
2193 }
2194
2195 target_halt(nds32->target);
2196 target_poll(nds32->target);
2197
2198 gdb_attached = true;
2199 }
2200
2201 return ERROR_OK;
2202 }
2203
2204 static int nds32_gdb_detach(struct nds32 *nds32)
2205 {
2206 LOG_DEBUG("nds32_gdb_detach");
2207 bool backup_virtual_hosting_setting;
2208
2209 if (gdb_attached) {
2210
2211 backup_virtual_hosting_setting = nds32->virtual_hosting;
2212 /* turn off virtual hosting before resume as gdb-detach */
2213 nds32->virtual_hosting = false;
2214 target_resume(nds32->target, 1, 0, 0, 0);
2215 nds32->virtual_hosting = backup_virtual_hosting_setting;
2216
2217 if (nds32->keep_target_edm_ctl) {
2218 /* restore target EDM_CTL */
2219 struct aice_port_s *aice = target_to_aice(nds32->target);
2220 aice_write_debug_reg(aice, NDS_EDM_SR_EDM_CTL, nds32_backup_edm_ctl);
2221 }
2222
2223 /* turn off polling */
2224 jtag_poll_set_enabled(false);
2225
2226 gdb_attached = false;
2227 }
2228
2229 return ERROR_OK;
2230 }
2231
2232 static int nds32_callback_event_handler(struct target *target,
2233 enum target_event event, void *priv)
2234 {
2235 int retval = ERROR_OK;
2236 struct nds32 *nds32 = priv;
2237
2238 switch (event) {
2239 case TARGET_EVENT_GDB_ATTACH:
2240 retval = nds32_gdb_attach(nds32);
2241 break;
2242 case TARGET_EVENT_GDB_DETACH:
2243 retval = nds32_gdb_detach(nds32);
2244 break;
2245 default:
2246 break;
2247 }
2248
2249 return retval;
2250 }
2251
/* One-time initialization that needs no target communication: default
 * the memory access channel to CPU, keep JTAG polling off until a gdb
 * client attaches, and hook the attach/detach event handler. */
int nds32_init(struct nds32 *nds32)
{
	/* Initialize anything we can set up without talking to the target */
	nds32->memory.access_channel = NDS_MEMORY_ACC_CPU;

	/* turn off polling by default; nds32_gdb_attach re-enables it
	 * indirectly via nds32_assert_reset/jtag_poll_set_enabled */
	jtag_poll_set_enabled(false);

	/* register event callback */
	target_register_event_callback(nds32_callback_event_handler, nds32);

	return ERROR_OK;
}
2265
2266 int nds32_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
2267 {
2268 /* fill syscall parameters to file-I/O info */
2269 if (NULL == fileio_info) {
2270 LOG_ERROR("Target has not initial file-I/O data structure");
2271 return ERROR_FAIL;
2272 }
2273
2274 struct nds32 *nds32 = target_to_nds32(target);
2275 uint32_t value_ir6;
2276 uint32_t syscall_id;
2277
2278 if (nds32->hit_syscall == false)
2279 return ERROR_FAIL;
2280
2281 nds32_get_mapped_reg(nds32, IR6, &value_ir6);
2282 syscall_id = (value_ir6 >> 16) & 0x7FFF;
2283 nds32->active_syscall_id = syscall_id;
2284
2285 LOG_DEBUG("hit syscall ID: 0x%x", syscall_id);
2286
2287 /* free previous identifier storage */
2288 if (NULL != fileio_info->identifier) {
2289 free(fileio_info->identifier);
2290 fileio_info->identifier = NULL;
2291 }
2292
2293 switch (syscall_id) {
2294 case NDS32_SYSCALL_EXIT:
2295 fileio_info->identifier = (char *)malloc(5);
2296 sprintf(fileio_info->identifier, "exit");
2297 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2298 break;
2299 case NDS32_SYSCALL_OPEN:
2300 {
2301 uint8_t filename[256];
2302 fileio_info->identifier = (char *)malloc(5);
2303 sprintf(fileio_info->identifier, "open");
2304 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2305 /* reserve fileio_info->param_2 for length of path */
2306 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_3));
2307 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_4));
2308
2309 target->type->read_buffer(target, fileio_info->param_1,
2310 256, filename);
2311 fileio_info->param_2 = strlen((char *)filename) + 1;
2312 }
2313 break;
2314 case NDS32_SYSCALL_CLOSE:
2315 fileio_info->identifier = (char *)malloc(6);
2316 sprintf(fileio_info->identifier, "close");
2317 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2318 break;
2319 case NDS32_SYSCALL_READ:
2320 fileio_info->identifier = (char *)malloc(5);
2321 sprintf(fileio_info->identifier, "read");
2322 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2323 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2324 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_3));
2325 break;
2326 case NDS32_SYSCALL_WRITE:
2327 fileio_info->identifier = (char *)malloc(6);
2328 sprintf(fileio_info->identifier, "write");
2329 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2330 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2331 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_3));
2332 break;
2333 case NDS32_SYSCALL_LSEEK:
2334 fileio_info->identifier = (char *)malloc(6);
2335 sprintf(fileio_info->identifier, "lseek");
2336 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2337 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2338 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_3));
2339 break;
2340 case NDS32_SYSCALL_UNLINK:
2341 {
2342 uint8_t filename[256];
2343 fileio_info->identifier = (char *)malloc(7);
2344 sprintf(fileio_info->identifier, "unlink");
2345 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2346 /* reserve fileio_info->param_2 for length of path */
2347
2348 target->type->read_buffer(target, fileio_info->param_1,
2349 256, filename);
2350 fileio_info->param_2 = strlen((char *)filename) + 1;
2351 }
2352 break;
2353 case NDS32_SYSCALL_RENAME:
2354 {
2355 uint8_t filename[256];
2356 fileio_info->identifier = (char *)malloc(7);
2357 sprintf(fileio_info->identifier, "rename");
2358 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2359 /* reserve fileio_info->param_2 for length of old path */
2360 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_3));
2361 /* reserve fileio_info->param_4 for length of new path */
2362
2363 target->type->read_buffer(target, fileio_info->param_1,
2364 256, filename);
2365 fileio_info->param_2 = strlen((char *)filename) + 1;
2366
2367 target->type->read_buffer(target, fileio_info->param_3,
2368 256, filename);
2369 fileio_info->param_4 = strlen((char *)filename) + 1;
2370 }
2371 break;
2372 case NDS32_SYSCALL_FSTAT:
2373 fileio_info->identifier = (char *)malloc(6);
2374 sprintf(fileio_info->identifier, "fstat");
2375 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2376 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2377 break;
2378 case NDS32_SYSCALL_STAT:
2379 {
2380 uint8_t filename[256];
2381 fileio_info->identifier = (char *)malloc(5);
2382 sprintf(fileio_info->identifier, "stat");
2383 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2384 /* reserve fileio_info->param_2 for length of old path */
2385 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_3));
2386
2387 target->type->read_buffer(target, fileio_info->param_1,
2388 256, filename);
2389 fileio_info->param_2 = strlen((char *)filename) + 1;
2390 }
2391 break;
2392 case NDS32_SYSCALL_GETTIMEOFDAY:
2393 fileio_info->identifier = (char *)malloc(13);
2394 sprintf(fileio_info->identifier, "gettimeofday");
2395 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2396 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2397 break;
2398 case NDS32_SYSCALL_ISATTY:
2399 fileio_info->identifier = (char *)malloc(7);
2400 sprintf(fileio_info->identifier, "isatty");
2401 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2402 break;
2403 case NDS32_SYSCALL_SYSTEM:
2404 {
2405 uint8_t command[256];
2406 fileio_info->identifier = (char *)malloc(7);
2407 sprintf(fileio_info->identifier, "system");
2408 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2409 /* reserve fileio_info->param_2 for length of old path */
2410
2411 target->type->read_buffer(target, fileio_info->param_1,
2412 256, command);
2413 fileio_info->param_2 = strlen((char *)command) + 1;
2414 }
2415 break;
2416 case NDS32_SYSCALL_ERRNO:
2417 fileio_info->identifier = (char *)malloc(6);
2418 sprintf(fileio_info->identifier, "errno");
2419 nds32_set_mapped_reg(nds32, R0, nds32->virtual_hosting_errno);
2420 break;
2421 default:
2422 fileio_info->identifier = (char *)malloc(8);
2423 sprintf(fileio_info->identifier, "unknown");
2424 break;
2425 }
2426
2427 return ERROR_OK;
2428 }
2429
2430 int nds32_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
2431 {
2432 LOG_DEBUG("syscall return code: 0x%x, errno: 0x%x, ctrl_c: %s",
2433 retcode, fileio_errno, ctrl_c ? "true" : "false");
2434
2435 struct nds32 *nds32 = target_to_nds32(target);
2436
2437 nds32_set_mapped_reg(nds32, R0, (uint32_t)retcode);
2438
2439 nds32->virtual_hosting_errno = fileio_errno;
2440 nds32->virtual_hosting_ctrl_c = ctrl_c;
2441 nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
2442
2443 return ERROR_OK;
2444 }
2445
2446 int nds32_gdb_fileio_write_memory(struct nds32 *nds32, uint32_t address,
2447 uint32_t size, const uint8_t *buffer)
2448 {
2449 if ((NDS32_SYSCALL_FSTAT == nds32->active_syscall_id) ||
2450 (NDS32_SYSCALL_STAT == nds32->active_syscall_id)) {
2451 /* If doing GDB file-I/O, target should convert 'struct stat'
2452 * from gdb-format to target-format */
2453 uint8_t stat_buffer[NDS32_STRUCT_STAT_SIZE];
2454 /* st_dev 2 */
2455 stat_buffer[0] = buffer[3];
2456 stat_buffer[1] = buffer[2];
2457 /* st_ino 2 */
2458 stat_buffer[2] = buffer[7];
2459 stat_buffer[3] = buffer[6];
2460 /* st_mode 4 */
2461 stat_buffer[4] = buffer[11];
2462 stat_buffer[5] = buffer[10];
2463 stat_buffer[6] = buffer[9];
2464 stat_buffer[7] = buffer[8];
2465 /* st_nlink 2 */
2466 stat_buffer[8] = buffer[15];
2467 stat_buffer[9] = buffer[16];
2468 /* st_uid 2 */
2469 stat_buffer[10] = buffer[19];
2470 stat_buffer[11] = buffer[18];
2471 /* st_gid 2 */
2472 stat_buffer[12] = buffer[23];
2473 stat_buffer[13] = buffer[22];
2474 /* st_rdev 2 */
2475 stat_buffer[14] = buffer[27];
2476 stat_buffer[15] = buffer[26];
2477 /* st_size 4 */
2478 stat_buffer[16] = buffer[35];
2479 stat_buffer[17] = buffer[34];
2480 stat_buffer[18] = buffer[33];
2481 stat_buffer[19] = buffer[32];
2482 /* st_atime 4 */
2483 stat_buffer[20] = buffer[55];
2484 stat_buffer[21] = buffer[54];
2485 stat_buffer[22] = buffer[53];
2486 stat_buffer[23] = buffer[52];
2487 /* st_spare1 4 */
2488 stat_buffer[24] = 0;
2489 stat_buffer[25] = 0;
2490 stat_buffer[26] = 0;
2491 stat_buffer[27] = 0;
2492 /* st_mtime 4 */
2493 stat_buffer[28] = buffer[59];
2494 stat_buffer[29] = buffer[58];
2495 stat_buffer[30] = buffer[57];
2496 stat_buffer[31] = buffer[56];
2497 /* st_spare2 4 */
2498 stat_buffer[32] = 0;
2499 stat_buffer[33] = 0;
2500 stat_buffer[34] = 0;
2501 stat_buffer[35] = 0;
2502 /* st_ctime 4 */
2503 stat_buffer[36] = buffer[63];
2504 stat_buffer[37] = buffer[62];
2505 stat_buffer[38] = buffer[61];
2506 stat_buffer[39] = buffer[60];
2507 /* st_spare3 4 */
2508 stat_buffer[40] = 0;
2509 stat_buffer[41] = 0;
2510 stat_buffer[42] = 0;
2511 stat_buffer[43] = 0;
2512 /* st_blksize 4 */
2513 stat_buffer[44] = buffer[43];
2514 stat_buffer[45] = buffer[42];
2515 stat_buffer[46] = buffer[41];
2516 stat_buffer[47] = buffer[40];
2517 /* st_blocks 4 */
2518 stat_buffer[48] = buffer[51];
2519 stat_buffer[49] = buffer[50];
2520 stat_buffer[50] = buffer[49];
2521 stat_buffer[51] = buffer[48];
2522 /* st_spare4 8 */
2523 stat_buffer[52] = 0;
2524 stat_buffer[53] = 0;
2525 stat_buffer[54] = 0;
2526 stat_buffer[55] = 0;
2527 stat_buffer[56] = 0;
2528 stat_buffer[57] = 0;
2529 stat_buffer[58] = 0;
2530 stat_buffer[59] = 0;
2531
2532 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_STAT_SIZE, stat_buffer);
2533 } else if (NDS32_SYSCALL_GETTIMEOFDAY == nds32->active_syscall_id) {
2534 /* If doing GDB file-I/O, target should convert 'struct timeval'
2535 * from gdb-format to target-format */
2536 uint8_t timeval_buffer[NDS32_STRUCT_TIMEVAL_SIZE];
2537 timeval_buffer[0] = buffer[3];
2538 timeval_buffer[1] = buffer[2];
2539 timeval_buffer[2] = buffer[1];
2540 timeval_buffer[3] = buffer[0];
2541 timeval_buffer[4] = buffer[11];
2542 timeval_buffer[5] = buffer[10];
2543 timeval_buffer[6] = buffer[9];
2544 timeval_buffer[7] = buffer[8];
2545
2546 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_TIMEVAL_SIZE, timeval_buffer);
2547 }
2548
2549 return nds32_write_buffer(nds32->target, address, size, buffer);
2550 }
2551
2552 int nds32_reset_halt(struct nds32 *nds32)
2553 {
2554 LOG_INFO("reset halt as init");
2555
2556 struct aice_port_s *aice = target_to_aice(nds32->target);
2557 aice_assert_srst(aice, AICE_RESET_HOLD);
2558
2559 return ERROR_OK;
2560 }