nds32: add new target type nds32_v2, nds32_v3, nds32_v3m
[openocd.git] / src / target / nds32.c
1 /***************************************************************************
2 * Copyright (C) 2013 Andes Technology *
3 * Hsiangkai Wang <hkwang@andestech.com> *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
19 ***************************************************************************/
20
21 #ifdef HAVE_CONFIG_H
22 #include "config.h"
23 #endif
24
25 #include <helper/log.h>
26 #include <helper/binarybuffer.h>
27 #include "nds32.h"
28 #include "nds32_aice.h"
29 #include "nds32_tlb.h"
30 #include "nds32_disassembler.h"
31
32 const int NDS32_BREAK_16 = 0x00EA; /* 0xEA00 */
33 const int NDS32_BREAK_32 = 0x0A000064; /* 0x6400000A */
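/* Note: both break opcodes above are kept byte-swapped (assuming a
 * little-endian host): nds32_add_software_breakpoint() below writes the
 * value byte-for-byte into the big-endian instruction stream, so the bytes
 * land in memory as the architectural encodings shown in the comments
 * (0xEA00 and 0x6400000A). */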
34
35 struct nds32_edm_operation nds32_edm_ops[NDS32_EDM_OPERATION_MAX_NUM];
36 uint32_t nds32_edm_ops_num;
37
38 const char *nds32_debug_type_name[11] = {
39 "SOFTWARE BREAK",
40 "SOFTWARE BREAK_16",
41 "HARDWARE BREAKPOINT",
42 "DATA ADDR WATCHPOINT PRECISE",
43 "DATA VALUE WATCHPOINT PRECISE",
44 "DATA VALUE WATCHPOINT IMPRECISE",
45 "DEBUG INTERRUPT",
46 "HARDWARE SINGLE STEP",
47 "DATA ADDR WATCHPOINT NEXT PRECISE",
48 "DATA VALUE WATCHPOINT NEXT PRECISE",
49 "LOAD STORE GLOBAL STOP",
50 };
51
52 static const int NDS32_LM_SIZE_TABLE[16] = {
53 4 * 1024,
54 8 * 1024,
55 16 * 1024,
56 32 * 1024,
57 64 * 1024,
58 128 * 1024,
59 256 * 1024,
60 512 * 1024,
61 1024 * 1024,
62 1 * 1024,
63 2 * 1024,
64 };
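/* NDS32_LM_SIZE_TABLE maps the 4-bit local-memory size field of $mr6/$mr7
 * to a size in bytes; indexes without an initializer stay 0. A minimal
 * usage sketch, mirroring nds32_config() below:
 *
 *   size_index = (value_mr6 >> 1) & 0xF;
 *   nds32->memory.ilm_size = NDS32_LM_SIZE_TABLE[size_index];
 */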
65
66 static const int NDS32_LINE_SIZE_TABLE[6] = {
67 0,
68 8,
69 16,
70 32,
71 64,
72 128,
73 };
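/* NDS32_LINE_SIZE_TABLE translates the 3-bit cache line-size field of
 * $cr1/$cr2 into bytes; index 0 means no cache. nds32_cache_sync() below
 * derives the same sizes arithmetically by shifting with (line_size + 2),
 * i.e. index 1 -> 8 bytes, index 2 -> 16 bytes, and so on. */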
74
75 static int nds32_get_core_reg(struct reg *reg)
76 {
77 int retval;
78 struct nds32_reg *reg_arch_info = reg->arch_info;
79 struct target *target = reg_arch_info->target;
80 struct nds32 *nds32 = target_to_nds32(target);
81 struct aice_port_s *aice = target_to_aice(target);
82
83 if (target->state != TARGET_HALTED) {
84 LOG_ERROR("Target not halted");
85 return ERROR_TARGET_NOT_HALTED;
86 }
87
88 if (reg->valid) {
89 LOG_DEBUG("reading register(cached) %i(%s), value: 0x%8.8" PRIx32,
90 reg_arch_info->num, reg->name, reg_arch_info->value);
91 return ERROR_OK;
92 }
93
94 if (reg_arch_info->enable == false) {
95 reg_arch_info->value = NDS32_REGISTER_DISABLE;
96 retval = ERROR_FAIL;
97 } else {
98 if ((nds32->fpu_enable == false) &&
99 (NDS32_REG_TYPE_FPU == nds32_reg_type(reg_arch_info->num))) {
100 reg_arch_info->value = 0;
101 retval = ERROR_OK;
102 } else if ((nds32->audio_enable == false) &&
103 (NDS32_REG_TYPE_AUMR == nds32_reg_type(reg_arch_info->num))) {
104 reg_arch_info->value = 0;
105 retval = ERROR_OK;
106 } else {
107 retval = aice_read_register(aice,
108 reg_arch_info->num, &(reg_arch_info->value));
109 }
110
111 LOG_DEBUG("reading register %i(%s), value: 0x%8.8" PRIx32,
112 reg_arch_info->num, reg->name, reg_arch_info->value);
113 }
114
115 if (retval == ERROR_OK) {
116 reg->valid = true;
117 reg->dirty = false;
118 }
119
120 return retval;
121 }
122
123 static int nds32_get_core_reg_64(struct reg *reg)
124 {
125 int retval;
126 struct nds32_reg *reg_arch_info = reg->arch_info;
127 struct target *target = reg_arch_info->target;
128 struct nds32 *nds32 = target_to_nds32(target);
129 struct aice_port_s *aice = target_to_aice(target);
130
131 if (target->state != TARGET_HALTED) {
132 LOG_ERROR("Target not halted");
133 return ERROR_TARGET_NOT_HALTED;
134 }
135
136 if (reg->valid)
137 return ERROR_OK;
138
139 if (reg_arch_info->enable == false) {
140 reg_arch_info->value_64 = NDS32_REGISTER_DISABLE;
141 retval = ERROR_FAIL;
142 } else {
143 if ((nds32->fpu_enable == false) &&
144 ((FD0 <= reg_arch_info->num) && (reg_arch_info->num <= FD31))) {
145 reg_arch_info->value_64 = 0;
146 retval = ERROR_OK;
147 } else {
148 retval = aice_read_reg_64(aice, reg_arch_info->num,
149 &(reg_arch_info->value_64));
150 }
151 }
152
153 if (retval == ERROR_OK) {
154 reg->valid = true;
155 reg->dirty = false;
156 }
157
158 return retval;
159 }
160
161 static int nds32_update_psw(struct nds32 *nds32)
162 {
163 uint32_t value_ir0;
164 struct aice_port_s *aice = target_to_aice(nds32->target);
165
166 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
167
168 /* Save data memory endian */
169 if ((value_ir0 >> 5) & 0x1) {
170 nds32->data_endian = TARGET_BIG_ENDIAN;
171 aice_set_data_endian(aice, AICE_BIG_ENDIAN);
172 } else {
173 nds32->data_endian = TARGET_LITTLE_ENDIAN;
174 aice_set_data_endian(aice, AICE_LITTLE_ENDIAN);
175 }
176
177 /* Save translation status */
178 nds32->memory.address_translation = ((value_ir0 >> 7) & 0x1) ? true : false;
179
180 return ERROR_OK;
181 }
182
183 static int nds32_update_mmu_info(struct nds32 *nds32)
184 {
185 uint32_t value;
186
187 /* Update MMU control status */
188 nds32_get_mapped_reg(nds32, MR0, &value);
189 nds32->mmu_config.default_min_page_size = value & 0x1;
190 nds32->mmu_config.multiple_page_size_in_use = (value >> 10) & 0x1;
191
192 return ERROR_OK;
193 }
194
195 static int nds32_update_cache_info(struct nds32 *nds32)
196 {
197 uint32_t value;
198
199 if (ERROR_OK == nds32_get_mapped_reg(nds32, MR8, &value)) {
200 if (value & 0x1)
201 nds32->memory.icache.enable = true;
202 else
203 nds32->memory.icache.enable = false;
204
205 if (value & 0x2)
206 nds32->memory.dcache.enable = true;
207 else
208 nds32->memory.dcache.enable = false;
209 } else {
210 nds32->memory.icache.enable = false;
211 nds32->memory.dcache.enable = false;
212 }
213
214 return ERROR_OK;
215 }
216
217 static int nds32_update_lm_info(struct nds32 *nds32)
218 {
219 struct nds32_memory *memory = &(nds32->memory);
220 uint32_t value_mr6;
221 uint32_t value_mr7;
222
223 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
224 if (value_mr6 & 0x1)
225 memory->ilm_enable = true;
226 else
227 memory->ilm_enable = false;
228
229 if (memory->ilm_align_ver == 0) { /* 1MB aligned */
230 memory->ilm_start = value_mr6 & 0xFFF00000;
231 memory->ilm_end = memory->ilm_start + memory->ilm_size;
232 } else if (memory->ilm_align_ver == 1) { /* aligned to local memory size */
233 memory->ilm_start = value_mr6 & 0xFFFFFC00;
234 memory->ilm_end = memory->ilm_start + memory->ilm_size;
235 } else {
236 memory->ilm_start = -1;
237 memory->ilm_end = -1;
238 }
239
240 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
241 if (value_mr7 & 0x1)
242 memory->dlm_enable = true;
243 else
244 memory->dlm_enable = false;
245
246 if (memory->dlm_align_ver == 0) { /* 1MB aligned */
247 memory->dlm_start = value_mr7 & 0xFFF00000;
248 memory->dlm_end = memory->dlm_start + memory->dlm_size;
249 } else if (memory->dlm_align_ver == 1) { /* aligned to local memory size */
250 memory->dlm_start = value_mr7 & 0xFFFFFC00;
251 memory->dlm_end = memory->dlm_start + memory->dlm_size;
252 } else {
253 memory->dlm_start = -1;
254 memory->dlm_end = -1;
255 }
256
257 return ERROR_OK;
258 }
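/* Illustrative example of the two alignment versions handled above (the
 * register value is made up): with $mr6 == 0x00403001, ilm_align_ver == 0
 * gives an ILM base of 0x00400000 (1MB aligned), while ilm_align_ver == 1
 * gives 0x00403000 (aligned to the local memory size). */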
259
260 /**
261 * If the FPU or audio extension is disabled, accessing FPU/audio registers
262 * raises exceptions. So we check whether FPU/audio is enabled whenever the
263 * target halts. While FPU/audio is disabled, user accesses to FPU/audio
264 * registers make OpenOCD return a fake value of 0 instead of reading the
265 * registers through DIM.
266 */
267 static int nds32_check_extension(struct nds32 *nds32)
268 {
269 uint32_t value;
270
271 nds32_get_mapped_reg(nds32, FUCPR, &value);
272 if (value == NDS32_REGISTER_DISABLE) {
273 nds32->fpu_enable = false;
274 nds32->audio_enable = false;
275 return ERROR_OK;
276 }
277
278 if (value & 0x1)
279 nds32->fpu_enable = true;
280 else
281 nds32->fpu_enable = false;
282
283 if (value & 0x80000000)
284 nds32->audio_enable = true;
285 else
286 nds32->audio_enable = false;
287
288 return ERROR_OK;
289 }
290
291 static int nds32_set_core_reg(struct reg *reg, uint8_t *buf)
292 {
293 struct nds32_reg *reg_arch_info = reg->arch_info;
294 struct target *target = reg_arch_info->target;
295 struct nds32 *nds32 = target_to_nds32(target);
296 struct aice_port_s *aice = target_to_aice(target);
297 uint32_t value = buf_get_u32(buf, 0, 32);
298
299 if (target->state != TARGET_HALTED) {
300 LOG_ERROR("Target not halted");
301 return ERROR_TARGET_NOT_HALTED;
302 }
303
304 /* ignore values that would generate an exception */
305 if (nds32_reg_exception(reg_arch_info->num, value))
306 return ERROR_OK;
307
308 LOG_DEBUG("writing register %i(%s) with value 0x%8.8" PRIx32,
309 reg_arch_info->num, reg->name, value);
310
311 if ((nds32->fpu_enable == false) &&
312 (NDS32_REG_TYPE_FPU == nds32_reg_type(reg_arch_info->num))) {
313
314 buf_set_u32(reg->value, 0, 32, 0);
315 } else if ((nds32->audio_enable == false) &&
316 (NDS32_REG_TYPE_AUMR == nds32_reg_type(reg_arch_info->num))) {
317
318 buf_set_u32(reg->value, 0, 32, 0);
319 } else {
320 buf_set_u32(reg->value, 0, 32, value);
321 aice_write_register(aice, reg_arch_info->num, reg_arch_info->value);
322
323 /* After writing the register, read the value back from the target
324 * to avoid W1C (write-1-to-clear) inconsistency. */
325 aice_read_register(aice, reg_arch_info->num, &(reg_arch_info->value));
326 }
327
328 reg->valid = true;
329 reg->dirty = false;
330
331 /* make the updated register take effect immediately */
332 if (IR0 == reg_arch_info->num) {
333 nds32_update_psw(nds32);
334 } else if (MR0 == reg_arch_info->num) {
335 nds32_update_mmu_info(nds32);
336 } else if ((MR6 == reg_arch_info->num) || (MR7 == reg_arch_info->num)) {
337 /* update lm information */
338 nds32_update_lm_info(nds32);
339 } else if (MR8 == reg_arch_info->num) {
340 nds32_update_cache_info(nds32);
341 } else if (FUCPR == reg_arch_info->num) {
342 /* update audio/fpu setting */
343 nds32_check_extension(nds32);
344 }
345
346 return ERROR_OK;
347 }
348
349 static int nds32_set_core_reg_64(struct reg *reg, uint8_t *buf)
350 {
351 struct nds32_reg *reg_arch_info = reg->arch_info;
352 struct target *target = reg_arch_info->target;
353 struct nds32 *nds32 = target_to_nds32(target);
354 uint32_t low_part = buf_get_u32(buf, 0, 32);
355 uint32_t high_part = buf_get_u32(buf, 32, 32);
356
357 if (target->state != TARGET_HALTED) {
358 LOG_ERROR("Target not halted");
359 return ERROR_TARGET_NOT_HALTED;
360 }
361
362 if ((nds32->fpu_enable == false) &&
363 ((FD0 <= reg_arch_info->num) && (reg_arch_info->num <= FD31))) {
364
365 buf_set_u32(reg->value, 0, 32, 0);
366 buf_set_u32(reg->value, 32, 32, 0);
367
368 reg->valid = true;
369 reg->dirty = false;
370 } else {
371 buf_set_u32(reg->value, 0, 32, low_part);
372 buf_set_u32(reg->value, 32, 32, high_part);
373
374 reg->valid = true;
375 reg->dirty = true;
376 }
377
378 return ERROR_OK;
379 }
380
381 static const struct reg_arch_type nds32_reg_access_type = {
382 .get = nds32_get_core_reg,
383 .set = nds32_set_core_reg,
384 };
385
386 static const struct reg_arch_type nds32_reg_access_type_64 = {
387 .get = nds32_get_core_reg_64,
388 .set = nds32_set_core_reg_64,
389 };
390
391 static struct reg_cache *nds32_build_reg_cache(struct target *target,
392 struct nds32 *nds32)
393 {
394 struct reg_cache *cache = malloc(sizeof(struct reg_cache));
395 struct reg *reg_list = calloc(TOTAL_REG_NUM, sizeof(struct reg));
396 struct nds32_reg *reg_arch_info = calloc(TOTAL_REG_NUM, sizeof(struct nds32_reg));
397 int i;
398
399 if (!cache || !reg_list || !reg_arch_info) {
400 free(cache);
401 free(reg_list);
402 free(reg_arch_info);
403 return NULL;
404 }
405
406 cache->name = "Andes registers";
407 cache->next = NULL;
408 cache->reg_list = reg_list;
409 cache->num_regs = 0;
410
411 for (i = 0; i < TOTAL_REG_NUM; i++) {
412 reg_arch_info[i].num = i;
413 reg_arch_info[i].target = target;
414 reg_arch_info[i].nds32 = nds32;
415 reg_arch_info[i].enable = false;
416
417 reg_list[i].name = nds32_reg_simple_name(i);
418 reg_list[i].size = nds32_reg_size(i);
419 reg_list[i].arch_info = &reg_arch_info[i];
420
421 if (FD0 <= reg_arch_info[i].num && reg_arch_info[i].num <= FD31) {
422 reg_list[i].value = &(reg_arch_info[i].value_64);
423 reg_list[i].type = &nds32_reg_access_type_64;
424 } else {
425 reg_list[i].value = &(reg_arch_info[i].value);
426 reg_list[i].type = &nds32_reg_access_type;
427 }
428
429 cache->num_regs++;
430 }
431
432 nds32->core_cache = cache;
433
434 return cache;
435 }
436
437 static int nds32_reg_cache_init(struct target *target, struct nds32 *nds32)
438 {
439 struct reg_cache *cache;
440
441 cache = nds32_build_reg_cache(target, nds32);
442 if (!cache)
443 return ERROR_FAIL;
444
445 *register_get_last_cache_p(&target->reg_cache) = cache;
446
447 return ERROR_OK;
448 }
449
450 static struct reg *nds32_reg_current(struct nds32 *nds32, unsigned regnum)
451 {
452 struct reg *r;
453
454 /* Register mapping, pass user-view registers to gdb */
455 int mapped_regnum = nds32->register_map(nds32, regnum);
456 r = nds32->core_cache->reg_list + mapped_regnum;
457
458 return r;
459 }
460
461 int nds32_full_context(struct nds32 *nds32)
462 {
463 uint32_t value, value_ir0;
464
465 /* save $pc & $psw */
466 nds32_get_mapped_reg(nds32, PC, &value);
467 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
468
469 nds32_update_psw(nds32);
470 nds32_update_mmu_info(nds32);
471 nds32_update_cache_info(nds32);
472 nds32_update_lm_info(nds32);
473
474 nds32_check_extension(nds32);
475
476 return ERROR_OK;
477 }
478
479 /** get register value internally */
480 int nds32_get_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t *value)
481 {
482 struct reg_cache *reg_cache = nds32->core_cache;
483 struct reg *r;
484
485 if (regnum > reg_cache->num_regs)
486 return ERROR_FAIL;
487
488 r = nds32_reg_current(nds32, regnum);
489
490 if (ERROR_OK != r->type->get(r))
491 return ERROR_FAIL;
492
493 *value = buf_get_u32(r->value, 0, 32);
494
495 return ERROR_OK;
496 }
497
498 /** set register internally */
499 int nds32_set_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t value)
500 {
501 struct reg_cache *reg_cache = nds32->core_cache;
502 struct reg *r;
503 uint8_t set_value[4];
504
505 if (regnum > reg_cache->num_regs)
506 return ERROR_FAIL;
507
508 r = nds32_reg_current(nds32, regnum);
509
510 buf_set_u32(set_value, 0, 32, value);
511
512 return r->type->set(r, set_value);
513 }
514
515 /** get the list of all registers */
516 int nds32_get_gdb_reg_list(struct target *target,
517 struct reg **reg_list[], int *reg_list_size)
518 {
519 struct nds32 *nds32 = target_to_nds32(target);
520 struct reg_cache *reg_cache = nds32->core_cache;
521 unsigned int i;
522
523 *reg_list_size = reg_cache->num_regs;
524
525 /** freed in gdb_server.c */
526 *reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));
527
528 for (i = 0; i < reg_cache->num_regs; i++)
529 (*reg_list)[i] = nds32_reg_current(nds32, i);
530
531 return ERROR_OK;
532 }
533
534 static int nds32_select_memory_mode(struct target *target, uint32_t address,
535 uint32_t length, uint32_t *end_address)
536 {
537 struct nds32 *nds32 = target_to_nds32(target);
538 struct aice_port_s *aice = target_to_aice(target);
539 struct nds32_memory *memory = &(nds32->memory);
540 struct nds32_edm *edm = &(nds32->edm);
541 uint32_t dlm_start, dlm_end;
542 uint32_t ilm_start, ilm_end;
543 uint32_t address_end = address + length;
544
545 /* init end_address */
546 *end_address = address_end;
547
548 if (NDS_MEMORY_ACC_CPU == memory->access_channel)
549 return ERROR_OK;
550
551 if (edm->access_control == false) {
552 LOG_DEBUG("EDM does not support ACC_CTL");
553 return ERROR_OK;
554 }
555
556 if (edm->direct_access_local_memory == false) {
557 LOG_DEBUG("EDM does not support DALM");
558 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
559 return ERROR_OK;
560 }
561
562 if (NDS_MEMORY_SELECT_AUTO != memory->mode) {
563 LOG_DEBUG("Memory mode is not AUTO");
564 return ERROR_OK;
565 }
566
567 /* set default mode */
568 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
569
570 if ((memory->ilm_base != 0) && (memory->ilm_enable == true)) {
571 ilm_start = memory->ilm_start;
572 ilm_end = memory->ilm_end;
573
574 /* case 1, address < ilm_start */
575 if (address < ilm_start) {
576 if (ilm_start < address_end) {
577 /* update end_address to split non-ILM from ILM */
578 *end_address = ilm_start;
579 }
580 /* MEM mode */
581 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
582 } else if ((ilm_start <= address) && (address < ilm_end)) {
583 /* case 2, ilm_start <= address < ilm_end */
584 if (ilm_end < address_end) {
585 /* update end_address to split non-ILM from ILM */
586 *end_address = ilm_end;
587 }
588 /* ILM mode */
589 aice_memory_mode(aice, NDS_MEMORY_SELECT_ILM);
590 } else { /* case 3, ilm_end <= address */
591 /* MEM mode */
592 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
593 }
594
595 return ERROR_OK;
596 } else {
597 LOG_DEBUG("ILM is not enabled");
598 }
599
600 if ((memory->dlm_base != 0) && (memory->dlm_enable == true)) {
601 dlm_start = memory->dlm_start;
602 dlm_end = memory->dlm_end;
603
604 /* case 1, address < dlm_start */
605 if (address < dlm_start) {
606 if (dlm_start < address_end) {
607 /* update end_address to split non-DLM from DLM */
608 *end_address = dlm_start;
609 }
610 /* MEM mode */
611 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
612 } else if ((dlm_start <= address) && (address < dlm_end)) {
613 /* case 2, dlm_start <= address < dlm_end */
614 if (dlm_end < address_end) {
615 /* update end_address to split non-DLM from DLM */
616 *end_address = dlm_end;
617 }
618 /* DLM mode */
619 aice_memory_mode(aice, NDS_MEMORY_SELECT_DLM);
620 } else { /* case 3, dlm_end <= address */
621 /* MEM mode */
622 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
623 }
624
625 return ERROR_OK;
626 } else {
627 LOG_DEBUG("DLM is not enabled");
628 }
629
630 return ERROR_OK;
631 }
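/* Worked example of the range splitting above (addresses are illustrative):
 * with ILM enabled and covering 0x00000000-0x00001FFF, a request for
 * address = 0x1F00, length = 0x200 hits case 2, so *end_address is clipped
 * to 0x2000 and ILM mode is selected; the caller (e.g. nds32_read_buffer())
 * performs the access up to 0x2000 and then calls this helper again for
 * 0x2000-0x2100, which is served in MEM mode. */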
632
633 int nds32_read_buffer(struct target *target, uint32_t address,
634 uint32_t size, uint8_t *buffer)
635 {
636 struct nds32 *nds32 = target_to_nds32(target);
637 struct nds32_memory *memory = &(nds32->memory);
638
639 if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
640 (target->state != TARGET_HALTED)) {
641 LOG_WARNING("target was not halted");
642 return ERROR_TARGET_NOT_HALTED;
643 }
644
645 LOG_DEBUG("READ BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
646 address,
647 size);
648
649 int retval = ERROR_OK;
650 struct aice_port_s *aice = target_to_aice(target);
651 uint32_t end_address;
652
653 if (((address % 2) == 0) && (size == 2)) {
654 nds32_select_memory_mode(target, address, 2, &end_address);
655 return aice_read_mem_unit(aice, address, 2, 1, buffer);
656 }
657
658 /* handle unaligned head bytes */
659 if (address % 4) {
660 uint32_t unaligned = 4 - (address % 4);
661
662 if (unaligned > size)
663 unaligned = size;
664
665 nds32_select_memory_mode(target, address, unaligned, &end_address);
666 retval = aice_read_mem_unit(aice, address, 1, unaligned, buffer);
667 if (retval != ERROR_OK)
668 return retval;
669
670 buffer += unaligned;
671 address += unaligned;
672 size -= unaligned;
673 }
674
675 /* handle aligned words */
676 if (size >= 4) {
677 int aligned = size - (size % 4);
678 int read_len;
679
680 do {
681 nds32_select_memory_mode(target, address, aligned, &end_address);
682
683 read_len = end_address - address;
684
685 if (read_len > 8)
686 retval = aice_read_mem_bulk(aice, address, read_len, buffer);
687 else
688 retval = aice_read_mem_unit(aice, address, 4, read_len / 4, buffer);
689
690 if (retval != ERROR_OK)
691 return retval;
692
693 buffer += read_len;
694 address += read_len;
695 size -= read_len;
696 aligned -= read_len;
697
698 } while (aligned != 0);
699 }
700
701 /* prevent byte access when possible (avoid AHB access limitations in some cases) */
702 if (size >= 2) {
703 int aligned = size - (size % 2);
704 nds32_select_memory_mode(target, address, aligned, &end_address);
705 retval = aice_read_mem_unit(aice, address, 2, aligned / 2, buffer);
706 if (retval != ERROR_OK)
707 return retval;
708
709 buffer += aligned;
710 address += aligned;
711 size -= aligned;
712 }
714 /* handle tail reads of less than 4 bytes */
714 if (size > 0) {
715 nds32_select_memory_mode(target, address, size, &end_address);
716 retval = aice_read_mem_unit(aice, address, 1, size, buffer);
717 if (retval != ERROR_OK)
718 return retval;
719 }
720
721 return ERROR_OK;
722 }
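/* Illustrative decomposition performed by the function above (assuming the
 * range does not cross an ILM/DLM boundary): reading 10 bytes from address
 * 0x1001 becomes a 3-byte unaligned head, one aligned 4-byte word, one
 * 2-byte halfword, and a final single byte. */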
723
724 int nds32_read_memory(struct target *target, uint32_t address,
725 uint32_t size, uint32_t count, uint8_t *buffer)
726 {
727 struct aice_port_s *aice = target_to_aice(target);
728
729 return aice_read_mem_unit(aice, address, size, count, buffer);
730 }
731
732 int nds32_read_phys_memory(struct target *target, uint32_t address,
733 uint32_t size, uint32_t count, uint8_t *buffer)
734 {
735 struct aice_port_s *aice = target_to_aice(target);
736 struct nds32 *nds32 = target_to_nds32(target);
737 struct nds32_memory *memory = &(nds32->memory);
738 enum nds_memory_access orig_channel;
739 int result;
740
741 /* switch to BUS access mode to skip MMU */
742 orig_channel = memory->access_channel;
743 memory->access_channel = NDS_MEMORY_ACC_BUS;
744 aice_memory_access(aice, memory->access_channel);
745
746 /* The input address is a physical address; no address translation is needed. */
747 result = aice_read_mem_unit(aice, address, size, count, buffer);
748
749 /* restore the original access mode */
750 memory->access_channel = orig_channel;
751 aice_memory_access(aice, memory->access_channel);
752
753 return result;
754 }
755
756 int nds32_write_buffer(struct target *target, uint32_t address,
757 uint32_t size, const uint8_t *buffer)
758 {
759 struct nds32 *nds32 = target_to_nds32(target);
760 struct nds32_memory *memory = &(nds32->memory);
761
762 if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
763 (target->state != TARGET_HALTED)) {
764 LOG_WARNING("target was not halted");
765 return ERROR_TARGET_NOT_HALTED;
766 }
767
768 LOG_DEBUG("WRITE BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
769 address,
770 size);
771
772 struct aice_port_s *aice = target_to_aice(target);
773 int retval = ERROR_OK;
774 uint32_t end_address;
775
776 if (((address % 2) == 0) && (size == 2)) {
777 nds32_select_memory_mode(target, address, 2, &end_address);
778 return aice_write_mem_unit(aice, address, 2, 1, buffer);
779 }
780
781 /* handle unaligned head bytes */
782 if (address % 4) {
783 uint32_t unaligned = 4 - (address % 4);
784
785 if (unaligned > size)
786 unaligned = size;
787
788 nds32_select_memory_mode(target, address, unaligned, &end_address);
789 retval = aice_write_mem_unit(aice, address, 1, unaligned, buffer);
790 if (retval != ERROR_OK)
791 return retval;
792
793 buffer += unaligned;
794 address += unaligned;
795 size -= unaligned;
796 }
797
798 /* handle aligned words */
799 if (size >= 4) {
800 int aligned = size - (size % 4);
801 int write_len;
802
803 do {
804 nds32_select_memory_mode(target, address, aligned, &end_address);
805
806 write_len = end_address - address;
807 if (write_len > 8)
808 retval = aice_write_mem_bulk(aice, address, write_len, buffer);
809 else
810 retval = aice_write_mem_unit(aice, address, 4, write_len / 4, buffer);
811 if (retval != ERROR_OK)
812 return retval;
813
814 buffer += write_len;
815 address += write_len;
816 size -= write_len;
817 aligned -= write_len;
818
819 } while (aligned != 0);
820 }
821
822 /* handle tail writes of less than 4 bytes */
823 if (size > 0) {
824 nds32_select_memory_mode(target, address, size, &end_address);
825 retval = aice_write_mem_unit(aice, address, 1, size, buffer);
826 if (retval != ERROR_OK)
827 return retval;
828 }
829
830 return retval;
831 }
832
833 int nds32_write_memory(struct target *target, uint32_t address,
834 uint32_t size, uint32_t count, const uint8_t *buffer)
835 {
836 struct aice_port_s *aice = target_to_aice(target);
837
838 return aice_write_mem_unit(aice, address, size, count, buffer);
839 }
840
841 int nds32_write_phys_memory(struct target *target, uint32_t address,
842 uint32_t size, uint32_t count, const uint8_t *buffer)
843 {
844 struct aice_port_s *aice = target_to_aice(target);
845 struct nds32 *nds32 = target_to_nds32(target);
846 struct nds32_memory *memory = &(nds32->memory);
847 enum nds_memory_access orig_channel;
848 int result;
849
850 /* switch to BUS access mode to skip MMU */
851 orig_channel = memory->access_channel;
852 memory->access_channel = NDS_MEMORY_ACC_BUS;
853 aice_memory_access(aice, memory->access_channel);
854
855 /* The input address is a physical address; no address translation is needed. */
856 result = aice_write_mem_unit(aice, address, size, count, buffer);
857
858 /* restore the original access mode */
859 memory->access_channel = orig_channel;
860 aice_memory_access(aice, memory->access_channel);
861
862 return result;
863 }
864
865 int nds32_mmu(struct target *target, int *enabled)
866 {
867 if (target->state != TARGET_HALTED) {
868 LOG_ERROR("%s: target not halted", __func__);
869 return ERROR_TARGET_INVALID;
870 }
871
872 struct nds32 *nds32 = target_to_nds32(target);
873 struct nds32_memory *memory = &(nds32->memory);
874 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
875
876 if ((mmu_config->memory_protection == 2) && (memory->address_translation == true))
877 *enabled = 1;
878 else
879 *enabled = 0;
880
881 return ERROR_OK;
882 }
883
884 int nds32_arch_state(struct target *target)
885 {
886 struct nds32 *nds32 = target_to_nds32(target);
887
888 if (nds32->common_magic != NDS32_COMMON_MAGIC) {
889 LOG_ERROR("BUG: called for a non-Andes target");
890 return ERROR_FAIL;
891 }
892
893 uint32_t value_pc, value_psw;
894
895 nds32_get_mapped_reg(nds32, PC, &value_pc);
896 nds32_get_mapped_reg(nds32, IR0, &value_psw);
897
898 LOG_USER("target halted due to %s\n"
899 "psw: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
900 debug_reason_name(target),
901 value_psw,
902 value_pc,
903 nds32->virtual_hosting ? ", virtual hosting" : "");
904
905 /* save pc value to pseudo register pc */
906 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
907 buf_set_u32(reg->value, 0, 32, value_pc);
908
909 return ERROR_OK;
910 }
911
912 static void nds32_init_must_have_registers(struct nds32 *nds32)
913 {
914 struct reg_cache *reg_cache = nds32->core_cache;
915
916 /** MUST have general registers */
917 ((struct nds32_reg *)reg_cache->reg_list[R0].arch_info)->enable = true;
918 ((struct nds32_reg *)reg_cache->reg_list[R1].arch_info)->enable = true;
919 ((struct nds32_reg *)reg_cache->reg_list[R2].arch_info)->enable = true;
920 ((struct nds32_reg *)reg_cache->reg_list[R3].arch_info)->enable = true;
921 ((struct nds32_reg *)reg_cache->reg_list[R4].arch_info)->enable = true;
922 ((struct nds32_reg *)reg_cache->reg_list[R5].arch_info)->enable = true;
923 ((struct nds32_reg *)reg_cache->reg_list[R6].arch_info)->enable = true;
924 ((struct nds32_reg *)reg_cache->reg_list[R7].arch_info)->enable = true;
925 ((struct nds32_reg *)reg_cache->reg_list[R8].arch_info)->enable = true;
926 ((struct nds32_reg *)reg_cache->reg_list[R9].arch_info)->enable = true;
927 ((struct nds32_reg *)reg_cache->reg_list[R10].arch_info)->enable = true;
928 ((struct nds32_reg *)reg_cache->reg_list[R15].arch_info)->enable = true;
929 ((struct nds32_reg *)reg_cache->reg_list[R28].arch_info)->enable = true;
930 ((struct nds32_reg *)reg_cache->reg_list[R29].arch_info)->enable = true;
931 ((struct nds32_reg *)reg_cache->reg_list[R30].arch_info)->enable = true;
932 ((struct nds32_reg *)reg_cache->reg_list[R31].arch_info)->enable = true;
933 ((struct nds32_reg *)reg_cache->reg_list[PC].arch_info)->enable = true;
934
935 /** MUST have configuration system registers */
936 ((struct nds32_reg *)reg_cache->reg_list[CR0].arch_info)->enable = true;
937 ((struct nds32_reg *)reg_cache->reg_list[CR1].arch_info)->enable = true;
938 ((struct nds32_reg *)reg_cache->reg_list[CR2].arch_info)->enable = true;
939 ((struct nds32_reg *)reg_cache->reg_list[CR3].arch_info)->enable = true;
940 ((struct nds32_reg *)reg_cache->reg_list[CR4].arch_info)->enable = true;
941
942 /** MUST have interrupt system registers */
943 ((struct nds32_reg *)reg_cache->reg_list[IR0].arch_info)->enable = true;
944 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = true;
945 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = true;
946 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = true;
947 ((struct nds32_reg *)reg_cache->reg_list[IR6].arch_info)->enable = true;
948 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = true;
949 ((struct nds32_reg *)reg_cache->reg_list[IR11].arch_info)->enable = true;
950 ((struct nds32_reg *)reg_cache->reg_list[IR14].arch_info)->enable = true;
951 ((struct nds32_reg *)reg_cache->reg_list[IR15].arch_info)->enable = true;
952
953 /** MUST have MMU system registers */
954 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = true;
955
956 /** MUST have EDM system registers */
957 ((struct nds32_reg *)reg_cache->reg_list[DR40].arch_info)->enable = true;
958 ((struct nds32_reg *)reg_cache->reg_list[DR42].arch_info)->enable = true;
959 }
960
961 static int nds32_init_memory_config(struct nds32 *nds32)
962 {
963 uint32_t value_cr1; /* ICM_CFG */
964 uint32_t value_cr2; /* DCM_CFG */
965 struct nds32_memory *memory = &(nds32->memory);
966
967 /* read $cr1 to init instruction memory information */
968 nds32_get_mapped_reg(nds32, CR1, &value_cr1);
969 memory->icache.set = value_cr1 & 0x7;
970 memory->icache.way = (value_cr1 >> 3) & 0x7;
971 memory->icache.line_size = (value_cr1 >> 6) & 0x7;
972 memory->icache.lock_support = (value_cr1 >> 9) & 0x1;
973
974 memory->ilm_base = (value_cr1 >> 10) & 0x7;
975 memory->ilm_align_ver = (value_cr1 >> 13) & 0x3;
976
977 /* read $cr2 to init data memory information */
978 nds32_get_mapped_reg(nds32, CR2, &value_cr2);
979 memory->dcache.set = value_cr2 & 0x7;
980 memory->dcache.way = (value_cr2 >> 3) & 0x7;
981 memory->dcache.line_size = (value_cr2 >> 6) & 0x7;
982 memory->dcache.lock_support = (value_cr2 >> 9) & 0x1;
983
984 memory->dlm_base = (value_cr2 >> 10) & 0x7;
985 memory->dlm_align_ver = (value_cr2 >> 13) & 0x3;
986
987 return ERROR_OK;
988 }
989
990 static void nds32_init_config(struct nds32 *nds32)
991 {
992 uint32_t value_cr0;
993 uint32_t value_cr3;
994 uint32_t value_cr4;
995 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
996 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
997 struct nds32_misc_config *misc_config = &(nds32->misc_config);
998
999 nds32_get_mapped_reg(nds32, CR0, &value_cr0);
1000 nds32_get_mapped_reg(nds32, CR3, &value_cr3);
1001 nds32_get_mapped_reg(nds32, CR4, &value_cr4);
1002
1003 /* config cpu version */
1004 cpu_version->performance_extension = value_cr0 & 0x1;
1005 cpu_version->_16bit_extension = (value_cr0 >> 1) & 0x1;
1006 cpu_version->performance_extension_2 = (value_cr0 >> 2) & 0x1;
1007 cpu_version->cop_fpu_extension = (value_cr0 >> 3) & 0x1;
1008 cpu_version->string_extension = (value_cr0 >> 4) & 0x1;
1009 cpu_version->revision = (value_cr0 >> 16) & 0xFF;
1010 cpu_version->cpu_id_family = (value_cr0 >> 24) & 0xF;
1011 cpu_version->cpu_id_version = (value_cr0 >> 28) & 0xF;
1012
1013 /* config MMU */
1014 mmu_config->memory_protection = value_cr3 & 0x3;
1015 mmu_config->memory_protection_version = (value_cr3 >> 2) & 0x1F;
1016 mmu_config->fully_associative_tlb = (value_cr3 >> 7) & 0x1;
1017 if (mmu_config->fully_associative_tlb) {
1018 mmu_config->tlb_size = (value_cr3 >> 8) & 0x7F;
1019 } else {
1020 mmu_config->tlb_ways = (value_cr3 >> 8) & 0x7;
1021 mmu_config->tlb_sets = (value_cr3 >> 11) & 0x7;
1022 }
1023 mmu_config->_8k_page_support = (value_cr3 >> 15) & 0x1;
1024 mmu_config->extra_page_size_support = (value_cr3 >> 16) & 0xFF;
1025 mmu_config->tlb_lock = (value_cr3 >> 24) & 0x1;
1026 mmu_config->hardware_page_table_walker = (value_cr3 >> 25) & 0x1;
1027 mmu_config->default_endian = (value_cr3 >> 26) & 0x1;
1028 mmu_config->partition_num = (value_cr3 >> 27) & 0x1;
1029 mmu_config->invisible_tlb = (value_cr3 >> 28) & 0x1;
1030 mmu_config->vlpt = (value_cr3 >> 29) & 0x1;
1031 mmu_config->ntme = (value_cr3 >> 30) & 0x1;
1032 mmu_config->drde = (value_cr3 >> 31) & 0x1;
1033
1034 /* config misc */
1035 misc_config->edm = value_cr4 & 0x1;
1036 misc_config->local_memory_dma = (value_cr4 >> 1) & 0x1;
1037 misc_config->performance_monitor = (value_cr4 >> 2) & 0x1;
1038 misc_config->high_speed_memory_port = (value_cr4 >> 3) & 0x1;
1039 misc_config->debug_tracer = (value_cr4 >> 4) & 0x1;
1040 misc_config->div_instruction = (value_cr4 >> 5) & 0x1;
1041 misc_config->mac_instruction = (value_cr4 >> 6) & 0x1;
1042 misc_config->audio_isa = (value_cr4 >> 7) & 0x3;
1043 misc_config->L2_cache = (value_cr4 >> 9) & 0x1;
1044 misc_config->reduce_register = (value_cr4 >> 10) & 0x1;
1045 misc_config->addr_24 = (value_cr4 >> 11) & 0x1;
1046 misc_config->interruption_level = (value_cr4 >> 12) & 0x1;
1047 misc_config->baseline_instruction = (value_cr4 >> 13) & 0x7;
1048 misc_config->no_dx_register = (value_cr4 >> 16) & 0x1;
1049 misc_config->implement_dependant_register = (value_cr4 >> 17) & 0x1;
1050 misc_config->implement_dependant_sr_encoding = (value_cr4 >> 18) & 0x1;
1051 misc_config->ifc = (value_cr4 >> 19) & 0x1;
1052 misc_config->mcu = (value_cr4 >> 20) & 0x1;
1053 misc_config->shadow = (value_cr4 >> 21) & 0x7;
1054 misc_config->ex9 = (value_cr4 >> 24) & 0x1;
1055
1056 nds32_init_memory_config(nds32);
1057 }
1058
1059 static int nds32_init_option_registers(struct nds32 *nds32)
1060 {
1061 struct reg_cache *reg_cache = nds32->core_cache;
1062 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1063 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1064 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1065 struct nds32_memory *memory_config = &(nds32->memory);
1066
1067 bool no_cr5;
1068 bool mr10_exist;
1069 bool no_racr0;
1070
1071 if (((cpu_version->cpu_id_family == 0xC) || (cpu_version->cpu_id_family == 0xD)) &&
1072 ((cpu_version->revision & 0xFC) == 0)) {
1073 no_cr5 = true;
1074 mr10_exist = true;
1075 no_racr0 = true;
1076 } else {
1077 no_cr5 = false;
1078 mr10_exist = false;
1079 no_racr0 = false;
1080 }
1081
1082 if (misc_config->reduce_register == false) {
1083 ((struct nds32_reg *)reg_cache->reg_list[R11].arch_info)->enable = true;
1084 ((struct nds32_reg *)reg_cache->reg_list[R12].arch_info)->enable = true;
1085 ((struct nds32_reg *)reg_cache->reg_list[R13].arch_info)->enable = true;
1086 ((struct nds32_reg *)reg_cache->reg_list[R14].arch_info)->enable = true;
1087 ((struct nds32_reg *)reg_cache->reg_list[R16].arch_info)->enable = true;
1088 ((struct nds32_reg *)reg_cache->reg_list[R17].arch_info)->enable = true;
1089 ((struct nds32_reg *)reg_cache->reg_list[R18].arch_info)->enable = true;
1090 ((struct nds32_reg *)reg_cache->reg_list[R19].arch_info)->enable = true;
1091 ((struct nds32_reg *)reg_cache->reg_list[R20].arch_info)->enable = true;
1092 ((struct nds32_reg *)reg_cache->reg_list[R21].arch_info)->enable = true;
1093 ((struct nds32_reg *)reg_cache->reg_list[R22].arch_info)->enable = true;
1094 ((struct nds32_reg *)reg_cache->reg_list[R23].arch_info)->enable = true;
1095 ((struct nds32_reg *)reg_cache->reg_list[R24].arch_info)->enable = true;
1096 ((struct nds32_reg *)reg_cache->reg_list[R25].arch_info)->enable = true;
1097 ((struct nds32_reg *)reg_cache->reg_list[R26].arch_info)->enable = true;
1098 ((struct nds32_reg *)reg_cache->reg_list[R27].arch_info)->enable = true;
1099 }
1100
1101 if (misc_config->no_dx_register == false) {
1102 ((struct nds32_reg *)reg_cache->reg_list[D0LO].arch_info)->enable = true;
1103 ((struct nds32_reg *)reg_cache->reg_list[D0HI].arch_info)->enable = true;
1104 ((struct nds32_reg *)reg_cache->reg_list[D1LO].arch_info)->enable = true;
1105 ((struct nds32_reg *)reg_cache->reg_list[D1HI].arch_info)->enable = true;
1106 }
1107
1108 if (misc_config->ex9)
1109 ((struct nds32_reg *)reg_cache->reg_list[ITB].arch_info)->enable = true;
1110
1111 if (no_cr5 == false)
1112 ((struct nds32_reg *)reg_cache->reg_list[CR5].arch_info)->enable = true;
1113
1114 if (cpu_version->cop_fpu_extension) {
1115 ((struct nds32_reg *)reg_cache->reg_list[CR6].arch_info)->enable = true;
1116 ((struct nds32_reg *)reg_cache->reg_list[FPCSR].arch_info)->enable = true;
1117 ((struct nds32_reg *)reg_cache->reg_list[FPCFG].arch_info)->enable = true;
1118 }
1119
1120 if (mmu_config->memory_protection == 1) {
1121 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1122 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = false;
1123 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = false;
1124 }
1125
1126 if (nds32->privilege_level != 0)
1127 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = false;
1128
1129 if (misc_config->mcu == true)
1130 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = false;
1131
1132 if (misc_config->interruption_level == false) {
1133 ((struct nds32_reg *)reg_cache->reg_list[IR2].arch_info)->enable = true;
1134 ((struct nds32_reg *)reg_cache->reg_list[IR5].arch_info)->enable = true;
1135 ((struct nds32_reg *)reg_cache->reg_list[IR10].arch_info)->enable = true;
1136 ((struct nds32_reg *)reg_cache->reg_list[IR12].arch_info)->enable = true;
1137 ((struct nds32_reg *)reg_cache->reg_list[IR13].arch_info)->enable = true;
1138
1139 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1140 if (mmu_config->memory_protection != 1)
1141 ((struct nds32_reg *)reg_cache->reg_list[IR7].arch_info)->enable = true;
1142 }
1143
1144 if ((cpu_version->cpu_id_family == 0x9) ||
1145 (cpu_version->cpu_id_family == 0xA) ||
1146 (cpu_version->cpu_id_family == 0xC) ||
1147 (cpu_version->cpu_id_family == 0xD))
1148 ((struct nds32_reg *)reg_cache->reg_list[IR8].arch_info)->enable = true;
1149
1150 if (misc_config->shadow == 1) {
1151 ((struct nds32_reg *)reg_cache->reg_list[IR16].arch_info)->enable = true;
1152 ((struct nds32_reg *)reg_cache->reg_list[IR17].arch_info)->enable = true;
1153 }
1154
1155 if (misc_config->ifc)
1156 ((struct nds32_reg *)reg_cache->reg_list[IFC_LP].arch_info)->enable = true;
1157
1158 if (nds32->privilege_level != 0)
1159 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = false;
1160
1161 if (mmu_config->memory_protection == 1) {
1162 if (mmu_config->memory_protection_version == 24)
1163 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1164
1165 if (nds32->privilege_level == 0) {
1166 if ((mmu_config->memory_protection_version == 16) ||
1167 (mmu_config->memory_protection_version == 24)) {
1168 ((struct nds32_reg *)reg_cache->reg_list[MR11].arch_info)->enable = true;
1169 ((struct nds32_reg *)reg_cache->reg_list[SECUR0].arch_info)->enable = true;
1170 ((struct nds32_reg *)reg_cache->reg_list[IR20].arch_info)->enable = true;
1171 ((struct nds32_reg *)reg_cache->reg_list[IR22].arch_info)->enable = true;
1172 ((struct nds32_reg *)reg_cache->reg_list[IR24].arch_info)->enable = true;
1173 ((struct nds32_reg *)reg_cache->reg_list[IR30].arch_info)->enable = true;
1174
1175 if (misc_config->shadow == 1) {
1176 ((struct nds32_reg *)reg_cache->reg_list[IR21].arch_info)->enable = true;
1177 ((struct nds32_reg *)reg_cache->reg_list[IR23].arch_info)->enable = true;
1178 ((struct nds32_reg *)reg_cache->reg_list[IR25].arch_info)->enable = true;
1179 }
1180 }
1181 }
1182 } else if (mmu_config->memory_protection == 2) {
1183 ((struct nds32_reg *)reg_cache->reg_list[MR1].arch_info)->enable = true;
1184 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1185
1186 if ((cpu_version->cpu_id_family != 0xA) && (cpu_version->cpu_id_family != 0xC) &&
1187 (cpu_version->cpu_id_family != 0xD))
1188 ((struct nds32_reg *)reg_cache->reg_list[MR5].arch_info)->enable = true;
1189 }
1190
1191 if (mmu_config->memory_protection > 0) {
1192 ((struct nds32_reg *)reg_cache->reg_list[MR2].arch_info)->enable = true;
1193 ((struct nds32_reg *)reg_cache->reg_list[MR3].arch_info)->enable = true;
1194 }
1195
1196 if (memory_config->ilm_base != 0)
1197 if (nds32->privilege_level == 0)
1198 ((struct nds32_reg *)reg_cache->reg_list[MR6].arch_info)->enable = true;
1199
1200 if (memory_config->dlm_base != 0)
1201 if (nds32->privilege_level == 0)
1202 ((struct nds32_reg *)reg_cache->reg_list[MR7].arch_info)->enable = true;
1203
1204 if ((memory_config->icache.line_size != 0) && (memory_config->dcache.line_size != 0))
1205 ((struct nds32_reg *)reg_cache->reg_list[MR8].arch_info)->enable = true;
1206
1207 if (misc_config->high_speed_memory_port)
1208 ((struct nds32_reg *)reg_cache->reg_list[MR9].arch_info)->enable = true;
1209
1210 if (mr10_exist)
1211 ((struct nds32_reg *)reg_cache->reg_list[MR10].arch_info)->enable = true;
1212
1213 if (misc_config->edm) {
1214 int dr_reg_n = nds32->edm.breakpoint_num * 5;
1215
1216 for (int i = 0 ; i < dr_reg_n ; i++)
1217 ((struct nds32_reg *)reg_cache->reg_list[DR0 + i].arch_info)->enable = true;
1218
1219 ((struct nds32_reg *)reg_cache->reg_list[DR41].arch_info)->enable = true;
1220 ((struct nds32_reg *)reg_cache->reg_list[DR43].arch_info)->enable = true;
1221 ((struct nds32_reg *)reg_cache->reg_list[DR44].arch_info)->enable = true;
1222 ((struct nds32_reg *)reg_cache->reg_list[DR45].arch_info)->enable = true;
1223 }
1224
1225 if (misc_config->debug_tracer) {
1226 ((struct nds32_reg *)reg_cache->reg_list[DR46].arch_info)->enable = true;
1227 ((struct nds32_reg *)reg_cache->reg_list[DR47].arch_info)->enable = true;
1228 }
1229
1230 if (misc_config->performance_monitor) {
1231 ((struct nds32_reg *)reg_cache->reg_list[PFR0].arch_info)->enable = true;
1232 ((struct nds32_reg *)reg_cache->reg_list[PFR1].arch_info)->enable = true;
1233 ((struct nds32_reg *)reg_cache->reg_list[PFR2].arch_info)->enable = true;
1234 ((struct nds32_reg *)reg_cache->reg_list[PFR3].arch_info)->enable = true;
1235 }
1236
1237 if (misc_config->local_memory_dma) {
1238 ((struct nds32_reg *)reg_cache->reg_list[DMAR0].arch_info)->enable = true;
1239 ((struct nds32_reg *)reg_cache->reg_list[DMAR1].arch_info)->enable = true;
1240 ((struct nds32_reg *)reg_cache->reg_list[DMAR2].arch_info)->enable = true;
1241 ((struct nds32_reg *)reg_cache->reg_list[DMAR3].arch_info)->enable = true;
1242 ((struct nds32_reg *)reg_cache->reg_list[DMAR4].arch_info)->enable = true;
1243 ((struct nds32_reg *)reg_cache->reg_list[DMAR5].arch_info)->enable = true;
1244 ((struct nds32_reg *)reg_cache->reg_list[DMAR6].arch_info)->enable = true;
1245 ((struct nds32_reg *)reg_cache->reg_list[DMAR7].arch_info)->enable = true;
1246 ((struct nds32_reg *)reg_cache->reg_list[DMAR8].arch_info)->enable = true;
1247 ((struct nds32_reg *)reg_cache->reg_list[DMAR9].arch_info)->enable = true;
1248 ((struct nds32_reg *)reg_cache->reg_list[DMAR10].arch_info)->enable = true;
1249 }
1250
1251 if ((misc_config->local_memory_dma || misc_config->performance_monitor) &&
1252 (no_racr0 == false))
1253 ((struct nds32_reg *)reg_cache->reg_list[RACR].arch_info)->enable = true;
1254
1255 if (cpu_version->cop_fpu_extension || (misc_config->audio_isa != 0))
1256 ((struct nds32_reg *)reg_cache->reg_list[FUCPR].arch_info)->enable = true;
1257
1258 if (misc_config->audio_isa != 0) {
1259 if (misc_config->audio_isa > 1) {
1260 ((struct nds32_reg *)reg_cache->reg_list[D0L24].arch_info)->enable = true;
1261 ((struct nds32_reg *)reg_cache->reg_list[D1L24].arch_info)->enable = true;
1262 }
1263
1264 ((struct nds32_reg *)reg_cache->reg_list[I0].arch_info)->enable = true;
1265 ((struct nds32_reg *)reg_cache->reg_list[I1].arch_info)->enable = true;
1266 ((struct nds32_reg *)reg_cache->reg_list[I2].arch_info)->enable = true;
1267 ((struct nds32_reg *)reg_cache->reg_list[I3].arch_info)->enable = true;
1268 ((struct nds32_reg *)reg_cache->reg_list[I4].arch_info)->enable = true;
1269 ((struct nds32_reg *)reg_cache->reg_list[I5].arch_info)->enable = true;
1270 ((struct nds32_reg *)reg_cache->reg_list[I6].arch_info)->enable = true;
1271 ((struct nds32_reg *)reg_cache->reg_list[I7].arch_info)->enable = true;
1272 ((struct nds32_reg *)reg_cache->reg_list[M1].arch_info)->enable = true;
1273 ((struct nds32_reg *)reg_cache->reg_list[M2].arch_info)->enable = true;
1274 ((struct nds32_reg *)reg_cache->reg_list[M3].arch_info)->enable = true;
1275 ((struct nds32_reg *)reg_cache->reg_list[M5].arch_info)->enable = true;
1276 ((struct nds32_reg *)reg_cache->reg_list[M6].arch_info)->enable = true;
1277 ((struct nds32_reg *)reg_cache->reg_list[M7].arch_info)->enable = true;
1278 ((struct nds32_reg *)reg_cache->reg_list[MOD].arch_info)->enable = true;
1279 ((struct nds32_reg *)reg_cache->reg_list[LBE].arch_info)->enable = true;
1280 ((struct nds32_reg *)reg_cache->reg_list[LE].arch_info)->enable = true;
1281 ((struct nds32_reg *)reg_cache->reg_list[LC].arch_info)->enable = true;
1282 ((struct nds32_reg *)reg_cache->reg_list[ADM_VBASE].arch_info)->enable = true;
1283 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL0].arch_info)->enable = true;
1284 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL1].arch_info)->enable = true;
1285
1286 uint32_t value_mod;
1287 uint32_t fucpr_backup;
1288 /* temporarily enable the audio extension to read its configuration */
1289 nds32_get_mapped_reg(nds32, FUCPR, &fucpr_backup);
1290 if ((fucpr_backup & 0x80000000) == 0)
1291 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup | 0x80000000);
1292 nds32_get_mapped_reg(nds32, MOD, &value_mod);
1293 /* restore the original FUCPR value */
1294 if ((fucpr_backup & 0x80000000) == 0)
1295 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup);
1296
1297 if ((value_mod >> 6) & 0x1) {
1298 ((struct nds32_reg *)reg_cache->reg_list[CB_CTL].arch_info)->enable = true;
1299 ((struct nds32_reg *)reg_cache->reg_list[CBB0].arch_info)->enable = true;
1300 ((struct nds32_reg *)reg_cache->reg_list[CBB1].arch_info)->enable = true;
1301 ((struct nds32_reg *)reg_cache->reg_list[CBB2].arch_info)->enable = true;
1302 ((struct nds32_reg *)reg_cache->reg_list[CBB3].arch_info)->enable = true;
1303 ((struct nds32_reg *)reg_cache->reg_list[CBE0].arch_info)->enable = true;
1304 ((struct nds32_reg *)reg_cache->reg_list[CBE1].arch_info)->enable = true;
1305 ((struct nds32_reg *)reg_cache->reg_list[CBE2].arch_info)->enable = true;
1306 ((struct nds32_reg *)reg_cache->reg_list[CBE3].arch_info)->enable = true;
1307 }
1308 }
1309
1310 if ((cpu_version->cpu_id_family == 0x9) ||
1311 (cpu_version->cpu_id_family == 0xA) ||
1312 (cpu_version->cpu_id_family == 0xC)) {
1313
1314 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = true;
1315 ((struct nds32_reg *)reg_cache->reg_list[IDR1].arch_info)->enable = true;
1316
1317 if ((cpu_version->cpu_id_family == 0xC) && (cpu_version->revision == 0x0C))
1318 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = false;
1319 }
1320
1321 uint32_t ir3_value;
1322 uint32_t ivb_prog_pri_lvl;
1323 uint32_t ivb_ivic_ver;
1324
1325 nds32_get_mapped_reg(nds32, IR3, &ir3_value);
1326 ivb_prog_pri_lvl = ir3_value & 0x1;
1327 ivb_ivic_ver = (ir3_value >> 11) & 0x3;
1328
1329 if ((ivb_prog_pri_lvl == 1) || (ivb_ivic_ver >= 1)) {
1330 ((struct nds32_reg *)reg_cache->reg_list[IR18].arch_info)->enable = true;
1331 ((struct nds32_reg *)reg_cache->reg_list[IR19].arch_info)->enable = true;
1332 }
1333
1334 if (ivb_ivic_ver >= 1) {
1335 ((struct nds32_reg *)reg_cache->reg_list[IR26].arch_info)->enable = true;
1336 ((struct nds32_reg *)reg_cache->reg_list[IR27].arch_info)->enable = true;
1337 ((struct nds32_reg *)reg_cache->reg_list[IR28].arch_info)->enable = true;
1338 ((struct nds32_reg *)reg_cache->reg_list[IR29].arch_info)->enable = true;
1339 }
1340
1341 return ERROR_OK;
1342 }
1343
1344 int nds32_init_register_table(struct nds32 *nds32)
1345 {
1346 nds32_init_must_have_registers(nds32);
1347
1348 return ERROR_OK;
1349 }
1350
1351 int nds32_add_software_breakpoint(struct target *target,
1352 struct breakpoint *breakpoint)
1353 {
1354 uint32_t data;
1355 uint32_t check_data;
1356 uint32_t break_insn;
1357
1358 /* check the breakpoint size */
1359 target->type->read_buffer(target, breakpoint->address, 4, (uint8_t *)&data);
1360
1361 /* back up the original instruction;
1362 * instructions are stored big-endian */
1363 if (*(char *)&data & 0x80) { /* 16-bit instruction */
1364 breakpoint->length = 2;
1365 break_insn = NDS32_BREAK_16;
1366 } else { /* 32-bit instruction */
1367 breakpoint->length = 4;
1368 break_insn = NDS32_BREAK_32;
1369 }
1370
1371 if (breakpoint->orig_instr != NULL)
1372 free(breakpoint->orig_instr);
1373
1374 breakpoint->orig_instr = malloc(breakpoint->length);
1375 memcpy(breakpoint->orig_instr, &data, breakpoint->length);
1376
1377 /* self-modified code */
1378 target->type->write_buffer(target, breakpoint->address, breakpoint->length, (const uint8_t *)&break_insn);
1379 /* write_back & invalidate dcache & invalidate icache */
1380 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1381
1382 /* read back to check */
1383 target->type->read_buffer(target, breakpoint->address, breakpoint->length, (uint8_t *)&check_data);
1384 if (memcmp(&check_data, &break_insn, breakpoint->length) == 0)
1385 return ERROR_OK;
1386
1387 return ERROR_FAIL;
1388 }
1389
1390 int nds32_remove_software_breakpoint(struct target *target,
1391 struct breakpoint *breakpoint)
1392 {
1393 uint32_t check_data;
1394 uint32_t break_insn;
1395
1396 if (breakpoint->length == 2)
1397 break_insn = NDS32_BREAK_16;
1398 else if (breakpoint->length == 4)
1399 break_insn = NDS32_BREAK_32;
1400 else
1401 return ERROR_FAIL;
1402
1403 target->type->read_buffer(target, breakpoint->address, breakpoint->length,
1404 (uint8_t *)&check_data);
1405
1406 /* bail out if the break instruction has been modified */
1407 if (memcmp(&check_data, &break_insn, breakpoint->length) != 0)
1408 return ERROR_FAIL;
1409
1410 /* self-modified code */
1411 target->type->write_buffer(target, breakpoint->address, breakpoint->length,
1412 breakpoint->orig_instr);
1413
1414 /* write_back & invalidate dcache & invalidate icache */
1415 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1416
1417 return ERROR_OK;
1418 }
1419
1420 /**
1421 * Restore the processor context on an Andes target.
1422 *
1423 * The register cache is scanned for registers that are dirty on the host
1424 * side but still hold a valid value; each such register is written back
1425 * to the processor so that the new values take effect before execution
1426 * resumes. Dirty registers whose cached value is invalid are left untouched.
1427 *
1428 * @param target Pointer to the Andes target to have its context restored
1429 * @return Error status if the target is not halted.
1430 */
1431 int nds32_restore_context(struct target *target)
1432 {
1433 struct nds32 *nds32 = target_to_nds32(target);
1434 struct aice_port_s *aice = target_to_aice(target);
1435 struct reg_cache *reg_cache = nds32->core_cache;
1436 struct reg *reg;
1437 struct nds32_reg *reg_arch_info;
1438 unsigned int i;
1439
1440 LOG_DEBUG("-");
1441
1442 if (target->state != TARGET_HALTED) {
1443 LOG_WARNING("target not halted");
1444 return ERROR_TARGET_NOT_HALTED;
1445 }
1446
1447 /* check if there are dirty registers */
1448 for (i = 0; i < reg_cache->num_regs; i++) {
1449 reg = &(reg_cache->reg_list[i]);
1450 if (reg->dirty == true) {
1451 if (reg->valid == true) {
1452
1453 LOG_DEBUG("examining dirty reg: %s", reg->name);
1454 LOG_DEBUG("writing register %i "
1455 "with value 0x%8.8" PRIx32, i, buf_get_u32(reg->value, 0, 32));
1456
1457 reg_arch_info = reg->arch_info;
1458 if (FD0 <= reg_arch_info->num && reg_arch_info->num <= FD31)
1459 aice_write_reg_64(aice, reg_arch_info->num, reg_arch_info->value_64);
1460 else
1461 aice_write_register(aice, reg_arch_info->num, reg_arch_info->value);
1462 reg->valid = true;
1463 reg->dirty = false;
1464 }
1465 }
1466 }
1467
1468 return ERROR_OK;
1469 }
1470
1471 int nds32_edm_config(struct nds32 *nds32)
1472 {
1473 struct target *target = nds32->target;
1474 struct aice_port_s *aice = target_to_aice(target);
1475 uint32_t edm_cfg;
1476 uint32_t edm_ctl;
1477
1478 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CFG, &edm_cfg);
1479
1480 nds32->edm.version = (edm_cfg >> 16) & 0xFFFF;
1481 LOG_INFO("EDM version 0x%04" PRIx32, nds32->edm.version);
1482
1483 nds32->edm.breakpoint_num = (edm_cfg & 0x7) + 1;
1484
1485 if ((nds32->edm.version & 0x1000) || (0x60 <= nds32->edm.version))
1486 nds32->edm.access_control = true;
1487 else
1488 nds32->edm.access_control = false;
1489
1490 if ((edm_cfg >> 4) & 0x1)
1491 nds32->edm.direct_access_local_memory = true;
1492 else
1493 nds32->edm.direct_access_local_memory = false;
1494
1495 if (nds32->edm.version <= 0x20)
1496 nds32->edm.direct_access_local_memory = false;
1497
1498 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &edm_ctl);
1499 if (edm_ctl & (0x1 << 29))
1500 nds32->edm.support_max_stop = true;
1501 else
1502 nds32->edm.support_max_stop = false;
1503
1504 /* set passcode for secure MCU */
1505 nds32_login(nds32);
1506
1507 return ERROR_OK;
1508 }
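/* Note: nds32->edm.breakpoint_num (the low three bits of EDM_CFG plus one,
 * i.e. 1 to 8) later determines how many DR registers
 * nds32_init_option_registers() enables: five per hardware breakpoint. */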
1509
1510 int nds32_config(struct nds32 *nds32)
1511 {
1512 nds32_init_config(nds32);
1513
1514 /* init optional system registers according to config registers */
1515 nds32_init_option_registers(nds32);
1516
1517 /* get max interrupt level */
1518 if (nds32->misc_config.interruption_level)
1519 nds32->max_interrupt_level = 2;
1520 else
1521 nds32->max_interrupt_level = 3;
1522
1523 /* get ILM/DLM size from MR6/MR7 */
1524 uint32_t value_mr6, value_mr7;
1525 uint32_t size_index;
1526 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
1527 size_index = (value_mr6 >> 1) & 0xF;
1528 nds32->memory.ilm_size = NDS32_LM_SIZE_TABLE[size_index];
1529
1530 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
1531 size_index = (value_mr7 >> 1) & 0xF;
1532 nds32->memory.dlm_size = NDS32_LM_SIZE_TABLE[size_index];
1533
1534 return ERROR_OK;
1535 }
1536
1537 int nds32_init_arch_info(struct target *target, struct nds32 *nds32)
1538 {
1539 target->arch_info = nds32;
1540 nds32->target = target;
1541
1542 nds32->common_magic = NDS32_COMMON_MAGIC;
1543 nds32->init_arch_info_after_halted = false;
1544 nds32->auto_convert_hw_bp = true;
1545 nds32->global_stop = false;
1546 nds32->soft_reset_halt = false;
1547 nds32->edm_passcode = NULL;
1548 nds32->privilege_level = 0;
1549 nds32->boot_time = 1500;
1550 nds32->reset_halt_as_examine = false;
1551 nds32->keep_target_edm_ctl = false;
1552 nds32->word_access_mem = false;
1553 nds32->virtual_hosting = false;
1554
1555 nds32_reg_init();
1556
1557 if (ERROR_FAIL == nds32_reg_cache_init(target, nds32))
1558 return ERROR_FAIL;
1559
1560 if (ERROR_OK != nds32_init_register_table(nds32))
1561 return ERROR_FAIL;
1562
1563 return ERROR_OK;
1564 }
1565
1566 int nds32_virtual_to_physical(struct target *target, uint32_t address, uint32_t *physical)
1567 {
1568 struct nds32 *nds32 = target_to_nds32(target);
1569
1570 if (nds32->memory.address_translation == false) {
1571 *physical = address;
1572 return ERROR_OK;
1573 }
1574
1575 if (ERROR_OK == nds32_probe_tlb(nds32, address, physical))
1576 return ERROR_OK;
1577
1578 if (ERROR_OK == nds32_walk_page_table(nds32, address, physical))
1579 return ERROR_OK;
1580
1581 return ERROR_FAIL;
1582 }
1583
1584 int nds32_cache_sync(struct target *target, uint32_t address, uint32_t length)
1585 {
1586 struct aice_port_s *aice = target_to_aice(target);
1587 struct nds32 *nds32 = target_to_nds32(target);
1588 struct nds32_cache *dcache = &(nds32->memory.dcache);
1589 struct nds32_cache *icache = &(nds32->memory.icache);
1590 uint32_t dcache_line_size = NDS32_LINE_SIZE_TABLE[dcache->line_size];
1591 uint32_t icache_line_size = NDS32_LINE_SIZE_TABLE[icache->line_size];
1592 uint32_t cur_address;
1593 int result;
1594 uint32_t start_line, end_line;
1595 uint32_t cur_line;
1596
1597 if ((dcache->line_size != 0) && (dcache->enable == true)) {
1598 /* address / dcache_line_size */
1599 start_line = address >> (dcache->line_size + 2);
1600 /* (address + length - 1) / dcache_line_size */
1601 end_line = (address + length - 1) >> (dcache->line_size + 2);
1602
1603 for (cur_address = address, cur_line = start_line ;
1604 cur_line <= end_line ;
1605 cur_address += dcache_line_size, cur_line++) {
1606 /* D$ write back */
1607 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_WB, cur_address);
1608 if (result != ERROR_OK)
1609 return result;
1610
1611 /* D$ invalidate */
1612 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_INVAL, cur_address);
1613 if (result != ERROR_OK)
1614 return result;
1615 }
1616 }
1617
1618 if ((icache->line_size != 0) && (icache->enable == true)) {
1619 /* address / icache_line_size */
1620 start_line = address >> (icache->line_size + 2);
1621 /* (address + length - 1) / icache_line_size */
1622 end_line = (address + length - 1) >> (icache->line_size + 2);
1623
1624 for (cur_address = address, cur_line = start_line ;
1625 cur_line <= end_line ;
1626 cur_address += icache_line_size, cur_line++) {
1627 			/* Because PSW.IT is turned off under a debug exception, the address
1628 			 * MUST be a physical address. L1I_VA_INVALIDATE uses PSW.IT to decide
1629 			 * whether to perform address translation. */
1630 uint32_t physical_addr;
1631 if (ERROR_FAIL == target->type->virt2phys(target, cur_address,
1632 &physical_addr))
1633 return ERROR_FAIL;
1634
1635 /* I$ invalidate */
1636 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1I_VA_INVAL, physical_addr);
1637 if (result != ERROR_OK)
1638 return result;
1639 }
1640 }
1641
1642 return ERROR_OK;
1643 }
1644
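/* Return the address to continue from: if 'current' is false, 'address' is
 * written to the PC; otherwise the current PC is read back. */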
1645 uint32_t nds32_nextpc(struct nds32 *nds32, int current, uint32_t address)
1646 {
1647 if (!current)
1648 nds32_set_mapped_reg(nds32, PC, address);
1649 else
1650 nds32_get_mapped_reg(nds32, PC, &address);
1651
1652 return address;
1653 }
1654
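/* Execute a single instruction. IR14 bit 31 (DSSIM) is programmed from
 * nds32->step_isr_enable before the step and restored afterwards. */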
1655 int nds32_step(struct target *target, int current,
1656 uint32_t address, int handle_breakpoints)
1657 {
1658 LOG_DEBUG("target->state: %s",
1659 target_state_name(target));
1660
1661 if (target->state != TARGET_HALTED) {
1662 LOG_WARNING("target was not halted");
1663 return ERROR_TARGET_NOT_HALTED;
1664 }
1665
1666 struct nds32 *nds32 = target_to_nds32(target);
1667
1668 address = nds32_nextpc(nds32, current, address);
1669
1670 LOG_DEBUG("STEP PC %08" PRIx32 "%s", address, !current ? "!" : "");
1671
1672 /** set DSSIM */
1673 uint32_t ir14_value;
1674 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1675 if (nds32->step_isr_enable)
1676 		ir14_value |= (0x1U << 31);
1677 else
1678 		ir14_value &= ~(0x1U << 31);
1679 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1680
1681 /********* TODO: maybe create another function to handle this part */
1682 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
1683 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
1684
1685 struct aice_port_s *aice = target_to_aice(target);
1686 if (ERROR_OK != aice_step(aice))
1687 return ERROR_FAIL;
1688
1689 /* save state */
1690 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
1691 /********* TODO: maybe create another function to handle this part */
1692
1693 /* restore DSSIM */
1694 if (nds32->step_isr_enable) {
1695 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1696 		ir14_value &= ~(0x1U << 31);
1697 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1698 }
1699
1700 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
1701
1702 return ERROR_OK;
1703 }
1704
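/* Like nds32_step(), but debug state is entered/left without re-activating
 * watchpoints and no target event callbacks are fired. */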
1705 static int nds32_step_without_watchpoint(struct nds32 *nds32)
1706 {
1707 struct target *target = nds32->target;
1708
1709 if (target->state != TARGET_HALTED) {
1710 LOG_WARNING("target was not halted");
1711 return ERROR_TARGET_NOT_HALTED;
1712 }
1713
1714 /** set DSSIM */
1715 uint32_t ir14_value;
1716 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1717 if (nds32->step_isr_enable)
1718 		ir14_value |= (0x1U << 31);
1719 else
1720 		ir14_value &= ~(0x1U << 31);
1721 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1722
1723 /********* TODO: maybe create another function to handle this part */
1724 CHECK_RETVAL(nds32->leave_debug_state(nds32, false));
1725
1726 struct aice_port_s *aice = target_to_aice(target);
1727
1728 if (ERROR_OK != aice_step(aice))
1729 return ERROR_FAIL;
1730
1731 /* save state */
1732 CHECK_RETVAL(nds32->enter_debug_state(nds32, false));
1733 /********* TODO: maybe create another function to handle this part */
1734
1735 /* restore DSSIM */
1736 if (nds32->step_isr_enable) {
1737 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1738 		ir14_value &= ~(0x1U << 31);
1739 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1740 }
1741
1742 return ERROR_OK;
1743 }
1744
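/* Query the AICE adapter for the current target state and map it onto the
 * generic OpenOCD target_state values. */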
1745 int nds32_target_state(struct nds32 *nds32, enum target_state *state)
1746 {
1747 struct aice_port_s *aice = target_to_aice(nds32->target);
1748 enum aice_target_state_s nds32_state;
1749
1750 if (aice_state(aice, &nds32_state) != ERROR_OK)
1751 return ERROR_FAIL;
1752
1753 switch (nds32_state) {
1754 case AICE_DISCONNECT:
1755 LOG_INFO("USB is disconnected");
1756 return ERROR_FAIL;
1757 case AICE_TARGET_DETACH:
1758 LOG_INFO("Target is disconnected");
1759 return ERROR_FAIL;
1760 case AICE_TARGET_UNKNOWN:
1761 *state = TARGET_UNKNOWN;
1762 break;
1763 case AICE_TARGET_RUNNING:
1764 *state = TARGET_RUNNING;
1765 break;
1766 case AICE_TARGET_HALTED:
1767 *state = TARGET_HALTED;
1768 break;
1769 case AICE_TARGET_RESET:
1770 *state = TARGET_RESET;
1771 break;
1772 case AICE_TARGET_DEBUG_RUNNING:
1773 *state = TARGET_DEBUG_RUNNING;
1774 break;
1775 default:
1776 return ERROR_FAIL;
1777 }
1778
1779 return ERROR_OK;
1780 }
1781
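/* Read the debug reason reported by the EDM and translate it into
 * target->debug_reason. For precise watchpoint hits the watched address is
 * saved and the watched instruction is skipped with a single step. */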
1782 int nds32_examine_debug_reason(struct nds32 *nds32)
1783 {
1784 uint32_t reason;
1785 struct target *target = nds32->target;
1786
1787 nds32->get_debug_reason(nds32, &reason);
1788
1789 LOG_DEBUG("nds32 examines debug reason: %s", nds32_debug_type_name[reason]);
1790
1791 /* Examine debug reason */
1792 switch (reason) {
1793 case NDS32_DEBUG_BREAK:
1794 case NDS32_DEBUG_BREAK_16:
1795 case NDS32_DEBUG_INST_BREAK:
1796 {
1797 uint32_t value_pc;
1798 uint32_t opcode;
1799 struct nds32_instruction instruction;
1800
1801 nds32_get_mapped_reg(nds32, PC, &value_pc);
1802
1803 if (ERROR_OK != nds32_read_opcode(nds32, value_pc, &opcode))
1804 return ERROR_FAIL;
1805 if (ERROR_OK != nds32_evaluate_opcode(nds32, opcode, value_pc,
1806 &instruction))
1807 return ERROR_FAIL;
1808
1809 target->debug_reason = DBG_REASON_BREAKPOINT;
1810 }
1811 break;
1812 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_PRECISE:
1813 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_PRECISE:
1814 case NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP: /* GLOBAL_STOP is precise exception */
1815 {
1816 int result;
1817
1818 result = nds32->get_watched_address(nds32,
1819 &(nds32->watched_address), reason);
1820 			/* do a single step (without watchpoints) to skip over the "watched" instruction */
1821 			nds32_step_without_watchpoint(nds32);
1822 
1823 			/* the watched (exception) address was saved above, before the single step */
1824 if (ERROR_OK != result)
1825 return ERROR_FAIL;
1826
1827 target->debug_reason = DBG_REASON_WATCHPOINT;
1828 }
1829 break;
1830 case NDS32_DEBUG_DEBUG_INTERRUPT:
1831 target->debug_reason = DBG_REASON_DBGRQ;
1832 break;
1833 case NDS32_DEBUG_HARDWARE_SINGLE_STEP:
1834 target->debug_reason = DBG_REASON_SINGLESTEP;
1835 break;
1836 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_IMPRECISE:
1837 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE:
1838 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE:
1839 if (ERROR_OK != nds32->get_watched_address(nds32,
1840 &(nds32->watched_address), reason))
1841 return ERROR_FAIL;
1842
1843 target->debug_reason = DBG_REASON_WATCHPOINT;
1844 break;
1845 default:
1846 target->debug_reason = DBG_REASON_UNDEFINED;
1847 break;
1848 }
1849
1850 return ERROR_OK;
1851 }
1852
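/* Unlock a secure MCU. The EDM passcode is split into chunks of at most eight
 * hex digits and each chunk is sent as a "write_misc gen_port0 <value>;"
 * command; e.g. the passcode "1234abcd56" becomes
 * "write_misc gen_port0 0x1234abcd;write_misc gen_port0 0x56;". Any
 * user-configured EDM operations (nds32_edm_ops) are sent afterwards. */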
1853 int nds32_login(struct nds32 *nds32)
1854 {
1855 struct target *target = nds32->target;
1856 struct aice_port_s *aice = target_to_aice(target);
1857 uint32_t passcode_length;
1858 char command_sequence[129];
1859 char command_str[33];
1860 char code_str[9];
1861 uint32_t copy_length;
1862 uint32_t code;
1863 uint32_t i;
1864
1865 LOG_DEBUG("nds32_login");
1866
1867 if (nds32->edm_passcode != NULL) {
1868 /* convert EDM passcode to command sequences */
1869 passcode_length = strlen(nds32->edm_passcode);
1870 command_sequence[0] = '\0';
1871 for (i = 0; i < passcode_length; i += 8) {
1872 if (passcode_length - i < 8)
1873 copy_length = passcode_length - i;
1874 else
1875 copy_length = 8;
1876
1877 strncpy(code_str, nds32->edm_passcode + i, copy_length);
1878 code_str[copy_length] = '\0';
1879 code = strtoul(code_str, NULL, 16);
1880
1881 sprintf(command_str, "write_misc gen_port0 0x%x;", code);
1882 strcat(command_sequence, command_str);
1883 }
1884
1885 if (ERROR_OK != aice_program_edm(aice, command_sequence))
1886 return ERROR_FAIL;
1887
1888 /* get current privilege level */
1889 uint32_t value_edmsw;
1890 aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &value_edmsw);
1891 nds32->privilege_level = (value_edmsw >> 16) & 0x3;
1892 LOG_INFO("Current privilege level: %d", nds32->privilege_level);
1893 }
1894
1895 if (nds32_edm_ops_num > 0) {
1896 const char *reg_name;
1897 for (i = 0 ; i < nds32_edm_ops_num ; i++) {
1898 code = nds32_edm_ops[i].value;
1899 if (nds32_edm_ops[i].reg_no == 6)
1900 reg_name = "gen_port0";
1901 else if (nds32_edm_ops[i].reg_no == 7)
1902 reg_name = "gen_port1";
1903 else
1904 return ERROR_FAIL;
1905
1906 sprintf(command_str, "write_misc %s 0x%x;", reg_name, code);
1907 if (ERROR_OK != aice_program_edm(aice, command_str))
1908 return ERROR_FAIL;
1909 }
1910 }
1911
1912 return ERROR_OK;
1913 }
1914
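/* Halt the target through the AICE adapter (unless it is already halted) and
 * enter debug state. */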
1915 int nds32_halt(struct target *target)
1916 {
1917 struct nds32 *nds32 = target_to_nds32(target);
1918 struct aice_port_s *aice = target_to_aice(target);
1919 enum target_state state;
1920
1921 LOG_DEBUG("target->state: %s",
1922 target_state_name(target));
1923
1924 if (target->state == TARGET_HALTED) {
1925 LOG_DEBUG("target was already halted");
1926 return ERROR_OK;
1927 }
1928
1929 if (nds32_target_state(nds32, &state) != ERROR_OK)
1930 return ERROR_FAIL;
1931
1932 if (TARGET_HALTED != state)
1933 /* TODO: if state == TARGET_HALTED, check ETYPE is DBGI or not */
1934 if (ERROR_OK != aice_halt(aice))
1935 return ERROR_FAIL;
1936
1937 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
1938
1939 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
1940
1941 return ERROR_OK;
1942 }
1943
1944 /* poll current target status */
1945 int nds32_poll(struct target *target)
1946 {
1947 struct nds32 *nds32 = target_to_nds32(target);
1948 enum target_state state;
1949
1950 if (nds32_target_state(nds32, &state) != ERROR_OK)
1951 return ERROR_FAIL;
1952
1953 if (state == TARGET_HALTED) {
1954 if (target->state != TARGET_HALTED) {
1955 			/* if this is a false hit, let the target continue free running */
1956 if (ERROR_OK != nds32->enter_debug_state(nds32, true)) {
1957 struct aice_port_s *aice = target_to_aice(target);
1958 aice_run(aice);
1959 return ERROR_OK;
1960 }
1961
1962 LOG_DEBUG("Change target state to TARGET_HALTED.");
1963
1964 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1965 }
1966 } else if (state == TARGET_RESET) {
1967 if (target->state == TARGET_HALTED) {
1968 /* similar to assert srst */
1969 register_cache_invalidate(nds32->core_cache);
1970 target->state = TARGET_RESET;
1971
1972 /* TODO: deassert srst */
1973 } else if (target->state == TARGET_RUNNING) {
1974 /* reset as running */
1975 LOG_WARNING("<-- TARGET WARNING! The debug target has been reset. -->");
1976 }
1977 } else {
1978 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
1979 LOG_DEBUG("Change target state to TARGET_RUNNING.");
1980 target->state = TARGET_RUNNING;
1981 target->debug_reason = DBG_REASON_NOTHALTED;
1982 }
1983 }
1984
1985 return ERROR_OK;
1986 }
1987
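/* Resume execution from the current PC or from 'address'. Unless the maximum
 * interrupt level has been reached, the HSS bit in IR0 is cleared first so
 * that hardware single stepping cannot be misused. */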
1988 int nds32_resume(struct target *target, int current,
1989 uint32_t address, int handle_breakpoints, int debug_execution)
1990 {
1991 	LOG_DEBUG("current %d address %08" PRIx32 " handle_breakpoints %d debug_execution %d",
1992 current, address, handle_breakpoints, debug_execution);
1993
1994 struct nds32 *nds32 = target_to_nds32(target);
1995
1996 if (target->state != TARGET_HALTED) {
1997 LOG_ERROR("Target not halted");
1998 return ERROR_TARGET_NOT_HALTED;
1999 }
2000
2001 address = nds32_nextpc(nds32, current, address);
2002
2003 LOG_DEBUG("RESUME PC %08" PRIx32 "%s", address, !current ? "!" : "");
2004
2005 if (!debug_execution)
2006 target_free_all_working_areas(target);
2007
2008 	/* Disable HSS to prevent users from misusing hardware single stepping */
2009 if (nds32_reach_max_interrupt_level(nds32) == false) {
2010 uint32_t value_ir0;
2011 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
2012 value_ir0 &= ~(0x1 << 11);
2013 nds32_set_mapped_reg(nds32, IR0, value_ir0);
2014 }
2015
2016 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
2017 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
2018
2019 struct aice_port_s *aice = target_to_aice(target);
2020 aice_run(aice);
2021
2022 target->debug_reason = DBG_REASON_NOTHALTED;
2023 if (!debug_execution)
2024 target->state = TARGET_RUNNING;
2025 else
2026 target->state = TARGET_DEBUG_RUNNING;
2027
2028 LOG_DEBUG("target->state: %s",
2029 target_state_name(target));
2030
2031 return ERROR_OK;
2032 }
2033
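/* Assert reset. With reset_halt set, either a soft reset-halt or a
 * reset-and-hold via SRST is used; otherwise plain SRST followed by a boot
 * delay. The EDM passcode is re-sent afterwards and the register cache is
 * invalidated. */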
2034 int nds32_assert_reset(struct target *target)
2035 {
2036 struct nds32 *nds32 = target_to_nds32(target);
2037 struct aice_port_s *aice = target_to_aice(target);
2038
2039 jtag_poll_set_enabled(true);
2040
2041 if (target->reset_halt) {
2042 if (nds32->soft_reset_halt)
2043 target->type->soft_reset_halt(target);
2044 else
2045 aice_assert_srst(aice, AICE_RESET_HOLD);
2046 } else {
2047 aice_assert_srst(aice, AICE_SRST);
2048 alive_sleep(nds32->boot_time);
2049 }
2050
2051 /* set passcode for secure MCU after core reset */
2052 nds32_login(nds32);
2053
2054 /* registers are now invalid */
2055 register_cache_invalidate(nds32->core_cache);
2056
2057 target->state = TARGET_RESET;
2058
2059 return ERROR_OK;
2060 }
2061
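/* EDM_CTL backup and attach state shared by the GDB attach/detach handlers below */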
2062 static uint32_t nds32_backup_edm_ctl;
2063 static bool gdb_attached;
2064
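/* On the first GDB attach: optionally back up the target's EDM_CTL, then halt
 * and poll the target. */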
2065 static int nds32_gdb_attach(struct nds32 *nds32)
2066 {
2067 LOG_DEBUG("nds32_gdb_attach");
2068
2069 if (gdb_attached == false) {
2070
2071 if (nds32->keep_target_edm_ctl) {
2072 /* backup target EDM_CTL */
2073 struct aice_port_s *aice = target_to_aice(nds32->target);
2074 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &nds32_backup_edm_ctl);
2075 }
2076
2077 target_halt(nds32->target);
2078 target_poll(nds32->target);
2079
2080 gdb_attached = true;
2081 }
2082
2083 return ERROR_OK;
2084 }
2085
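/* On GDB detach: resume the target with virtual hosting temporarily turned
 * off, optionally restore EDM_CTL, and disable background polling again. */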
2086 static int nds32_gdb_detach(struct nds32 *nds32)
2087 {
2088 LOG_DEBUG("nds32_gdb_detach");
2089 bool backup_virtual_hosting_setting;
2090
2091 if (gdb_attached) {
2092
2093 backup_virtual_hosting_setting = nds32->virtual_hosting;
2094 		/* turn off virtual hosting before resuming, since gdb is detaching */
2095 nds32->virtual_hosting = false;
2096 target_resume(nds32->target, 1, 0, 0, 0);
2097 nds32->virtual_hosting = backup_virtual_hosting_setting;
2098
2099 if (nds32->keep_target_edm_ctl) {
2100 /* restore target EDM_CTL */
2101 struct aice_port_s *aice = target_to_aice(nds32->target);
2102 aice_write_debug_reg(aice, NDS_EDM_SR_EDM_CTL, nds32_backup_edm_ctl);
2103 }
2104
2105 /* turn off polling */
2106 jtag_poll_set_enabled(false);
2107
2108 gdb_attached = false;
2109 }
2110
2111 return ERROR_OK;
2112 }
2113
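/* Forward GDB attach/detach target events to the handlers above. */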
2114 static int nds32_callback_event_handler(struct target *target,
2115 enum target_event event, void *priv)
2116 {
2117 int retval = ERROR_OK;
2118 struct nds32 *nds32 = priv;
2119
2120 switch (event) {
2121 case TARGET_EVENT_GDB_ATTACH:
2122 retval = nds32_gdb_attach(nds32);
2123 break;
2124 case TARGET_EVENT_GDB_DETACH:
2125 retval = nds32_gdb_detach(nds32);
2126 break;
2127 default:
2128 break;
2129 }
2130
2131 return retval;
2132 }
2133
2134 int nds32_init(struct nds32 *nds32)
2135 {
2136 /* Initialize anything we can set up without talking to the target */
2137 nds32->memory.access_channel = NDS_MEMORY_ACC_CPU;
2138
2139 /* turn off polling by default */
2140 jtag_poll_set_enabled(false);
2141
2142 /* register event callback */
2143 target_register_event_callback(nds32_callback_event_handler, nds32);
2144
2145 return ERROR_OK;
2146 }
2147
2148 int nds32_reset_halt(struct nds32 *nds32)
2149 {
2150 LOG_INFO("reset halt as init");
2151
2152 struct aice_port_s *aice = target_to_aice(nds32->target);
2153 aice_assert_srst(aice, AICE_RESET_HOLD);
2154
2155 return ERROR_OK;
2156 }
