[openocd.git] / src / target / arc.c
1 /***************************************************************************
2 * Copyright (C) 2013-2015,2019-2020 Synopsys, Inc. *
3 * Frank Dols <frank.dols@synopsys.com> *
4 * Mischa Jonker <mischa.jonker@synopsys.com> *
5 * Anton Kolesov <anton.kolesov@synopsys.com> *
6 * Evgeniy Didin <didin@synopsys.com> *
7 * *
8 * SPDX-License-Identifier: GPL-2.0-or-later *
9 ***************************************************************************/
10
11
12 #ifdef HAVE_CONFIG_H
13 #include "config.h"
14 #endif
15
16 #include "arc.h"
17
18
19
20 /*
21 * ARC architecture specific details.
22 *
23 * ARC has two types of registers:
24 * 1) core registers (e.g. r0, r1, ...) [is_core = true]
25 * 2) auxiliary registers [is_core = false].
26 *
27 * Auxiliary registers in turn can be divided into
28 * read-only BCR (build configuration regs, e.g. isa_config, mpu_build) and
29 * read-only/read-write non-BCR ("control") registers, e.g. pc, status32, debug.
30 *
31 * The way core and AUX registers are accessed differs at the JTAG level.
32 * BCR/non-BCR describes whether the register is immutable and whether reading
33 * a non-existent register is a safe read-as-zero (RAZ) rather than an error.
34 * Note that core registers cannot be BCR.
35 *
36 * In arc/cpu/ tcl files all registers are defined as core, non-BCR aux
37 * and BCR aux; the "add-reg" command adds them to three lists
38 * respectively: core_reg_descriptions, aux_reg_descriptions,
39 * bcr_reg_descriptions.
40 *
41 * Due to the specifics of accessing BCR/non-BCR registers there are two
42 * register caches:
43 * 1) core_and_aux_cache - includes registers described in the
44 * core_reg_descriptions and aux_reg_descriptions lists.
45 * Used during the save/restore context step.
46 * 2) bcr_cache - includes registers described in bcr_reg_descriptions.
47 * Currently used internally during the configure step.
48 */
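
/*
 * A minimal usage sketch of the two caches (illustrative only, not code the
 * driver itself runs, and assuming the tcl configuration defines these
 * registers): once arc_build_reg_cache() and arc_build_bcr_reg_cache() have
 * been called, a non-BCR aux register such as "pc" is found in
 * core_and_aux_cache, while a BCR such as "isa_config" lives in the chained
 * bcr_cache:
 *
 *	struct arc_common *arc = target_to_arc(target);
 *	struct reg *pc = arc_reg_get_by_name(arc->core_and_aux_cache, "pc", false);
 *	struct reg *isa = arc_reg_get_by_name(arc->bcr_cache, "isa_config", false);
 */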
49
50
51
52 void arc_reg_data_type_add(struct target *target,
53 struct arc_reg_data_type *data_type)
54 {
55 LOG_DEBUG("Adding %s reg_data_type", data_type->data_type.id);
56 struct arc_common *arc = target_to_arc(target);
57 assert(arc);
58
59 list_add_tail(&data_type->list, &arc->reg_data_types);
60 }
61
62 /**
63 * Private implementation of register_get_by_name() for ARC that
64 * does not skip registers that do not exist [yet]. Used in many places
65 * for iterating through registers and even for marking required registers as
66 * existing.
67 */
68 struct reg *arc_reg_get_by_name(struct reg_cache *first,
69 const char *name, bool search_all)
70 {
71 unsigned int i;
72 struct reg_cache *cache = first;
73
74 while (cache) {
75 for (i = 0; i < cache->num_regs; i++) {
76 if (!strcmp(cache->reg_list[i].name, name))
77 return &(cache->reg_list[i]);
78 }
79
80 if (search_all)
81 cache = cache->next;
82 else
83 break;
84 }
85
86 return NULL;
87 }
88
89 /**
90 * Reset internal states of caches. Must be called when entering debugging.
91 *
92 * @param target Target for which to reset caches states.
93 */
94 int arc_reset_caches_states(struct target *target)
95 {
96 struct arc_common *arc = target_to_arc(target);
97
98 LOG_DEBUG("Resetting internal variables of caches states");
99
100 /* Reset caches states. */
101 arc->dcache_flushed = false;
102 arc->l2cache_flushed = false;
103 arc->icache_invalidated = false;
104 arc->dcache_invalidated = false;
105 arc->l2cache_invalidated = false;
106
107 return ERROR_OK;
108 }
109
110 /* Initialize the arc_common structure, which is passed to the OpenOCD target instance */
111 static int arc_init_arch_info(struct target *target, struct arc_common *arc,
112 struct jtag_tap *tap)
113 {
114 arc->common_magic = ARC_COMMON_MAGIC;
115 target->arch_info = arc;
116
117 arc->jtag_info.tap = tap;
118
119 /* The only allowed ir_length is 4 for ARC jtag. */
120 if (tap->ir_length != 4) {
121 LOG_ERROR("ARC jtag instruction length should be equal to 4");
122 return ERROR_FAIL;
123 }
124
125 /* On most ARC targets there is a dcache, so we enable its flushing
126 * by default. If there is no dcache, there will be no error, just a slight
127 * performance penalty from unnecessary JTAG operations. */
128 arc->has_dcache = true;
129 arc->has_icache = true;
130 /* L2$ is not available in a target by default. */
131 arc->has_l2cache = false;
132 arc_reset_caches_states(target);
133
134 /* Add standard GDB data types */
135 INIT_LIST_HEAD(&arc->reg_data_types);
136 struct arc_reg_data_type *std_types = calloc(ARRAY_SIZE(standard_gdb_types),
137 sizeof(*std_types));
138
139 if (!std_types) {
140 LOG_ERROR("Unable to allocate memory");
141 return ERROR_FAIL;
142 }
143
144 for (unsigned int i = 0; i < ARRAY_SIZE(standard_gdb_types); i++) {
145 std_types[i].data_type.type = standard_gdb_types[i].type;
146 std_types[i].data_type.id = standard_gdb_types[i].id;
147 arc_reg_data_type_add(target, &(std_types[i]));
148 }
149
150 /* Fields related to target descriptions */
151 INIT_LIST_HEAD(&arc->core_reg_descriptions);
152 INIT_LIST_HEAD(&arc->aux_reg_descriptions);
153 INIT_LIST_HEAD(&arc->bcr_reg_descriptions);
154 arc->num_regs = 0;
155 arc->num_core_regs = 0;
156 arc->num_aux_regs = 0;
157 arc->num_bcr_regs = 0;
158 arc->last_general_reg = ULONG_MAX;
159 arc->pc_index_in_cache = ULONG_MAX;
160 arc->debug_index_in_cache = ULONG_MAX;
161
162 return ERROR_OK;
163 }
164
165 int arc_reg_add(struct target *target, struct arc_reg_desc *arc_reg,
166 const char * const type_name, const size_t type_name_len)
167 {
168 assert(target);
169 assert(arc_reg);
170
171 struct arc_common *arc = target_to_arc(target);
172 assert(arc);
173
174 /* Find register type */
175 {
176 struct arc_reg_data_type *type;
177 list_for_each_entry(type, &arc->reg_data_types, list)
178 if (!strncmp(type->data_type.id, type_name, type_name_len)) {
179 arc_reg->data_type = &(type->data_type);
180 break;
181 }
182
183 if (!arc_reg->data_type)
184 return ERROR_ARC_REGTYPE_NOT_FOUND;
185 }
186
187 if (arc_reg->is_core) {
188 list_add_tail(&arc_reg->list, &arc->core_reg_descriptions);
189 arc->num_core_regs += 1;
190 } else if (arc_reg->is_bcr) {
191 list_add_tail(&arc_reg->list, &arc->bcr_reg_descriptions);
192 arc->num_bcr_regs += 1;
193 } else {
194 list_add_tail(&arc_reg->list, &arc->aux_reg_descriptions);
195 arc->num_aux_regs += 1;
196 }
197 arc->num_regs += 1;
198
199 LOG_DEBUG(
200 "added register {name=%s, num=0x%x, type=%s%s%s%s}",
201 arc_reg->name, arc_reg->arch_num, arc_reg->data_type->id,
202 arc_reg->is_core ? ", core" : "", arc_reg->is_bcr ? ", bcr" : "",
203 arc_reg->is_general ? ", general" : ""
204 );
205
206 return ERROR_OK;
207 }
208
209 /* Reading core or aux register */
210 static int arc_get_register(struct reg *reg)
211 {
212 assert(reg);
213
214 struct arc_reg_desc *desc = reg->arch_info;
215 struct target *target = desc->target;
216 struct arc_common *arc = target_to_arc(target);
217
218 uint32_t value;
219
220 if (reg->valid) {
221 LOG_DEBUG("Get register (cached) gdb_num=%" PRIu32 ", name=%s, value=0x%" PRIx32,
222 reg->number, desc->name, target_buffer_get_u32(target, reg->value));
223 return ERROR_OK;
224 }
225
226 if (desc->is_core) {
227 /* Accessing R61/R62 registers causes a JTAG hang */
228 if (desc->arch_num == CORE_R61_NUM || desc->arch_num == CORE_R62_NUM) {
229 LOG_ERROR("It is forbidden to read core registers 61 and 62.");
230 return ERROR_FAIL;
231 }
232 CHECK_RETVAL(arc_jtag_read_core_reg_one(&arc->jtag_info, desc->arch_num,
233 &value));
234 } else {
235 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, desc->arch_num,
236 &value));
237 }
238
239 target_buffer_set_u32(target, reg->value, value);
240
241 /* If the target is not halted, all register reads should be uncached. */
242 if (target->state == TARGET_HALTED)
243 reg->valid = true;
244 else
245 reg->valid = false;
246
247 reg->dirty = false;
248
249 LOG_DEBUG("Get register gdb_num=%" PRIu32 ", name=%s, value=0x%" PRIx32,
250 reg->number, desc->name, value);
251
252
253 return ERROR_OK;
254 }
255
256 /* Writing core or aux register */
257 static int arc_set_register(struct reg *reg, uint8_t *buf)
258 {
259 struct arc_reg_desc *desc = reg->arch_info;
260 struct target *target = desc->target;
261 uint32_t value = target_buffer_get_u32(target, buf);
262 /* Unlike "get" function "set" is supported only if target
263 * is in halt mode. Async writes are not supported yet. */
264 if (target->state != TARGET_HALTED)
265 return ERROR_TARGET_NOT_HALTED;
266
267 /* Accessing R61/R62 registers causes a JTAG hang */
268 if (desc->is_core && (desc->arch_num == CORE_R61_NUM ||
269 desc->arch_num == CORE_R62_NUM)) {
270 LOG_ERROR("It is forbidden to write core registers 61 and 62.");
271 return ERROR_FAIL;
272 }
273 target_buffer_set_u32(target, reg->value, value);
274
275 LOG_DEBUG("Set register gdb_num=%" PRIu32 ", name=%s, value=0x%08" PRIx32,
276 reg->number, desc->name, value);
277
278 reg->valid = true;
279 reg->dirty = true;
280
281 return ERROR_OK;
282 }
283
284 const struct reg_arch_type arc_reg_type = {
285 .get = arc_get_register,
286 .set = arc_set_register,
287 };
288
289 /* GDB register groups. For now we support only general and "empty" */
290 static const char * const reg_group_general = "general";
291 static const char * const reg_group_other = "";
292
293 /* Common code to initialize `struct reg` for different registers: core, aux, bcr. */
294 static int arc_init_reg(struct target *target, struct reg *reg,
295 struct arc_reg_desc *reg_desc, unsigned long number)
296 {
297 assert(target);
298 assert(reg);
299 assert(reg_desc);
300
301 struct arc_common *arc = target_to_arc(target);
302
303 /* Initialize struct reg */
304 reg->name = reg_desc->name;
305 reg->size = 32; /* All registers in ARC are 32-bit */
306 reg->value = &reg_desc->reg_value;
307 reg->type = &arc_reg_type;
308 reg->arch_info = reg_desc;
309 reg->caller_save = true; /* @todo should be configurable. */
310 reg->reg_data_type = reg_desc->data_type;
311 reg->feature = &reg_desc->feature;
312
313 reg->feature->name = reg_desc->gdb_xml_feature;
314
315 /* reg->number is used by OpenOCD as the value for @regnum. Thus, when setting
316 * the value of a register, GDB will use it as the register number in the
317 * P-packet. The OpenOCD gdbserver will then use the register number from the
318 * P-packet as an array index into the reg_list returned by
319 * arc_get_gdb_reg_list. So to ensure that registers are assigned
320 * correctly it is required to either sort registers in
321 * arc_get_gdb_reg_list or to assign numbers sequentially here,
322 * according to how registers will be sorted in
323 * arc_get_gdb_reg_list. The second option is much simpler. */
324 reg->number = number;
325
326 if (reg_desc->is_general) {
327 arc->last_general_reg = reg->number;
328 reg->group = reg_group_general;
329 } else {
330 reg->group = reg_group_other;
331 }
332
333 return ERROR_OK;
334 }
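
/*
 * Numbering example for the scheme described above (the register set is an
 * assumption, used only to illustrate): if a configuration defines core
 * registers r0..r63 followed by aux registers "pc" and "status32",
 * arc_build_reg_cache() below assigns numbers 0..63 to the core registers,
 * 64 to "pc" and 65 to "status32". A GDB P-packet addressing register number
 * 0x41 (== 65) then indexes "status32" directly in the list returned by
 * arc_get_gdb_reg_list().
 */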
335
336 /* Building aux/core reg_cache */
337 static int arc_build_reg_cache(struct target *target)
338 {
339 unsigned long i = 0;
340 struct arc_reg_desc *reg_desc;
341 /* get pointers to arch-specific information */
342 struct arc_common *arc = target_to_arc(target);
343 const unsigned long num_regs = arc->num_core_regs + arc->num_aux_regs;
344 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
345 struct reg_cache *cache = calloc(1, sizeof(*cache));
346 struct reg *reg_list = calloc(num_regs, sizeof(*reg_list));
347
348 if (!cache || !reg_list) {
349 LOG_ERROR("Not enough memory");
350 goto fail;
351 }
352
353 /* Build the process context cache */
354 cache->name = "arc registers";
355 cache->next = NULL;
356 cache->reg_list = reg_list;
357 cache->num_regs = num_regs;
358 arc->core_and_aux_cache = cache;
359 (*cache_p) = cache;
360
361 if (list_empty(&arc->core_reg_descriptions)) {
362 LOG_ERROR("No core registers were defined");
363 goto fail;
364 }
365
366 list_for_each_entry(reg_desc, &arc->core_reg_descriptions, list) {
367 CHECK_RETVAL(arc_init_reg(target, &reg_list[i], reg_desc, i));
368
369 LOG_DEBUG("reg n=%3li name=%3s group=%s feature=%s", i,
370 reg_list[i].name, reg_list[i].group,
371 reg_list[i].feature->name);
372
373 i += 1;
374 }
375
376 if (list_empty(&arc->aux_reg_descriptions)) {
377 LOG_ERROR("No aux registers were defined");
378 goto fail;
379 }
380
381 list_for_each_entry(reg_desc, &arc->aux_reg_descriptions, list) {
382 CHECK_RETVAL(arc_init_reg(target, &reg_list[i], reg_desc, i));
383
384 LOG_DEBUG("reg n=%3li name=%3s group=%s feature=%s", i,
385 reg_list[i].name, reg_list[i].group,
386 reg_list[i].feature->name);
387
388 /* PC and DEBUG are essential so we search for them. */
389 if (!strcmp("pc", reg_desc->name)) {
390 if (arc->pc_index_in_cache != ULONG_MAX) {
391 LOG_ERROR("Double definition of PC in configuration");
392 goto fail;
393 }
394 arc->pc_index_in_cache = i;
395 } else if (!strcmp("debug", reg_desc->name)) {
396 if (arc->debug_index_in_cache != ULONG_MAX) {
397 LOG_ERROR("Double definition of DEBUG in configuration");
398 goto fail;
399 }
400 arc->debug_index_in_cache = i;
401 }
402 i += 1;
403 }
404
405 if (arc->pc_index_in_cache == ULONG_MAX
406 || arc->debug_index_in_cache == ULONG_MAX) {
407 LOG_ERROR("`pc' and `debug' registers must be present in target description.");
408 goto fail;
409 }
410
411 assert(i == (arc->num_core_regs + arc->num_aux_regs));
412
413 arc->core_aux_cache_built = true;
414
415 return ERROR_OK;
416
417 fail:
418 free(cache);
419 free(reg_list);
420
421 return ERROR_FAIL;
422 }
423
424 /* Build bcr reg_cache.
425 * This function must be called only after arc_build_reg_cache */
426 static int arc_build_bcr_reg_cache(struct target *target)
427 {
428 /* get pointers to arch-specific information */
429 struct arc_common *arc = target_to_arc(target);
430 const unsigned long num_regs = arc->num_bcr_regs;
431 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
432 struct reg_cache *cache = malloc(sizeof(*cache));
433 struct reg *reg_list = calloc(num_regs, sizeof(*reg_list));
434
435 struct arc_reg_desc *reg_desc;
436 unsigned long i = 0;
437 unsigned long gdb_regnum = arc->core_and_aux_cache->num_regs;
438
439 if (!cache || !reg_list) {
440 LOG_ERROR("Unable to allocate memory");
441 goto fail;
442 }
443
444 /* Build the process context cache */
445 cache->name = "arc.bcr";
446 cache->next = NULL;
447 cache->reg_list = reg_list;
448 cache->num_regs = num_regs;
449 arc->bcr_cache = cache;
450 (*cache_p) = cache;
451
452 if (list_empty(&arc->bcr_reg_descriptions)) {
453 LOG_ERROR("No BCR registers are defined");
454 goto fail;
455 }
456
457 list_for_each_entry(reg_desc, &arc->bcr_reg_descriptions, list) {
458 CHECK_RETVAL(arc_init_reg(target, &reg_list[i], reg_desc, gdb_regnum));
459 /* BCRs always exist semantically; they just read as zero if there is
460 * no real register. */
461 reg_list[i].exist = true;
462
463 LOG_DEBUG("reg n=%3li name=%3s group=%s feature=%s", i,
464 reg_list[i].name, reg_list[i].group,
465 reg_list[i].feature->name);
466 i += 1;
467 gdb_regnum += 1;
468 }
469
470 assert(i == arc->num_bcr_regs);
471
472 arc->bcr_cache_built = true;
473
474
475 return ERROR_OK;
476 fail:
477 free(cache);
478 free(reg_list);
479
480 return ERROR_FAIL;
481 }
482
483
484 static int arc_get_gdb_reg_list(struct target *target, struct reg **reg_list[],
485 int *reg_list_size, enum target_register_class reg_class)
486 {
487 assert(target->reg_cache);
488 struct arc_common *arc = target_to_arc(target);
489
490 /* get pointers to arch-specific information storage */
491 *reg_list_size = arc->num_regs;
492 *reg_list = calloc(*reg_list_size, sizeof(struct reg *));
493
494 if (!*reg_list) {
495 LOG_ERROR("Unable to allocate memory");
496 return ERROR_FAIL;
497 }
498
499 /* OpenOCD gdb_server API seems to be inconsistent here: when it generates
500 * XML tdesc it filters out !exist registers, however when creating a
501 * g-packet it doesn't do so. REG_CLASS_ALL is used in the first case, and
502 * REG_CLASS_GENERAL in the latter. Due to this we have to filter
503 * out !exist registers for "general", but not for "all". Attempting to filter out
504 * !exist for "all" as well would cause a failed check in the OpenOCD GDB
505 * server. */
506 if (reg_class == REG_CLASS_ALL) {
507 unsigned long i = 0;
508 struct reg_cache *reg_cache = target->reg_cache;
509 while (reg_cache) {
510 for (unsigned j = 0; j < reg_cache->num_regs; j++, i++)
511 (*reg_list)[i] = &reg_cache->reg_list[j];
512 reg_cache = reg_cache->next;
513 }
514 assert(i == arc->num_regs);
515 LOG_DEBUG("REG_CLASS_ALL: number of regs=%i", *reg_list_size);
516 } else {
517 unsigned long i = 0;
518 unsigned long gdb_reg_number = 0;
519 struct reg_cache *reg_cache = target->reg_cache;
520 while (reg_cache) {
521 for (unsigned j = 0;
522 j < reg_cache->num_regs && gdb_reg_number <= arc->last_general_reg;
523 j++) {
524 if (reg_cache->reg_list[j].exist) {
525 (*reg_list)[i] = &reg_cache->reg_list[j];
526 i++;
527 }
528 gdb_reg_number += 1;
529 }
530 reg_cache = reg_cache->next;
531 }
532 *reg_list_size = i;
533 LOG_DEBUG("REG_CLASS_GENERAL: number of regs=%i", *reg_list_size);
534 }
535
536 return ERROR_OK;
537 }
538
539 /* Reading field of struct_type register */
540 int arc_reg_get_field(struct target *target, const char *reg_name,
541 const char *field_name, uint32_t *value_ptr)
542 {
543 struct reg_data_type_struct_field *field;
544
545 LOG_DEBUG("getting register field (reg_name=%s, field_name=%s)", reg_name, field_name);
546
547 /* Get register */
548 struct reg *reg = arc_reg_get_by_name(target->reg_cache, reg_name, true);
549
550 if (!reg) {
551 LOG_ERROR("Requested register `%s' doesn't exist.", reg_name);
552 return ERROR_ARC_REGISTER_NOT_FOUND;
553 }
554
555 if (reg->reg_data_type->type != REG_TYPE_ARCH_DEFINED
556 || reg->reg_data_type->type_class != REG_TYPE_CLASS_STRUCT)
557 return ERROR_ARC_REGISTER_IS_NOT_STRUCT;
558
559 /* Get field in a register */
560 struct reg_data_type_struct *reg_struct =
561 reg->reg_data_type->reg_type_struct;
562 for (field = reg_struct->fields;
563 field;
564 field = field->next) {
565 if (!strcmp(field->name, field_name))
566 break;
567 }
568
569 if (!field)
570 return ERROR_ARC_REGISTER_FIELD_NOT_FOUND;
571
572 if (!field->use_bitfields)
573 return ERROR_ARC_FIELD_IS_NOT_BITFIELD;
574
575 if (!reg->valid)
576 CHECK_RETVAL(reg->type->get(reg));
577
578 /* First do endianness-safe read of register value
579 * then convert it to binary buffer for further
580 * field extraction */
581
582 *value_ptr = buf_get_u32(reg->value, field->bitfield->start,
583 field->bitfield->end - field->bitfield->start + 1);
584
585 return ERROR_OK;
586 }
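
/*
 * Usage sketch, mirroring what arc_configure_dccm() below does (the register
 * and field names come from the tcl description files and are assumed to be
 * defined there):
 *
 *	uint32_t version;
 *	CHECK_RETVAL(arc_reg_get_field(target, "dccm_build", "version", &version));
 */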
587
588 static int arc_get_register_value(struct target *target, const char *reg_name,
589 uint32_t *value_ptr)
590 {
591 LOG_DEBUG("reg_name=%s", reg_name);
592
593 struct reg *reg = arc_reg_get_by_name(target->reg_cache, reg_name, true);
594
595 if (!reg)
596 return ERROR_ARC_REGISTER_NOT_FOUND;
597
598 if (!reg->valid)
599 CHECK_RETVAL(reg->type->get(reg));
600
601 *value_ptr = target_buffer_get_u32(target, reg->value);
602
603 return ERROR_OK;
604 }
605
606 static int arc_set_register_value(struct target *target, const char *reg_name,
607 uint32_t value)
608 {
609 LOG_DEBUG("reg_name=%s value=0x%08" PRIx32, reg_name, value);
610
611 if (!(target && reg_name)) {
612 LOG_ERROR("Arguments cannot be NULL.");
613 return ERROR_FAIL;
614 }
615
616 struct reg *reg = arc_reg_get_by_name(target->reg_cache, reg_name, true);
617
618 if (!reg)
619 return ERROR_ARC_REGISTER_NOT_FOUND;
620
621 uint8_t value_buf[4];
622 buf_set_u32(value_buf, 0, 32, value);
623 CHECK_RETVAL(reg->type->set(reg, value_buf));
624
625 return ERROR_OK;
626 }
627
628 /* Configure DCCMs */
629 static int arc_configure_dccm(struct target *target)
630 {
631 struct arc_common *arc = target_to_arc(target);
632
633 uint32_t dccm_build_version, dccm_build_size0, dccm_build_size1;
634 CHECK_RETVAL(arc_reg_get_field(target, "dccm_build", "version",
635 &dccm_build_version));
636 CHECK_RETVAL(arc_reg_get_field(target, "dccm_build", "size0",
637 &dccm_build_size0));
638 CHECK_RETVAL(arc_reg_get_field(target, "dccm_build", "size1",
639 &dccm_build_size1));
640 /* There is no support for a configurable number of cycles yet,
641 * so there is no difference between v3 and v4. */
642 if ((dccm_build_version == 3 || dccm_build_version == 4) && dccm_build_size0 > 0) {
643 CHECK_RETVAL(arc_get_register_value(target, "aux_dccm", &(arc->dccm_start)));
644 uint32_t dccm_size = 0x100;
645 dccm_size <<= dccm_build_size0;
646 if (dccm_build_size0 == 0xF)
647 dccm_size <<= dccm_build_size1;
648 arc->dccm_end = arc->dccm_start + dccm_size;
649 LOG_DEBUG("DCCM detected start=0x%" PRIx32 " end=0x%" PRIx32,
650 arc->dccm_start, arc->dccm_end);
651
652 }
653 return ERROR_OK;
654 }
655
656
657 /* Configure ICCMs */
658
659 static int arc_configure_iccm(struct target *target)
660 {
661 struct arc_common *arc = target_to_arc(target);
662
663 /* ICCM0 */
664 uint32_t iccm_build_version, iccm_build_size00, iccm_build_size01;
665 uint32_t aux_iccm = 0;
666 CHECK_RETVAL(arc_reg_get_field(target, "iccm_build", "version",
667 &iccm_build_version));
668 CHECK_RETVAL(arc_reg_get_field(target, "iccm_build", "iccm0_size0",
669 &iccm_build_size00));
670 CHECK_RETVAL(arc_reg_get_field(target, "iccm_build", "iccm0_size1",
671 &iccm_build_size01));
672 if (iccm_build_version == 4 && iccm_build_size00 > 0) {
673 CHECK_RETVAL(arc_get_register_value(target, "aux_iccm", &aux_iccm));
674 uint32_t iccm0_size = 0x100;
675 iccm0_size <<= iccm_build_size00;
676 if (iccm_build_size00 == 0xF)
677 iccm0_size <<= iccm_build_size01;
678 /* iccm0 start is located in highest 4 bits of aux_iccm */
679 arc->iccm0_start = aux_iccm & 0xF0000000;
680 arc->iccm0_end = arc->iccm0_start + iccm0_size;
681 LOG_DEBUG("ICCM0 detected start=0x%" PRIx32 " end=0x%" PRIx32,
682 arc->iccm0_start, arc->iccm0_end);
683 }
684
685 /* ICCM1 */
686 uint32_t iccm_build_size10, iccm_build_size11;
687 CHECK_RETVAL(arc_reg_get_field(target, "iccm_build", "iccm1_size0",
688 &iccm_build_size10));
689 CHECK_RETVAL(arc_reg_get_field(target, "iccm_build", "iccm1_size1",
690 &iccm_build_size11));
691 if (iccm_build_version == 4 && iccm_build_size10 > 0) {
692 /* Use value read for ICCM0 */
693 if (!aux_iccm)
694 CHECK_RETVAL(arc_get_register_value(target, "aux_iccm", &aux_iccm));
695 uint32_t iccm1_size = 0x100;
696 iccm1_size <<= iccm_build_size10;
697 if (iccm_build_size10 == 0xF)
698 iccm1_size <<= iccm_build_size11;
699 arc->iccm1_start = aux_iccm & 0x0F000000;
700 arc->iccm1_end = arc->iccm1_start + iccm1_size;
701 LOG_DEBUG("ICCM1 detected start=0x%" PRIx32 " end=0x%" PRIx32,
702 arc->iccm1_start, arc->iccm1_end);
703 }
704 return ERROR_OK;
705 }
706
707 /* Configure some core features, depending on BCRs. */
708 static int arc_configure(struct target *target)
709 {
710 LOG_DEBUG("Configuring ARC ICCM and DCCM");
711
712 /* Configuring DCCM if DCCM_BUILD and AUX_DCCM are known registers. */
713 if (arc_reg_get_by_name(target->reg_cache, "dccm_build", true) &&
714 arc_reg_get_by_name(target->reg_cache, "aux_dccm", true))
715 CHECK_RETVAL(arc_configure_dccm(target));
716
717 /* Configuring ICCM if ICCM_BUILD and AUX_ICCM are known registers. */
718 if (arc_reg_get_by_name(target->reg_cache, "iccm_build", true) &&
719 arc_reg_get_by_name(target->reg_cache, "aux_iccm", true))
720 CHECK_RETVAL(arc_configure_iccm(target));
721
722 return ERROR_OK;
723 }
724
725 /* arc_examine is the examine function used for all ARC targets. */
726 static int arc_examine(struct target *target)
727 {
728 uint32_t status;
729 struct arc_common *arc = target_to_arc(target);
730
731 CHECK_RETVAL(arc_jtag_startup(&arc->jtag_info));
732
733 if (!target_was_examined(target)) {
734 CHECK_RETVAL(arc_jtag_status(&arc->jtag_info, &status));
735 if (status & ARC_JTAG_STAT_RU)
736 target->state = TARGET_RUNNING;
737 else
738 target->state = TARGET_HALTED;
739
740 /* Read BCRs and configure optional registers. */
741 CHECK_RETVAL(arc_configure(target));
742
743 target_set_examined(target);
744 }
745
746 return ERROR_OK;
747 }
748
749 static int arc_halt(struct target *target)
750 {
751 uint32_t value, irq_state;
752 struct arc_common *arc = target_to_arc(target);
753
754 LOG_DEBUG("target->state: %s", target_state_name(target));
755
756 if (target->state == TARGET_HALTED) {
757 LOG_DEBUG("target was already halted");
758 return ERROR_OK;
759 }
760
761 if (target->state == TARGET_UNKNOWN)
762 LOG_WARNING("target was in unknown state when halt was requested");
763
764 if (target->state == TARGET_RESET) {
765 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
766 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
767 return ERROR_TARGET_FAILURE;
768 } else {
769 target->debug_reason = DBG_REASON_DBGRQ;
770 }
771 }
772
773 /* Break (stop) processor.
774 * Do read-modify-write sequence, or DEBUG.UB will be reset unintentionally.
775 * We do not use the arc_get/set_core_reg functions here because they imply
776 * that the processor is already halted. */
777 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_DEBUG_REG, &value));
778 value |= SET_CORE_FORCE_HALT; /* set the HALT bit */
779 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DEBUG_REG, value));
780 alive_sleep(1);
781
782 /* Save current IRQ state */
783 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, &irq_state));
784
785 if (irq_state & AUX_STATUS32_REG_IE_BIT)
786 arc->irq_state = 1;
787 else
788 arc->irq_state = 0;
789
790 /* update state and notify gdb*/
791 target->state = TARGET_HALTED;
792 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
793
794 /* some more debug information */
795 if (debug_level >= LOG_LVL_DEBUG) {
796 LOG_DEBUG("core stopped (halted) DEGUB-REG: 0x%08" PRIx32, value);
797 CHECK_RETVAL(arc_get_register_value(target, "status32", &value));
798 LOG_DEBUG("core STATUS32: 0x%08" PRIx32, value);
799 }
800
801 return ERROR_OK;
802 }
803
804 /**
805 * Read registers that are used in GDB g-packet. We don't read them one-by-one,
806 * but do that in one batch operation to improve speed. Calls to JTAG layer are
807 * expensive so it is better to make one big call that reads all necessary
808 * registers, instead of many calls, one per register.
809 */
810 static int arc_save_context(struct target *target)
811 {
812 int retval = ERROR_OK;
813 unsigned int i;
814 struct arc_common *arc = target_to_arc(target);
815 struct reg *reg_list = arc->core_and_aux_cache->reg_list;
816
817 LOG_DEBUG("Saving aux and core registers values");
818 assert(reg_list);
819
820 /* It is assumed that there is at least one AUX register in the list, for
821 * example PC. */
822 const uint32_t core_regs_size = arc->num_core_regs * sizeof(uint32_t);
823 /* last_general_reg is inclusive number. To get count of registers it is
824 * required to do +1. */
825 const uint32_t regs_to_scan =
826 MIN(arc->last_general_reg + 1, arc->num_regs);
827 const uint32_t aux_regs_size = arc->num_aux_regs * sizeof(uint32_t);
828 uint32_t *core_values = malloc(core_regs_size);
829 uint32_t *aux_values = malloc(aux_regs_size);
830 uint32_t *core_addrs = malloc(core_regs_size);
831 uint32_t *aux_addrs = malloc(aux_regs_size);
832 unsigned int core_cnt = 0;
833 unsigned int aux_cnt = 0;
834
835 if (!core_values || !core_addrs || !aux_values || !aux_addrs) {
836 LOG_ERROR("Unable to allocate memory");
837 retval = ERROR_FAIL;
838 goto exit;
839 }
840
841 memset(core_values, 0xff, core_regs_size);
842 memset(core_addrs, 0xff, core_regs_size);
843 memset(aux_values, 0xff, aux_regs_size);
844 memset(aux_addrs, 0xff, aux_regs_size);
845
846 for (i = 0; i < MIN(arc->num_core_regs, regs_to_scan); i++) {
847 struct reg *reg = &(reg_list[i]);
848 struct arc_reg_desc *arc_reg = reg->arch_info;
849 if (!reg->valid && reg->exist) {
850 core_addrs[core_cnt] = arc_reg->arch_num;
851 core_cnt += 1;
852 }
853 }
854
855 for (i = arc->num_core_regs; i < regs_to_scan; i++) {
856 struct reg *reg = &(reg_list[i]);
857 struct arc_reg_desc *arc_reg = reg->arch_info;
858 if (!reg->valid && reg->exist) {
859 aux_addrs[aux_cnt] = arc_reg->arch_num;
860 aux_cnt += 1;
861 }
862 }
863
864 /* Read data from target. */
865 if (core_cnt > 0) {
866 retval = arc_jtag_read_core_reg(&arc->jtag_info, core_addrs, core_cnt, core_values);
867 if (ERROR_OK != retval) {
868 LOG_ERROR("Attempt to read core registers failed.");
869 retval = ERROR_FAIL;
870 goto exit;
871 }
872 }
873 if (aux_cnt > 0) {
874 retval = arc_jtag_read_aux_reg(&arc->jtag_info, aux_addrs, aux_cnt, aux_values);
875 if (ERROR_OK != retval) {
876 LOG_ERROR("Attempt to read aux registers failed.");
877 retval = ERROR_FAIL;
878 goto exit;
879 }
880 }
881
882 /* Parse core regs */
883 core_cnt = 0;
884 for (i = 0; i < MIN(arc->num_core_regs, regs_to_scan); i++) {
885 struct reg *reg = &(reg_list[i]);
886 struct arc_reg_desc *arc_reg = reg->arch_info;
887 if (!reg->valid && reg->exist) {
888 target_buffer_set_u32(target, reg->value, core_values[core_cnt]);
889 core_cnt += 1;
890 reg->valid = true;
891 reg->dirty = false;
892 LOG_DEBUG("Get core register regnum=%" PRIu32 ", name=%s, value=0x%08" PRIx32,
893 i, arc_reg->name, core_values[core_cnt - 1]);
894 }
895 }
896
897 /* Parse aux regs */
898 aux_cnt = 0;
899 for (i = arc->num_core_regs; i < regs_to_scan; i++) {
900 struct reg *reg = &(reg_list[i]);
901 struct arc_reg_desc *arc_reg = reg->arch_info;
902 if (!reg->valid && reg->exist) {
903 target_buffer_set_u32(target, reg->value, aux_values[aux_cnt]);
904 aux_cnt += 1;
905 reg->valid = true;
906 reg->dirty = false;
907 LOG_DEBUG("Get aux register regnum=%" PRIu32 ", name=%s, value=0x%08" PRIx32,
908 i, arc_reg->name, aux_values[aux_cnt - 1]);
909 }
910 }
911
912 exit:
913 free(core_values);
914 free(core_addrs);
915 free(aux_values);
916 free(aux_addrs);
917
918 return retval;
919 }
920
921 /**
922 * Finds an actionpoint that triggered last actionpoint event, as specified by
923 * DEBUG.ASR.
924 *
925 * @param actionpoint Pointer to be set to last active actionpoint. Pointer
926 * will be set to NULL if DEBUG.AH is 0.
927 */
928 static int get_current_actionpoint(struct target *target,
929 struct arc_actionpoint **actionpoint)
930 {
931 assert(target != NULL);
932 assert(actionpoint != NULL);
933
934 uint32_t debug_ah;
935 /* Check if actionpoint caused halt */
936 CHECK_RETVAL(arc_reg_get_field(target, "debug", "ah",
937 &debug_ah));
938
939 if (debug_ah) {
940 struct arc_common *arc = target_to_arc(target);
941 unsigned int ap;
942 uint32_t debug_asr;
943 CHECK_RETVAL(arc_reg_get_field(target, "debug",
944 "asr", &debug_asr));
945
946 for (ap = 0; debug_asr > 1; debug_asr >>= 1)
947 ap += 1;
948
949 assert(ap < arc->actionpoints_num);
950
951 *actionpoint = &(arc->actionpoints_list[ap]);
952 } else {
953 *actionpoint = NULL;
954 }
955
956 return ERROR_OK;
957 }
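
/*
 * Worked example for the ASR decoding loop above (the value is illustrative):
 * with DEBUG.ASR == 0b0100 the loop shifts twice before the value drops to 1,
 * so ap == 2 and the third entry of actionpoints_list is reported as the
 * trigger.
 */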
958
959 static int arc_examine_debug_reason(struct target *target)
960 {
961 uint32_t debug_bh;
962
963 /* Only check for the reason if we don't know it already. */
964 /* By the way, after a single step the core is not marked as halted at this point, so
965 * reading memory to get the current instruction wouldn't work anyway. */
966 if (target->debug_reason == DBG_REASON_DBGRQ ||
967 target->debug_reason == DBG_REASON_SINGLESTEP) {
968 return ERROR_OK;
969 }
970
971 CHECK_RETVAL(arc_reg_get_field(target, "debug", "bh",
972 &debug_bh));
973
974 if (debug_bh) {
975 /* DEBUG.BH is set if core halted due to BRK instruction. */
976 target->debug_reason = DBG_REASON_BREAKPOINT;
977 } else {
978 struct arc_actionpoint *actionpoint = NULL;
979 CHECK_RETVAL(get_current_actionpoint(target, &actionpoint));
980
981 if (actionpoint != NULL) {
982 if (!actionpoint->used)
983 LOG_WARNING("Target halted by an unused actionpoint.");
984
985 if (actionpoint->type == ARC_AP_BREAKPOINT)
986 target->debug_reason = DBG_REASON_BREAKPOINT;
987 else if (actionpoint->type == ARC_AP_WATCHPOINT)
988 target->debug_reason = DBG_REASON_WATCHPOINT;
989 else
990 LOG_WARNING("Unknown type of actionpoint.");
991 }
992 }
993
994 return ERROR_OK;
995 }
996
997 static int arc_debug_entry(struct target *target)
998 {
999 CHECK_RETVAL(arc_save_context(target));
1000
1001 /* TODO: reset internal indicators of caches states, otherwise D$/I$
1002 * will not be flushed/invalidated when required. */
1003 CHECK_RETVAL(arc_reset_caches_states(target));
1004 CHECK_RETVAL(arc_examine_debug_reason(target));
1005
1006 return ERROR_OK;
1007 }
1008
1009 static int arc_poll(struct target *target)
1010 {
1011 uint32_t status, value;
1012 struct arc_common *arc = target_to_arc(target);
1013
1014 /* gdb calls continuously through this arc_poll() function */
1015 CHECK_RETVAL(arc_jtag_status(&arc->jtag_info, &status));
1016
1017 /* check for processor halted */
1018 if (status & ARC_JTAG_STAT_RU) {
1019 if (target->state != TARGET_RUNNING) {
1020 LOG_WARNING("target is still running!");
1021 target->state = TARGET_RUNNING;
1022 }
1023 return ERROR_OK;
1024 }
1025 /* In some cases the JTAG status register indicates that the
1026 * processor is in halt mode, but the processor is still running.
1027 * We check the halt bit of the AUX STATUS32 register to set the correct state. */
1028 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_RESET)) {
1029 CHECK_RETVAL(arc_get_register_value(target, "status32", &value));
1030 if (value & AUX_STATUS32_REG_HALT_BIT) {
1031 LOG_DEBUG("ARC core in halt or reset state.");
1032 /* Save context if target was not in reset state */
1033 if (target->state == TARGET_RUNNING)
1034 CHECK_RETVAL(arc_debug_entry(target));
1035 target->state = TARGET_HALTED;
1036 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
1037 } else {
1038 LOG_DEBUG("Discrepancy of STATUS32[0] HALT bit and ARC_JTAG_STAT_RU, "
1039 "target is still running");
1040 }
1041
1042 } else if (target->state == TARGET_DEBUG_RUNNING) {
1043
1044 target->state = TARGET_HALTED;
1045 LOG_DEBUG("ARC core is in debug running mode");
1046
1047 CHECK_RETVAL(arc_debug_entry(target));
1048
1049 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED));
1050 }
1051
1052 return ERROR_OK;
1053 }
1054
1055 static int arc_assert_reset(struct target *target)
1056 {
1057 struct arc_common *arc = target_to_arc(target);
1058 enum reset_types jtag_reset_config = jtag_get_reset_config();
1059 bool srst_asserted = false;
1060
1061 LOG_DEBUG("target->state: %s", target_state_name(target));
1062
1063 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1064 /* allow scripts to override the reset event */
1065
1066 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1067 register_cache_invalidate(arc->core_and_aux_cache);
1068 /* An ARC target might be in halt state after reset, so
1069 * if script requested processor to resume, then it must
1070 * be manually started to ensure that this request
1071 * is satisfied. */
1072 if (target->state == TARGET_HALTED && !target->reset_halt) {
1073 /* Resume the target and continue from the current
1074 * PC register value. */
1075 LOG_DEBUG("Starting CPU execution after reset");
1076 CHECK_RETVAL(target_resume(target, 1, 0, 0, 0));
1077 }
1078 target->state = TARGET_RESET;
1079
1080 return ERROR_OK;
1081 }
1082
1083 /* some cores support connecting while srst is asserted
1084 * use that mode if it has been configured */
1085 if (!(jtag_reset_config & RESET_SRST_PULLS_TRST) &&
1086 (jtag_reset_config & RESET_SRST_NO_GATING)) {
1087 jtag_add_reset(0, 1);
1088 srst_asserted = true;
1089 }
1090
1091 if (jtag_reset_config & RESET_HAS_SRST) {
1092 /* should issue a srst only, but we may have to assert trst as well */
1093 if (jtag_reset_config & RESET_SRST_PULLS_TRST)
1094 jtag_add_reset(1, 1);
1095 else if (!srst_asserted)
1096 jtag_add_reset(0, 1);
1097 }
1098
1099 target->state = TARGET_RESET;
1100 jtag_add_sleep(50000);
1101
1102 register_cache_invalidate(arc->core_and_aux_cache);
1103
1104 if (target->reset_halt)
1105 CHECK_RETVAL(target_halt(target));
1106
1107 return ERROR_OK;
1108 }
1109
1110 static int arc_deassert_reset(struct target *target)
1111 {
1112 LOG_DEBUG("target->state: %s", target_state_name(target));
1113
1114 /* deassert reset lines */
1115 jtag_add_reset(0, 0);
1116
1117 return ERROR_OK;
1118 }
1119
1120 static int arc_arch_state(struct target *target)
1121 {
1122 uint32_t pc_value;
1123
1124 if (debug_level < LOG_LVL_DEBUG)
1125 return ERROR_OK;
1126
1127 CHECK_RETVAL(arc_get_register_value(target, "pc", &pc_value));
1128
1129 LOG_DEBUG("target state: %s; PC at: 0x%08" PRIx32,
1130 target_state_name(target),
1131 pc_value);
1132
1133 return ERROR_OK;
1134 }
1135
1136 /**
1137 * See arc_save_context() for reason why we want to dump all regs at once.
1138 * This however means that if there are dependencies between registers they
1139 * will not be observable until the target is resumed.
1140 */
1141 static int arc_restore_context(struct target *target)
1142 {
1143 int retval = ERROR_OK;
1144 unsigned int i;
1145 struct arc_common *arc = target_to_arc(target);
1146 struct reg *reg_list = arc->core_and_aux_cache->reg_list;
1147
1148 LOG_DEBUG("Restoring registers values");
1149 assert(reg_list);
1150
1151 const uint32_t core_regs_size = arc->num_core_regs * sizeof(uint32_t);
1152 const uint32_t aux_regs_size = arc->num_aux_regs * sizeof(uint32_t);
1153 uint32_t *core_values = malloc(core_regs_size);
1154 uint32_t *aux_values = malloc(aux_regs_size);
1155 uint32_t *core_addrs = malloc(core_regs_size);
1156 uint32_t *aux_addrs = malloc(aux_regs_size);
1157 unsigned int core_cnt = 0;
1158 unsigned int aux_cnt = 0;
1159
1160 if (!core_values || !core_addrs || !aux_values || !aux_addrs) {
1161 LOG_ERROR("Unable to allocate memory");
1162 retval = ERROR_FAIL;
1163 goto exit;
1164 }
1165
1166 memset(core_values, 0xff, core_regs_size);
1167 memset(core_addrs, 0xff, core_regs_size);
1168 memset(aux_values, 0xff, aux_regs_size);
1169 memset(aux_addrs, 0xff, aux_regs_size);
1170
1171 for (i = 0; i < arc->num_core_regs; i++) {
1172 struct reg *reg = &(reg_list[i]);
1173 struct arc_reg_desc *arc_reg = reg->arch_info;
1174 if (reg->valid && reg->exist && reg->dirty) {
1175 LOG_DEBUG("Will write regnum=%u", i);
1176 core_addrs[core_cnt] = arc_reg->arch_num;
1177 core_values[core_cnt] = target_buffer_get_u32(target, reg->value);
1178 core_cnt += 1;
1179 }
1180 }
1181
1182 for (i = 0; i < arc->num_aux_regs; i++) {
1183 struct reg *reg = &(reg_list[arc->num_core_regs + i]);
1184 struct arc_reg_desc *arc_reg = reg->arch_info;
1185 if (reg->valid && reg->exist && reg->dirty) {
1186 LOG_DEBUG("Will write regnum=%lu", arc->num_core_regs + i);
1187 aux_addrs[aux_cnt] = arc_reg->arch_num;
1188 aux_values[aux_cnt] = target_buffer_get_u32(target, reg->value);
1189 aux_cnt += 1;
1190 }
1191 }
1192
1193 /* Write data to target.
1194 * Check before write, if aux and core count is greater than 0. */
1195 if (core_cnt > 0) {
1196 retval = arc_jtag_write_core_reg(&arc->jtag_info, core_addrs, core_cnt, core_values);
1197 if (ERROR_OK != retval) {
1198 LOG_ERROR("Attempt to write to core registers failed.");
1199 retval = ERROR_FAIL;
1200 goto exit;
1201 }
1202 }
1203
1204 if (aux_cnt > 0) {
1205 retval = arc_jtag_write_aux_reg(&arc->jtag_info, aux_addrs, aux_cnt, aux_values);
1206 if (ERROR_OK != retval) {
1207 LOG_ERROR("Attempt to write to aux registers failed.");
1208 retval = ERROR_FAIL;
1209 goto exit;
1210 }
1211 }
1212
1213 exit:
1214 free(core_values);
1215 free(core_addrs);
1216 free(aux_values);
1217 free(aux_addrs);
1218
1219 return retval;
1220 }
1221
1222 static int arc_enable_interrupts(struct target *target, int enable)
1223 {
1224 uint32_t value;
1225
1226 struct arc_common *arc = target_to_arc(target);
1227
1228 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, &value));
1229
1230 if (enable) {
1231 /* enable interrupts */
1232 value |= SET_CORE_ENABLE_INTERRUPTS;
1233 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, value));
1234 LOG_DEBUG("interrupts enabled");
1235 } else {
1236 /* disable interrupts */
1237 value &= ~SET_CORE_ENABLE_INTERRUPTS;
1238 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, value));
1239 LOG_DEBUG("interrupts disabled");
1240 }
1241
1242 return ERROR_OK;
1243 }
1244
1245 static int arc_resume(struct target *target, int current, target_addr_t address,
1246 int handle_breakpoints, int debug_execution)
1247 {
1248 struct arc_common *arc = target_to_arc(target);
1249 uint32_t resume_pc = 0;
1250 uint32_t value;
1251 struct reg *pc = &arc->core_and_aux_cache->reg_list[arc->pc_index_in_cache];
1252
1253 LOG_DEBUG("current:%i, address:0x%08" TARGET_PRIxADDR ", handle_breakpoints(not supported yet):%i,"
1254 " debug_execution:%i", current, address, handle_breakpoints, debug_execution);
1255
1256 /* We need to reset ARC cache variables so caches
1257 * would be invalidated and actual data
1258 * would be fetched from memory. */
1259 CHECK_RETVAL(arc_reset_caches_states(target));
1260
1261 if (target->state != TARGET_HALTED) {
1262 LOG_WARNING("target not halted");
1263 return ERROR_TARGET_NOT_HALTED;
1264 }
1265
1266 /* current = 1: continue on current PC, otherwise continue at <address> */
1267 if (!current) {
1268 target_buffer_set_u32(target, pc->value, address);
1269 pc->dirty = 1;
1270 pc->valid = 1;
1271 LOG_DEBUG("Changing the value of current PC to 0x%08" TARGET_PRIxADDR, address);
1272 }
1273
1274 if (!current)
1275 resume_pc = address;
1276 else
1277 resume_pc = target_buffer_get_u32(target, pc->value);
1278
1279 CHECK_RETVAL(arc_restore_context(target));
1280
1281 LOG_DEBUG("Target resumes from PC=0x%" PRIx32 ", pc.dirty=%i, pc.valid=%i",
1282 resume_pc, pc->dirty, pc->valid);
1283
1284 /* check if GDB told us to set the PC to continue from */
1285 if ((pc->valid == 1) && (resume_pc == target_buffer_get_u32(target, pc->value))) {
1286 value = target_buffer_get_u32(target, pc->value);
1287 LOG_DEBUG("resume Core (when start-core) with PC @:0x%08" PRIx32, value);
1288 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_PC_REG, value));
1289 }
1290
1291 /* Restore IRQ state if not in debug_execution */
1292 if (!debug_execution)
1293 CHECK_RETVAL(arc_enable_interrupts(target, arc->irq_state));
1294 else
1295 CHECK_RETVAL(arc_enable_interrupts(target, !debug_execution));
1296
1297 target->debug_reason = DBG_REASON_NOTHALTED;
1298
1299 /* ready to get us going again */
1300 target->state = TARGET_RUNNING;
1301 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, &value));
1302 value &= ~SET_CORE_HALT_BIT; /* clear the HALT bit */
1303 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, value));
1304 LOG_DEBUG("Core started to run");
1305
1306 /* registers are now invalid */
1307 register_cache_invalidate(arc->core_and_aux_cache);
1308
1309 if (!debug_execution) {
1310 target->state = TARGET_RUNNING;
1311 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
1312 LOG_DEBUG("target resumed at 0x%08" PRIx32, resume_pc);
1313 } else {
1314 target->state = TARGET_DEBUG_RUNNING;
1315 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED));
1316 LOG_DEBUG("target debug resumed at 0x%08" PRIx32, resume_pc);
1317 }
1318
1319 return ERROR_OK;
1320 }
1321
1322 static int arc_init_target(struct command_context *cmd_ctx, struct target *target)
1323 {
1324 CHECK_RETVAL(arc_build_reg_cache(target));
1325 CHECK_RETVAL(arc_build_bcr_reg_cache(target));
1326 target->debug_reason = DBG_REASON_DBGRQ;
1327 return ERROR_OK;
1328 }
1329
1330 static void arc_free_reg_cache(struct reg_cache *cache)
1331 {
1332 free(cache->reg_list);
1333 free(cache);
1334 }
1335
1336 static void arc_deinit_target(struct target *target)
1337 {
1338 struct arc_common *arc = target_to_arc(target);
1339
1340 LOG_DEBUG("deinitialization of target");
1341 if (arc->core_aux_cache_built)
1342 arc_free_reg_cache(arc->core_and_aux_cache);
1343 if (arc->bcr_cache_built)
1344 arc_free_reg_cache(arc->bcr_cache);
1345
1346 struct arc_reg_data_type *type, *n;
1347 struct arc_reg_desc *desc, *k;
1348
1349 /* Free arc-specific reg_data_types allocations*/
1350 list_for_each_entry_safe_reverse(type, n, &arc->reg_data_types, list) {
1351 if (type->data_type.type_class == REG_TYPE_CLASS_STRUCT) {
1352 free(type->reg_type_struct_field);
1353 free(type->bitfields);
1354 free(type);
1355 } else if (type->data_type.type_class == REG_TYPE_CLASS_FLAGS) {
1356 free(type->reg_type_flags_field);
1357 free(type->bitfields);
1358 free(type);
1359 }
1360 }
1361
1362 /* Free standard_gdb_types reg_data_types allocations */
1363 type = list_first_entry(&arc->reg_data_types, struct arc_reg_data_type, list);
1364 free(type);
1365
1366 list_for_each_entry_safe(desc, k, &arc->aux_reg_descriptions, list)
1367 free_reg_desc(desc);
1368
1369 list_for_each_entry_safe(desc, k, &arc->core_reg_descriptions, list)
1370 free_reg_desc(desc);
1371
1372 list_for_each_entry_safe(desc, k, &arc->bcr_reg_descriptions, list)
1373 free_reg_desc(desc);
1374
1375 free(arc->actionpoints_list);
1376 free(arc);
1377 }
1378
1379
1380 static int arc_target_create(struct target *target, Jim_Interp *interp)
1381 {
1382 struct arc_common *arc = calloc(1, sizeof(*arc));
1383
1384 if (!arc) {
1385 LOG_ERROR("Unable to allocate memory");
1386 return ERROR_FAIL;
1387 }
1388
1389 LOG_DEBUG("Entering");
1390 CHECK_RETVAL(arc_init_arch_info(target, arc, target->tap));
1391
1392 return ERROR_OK;
1393 }
1394
1395 /**
1396 * Write 4-byte instruction to memory. This is like target_write_u32, however
1397 * in the case of a little-endian ARC, instructions are in middle-endian format, not
1398 * little-endian, so a different type of conversion should be done.
1399 * Middle endian: instruction "aabbccdd", stored as "bbaaddcc"
1400 */
1401 int arc_write_instruction_u32(struct target *target, uint32_t address,
1402 uint32_t instr)
1403 {
1404 uint8_t value_buf[4];
1405 if (!target_was_examined(target)) {
1406 LOG_ERROR("Target not examined yet");
1407 return ERROR_FAIL;
1408 }
1409
1410 LOG_DEBUG("Address: 0x%08" PRIx32 ", value: 0x%08" PRIx32, address,
1411 instr);
1412
1413 if (target->endianness == TARGET_LITTLE_ENDIAN)
1414 arc_h_u32_to_me(value_buf, instr);
1415 else
1416 h_u32_to_be(value_buf, instr);
1417
1418 CHECK_RETVAL(target_write_buffer(target, address, 4, value_buf));
1419
1420 return ERROR_OK;
1421 }
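
/*
 * Worked example of the middle-endian layout described above (the value is
 * illustrative): on a little-endian ARC, instr = 0x12345678 is written to
 * memory as the byte sequence 34 12 78 56, i.e. each 16-bit half is stored
 * little-endian while the halves keep their original order.
 */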
1422
1423 /**
1424 * Read 32-bit instruction from memory. It is like target_read_u32, however in
1425 * the case of a little-endian ARC, instructions are in middle-endian format, so
1426 * a different type of conversion should be done.
1427 */
1428 int arc_read_instruction_u32(struct target *target, uint32_t address,
1429 uint32_t *value)
1430 {
1431 uint8_t value_buf[4];
1432
1433 if (!target_was_examined(target)) {
1434 LOG_ERROR("Target not examined yet");
1435 return ERROR_FAIL;
1436 }
1437
1438 *value = 0;
1439 CHECK_RETVAL(target_read_buffer(target, address, 4, value_buf));
1440
1441 if (target->endianness == TARGET_LITTLE_ENDIAN)
1442 *value = arc_me_to_h_u32(value_buf);
1443 else
1444 *value = be_to_h_u32(value_buf);
1445
1446 LOG_DEBUG("Address: 0x%08" PRIx32 ", value: 0x%08" PRIx32, address,
1447 *value);
1448
1449 return ERROR_OK;
1450 }
1451
1452 /* The actionpoint mechanism allows setting up HW breakpoints
1453 * and watchpoints. Each actionpoint is controlled by
1454 * 3 aux registers: actionpoint (AP) match mask (AP_AMM), AP match value (AP_AMV)
1455 * and AP control (AC).
1456 * This function is for setting/unsetting actionpoints:
1457 * at - actionpoint target: trigger on mem/reg access
1458 * tt - transaction type: trigger on r/w. */
1459 static int arc_configure_actionpoint(struct target *target, uint32_t ap_num,
1460 uint32_t match_value, uint32_t control_tt, uint32_t control_at)
1461 {
1462 struct arc_common *arc = target_to_arc(target);
1463
1464 if (control_tt != AP_AC_TT_DISABLE) {
1465
1466 if (arc->actionpoints_num_avail < 1) {
1467 LOG_ERROR("No free actionpoints, maximim amount is %" PRIu32,
1468 arc->actionpoints_num);
1469 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1470 }
1471
1472 /* Names of registers to set - 24 chars should be enough. Looks a little
1473 * bit out-of-place for C code, but makes it aligned to the bigger
1474 * concept of "ARC registers are defined in TCL" as far as possible.
1475 */
1476 char ap_amv_reg_name[24], ap_amm_reg_name[24], ap_ac_reg_name[24];
1477 snprintf(ap_amv_reg_name, 24, "ap_amv%" PRIu32, ap_num);
1478 snprintf(ap_amm_reg_name, 24, "ap_amm%" PRIu32, ap_num);
1479 snprintf(ap_ac_reg_name, 24, "ap_ac%" PRIu32, ap_num);
1480 CHECK_RETVAL(arc_set_register_value(target, ap_amv_reg_name,
1481 match_value));
1482 CHECK_RETVAL(arc_set_register_value(target, ap_amm_reg_name, 0));
1483 CHECK_RETVAL(arc_set_register_value(target, ap_ac_reg_name,
1484 control_tt | control_at));
1485 arc->actionpoints_num_avail--;
1486 } else {
1487 char ap_ac_reg_name[24];
1488 snprintf(ap_ac_reg_name, 24, "ap_ac%" PRIu32, ap_num);
1489 CHECK_RETVAL(arc_set_register_value(target, ap_ac_reg_name,
1490 AP_AC_TT_DISABLE));
1491 arc->actionpoints_num_avail++;
1492 }
1493
1494 return ERROR_OK;
1495 }
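
/*
 * Illustrative call (this mirrors what arc_set_breakpoint() below does for a
 * hardware breakpoint): programming actionpoint 0 to trigger on any access to
 * an instruction address would amount to:
 *
 *	arc_configure_actionpoint(target, 0, breakpoint->address,
 *		AP_AC_TT_READWRITE, AP_AC_AT_INST_ADDR);
 */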
1496
1497 static int arc_set_breakpoint(struct target *target,
1498 struct breakpoint *breakpoint)
1499 {
1500 if (breakpoint->set) {
1501 LOG_WARNING("breakpoint already set");
1502 return ERROR_OK;
1503 }
1504
1505 if (breakpoint->type == BKPT_SOFT) {
1506 LOG_DEBUG("bpid: %" PRIu32, breakpoint->unique_id);
1507
1508 if (breakpoint->length == 4) {
1509 uint32_t verify = 0xffffffff;
1510
1511 CHECK_RETVAL(target_read_buffer(target, breakpoint->address, breakpoint->length,
1512 breakpoint->orig_instr));
1513
1514 CHECK_RETVAL(arc_write_instruction_u32(target, breakpoint->address,
1515 ARC_SDBBP_32));
1516
1517 CHECK_RETVAL(arc_read_instruction_u32(target, breakpoint->address, &verify));
1518
1519 if (verify != ARC_SDBBP_32) {
1520 LOG_ERROR("Unable to set 32bit breakpoint at address @0x%" TARGET_PRIxADDR
1521 " - check that memory is read/writable", breakpoint->address);
1522 return ERROR_FAIL;
1523 }
1524 } else if (breakpoint->length == 2) {
1525 uint16_t verify = 0xffff;
1526
1527 CHECK_RETVAL(target_read_buffer(target, breakpoint->address, breakpoint->length,
1528 breakpoint->orig_instr));
1529 CHECK_RETVAL(target_write_u16(target, breakpoint->address, ARC_SDBBP_16));
1530
1531 CHECK_RETVAL(target_read_u16(target, breakpoint->address, &verify));
1532 if (verify != ARC_SDBBP_16) {
1533 LOG_ERROR("Unable to set 16bit breakpoint at address @0x%" TARGET_PRIxADDR
1534 " - check that memory is read/writable", breakpoint->address);
1535 return ERROR_FAIL;
1536 }
1537 } else {
1538 LOG_ERROR("Invalid breakpoint length: target supports only 2 or 4");
1539 return ERROR_COMMAND_ARGUMENT_INVALID;
1540 }
1541
1542 breakpoint->set = 64; /* Any nice value but 0 */
1543 } else if (breakpoint->type == BKPT_HARD) {
1544 struct arc_common *arc = target_to_arc(target);
1545 struct arc_actionpoint *ap_list = arc->actionpoints_list;
1546 unsigned int bp_num;
1547
1548 for (bp_num = 0; bp_num < arc->actionpoints_num; bp_num++) {
1549 if (!ap_list[bp_num].used)
1550 break;
1551 }
1552
1553 if (bp_num >= arc->actionpoints_num) {
1554 LOG_ERROR("No free actionpoints, maximum amount is %" PRIu32,
1555 arc->actionpoints_num);
1556 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1557 }
1558
1559 int retval = arc_configure_actionpoint(target, bp_num,
1560 breakpoint->address, AP_AC_TT_READWRITE, AP_AC_AT_INST_ADDR);
1561
1562 if (retval == ERROR_OK) {
1563 breakpoint->set = bp_num + 1;
1564 ap_list[bp_num].used = 1;
1565 ap_list[bp_num].bp_value = breakpoint->address;
1566 ap_list[bp_num].type = ARC_AP_BREAKPOINT;
1567
1568 LOG_DEBUG("bpid: %" PRIu32 ", bp_num %u bp_value 0x%" PRIx32,
1569 breakpoint->unique_id, bp_num, ap_list[bp_num].bp_value);
1570 }
1571
1572 } else {
1573 LOG_DEBUG("ERROR: setting unknown breakpoint type");
1574 return ERROR_FAIL;
1575 }
1576
1577 /* core instruction cache is now invalid. */
1578 CHECK_RETVAL(arc_cache_invalidate(target));
1579
1580 return ERROR_OK;
1581 }
1582
1583 static int arc_unset_breakpoint(struct target *target,
1584 struct breakpoint *breakpoint)
1585 {
1586 int retval = ERROR_OK;
1587
1588 if (!breakpoint->set) {
1589 LOG_WARNING("breakpoint not set");
1590 return ERROR_OK;
1591 }
1592
1593 if (breakpoint->type == BKPT_SOFT) {
1594 /* restore original instruction (kept in target endianness) */
1595 LOG_DEBUG("bpid: %" PRIu32, breakpoint->unique_id);
1596 if (breakpoint->length == 4) {
1597 uint32_t current_instr;
1598
1599 /* check that user program has not modified breakpoint instruction */
1600 CHECK_RETVAL(arc_read_instruction_u32(target, breakpoint->address, &current_instr));
1601
1602 if (current_instr == ARC_SDBBP_32) {
1603 retval = target_write_buffer(target, breakpoint->address,
1604 breakpoint->length, breakpoint->orig_instr);
1605 if (retval != ERROR_OK)
1606 return retval;
1607 } else {
1608 LOG_WARNING("Software breakpoint @0x%" TARGET_PRIxADDR
1609 " has been overwritten outside of debugger."
1610 "Expected: @0x%" PRIx32 ", got: @0x%" PRIx32,
1611 breakpoint->address, ARC_SDBBP_32, current_instr);
1612 }
1613 } else if (breakpoint->length == 2) {
1614 uint16_t current_instr;
1615
1616 /* check that user program has not modified breakpoint instruction */
1617 CHECK_RETVAL(target_read_u16(target, breakpoint->address, &current_instr));
1618 if (current_instr == ARC_SDBBP_16) {
1619 retval = target_write_buffer(target, breakpoint->address,
1620 breakpoint->length, breakpoint->orig_instr);
1621 if (retval != ERROR_OK)
1622 return retval;
1623 } else {
1624 LOG_WARNING("Software breakpoint @0x%" TARGET_PRIxADDR
1625 " has been overwritten outside of debugger. "
1626 "Expected: 0x%04x, got: 0x%04" PRIx16,
1627 breakpoint->address, ARC_SDBBP_16, current_instr);
1628 }
1629 } else {
1630 LOG_ERROR("Invalid breakpoint length: target supports only 2 or 4");
1631 return ERROR_COMMAND_ARGUMENT_INVALID;
1632 }
1633 breakpoint->set = 0;
1634
1635 } else if (breakpoint->type == BKPT_HARD) {
1636 struct arc_common *arc = target_to_arc(target);
1637 struct arc_actionpoint *ap_list = arc->actionpoints_list;
1638 unsigned int bp_num = breakpoint->set - 1;
1639
1640 if ((breakpoint->set == 0) || (bp_num >= arc->actionpoints_num)) {
1641 LOG_DEBUG("Invalid actionpoint ID: %u in breakpoint: %" PRIu32,
1642 bp_num, breakpoint->unique_id);
1643 return ERROR_OK;
1644 }
1645
1646 retval = arc_configure_actionpoint(target, bp_num,
1647 breakpoint->address, AP_AC_TT_DISABLE, AP_AC_AT_INST_ADDR);
1648
1649 if (retval == ERROR_OK) {
1650 breakpoint->set = 0;
1651 ap_list[bp_num].used = 0;
1652 ap_list[bp_num].bp_value = 0;
1653
1654 LOG_DEBUG("bpid: %" PRIu32 " - released actionpoint ID: %i",
1655 breakpoint->unique_id, bp_num);
1656 }
1657 } else {
1658 LOG_DEBUG("ERROR: unsetting unknown breakpoint type");
1659 return ERROR_FAIL;
1660 }
1661
1662 /* core instruction cache is now invalid. */
1663 CHECK_RETVAL(arc_cache_invalidate(target));
1664
1665 return retval;
1666 }
1667
1668
1669 static int arc_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1670 {
1671 if (target->state == TARGET_HALTED) {
1672 return arc_set_breakpoint(target, breakpoint);
1673
1674 } else {
1675 LOG_WARNING(" > core was not halted, please try again.");
1676 return ERROR_TARGET_NOT_HALTED;
1677 }
1678 }
1679
1680 static int arc_remove_breakpoint(struct target *target,
1681 struct breakpoint *breakpoint)
1682 {
1683 if (target->state == TARGET_HALTED) {
1684 if (breakpoint->set)
1685 CHECK_RETVAL(arc_unset_breakpoint(target, breakpoint));
1686 } else {
1687 LOG_WARNING("target not halted");
1688 return ERROR_TARGET_NOT_HALTED;
1689 }
1690
1691 return ERROR_OK;
1692 }
1693
1694 void arc_reset_actionpoints(struct target *target)
1695 {
1696 struct arc_common *arc = target_to_arc(target);
1697 struct arc_actionpoint *ap_list = arc->actionpoints_list;
1698 struct breakpoint *next_b;
1699
1700 while (target->breakpoints) {
1701 next_b = target->breakpoints->next;
1702 arc_remove_breakpoint(target, target->breakpoints);
1703 free(target->breakpoints->orig_instr);
1704 free(target->breakpoints);
1705 target->breakpoints = next_b;
1706 }
1707 for (unsigned int i = 0; i < arc->actionpoints_num; i++) {
1708 if ((ap_list[i].used) && (ap_list[i].reg_address))
1709 arc_remove_auxreg_actionpoint(target, ap_list[i].reg_address);
1710 }
1711 }
1712
1713 int arc_set_actionpoints_num(struct target *target, uint32_t ap_num)
1714 {
1715 LOG_DEBUG("target=%s actionpoints=%" PRIu32, target_name(target), ap_num);
1716 struct arc_common *arc = target_to_arc(target);
1717
1718 /* Make sure that there are no enabled actionpoints in target. */
1719 arc_reset_actionpoints(target);
1720
1721 /* Assume that all points have been removed from target. */
1722 free(arc->actionpoints_list);
1723
1724 arc->actionpoints_num_avail = ap_num;
1725 arc->actionpoints_num = ap_num;
1726 /* calloc() may safely be called with a zero count (ap_num == 0). */
1727 arc->actionpoints_list = calloc(ap_num, sizeof(struct arc_actionpoint));
1728
1729 if (!arc->actionpoints_list) {
1730 LOG_ERROR("Unable to allocate memory");
1731 return ERROR_FAIL;
1732 }
1733 return ERROR_OK;
1734 }
1735
1736
1737 int arc_add_auxreg_actionpoint(struct target *target,
1738 uint32_t auxreg_addr, uint32_t transaction)
1739 {
1740 unsigned int ap_num = 0;
1741 int retval = ERROR_OK;
1742
1743 if (target->state != TARGET_HALTED)
1744 return ERROR_TARGET_NOT_HALTED;
1745
1746 struct arc_common *arc = target_to_arc(target);
1747 struct arc_actionpoint *ap_list = arc->actionpoints_list;
1748
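/* Find the first free actionpoint slot. */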
1749 while (ap_num < arc->actionpoints_num && ap_list[ap_num].used)
1750 ap_num++;
1751
1752 if (ap_num >= arc->actionpoints_num) {
1753 LOG_ERROR("No actionpoint free, maximum amount is %u",
1754 arc->actionpoints_num);
1755 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1756 }
1757
1758 retval = arc_configure_actionpoint(target, ap_num,
1759 auxreg_addr, transaction, AP_AC_AT_AUXREG_ADDR);
1760
1761 if (retval == ERROR_OK) {
1762 ap_list[ap_num].used = 1;
1763 ap_list[ap_num].reg_address = auxreg_addr;
1764 }
1765
1766 return retval;
1767 }
1768
1769 int arc_remove_auxreg_actionpoint(struct target *target, uint32_t auxreg_addr)
1770 {
1771 int retval = ERROR_OK;
1772 bool ap_found = false;
1773 unsigned int ap_num = 0;
1774
1775 if (target->state != TARGET_HALTED)
1776 return ERROR_TARGET_NOT_HALTED;
1777
1778 struct arc_common *arc = target_to_arc(target);
1779 struct arc_actionpoint *ap_list = arc->actionpoints_list;
1780
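/* Look for an actionpoint that is already watching this aux register. */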
1781 while (ap_num < arc->actionpoints_num) {
1782 if (ap_list[ap_num].used && ap_list[ap_num].reg_address == auxreg_addr) {
1783 ap_found = true;
1784 break;
1785 }
1786 ap_num++;
1787 }
1788
1789 if (ap_found) {
1790 retval = arc_configure_actionpoint(target, ap_num,
1791 auxreg_addr, AP_AC_TT_DISABLE, AP_AC_AT_AUXREG_ADDR);
1792
1793 if (retval == ERROR_OK) {
1794 ap_list[ap_num].used = 0;
1795 ap_list[ap_num].bp_value = 0;
1796 }
1797 } else {
1798 LOG_ERROR("Register actionpoint not found");
1799 }
1800 return retval;
1801 }
1802
1803 /* Helper function that switches the core into single-step mode by
1804 * doing aux register read/write operations. */
1805 int arc_config_step(struct target *target, int enable_step)
1806 {
1807 uint32_t value;
1808
1809 struct arc_common *arc = target_to_arc(target);
1810
1811 /* enable core debug step mode */
1812 if (enable_step) {
1813 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG,
1814 &value));
1815 value &= ~SET_CORE_AE_BIT; /* clear the AE bit */
1816 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG,
1817 value));
1818 LOG_DEBUG(" [status32:0x%08" PRIx32 "]", value);
1819
1820 /* Doing read-modify-write, because DEBUG might contain manually set
1821 * bits like UB or ED, which should be preserved. */
1822 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info,
1823 AUX_DEBUG_REG, &value));
1824 value |= SET_CORE_SINGLE_INSTR_STEP; /* set the IS bit */
1825 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DEBUG_REG,
1826 value));
1827 LOG_DEBUG("core debug step mode enabled [debug-reg:0x%08" PRIx32 "]", value);
1828
1829 } else { /* disable core debug step mode */
1830 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_DEBUG_REG,
1831 &value));
1832 value &= ~SET_CORE_SINGLE_INSTR_STEP; /* clear the IS bit */
1833 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DEBUG_REG,
1834 value));
1835 LOG_DEBUG("core debug step mode disabled");
1836 }
1837
1838 return ERROR_OK;
1839 }
1840
1841 int arc_step(struct target *target, int current, target_addr_t address,
1842 int handle_breakpoints)
1843 {
1844 /* get pointers to arch-specific information */
1845 struct arc_common *arc = target_to_arc(target);
1846 struct breakpoint *breakpoint = NULL;
1847 struct reg *pc = &(arc->core_and_aux_cache->reg_list[arc->pc_index_in_cache]);
1848
1849 if (target->state != TARGET_HALTED) {
1850 LOG_WARNING("target not halted");
1851 return ERROR_TARGET_NOT_HALTED;
1852 }
1853
1854 /* current = 1: step from current pc, otherwise step from <address> */
1855 if (!current) {
1856 buf_set_u32(pc->value, 0, 32, address);
1857 pc->dirty = 1;
1858 pc->valid = 1;
1859 }
1860
1861 LOG_DEBUG("Target steps one instruction from PC=0x%" PRIx32,
1862 buf_get_u32(pc->value, 0, 32));
1863
1864 /* the front-end may request us not to handle breakpoints */
1865 if (handle_breakpoints) {
1866 breakpoint = breakpoint_find(target, buf_get_u32(pc->value, 0, 32));
1867 if (breakpoint)
1868 CHECK_RETVAL(arc_unset_breakpoint(target, breakpoint));
1869 }
1870
1871 /* restore context */
1872 CHECK_RETVAL(arc_restore_context(target));
1873
1874 target->debug_reason = DBG_REASON_SINGLESTEP;
1875
1876 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
1877
1878 /* disable interrupts while stepping */
1879 CHECK_RETVAL(arc_enable_interrupts(target, 0));
1880
1881 /* do a single step */
1882 CHECK_RETVAL(arc_config_step(target, 1));
1883
1884 /* make sure the core has completed the step */
1885 alive_sleep(1);
1886
1887 /* registers are now invalid */
1888 register_cache_invalidate(arc->core_and_aux_cache);
1889
1890 if (breakpoint)
1891 CHECK_RETVAL(arc_set_breakpoint(target, breakpoint));
1892
1893 LOG_DEBUG("target stepped");
1894
1895 target->state = TARGET_HALTED;
1896
1897 /* Saving context */
1898 CHECK_RETVAL(arc_debug_entry(target));
1899 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
1900
1901 return ERROR_OK;
1902 }
1903
1904
1905 /* This function invalidates icache. */
1906 static int arc_icache_invalidate(struct target *target)
1907 {
1908 uint32_t value;
1909
1910 struct arc_common *arc = target_to_arc(target);
1911
1912 /* Don't waste time if already done. */
1913 if (!arc->has_icache || arc->icache_invalidated)
1914 return ERROR_OK;
1915
1916 LOG_DEBUG("Invalidating I$.");
1917
1918 value = IC_IVIC_INVALIDATE; /* invalidate I$ */
1919 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_IC_IVIC_REG, value));
1920
1921 arc->icache_invalidated = true;
1922
1923 return ERROR_OK;
1924 }
1925
1926 /* This function invalidates dcache */
1927 static int arc_dcache_invalidate(struct target *target)
1928 {
1929 uint32_t value, dc_ctrl_value;
1930
1931 struct arc_common *arc = target_to_arc(target);
1932
1933 if (!arc->has_dcache || arc->dcache_invalidated)
1934 return ERROR_OK;
1935
1936 LOG_DEBUG("Invalidating D$.");
1937
1938 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, &value));
1939 dc_ctrl_value = value;
1940 value &= ~DC_CTRL_IM;
1941
1942 /* set DC_CTRL invalidate mode to invalidate-only (no flushing!!) */
1943 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, value));
1944 value = DC_IVDC_INVALIDATE; /* invalidate D$ */
1945 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_IVDC_REG, value));
1946
1947 /* restore DC_CTRL invalidate mode */
1948 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, dc_ctrl_value));
1949
1950 arc->dcache_invalidated = true;
1951
1952 return ERROR_OK;
1953 }
1954
1955 /* This function invalidates l2 cache. */
1956 static int arc_l2cache_invalidate(struct target *target)
1957 {
1958 uint32_t value, slc_ctrl_value;
1959
1960 struct arc_common *arc = target_to_arc(target);
1961
1962 if (!arc->has_l2cache || arc->l2cache_invalidated)
1963 return ERROR_OK;
1964
1965 LOG_DEBUG("Invalidating L2$.");
1966
1967 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_CTRL, &value));
1968 slc_ctrl_value = value;
1969 value &= ~L2_CTRL_IM;
1970
1971 /* set L2_CTRL invalidate mode to invalidate-only (no flushing!!) */
1972 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_CTRL, value));
1973 /* invalidate L2$ */
1974 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_INV, L2_INV_IV));
1975
1976 /* Wait until invalidate operation ends */
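/* Note: the busy flag L2_CTRL_BS is polled without a timeout. */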
1977 do {
1978 LOG_DEBUG("Waiting for invalidation end.");
1979 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_CTRL, &value));
1980 } while (value & L2_CTRL_BS);
1981
1982 /* restore L2_CTRL invalidate mode */
1983 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_CTRL, slc_ctrl_value));
1984
1985 arc->l2cache_invalidated = true;
1986
1987 return ERROR_OK;
1988 }
1989
1990
1991 int arc_cache_invalidate(struct target *target)
1992 {
1993 CHECK_RETVAL(arc_icache_invalidate(target));
1994 CHECK_RETVAL(arc_dcache_invalidate(target));
1995 CHECK_RETVAL(arc_l2cache_invalidate(target));
1996
1997 return ERROR_OK;
1998 }
1999
2000 /* Flush the data cache. This function is cheap to call and returns quickly
2001 * if the D$ has already been flushed since the target was halted. The JTAG
2002 * debugger reads values directly from memory, bypassing the cache, so any
2003 * unflushed lines would cause the debugger to read stale values and lead to
2004 * all sorts of trouble. */
2005 int arc_dcache_flush(struct target *target)
2006 {
2007 uint32_t value, dc_ctrl_value;
2008 bool has_to_set_dc_ctrl_im;
2009
2010 struct arc_common *arc = target_to_arc(target);
2011
2012 /* Don't waste time if already done. */
2013 if (!arc->has_dcache || arc->dcache_flushed)
2014 return ERROR_OK;
2015
2016 LOG_DEBUG("Flushing D$.");
2017
2018 /* Store current value of DC_CTRL */
2019 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, &dc_ctrl_value));
2020
2021 /* Set DC_CTRL invalidate mode to flush (if not already set) */
2022 has_to_set_dc_ctrl_im = (dc_ctrl_value & DC_CTRL_IM) == 0;
2023 if (has_to_set_dc_ctrl_im) {
2024 value = dc_ctrl_value | DC_CTRL_IM;
2025 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, value));
2026 }
2027
2028 /* Flush D$ */
2029 value = DC_IVDC_INVALIDATE;
2030 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_IVDC_REG, value));
2031
2032 /* Restore DC_CTRL invalidate mode (even if the flush failed) */
2033 if (has_to_set_dc_ctrl_im)
2034 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, dc_ctrl_value));
2035
2036 arc->dcache_flushed = true;
2037
2038 return ERROR_OK;
2039 }
2040
2041 /* This function flushes l2cache. */
2042 static int arc_l2cache_flush(struct target *target)
2043 {
2044 uint32_t value;
2045
2046 struct arc_common *arc = target_to_arc(target);
2047
2048 /* Don't waste time if already done. */
2049 if (!arc->has_l2cache || arc->l2cache_flushed)
2050 return ERROR_OK;
2051
2052 LOG_DEBUG("Flushing L2$.");
2053
2054 /* Flush L2 cache */
2055 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_FLUSH, L2_FLUSH_FL));
2056
2057 /* Wait until flush operation ends */
2058 do {
2059 LOG_DEBUG("Waiting for flushing end.");
2060 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_CTRL, &value));
2061 } while (value & L2_CTRL_BS);
2062
2063 arc->l2cache_flushed = true;
2064
2065 return ERROR_OK;
2066 }
2067
2068 int arc_cache_flush(struct target *target)
2069 {
2070 CHECK_RETVAL(arc_dcache_flush(target));
2071 CHECK_RETVAL(arc_l2cache_flush(target));
2072
2073 return ERROR_OK;
2074 }
2075
2076 /* ARC v2 target */
2077 struct target_type arcv2_target = {
2078 .name = "arcv2",
2079
2080 .poll = arc_poll,
2081
2082 .arch_state = arc_arch_state,
2083
2084 /* TODO: This seems similar to the MetaWare hostlink, so perhaps
2085 * we can exploit it in the future. */
2086 .target_request_data = NULL,
2087
2088 .halt = arc_halt,
2089 .resume = arc_resume,
2090 .step = arc_step,
2091
2092 .assert_reset = arc_assert_reset,
2093 .deassert_reset = arc_deassert_reset,
2094
2095 /* TODO Implement soft_reset_halt */
2096 .soft_reset_halt = NULL,
2097
2098 .get_gdb_reg_list = arc_get_gdb_reg_list,
2099
2100 .read_memory = arc_mem_read,
2101 .write_memory = arc_mem_write,
2102 .checksum_memory = NULL,
2103 .blank_check_memory = NULL,
2104
2105 .add_breakpoint = arc_add_breakpoint,
2106 .add_context_breakpoint = NULL,
2107 .add_hybrid_breakpoint = NULL,
2108 .remove_breakpoint = arc_remove_breakpoint,
2109 .add_watchpoint = NULL,
2110 .remove_watchpoint = NULL,
2111 .hit_watchpoint = NULL,
2112
2113 .run_algorithm = NULL,
2114 .start_algorithm = NULL,
2115 .wait_algorithm = NULL,
2116
2117 .commands = arc_monitor_command_handlers,
2118
2119 .target_create = arc_target_create,
2120 .init_target = arc_init_target,
2121 .deinit_target = arc_deinit_target,
2122 .examine = arc_examine,
2123
2124 .virt2phys = NULL,
2125 .read_phys_memory = NULL,
2126 .write_phys_memory = NULL,
2127 .mmu = NULL,
2128 };
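/* Usage note (illustrative sketch, not part of this file): arcv2_target is
 * registered in OpenOCD's global table of target types and is selected by its
 * .name, so a board configuration script would typically contain a line such as
 *
 *   target create $_TARGETNAME arcv2 -chain-position $_CHIPNAME.cpu
 *
 * where $_TARGETNAME and $_CHIPNAME are placeholders defined elsewhere in the
 * particular chip/board configuration. */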
