armv4_5: use register_commands()
[openocd.git] / src / target / armv4_5.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2008 by Spencer Oliver *
6 * spen@spen-soft.co.uk *
7 * *
8 * Copyright (C) 2008 by Oyvind Harboe *
9 * oyvind.harboe@zylin.com *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "armv4_5.h"
31 #include "arm_jtag.h"
32 #include "breakpoints.h"
33 #include "arm_disassembler.h"
34 #include "binarybuffer.h"
35 #include "algorithm.h"
36 #include "register.h"
37
38
39 /* offsets into armv4_5 core register cache */
40 enum {
41 // ARMV4_5_CPSR = 31,
42 ARMV4_5_SPSR_FIQ = 32,
43 ARMV4_5_SPSR_IRQ = 33,
44 ARMV4_5_SPSR_SVC = 34,
45 ARMV4_5_SPSR_ABT = 35,
46 ARMV4_5_SPSR_UND = 36,
47 ARM_SPSR_MON = 39,
48 };
49
50 static const uint8_t arm_usr_indices[17] = {
51 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ARMV4_5_CPSR,
52 };
53
54 static const uint8_t arm_fiq_indices[8] = {
55 16, 17, 18, 19, 20, 21, 22, ARMV4_5_SPSR_FIQ,
56 };
57
58 static const uint8_t arm_irq_indices[3] = {
59 23, 24, ARMV4_5_SPSR_IRQ,
60 };
61
62 static const uint8_t arm_svc_indices[3] = {
63 25, 26, ARMV4_5_SPSR_SVC,
64 };
65
66 static const uint8_t arm_abt_indices[3] = {
67 27, 28, ARMV4_5_SPSR_ABT,
68 };
69
70 static const uint8_t arm_und_indices[3] = {
71 29, 30, ARMV4_5_SPSR_UND,
72 };
73
74 static const uint8_t arm_mon_indices[3] = {
75 37, 38, ARM_SPSR_MON,
76 };
77
78 static const struct {
79 const char *name;
80 unsigned short psr;
81 /* For user and system modes, these list indices for all registers.
82 * otherwise they're just indices for the shadow registers and SPSR.
83 */
84 unsigned short n_indices;
85 const uint8_t *indices;
86 } arm_mode_data[] = {
87 /* Seven modes are standard from ARM7 on. "System" and "User" share
88 * the same registers; other modes shadow from 3 to 8 registers.
89 */
90 {
91 .name = "User",
92 .psr = ARMV4_5_MODE_USR,
93 .n_indices = ARRAY_SIZE(arm_usr_indices),
94 .indices = arm_usr_indices,
95 },
96 {
97 .name = "FIQ",
98 .psr = ARMV4_5_MODE_FIQ,
99 .n_indices = ARRAY_SIZE(arm_fiq_indices),
100 .indices = arm_fiq_indices,
101 },
102 {
103 .name = "Supervisor",
104 .psr = ARMV4_5_MODE_SVC,
105 .n_indices = ARRAY_SIZE(arm_svc_indices),
106 .indices = arm_svc_indices,
107 },
108 {
109 .name = "Abort",
110 .psr = ARMV4_5_MODE_ABT,
111 .n_indices = ARRAY_SIZE(arm_abt_indices),
112 .indices = arm_abt_indices,
113 },
114 {
115 .name = "IRQ",
116 .psr = ARMV4_5_MODE_IRQ,
117 .n_indices = ARRAY_SIZE(arm_irq_indices),
118 .indices = arm_irq_indices,
119 },
120 {
121 .name = "Undefined instruction",
122 .psr = ARMV4_5_MODE_UND,
123 .n_indices = ARRAY_SIZE(arm_und_indices),
124 .indices = arm_und_indices,
125 },
126 {
127 .name = "System",
128 .psr = ARMV4_5_MODE_SYS,
129 .n_indices = ARRAY_SIZE(arm_usr_indices),
130 .indices = arm_usr_indices,
131 },
132 /* TrustZone "Security Extensions" add a secure monitor mode.
133 * This is distinct from a "debug monitor" which can support
134 * non-halting debug, in conjunction with some debuggers.
135 */
136 {
137 .name = "Secure Monitor",
138 .psr = ARM_MODE_MON,
139 .n_indices = ARRAY_SIZE(arm_mon_indices),
140 .indices = arm_mon_indices,
141 },
142 };
143
144 /** Map PSR mode bits to the name of an ARM processor operating mode. */
145 const char *arm_mode_name(unsigned psr_mode)
146 {
147 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
148 if (arm_mode_data[i].psr == psr_mode)
149 return arm_mode_data[i].name;
150 }
151 LOG_ERROR("unrecognized psr mode: %#02x", psr_mode);
152 return "UNRECOGNIZED";
153 }
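
/* Illustrative sketch (not part of the upstream file): callers normally
 * hand this only the low five CPSR bits, as arm_set_cpsr() does below.
 * The cpsr value here is made up.
 *
 *	uint32_t cpsr = 0x600000d3;		// e.g. SVC mode, IRQ/FIQ masked
 *	LOG_DEBUG("core mode: %s", arm_mode_name(cpsr & 0x1f));
 */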
154
155 /** Return true iff the parameter denotes a valid ARM processor mode. */
156 bool is_arm_mode(unsigned psr_mode)
157 {
158 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
159 if (arm_mode_data[i].psr == psr_mode)
160 return true;
161 }
162 return false;
163 }
164
165 /** Map PSR mode bits to linear number indexing armv4_5_core_reg_map */
166 int armv4_5_mode_to_number(enum armv4_5_mode mode)
167 {
168 switch (mode) {
169 case ARMV4_5_MODE_ANY:
170 /* map MODE_ANY to user mode */
171 case ARMV4_5_MODE_USR:
172 return 0;
173 case ARMV4_5_MODE_FIQ:
174 return 1;
175 case ARMV4_5_MODE_IRQ:
176 return 2;
177 case ARMV4_5_MODE_SVC:
178 return 3;
179 case ARMV4_5_MODE_ABT:
180 return 4;
181 case ARMV4_5_MODE_UND:
182 return 5;
183 case ARMV4_5_MODE_SYS:
184 return 6;
185 case ARM_MODE_MON:
186 return 7;
187 default:
188 LOG_ERROR("invalid mode value encountered %d", mode);
189 return -1;
190 }
191 }
192
193 /** Map linear number indexing armv4_5_core_reg_map to PSR mode bits. */
194 enum armv4_5_mode armv4_5_number_to_mode(int number)
195 {
196 switch (number) {
197 case 0:
198 return ARMV4_5_MODE_USR;
199 case 1:
200 return ARMV4_5_MODE_FIQ;
201 case 2:
202 return ARMV4_5_MODE_IRQ;
203 case 3:
204 return ARMV4_5_MODE_SVC;
205 case 4:
206 return ARMV4_5_MODE_ABT;
207 case 5:
208 return ARMV4_5_MODE_UND;
209 case 6:
210 return ARMV4_5_MODE_SYS;
211 case 7:
212 return ARM_MODE_MON;
213 default:
214 LOG_ERROR("mode index out of bounds %d", number);
215 return ARMV4_5_MODE_ANY;
216 }
217 }
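
/* Usage sketch (hypothetical): the linear number returned here is what
 * indexes armv4_5_core_reg_map[] below; the two helpers are inverses for
 * every valid mode.
 *
 *	int num = armv4_5_mode_to_number(ARMV4_5_MODE_SVC);	// 3
 *	enum armv4_5_mode mode = armv4_5_number_to_mode(num);	// ARMV4_5_MODE_SVC
 */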
218
219 char* armv4_5_state_strings[] =
220 {
221 "ARM", "Thumb", "Jazelle", "ThumbEE",
222 };
223
224 /* Templates for ARM core registers.
225 *
226 * NOTE: offsets in this table are coupled to the arm_mode_data
227 * table above, the armv4_5_core_reg_map array below, and also to
228 * the ARMV4_5_CPSR symbol (which should vanish after ARM11 updates).
229 */
230 static const struct {
231 /* The name is used for e.g. the "regs" command. */
232 const char *name;
233
234 /* The {cookie, mode} tuple uniquely identifies one register.
235 * In a given mode, cookies 0..15 map to registers R0..R15,
236 * with R13..R15 usually called SP, LR, PC.
237 *
238 * MODE_ANY is used as *input* to the mapping, and indicates
239 * various special cases (sigh) and errors.
240 *
241 * Cookie 16 is (currently) confusing, since it indicates
242 * CPSR -or- SPSR depending on whether 'mode' is MODE_ANY.
243 * (Exception modes have both CPSR and SPSR registers ...)
244 */
245 unsigned cookie;
246 enum armv4_5_mode mode;
247 } arm_core_regs[] = {
248 /* IMPORTANT: we guarantee that the first eight cached registers
249 * correspond to r0..r7, and that cache index 15 is the PC, so that callers
250 * don't need to map them.
251 */
252 { .name = "r0", .cookie = 0, .mode = ARMV4_5_MODE_ANY, },
253 { .name = "r1", .cookie = 1, .mode = ARMV4_5_MODE_ANY, },
254 { .name = "r2", .cookie = 2, .mode = ARMV4_5_MODE_ANY, },
255 { .name = "r3", .cookie = 3, .mode = ARMV4_5_MODE_ANY, },
256 { .name = "r4", .cookie = 4, .mode = ARMV4_5_MODE_ANY, },
257 { .name = "r5", .cookie = 5, .mode = ARMV4_5_MODE_ANY, },
258 { .name = "r6", .cookie = 6, .mode = ARMV4_5_MODE_ANY, },
259 { .name = "r7", .cookie = 7, .mode = ARMV4_5_MODE_ANY, },
260
261 /* NOTE: regs 8..12 might be shadowed by FIQ ... flagging
262 * them as MODE_ANY creates special cases. (ANY means
263 * "not mapped" elsewhere; here it's "everything but FIQ".)
264 */
265 { .name = "r8", .cookie = 8, .mode = ARMV4_5_MODE_ANY, },
266 { .name = "r9", .cookie = 9, .mode = ARMV4_5_MODE_ANY, },
267 { .name = "r10", .cookie = 10, .mode = ARMV4_5_MODE_ANY, },
268 { .name = "r11", .cookie = 11, .mode = ARMV4_5_MODE_ANY, },
269 { .name = "r12", .cookie = 12, .mode = ARMV4_5_MODE_ANY, },
270
271 /* NOTE all MODE_USR registers are equivalent to MODE_SYS ones */
272 { .name = "sp_usr", .cookie = 13, .mode = ARMV4_5_MODE_USR, },
273 { .name = "lr_usr", .cookie = 14, .mode = ARMV4_5_MODE_USR, },
274
275 /* guaranteed to be at index 15 */
276 { .name = "pc", .cookie = 15, .mode = ARMV4_5_MODE_ANY, },
277
278 { .name = "r8_fiq", .cookie = 8, .mode = ARMV4_5_MODE_FIQ, },
279 { .name = "r9_fiq", .cookie = 9, .mode = ARMV4_5_MODE_FIQ, },
280 { .name = "r10_fiq", .cookie = 10, .mode = ARMV4_5_MODE_FIQ, },
281 { .name = "r11_fiq", .cookie = 11, .mode = ARMV4_5_MODE_FIQ, },
282 { .name = "r12_fiq", .cookie = 12, .mode = ARMV4_5_MODE_FIQ, },
283
284 { .name = "lr_fiq", .cookie = 13, .mode = ARMV4_5_MODE_FIQ, },
285 { .name = "sp_fiq", .cookie = 14, .mode = ARMV4_5_MODE_FIQ, },
286
287 { .name = "lr_irq", .cookie = 13, .mode = ARMV4_5_MODE_IRQ, },
288 { .name = "sp_irq", .cookie = 14, .mode = ARMV4_5_MODE_IRQ, },
289
290 { .name = "lr_svc", .cookie = 13, .mode = ARMV4_5_MODE_SVC, },
291 { .name = "sp_svc", .cookie = 14, .mode = ARMV4_5_MODE_SVC, },
292
293 { .name = "lr_abt", .cookie = 13, .mode = ARMV4_5_MODE_ABT, },
294 { .name = "sp_abt", .cookie = 14, .mode = ARMV4_5_MODE_ABT, },
295
296 { .name = "lr_und", .cookie = 13, .mode = ARMV4_5_MODE_UND, },
297 { .name = "sp_und", .cookie = 14, .mode = ARMV4_5_MODE_UND, },
298
299 { .name = "cpsr", .cookie = 16, .mode = ARMV4_5_MODE_ANY, },
300 { .name = "spsr_fiq", .cookie = 16, .mode = ARMV4_5_MODE_FIQ, },
301 { .name = "spsr_irq", .cookie = 16, .mode = ARMV4_5_MODE_IRQ, },
302 { .name = "spsr_svc", .cookie = 16, .mode = ARMV4_5_MODE_SVC, },
303 { .name = "spsr_abt", .cookie = 16, .mode = ARMV4_5_MODE_ABT, },
304 { .name = "spsr_und", .cookie = 16, .mode = ARMV4_5_MODE_UND, },
305
306 { .name = "lr_mon", .cookie = 13, .mode = ARM_MODE_MON, },
307 { .name = "sp_mon", .cookie = 14, .mode = ARM_MODE_MON, },
308 { .name = "spsr_mon", .cookie = 16, .mode = ARM_MODE_MON, },
309 };
310
311 /* map core mode (USR, FIQ, ...) and register number to
312 * indices into the register cache
313 */
314 const int armv4_5_core_reg_map[8][17] =
315 {
316 { /* USR */
317 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
318 },
319 { /* FIQ (8 shadows of USR, vs normal 3) */
320 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 15, 32
321 },
322 { /* IRQ */
323 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 23, 24, 15, 33
324 },
325 { /* SVC */
326 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 25, 26, 15, 34
327 },
328 { /* ABT */
329 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 27, 28, 15, 35
330 },
331 { /* UND */
332 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 29, 30, 15, 36
333 },
334 { /* SYS (same registers as USR) */
335 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
336 },
337 { /* MON */
338 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 37, 38, 15, 39,
339 }
340 };
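
/* Example (sketch only): locating the cache slot that holds the banked
 * R13 (stack pointer) of IRQ mode.  The constants come from the tables
 * above; index 23 is the IRQ-mode R13 entry built from arm_core_regs[].
 *
 *	int num = armv4_5_mode_to_number(ARMV4_5_MODE_IRQ);	// 2
 *	int idx = armv4_5_core_reg_map[num][13];		// 23
 */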
341
342 /**
343 * Configures host-side ARM records to reflect the specified CPSR.
344 * Later, code can use arm_reg_current() to map register numbers
345 * according to how they are exposed by this mode.
346 */
347 void arm_set_cpsr(struct arm *arm, uint32_t cpsr)
348 {
349 enum armv4_5_mode mode = cpsr & 0x1f;
350 int num;
351
352 /* NOTE: this may be called very early, before the register
353 * cache is set up. We can't defend against many errors, in
354 * particular against CPSRs that aren't valid *here* ...
355 */
356 if (arm->cpsr) {
357 buf_set_u32(arm->cpsr->value, 0, 32, cpsr);
358 arm->cpsr->valid = 1;
359 arm->cpsr->dirty = 0;
360 }
361
362 arm->core_mode = mode;
363
364 /* mode_to_number() warned; set up a somewhat-sane mapping */
365 num = armv4_5_mode_to_number(mode);
366 if (num < 0) {
367 mode = ARMV4_5_MODE_USR;
368 num = 0;
369 }
370
371 arm->map = &armv4_5_core_reg_map[num][0];
372 arm->spsr = (mode == ARMV4_5_MODE_USR || mode == ARMV4_5_MODE_SYS)
373 ? NULL
374 : arm->core_cache->reg_list + arm->map[16];
375
376 /* Older ARMs won't have the J bit */
377 enum armv4_5_state state;
378
379 if (cpsr & (1 << 5)) { /* T */
380 if (cpsr & (1 << 24)) { /* J */
381 LOG_WARNING("ThumbEE -- incomplete support");
382 state = ARM_STATE_THUMB_EE;
383 } else
384 state = ARMV4_5_STATE_THUMB;
385 } else {
386 if (cpsr & (1 << 24)) { /* J */
387 LOG_ERROR("Jazelle state handling is BROKEN!");
388 state = ARMV4_5_STATE_JAZELLE;
389 } else
390 state = ARMV4_5_STATE_ARM;
391 }
392 arm->core_state = state;
393 }
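
/* Decoding sketch (example values only): bit 5 (T) and bit 24 (J) of the
 * CPSR select the core state recorded above.
 *
 *	arm_set_cpsr(arm, 0x000000d3);	// T=0, J=0 --> ARMV4_5_STATE_ARM
 *	arm_set_cpsr(arm, 0x000000f3);	// T=1, J=0 --> ARMV4_5_STATE_THUMB
 */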
394
395 /**
396 * Returns handle to the register currently mapped to a given number.
397 * Someone must have called arm_set_cpsr() before.
398 *
399 * \param arm This core's state and registers are used.
400 * \param regnum From 0..15 corresponding to R0..R14 and PC.
401 * Note that R0..R7 don't require mapping; you may access those
402 * as the first eight entries in the register cache. Likewise
403 * R15 (PC) doesn't need mapping; you may also access it directly.
404 * However, R8..R14, and SPSR (arm->spsr) *must* be mapped.
405 * CPSR (arm->cpsr) is also not mapped.
406 */
407 struct reg *arm_reg_current(struct arm *arm, unsigned regnum)
408 {
409 struct reg *r;
410
411 if (regnum > 16)
412 return NULL;
413
414 r = arm->core_cache->reg_list + arm->map[regnum];
415
416 /* e.g. invalid CPSR said "secure monitor" mode on a core
417 * that doesn't support it...
418 */
419 if (!r) {
420 LOG_ERROR("Invalid CPSR mode");
421 r = arm->core_cache->reg_list + regnum;
422 }
423
424 return r;
425 }
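
/* Usage sketch (hypothetical): read the banked LR of whatever mode the
 * last arm_set_cpsr() call selected.
 *
 *	struct reg *lr = arm_reg_current(arm, 14);
 *	uint32_t lr_value = buf_get_u32(lr->value, 0, 32);
 */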
426
427 static const uint8_t arm_gdb_dummy_fp_value[12];
428
429 /**
430 * Dummy FPA registers are required to support GDB on ARM.
431 * Register packets require eight obsolete FPA register values.
432 * Modern ARM cores use Vector Floating Point (VFP), if they
433 * have any floating point support. VFP is not FPA-compatible.
434 */
435 struct reg arm_gdb_dummy_fp_reg =
436 {
437 .name = "GDB dummy FPA register",
438 .value = (uint8_t *) arm_gdb_dummy_fp_value,
439 .valid = 1,
440 .size = 96,
441 };
442
443 static const uint8_t arm_gdb_dummy_fps_value[4];
444
445 /**
446 * Dummy FPA status registers are required to support GDB on ARM.
447 * Register packets require an obsolete FPA status register.
448 */
449 struct reg arm_gdb_dummy_fps_reg =
450 {
451 .name = "GDB dummy FPA status register",
452 .value = (uint8_t *) arm_gdb_dummy_fps_value,
453 .valid = 1,
454 .size = 32,
455 };
456
457 static void arm_gdb_dummy_init(void) __attribute__ ((constructor));
458
459 static void arm_gdb_dummy_init(void)
460 {
461 register_init_dummy(&arm_gdb_dummy_fp_reg);
462 register_init_dummy(&arm_gdb_dummy_fps_reg);
463 }
464
465 static int armv4_5_get_core_reg(struct reg *reg)
466 {
467 int retval;
468 struct arm_reg *armv4_5 = reg->arch_info;
469 struct target *target = armv4_5->target;
470
471 if (target->state != TARGET_HALTED)
472 {
473 LOG_ERROR("Target not halted");
474 return ERROR_TARGET_NOT_HALTED;
475 }
476
477 retval = armv4_5->armv4_5_common->read_core_reg(target, reg, armv4_5->num, armv4_5->mode);
478 if (retval == ERROR_OK) {
479 reg->valid = 1;
480 reg->dirty = 0;
481 }
482
483 return retval;
484 }
485
486 static int armv4_5_set_core_reg(struct reg *reg, uint8_t *buf)
487 {
488 struct arm_reg *armv4_5 = reg->arch_info;
489 struct target *target = armv4_5->target;
490 struct arm *armv4_5_target = target_to_armv4_5(target);
491 uint32_t value = buf_get_u32(buf, 0, 32);
492
493 if (target->state != TARGET_HALTED)
494 {
495 LOG_ERROR("Target not halted");
496 return ERROR_TARGET_NOT_HALTED;
497 }
498
499 /* Except for CPSR, the "reg" command exposes a writeback model
500 * for the register cache.
501 */
502 if (reg == armv4_5_target->cpsr) {
503 arm_set_cpsr(armv4_5_target, value);
504
505 /* Older cores need help to be in ARM mode during halt
506 * mode debug, so we clear the J and T bits if we flush.
507 * For newer cores (v6/v7a/v7r) we don't need that, but
508 * it won't hurt since CPSR is always flushed anyway.
509 */
510 if (armv4_5_target->core_mode !=
511 (enum armv4_5_mode)(value & 0x1f)) {
512 LOG_DEBUG("changing ARM core mode to '%s'",
513 arm_mode_name(value & 0x1f));
514 value &= ~((1 << 24) | (1 << 5));
515 armv4_5_target->write_core_reg(target, reg,
516 16, ARMV4_5_MODE_ANY, value);
517 }
518 } else {
519 buf_set_u32(reg->value, 0, 32, value);
520 reg->valid = 1;
521 }
522 reg->dirty = 1;
523
524 return ERROR_OK;
525 }
526
527 static const struct reg_arch_type arm_reg_type = {
528 .get = armv4_5_get_core_reg,
529 .set = armv4_5_set_core_reg,
530 };
531
532 struct reg_cache* armv4_5_build_reg_cache(struct target *target, struct arm *armv4_5_common)
533 {
534 int num_regs = ARRAY_SIZE(arm_core_regs);
535 struct reg_cache *cache = malloc(sizeof(struct reg_cache));
536 struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
537 struct arm_reg *arch_info = calloc(num_regs, sizeof(struct arm_reg));
538 int i;
539
540 if (!cache || !reg_list || !arch_info) {
541 free(cache);
542 free(reg_list);
543 free(arch_info);
544 return NULL;
545 }
546
547 cache->name = "ARM registers";
548 cache->next = NULL;
549 cache->reg_list = reg_list;
550 cache->num_regs = 0;
551
552 for (i = 0; i < num_regs; i++)
553 {
554 /* Skip registers this core doesn't expose */
555 if (arm_core_regs[i].mode == ARM_MODE_MON
556 && armv4_5_common->core_type != ARM_MODE_MON)
557 continue;
558
559 /* REVISIT handle Cortex-M, which only shadows R13/SP */
560
561 arch_info[i].num = arm_core_regs[i].cookie;
562 arch_info[i].mode = arm_core_regs[i].mode;
563 arch_info[i].target = target;
564 arch_info[i].armv4_5_common = armv4_5_common;
565
566 reg_list[i].name = (char *) arm_core_regs[i].name;
567 reg_list[i].size = 32;
568 reg_list[i].value = &arch_info[i].value;
569 reg_list[i].type = &arm_reg_type;
570 reg_list[i].arch_info = &arch_info[i];
571
572 cache->num_regs++;
573 }
574
575 armv4_5_common->cpsr = reg_list + ARMV4_5_CPSR;
576 armv4_5_common->core_cache = cache;
577 return cache;
578 }
579
580 int armv4_5_arch_state(struct target *target)
581 {
582 struct arm *armv4_5 = target_to_armv4_5(target);
583
584 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
585 {
586 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
587 return ERROR_FAIL;
588 }
589
590 LOG_USER("target halted in %s state due to %s, current mode: %s\ncpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "",
591 armv4_5_state_strings[armv4_5->core_state],
592 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name,
593 arm_mode_name(armv4_5->core_mode),
594 buf_get_u32(armv4_5->cpsr->value, 0, 32),
595 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
596
597 return ERROR_OK;
598 }
599
600 #define ARMV4_5_CORE_REG_MODENUM(cache, mode, num) \
601 cache->reg_list[armv4_5_core_reg_map[mode][num]]
602
603 COMMAND_HANDLER(handle_armv4_5_reg_command)
604 {
605 struct target *target = get_current_target(CMD_CTX);
606 struct arm *armv4_5 = target_to_armv4_5(target);
607 unsigned num_regs;
608 struct reg *regs;
609
610 if (!is_arm(armv4_5))
611 {
612 command_print(CMD_CTX, "current target isn't an ARM");
613 return ERROR_FAIL;
614 }
615
616 if (target->state != TARGET_HALTED)
617 {
618 command_print(CMD_CTX, "error: target must be halted for register accesses");
619 return ERROR_FAIL;
620 }
621
622 if (!is_arm_mode(armv4_5->core_mode))
623 return ERROR_FAIL;
624
625 if (!armv4_5->full_context) {
626 command_print(CMD_CTX, "error: target doesn't support %s",
627 CMD_NAME);
628 return ERROR_FAIL;
629 }
630
631 num_regs = armv4_5->core_cache->num_regs;
632 regs = armv4_5->core_cache->reg_list;
633
634 for (unsigned mode = 0; mode < ARRAY_SIZE(arm_mode_data); mode++) {
635 const char *name;
636 char *sep = "\n";
637 char *shadow = "";
638
639 /* label this bank of registers (or shadows) */
640 switch (arm_mode_data[mode].psr) {
641 case ARMV4_5_MODE_SYS:
642 continue;
643 case ARMV4_5_MODE_USR:
644 name = "System and User";
645 sep = "";
646 break;
647 case ARM_MODE_MON:
648 if (armv4_5->core_type != ARM_MODE_MON)
649 continue;
650 /* FALLTHROUGH */
651 default:
652 name = arm_mode_data[mode].name;
653 shadow = "shadow ";
654 break;
655 }
656 command_print(CMD_CTX, "%s%s mode %sregisters",
657 sep, name, shadow);
658
659 /* display N rows of up to 4 registers each */
660 for (unsigned i = 0; i < arm_mode_data[mode].n_indices;) {
661 char output[80];
662 int output_len = 0;
663
664 for (unsigned j = 0; j < 4; j++, i++) {
665 uint32_t value;
666 struct reg *reg = regs;
667
668 if (i >= arm_mode_data[mode].n_indices)
669 break;
670
671 reg += arm_mode_data[mode].indices[i];
672
673 /* REVISIT be smarter about faults... */
674 if (!reg->valid)
675 armv4_5->full_context(target);
676
677 value = buf_get_u32(reg->value, 0, 32);
678 output_len += snprintf(output + output_len,
679 sizeof(output) - output_len,
680 "%8s: %8.8" PRIx32 " ",
681 reg->name, value);
682 }
683 command_print(CMD_CTX, "%s", output);
684 }
685 }
686
687 return ERROR_OK;
688 }
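
/* Output sketch for the loop above (register values are illustrative):
 *
 *	System and User mode registers
 *	      r0: 00000000       r1: 00000001       r2: deadbeef       r3: 00000000
 *	...
 */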
689
690 COMMAND_HANDLER(handle_armv4_5_core_state_command)
691 {
692 struct target *target = get_current_target(CMD_CTX);
693 struct arm *armv4_5 = target_to_armv4_5(target);
694
695 if (!is_arm(armv4_5))
696 {
697 command_print(CMD_CTX, "current target isn't an ARM");
698 return ERROR_FAIL;
699 }
700
701 if (CMD_ARGC > 0)
702 {
703 if (strcmp(CMD_ARGV[0], "arm") == 0)
704 {
705 armv4_5->core_state = ARMV4_5_STATE_ARM;
706 }
707 if (strcmp(CMD_ARGV[0], "thumb") == 0)
708 {
709 armv4_5->core_state = ARMV4_5_STATE_THUMB;
710 }
711 }
712
713 command_print(CMD_CTX, "core state: %s", armv4_5_state_strings[armv4_5->core_state]);
714
715 return ERROR_OK;
716 }
717
718 COMMAND_HANDLER(handle_armv4_5_disassemble_command)
719 {
720 int retval = ERROR_OK;
721 struct target *target = get_current_target(CMD_CTX);
722 struct arm *arm = target ? target_to_arm(target) : NULL;
723 uint32_t address;
724 int count = 1;
725 int thumb = 0;
726
727 if (!is_arm(arm)) {
728 command_print(CMD_CTX, "current target isn't an ARM");
729 return ERROR_FAIL;
730 }
731
732 switch (CMD_ARGC) {
733 case 3:
734 if (strcmp(CMD_ARGV[2], "thumb") != 0)
735 goto usage;
736 thumb = 1;
737 /* FALL THROUGH */
738 case 2:
739 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
740 /* FALL THROUGH */
741 case 1:
742 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
743 if (address & 0x01) {
744 if (!thumb) {
745 command_print(CMD_CTX, "Disassemble as Thumb");
746 thumb = 1;
747 }
748 address &= ~1;
749 }
750 break;
751 default:
752 usage:
753 command_print(CMD_CTX,
754 "usage: arm disassemble <address> [<count> ['thumb']]");
755 count = 0;
756 retval = ERROR_FAIL;
757 }
758
759 while (count-- > 0) {
760 struct arm_instruction cur_instruction;
761
762 if (thumb) {
763 /* Always use Thumb2 disassembly for best handling
764 * of 32-bit BL/BLX, and to work with newer cores
765 * (some ARMv6, all ARMv7) that use Thumb2.
766 */
767 retval = thumb2_opcode(target, address,
768 &cur_instruction);
769 if (retval != ERROR_OK)
770 break;
771 } else {
772 uint32_t opcode;
773
774 retval = target_read_u32(target, address, &opcode);
775 if (retval != ERROR_OK)
776 break;
777 retval = arm_evaluate_opcode(opcode, address,
778 		&cur_instruction);
779 if (retval != ERROR_OK)
780 break;
781 }
782 command_print(CMD_CTX, "%s", cur_instruction.text);
783 address += cur_instruction.instruction_size;
784 }
785
786 return retval;
787 }
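
/* Invocation sketch, per the usage string above (addresses are arbitrary):
 *
 *	arm disassemble 0x00008000 4
 *	arm disassemble 0x00008000 4 thumb
 *	arm disassemble 0x00008001		// odd address implies Thumb
 */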
788
789 static const struct command_registration arm_exec_command_handlers[] = {
790 {
791 .name = "reg",
792 .handler = &handle_armv4_5_reg_command,
793 .mode = COMMAND_EXEC,
794 .help = "display ARM core registers",
795 },
796 {
797 .name = "core_state",
798 .handler = &handle_armv4_5_core_state_command,
799 .mode = COMMAND_EXEC,
800 .usage = "<arm | thumb>",
801 .help = "display/change ARM core state",
802 },
803 {
804 .name = "disassemble",
805 .handler = &handle_armv4_5_disassemble_command,
806 .mode = COMMAND_EXEC,
807 .usage = "<address> [<count> ['thumb']]",
808 .help = "disassemble instructions ",
809 },
810 COMMAND_REGISTRATION_DONE
811 };
812 static const struct command_registration arm_command_handlers[] = {
813 {
814 .name = "arm",
815 .mode = COMMAND_ANY,
816 .help = "ARM command group",
817 .chain = arm_exec_command_handlers,
818 },
819 COMMAND_REGISTRATION_DONE
820 };
821
822 int armv4_5_register_commands(struct command_context *cmd_ctx)
823 {
824 return register_commands(cmd_ctx, NULL, arm_command_handlers);
825 }
826
827 int armv4_5_get_gdb_reg_list(struct target *target, struct reg **reg_list[], int *reg_list_size)
828 {
829 struct arm *armv4_5 = target_to_armv4_5(target);
830 int i;
831
832 if (!is_arm_mode(armv4_5->core_mode))
833 return ERROR_FAIL;
834
835 *reg_list_size = 26;
836 *reg_list = malloc(sizeof(struct reg*) * (*reg_list_size));
837
838 for (i = 0; i < 16; i++)
839 (*reg_list)[i] = arm_reg_current(armv4_5, i);
840
841 for (i = 16; i < 24; i++)
842 (*reg_list)[i] = &arm_gdb_dummy_fp_reg;
843
844 (*reg_list)[24] = &arm_gdb_dummy_fps_reg;
845 (*reg_list)[25] = armv4_5->cpsr;
846
847 return ERROR_OK;
848 }
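
/* The resulting list follows GDB's legacy ARM/FPA numbering: entries
 * 0..15 are r0..r15 for the current mode, 16..23 are the dummy FPA
 * registers f0..f7, 24 is the dummy FPA status register, and 25 is
 * the CPSR.
 */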
849
850 /* wait for execution to complete and check exit point */
851 static int armv4_5_run_algorithm_completion(struct target *target, uint32_t exit_point, int timeout_ms, void *arch_info)
852 {
853 int retval;
854 struct arm *armv4_5 = target_to_armv4_5(target);
855
856 if ((retval = target_wait_state(target, TARGET_HALTED, timeout_ms)) != ERROR_OK)
857 {
858 return retval;
859 }
860 if (target->state != TARGET_HALTED)
861 {
862 if ((retval = target_halt(target)) != ERROR_OK)
863 return retval;
864 if ((retval = target_wait_state(target, TARGET_HALTED, 500)) != ERROR_OK)
865 {
866 return retval;
867 }
868 return ERROR_TARGET_TIMEOUT;
869 }
870
871 /* fast exit: ARMv5+ code can use BKPT */
872 if (exit_point && buf_get_u32(armv4_5->core_cache->reg_list[15].value,
873 0, 32) != exit_point)
874 {
875 LOG_WARNING("target reentered debug state, but not at the desired exit point: 0x%4.4" PRIx32 "",
876 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
877 return ERROR_TARGET_TIMEOUT;
878 }
879
880 return ERROR_OK;
881 }
882
883 int armv4_5_run_algorithm_inner(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info, int (*run_it)(struct target *target, uint32_t exit_point, int timeout_ms, void *arch_info))
884 {
885 struct arm *armv4_5 = target_to_armv4_5(target);
886 struct armv4_5_algorithm *armv4_5_algorithm_info = arch_info;
887 enum armv4_5_state core_state = armv4_5->core_state;
888 uint32_t context[17];
889 uint32_t cpsr;
890 int exit_breakpoint_size = 0;
891 int i;
892 int retval = ERROR_OK;
893 LOG_DEBUG("Running algorithm");
894
895 if (armv4_5_algorithm_info->common_magic != ARMV4_5_COMMON_MAGIC)
896 {
897 LOG_ERROR("current target isn't an ARMV4/5 target");
898 return ERROR_TARGET_INVALID;
899 }
900
901 if (target->state != TARGET_HALTED)
902 {
903 LOG_WARNING("target not halted");
904 return ERROR_TARGET_NOT_HALTED;
905 }
906
907 if (!is_arm_mode(armv4_5->core_mode))
908 return ERROR_FAIL;
909
910 /* armv5 and later can terminate with BKPT instruction; less overhead */
911 if (!exit_point && armv4_5->is_armv4)
912 {
913 LOG_ERROR("ARMv4 target needs HW breakpoint location");
914 return ERROR_FAIL;
915 }
916
917 /* save r0..pc, cpsr-or-spsr, and then cpsr-for-sure;
918 * they'll be restored later.
919 */
920 for (i = 0; i <= 16; i++)
921 {
922 struct reg *r;
923
924 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
925 armv4_5_algorithm_info->core_mode, i);
926 if (!r->valid)
927 armv4_5->read_core_reg(target, r, i,
928 armv4_5_algorithm_info->core_mode);
929 context[i] = buf_get_u32(r->value, 0, 32);
930 }
931 cpsr = buf_get_u32(armv4_5->cpsr->value, 0, 32);
932
933 for (i = 0; i < num_mem_params; i++)
934 {
935 if ((retval = target_write_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
936 {
937 return retval;
938 }
939 }
940
941 for (i = 0; i < num_reg_params; i++)
942 {
943 struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
944 if (!reg)
945 {
946 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
947 return ERROR_INVALID_ARGUMENTS;
948 }
949
950 if (reg->size != reg_params[i].size)
951 {
952 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
953 return ERROR_INVALID_ARGUMENTS;
954 }
955
956 if ((retval = armv4_5_set_core_reg(reg, reg_params[i].value)) != ERROR_OK)
957 {
958 return retval;
959 }
960 }
961
962 armv4_5->core_state = armv4_5_algorithm_info->core_state;
963 if (armv4_5->core_state == ARMV4_5_STATE_ARM)
964 exit_breakpoint_size = 4;
965 else if (armv4_5->core_state == ARMV4_5_STATE_THUMB)
966 exit_breakpoint_size = 2;
967 else
968 {
969 LOG_ERROR("BUG: can't execute algorithms when not in ARM or Thumb state");
970 return ERROR_INVALID_ARGUMENTS;
971 }
972
973 if (armv4_5_algorithm_info->core_mode != ARMV4_5_MODE_ANY)
974 {
975 LOG_DEBUG("setting core_mode: 0x%2.2x",
976 armv4_5_algorithm_info->core_mode);
977 buf_set_u32(armv4_5->cpsr->value, 0, 5,
978 armv4_5_algorithm_info->core_mode);
979 armv4_5->cpsr->dirty = 1;
980 armv4_5->cpsr->valid = 1;
981 }
982
983 /* terminate using a hardware or (ARMv5+) software breakpoint */
984 if (exit_point && (retval = breakpoint_add(target, exit_point,
985 exit_breakpoint_size, BKPT_HARD)) != ERROR_OK)
986 {
987 LOG_ERROR("can't add HW breakpoint to terminate algorithm");
988 return ERROR_TARGET_FAILURE;
989 }
990
991 if ((retval = target_resume(target, 0, entry_point, 1, 1)) != ERROR_OK)
992 {
993 return retval;
994 }
995 int retvaltemp;
996 retval = run_it(target, exit_point, timeout_ms, arch_info);
997
998 if (exit_point)
999 breakpoint_remove(target, exit_point);
1000
1001 if (retval != ERROR_OK)
1002 return retval;
1003
1004 for (i = 0; i < num_mem_params; i++)
1005 {
1006 if (mem_params[i].direction != PARAM_OUT)
1007 if ((retvaltemp = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
1008 {
1009 retval = retvaltemp;
1010 }
1011 }
1012
1013 for (i = 0; i < num_reg_params; i++)
1014 {
1015 if (reg_params[i].direction != PARAM_OUT)
1016 {
1017
1018 struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
1019 if (!reg)
1020 {
1021 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
1022 retval = ERROR_INVALID_ARGUMENTS;
1023 continue;
1024 }
1025
1026 if (reg->size != reg_params[i].size)
1027 {
1028 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
1029 retval = ERROR_INVALID_ARGUMENTS;
1030 continue;
1031 }
1032
1033 buf_set_u32(reg_params[i].value, 0, 32, buf_get_u32(reg->value, 0, 32));
1034 }
1035 }
1036
1037 /* restore everything we saved before (17 or 18 registers) */
1038 for (i = 0; i <= 16; i++)
1039 {
1040 uint32_t regvalue;
1041 regvalue = buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).value, 0, 32);
1042 if (regvalue != context[i])
1043 {
1044 LOG_DEBUG("restoring register %s with value 0x%8.8" PRIx32 "", ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).name, context[i]);
1045 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).value, 0, 32, context[i]);
1046 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).valid = 1;
1047 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).dirty = 1;
1048 }
1049 }
1050
1051 arm_set_cpsr(armv4_5, cpsr);
1052 armv4_5->cpsr->dirty = 1;
1053
1054 armv4_5->core_state = core_state;
1055
1056 return retval;
1057 }
1058
1059 int armv4_5_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info)
1060 {
1061 return armv4_5_run_algorithm_inner(target, num_mem_params, mem_params, num_reg_params, reg_params, entry_point, exit_point, timeout_ms, arch_info, armv4_5_run_algorithm_completion);
1062 }
1063
1064 /**
1065 * Runs ARM code in the target to calculate a CRC32 checksum.
1066 *
1067 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
1068 */
1069 int arm_checksum_memory(struct target *target,
1070 uint32_t address, uint32_t count, uint32_t *checksum)
1071 {
1072 struct working_area *crc_algorithm;
1073 struct armv4_5_algorithm armv4_5_info;
1074 struct reg_param reg_params[2];
1075 int retval;
1076 uint32_t i;
1077
1078 static const uint32_t arm_crc_code[] = {
1079 0xE1A02000, /* mov r2, r0 */
1080 0xE3E00000, /* mov r0, #0xffffffff */
1081 0xE1A03001, /* mov r3, r1 */
1082 0xE3A04000, /* mov r4, #0 */
1083 0xEA00000B, /* b ncomp */
1084 /* nbyte: */
1085 0xE7D21004, /* ldrb r1, [r2, r4] */
1086 0xE59F7030, /* ldr r7, CRC32XOR */
1087 0xE0200C01, /* eor r0, r0, r1, asl 24 */
1088 0xE3A05000, /* mov r5, #0 */
1089 /* loop: */
1090 0xE3500000, /* cmp r0, #0 */
1091 0xE1A06080, /* mov r6, r0, asl #1 */
1092 0xE2855001, /* add r5, r5, #1 */
1093 0xE1A00006, /* mov r0, r6 */
1094 0xB0260007, /* eorlt r0, r6, r7 */
1095 0xE3550008, /* cmp r5, #8 */
1096 0x1AFFFFF8, /* bne loop */
1097 0xE2844001, /* add r4, r4, #1 */
1098 /* ncomp: */
1099 0xE1540003, /* cmp r4, r3 */
1100 0x1AFFFFF1, /* bne nbyte */
1101 /* end: */
1102 0xEAFFFFFE, /* b end */
1103 /* CRC32XOR: */
1104 0x04C11DB7 /* .word 0x04C11DB7 */
1105 };
1106
1107 retval = target_alloc_working_area(target,
1108 sizeof(arm_crc_code), &crc_algorithm);
1109 if (retval != ERROR_OK)
1110 return retval;
1111
1112 /* convert code into a buffer in target endianness */
1113 for (i = 0; i < ARRAY_SIZE(arm_crc_code); i++) {
1114 retval = target_write_u32(target,
1115 crc_algorithm->address + i * sizeof(uint32_t),
1116 arm_crc_code[i]);
1117 if (retval != ERROR_OK)
1118 return retval;
1119 }
1120
1121 armv4_5_info.common_magic = ARMV4_5_COMMON_MAGIC;
1122 armv4_5_info.core_mode = ARMV4_5_MODE_SVC;
1123 armv4_5_info.core_state = ARMV4_5_STATE_ARM;
1124
1125 init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
1126 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1127
1128 buf_set_u32(reg_params[0].value, 0, 32, address);
1129 buf_set_u32(reg_params[1].value, 0, 32, count);
1130
1131 /* 20 second timeout/megabyte */
1132 int timeout = 20000 * (1 + (count / (1024 * 1024)));
1133
1134 retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
1135 crc_algorithm->address,
1136 crc_algorithm->address + sizeof(arm_crc_code) - 8,
1137 timeout, &armv4_5_info);
1138 if (retval != ERROR_OK) {
1139 LOG_ERROR("error executing ARM crc algorithm");
1140 destroy_reg_param(&reg_params[0]);
1141 destroy_reg_param(&reg_params[1]);
1142 target_free_working_area(target, crc_algorithm);
1143 return retval;
1144 }
1145
1146 *checksum = buf_get_u32(reg_params[0].value, 0, 32);
1147
1148 destroy_reg_param(&reg_params[0]);
1149 destroy_reg_param(&reg_params[1]);
1150
1151 target_free_working_area(target, crc_algorithm);
1152
1153 return ERROR_OK;
1154 }
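
/* Caller sketch (hypothetical address and image_size): verify what was
 * just programmed into flash.
 *
 *	uint32_t crc;
 *	retval = arm_checksum_memory(target, 0x00100000, image_size, &crc);
 */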
1155
1156 /**
1157 * Runs ARM code in the target to check whether a memory block holds
1158 * all ones. NOR flash which has been erased, and thus may be written,
1159 * holds all ones.
1160 *
1161 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
1162 */
1163 int arm_blank_check_memory(struct target *target,
1164 uint32_t address, uint32_t count, uint32_t *blank)
1165 {
1166 struct working_area *check_algorithm;
1167 struct reg_param reg_params[3];
1168 struct armv4_5_algorithm armv4_5_info;
1169 int retval;
1170 uint32_t i;
1171
1172 static const uint32_t check_code[] = {
1173 /* loop: */
1174 0xe4d03001, /* ldrb r3, [r0], #1 */
1175 0xe0022003, /* and r2, r2, r3 */
1176 0xe2511001, /* subs r1, r1, #1 */
1177 0x1afffffb, /* bne loop */
1178 /* end: */
1179 0xeafffffe /* b end */
1180 };
1181
1182 /* make sure we have a working area */
1183 retval = target_alloc_working_area(target,
1184 sizeof(check_code), &check_algorithm);
1185 if (retval != ERROR_OK)
1186 return retval;
1187
1188 /* convert code into a buffer in target endianness */
1189 for (i = 0; i < ARRAY_SIZE(check_code); i++) {
1190 retval = target_write_u32(target,
1191 check_algorithm->address
1192 + i * sizeof(uint32_t),
1193 check_code[i]);
1194 if (retval != ERROR_OK)
1195 return retval;
1196 }
1197
1198 armv4_5_info.common_magic = ARMV4_5_COMMON_MAGIC;
1199 armv4_5_info.core_mode = ARMV4_5_MODE_SVC;
1200 armv4_5_info.core_state = ARMV4_5_STATE_ARM;
1201
1202 init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
1203 buf_set_u32(reg_params[0].value, 0, 32, address);
1204
1205 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1206 buf_set_u32(reg_params[1].value, 0, 32, count);
1207
1208 init_reg_param(&reg_params[2], "r2", 32, PARAM_IN_OUT);
1209 buf_set_u32(reg_params[2].value, 0, 32, 0xff);
1210
1211 retval = target_run_algorithm(target, 0, NULL, 3, reg_params,
1212 check_algorithm->address,
1213 check_algorithm->address + sizeof(check_code) - 4,
1214 10000, &armv4_5_info);
1215 if (retval != ERROR_OK) {
1216 destroy_reg_param(&reg_params[0]);
1217 destroy_reg_param(&reg_params[1]);
1218 destroy_reg_param(&reg_params[2]);
1219 target_free_working_area(target, check_algorithm);
1220 return retval;
1221 }
1222
1223 *blank = buf_get_u32(reg_params[2].value, 0, 32);
1224
1225 destroy_reg_param(&reg_params[0]);
1226 destroy_reg_param(&reg_params[1]);
1227 destroy_reg_param(&reg_params[2]);
1228
1229 target_free_working_area(target, check_algorithm);
1230
1231 return ERROR_OK;
1232 }
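
/* Caller sketch (sector_base and sector_size are placeholders): *blank
 * comes back as 0xff only if every byte in the region read back as 0xff,
 * since the algorithm ANDs each byte into r2.
 *
 *	uint32_t blank;
 *	int err = arm_blank_check_memory(target, sector_base, sector_size, &blank);
 *	bool erased = (err == ERROR_OK) && (blank == 0xff);
 */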
1233
1234 static int arm_full_context(struct target *target)
1235 {
1236 struct arm *armv4_5 = target_to_armv4_5(target);
1237 unsigned num_regs = armv4_5->core_cache->num_regs;
1238 struct reg *reg = armv4_5->core_cache->reg_list;
1239 int retval = ERROR_OK;
1240
1241 for (; num_regs && retval == ERROR_OK; num_regs--, reg++) {
1242 if (reg->valid)
1243 continue;
1244 retval = armv4_5_get_core_reg(reg);
1245 }
1246 return retval;
1247 }
1248
1249 int armv4_5_init_arch_info(struct target *target, struct arm *armv4_5)
1250 {
1251 target->arch_info = armv4_5;
1252 armv4_5->target = target;
1253
1254 armv4_5->common_magic = ARMV4_5_COMMON_MAGIC;
1255 arm_set_cpsr(armv4_5, ARMV4_5_MODE_USR);
1256
1257 /* core_type may be overridden by subtype logic */
1258 armv4_5->core_type = ARMV4_5_MODE_ANY;
1259
1260 /* default full_context() has no core-specific optimizations */
1261 if (!armv4_5->full_context && armv4_5->read_core_reg)
1262 armv4_5->full_context = arm_full_context;
1263
1264 return ERROR_OK;
1265 }
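
/* Typical wiring from a core driver's init path (sketch; where the
 * struct arm instance lives inside the driver's own data is up to the
 * driver):
 *
 *	armv4_5_init_arch_info(target, arm);
 *	armv4_5_build_reg_cache(target, arm);
 */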
