target: "mcr" and "mrc" are ARM-specific
[openocd.git] / src / target / armv4_5.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2008 by Spencer Oliver *
6 * spen@spen-soft.co.uk *
7 * *
8 * Copyright (C) 2008 by Oyvind Harboe *
9 * oyvind.harboe@zylin.com *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "armv4_5.h"
31 #include "arm_jtag.h"
32 #include "breakpoints.h"
33 #include "arm_disassembler.h"
34 #include "binarybuffer.h"
35 #include "algorithm.h"
36 #include "register.h"
37
38
39 /* offsets into armv4_5 core register cache */
40 enum {
41 // ARMV4_5_CPSR = 31,
42 ARMV4_5_SPSR_FIQ = 32,
43 ARMV4_5_SPSR_IRQ = 33,
44 ARMV4_5_SPSR_SVC = 34,
45 ARMV4_5_SPSR_ABT = 35,
46 ARMV4_5_SPSR_UND = 36,
47 ARM_SPSR_MON = 39,
48 };
49
50 static const uint8_t arm_usr_indices[17] = {
51 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ARMV4_5_CPSR,
52 };
53
54 static const uint8_t arm_fiq_indices[8] = {
55 16, 17, 18, 19, 20, 21, 22, ARMV4_5_SPSR_FIQ,
56 };
57
58 static const uint8_t arm_irq_indices[3] = {
59 23, 24, ARMV4_5_SPSR_IRQ,
60 };
61
62 static const uint8_t arm_svc_indices[3] = {
63 25, 26, ARMV4_5_SPSR_SVC,
64 };
65
66 static const uint8_t arm_abt_indices[3] = {
67 27, 28, ARMV4_5_SPSR_ABT,
68 };
69
70 static const uint8_t arm_und_indices[3] = {
71 29, 30, ARMV4_5_SPSR_UND,
72 };
73
74 static const uint8_t arm_mon_indices[3] = {
75 37, 38, ARM_SPSR_MON,
76 };
77
78 static const struct {
79 const char *name;
80 unsigned short psr;
81 /* For user and system modes, these list indices for all registers.
82 * Otherwise they're just indices for the shadow registers and SPSR.
83 */
84 unsigned short n_indices;
85 const uint8_t *indices;
86 } arm_mode_data[] = {
87 /* Seven modes are standard from ARM7 on. "System" and "User" share
88 * the same registers; other modes shadow from 3 to 8 registers.
89 */
90 {
91 .name = "User",
92 .psr = ARMV4_5_MODE_USR,
93 .n_indices = ARRAY_SIZE(arm_usr_indices),
94 .indices = arm_usr_indices,
95 },
96 {
97 .name = "FIQ",
98 .psr = ARMV4_5_MODE_FIQ,
99 .n_indices = ARRAY_SIZE(arm_fiq_indices),
100 .indices = arm_fiq_indices,
101 },
102 {
103 .name = "Supervisor",
104 .psr = ARMV4_5_MODE_SVC,
105 .n_indices = ARRAY_SIZE(arm_svc_indices),
106 .indices = arm_svc_indices,
107 },
108 {
109 .name = "Abort",
110 .psr = ARMV4_5_MODE_ABT,
111 .n_indices = ARRAY_SIZE(arm_abt_indices),
112 .indices = arm_abt_indices,
113 },
114 {
115 .name = "IRQ",
116 .psr = ARMV4_5_MODE_IRQ,
117 .n_indices = ARRAY_SIZE(arm_irq_indices),
118 .indices = arm_irq_indices,
119 },
120 {
121 .name = "Undefined instruction",
122 .psr = ARMV4_5_MODE_UND,
123 .n_indices = ARRAY_SIZE(arm_und_indices),
124 .indices = arm_und_indices,
125 },
126 {
127 .name = "System",
128 .psr = ARMV4_5_MODE_SYS,
129 .n_indices = ARRAY_SIZE(arm_usr_indices),
130 .indices = arm_usr_indices,
131 },
132 /* TrustZone "Security Extensions" add a secure monitor mode.
133 * This is distinct from a "debug monitor" which can support
134 * non-halting debug, in conjunction with some debuggers.
135 */
136 {
137 .name = "Secure Monitor",
138 .psr = ARM_MODE_MON,
139 .n_indices = ARRAY_SIZE(arm_mon_indices),
140 .indices = arm_mon_indices,
141 },
142 };
143
144 /** Map PSR mode bits to the name of an ARM processor operating mode. */
145 const char *arm_mode_name(unsigned psr_mode)
146 {
147 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
148 if (arm_mode_data[i].psr == psr_mode)
149 return arm_mode_data[i].name;
150 }
151 LOG_ERROR("unrecognized psr mode: %#02x", psr_mode);
152 return "UNRECOGNIZED";
153 }
154
155 /** Return true iff the parameter denotes a valid ARM processor mode. */
156 bool is_arm_mode(unsigned psr_mode)
157 {
158 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
159 if (arm_mode_data[i].psr == psr_mode)
160 return true;
161 }
162 return false;
163 }
164
165 /** Map PSR mode bits to linear number indexing armv4_5_core_reg_map */
166 int armv4_5_mode_to_number(enum armv4_5_mode mode)
167 {
168 switch (mode) {
169 case ARMV4_5_MODE_ANY:
170 /* map MODE_ANY to user mode */
171 case ARMV4_5_MODE_USR:
172 return 0;
173 case ARMV4_5_MODE_FIQ:
174 return 1;
175 case ARMV4_5_MODE_IRQ:
176 return 2;
177 case ARMV4_5_MODE_SVC:
178 return 3;
179 case ARMV4_5_MODE_ABT:
180 return 4;
181 case ARMV4_5_MODE_UND:
182 return 5;
183 case ARMV4_5_MODE_SYS:
184 return 6;
185 case ARM_MODE_MON:
186 return 7;
187 default:
188 LOG_ERROR("invalid mode value encountered %d", mode);
189 return -1;
190 }
191 }
192
193 /** Map linear number indexing armv4_5_core_reg_map to PSR mode bits. */
194 enum armv4_5_mode armv4_5_number_to_mode(int number)
195 {
196 switch (number) {
197 case 0:
198 return ARMV4_5_MODE_USR;
199 case 1:
200 return ARMV4_5_MODE_FIQ;
201 case 2:
202 return ARMV4_5_MODE_IRQ;
203 case 3:
204 return ARMV4_5_MODE_SVC;
205 case 4:
206 return ARMV4_5_MODE_ABT;
207 case 5:
208 return ARMV4_5_MODE_UND;
209 case 6:
210 return ARMV4_5_MODE_SYS;
211 case 7:
212 return ARM_MODE_MON;
213 default:
214 LOG_ERROR("mode index out of bounds %d", number);
215 return ARMV4_5_MODE_ANY;
216 }
217 }
218
219 char* armv4_5_state_strings[] =
220 {
221 "ARM", "Thumb", "Jazelle", "ThumbEE",
222 };
223
224 /* Templates for ARM core registers.
225 *
226 * NOTE: offsets in this table are coupled to the arm_mode_data
227 * table above, the armv4_5_core_reg_map array below, and also to
228 * the ARMV4_5_CPSR symbol (which should vanish after ARM11 updates).
229 */
230 static const struct {
231 /* The name is used for e.g. the "regs" command. */
232 const char *name;
233
234 /* The {cookie, mode} tuple uniquely identifies one register.
235 * In a given mode, cookies 0..15 map to registers R0..R15,
236 * with R13..R15 usually called SP, LR, PC.
237 *
238 * MODE_ANY is used as *input* to the mapping, and indicates
239 * various special cases (sigh) and errors.
240 *
241 * Cookie 16 is (currently) confusing, since it indicates
242 * CPSR -or- SPSR depending on whether 'mode' is MODE_ANY.
243 * (Exception modes have both CPSR and SPSR registers ...)
244 */
245 unsigned cookie;
246 enum armv4_5_mode mode;
247 } arm_core_regs[] = {
248 /* IMPORTANT: we guarantee that the first eight cached registers
249 * correspond to r0..r7, and the fifteenth to PC, so that callers
250 * don't need to map them.
251 */
252 { .name = "r0", .cookie = 0, .mode = ARMV4_5_MODE_ANY, },
253 { .name = "r1", .cookie = 1, .mode = ARMV4_5_MODE_ANY, },
254 { .name = "r2", .cookie = 2, .mode = ARMV4_5_MODE_ANY, },
255 { .name = "r3", .cookie = 3, .mode = ARMV4_5_MODE_ANY, },
256 { .name = "r4", .cookie = 4, .mode = ARMV4_5_MODE_ANY, },
257 { .name = "r5", .cookie = 5, .mode = ARMV4_5_MODE_ANY, },
258 { .name = "r6", .cookie = 6, .mode = ARMV4_5_MODE_ANY, },
259 { .name = "r7", .cookie = 7, .mode = ARMV4_5_MODE_ANY, },
260
261 /* NOTE: regs 8..12 might be shadowed by FIQ ... flagging
262 * them as MODE_ANY creates special cases. (ANY means
263 * "not mapped" elsewhere; here it's "everything but FIQ".)
264 */
265 { .name = "r8", .cookie = 8, .mode = ARMV4_5_MODE_ANY, },
266 { .name = "r9", .cookie = 9, .mode = ARMV4_5_MODE_ANY, },
267 { .name = "r10", .cookie = 10, .mode = ARMV4_5_MODE_ANY, },
268 { .name = "r11", .cookie = 11, .mode = ARMV4_5_MODE_ANY, },
269 { .name = "r12", .cookie = 12, .mode = ARMV4_5_MODE_ANY, },
270
271 /* NOTE all MODE_USR registers are equivalent to MODE_SYS ones */
272 { .name = "sp_usr", .cookie = 13, .mode = ARMV4_5_MODE_USR, },
273 { .name = "lr_usr", .cookie = 14, .mode = ARMV4_5_MODE_USR, },
274
275 /* guaranteed to be at index 15 */
276 { .name = "pc", .cookie = 15, .mode = ARMV4_5_MODE_ANY, },
277
278 { .name = "r8_fiq", .cookie = 8, .mode = ARMV4_5_MODE_FIQ, },
279 { .name = "r9_fiq", .cookie = 9, .mode = ARMV4_5_MODE_FIQ, },
280 { .name = "r10_fiq", .cookie = 10, .mode = ARMV4_5_MODE_FIQ, },
281 { .name = "r11_fiq", .cookie = 11, .mode = ARMV4_5_MODE_FIQ, },
282 { .name = "r12_fiq", .cookie = 12, .mode = ARMV4_5_MODE_FIQ, },
283
284 { .name = "sp_fiq", .cookie = 13, .mode = ARMV4_5_MODE_FIQ, },
285 { .name = "lr_fiq", .cookie = 14, .mode = ARMV4_5_MODE_FIQ, },
286
287 { .name = "sp_irq", .cookie = 13, .mode = ARMV4_5_MODE_IRQ, },
288 { .name = "lr_irq", .cookie = 14, .mode = ARMV4_5_MODE_IRQ, },
289
290 { .name = "sp_svc", .cookie = 13, .mode = ARMV4_5_MODE_SVC, },
291 { .name = "lr_svc", .cookie = 14, .mode = ARMV4_5_MODE_SVC, },
292
293 { .name = "sp_abt", .cookie = 13, .mode = ARMV4_5_MODE_ABT, },
294 { .name = "lr_abt", .cookie = 14, .mode = ARMV4_5_MODE_ABT, },
295
296 { .name = "sp_und", .cookie = 13, .mode = ARMV4_5_MODE_UND, },
297 { .name = "lr_und", .cookie = 14, .mode = ARMV4_5_MODE_UND, },
298
299 { .name = "cpsr", .cookie = 16, .mode = ARMV4_5_MODE_ANY, },
300 { .name = "spsr_fiq", .cookie = 16, .mode = ARMV4_5_MODE_FIQ, },
301 { .name = "spsr_irq", .cookie = 16, .mode = ARMV4_5_MODE_IRQ, },
302 { .name = "spsr_svc", .cookie = 16, .mode = ARMV4_5_MODE_SVC, },
303 { .name = "spsr_abt", .cookie = 16, .mode = ARMV4_5_MODE_ABT, },
304 { .name = "spsr_und", .cookie = 16, .mode = ARMV4_5_MODE_UND, },
305
306 { .name = "sp_mon", .cookie = 13, .mode = ARM_MODE_MON, },
307 { .name = "lr_mon", .cookie = 14, .mode = ARM_MODE_MON, },
308 { .name = "spsr_mon", .cookie = 16, .mode = ARM_MODE_MON, },
309 };
310
311 /* map core mode (USR, FIQ, ...) and register number to
312 * indices into the register cache
313 */
314 const int armv4_5_core_reg_map[8][17] =
315 {
316 { /* USR */
317 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
318 },
319 { /* FIQ (8 shadows of USR, vs normal 3) */
320 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 15, 32
321 },
322 { /* IRQ */
323 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 23, 24, 15, 33
324 },
325 { /* SVC */
326 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 25, 26, 15, 34
327 },
328 { /* ABT */
329 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 27, 28, 15, 35
330 },
331 { /* UND */
332 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 29, 30, 15, 36
333 },
334 { /* SYS (same registers as USR) */
335 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
336 },
337 { /* MON */
338 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 37, 38, 15, 39,
339 }
340 };
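/* Worked example of the two tables above: in IRQ mode (mode number 2),
 * armv4_5_core_reg_map[2][13] == 23, the cache index of "sp_irq" in
 * arm_core_regs[], and armv4_5_core_reg_map[2][16] == 33, the IRQ-mode
 * SPSR (ARMV4_5_SPSR_IRQ).
 */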
341
342 /**
343 * Configures host-side ARM records to reflect the specified CPSR.
344 * Later, code can use arm_reg_current() to map register numbers
345 * according to how they are exposed by this mode.
346 */
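/* For example, arm_set_cpsr(arm, 0xd3) records Supervisor mode with IRQ
 * and FIQ masked and the core in ARM (not Thumb or Jazelle) state.
 */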
347 void arm_set_cpsr(struct arm *arm, uint32_t cpsr)
348 {
349 enum armv4_5_mode mode = cpsr & 0x1f;
350 int num;
351
352 /* NOTE: this may be called very early, before the register
353 * cache is set up. We can't defend against many errors, in
354 * particular against CPSRs that aren't valid *here* ...
355 */
356 if (arm->cpsr) {
357 buf_set_u32(arm->cpsr->value, 0, 32, cpsr);
358 arm->cpsr->valid = 1;
359 arm->cpsr->dirty = 0;
360 }
361
362 arm->core_mode = mode;
363
364 /* mode_to_number() warned; set up a somewhat-sane mapping */
365 num = armv4_5_mode_to_number(mode);
366 if (num < 0) {
367 mode = ARMV4_5_MODE_USR;
368 num = 0;
369 }
370
371 arm->map = &armv4_5_core_reg_map[num][0];
372 arm->spsr = (mode == ARMV4_5_MODE_USR || mode == ARMV4_5_MODE_SYS)
373 ? NULL
374 : arm->core_cache->reg_list + arm->map[16];
375
376 /* Older ARMs won't have the J bit */
377 enum armv4_5_state state;
378
379 if (cpsr & (1 << 5)) { /* T */
380 if (cpsr & (1 << 24)) { /* J */
381 LOG_WARNING("ThumbEE -- incomplete support");
382 state = ARM_STATE_THUMB_EE;
383 } else
384 state = ARMV4_5_STATE_THUMB;
385 } else {
386 if (cpsr & (1 << 24)) { /* J */
387 LOG_ERROR("Jazelle state handling is BROKEN!");
388 state = ARMV4_5_STATE_JAZELLE;
389 } else
390 state = ARMV4_5_STATE_ARM;
391 }
392 arm->core_state = state;
393
394 LOG_DEBUG("set CPSR %#8.8x: %s mode, %s state", (unsigned) cpsr,
395 arm_mode_name(mode),
396 armv4_5_state_strings[arm->core_state]);
397 }
398
399 /**
400 * Returns handle to the register currently mapped to a given number.
401 * Someone must have called arm_set_cpsr() before.
402 *
403 * \param arm This core's state and registers are used.
404 * \param regnum From 0..15 corresponding to R0..R14 and PC.
405 * Note that R0..R7 don't require mapping; you may access those
406 * as the first eight entries in the register cache. Likewise
407 * R15 (PC) doesn't need mapping; you may also access it directly.
408 * However, R8..R14, and SPSR (arm->spsr) *must* be mapped.
409 * CPSR (arm->cpsr) is also not mapped.
410 */
411 struct reg *arm_reg_current(struct arm *arm, unsigned regnum)
412 {
413 struct reg *r;
414
415 if (regnum > 16)
416 return NULL;
417
418 r = arm->core_cache->reg_list + arm->map[regnum];
419
420 /* e.g. invalid CPSR said "secure monitor" mode on a core
421 * that doesn't support it...
422 */
423 if (!r) {
424 LOG_ERROR("Invalid CPSR mode");
425 r = arm->core_cache->reg_list + regnum;
426 }
427
428 return r;
429 }
430
431 static const uint8_t arm_gdb_dummy_fp_value[12];
432
433 /**
434 * Dummy FPA registers are required to support GDB on ARM.
435 * Register packets require eight obsolete FPA register values.
436 * Modern ARM cores use Vector Floating Point (VFP), if they
437 * have any floating point support. VFP is not FPA-compatible.
438 */
439 struct reg arm_gdb_dummy_fp_reg =
440 {
441 .name = "GDB dummy FPA register",
442 .value = (uint8_t *) arm_gdb_dummy_fp_value,
443 .valid = 1,
444 .size = 96,
445 };
446
447 static const uint8_t arm_gdb_dummy_fps_value[4];
448
449 /**
450 * Dummy FPA status registers are required to support GDB on ARM.
451 * Register packets require an obsolete FPA status register.
452 */
453 struct reg arm_gdb_dummy_fps_reg =
454 {
455 .name = "GDB dummy FPA status register",
456 .value = (uint8_t *) arm_gdb_dummy_fps_value,
457 .valid = 1,
458 .size = 32,
459 };
460
461 static void arm_gdb_dummy_init(void) __attribute__ ((constructor));
462
463 static void arm_gdb_dummy_init(void)
464 {
465 register_init_dummy(&arm_gdb_dummy_fp_reg);
466 register_init_dummy(&arm_gdb_dummy_fps_reg);
467 }
468
469 static int armv4_5_get_core_reg(struct reg *reg)
470 {
471 int retval;
472 struct arm_reg *armv4_5 = reg->arch_info;
473 struct target *target = armv4_5->target;
474
475 if (target->state != TARGET_HALTED)
476 {
477 LOG_ERROR("Target not halted");
478 return ERROR_TARGET_NOT_HALTED;
479 }
480
481 retval = armv4_5->armv4_5_common->read_core_reg(target, reg, armv4_5->num, armv4_5->mode);
482 if (retval == ERROR_OK) {
483 reg->valid = 1;
484 reg->dirty = 0;
485 }
486
487 return retval;
488 }
489
490 static int armv4_5_set_core_reg(struct reg *reg, uint8_t *buf)
491 {
492 struct arm_reg *armv4_5 = reg->arch_info;
493 struct target *target = armv4_5->target;
494 struct arm *armv4_5_target = target_to_armv4_5(target);
495 uint32_t value = buf_get_u32(buf, 0, 32);
496
497 if (target->state != TARGET_HALTED)
498 {
499 LOG_ERROR("Target not halted");
500 return ERROR_TARGET_NOT_HALTED;
501 }
502
503 /* Except for CPSR, the "reg" command exposes a writeback model
504 * for the register cache.
505 */
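/* Values cached here are only marked dirty; core-specific code writes
 * dirty registers back to the target before it next resumes.
 */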
506 if (reg == armv4_5_target->cpsr) {
507 arm_set_cpsr(armv4_5_target, value);
508
509 /* Older cores need help to be in ARM mode during halt
510 * mode debug, so we clear the J and T bits if we flush.
511 * For newer cores (v6/v7a/v7r) we don't need that, but
512 * it won't hurt since CPSR is always flushed anyway.
513 */
514 if (armv4_5_target->core_mode !=
515 (enum armv4_5_mode)(value & 0x1f)) {
516 LOG_DEBUG("changing ARM core mode to '%s'",
517 arm_mode_name(value & 0x1f));
518 value &= ~((1 << 24) | (1 << 5));
519 armv4_5_target->write_core_reg(target, reg,
520 16, ARMV4_5_MODE_ANY, value);
521 }
522 } else {
523 buf_set_u32(reg->value, 0, 32, value);
524 reg->valid = 1;
525 }
526 reg->dirty = 1;
527
528 return ERROR_OK;
529 }
530
531 static const struct reg_arch_type arm_reg_type = {
532 .get = armv4_5_get_core_reg,
533 .set = armv4_5_set_core_reg,
534 };
535
536 struct reg_cache* armv4_5_build_reg_cache(struct target *target, struct arm *armv4_5_common)
537 {
538 int num_regs = ARRAY_SIZE(arm_core_regs);
539 struct reg_cache *cache = malloc(sizeof(struct reg_cache));
540 struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
541 struct arm_reg *arch_info = calloc(num_regs, sizeof(struct arm_reg));
542 int i;
543
544 if (!cache || !reg_list || !arch_info) {
545 free(cache);
546 free(reg_list);
547 free(arch_info);
548 return NULL;
549 }
550
551 cache->name = "ARM registers";
552 cache->next = NULL;
553 cache->reg_list = reg_list;
554 cache->num_regs = 0;
555
556 for (i = 0; i < num_regs; i++)
557 {
558 /* Skip registers this core doesn't expose */
559 if (arm_core_regs[i].mode == ARM_MODE_MON
560 && armv4_5_common->core_type != ARM_MODE_MON)
561 continue;
562
563 /* REVISIT handle Cortex-M, which only shadows R13/SP */
564
565 arch_info[i].num = arm_core_regs[i].cookie;
566 arch_info[i].mode = arm_core_regs[i].mode;
567 arch_info[i].target = target;
568 arch_info[i].armv4_5_common = armv4_5_common;
569
570 reg_list[i].name = (char *) arm_core_regs[i].name;
571 reg_list[i].size = 32;
572 reg_list[i].value = &arch_info[i].value;
573 reg_list[i].type = &arm_reg_type;
574 reg_list[i].arch_info = &arch_info[i];
575
576 cache->num_regs++;
577 }
578
579 armv4_5_common->cpsr = reg_list + ARMV4_5_CPSR;
580 armv4_5_common->core_cache = cache;
581 return cache;
582 }
583
584 int armv4_5_arch_state(struct target *target)
585 {
586 struct arm *armv4_5 = target_to_armv4_5(target);
587
588 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
589 {
590 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
591 return ERROR_FAIL;
592 }
593
594 LOG_USER("target halted in %s state due to %s, current mode: %s\ncpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "",
595 armv4_5_state_strings[armv4_5->core_state],
596 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name,
597 arm_mode_name(armv4_5->core_mode),
598 buf_get_u32(armv4_5->cpsr->value, 0, 32),
599 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
600
601 return ERROR_OK;
602 }
603
604 #define ARMV4_5_CORE_REG_MODENUM(cache, mode, num) \
605 cache->reg_list[armv4_5_core_reg_map[mode][num]]
606
607 COMMAND_HANDLER(handle_armv4_5_reg_command)
608 {
609 struct target *target = get_current_target(CMD_CTX);
610 struct arm *armv4_5 = target_to_armv4_5(target);
611 unsigned num_regs;
612 struct reg *regs;
613
614 if (!is_arm(armv4_5))
615 {
616 command_print(CMD_CTX, "current target isn't an ARM");
617 return ERROR_FAIL;
618 }
619
620 if (target->state != TARGET_HALTED)
621 {
622 command_print(CMD_CTX, "error: target must be halted for register accesses");
623 return ERROR_FAIL;
624 }
625
626 if (!is_arm_mode(armv4_5->core_mode))
627 return ERROR_FAIL;
628
629 if (!armv4_5->full_context) {
630 command_print(CMD_CTX, "error: target doesn't support %s",
631 CMD_NAME);
632 return ERROR_FAIL;
633 }
634
635 num_regs = armv4_5->core_cache->num_regs;
636 regs = armv4_5->core_cache->reg_list;
637
638 for (unsigned mode = 0; mode < ARRAY_SIZE(arm_mode_data); mode++) {
639 const char *name;
640 char *sep = "\n";
641 char *shadow = "";
642
643 /* label this bank of registers (or shadows) */
644 switch (arm_mode_data[mode].psr) {
645 case ARMV4_5_MODE_SYS:
646 continue;
647 case ARMV4_5_MODE_USR:
648 name = "System and User";
649 sep = "";
650 break;
651 case ARM_MODE_MON:
652 if (armv4_5->core_type != ARM_MODE_MON)
653 continue;
654 /* FALLTHROUGH */
655 default:
656 name = arm_mode_data[mode].name;
657 shadow = "shadow ";
658 break;
659 }
660 command_print(CMD_CTX, "%s%s mode %sregisters",
661 sep, name, shadow);
662
663 /* display N rows of up to 4 registers each */
664 for (unsigned i = 0; i < arm_mode_data[mode].n_indices;) {
665 char output[80];
666 int output_len = 0;
667
668 for (unsigned j = 0; j < 4; j++, i++) {
669 uint32_t value;
670 struct reg *reg = regs;
671
672 if (i >= arm_mode_data[mode].n_indices)
673 break;
674
675 reg += arm_mode_data[mode].indices[i];
676
677 /* REVISIT be smarter about faults... */
678 if (!reg->valid)
679 armv4_5->full_context(target);
680
681 value = buf_get_u32(reg->value, 0, 32);
682 output_len += snprintf(output + output_len,
683 sizeof(output) - output_len,
684 "%8s: %8.8" PRIx32 " ",
685 reg->name, value);
686 }
687 command_print(CMD_CTX, "%s", output);
688 }
689 }
690
691 return ERROR_OK;
692 }
693
694 COMMAND_HANDLER(handle_armv4_5_core_state_command)
695 {
696 struct target *target = get_current_target(CMD_CTX);
697 struct arm *armv4_5 = target_to_armv4_5(target);
698
699 if (!is_arm(armv4_5))
700 {
701 command_print(CMD_CTX, "current target isn't an ARM");
702 return ERROR_FAIL;
703 }
704
705 if (CMD_ARGC > 0)
706 {
707 if (strcmp(CMD_ARGV[0], "arm") == 0)
708 {
709 armv4_5->core_state = ARMV4_5_STATE_ARM;
710 }
711 if (strcmp(CMD_ARGV[0], "thumb") == 0)
712 {
713 armv4_5->core_state = ARMV4_5_STATE_THUMB;
714 }
715 }
716
717 command_print(CMD_CTX, "core state: %s", armv4_5_state_strings[armv4_5->core_state]);
718
719 return ERROR_OK;
720 }
721
722 COMMAND_HANDLER(handle_armv4_5_disassemble_command)
723 {
724 int retval = ERROR_OK;
725 struct target *target = get_current_target(CMD_CTX);
726 struct arm *arm = target ? target_to_arm(target) : NULL;
727 uint32_t address;
728 int count = 1;
729 int thumb = 0;
730
731 if (!is_arm(arm)) {
732 command_print(CMD_CTX, "current target isn't an ARM");
733 return ERROR_FAIL;
734 }
735
736 switch (CMD_ARGC) {
737 case 3:
738 if (strcmp(CMD_ARGV[2], "thumb") != 0)
739 goto usage;
740 thumb = 1;
741 /* FALL THROUGH */
742 case 2:
743 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
744 /* FALL THROUGH */
745 case 1:
746 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
747 if (address & 0x01) {
748 if (!thumb) {
749 command_print(CMD_CTX, "Disassemble as Thumb");
750 thumb = 1;
751 }
752 address &= ~1;
753 }
754 break;
755 default:
756 usage:
757 command_print(CMD_CTX,
758 "usage: arm disassemble <address> [<count> ['thumb']]");
759 count = 0;
760 retval = ERROR_FAIL;
761 }
762
763 while (count-- > 0) {
764 struct arm_instruction cur_instruction;
765
766 if (thumb) {
767 /* Always use Thumb2 disassembly for best handling
768 * of 32-bit BL/BLX, and to work with newer cores
769 * (some ARMv6, all ARMv7) that use Thumb2.
770 */
771 retval = thumb2_opcode(target, address,
772 &cur_instruction);
773 if (retval != ERROR_OK)
774 break;
775 } else {
776 uint32_t opcode;
777
778 retval = target_read_u32(target, address, &opcode);
779 if (retval != ERROR_OK)
780 break;
781 retval = arm_evaluate_opcode(opcode, address,
782 &cur_instruction);
783 if (retval != ERROR_OK)
784 break;
785 }
786 command_print(CMD_CTX, "%s", cur_instruction.text);
787 address += cur_instruction.instruction_size;
788 }
789
790 return retval;
791 }
792
793 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
794 {
795 struct command_context *context;
796 struct target *target;
797 struct arm *arm;
798 int retval;
799
800 context = Jim_GetAssocData(interp, "context");
801 if (context == NULL) {
802 LOG_ERROR("%s: no command context", __func__);
803 return JIM_ERR;
804 }
805 target = get_current_target(context);
806 if (target == NULL) {
807 LOG_ERROR("%s: no current target", __func__);
808 return JIM_ERR;
809 }
810 if (!target_was_examined(target)) {
811 LOG_ERROR("%s: not yet examined", target_name(target));
812 return JIM_ERR;
813 }
814 arm = target_to_arm(target);
815 if (!is_arm(arm)) {
816 LOG_ERROR("%s: not an ARM", target_name(target));
817 return JIM_ERR;
818 }
819
820 if ((argc < 6) || (argc > 7)) {
821 /* FIXME use the command name to verify # params... */
822 LOG_ERROR("%s: wrong number of arguments", __func__);
823 return JIM_ERR;
824 }
825
826 int cpnum;
827 uint32_t op1;
828 uint32_t op2;
829 uint32_t CRn;
830 uint32_t CRm;
831 uint32_t value;
832 long l;
833
834 /* NOTE: parameter sequence matches ARM instruction set usage:
835 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
836 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
837 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
838 */
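/* Example: on a core with a standard CP15, "arm mrc 15 0 1 0 0" reads
 * the control register (MRC p15, 0, <Rd>, c1, c0, 0), and
 * "arm mcr 15 0 1 0 0 $value" performs the matching write.
 */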
839 retval = Jim_GetLong(interp, argv[1], &l);
840 if (retval != JIM_OK)
841 return retval;
842 if (l & ~0xf) {
843 LOG_ERROR("%s: %s %d out of range", __func__,
844 "coprocessor", (int) l);
845 return JIM_ERR;
846 }
847 cpnum = l;
848
849 retval = Jim_GetLong(interp, argv[2], &l);
850 if (retval != JIM_OK)
851 return retval;
852 if (l & ~0x7) {
853 LOG_ERROR("%s: %s %d out of range", __func__,
854 "op1", (int) l);
855 return JIM_ERR;
856 }
857 op1 = l;
858
859 retval = Jim_GetLong(interp, argv[3], &l);
860 if (retval != JIM_OK)
861 return retval;
862 if (l & ~0xf) {
863 LOG_ERROR("%s: %s %d out of range", __func__,
864 "CRn", (int) l);
865 return JIM_ERR;
866 }
867 CRn = l;
868
869 retval = Jim_GetLong(interp, argv[4], &l);
870 if (retval != JIM_OK)
871 return retval;
872 if (l & ~0xf) {
873 LOG_ERROR("%s: %s %d out of range", __func__,
874 "CRm", (int) l);
875 return JIM_ERR;
876 }
877 CRm = l;
878
879 retval = Jim_GetLong(interp, argv[5], &l);
880 if (retval != JIM_OK)
881 return retval;
882 if (l & ~0x7) {
883 LOG_ERROR("%s: %s %d out of range", __func__,
884 "op2", (int) l);
885 return JIM_ERR;
886 }
887 op2 = l;
888
889 value = 0;
890
891 /* FIXME don't assume "mrc" vs "mcr" from the number of params;
892 * that could easily be a typo! Check both...
893 *
894 * FIXME change the call syntax here ... simplest to just pass
895 * the MRC() or MCR() instruction to be executed. That will also
896 * let us support the "mrc2" and "mcr2" opcodes (toggling one bit)
897 * if that's ever needed.
898 */
899 if (argc == 7) {
900 retval = Jim_GetLong(interp, argv[6], &l);
901 if (retval != JIM_OK) {
902 return retval;
903 }
904 value = l;
905
906 /* NOTE: parameters reordered! */
907 // ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2)
908 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
909 if (retval != ERROR_OK)
910 return JIM_ERR;
911 } else {
912 /* NOTE: parameters reordered! */
913 // ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2)
914 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
915 if (retval != ERROR_OK)
916 return JIM_ERR;
917
918 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
919 }
920
921 return JIM_OK;
922 }
923
924 static const struct command_registration arm_exec_command_handlers[] = {
925 {
926 .name = "reg",
927 .handler = &handle_armv4_5_reg_command,
928 .mode = COMMAND_EXEC,
929 .help = "display ARM core registers",
930 },
931 {
932 .name = "core_state",
933 .handler = &handle_armv4_5_core_state_command,
934 .mode = COMMAND_EXEC,
935 .usage = "<arm | thumb>",
936 .help = "display/change ARM core state",
937 },
938 {
939 .name = "disassemble",
940 .handler = &handle_armv4_5_disassemble_command,
941 .mode = COMMAND_EXEC,
942 .usage = "<address> [<count> ['thumb']]",
943 .help = "disassemble instructions",
944 },
945 {
946 .name = "mcr",
947 .mode = COMMAND_EXEC,
948 .jim_handler = &jim_mcrmrc,
949 .help = "write coprocessor register",
950 .usage = "cpnum op1 CRn CRm op2 value",
951 },
952 {
953 .name = "mrc",
954 .jim_handler = &jim_mcrmrc,
955 .help = "read coprocessor register",
956 .usage = "cpnum op1 CRn CRm op2",
957 },
958
959 COMMAND_REGISTRATION_DONE
960 };
961 const struct command_registration arm_command_handlers[] = {
962 {
963 .name = "arm",
964 .mode = COMMAND_ANY,
965 .help = "ARM command group",
966 .chain = arm_exec_command_handlers,
967 },
968 COMMAND_REGISTRATION_DONE
969 };
970
971 int armv4_5_get_gdb_reg_list(struct target *target, struct reg **reg_list[], int *reg_list_size)
972 {
973 struct arm *armv4_5 = target_to_armv4_5(target);
974 int i;
975
976 if (!is_arm_mode(armv4_5->core_mode))
977 return ERROR_FAIL;
978
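/* GDB's ARM register packet layout: r0..r15 (0..15), the obsolete FPA
 * registers f0..f7 (16..23), then fps (24) and cpsr (25), 26 in all.
 */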
979 *reg_list_size = 26;
980 *reg_list = malloc(sizeof(struct reg*) * (*reg_list_size));
981
982 for (i = 0; i < 16; i++)
983 (*reg_list)[i] = arm_reg_current(armv4_5, i);
984
985 for (i = 16; i < 24; i++)
986 (*reg_list)[i] = &arm_gdb_dummy_fp_reg;
987
988 (*reg_list)[24] = &arm_gdb_dummy_fps_reg;
989 (*reg_list)[25] = armv4_5->cpsr;
990
991 return ERROR_OK;
992 }
993
994 /* wait for execution to complete and check exit point */
995 static int armv4_5_run_algorithm_completion(struct target *target, uint32_t exit_point, int timeout_ms, void *arch_info)
996 {
997 int retval;
998 struct arm *armv4_5 = target_to_armv4_5(target);
999
1000 if ((retval = target_wait_state(target, TARGET_HALTED, timeout_ms)) != ERROR_OK)
1001 {
1002 return retval;
1003 }
1004 if (target->state != TARGET_HALTED)
1005 {
1006 if ((retval = target_halt(target)) != ERROR_OK)
1007 return retval;
1008 if ((retval = target_wait_state(target, TARGET_HALTED, 500)) != ERROR_OK)
1009 {
1010 return retval;
1011 }
1012 return ERROR_TARGET_TIMEOUT;
1013 }
1014
1015 /* fast exit: ARMv5+ code can use BKPT */
1016 if (exit_point && buf_get_u32(armv4_5->core_cache->reg_list[15].value,
1017 0, 32) != exit_point)
1018 {
1019 LOG_WARNING("target reentered debug state, but not at the desired exit point: 0x%4.4" PRIx32 "",
1020 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1021 return ERROR_TARGET_TIMEOUT;
1022 }
1023
1024 return ERROR_OK;
1025 }
1026
1027 int armv4_5_run_algorithm_inner(struct target *target,
1028 int num_mem_params, struct mem_param *mem_params,
1029 int num_reg_params, struct reg_param *reg_params,
1030 uint32_t entry_point, uint32_t exit_point,
1031 int timeout_ms, void *arch_info,
1032 int (*run_it)(struct target *target, uint32_t exit_point,
1033 int timeout_ms, void *arch_info))
1034 {
1035 struct arm *armv4_5 = target_to_armv4_5(target);
1036 struct armv4_5_algorithm *armv4_5_algorithm_info = arch_info;
1037 enum armv4_5_state core_state = armv4_5->core_state;
1038 uint32_t context[17];
1039 uint32_t cpsr;
1040 int exit_breakpoint_size = 0;
1041 int i;
1042 int retval = ERROR_OK;
1043
1044 LOG_DEBUG("Running algorithm");
1045
1046 if (armv4_5_algorithm_info->common_magic != ARMV4_5_COMMON_MAGIC)
1047 {
1048 LOG_ERROR("current target isn't an ARMV4/5 target");
1049 return ERROR_TARGET_INVALID;
1050 }
1051
1052 if (target->state != TARGET_HALTED)
1053 {
1054 LOG_WARNING("target not halted");
1055 return ERROR_TARGET_NOT_HALTED;
1056 }
1057
1058 if (!is_arm_mode(armv4_5->core_mode))
1059 return ERROR_FAIL;
1060
1061 /* armv5 and later can terminate with BKPT instruction; less overhead */
1062 if (!exit_point && armv4_5->is_armv4)
1063 {
1064 LOG_ERROR("ARMv4 target needs HW breakpoint location");
1065 return ERROR_FAIL;
1066 }
1067
1068 /* save r0..pc, cpsr-or-spsr, and then cpsr-for-sure;
1069 * they'll be restored later.
1070 */
1071 for (i = 0; i <= 16; i++)
1072 {
1073 struct reg *r;
1074
1075 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1076 armv4_5_algorithm_info->core_mode, i);
1077 if (!r->valid)
1078 armv4_5->read_core_reg(target, r, i,
1079 armv4_5_algorithm_info->core_mode);
1080 context[i] = buf_get_u32(r->value, 0, 32);
1081 }
1082 cpsr = buf_get_u32(armv4_5->cpsr->value, 0, 32);
1083
1084 for (i = 0; i < num_mem_params; i++)
1085 {
1086 if ((retval = target_write_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
1087 {
1088 return retval;
1089 }
1090 }
1091
1092 for (i = 0; i < num_reg_params; i++)
1093 {
1094 struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
1095 if (!reg)
1096 {
1097 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
1098 return ERROR_INVALID_ARGUMENTS;
1099 }
1100
1101 if (reg->size != reg_params[i].size)
1102 {
1103 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
1104 return ERROR_INVALID_ARGUMENTS;
1105 }
1106
1107 if ((retval = armv4_5_set_core_reg(reg, reg_params[i].value)) != ERROR_OK)
1108 {
1109 return retval;
1110 }
1111 }
1112
1113 armv4_5->core_state = armv4_5_algorithm_info->core_state;
1114 if (armv4_5->core_state == ARMV4_5_STATE_ARM)
1115 exit_breakpoint_size = 4;
1116 else if (armv4_5->core_state == ARMV4_5_STATE_THUMB)
1117 exit_breakpoint_size = 2;
1118 else
1119 {
1120 LOG_ERROR("BUG: can't execute algorithms when not in ARM or Thumb state");
1121 return ERROR_INVALID_ARGUMENTS;
1122 }
1123
1124 if (armv4_5_algorithm_info->core_mode != ARMV4_5_MODE_ANY)
1125 {
1126 LOG_DEBUG("setting core_mode: 0x%2.2x",
1127 armv4_5_algorithm_info->core_mode);
1128 buf_set_u32(armv4_5->cpsr->value, 0, 5,
1129 armv4_5_algorithm_info->core_mode);
1130 armv4_5->cpsr->dirty = 1;
1131 armv4_5->cpsr->valid = 1;
1132 }
1133
1134 /* terminate using a hardware or (ARMv5+) software breakpoint */
1135 if (exit_point && (retval = breakpoint_add(target, exit_point,
1136 exit_breakpoint_size, BKPT_HARD)) != ERROR_OK)
1137 {
1138 LOG_ERROR("can't add HW breakpoint to terminate algorithm");
1139 return ERROR_TARGET_FAILURE;
1140 }
1141
1142 if ((retval = target_resume(target, 0, entry_point, 1, 1)) != ERROR_OK)
1143 {
1144 return retval;
1145 }
1146 int retvaltemp;
1147 retval = run_it(target, exit_point, timeout_ms, arch_info);
1148
1149 if (exit_point)
1150 breakpoint_remove(target, exit_point);
1151
1152 if (retval != ERROR_OK)
1153 return retval;
1154
1155 for (i = 0; i < num_mem_params; i++)
1156 {
1157 if (mem_params[i].direction != PARAM_OUT)
1158 if ((retvaltemp = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
1159 {
1160 retval = retvaltemp;
1161 }
1162 }
1163
1164 for (i = 0; i < num_reg_params; i++)
1165 {
1166 if (reg_params[i].direction != PARAM_OUT)
1167 {
1168
1169 struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
1170 if (!reg)
1171 {
1172 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
1173 retval = ERROR_INVALID_ARGUMENTS;
1174 continue;
1175 }
1176
1177 if (reg->size != reg_params[i].size)
1178 {
1179 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
1180 retval = ERROR_INVALID_ARGUMENTS;
1181 continue;
1182 }
1183
1184 buf_set_u32(reg_params[i].value, 0, 32, buf_get_u32(reg->value, 0, 32));
1185 }
1186 }
1187
1188 /* restore everything we saved before (17 or 18 registers) */
1189 for (i = 0; i <= 16; i++)
1190 {
1191 uint32_t regvalue;
1192 regvalue = buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).value, 0, 32);
1193 if (regvalue != context[i])
1194 {
1195 LOG_DEBUG("restoring register %s with value 0x%8.8" PRIx32 "", ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).name, context[i]);
1196 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).value, 0, 32, context[i]);
1197 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).valid = 1;
1198 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).dirty = 1;
1199 }
1200 }
1201
1202 arm_set_cpsr(armv4_5, cpsr);
1203 armv4_5->cpsr->dirty = 1;
1204
1205 armv4_5->core_state = core_state;
1206
1207 return retval;
1208 }
1209
1210 int armv4_5_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info)
1211 {
1212 return armv4_5_run_algorithm_inner(target, num_mem_params, mem_params, num_reg_params, reg_params, entry_point, exit_point, timeout_ms, arch_info, armv4_5_run_algorithm_completion);
1213 }
1214
1215 /**
1216 * Runs ARM code in the target to calculate a CRC32 checksum.
1217 *
1218 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
1219 */
1220 int arm_checksum_memory(struct target *target,
1221 uint32_t address, uint32_t count, uint32_t *checksum)
1222 {
1223 struct working_area *crc_algorithm;
1224 struct armv4_5_algorithm armv4_5_info;
1225 struct reg_param reg_params[2];
1226 int retval;
1227 uint32_t i;
1228
1229 static const uint32_t arm_crc_code[] = {
1230 0xE1A02000, /* mov r2, r0 */
1231 0xE3E00000, /* mov r0, #0xffffffff */
1232 0xE1A03001, /* mov r3, r1 */
1233 0xE3A04000, /* mov r4, #0 */
1234 0xEA00000B, /* b ncomp */
1235 /* nbyte: */
1236 0xE7D21004, /* ldrb r1, [r2, r4] */
1237 0xE59F7030, /* ldr r7, CRC32XOR */
1238 0xE0200C01, /* eor r0, r0, r1, asl 24 */
1239 0xE3A05000, /* mov r5, #0 */
1240 /* loop: */
1241 0xE3500000, /* cmp r0, #0 */
1242 0xE1A06080, /* mov r6, r0, asl #1 */
1243 0xE2855001, /* add r5, r5, #1 */
1244 0xE1A00006, /* mov r0, r6 */
1245 0xB0260007, /* eorlt r0, r6, r7 */
1246 0xE3550008, /* cmp r5, #8 */
1247 0x1AFFFFF8, /* bne loop */
1248 0xE2844001, /* add r4, r4, #1 */
1249 /* ncomp: */
1250 0xE1540003, /* cmp r4, r3 */
1251 0x1AFFFFF1, /* bne nbyte */
1252 /* end: */
1253 0xEAFFFFFE, /* b end */
1254 /* CRC32XOR: */
1255 0x04C11DB7 /* .word 0x04C11DB7 */
1256 };
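/* The stub above computes a bitwise (MSB-first) CRC over 'count' bytes
 * starting at 'address', using polynomial 0x04C11DB7 and an initial
 * value of 0xffffffff; the result is left in r0 for the caller to read.
 */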
1257
1258 retval = target_alloc_working_area(target,
1259 sizeof(arm_crc_code), &crc_algorithm);
1260 if (retval != ERROR_OK)
1261 return retval;
1262
1263 /* convert code into a buffer in target endianness */
1264 for (i = 0; i < ARRAY_SIZE(arm_crc_code); i++) {
1265 retval = target_write_u32(target,
1266 crc_algorithm->address + i * sizeof(uint32_t),
1267 arm_crc_code[i]);
1268 if (retval != ERROR_OK)
1269 return retval;
1270 }
1271
1272 armv4_5_info.common_magic = ARMV4_5_COMMON_MAGIC;
1273 armv4_5_info.core_mode = ARMV4_5_MODE_SVC;
1274 armv4_5_info.core_state = ARMV4_5_STATE_ARM;
1275
1276 init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
1277 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1278
1279 buf_set_u32(reg_params[0].value, 0, 32, address);
1280 buf_set_u32(reg_params[1].value, 0, 32, count);
1281
1282 /* 20 second timeout/megabyte */
1283 int timeout = 20000 * (1 + (count / (1024 * 1024)));
1284
1285 retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
1286 crc_algorithm->address,
1287 crc_algorithm->address + sizeof(arm_crc_code) - 8,
1288 timeout, &armv4_5_info);
1289 if (retval != ERROR_OK) {
1290 LOG_ERROR("error executing ARM crc algorithm");
1291 destroy_reg_param(&reg_params[0]);
1292 destroy_reg_param(&reg_params[1]);
1293 target_free_working_area(target, crc_algorithm);
1294 return retval;
1295 }
1296
1297 *checksum = buf_get_u32(reg_params[0].value, 0, 32);
1298
1299 destroy_reg_param(&reg_params[0]);
1300 destroy_reg_param(&reg_params[1]);
1301
1302 target_free_working_area(target, crc_algorithm);
1303
1304 return ERROR_OK;
1305 }
1306
1307 /**
1308 * Runs ARM code in the target to check whether a memory block holds
1309 * all ones. NOR flash which has been erased, and thus may be written,
1310 * holds all ones.
1311 *
1312 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
1313 */
1314 int arm_blank_check_memory(struct target *target,
1315 uint32_t address, uint32_t count, uint32_t *blank)
1316 {
1317 struct working_area *check_algorithm;
1318 struct reg_param reg_params[3];
1319 struct armv4_5_algorithm armv4_5_info;
1320 int retval;
1321 uint32_t i;
1322
1323 static const uint32_t check_code[] = {
1324 /* loop: */
1325 0xe4d03001, /* ldrb r3, [r0], #1 */
1326 0xe0022003, /* and r2, r2, r3 */
1327 0xe2511001, /* subs r1, r1, #1 */
1328 0x1afffffb, /* bne loop */
1329 /* end: */
1330 0xeafffffe /* b end */
1331 };
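/* The stub above ANDs every byte of the block into r2 (seeded with 0xff
 * by the caller); r2 reads back as 0xff only if every byte was erased.
 */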
1332
1333 /* make sure we have a working area */
1334 retval = target_alloc_working_area(target,
1335 sizeof(check_code), &check_algorithm);
1336 if (retval != ERROR_OK)
1337 return retval;
1338
1339 /* convert code into a buffer in target endianness */
1340 for (i = 0; i < ARRAY_SIZE(check_code); i++) {
1341 retval = target_write_u32(target,
1342 check_algorithm->address
1343 + i * sizeof(uint32_t),
1344 check_code[i]);
1345 if (retval != ERROR_OK)
1346 return retval;
1347 }
1348
1349 armv4_5_info.common_magic = ARMV4_5_COMMON_MAGIC;
1350 armv4_5_info.core_mode = ARMV4_5_MODE_SVC;
1351 armv4_5_info.core_state = ARMV4_5_STATE_ARM;
1352
1353 init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
1354 buf_set_u32(reg_params[0].value, 0, 32, address);
1355
1356 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1357 buf_set_u32(reg_params[1].value, 0, 32, count);
1358
1359 init_reg_param(&reg_params[2], "r2", 32, PARAM_IN_OUT);
1360 buf_set_u32(reg_params[2].value, 0, 32, 0xff);
1361
1362 retval = target_run_algorithm(target, 0, NULL, 3, reg_params,
1363 check_algorithm->address,
1364 check_algorithm->address + sizeof(check_code) - 4,
1365 10000, &armv4_5_info);
1366 if (retval != ERROR_OK) {
1367 destroy_reg_param(&reg_params[0]);
1368 destroy_reg_param(&reg_params[1]);
1369 destroy_reg_param(&reg_params[2]);
1370 target_free_working_area(target, check_algorithm);
1371 return retval;
1372 }
1373
1374 *blank = buf_get_u32(reg_params[2].value, 0, 32);
1375
1376 destroy_reg_param(&reg_params[0]);
1377 destroy_reg_param(&reg_params[1]);
1378 destroy_reg_param(&reg_params[2]);
1379
1380 target_free_working_area(target, check_algorithm);
1381
1382 return ERROR_OK;
1383 }
1384
1385 static int arm_full_context(struct target *target)
1386 {
1387 struct arm *armv4_5 = target_to_armv4_5(target);
1388 unsigned num_regs = armv4_5->core_cache->num_regs;
1389 struct reg *reg = armv4_5->core_cache->reg_list;
1390 int retval = ERROR_OK;
1391
1392 for (; num_regs && retval == ERROR_OK; num_regs--, reg++) {
1393 if (reg->valid)
1394 continue;
1395 retval = armv4_5_get_core_reg(reg);
1396 }
1397 return retval;
1398 }
1399
1400 static int arm_default_mrc(struct target *target, int cpnum,
1401 uint32_t op1, uint32_t op2,
1402 uint32_t CRn, uint32_t CRm,
1403 uint32_t *value)
1404 {
1405 LOG_ERROR("%s doesn't implement MRC", target_type_name(target));
1406 return ERROR_FAIL;
1407 }
1408
1409 static int arm_default_mcr(struct target *target, int cpnum,
1410 uint32_t op1, uint32_t op2,
1411 uint32_t CRn, uint32_t CRm,
1412 uint32_t value)
1413 {
1414 LOG_ERROR("%s doesn't implement MCR", target_type_name(target));
1415 return ERROR_FAIL;
1416 }
1417
1418 int armv4_5_init_arch_info(struct target *target, struct arm *armv4_5)
1419 {
1420 target->arch_info = armv4_5;
1421 armv4_5->target = target;
1422
1423 armv4_5->common_magic = ARMV4_5_COMMON_MAGIC;
1424 arm_set_cpsr(armv4_5, ARMV4_5_MODE_USR);
1425
1426 /* core_type may be overridden by subtype logic */
1427 armv4_5->core_type = ARMV4_5_MODE_ANY;
1428
1429 /* default full_context() has no core-specific optimizations */
1430 if (!armv4_5->full_context && armv4_5->read_core_reg)
1431 armv4_5->full_context = arm_full_context;
1432
1433 if (!armv4_5->mrc)
1434 armv4_5->mrc = arm_default_mrc;
1435 if (!armv4_5->mcr)
1436 armv4_5->mcr = arm_default_mcr;
1437
1438 return ERROR_OK;
1439 }
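/* Core-specific drivers (e.g. the ARM7/ARM9 family support) are expected
 * to call armv4_5_init_arch_info() while setting up a target, supply their
 * own read_core_reg/write_core_reg/full_context and mcr/mcr handlers where
 * they have them, and then create the cache with armv4_5_build_reg_cache().
 */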
