ARM: rename some generic routines
openocd.git: src/target/armv4_5.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2008 by Spencer Oliver *
6 * spen@spen-soft.co.uk *
7 * *
8 * Copyright (C) 2008 by Oyvind Harboe *
9 * oyvind.harboe@zylin.com *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "armv4_5.h"
31 #include "arm_jtag.h"
32 #include "breakpoints.h"
33 #include "arm_disassembler.h"
34 #include <helper/binarybuffer.h>
35 #include "algorithm.h"
36 #include "register.h"
37
38
39 /* offsets into armv4_5 core register cache */
40 enum {
41 // ARMV4_5_CPSR = 31,
42 ARMV4_5_SPSR_FIQ = 32,
43 ARMV4_5_SPSR_IRQ = 33,
44 ARMV4_5_SPSR_SVC = 34,
45 ARMV4_5_SPSR_ABT = 35,
46 ARMV4_5_SPSR_UND = 36,
47 ARM_SPSR_MON = 39,
48 };
49
50 static const uint8_t arm_usr_indices[17] = {
51 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ARMV4_5_CPSR,
52 };
53
54 static const uint8_t arm_fiq_indices[8] = {
55 16, 17, 18, 19, 20, 21, 22, ARMV4_5_SPSR_FIQ,
56 };
57
58 static const uint8_t arm_irq_indices[3] = {
59 23, 24, ARMV4_5_SPSR_IRQ,
60 };
61
62 static const uint8_t arm_svc_indices[3] = {
63 25, 26, ARMV4_5_SPSR_SVC,
64 };
65
66 static const uint8_t arm_abt_indices[3] = {
67 27, 28, ARMV4_5_SPSR_ABT,
68 };
69
70 static const uint8_t arm_und_indices[3] = {
71 29, 30, ARMV4_5_SPSR_UND,
72 };
73
74 static const uint8_t arm_mon_indices[3] = {
75 37, 38, ARM_SPSR_MON,
76 };
77
78 static const struct {
79 const char *name;
80 unsigned short psr;
81 /* For user and system modes, these list indices for all registers.
82 * otherwise they're just indices for the shadow registers and SPSR.
83 */
84 unsigned short n_indices;
85 const uint8_t *indices;
86 } arm_mode_data[] = {
87 /* Seven modes are standard from ARM7 on. "System" and "User" share
88 * the same registers; other modes shadow from 3 to 8 registers.
89 */
90 {
91 .name = "User",
92 .psr = ARM_MODE_USR,
93 .n_indices = ARRAY_SIZE(arm_usr_indices),
94 .indices = arm_usr_indices,
95 },
96 {
97 .name = "FIQ",
98 .psr = ARM_MODE_FIQ,
99 .n_indices = ARRAY_SIZE(arm_fiq_indices),
100 .indices = arm_fiq_indices,
101 },
102 {
103 .name = "Supervisor",
104 .psr = ARM_MODE_SVC,
105 .n_indices = ARRAY_SIZE(arm_svc_indices),
106 .indices = arm_svc_indices,
107 },
108 {
109 .name = "Abort",
110 .psr = ARM_MODE_ABT,
111 .n_indices = ARRAY_SIZE(arm_abt_indices),
112 .indices = arm_abt_indices,
113 },
114 {
115 .name = "IRQ",
116 .psr = ARM_MODE_IRQ,
117 .n_indices = ARRAY_SIZE(arm_irq_indices),
118 .indices = arm_irq_indices,
119 },
120 {
121 .name = "Undefined instruction",
122 .psr = ARM_MODE_UND,
123 .n_indices = ARRAY_SIZE(arm_und_indices),
124 .indices = arm_und_indices,
125 },
126 {
127 .name = "System",
128 .psr = ARM_MODE_SYS,
129 .n_indices = ARRAY_SIZE(arm_usr_indices),
130 .indices = arm_usr_indices,
131 },
132 /* TrustZone "Security Extensions" add a secure monitor mode.
133 * This is distinct from a "debug monitor" which can support
134 * non-halting debug, in conjunction with some debuggers.
135 */
136 {
137 .name = "Secure Monitor",
138 .psr = ARM_MODE_MON,
139 .n_indices = ARRAY_SIZE(arm_mon_indices),
140 .indices = arm_mon_indices,
141 },
142 };
143
144 /** Map PSR mode bits to the name of an ARM processor operating mode. */
145 const char *arm_mode_name(unsigned psr_mode)
146 {
147 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
148 if (arm_mode_data[i].psr == psr_mode)
149 return arm_mode_data[i].name;
150 }
151 LOG_ERROR("unrecognized psr mode: %#02x", psr_mode);
152 return "UNRECOGNIZED";
153 }
154
155 /** Return true iff the parameter denotes a valid ARM processor mode. */
156 bool is_arm_mode(unsigned psr_mode)
157 {
158 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
159 if (arm_mode_data[i].psr == psr_mode)
160 return true;
161 }
162 return false;
163 }
164
165 /** Map PSR mode bits to linear number indexing armv4_5_core_reg_map */
166 int arm_mode_to_number(enum arm_mode mode)
167 {
168 switch (mode) {
169 case ARM_MODE_ANY:
170 /* map MODE_ANY to user mode */
171 case ARM_MODE_USR:
172 return 0;
173 case ARM_MODE_FIQ:
174 return 1;
175 case ARM_MODE_IRQ:
176 return 2;
177 case ARM_MODE_SVC:
178 return 3;
179 case ARM_MODE_ABT:
180 return 4;
181 case ARM_MODE_UND:
182 return 5;
183 case ARM_MODE_SYS:
184 return 6;
185 case ARM_MODE_MON:
186 return 7;
187 default:
188 LOG_ERROR("invalid mode value encountered %d", mode);
189 return -1;
190 }
191 }
192
193 /** Map linear number indexing armv4_5_core_reg_map to PSR mode bits. */
194 enum arm_mode armv4_5_number_to_mode(int number)
195 {
196 switch (number) {
197 case 0:
198 return ARM_MODE_USR;
199 case 1:
200 return ARM_MODE_FIQ;
201 case 2:
202 return ARM_MODE_IRQ;
203 case 3:
204 return ARM_MODE_SVC;
205 case 4:
206 return ARM_MODE_ABT;
207 case 5:
208 return ARM_MODE_UND;
209 case 6:
210 return ARM_MODE_SYS;
211 case 7:
212 return ARM_MODE_MON;
213 default:
214 LOG_ERROR("mode index out of bounds %d", number);
215 return ARM_MODE_ANY;
216 }
217 }
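
/* Illustrative sketch, not part of the original file: the helpers above
 * can be combined to validate the mode bits pulled out of a CPSR before
 * they are used as a table index.  Anything unrecognized maps to -1,
 * matching arm_mode_to_number()'s error convention.
 */
__attribute__((unused))
static int example_mode_number_from_cpsr(uint32_t cpsr)
{
	enum arm_mode mode = cpsr & 0x1f;

	if (!is_arm_mode(mode))
		return -1;
	return arm_mode_to_number(mode);
}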
218
219 const char *arm_state_strings[] =
220 {
221 "ARM", "Thumb", "Jazelle", "ThumbEE",
222 };
223
224 /* Templates for ARM core registers.
225 *
226 * NOTE: offsets in this table are coupled to the arm_mode_data
227 * table above, the armv4_5_core_reg_map array below, and also to
228 * the ARMV4_5_CPSR symbol (which should vanish after ARM11 updates).
229 */
230 static const struct {
231 /* The name is used for e.g. the "regs" command. */
232 const char *name;
233
234 /* The {cookie, mode} tuple uniquely identifies one register.
235 * In a given mode, cookies 0..15 map to registers R0..R15,
236 * with R13..R15 usually called SP, LR, PC.
237 *
238 * MODE_ANY is used as *input* to the mapping, and indicates
239 * various special cases (sigh) and errors.
240 *
241 * Cookie 16 is (currently) confusing, since it indicates
242 * CPSR -or- SPSR depending on whether 'mode' is MODE_ANY.
243 * (Exception modes have both CPSR and SPSR registers ...)
244 */
245 unsigned cookie;
246 enum arm_mode mode;
247 } arm_core_regs[] = {
248 /* IMPORTANT: we guarantee that the first eight cached registers
249 * correspond to r0..r7, and the fifteenth to PC, so that callers
250 * don't need to map them.
251 */
252 { .name = "r0", .cookie = 0, .mode = ARM_MODE_ANY, },
253 { .name = "r1", .cookie = 1, .mode = ARM_MODE_ANY, },
254 { .name = "r2", .cookie = 2, .mode = ARM_MODE_ANY, },
255 { .name = "r3", .cookie = 3, .mode = ARM_MODE_ANY, },
256 { .name = "r4", .cookie = 4, .mode = ARM_MODE_ANY, },
257 { .name = "r5", .cookie = 5, .mode = ARM_MODE_ANY, },
258 { .name = "r6", .cookie = 6, .mode = ARM_MODE_ANY, },
259 { .name = "r7", .cookie = 7, .mode = ARM_MODE_ANY, },
260
261 /* NOTE: regs 8..12 might be shadowed by FIQ ... flagging
262 * them as MODE_ANY creates special cases. (ANY means
263 * "not mapped" elsewhere; here it's "everything but FIQ".)
264 */
265 { .name = "r8", .cookie = 8, .mode = ARM_MODE_ANY, },
266 { .name = "r9", .cookie = 9, .mode = ARM_MODE_ANY, },
267 { .name = "r10", .cookie = 10, .mode = ARM_MODE_ANY, },
268 { .name = "r11", .cookie = 11, .mode = ARM_MODE_ANY, },
269 { .name = "r12", .cookie = 12, .mode = ARM_MODE_ANY, },
270
271 /* NOTE all MODE_USR registers are equivalent to MODE_SYS ones */
272 { .name = "sp_usr", .cookie = 13, .mode = ARM_MODE_USR, },
273 { .name = "lr_usr", .cookie = 14, .mode = ARM_MODE_USR, },
274
275 /* guaranteed to be at index 15 */
276 { .name = "pc", .cookie = 15, .mode = ARM_MODE_ANY, },
277
278 { .name = "r8_fiq", .cookie = 8, .mode = ARM_MODE_FIQ, },
279 { .name = "r9_fiq", .cookie = 9, .mode = ARM_MODE_FIQ, },
280 { .name = "r10_fiq", .cookie = 10, .mode = ARM_MODE_FIQ, },
281 { .name = "r11_fiq", .cookie = 11, .mode = ARM_MODE_FIQ, },
282 { .name = "r12_fiq", .cookie = 12, .mode = ARM_MODE_FIQ, },
283
284 { .name = "sp_fiq", .cookie = 13, .mode = ARM_MODE_FIQ, },
285 { .name = "lr_fiq", .cookie = 14, .mode = ARM_MODE_FIQ, },
286
287 { .name = "sp_irq", .cookie = 13, .mode = ARM_MODE_IRQ, },
288 { .name = "lr_irq", .cookie = 14, .mode = ARM_MODE_IRQ, },
289
290 { .name = "sp_svc", .cookie = 13, .mode = ARM_MODE_SVC, },
291 { .name = "lr_svc", .cookie = 14, .mode = ARM_MODE_SVC, },
292
293 { .name = "sp_abt", .cookie = 13, .mode = ARM_MODE_ABT, },
294 { .name = "lr_abt", .cookie = 14, .mode = ARM_MODE_ABT, },
295
296 { .name = "sp_und", .cookie = 13, .mode = ARM_MODE_UND, },
297 { .name = "lr_und", .cookie = 14, .mode = ARM_MODE_UND, },
298
299 { .name = "cpsr", .cookie = 16, .mode = ARM_MODE_ANY, },
300 { .name = "spsr_fiq", .cookie = 16, .mode = ARM_MODE_FIQ, },
301 { .name = "spsr_irq", .cookie = 16, .mode = ARM_MODE_IRQ, },
302 { .name = "spsr_svc", .cookie = 16, .mode = ARM_MODE_SVC, },
303 { .name = "spsr_abt", .cookie = 16, .mode = ARM_MODE_ABT, },
304 { .name = "spsr_und", .cookie = 16, .mode = ARM_MODE_UND, },
305
306 { .name = "sp_mon", .cookie = 13, .mode = ARM_MODE_MON, },
307 { .name = "lr_mon", .cookie = 14, .mode = ARM_MODE_MON, },
308 { .name = "spsr_mon", .cookie = 16, .mode = ARM_MODE_MON, },
309 };
310
311 /* map core mode (USR, FIQ, ...) and register number to
312 * indices into the register cache
313 */
314 const int armv4_5_core_reg_map[8][17] =
315 {
316 { /* USR */
317 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
318 },
319 { /* FIQ (8 shadows of USR, vs normal 3) */
320 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 15, 32
321 },
322 { /* IRQ */
323 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 23, 24, 15, 33
324 },
325 { /* SVC */
326 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 25, 26, 15, 34
327 },
328 { /* ABT */
329 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 27, 28, 15, 35
330 },
331 { /* UND */
332 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 29, 30, 15, 36
333 },
334 { /* SYS (same registers as USR) */
335 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
336 },
337 { /* MON */
338 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 37, 38, 15, 39,
339 }
340 };
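
/* Illustrative sketch, not part of the original file: how the coupled
 * tables above fit together.  The mode bits select a row (via
 * arm_mode_to_number), the register number selects a column, and the
 * result indexes the cache built by arm_build_reg_cache().  For the
 * IRQ banked stack pointer the chain looks like this.
 */
__attribute__((unused))
static struct reg *example_lookup_sp_irq(struct arm *arm)
{
	int num = arm_mode_to_number(ARM_MODE_IRQ);	/* row 2 */
	int idx = armv4_5_core_reg_map[num][13];	/* 23, i.e. "sp_irq" */

	return arm->core_cache->reg_list + idx;
}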
341
342 /**
343 * Configures host-side ARM records to reflect the specified CPSR.
344 * Later, code can use arm_reg_current() to map register numbers
345 * according to how they are exposed by this mode.
346 */
347 void arm_set_cpsr(struct arm *arm, uint32_t cpsr)
348 {
349 enum arm_mode mode = cpsr & 0x1f;
350 int num;
351
352 /* NOTE: this may be called very early, before the register
353 * cache is set up. We can't defend against many errors, in
354 * particular against CPSRs that aren't valid *here* ...
355 */
356 if (arm->cpsr) {
357 buf_set_u32(arm->cpsr->value, 0, 32, cpsr);
358 arm->cpsr->valid = 1;
359 arm->cpsr->dirty = 0;
360 }
361
362 arm->core_mode = mode;
363
364 /* mode_to_number() warned; set up a somewhat-sane mapping */
365 num = arm_mode_to_number(mode);
366 if (num < 0) {
367 mode = ARM_MODE_USR;
368 num = 0;
369 }
370
371 arm->map = &armv4_5_core_reg_map[num][0];
372 arm->spsr = (mode == ARM_MODE_USR || mode == ARM_MODE_SYS)
373 ? NULL
374 : arm->core_cache->reg_list + arm->map[16];
375
376 /* Older ARMs won't have the J bit */
377 enum arm_state state;
378
379 if (cpsr & (1 << 5)) { /* T */
380 if (cpsr & (1 << 24)) { /* J */
381 LOG_WARNING("ThumbEE -- incomplete support");
382 state = ARM_STATE_THUMB_EE;
383 } else
384 state = ARM_STATE_THUMB;
385 } else {
386 if (cpsr & (1 << 24)) { /* J */
387 LOG_ERROR("Jazelle state handling is BROKEN!");
388 state = ARM_STATE_JAZELLE;
389 } else
390 state = ARM_STATE_ARM;
391 }
392 arm->core_state = state;
393
394 LOG_DEBUG("set CPSR %#8.8x: %s mode, %s state", (unsigned) cpsr,
395 arm_mode_name(mode),
396 arm_state_strings[arm->core_state]);
397 }
398
399 /**
400 * Returns handle to the register currently mapped to a given number.
401 * Someone must have called arm_set_cpsr() before.
402 *
403 * \param arm This core's state and registers are used.
404 * \param regnum From 0..15 corresponding to R0..R14 and PC.
405 * Note that R0..R7 don't require mapping; you may access those
406 * as the first eight entries in the register cache. Likewise
407 * R15 (PC) doesn't need mapping; you may also access it directly.
408 * However, R8..R14, and SPSR (arm->spsr) *must* be mapped.
409 * CPSR (arm->cpsr) is also not mapped.
410 */
411 struct reg *arm_reg_current(struct arm *arm, unsigned regnum)
412 {
413 struct reg *r;
414
415 if (regnum > 16)
416 return NULL;
417
418 r = arm->core_cache->reg_list + arm->map[regnum];
419
420 /* e.g. invalid CPSR said "secure monitor" mode on a core
421 * that doesn't support it...
422 */
423 if (!r) {
424 LOG_ERROR("Invalid CPSR mode");
425 r = arm->core_cache->reg_list + regnum;
426 }
427
428 return r;
429 }
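
/* Illustrative sketch, not part of the original file: typical use of
 * the mapping set up by arm_set_cpsr().  Once the CPSR read back from
 * the target has been recorded, the banked SP and LR of whatever mode
 * the core is in can be fetched without knowing that mode explicitly.
 */
__attribute__((unused))
static void example_show_banked_sp_lr(struct arm *arm, uint32_t cpsr)
{
	struct reg *sp, *lr;

	arm_set_cpsr(arm, cpsr);

	sp = arm_reg_current(arm, 13);
	lr = arm_reg_current(arm, 14);

	LOG_DEBUG("sp=0x%8.8" PRIx32 " lr=0x%8.8" PRIx32,
			buf_get_u32(sp->value, 0, 32),
			buf_get_u32(lr->value, 0, 32));
}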
430
431 static const uint8_t arm_gdb_dummy_fp_value[12];
432
433 /**
434 * Dummy FPA registers are required to support GDB on ARM.
435 * Register packets require eight obsolete FPA register values.
436 * Modern ARM cores use Vector Floating Point (VFP), if they
437 * have any floating point support. VFP is not FPA-compatible.
438 */
439 struct reg arm_gdb_dummy_fp_reg =
440 {
441 .name = "GDB dummy FPA register",
442 .value = (uint8_t *) arm_gdb_dummy_fp_value,
443 .valid = 1,
444 .size = 96,
445 };
446
447 static const uint8_t arm_gdb_dummy_fps_value[4];
448
449 /**
450 * Dummy FPA status registers are required to support GDB on ARM.
451 * Register packets require an obsolete FPA status register.
452 */
453 struct reg arm_gdb_dummy_fps_reg =
454 {
455 .name = "GDB dummy FPA status register",
456 .value = (uint8_t *) arm_gdb_dummy_fps_value,
457 .valid = 1,
458 .size = 32,
459 };
460
461 static void arm_gdb_dummy_init(void) __attribute__ ((constructor));
462
463 static void arm_gdb_dummy_init(void)
464 {
465 register_init_dummy(&arm_gdb_dummy_fp_reg);
466 register_init_dummy(&arm_gdb_dummy_fps_reg);
467 }
468
469 static int armv4_5_get_core_reg(struct reg *reg)
470 {
471 int retval;
472 struct arm_reg *armv4_5 = reg->arch_info;
473 struct target *target = armv4_5->target;
474
475 if (target->state != TARGET_HALTED)
476 {
477 LOG_ERROR("Target not halted");
478 return ERROR_TARGET_NOT_HALTED;
479 }
480
481 retval = armv4_5->armv4_5_common->read_core_reg(target, reg, armv4_5->num, armv4_5->mode);
482 if (retval == ERROR_OK) {
483 reg->valid = 1;
484 reg->dirty = 0;
485 }
486
487 return retval;
488 }
489
490 static int armv4_5_set_core_reg(struct reg *reg, uint8_t *buf)
491 {
492 struct arm_reg *armv4_5 = reg->arch_info;
493 struct target *target = armv4_5->target;
494 struct arm *armv4_5_target = target_to_arm(target);
495 uint32_t value = buf_get_u32(buf, 0, 32);
496
497 if (target->state != TARGET_HALTED)
498 {
499 LOG_ERROR("Target not halted");
500 return ERROR_TARGET_NOT_HALTED;
501 }
502
503 /* Except for CPSR, the "reg" command exposes a writeback model
504 * for the register cache.
505 */
506 if (reg == armv4_5_target->cpsr) {
507 arm_set_cpsr(armv4_5_target, value);
508
509 /* Older cores need help to be in ARM mode during halt
510 * mode debug, so we clear the J and T bits if we flush.
511 * For newer cores (v6/v7a/v7r) we don't need that, but
512 * it won't hurt since CPSR is always flushed anyway.
513 */
514 if (armv4_5_target->core_mode !=
515 (enum arm_mode)(value & 0x1f)) {
516 LOG_DEBUG("changing ARM core mode to '%s'",
517 arm_mode_name(value & 0x1f));
518 value &= ~((1 << 24) | (1 << 5));
519 armv4_5_target->write_core_reg(target, reg,
520 16, ARM_MODE_ANY, value);
521 }
522 } else {
523 buf_set_u32(reg->value, 0, 32, value);
524 reg->valid = 1;
525 }
526 reg->dirty = 1;
527
528 return ERROR_OK;
529 }
530
531 static const struct reg_arch_type arm_reg_type = {
532 .get = armv4_5_get_core_reg,
533 .set = armv4_5_set_core_reg,
534 };
535
536 struct reg_cache *arm_build_reg_cache(struct target *target, struct arm *arm)
537 {
538 int num_regs = ARRAY_SIZE(arm_core_regs);
539 struct reg_cache *cache = malloc(sizeof(struct reg_cache));
540 struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
541 struct arm_reg *arch_info = calloc(num_regs, sizeof(struct arm_reg));
542 int i;
543
544 if (!cache || !reg_list || !arch_info) {
545 free(cache);
546 free(reg_list);
547 free(arch_info);
548 return NULL;
549 }
550
551 cache->name = "ARM registers";
552 cache->next = NULL;
553 cache->reg_list = reg_list;
554 cache->num_regs = 0;
555
556 for (i = 0; i < num_regs; i++)
557 {
558 /* Skip registers this core doesn't expose */
559 if (arm_core_regs[i].mode == ARM_MODE_MON
560 && arm->core_type != ARM_MODE_MON)
561 continue;
562
563 /* REVISIT handle Cortex-M, which only shadows R13/SP */
564
565 arch_info[i].num = arm_core_regs[i].cookie;
566 arch_info[i].mode = arm_core_regs[i].mode;
567 arch_info[i].target = target;
568 arch_info[i].armv4_5_common = arm;
569
570 reg_list[i].name = (char *) arm_core_regs[i].name;
571 reg_list[i].size = 32;
572 reg_list[i].value = &arch_info[i].value;
573 reg_list[i].type = &arm_reg_type;
574 reg_list[i].arch_info = &arch_info[i];
575
576 cache->num_regs++;
577 }
578
579 arm->cpsr = reg_list + ARMV4_5_CPSR;
580 arm->core_cache = cache;
581 return cache;
582 }
583
584 int arm_arch_state(struct target *target)
585 {
586 struct arm *armv4_5 = target_to_arm(target);
587
588 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
589 {
590 LOG_ERROR("BUG: called for a non-ARM target");
591 return ERROR_FAIL;
592 }
593
594 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
595 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
596 arm_state_strings[armv4_5->core_state],
597 Jim_Nvp_value2name_simple(nvp_target_debug_reason,
598 target->debug_reason)->name,
599 arm_mode_name(armv4_5->core_mode),
600 buf_get_u32(armv4_5->cpsr->value, 0, 32),
601 buf_get_u32(armv4_5->core_cache->reg_list[15].value,
602 0, 32),
603 armv4_5->is_semihosting ? ", semihosting" : "");
604
605 return ERROR_OK;
606 }
607
608 #define ARMV4_5_CORE_REG_MODENUM(cache, mode, num) \
609 cache->reg_list[armv4_5_core_reg_map[mode][num]]
610
611 COMMAND_HANDLER(handle_armv4_5_reg_command)
612 {
613 struct target *target = get_current_target(CMD_CTX);
614 struct arm *armv4_5 = target_to_arm(target);
615 unsigned num_regs;
616 struct reg *regs;
617
618 if (!is_arm(armv4_5))
619 {
620 command_print(CMD_CTX, "current target isn't an ARM");
621 return ERROR_FAIL;
622 }
623
624 if (target->state != TARGET_HALTED)
625 {
626 command_print(CMD_CTX, "error: target must be halted for register accesses");
627 return ERROR_FAIL;
628 }
629
630 if (!is_arm_mode(armv4_5->core_mode))
631 return ERROR_FAIL;
632
633 if (!armv4_5->full_context) {
634 command_print(CMD_CTX, "error: target doesn't support %s",
635 CMD_NAME);
636 return ERROR_FAIL;
637 }
638
639 num_regs = armv4_5->core_cache->num_regs;
640 regs = armv4_5->core_cache->reg_list;
641
642 for (unsigned mode = 0; mode < ARRAY_SIZE(arm_mode_data); mode++) {
643 const char *name;
644 char *sep = "\n";
645 char *shadow = "";
646
647 /* label this bank of registers (or shadows) */
648 switch (arm_mode_data[mode].psr) {
649 case ARM_MODE_SYS:
650 continue;
651 case ARM_MODE_USR:
652 name = "System and User";
653 sep = "";
654 break;
655 case ARM_MODE_MON:
656 if (armv4_5->core_type != ARM_MODE_MON)
657 continue;
658 /* FALLTHROUGH */
659 default:
660 name = arm_mode_data[mode].name;
661 shadow = "shadow ";
662 break;
663 }
664 command_print(CMD_CTX, "%s%s mode %sregisters",
665 sep, name, shadow);
666
667 /* display N rows of up to 4 registers each */
668 for (unsigned i = 0; i < arm_mode_data[mode].n_indices;) {
669 char output[80];
670 int output_len = 0;
671
672 for (unsigned j = 0; j < 4; j++, i++) {
673 uint32_t value;
674 struct reg *reg = regs;
675
676 if (i >= arm_mode_data[mode].n_indices)
677 break;
678
679 reg += arm_mode_data[mode].indices[i];
680
681 /* REVISIT be smarter about faults... */
682 if (!reg->valid)
683 armv4_5->full_context(target);
684
685 value = buf_get_u32(reg->value, 0, 32);
686 output_len += snprintf(output + output_len,
687 sizeof(output) - output_len,
688 "%8s: %8.8" PRIx32 " ",
689 reg->name, value);
690 }
691 command_print(CMD_CTX, "%s", output);
692 }
693 }
694
695 return ERROR_OK;
696 }
697
698 COMMAND_HANDLER(handle_armv4_5_core_state_command)
699 {
700 struct target *target = get_current_target(CMD_CTX);
701 struct arm *armv4_5 = target_to_arm(target);
702
703 if (!is_arm(armv4_5))
704 {
705 command_print(CMD_CTX, "current target isn't an ARM");
706 return ERROR_FAIL;
707 }
708
709 if (CMD_ARGC > 0)
710 {
711 if (strcmp(CMD_ARGV[0], "arm") == 0)
712 {
713 armv4_5->core_state = ARM_STATE_ARM;
714 }
715 if (strcmp(CMD_ARGV[0], "thumb") == 0)
716 {
717 armv4_5->core_state = ARM_STATE_THUMB;
718 }
719 }
720
721 command_print(CMD_CTX, "core state: %s", arm_state_strings[armv4_5->core_state]);
722
723 return ERROR_OK;
724 }
725
726 COMMAND_HANDLER(handle_armv4_5_disassemble_command)
727 {
728 int retval = ERROR_OK;
729 struct target *target = get_current_target(CMD_CTX);
730 struct arm *arm = target ? target_to_arm(target) : NULL;
731 uint32_t address;
732 int count = 1;
733 int thumb = 0;
734
735 if (!is_arm(arm)) {
736 command_print(CMD_CTX, "current target isn't an ARM");
737 return ERROR_FAIL;
738 }
739
740 switch (CMD_ARGC) {
741 case 3:
742 if (strcmp(CMD_ARGV[2], "thumb") != 0)
743 goto usage;
744 thumb = 1;
745 /* FALL THROUGH */
746 case 2:
747 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
748 /* FALL THROUGH */
749 case 1:
750 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
751 if (address & 0x01) {
752 if (!thumb) {
753 command_print(CMD_CTX, "Disassemble as Thumb");
754 thumb = 1;
755 }
756 address &= ~1;
757 }
758 break;
759 default:
760 usage:
761 command_print(CMD_CTX,
762 "usage: arm disassemble <address> [<count> ['thumb']]");
763 count = 0;
764 retval = ERROR_FAIL;
765 }
766
767 while (count-- > 0) {
768 struct arm_instruction cur_instruction;
769
770 if (thumb) {
771 /* Always use Thumb2 disassembly for best handling
772 * of 32-bit BL/BLX, and to work with newer cores
773 * (some ARMv6, all ARMv7) that use Thumb2.
774 */
775 retval = thumb2_opcode(target, address,
776 &cur_instruction);
777 if (retval != ERROR_OK)
778 break;
779 } else {
780 uint32_t opcode;
781
782 retval = target_read_u32(target, address, &opcode);
783 if (retval != ERROR_OK)
784 break;
785 retval = arm_evaluate_opcode(opcode, address,
 786 					&cur_instruction);
787 if (retval != ERROR_OK)
788 break;
789 }
790 command_print(CMD_CTX, "%s", cur_instruction.text);
791 address += cur_instruction.instruction_size;
792 }
793
794 return retval;
795 }
796
797 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
798 {
799 struct command_context *context;
800 struct target *target;
801 struct arm *arm;
802 int retval;
803
804 context = Jim_GetAssocData(interp, "context");
805 if (context == NULL) {
806 LOG_ERROR("%s: no command context", __func__);
807 return JIM_ERR;
808 }
809 target = get_current_target(context);
810 if (target == NULL) {
811 LOG_ERROR("%s: no current target", __func__);
812 return JIM_ERR;
813 }
814 if (!target_was_examined(target)) {
815 LOG_ERROR("%s: not yet examined", target_name(target));
816 return JIM_ERR;
817 }
818 arm = target_to_arm(target);
819 if (!is_arm(arm)) {
820 LOG_ERROR("%s: not an ARM", target_name(target));
821 return JIM_ERR;
822 }
823
824 if ((argc < 6) || (argc > 7)) {
825 /* FIXME use the command name to verify # params... */
826 LOG_ERROR("%s: wrong number of arguments", __func__);
827 return JIM_ERR;
828 }
829
830 int cpnum;
831 uint32_t op1;
832 uint32_t op2;
833 uint32_t CRn;
834 uint32_t CRm;
835 uint32_t value;
836 long l;
837
838 /* NOTE: parameter sequence matches ARM instruction set usage:
839 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
840 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
841 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
842 */
843 retval = Jim_GetLong(interp, argv[1], &l);
844 if (retval != JIM_OK)
845 return retval;
846 if (l & ~0xf) {
847 LOG_ERROR("%s: %s %d out of range", __func__,
848 "coprocessor", (int) l);
849 return JIM_ERR;
850 }
851 cpnum = l;
852
853 retval = Jim_GetLong(interp, argv[2], &l);
854 if (retval != JIM_OK)
855 return retval;
856 if (l & ~0x7) {
857 LOG_ERROR("%s: %s %d out of range", __func__,
858 "op1", (int) l);
859 return JIM_ERR;
860 }
861 op1 = l;
862
863 retval = Jim_GetLong(interp, argv[3], &l);
864 if (retval != JIM_OK)
865 return retval;
866 if (l & ~0xf) {
867 LOG_ERROR("%s: %s %d out of range", __func__,
868 "CRn", (int) l);
869 return JIM_ERR;
870 }
871 CRn = l;
872
873 retval = Jim_GetLong(interp, argv[4], &l);
874 if (retval != JIM_OK)
875 return retval;
876 if (l & ~0xf) {
877 LOG_ERROR("%s: %s %d out of range", __func__,
878 "CRm", (int) l);
879 return JIM_ERR;
880 }
881 CRm = l;
882
883 retval = Jim_GetLong(interp, argv[5], &l);
884 if (retval != JIM_OK)
885 return retval;
886 if (l & ~0x7) {
887 LOG_ERROR("%s: %s %d out of range", __func__,
888 "op2", (int) l);
889 return JIM_ERR;
890 }
891 op2 = l;
892
893 value = 0;
894
895 /* FIXME don't assume "mrc" vs "mcr" from the number of params;
896 * that could easily be a typo! Check both...
897 *
898 * FIXME change the call syntax here ... simplest to just pass
899 * the MRC() or MCR() instruction to be executed. That will also
900 * let us support the "mrc2" and "mcr2" opcodes (toggling one bit)
901 * if that's ever needed.
902 */
903 if (argc == 7) {
904 retval = Jim_GetLong(interp, argv[6], &l);
905 if (retval != JIM_OK) {
906 return retval;
907 }
908 value = l;
909
910 /* NOTE: parameters reordered! */
911 // ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2)
912 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
913 if (retval != ERROR_OK)
914 return JIM_ERR;
915 } else {
916 /* NOTE: parameters reordered! */
917 // ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2)
918 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
919 if (retval != ERROR_OK)
920 return JIM_ERR;
921
922 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
923 }
924
925 return JIM_OK;
926 }
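
/* Example, for illustration only: with the argument order checked above
 * (cpnum, op1, CRn, CRm, op2, plus a value for writes), reading and
 * rewriting cp15 c1/c0/0 -- the control register on most ARMv4/v5
 * cores, an assumption about the target rather than anything this file
 * defines -- could look like this from the Tcl shell:
 *
 *	set ctrl [arm mrc 15 0 1 0 0]
 *	arm mcr 15 0 1 0 0 [expr {$ctrl | 1}]
 *
 * The mrc form returns the value read; the mcr form takes the value to
 * write as its last argument.
 */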
927
928 static const struct command_registration arm_exec_command_handlers[] = {
929 {
930 .name = "reg",
931 .handler = &handle_armv4_5_reg_command,
932 .mode = COMMAND_EXEC,
933 .help = "display ARM core registers",
934 },
935 {
936 .name = "core_state",
937 .handler = &handle_armv4_5_core_state_command,
938 .mode = COMMAND_EXEC,
939 .usage = "<arm | thumb>",
940 .help = "display/change ARM core state",
941 },
942 {
943 .name = "disassemble",
944 .handler = &handle_armv4_5_disassemble_command,
945 .mode = COMMAND_EXEC,
946 .usage = "<address> [<count> ['thumb']]",
 947 		.help = "disassemble instructions",
948 },
949 {
950 .name = "mcr",
951 .mode = COMMAND_EXEC,
952 .jim_handler = &jim_mcrmrc,
953 .help = "write coprocessor register",
 954 		.usage = "cpnum op1 CRn CRm op2 value",
955 },
956 {
957 .name = "mrc",
958 .jim_handler = &jim_mcrmrc,
959 .help = "read coprocessor register",
 960 		.usage = "cpnum op1 CRn CRm op2",
961 },
962
963 COMMAND_REGISTRATION_DONE
964 };
965 const struct command_registration arm_command_handlers[] = {
966 {
967 .name = "arm",
968 .mode = COMMAND_ANY,
969 .help = "ARM command group",
970 .chain = arm_exec_command_handlers,
971 },
972 COMMAND_REGISTRATION_DONE
973 };
974
975 int arm_get_gdb_reg_list(struct target *target,
976 struct reg **reg_list[], int *reg_list_size)
977 {
978 struct arm *armv4_5 = target_to_arm(target);
979 int i;
980
981 if (!is_arm_mode(armv4_5->core_mode))
982 return ERROR_FAIL;
983
984 *reg_list_size = 26;
985 *reg_list = malloc(sizeof(struct reg*) * (*reg_list_size));
986
987 for (i = 0; i < 16; i++)
988 (*reg_list)[i] = arm_reg_current(armv4_5, i);
989
990 for (i = 16; i < 24; i++)
991 (*reg_list)[i] = &arm_gdb_dummy_fp_reg;
992
993 (*reg_list)[24] = &arm_gdb_dummy_fps_reg;
994 (*reg_list)[25] = armv4_5->cpsr;
995
996 return ERROR_OK;
997 }
998
999 /* wait for execution to complete and check exit point */
1000 static int armv4_5_run_algorithm_completion(struct target *target, uint32_t exit_point, int timeout_ms, void *arch_info)
1001 {
1002 int retval;
1003 struct arm *armv4_5 = target_to_arm(target);
1004
1005 if ((retval = target_wait_state(target, TARGET_HALTED, timeout_ms)) != ERROR_OK)
1006 {
1007 return retval;
1008 }
1009 if (target->state != TARGET_HALTED)
1010 {
1011 if ((retval = target_halt(target)) != ERROR_OK)
1012 return retval;
1013 if ((retval = target_wait_state(target, TARGET_HALTED, 500)) != ERROR_OK)
1014 {
1015 return retval;
1016 }
1017 return ERROR_TARGET_TIMEOUT;
1018 }
1019
1020 /* fast exit: ARMv5+ code can use BKPT */
1021 if (exit_point && buf_get_u32(armv4_5->core_cache->reg_list[15].value,
1022 0, 32) != exit_point)
1023 {
1024 LOG_WARNING("target reentered debug state, but not at the desired exit point: 0x%4.4" PRIx32 "",
1025 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1026 return ERROR_TARGET_TIMEOUT;
1027 }
1028
1029 return ERROR_OK;
1030 }
1031
1032 int armv4_5_run_algorithm_inner(struct target *target,
1033 int num_mem_params, struct mem_param *mem_params,
1034 int num_reg_params, struct reg_param *reg_params,
1035 uint32_t entry_point, uint32_t exit_point,
1036 int timeout_ms, void *arch_info,
1037 int (*run_it)(struct target *target, uint32_t exit_point,
1038 int timeout_ms, void *arch_info))
1039 {
1040 struct arm *armv4_5 = target_to_arm(target);
1041 struct arm_algorithm *arm_algorithm_info = arch_info;
1042 enum arm_state core_state = armv4_5->core_state;
1043 uint32_t context[17];
1044 uint32_t cpsr;
1045 int exit_breakpoint_size = 0;
1046 int i;
1047 int retval = ERROR_OK;
1048
1049 LOG_DEBUG("Running algorithm");
1050
1051 if (arm_algorithm_info->common_magic != ARM_COMMON_MAGIC)
1052 {
1053 LOG_ERROR("current target isn't an ARMV4/5 target");
1054 return ERROR_TARGET_INVALID;
1055 }
1056
1057 if (target->state != TARGET_HALTED)
1058 {
1059 LOG_WARNING("target not halted");
1060 return ERROR_TARGET_NOT_HALTED;
1061 }
1062
1063 if (!is_arm_mode(armv4_5->core_mode))
1064 return ERROR_FAIL;
1065
1066 /* armv5 and later can terminate with BKPT instruction; less overhead */
1067 if (!exit_point && armv4_5->is_armv4)
1068 {
1069 LOG_ERROR("ARMv4 target needs HW breakpoint location");
1070 return ERROR_FAIL;
1071 }
1072
1073 /* save r0..pc, cpsr-or-spsr, and then cpsr-for-sure;
1074 * they'll be restored later.
1075 */
1076 for (i = 0; i <= 16; i++)
1077 {
1078 struct reg *r;
1079
1080 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1081 arm_algorithm_info->core_mode, i);
1082 if (!r->valid)
1083 armv4_5->read_core_reg(target, r, i,
1084 arm_algorithm_info->core_mode);
1085 context[i] = buf_get_u32(r->value, 0, 32);
1086 }
1087 cpsr = buf_get_u32(armv4_5->cpsr->value, 0, 32);
1088
1089 for (i = 0; i < num_mem_params; i++)
1090 {
1091 if ((retval = target_write_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
1092 {
1093 return retval;
1094 }
1095 }
1096
1097 for (i = 0; i < num_reg_params; i++)
1098 {
1099 struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
1100 if (!reg)
1101 {
1102 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
1103 return ERROR_INVALID_ARGUMENTS;
1104 }
1105
1106 if (reg->size != reg_params[i].size)
1107 {
1108 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
1109 return ERROR_INVALID_ARGUMENTS;
1110 }
1111
1112 if ((retval = armv4_5_set_core_reg(reg, reg_params[i].value)) != ERROR_OK)
1113 {
1114 return retval;
1115 }
1116 }
1117
1118 armv4_5->core_state = arm_algorithm_info->core_state;
1119 if (armv4_5->core_state == ARM_STATE_ARM)
1120 exit_breakpoint_size = 4;
1121 else if (armv4_5->core_state == ARM_STATE_THUMB)
1122 exit_breakpoint_size = 2;
1123 else
1124 {
1125 LOG_ERROR("BUG: can't execute algorithms when not in ARM or Thumb state");
1126 return ERROR_INVALID_ARGUMENTS;
1127 }
1128
1129 if (arm_algorithm_info->core_mode != ARM_MODE_ANY)
1130 {
1131 LOG_DEBUG("setting core_mode: 0x%2.2x",
1132 arm_algorithm_info->core_mode);
1133 buf_set_u32(armv4_5->cpsr->value, 0, 5,
1134 arm_algorithm_info->core_mode);
1135 armv4_5->cpsr->dirty = 1;
1136 armv4_5->cpsr->valid = 1;
1137 }
1138
1139 /* terminate using a hardware or (ARMv5+) software breakpoint */
1140 if (exit_point && (retval = breakpoint_add(target, exit_point,
1141 exit_breakpoint_size, BKPT_HARD)) != ERROR_OK)
1142 {
1143 LOG_ERROR("can't add HW breakpoint to terminate algorithm");
1144 return ERROR_TARGET_FAILURE;
1145 }
1146
1147 if ((retval = target_resume(target, 0, entry_point, 1, 1)) != ERROR_OK)
1148 {
1149 return retval;
1150 }
1151 int retvaltemp;
1152 retval = run_it(target, exit_point, timeout_ms, arch_info);
1153
1154 if (exit_point)
1155 breakpoint_remove(target, exit_point);
1156
1157 if (retval != ERROR_OK)
1158 return retval;
1159
1160 for (i = 0; i < num_mem_params; i++)
1161 {
1162 if (mem_params[i].direction != PARAM_OUT)
1163 if ((retvaltemp = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
1164 {
1165 retval = retvaltemp;
1166 }
1167 }
1168
1169 for (i = 0; i < num_reg_params; i++)
1170 {
1171 if (reg_params[i].direction != PARAM_OUT)
1172 {
1173
1174 struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
1175 if (!reg)
1176 {
1177 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
1178 retval = ERROR_INVALID_ARGUMENTS;
1179 continue;
1180 }
1181
1182 if (reg->size != reg_params[i].size)
1183 {
1184 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
1185 retval = ERROR_INVALID_ARGUMENTS;
1186 continue;
1187 }
1188
1189 buf_set_u32(reg_params[i].value, 0, 32, buf_get_u32(reg->value, 0, 32));
1190 }
1191 }
1192
1193 /* restore everything we saved before (17 or 18 registers) */
1194 for (i = 0; i <= 16; i++)
1195 {
1196 uint32_t regvalue;
1197 regvalue = buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).value, 0, 32);
1198 if (regvalue != context[i])
1199 {
1200 LOG_DEBUG("restoring register %s with value 0x%8.8" PRIx32 "", ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).name, context[i]);
1201 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).value, 0, 32, context[i]);
1202 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).valid = 1;
1203 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).dirty = 1;
1204 }
1205 }
1206
1207 arm_set_cpsr(armv4_5, cpsr);
1208 armv4_5->cpsr->dirty = 1;
1209
1210 armv4_5->core_state = core_state;
1211
1212 return retval;
1213 }
1214
1215 int armv4_5_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info)
1216 {
1217 return armv4_5_run_algorithm_inner(target, num_mem_params, mem_params, num_reg_params, reg_params, entry_point, exit_point, timeout_ms, arch_info, armv4_5_run_algorithm_completion);
1218 }
1219
1220 /**
1221 * Runs ARM code in the target to calculate a CRC32 checksum.
1222 *
1223 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
1224 */
1225 int arm_checksum_memory(struct target *target,
1226 uint32_t address, uint32_t count, uint32_t *checksum)
1227 {
1228 struct working_area *crc_algorithm;
1229 struct arm_algorithm armv4_5_info;
1230 struct reg_param reg_params[2];
1231 int retval;
1232 uint32_t i;
1233
1234 static const uint32_t arm_crc_code[] = {
1235 0xE1A02000, /* mov r2, r0 */
1236 0xE3E00000, /* mov r0, #0xffffffff */
1237 0xE1A03001, /* mov r3, r1 */
1238 0xE3A04000, /* mov r4, #0 */
1239 0xEA00000B, /* b ncomp */
1240 /* nbyte: */
1241 0xE7D21004, /* ldrb r1, [r2, r4] */
1242 0xE59F7030, /* ldr r7, CRC32XOR */
1243 0xE0200C01, /* eor r0, r0, r1, asl 24 */
1244 0xE3A05000, /* mov r5, #0 */
1245 /* loop: */
1246 0xE3500000, /* cmp r0, #0 */
1247 0xE1A06080, /* mov r6, r0, asl #1 */
1248 0xE2855001, /* add r5, r5, #1 */
1249 0xE1A00006, /* mov r0, r6 */
1250 0xB0260007, /* eorlt r0, r6, r7 */
1251 0xE3550008, /* cmp r5, #8 */
1252 0x1AFFFFF8, /* bne loop */
1253 0xE2844001, /* add r4, r4, #1 */
1254 /* ncomp: */
1255 0xE1540003, /* cmp r4, r3 */
1256 0x1AFFFFF1, /* bne nbyte */
1257 /* end: */
1258 0xEAFFFFFE, /* b end */
1259 /* CRC32XOR: */
1260 0x04C11DB7 /* .word 0x04C11DB7 */
1261 };
1262
1263 retval = target_alloc_working_area(target,
1264 sizeof(arm_crc_code), &crc_algorithm);
1265 if (retval != ERROR_OK)
1266 return retval;
1267
1268 /* convert code into a buffer in target endianness */
1269 for (i = 0; i < ARRAY_SIZE(arm_crc_code); i++) {
1270 retval = target_write_u32(target,
1271 crc_algorithm->address + i * sizeof(uint32_t),
1272 arm_crc_code[i]);
1273 if (retval != ERROR_OK)
1274 return retval;
1275 }
1276
1277 armv4_5_info.common_magic = ARM_COMMON_MAGIC;
1278 armv4_5_info.core_mode = ARM_MODE_SVC;
1279 armv4_5_info.core_state = ARM_STATE_ARM;
1280
1281 init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
1282 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1283
1284 buf_set_u32(reg_params[0].value, 0, 32, address);
1285 buf_set_u32(reg_params[1].value, 0, 32, count);
1286
1287 /* 20 second timeout/megabyte */
1288 int timeout = 20000 * (1 + (count / (1024 * 1024)));
1289
1290 retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
1291 crc_algorithm->address,
1292 crc_algorithm->address + sizeof(arm_crc_code) - 8,
1293 timeout, &armv4_5_info);
1294 if (retval != ERROR_OK) {
1295 LOG_ERROR("error executing ARM crc algorithm");
1296 destroy_reg_param(&reg_params[0]);
1297 destroy_reg_param(&reg_params[1]);
1298 target_free_working_area(target, crc_algorithm);
1299 return retval;
1300 }
1301
1302 *checksum = buf_get_u32(reg_params[0].value, 0, 32);
1303
1304 destroy_reg_param(&reg_params[0]);
1305 destroy_reg_param(&reg_params[1]);
1306
1307 target_free_working_area(target, crc_algorithm);
1308
1309 return ERROR_OK;
1310 }
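
/* Illustrative sketch, not part of the original file: the same CRC that
 * arm_crc_code computes on the target, expressed as host-side C so the
 * result can be cross-checked off-target.  It mirrors the assembly
 * above: non-reflected polynomial 0x04C11DB7, initial value 0xffffffff,
 * bytes fed into the top of the accumulator, no final XOR.
 */
__attribute__((unused))
static uint32_t example_crc32_reference(const uint8_t *buf, uint32_t count)
{
	uint32_t crc = 0xffffffff;

	while (count--) {
		crc ^= (uint32_t)*buf++ << 24;
		for (unsigned bit = 0; bit < 8; bit++) {
			if (crc & 0x80000000)
				crc = (crc << 1) ^ 0x04C11DB7;
			else
				crc <<= 1;
		}
	}
	return crc;
}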
1311
1312 /**
1313 * Runs ARM code in the target to check whether a memory block holds
1314 * all ones. NOR flash which has been erased, and thus may be written,
1315 * holds all ones.
1316 *
1317 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
1318 */
1319 int arm_blank_check_memory(struct target *target,
1320 uint32_t address, uint32_t count, uint32_t *blank)
1321 {
1322 struct working_area *check_algorithm;
1323 struct reg_param reg_params[3];
1324 struct arm_algorithm armv4_5_info;
1325 int retval;
1326 uint32_t i;
1327
1328 static const uint32_t check_code[] = {
1329 /* loop: */
1330 0xe4d03001, /* ldrb r3, [r0], #1 */
1331 0xe0022003, /* and r2, r2, r3 */
1332 0xe2511001, /* subs r1, r1, #1 */
1333 0x1afffffb, /* bne loop */
1334 /* end: */
1335 0xeafffffe /* b end */
1336 };
1337
1338 /* make sure we have a working area */
1339 retval = target_alloc_working_area(target,
1340 sizeof(check_code), &check_algorithm);
1341 if (retval != ERROR_OK)
1342 return retval;
1343
1344 /* convert code into a buffer in target endianness */
1345 for (i = 0; i < ARRAY_SIZE(check_code); i++) {
1346 retval = target_write_u32(target,
1347 check_algorithm->address
1348 + i * sizeof(uint32_t),
1349 check_code[i]);
1350 if (retval != ERROR_OK)
1351 return retval;
1352 }
1353
1354 armv4_5_info.common_magic = ARM_COMMON_MAGIC;
1355 armv4_5_info.core_mode = ARM_MODE_SVC;
1356 armv4_5_info.core_state = ARM_STATE_ARM;
1357
1358 init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
1359 buf_set_u32(reg_params[0].value, 0, 32, address);
1360
1361 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1362 buf_set_u32(reg_params[1].value, 0, 32, count);
1363
1364 init_reg_param(&reg_params[2], "r2", 32, PARAM_IN_OUT);
1365 buf_set_u32(reg_params[2].value, 0, 32, 0xff);
1366
1367 retval = target_run_algorithm(target, 0, NULL, 3, reg_params,
1368 check_algorithm->address,
1369 check_algorithm->address + sizeof(check_code) - 4,
1370 10000, &armv4_5_info);
1371 if (retval != ERROR_OK) {
1372 destroy_reg_param(&reg_params[0]);
1373 destroy_reg_param(&reg_params[1]);
1374 destroy_reg_param(&reg_params[2]);
1375 target_free_working_area(target, check_algorithm);
1376 return retval;
1377 }
1378
1379 *blank = buf_get_u32(reg_params[2].value, 0, 32);
1380
1381 destroy_reg_param(&reg_params[0]);
1382 destroy_reg_param(&reg_params[1]);
1383 destroy_reg_param(&reg_params[2]);
1384
1385 target_free_working_area(target, check_algorithm);
1386
1387 return ERROR_OK;
1388 }
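
/* Illustrative sketch, not part of the original file: how a caller such
 * as a NOR flash erase-check might interpret arm_blank_check_memory().
 * The target-side loop ANDs every byte into r2 starting from 0xff, so
 * the region is blank (all ones) exactly when the returned value is
 * still 0xff.
 */
__attribute__((unused))
static int example_region_is_erased(struct target *target,
		uint32_t address, uint32_t count, bool *erased)
{
	uint32_t blank;
	int retval = arm_blank_check_memory(target, address, count, &blank);

	if (retval == ERROR_OK)
		*erased = (blank == 0xff);
	return retval;
}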
1389
1390 static int arm_full_context(struct target *target)
1391 {
1392 struct arm *armv4_5 = target_to_arm(target);
1393 unsigned num_regs = armv4_5->core_cache->num_regs;
1394 struct reg *reg = armv4_5->core_cache->reg_list;
1395 int retval = ERROR_OK;
1396
1397 for (; num_regs && retval == ERROR_OK; num_regs--, reg++) {
1398 if (reg->valid)
1399 continue;
1400 retval = armv4_5_get_core_reg(reg);
1401 }
1402 return retval;
1403 }
1404
1405 static int arm_default_mrc(struct target *target, int cpnum,
1406 uint32_t op1, uint32_t op2,
1407 uint32_t CRn, uint32_t CRm,
1408 uint32_t *value)
1409 {
1410 LOG_ERROR("%s doesn't implement MRC", target_type_name(target));
1411 return ERROR_FAIL;
1412 }
1413
1414 static int arm_default_mcr(struct target *target, int cpnum,
1415 uint32_t op1, uint32_t op2,
1416 uint32_t CRn, uint32_t CRm,
1417 uint32_t value)
1418 {
1419 LOG_ERROR("%s doesn't implement MCR", target_type_name(target));
1420 return ERROR_FAIL;
1421 }
1422
1423 int arm_init_arch_info(struct target *target, struct arm *armv4_5)
1424 {
1425 target->arch_info = armv4_5;
1426 armv4_5->target = target;
1427
1428 armv4_5->common_magic = ARM_COMMON_MAGIC;
1429 arm_set_cpsr(armv4_5, ARM_MODE_USR);
1430
1431 /* core_type may be overridden by subtype logic */
1432 armv4_5->core_type = ARM_MODE_ANY;
1433
1434 /* default full_context() has no core-specific optimizations */
1435 if (!armv4_5->full_context && armv4_5->read_core_reg)
1436 armv4_5->full_context = arm_full_context;
1437
1438 if (!armv4_5->mrc)
1439 armv4_5->mrc = arm_default_mrc;
1440 if (!armv4_5->mcr)
1441 armv4_5->mcr = arm_default_mcr;
1442
1443 return ERROR_OK;
1444 }
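
/* Illustrative sketch, not part of the original file: the order a core
 * driver would follow when bringing up this generic layer.  The
 * example_read_core_reg/example_write_core_reg hooks are hypothetical
 * names standing in for whatever the core module provides; they are
 * installed before arm_init_arch_info() so that the arm_full_context()
 * default above can be used.  Kept out of the build on purpose.
 */
#if 0
static int example_core_init(struct target *target, struct arm *arm)
{
	/* core-specific register accessors first */
	arm->read_core_reg = example_read_core_reg;
	arm->write_core_reg = example_write_core_reg;

	/* then the shared ARM state and the register cache */
	arm_init_arch_info(target, arm);
	arm_build_reg_cache(target, arm);

	return ERROR_OK;
}
#endif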
