cortex_m3: use armv7m's async algorithm implementation
src/target/armv4_5.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2008 by Spencer Oliver *
6 * spen@spen-soft.co.uk *
7 * *
8 * Copyright (C) 2008 by Oyvind Harboe *
9 * oyvind.harboe@zylin.com *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "arm.h"
31 #include "armv4_5.h"
32 #include "arm_jtag.h"
33 #include "breakpoints.h"
34 #include "arm_disassembler.h"
35 #include <helper/binarybuffer.h>
36 #include "algorithm.h"
37 #include "register.h"
38
39
40 /* offsets into armv4_5 core register cache */
41 enum {
42 // ARMV4_5_CPSR = 31,
43 ARMV4_5_SPSR_FIQ = 32,
44 ARMV4_5_SPSR_IRQ = 33,
45 ARMV4_5_SPSR_SVC = 34,
46 ARMV4_5_SPSR_ABT = 35,
47 ARMV4_5_SPSR_UND = 36,
48 ARM_SPSR_MON = 39,
49 };
50
51 static const uint8_t arm_usr_indices[17] = {
52 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ARMV4_5_CPSR,
53 };
54
55 static const uint8_t arm_fiq_indices[8] = {
56 16, 17, 18, 19, 20, 21, 22, ARMV4_5_SPSR_FIQ,
57 };
58
59 static const uint8_t arm_irq_indices[3] = {
60 23, 24, ARMV4_5_SPSR_IRQ,
61 };
62
63 static const uint8_t arm_svc_indices[3] = {
64 25, 26, ARMV4_5_SPSR_SVC,
65 };
66
67 static const uint8_t arm_abt_indices[3] = {
68 27, 28, ARMV4_5_SPSR_ABT,
69 };
70
71 static const uint8_t arm_und_indices[3] = {
72 29, 30, ARMV4_5_SPSR_UND,
73 };
74
75 static const uint8_t arm_mon_indices[3] = {
76 37, 38, ARM_SPSR_MON,
77 };
78
79 static const struct {
80 const char *name;
81 unsigned short psr;
82 /* For user and system modes, these list indices for all registers;
83 * otherwise they're just indices for the shadow registers and SPSR.
84 */
85 unsigned short n_indices;
86 const uint8_t *indices;
87 } arm_mode_data[] = {
88 /* Seven modes are standard from ARM7 on. "System" and "User" share
89 * the same registers; other modes shadow from 3 to 8 registers.
90 */
91 {
92 .name = "User",
93 .psr = ARM_MODE_USR,
94 .n_indices = ARRAY_SIZE(arm_usr_indices),
95 .indices = arm_usr_indices,
96 },
97 {
98 .name = "FIQ",
99 .psr = ARM_MODE_FIQ,
100 .n_indices = ARRAY_SIZE(arm_fiq_indices),
101 .indices = arm_fiq_indices,
102 },
103 {
104 .name = "Supervisor",
105 .psr = ARM_MODE_SVC,
106 .n_indices = ARRAY_SIZE(arm_svc_indices),
107 .indices = arm_svc_indices,
108 },
109 {
110 .name = "Abort",
111 .psr = ARM_MODE_ABT,
112 .n_indices = ARRAY_SIZE(arm_abt_indices),
113 .indices = arm_abt_indices,
114 },
115 {
116 .name = "IRQ",
117 .psr = ARM_MODE_IRQ,
118 .n_indices = ARRAY_SIZE(arm_irq_indices),
119 .indices = arm_irq_indices,
120 },
121 {
122 .name = "Undefined instruction",
123 .psr = ARM_MODE_UND,
124 .n_indices = ARRAY_SIZE(arm_und_indices),
125 .indices = arm_und_indices,
126 },
127 {
128 .name = "System",
129 .psr = ARM_MODE_SYS,
130 .n_indices = ARRAY_SIZE(arm_usr_indices),
131 .indices = arm_usr_indices,
132 },
133 /* TrustZone "Security Extensions" add a secure monitor mode.
134 * This is distinct from a "debug monitor" which can support
135 * non-halting debug, in conjunction with some debuggers.
136 */
137 {
138 .name = "Secure Monitor",
139 .psr = ARM_MODE_MON,
140 .n_indices = ARRAY_SIZE(arm_mon_indices),
141 .indices = arm_mon_indices,
142 },
143 };
144
145 /** Map PSR mode bits to the name of an ARM processor operating mode. */
146 const char *arm_mode_name(unsigned psr_mode)
147 {
148 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
149 if (arm_mode_data[i].psr == psr_mode)
150 return arm_mode_data[i].name;
151 }
152 LOG_ERROR("unrecognized psr mode: %#02x", psr_mode);
153 return "UNRECOGNIZED";
154 }
155
156 /** Return true iff the parameter denotes a valid ARM processor mode. */
157 bool is_arm_mode(unsigned psr_mode)
158 {
159 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
160 if (arm_mode_data[i].psr == psr_mode)
161 return true;
162 }
163 return false;
164 }
165
166 /** Map PSR mode bits to linear number indexing armv4_5_core_reg_map */
167 int arm_mode_to_number(enum arm_mode mode)
168 {
169 switch (mode) {
170 case ARM_MODE_ANY:
171 /* map MODE_ANY to user mode */
172 case ARM_MODE_USR:
173 return 0;
174 case ARM_MODE_FIQ:
175 return 1;
176 case ARM_MODE_IRQ:
177 return 2;
178 case ARM_MODE_SVC:
179 return 3;
180 case ARM_MODE_ABT:
181 return 4;
182 case ARM_MODE_UND:
183 return 5;
184 case ARM_MODE_SYS:
185 return 6;
186 case ARM_MODE_MON:
187 return 7;
188 default:
189 LOG_ERROR("invalid mode value encountered %d", mode);
190 return -1;
191 }
192 }
193
194 /** Map linear number indexing armv4_5_core_reg_map to PSR mode bits. */
195 enum arm_mode armv4_5_number_to_mode(int number)
196 {
197 switch (number) {
198 case 0:
199 return ARM_MODE_USR;
200 case 1:
201 return ARM_MODE_FIQ;
202 case 2:
203 return ARM_MODE_IRQ;
204 case 3:
205 return ARM_MODE_SVC;
206 case 4:
207 return ARM_MODE_ABT;
208 case 5:
209 return ARM_MODE_UND;
210 case 6:
211 return ARM_MODE_SYS;
212 case 7:
213 return ARM_MODE_MON;
214 default:
215 LOG_ERROR("mode index out of bounds %d", number);
216 return ARM_MODE_ANY;
217 }
218 }
219
220 static const char *arm_state_strings[] =
221 {
222 "ARM", "Thumb", "Jazelle", "ThumbEE",
223 };
224
225 /* Templates for ARM core registers.
226 *
227 * NOTE: offsets in this table are coupled to the arm_mode_data
228 * table above, the armv4_5_core_reg_map array below, and also to
229 * the ARMV4_5_CPSR symbol (which should vanish after ARM11 updates).
230 */
231 static const struct {
232 /* The name is used for e.g. the "regs" command. */
233 const char *name;
234
235 /* The {cookie, mode} tuple uniquely identifies one register.
236 * In a given mode, cookies 0..15 map to registers R0..R15,
237 * with R13..R15 usually called SP, LR, PC.
238 *
239 * MODE_ANY is used as *input* to the mapping, and indicates
240 * various special cases (sigh) and errors.
241 *
242 * Cookie 16 is (currently) confusing, since it indicates
243 * CPSR -or- SPSR depending on whether 'mode' is MODE_ANY.
244 * (Exception modes have both CPSR and SPSR registers ...)
245 */
246 unsigned cookie;
247 enum arm_mode mode;
248 } arm_core_regs[] = {
249 /* IMPORTANT: we guarantee that the first eight cached registers
250 * correspond to r0..r7, and the fifteenth to PC, so that callers
251 * don't need to map them.
252 */
253 { .name = "r0", .cookie = 0, .mode = ARM_MODE_ANY, },
254 { .name = "r1", .cookie = 1, .mode = ARM_MODE_ANY, },
255 { .name = "r2", .cookie = 2, .mode = ARM_MODE_ANY, },
256 { .name = "r3", .cookie = 3, .mode = ARM_MODE_ANY, },
257 { .name = "r4", .cookie = 4, .mode = ARM_MODE_ANY, },
258 { .name = "r5", .cookie = 5, .mode = ARM_MODE_ANY, },
259 { .name = "r6", .cookie = 6, .mode = ARM_MODE_ANY, },
260 { .name = "r7", .cookie = 7, .mode = ARM_MODE_ANY, },
261
262 /* NOTE: regs 8..12 might be shadowed by FIQ ... flagging
263 * them as MODE_ANY creates special cases. (ANY means
264 * "not mapped" elsewhere; here it's "everything but FIQ".)
265 */
266 { .name = "r8", .cookie = 8, .mode = ARM_MODE_ANY, },
267 { .name = "r9", .cookie = 9, .mode = ARM_MODE_ANY, },
268 { .name = "r10", .cookie = 10, .mode = ARM_MODE_ANY, },
269 { .name = "r11", .cookie = 11, .mode = ARM_MODE_ANY, },
270 { .name = "r12", .cookie = 12, .mode = ARM_MODE_ANY, },
271
272 /* NOTE all MODE_USR registers are equivalent to MODE_SYS ones */
273 { .name = "sp_usr", .cookie = 13, .mode = ARM_MODE_USR, },
274 { .name = "lr_usr", .cookie = 14, .mode = ARM_MODE_USR, },
275
276 /* guaranteed to be at index 15 */
277 { .name = "pc", .cookie = 15, .mode = ARM_MODE_ANY, },
278
279 { .name = "r8_fiq", .cookie = 8, .mode = ARM_MODE_FIQ, },
280 { .name = "r9_fiq", .cookie = 9, .mode = ARM_MODE_FIQ, },
281 { .name = "r10_fiq", .cookie = 10, .mode = ARM_MODE_FIQ, },
282 { .name = "r11_fiq", .cookie = 11, .mode = ARM_MODE_FIQ, },
283 { .name = "r12_fiq", .cookie = 12, .mode = ARM_MODE_FIQ, },
284
285 { .name = "sp_fiq", .cookie = 13, .mode = ARM_MODE_FIQ, },
286 { .name = "lr_fiq", .cookie = 14, .mode = ARM_MODE_FIQ, },
287
288 { .name = "sp_irq", .cookie = 13, .mode = ARM_MODE_IRQ, },
289 { .name = "lr_irq", .cookie = 14, .mode = ARM_MODE_IRQ, },
290
291 { .name = "sp_svc", .cookie = 13, .mode = ARM_MODE_SVC, },
292 { .name = "lr_svc", .cookie = 14, .mode = ARM_MODE_SVC, },
293
294 { .name = "sp_abt", .cookie = 13, .mode = ARM_MODE_ABT, },
295 { .name = "lr_abt", .cookie = 14, .mode = ARM_MODE_ABT, },
296
297 { .name = "sp_und", .cookie = 13, .mode = ARM_MODE_UND, },
298 { .name = "lr_und", .cookie = 14, .mode = ARM_MODE_UND, },
299
300 { .name = "cpsr", .cookie = 16, .mode = ARM_MODE_ANY, },
301 { .name = "spsr_fiq", .cookie = 16, .mode = ARM_MODE_FIQ, },
302 { .name = "spsr_irq", .cookie = 16, .mode = ARM_MODE_IRQ, },
303 { .name = "spsr_svc", .cookie = 16, .mode = ARM_MODE_SVC, },
304 { .name = "spsr_abt", .cookie = 16, .mode = ARM_MODE_ABT, },
305 { .name = "spsr_und", .cookie = 16, .mode = ARM_MODE_UND, },
306
307 { .name = "sp_mon", .cookie = 13, .mode = ARM_MODE_MON, },
308 { .name = "lr_mon", .cookie = 14, .mode = ARM_MODE_MON, },
309 { .name = "spsr_mon", .cookie = 16, .mode = ARM_MODE_MON, },
310 };
311
312 /* map core mode (USR, FIQ, ...) and register number to
313 * indices into the register cache
314 */
315 const int armv4_5_core_reg_map[8][17] =
316 {
317 { /* USR */
318 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
319 },
320 { /* FIQ (8 shadows of USR, vs normal 3) */
321 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 15, 32
322 },
323 { /* IRQ */
324 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 23, 24, 15, 33
325 },
326 { /* SVC */
327 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 25, 26, 15, 34
328 },
329 { /* ABT */
330 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 27, 28, 15, 35
331 },
332 { /* UND */
333 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 29, 30, 15, 36
334 },
335 { /* SYS (same registers as USR) */
336 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
337 },
338 { /* MON */
339 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 37, 38, 15, 39,
340 }
341 };
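/* Worked example (for illustration): armv4_5_core_reg_map[1][13] is 21,
 * so register 13 (SP) looked up through the FIQ row resolves to the
 * "sp_fiq" entry of the cache built by arm_build_reg_cache() below.
 */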
342
343 /**
344 * Configures host-side ARM records to reflect the specified CPSR.
345 * Later, code can use arm_reg_current() to map register numbers
346 * according to how they are exposed by this mode.
347 */
348 void arm_set_cpsr(struct arm *arm, uint32_t cpsr)
349 {
350 enum arm_mode mode = cpsr & 0x1f;
351 int num;
352
353 /* NOTE: this may be called very early, before the register
354 * cache is set up. We can't defend against many errors, in
355 * particular against CPSRs that aren't valid *here* ...
356 */
357 if (arm->cpsr) {
358 buf_set_u32(arm->cpsr->value, 0, 32, cpsr);
359 arm->cpsr->valid = 1;
360 arm->cpsr->dirty = 0;
361 }
362
363 arm->core_mode = mode;
364
365 /* mode_to_number() warned; set up a somewhat-sane mapping */
366 num = arm_mode_to_number(mode);
367 if (num < 0) {
368 mode = ARM_MODE_USR;
369 num = 0;
370 }
371
372 arm->map = &armv4_5_core_reg_map[num][0];
373 arm->spsr = (mode == ARM_MODE_USR || mode == ARM_MODE_SYS)
374 ? NULL
375 : arm->core_cache->reg_list + arm->map[16];
376
377 /* Older ARMs won't have the J bit */
378 enum arm_state state;
379
380 if (cpsr & (1 << 5)) { /* T */
381 if (cpsr & (1 << 24)) { /* J */
382 LOG_WARNING("ThumbEE -- incomplete support");
383 state = ARM_STATE_THUMB_EE;
384 } else
385 state = ARM_STATE_THUMB;
386 } else {
387 if (cpsr & (1 << 24)) { /* J */
388 LOG_ERROR("Jazelle state handling is BROKEN!");
389 state = ARM_STATE_JAZELLE;
390 } else
391 state = ARM_STATE_ARM;
392 }
393 arm->core_state = state;
394
395 LOG_DEBUG("set CPSR %#8.8x: %s mode, %s state", (unsigned) cpsr,
396 arm_mode_name(mode),
397 arm_state_strings[arm->core_state]);
398 }
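/* Worked example (for illustration): a typical reset-time CPSR of 0xd3
 * has mode bits 0x13 (Supervisor) with T (bit 5) and J (bit 24) clear,
 * so arm_set_cpsr() selects the SVC register mapping, points arm->spsr
 * at spsr_svc, and reports ARM_STATE_ARM.
 */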
399
400 /**
401 * Returns handle to the register currently mapped to a given number.
402 * Someone must have called arm_set_cpsr() before.
403 *
404 * \param arm This core's state and registers are used.
405 * \param regnum From 0..15 corresponding to R0..R14 and PC.
406 * Note that R0..R7 don't require mapping; you may access those
407 * as the first eight entries in the register cache. Likewise
408 * R15 (PC) doesn't need mapping; you may also access it directly.
409 * However, R8..R14, and SPSR (arm->spsr) *must* be mapped.
410 * CPSR (arm->cpsr) is also not mapped.
411 */
412 struct reg *arm_reg_current(struct arm *arm, unsigned regnum)
413 {
414 struct reg *r;
415
416 if (regnum > 16)
417 return NULL;
418
419 r = arm->core_cache->reg_list + arm->map[regnum];
420
421 /* e.g. invalid CPSR said "secure monitor" mode on a core
422 * that doesn't support it...
423 */
424 if (!r) {
425 LOG_ERROR("Invalid CPSR mode");
426 r = arm->core_cache->reg_list + regnum;
427 }
428
429 return r;
430 }
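/* Usage sketch (illustrative; no caller in this file does exactly this):
 * fetch the banked SP for whatever mode the last arm_set_cpsr() selected:
 *
 *	struct reg *sp = arm_reg_current(arm, 13);
 *	uint32_t sp_value = buf_get_u32(sp->value, 0, 32);
 *
 * R0..R7 and PC may instead be read straight from the cache, as noted above.
 */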
431
432 static const uint8_t arm_gdb_dummy_fp_value[12];
433
434 /**
435 * Dummy FPA registers are required to support GDB on ARM.
436 * Register packets require eight obsolete FPA register values.
437 * Modern ARM cores use Vector Floating Point (VFP), if they
438 * have any floating point support. VFP is not FPA-compatible.
439 */
440 struct reg arm_gdb_dummy_fp_reg =
441 {
442 .name = "GDB dummy FPA register",
443 .value = (uint8_t *) arm_gdb_dummy_fp_value,
444 .valid = 1,
445 .size = 96,
446 };
447
448 static const uint8_t arm_gdb_dummy_fps_value[4];
449
450 /**
451 * Dummy FPA status registers are required to support GDB on ARM.
452 * Register packets require an obsolete FPA status register.
453 */
454 struct reg arm_gdb_dummy_fps_reg =
455 {
456 .name = "GDB dummy FPA status register",
457 .value = (uint8_t *) arm_gdb_dummy_fps_value,
458 .valid = 1,
459 .size = 32,
460 };
461
462 static void arm_gdb_dummy_init(void) __attribute__ ((constructor));
463
464 static void arm_gdb_dummy_init(void)
465 {
466 register_init_dummy(&arm_gdb_dummy_fp_reg);
467 register_init_dummy(&arm_gdb_dummy_fps_reg);
468 }
469
470 static int armv4_5_get_core_reg(struct reg *reg)
471 {
472 int retval;
473 struct arm_reg *armv4_5 = reg->arch_info;
474 struct target *target = armv4_5->target;
475
476 if (target->state != TARGET_HALTED)
477 {
478 LOG_ERROR("Target not halted");
479 return ERROR_TARGET_NOT_HALTED;
480 }
481
482 retval = armv4_5->armv4_5_common->read_core_reg(target, reg, armv4_5->num, armv4_5->mode);
483 if (retval == ERROR_OK) {
484 reg->valid = 1;
485 reg->dirty = 0;
486 }
487
488 return retval;
489 }
490
491 static int armv4_5_set_core_reg(struct reg *reg, uint8_t *buf)
492 {
493 struct arm_reg *armv4_5 = reg->arch_info;
494 struct target *target = armv4_5->target;
495 struct arm *armv4_5_target = target_to_arm(target);
496 uint32_t value = buf_get_u32(buf, 0, 32);
497
498 if (target->state != TARGET_HALTED)
499 {
500 LOG_ERROR("Target not halted");
501 return ERROR_TARGET_NOT_HALTED;
502 }
503
504 /* Except for CPSR, the "reg" command exposes a writeback model
505 * for the register cache.
506 */
507 if (reg == armv4_5_target->cpsr) {
508 arm_set_cpsr(armv4_5_target, value);
509
510 /* Older cores need help to be in ARM mode during halt
511 * mode debug, so we clear the J and T bits if we flush.
512 * For newer cores (v6/v7a/v7r) we don't need that, but
513 * it won't hurt since CPSR is always flushed anyway.
514 */
515 if (armv4_5_target->core_mode !=
516 (enum arm_mode)(value & 0x1f)) {
517 LOG_DEBUG("changing ARM core mode to '%s'",
518 arm_mode_name(value & 0x1f));
519 value &= ~((1 << 24) | (1 << 5));
520 armv4_5_target->write_core_reg(target, reg,
521 16, ARM_MODE_ANY, value);
522 }
523 } else {
524 buf_set_u32(reg->value, 0, 32, value);
525 reg->valid = 1;
526 }
527 reg->dirty = 1;
528
529 return ERROR_OK;
530 }
531
532 static const struct reg_arch_type arm_reg_type = {
533 .get = armv4_5_get_core_reg,
534 .set = armv4_5_set_core_reg,
535 };
536
537 struct reg_cache *arm_build_reg_cache(struct target *target, struct arm *arm)
538 {
539 int num_regs = ARRAY_SIZE(arm_core_regs);
540 struct reg_cache *cache = malloc(sizeof(struct reg_cache));
541 struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
542 struct arm_reg *arch_info = calloc(num_regs, sizeof(struct arm_reg));
543 int i;
544
545 if (!cache || !reg_list || !arch_info) {
546 free(cache);
547 free(reg_list);
548 free(arch_info);
549 return NULL;
550 }
551
552 cache->name = "ARM registers";
553 cache->next = NULL;
554 cache->reg_list = reg_list;
555 cache->num_regs = 0;
556
557 for (i = 0; i < num_regs; i++)
558 {
559 /* Skip registers this core doesn't expose */
560 if (arm_core_regs[i].mode == ARM_MODE_MON
561 && arm->core_type != ARM_MODE_MON)
562 continue;
563
564 /* REVISIT handle Cortex-M, which only shadows R13/SP */
565
566 arch_info[i].num = arm_core_regs[i].cookie;
567 arch_info[i].mode = arm_core_regs[i].mode;
568 arch_info[i].target = target;
569 arch_info[i].armv4_5_common = arm;
570
571 reg_list[i].name = (char *) arm_core_regs[i].name;
572 reg_list[i].size = 32;
573 reg_list[i].value = &arch_info[i].value;
574 reg_list[i].type = &arm_reg_type;
575 reg_list[i].arch_info = &arch_info[i];
576
577 cache->num_regs++;
578 }
579
580 arm->pc = reg_list + 15;
581 arm->cpsr = reg_list + ARMV4_5_CPSR;
582 arm->core_cache = cache;
583 return cache;
584 }
585
586 int arm_arch_state(struct target *target)
587 {
588 struct arm *armv4_5 = target_to_arm(target);
589
590 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
591 {
592 LOG_ERROR("BUG: called for a non-ARM target");
593 return ERROR_FAIL;
594 }
595
596 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
597 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
598 arm_state_strings[armv4_5->core_state],
599 debug_reason_name(target),
600 arm_mode_name(armv4_5->core_mode),
601 buf_get_u32(armv4_5->cpsr->value, 0, 32),
602 buf_get_u32(armv4_5->pc->value, 0, 32),
603 armv4_5->is_semihosting ? ", semihosting" : "");
604
605 return ERROR_OK;
606 }
607
608 #define ARMV4_5_CORE_REG_MODENUM(cache, mode, num) \
609 cache->reg_list[armv4_5_core_reg_map[mode][num]]
610
611 COMMAND_HANDLER(handle_armv4_5_reg_command)
612 {
613 struct target *target = get_current_target(CMD_CTX);
614 struct arm *armv4_5 = target_to_arm(target);
615 struct reg *regs;
616
617 if (!is_arm(armv4_5))
618 {
619 command_print(CMD_CTX, "current target isn't an ARM");
620 return ERROR_FAIL;
621 }
622
623 if (target->state != TARGET_HALTED)
624 {
625 command_print(CMD_CTX, "error: target must be halted for register accesses");
626 return ERROR_FAIL;
627 }
628
629 if (armv4_5->core_type != ARM_MODE_ANY)
630 {
631 command_print(CMD_CTX, "Microcontroller Profile not supported - use standard reg cmd");
632 return ERROR_OK;
633 }
634
635 if (!is_arm_mode(armv4_5->core_mode))
636 {
637 LOG_ERROR("not a valid arm core mode - communication failure?");
638 return ERROR_FAIL;
639 }
640
641 if (!armv4_5->full_context) {
642 command_print(CMD_CTX, "error: target doesn't support %s",
643 CMD_NAME);
644 return ERROR_FAIL;
645 }
646
647 regs = armv4_5->core_cache->reg_list;
648
649 for (unsigned mode = 0; mode < ARRAY_SIZE(arm_mode_data); mode++) {
650 const char *name;
651 char *sep = "\n";
652 char *shadow = "";
653
654 /* label this bank of registers (or shadows) */
655 switch (arm_mode_data[mode].psr) {
656 case ARM_MODE_SYS:
657 continue;
658 case ARM_MODE_USR:
659 name = "System and User";
660 sep = "";
661 break;
662 case ARM_MODE_MON:
663 if (armv4_5->core_type != ARM_MODE_MON)
664 continue;
665 /* FALLTHROUGH */
666 default:
667 name = arm_mode_data[mode].name;
668 shadow = "shadow ";
669 break;
670 }
671 command_print(CMD_CTX, "%s%s mode %sregisters",
672 sep, name, shadow);
673
674 /* display N rows of up to 4 registers each */
675 for (unsigned i = 0; i < arm_mode_data[mode].n_indices;) {
676 char output[80];
677 int output_len = 0;
678
679 for (unsigned j = 0; j < 4; j++, i++) {
680 uint32_t value;
681 struct reg *reg = regs;
682
683 if (i >= arm_mode_data[mode].n_indices)
684 break;
685
686 reg += arm_mode_data[mode].indices[i];
687
688 /* REVISIT be smarter about faults... */
689 if (!reg->valid)
690 armv4_5->full_context(target);
691
692 value = buf_get_u32(reg->value, 0, 32);
693 output_len += snprintf(output + output_len,
694 sizeof(output) - output_len,
695 "%8s: %8.8" PRIx32 " ",
696 reg->name, value);
697 }
698 command_print(CMD_CTX, "%s", output);
699 }
700 }
701
702 return ERROR_OK;
703 }
704
705 COMMAND_HANDLER(handle_armv4_5_core_state_command)
706 {
707 struct target *target = get_current_target(CMD_CTX);
708 struct arm *armv4_5 = target_to_arm(target);
709
710 if (!is_arm(armv4_5))
711 {
712 command_print(CMD_CTX, "current target isn't an ARM");
713 return ERROR_FAIL;
714 }
715
716 if (armv4_5->core_type == ARM_MODE_THREAD)
717 {
718 /* armv7m not supported */
719 command_print(CMD_CTX, "Unsupported Command");
720 return ERROR_OK;
721 }
722
723 if (CMD_ARGC > 0)
724 {
725 if (strcmp(CMD_ARGV[0], "arm") == 0)
726 {
727 armv4_5->core_state = ARM_STATE_ARM;
728 }
729 if (strcmp(CMD_ARGV[0], "thumb") == 0)
730 {
731 armv4_5->core_state = ARM_STATE_THUMB;
732 }
733 }
734
735 command_print(CMD_CTX, "core state: %s", arm_state_strings[armv4_5->core_state]);
736
737 return ERROR_OK;
738 }
739
740 COMMAND_HANDLER(handle_arm_disassemble_command)
741 {
742 int retval = ERROR_OK;
743 struct target *target = get_current_target(CMD_CTX);
744 struct arm *arm = target ? target_to_arm(target) : NULL;
745 uint32_t address;
746 int count = 1;
747 int thumb = 0;
748
749 if (!is_arm(arm)) {
750 command_print(CMD_CTX, "current target isn't an ARM");
751 return ERROR_FAIL;
752 }
753
754 if (arm->core_type == ARM_MODE_THREAD)
755 {
756 /* armv7m is always thumb mode */
757 thumb = 1;
758 }
759
760 switch (CMD_ARGC) {
761 case 3:
762 if (strcmp(CMD_ARGV[2], "thumb") != 0)
763 goto usage;
764 thumb = 1;
765 /* FALL THROUGH */
766 case 2:
767 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
768 /* FALL THROUGH */
769 case 1:
770 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
771 if (address & 0x01) {
772 if (!thumb) {
773 command_print(CMD_CTX, "Disassemble as Thumb");
774 thumb = 1;
775 }
776 address &= ~1;
777 }
778 break;
779 default:
780 usage:
781 command_print(CMD_CTX,
782 "usage: arm disassemble <address> [<count> ['thumb']]");
783 count = 0;
784 retval = ERROR_FAIL;
785 }
786
787 while (count-- > 0) {
788 struct arm_instruction cur_instruction;
789
790 if (thumb) {
791 /* Always use Thumb2 disassembly for best handling
792 * of 32-bit BL/BLX, and to work with newer cores
793 * (some ARMv6, all ARMv7) that use Thumb2.
794 */
795 retval = thumb2_opcode(target, address,
796 &cur_instruction);
797 if (retval != ERROR_OK)
798 break;
799 } else {
800 uint32_t opcode;
801
802 retval = target_read_u32(target, address, &opcode);
803 if (retval != ERROR_OK)
804 break;
805 retval = arm_evaluate_opcode(opcode, address,
806 &cur_instruction);
807 if (retval != ERROR_OK)
808 break;
809 }
810 command_print(CMD_CTX, "%s", cur_instruction.text);
811 address += cur_instruction.instruction_size;
812 }
813
814 return retval;
815 }
816
817 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
818 {
819 struct command_context *context;
820 struct target *target;
821 struct arm *arm;
822 int retval;
823
824 context = current_command_context(interp);
825 assert(context != NULL);
826
827 target = get_current_target(context);
828 if (target == NULL) {
829 LOG_ERROR("%s: no current target", __func__);
830 return JIM_ERR;
831 }
832 if (!target_was_examined(target)) {
833 LOG_ERROR("%s: not yet examined", target_name(target));
834 return JIM_ERR;
835 }
836 arm = target_to_arm(target);
837 if (!is_arm(arm)) {
838 LOG_ERROR("%s: not an ARM", target_name(target));
839 return JIM_ERR;
840 }
841
842 if ((argc < 6) || (argc > 7)) {
843 /* FIXME use the command name to verify # params... */
844 LOG_ERROR("%s: wrong number of arguments", __func__);
845 return JIM_ERR;
846 }
847
848 int cpnum;
849 uint32_t op1;
850 uint32_t op2;
851 uint32_t CRn;
852 uint32_t CRm;
853 uint32_t value;
854 long l;
855
856 /* NOTE: parameter sequence matches ARM instruction set usage:
857 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
858 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
859 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
860 */
861 retval = Jim_GetLong(interp, argv[1], &l);
862 if (retval != JIM_OK)
863 return retval;
864 if (l & ~0xf) {
865 LOG_ERROR("%s: %s %d out of range", __func__,
866 "coprocessor", (int) l);
867 return JIM_ERR;
868 }
869 cpnum = l;
870
871 retval = Jim_GetLong(interp, argv[2], &l);
872 if (retval != JIM_OK)
873 return retval;
874 if (l & ~0x7) {
875 LOG_ERROR("%s: %s %d out of range", __func__,
876 "op1", (int) l);
877 return JIM_ERR;
878 }
879 op1 = l;
880
881 retval = Jim_GetLong(interp, argv[3], &l);
882 if (retval != JIM_OK)
883 return retval;
884 if (l & ~0xf) {
885 LOG_ERROR("%s: %s %d out of range", __func__,
886 "CRn", (int) l);
887 return JIM_ERR;
888 }
889 CRn = l;
890
891 retval = Jim_GetLong(interp, argv[4], &l);
892 if (retval != JIM_OK)
893 return retval;
894 if (l & ~0xf) {
895 LOG_ERROR("%s: %s %d out of range", __func__,
896 "CRm", (int) l);
897 return JIM_ERR;
898 }
899 CRm = l;
900
901 retval = Jim_GetLong(interp, argv[5], &l);
902 if (retval != JIM_OK)
903 return retval;
904 if (l & ~0x7) {
905 LOG_ERROR("%s: %s %d out of range", __func__,
906 "op2", (int) l);
907 return JIM_ERR;
908 }
909 op2 = l;
910
911 value = 0;
912
913 /* FIXME don't assume "mrc" vs "mcr" from the number of params;
914 * that could easily be a typo! Check both...
915 *
916 * FIXME change the call syntax here ... simplest to just pass
917 * the MRC() or MCR() instruction to be executed. That will also
918 * let us support the "mrc2" and "mcr2" opcodes (toggling one bit)
919 * if that's ever needed.
920 */
921 if (argc == 7) {
922 retval = Jim_GetLong(interp, argv[6], &l);
923 if (retval != JIM_OK) {
924 return retval;
925 }
926 value = l;
927
928 /* NOTE: parameters reordered! */
929 // ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2)
930 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
931 if (retval != ERROR_OK)
932 return JIM_ERR;
933 } else {
934 /* NOTE: parameters reordered! */
935 // ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2)
936 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
937 if (retval != ERROR_OK)
938 return JIM_ERR;
939
940 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
941 }
942
943 return JIM_OK;
944 }
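/* Illustrative Tcl invocations (argument order follows the parsing above,
 * i.e. cpnum op1 CRn CRm op2 [value]):
 *
 *	arm mrc 15 0 0 0 0	;# MRC p15, 0, <Rt>, c0, c0, 0 -- read MIDR
 *	arm mcr 15 0 7 5 0 0	;# MCR p15, 0, <Rt>, c7, c5, 0 -- invalidate I-cache
 */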
945
946 COMMAND_HANDLER(handle_arm_semihosting_command)
947 {
948 struct target *target = get_current_target(CMD_CTX);
949 struct arm *arm = target ? target_to_arm(target) : NULL;
950
951 if (!is_arm(arm)) {
952 command_print(CMD_CTX, "current target isn't an ARM");
953 return ERROR_FAIL;
954 }
955
956 if (!arm->setup_semihosting) {
957 command_print(CMD_CTX, "semihosting not supported for current target");
958 return ERROR_FAIL;
959 }
960
961 if (CMD_ARGC > 0)
962 {
963 int semihosting;
964
965 COMMAND_PARSE_ENABLE(CMD_ARGV[0], semihosting);
966
967 if (!target_was_examined(target))
968 {
969 LOG_ERROR("Target not examined yet");
970 return ERROR_FAIL;
971 }
972
973 if (arm->setup_semihosting(target, semihosting) != ERROR_OK) {
974 LOG_ERROR("failed to configure semihosting");
975 return ERROR_FAIL;
976 }
977
978 /* FIXME never let that "catch" be dropped! */
979 arm->is_semihosting = semihosting;
980 }
981
982 command_print(CMD_CTX, "semihosting is %s",
983 arm->is_semihosting
984 ? "enabled" : "disabled");
985
986 return ERROR_OK;
987 }
988
989 static const struct command_registration arm_exec_command_handlers[] = {
990 {
991 .name = "reg",
992 .handler = handle_armv4_5_reg_command,
993 .mode = COMMAND_EXEC,
994 .help = "display ARM core registers",
995 },
996 {
997 .name = "core_state",
998 .handler = handle_armv4_5_core_state_command,
999 .mode = COMMAND_EXEC,
1000 .usage = "['arm'|'thumb']",
1001 .help = "display/change ARM core state",
1002 },
1003 {
1004 .name = "disassemble",
1005 .handler = handle_arm_disassemble_command,
1006 .mode = COMMAND_EXEC,
1007 .usage = "address [count ['thumb']]",
1008 .help = "disassemble instructions",
1009 },
1010 {
1011 .name = "mcr",
1012 .mode = COMMAND_EXEC,
1013 .jim_handler = &jim_mcrmrc,
1014 .help = "write coprocessor register",
1015 .usage = "cpnum op1 CRn CRm op2 value",
1016 },
1017 {
1018 .name = "mrc",
1019 .jim_handler = &jim_mcrmrc,
1020 .help = "read coprocessor register",
1021 .usage = "cpnum op1 CRn CRm op2",
1022 },
1023 {
1024 .name = "semihosting",
1025 .handler = handle_arm_semihosting_command,
1026 .mode = COMMAND_EXEC,
1027 .usage = "['enable'|'disable']",
1028 .help = "activate support for semihosting operations",
1029 },
1030
1031 COMMAND_REGISTRATION_DONE
1032 };
1033 const struct command_registration arm_command_handlers[] = {
1034 {
1035 .name = "arm",
1036 .mode = COMMAND_ANY,
1037 .help = "ARM command group",
1038 .chain = arm_exec_command_handlers,
1039 },
1040 COMMAND_REGISTRATION_DONE
1041 };
1042
1043 int arm_get_gdb_reg_list(struct target *target,
1044 struct reg **reg_list[], int *reg_list_size)
1045 {
1046 struct arm *armv4_5 = target_to_arm(target);
1047 int i;
1048
1049 if (!is_arm_mode(armv4_5->core_mode))
1050 {
1051 LOG_ERROR("not a valid arm core mode - communication failure?");
1052 return ERROR_FAIL;
1053 }
1054
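/* GDB's traditional ARM layout: 16 core registers, 8 obsolete FPA
 * registers, FPS, then CPSR -- 26 entries in all.
 */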
1055 *reg_list_size = 26;
1056 *reg_list = malloc(sizeof(struct reg*) * (*reg_list_size));
1057
1058 for (i = 0; i < 16; i++)
1059 (*reg_list)[i] = arm_reg_current(armv4_5, i);
1060
1061 for (i = 16; i < 24; i++)
1062 (*reg_list)[i] = &arm_gdb_dummy_fp_reg;
1063
1064 (*reg_list)[24] = &arm_gdb_dummy_fps_reg;
1065 (*reg_list)[25] = armv4_5->cpsr;
1066
1067 return ERROR_OK;
1068 }
1069
1070 /* wait for execution to complete and check exit point */
1071 static int armv4_5_run_algorithm_completion(struct target *target, uint32_t exit_point, int timeout_ms, void *arch_info)
1072 {
1073 int retval;
1074 struct arm *armv4_5 = target_to_arm(target);
1075
1076 if ((retval = target_wait_state(target, TARGET_HALTED, timeout_ms)) != ERROR_OK)
1077 {
1078 return retval;
1079 }
1080 if (target->state != TARGET_HALTED)
1081 {
1082 if ((retval = target_halt(target)) != ERROR_OK)
1083 return retval;
1084 if ((retval = target_wait_state(target, TARGET_HALTED, 500)) != ERROR_OK)
1085 {
1086 return retval;
1087 }
1088 return ERROR_TARGET_TIMEOUT;
1089 }
1090
1091 /* fast exit: ARMv5+ code can use BKPT */
1092 if (exit_point && buf_get_u32(armv4_5->pc->value, 0, 32) != exit_point)
1093 {
1094 LOG_WARNING("target reentered debug state, but not at the desired exit point: 0x%4.4" PRIx32 "",
1095 buf_get_u32(armv4_5->pc->value, 0, 32));
1096 return ERROR_TARGET_TIMEOUT;
1097 }
1098
1099 return ERROR_OK;
1100 }
1101
1102 int armv4_5_run_algorithm_inner(struct target *target,
1103 int num_mem_params, struct mem_param *mem_params,
1104 int num_reg_params, struct reg_param *reg_params,
1105 uint32_t entry_point, uint32_t exit_point,
1106 int timeout_ms, void *arch_info,
1107 int (*run_it)(struct target *target, uint32_t exit_point,
1108 int timeout_ms, void *arch_info))
1109 {
1110 struct arm *armv4_5 = target_to_arm(target);
1111 struct arm_algorithm *arm_algorithm_info = arch_info;
1112 enum arm_state core_state = armv4_5->core_state;
1113 uint32_t context[17];
1114 uint32_t cpsr;
1115 int exit_breakpoint_size = 0;
1116 int i;
1117 int retval = ERROR_OK;
1118
1119 LOG_DEBUG("Running algorithm");
1120
1121 if (arm_algorithm_info->common_magic != ARM_COMMON_MAGIC)
1122 {
1123 LOG_ERROR("current target isn't an ARMV4/5 target");
1124 return ERROR_TARGET_INVALID;
1125 }
1126
1127 if (target->state != TARGET_HALTED)
1128 {
1129 LOG_WARNING("target not halted");
1130 return ERROR_TARGET_NOT_HALTED;
1131 }
1132
1133 if (!is_arm_mode(armv4_5->core_mode))
1134 {
1135 LOG_ERROR("not a valid arm core mode - communication failure?");
1136 return ERROR_FAIL;
1137 }
1138
1139 /* armv5 and later can terminate with BKPT instruction; less overhead */
1140 if (!exit_point && armv4_5->is_armv4)
1141 {
1142 LOG_ERROR("ARMv4 target needs HW breakpoint location");
1143 return ERROR_FAIL;
1144 }
1145
1146 /* save r0..pc, cpsr-or-spsr, and then cpsr-for-sure;
1147 * they'll be restored later.
1148 */
1149 for (i = 0; i <= 16; i++)
1150 {
1151 struct reg *r;
1152
1153 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1154 arm_algorithm_info->core_mode, i);
1155 if (!r->valid)
1156 armv4_5->read_core_reg(target, r, i,
1157 arm_algorithm_info->core_mode);
1158 context[i] = buf_get_u32(r->value, 0, 32);
1159 }
1160 cpsr = buf_get_u32(armv4_5->cpsr->value, 0, 32);
1161
1162 for (i = 0; i < num_mem_params; i++)
1163 {
1164 if ((retval = target_write_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
1165 {
1166 return retval;
1167 }
1168 }
1169
1170 for (i = 0; i < num_reg_params; i++)
1171 {
1172 struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
1173 if (!reg)
1174 {
1175 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
1176 return ERROR_INVALID_ARGUMENTS;
1177 }
1178
1179 if (reg->size != reg_params[i].size)
1180 {
1181 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
1182 return ERROR_INVALID_ARGUMENTS;
1183 }
1184
1185 if ((retval = armv4_5_set_core_reg(reg, reg_params[i].value)) != ERROR_OK)
1186 {
1187 return retval;
1188 }
1189 }
1190
1191 armv4_5->core_state = arm_algorithm_info->core_state;
1192 if (armv4_5->core_state == ARM_STATE_ARM)
1193 exit_breakpoint_size = 4;
1194 else if (armv4_5->core_state == ARM_STATE_THUMB)
1195 exit_breakpoint_size = 2;
1196 else
1197 {
1198 LOG_ERROR("BUG: can't execute algorithms when not in ARM or Thumb state");
1199 return ERROR_INVALID_ARGUMENTS;
1200 }
1201
1202 if (arm_algorithm_info->core_mode != ARM_MODE_ANY)
1203 {
1204 LOG_DEBUG("setting core_mode: 0x%2.2x",
1205 arm_algorithm_info->core_mode);
1206 buf_set_u32(armv4_5->cpsr->value, 0, 5,
1207 arm_algorithm_info->core_mode);
1208 armv4_5->cpsr->dirty = 1;
1209 armv4_5->cpsr->valid = 1;
1210 }
1211
1212 /* terminate using a hardware or (ARMv5+) software breakpoint */
1213 if (exit_point && (retval = breakpoint_add(target, exit_point,
1214 exit_breakpoint_size, BKPT_HARD)) != ERROR_OK)
1215 {
1216 LOG_ERROR("can't add HW breakpoint to terminate algorithm");
1217 return ERROR_TARGET_FAILURE;
1218 }
1219
1220 if ((retval = target_resume(target, 0, entry_point, 1, 1)) != ERROR_OK)
1221 {
1222 return retval;
1223 }
1224 int retvaltemp;
1225 retval = run_it(target, exit_point, timeout_ms, arch_info);
1226
1227 if (exit_point)
1228 breakpoint_remove(target, exit_point);
1229
1230 if (retval != ERROR_OK)
1231 return retval;
1232
1233 for (i = 0; i < num_mem_params; i++)
1234 {
1235 if (mem_params[i].direction != PARAM_OUT)
1236 if ((retvaltemp = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
1237 {
1238 retval = retvaltemp;
1239 }
1240 }
1241
1242 for (i = 0; i < num_reg_params; i++)
1243 {
1244 if (reg_params[i].direction != PARAM_OUT)
1245 {
1246
1247 struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
1248 if (!reg)
1249 {
1250 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
1251 retval = ERROR_INVALID_ARGUMENTS;
1252 continue;
1253 }
1254
1255 if (reg->size != reg_params[i].size)
1256 {
1257 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
1258 retval = ERROR_INVALID_ARGUMENTS;
1259 continue;
1260 }
1261
1262 buf_set_u32(reg_params[i].value, 0, 32, buf_get_u32(reg->value, 0, 32));
1263 }
1264 }
1265
1266 /* restore everything we saved before (17 or 18 registers) */
1267 for (i = 0; i <= 16; i++)
1268 {
1269 uint32_t regvalue;
1270 regvalue = buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).value, 0, 32);
1271 if (regvalue != context[i])
1272 {
1273 LOG_DEBUG("restoring register %s with value 0x%8.8" PRIx32 "", ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).name, context[i]);
1274 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).value, 0, 32, context[i]);
1275 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).valid = 1;
1276 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).dirty = 1;
1277 }
1278 }
1279
1280 arm_set_cpsr(armv4_5, cpsr);
1281 armv4_5->cpsr->dirty = 1;
1282
1283 armv4_5->core_state = core_state;
1284
1285 return retval;
1286 }
1287
1288 int armv4_5_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info)
1289 {
1290 return armv4_5_run_algorithm_inner(target, num_mem_params, mem_params, num_reg_params, reg_params, entry_point, exit_point, timeout_ms, arch_info, armv4_5_run_algorithm_completion);
1291 }
1292
1293 /**
1294 * Runs ARM code in the target to calculate a CRC32 checksum.
1295 *
1296 */
1297 int arm_checksum_memory(struct target *target,
1298 uint32_t address, uint32_t count, uint32_t *checksum)
1299 {
1300 struct working_area *crc_algorithm;
1301 struct arm_algorithm armv4_5_info;
1302 struct arm *armv4_5 = target_to_arm(target);
1303 struct reg_param reg_params[2];
1304 int retval;
1305 uint32_t i;
1306 uint32_t exit_var = 0;
1307
1308 /* see contrib/loaders/checksum/armv4_5_crc.s for src */
1309
1310 static const uint32_t arm_crc_code[] = {
1311 0xE1A02000, /* mov r2, r0 */
1312 0xE3E00000, /* mov r0, #0xffffffff */
1313 0xE1A03001, /* mov r3, r1 */
1314 0xE3A04000, /* mov r4, #0 */
1315 0xEA00000B, /* b ncomp */
1316 /* nbyte: */
1317 0xE7D21004, /* ldrb r1, [r2, r4] */
1318 0xE59F7030, /* ldr r7, CRC32XOR */
1319 0xE0200C01, /* eor r0, r0, r1, asl 24 */
1320 0xE3A05000, /* mov r5, #0 */
1321 /* loop: */
1322 0xE3500000, /* cmp r0, #0 */
1323 0xE1A06080, /* mov r6, r0, asl #1 */
1324 0xE2855001, /* add r5, r5, #1 */
1325 0xE1A00006, /* mov r0, r6 */
1326 0xB0260007, /* eorlt r0, r6, r7 */
1327 0xE3550008, /* cmp r5, #8 */
1328 0x1AFFFFF8, /* bne loop */
1329 0xE2844001, /* add r4, r4, #1 */
1330 /* ncomp: */
1331 0xE1540003, /* cmp r4, r3 */
1332 0x1AFFFFF1, /* bne nbyte */
1333 /* end: */
1334 0xe1200070, /* bkpt #0 */
1335 /* CRC32XOR: */
1336 0x04C11DB7 /* .word 0x04C11DB7 */
1337 };
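/* The stub computes a bit-at-a-time CRC-32 over 'count' bytes starting at
 * 'address': polynomial 0x04C11DB7, seeded with 0xFFFFFFFF, MSB first, no
 * final inversion. The result is left in r0 and read back through the
 * PARAM_IN_OUT "r0" parameter below.
 */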
1338
1339 retval = target_alloc_working_area(target,
1340 sizeof(arm_crc_code), &crc_algorithm);
1341 if (retval != ERROR_OK)
1342 return retval;
1343
1344 /* convert code into a buffer in target endianness */
1345 for (i = 0; i < ARRAY_SIZE(arm_crc_code); i++) {
1346 retval = target_write_u32(target,
1347 crc_algorithm->address + i * sizeof(uint32_t),
1348 arm_crc_code[i]);
1349 if (retval != ERROR_OK)
1350 return retval;
1351 }
1352
1353 armv4_5_info.common_magic = ARM_COMMON_MAGIC;
1354 armv4_5_info.core_mode = ARM_MODE_SVC;
1355 armv4_5_info.core_state = ARM_STATE_ARM;
1356
1357 init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
1358 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1359
1360 buf_set_u32(reg_params[0].value, 0, 32, address);
1361 buf_set_u32(reg_params[1].value, 0, 32, count);
1362
1363 /* 20 second timeout/megabyte */
1364 int timeout = 20000 * (1 + (count / (1024 * 1024)));
1365
1366 /* armv4 must exit using a hardware breakpoint */
1367 if (armv4_5->is_armv4)
1368 exit_var = crc_algorithm->address + sizeof(arm_crc_code) - 8;
1369
1370 retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
1371 crc_algorithm->address,
1372 exit_var,
1373 timeout, &armv4_5_info);
1374 if (retval != ERROR_OK) {
1375 LOG_ERROR("error executing ARM crc algorithm");
1376 destroy_reg_param(&reg_params[0]);
1377 destroy_reg_param(&reg_params[1]);
1378 target_free_working_area(target, crc_algorithm);
1379 return retval;
1380 }
1381
1382 *checksum = buf_get_u32(reg_params[0].value, 0, 32);
1383
1384 destroy_reg_param(&reg_params[0]);
1385 destroy_reg_param(&reg_params[1]);
1386
1387 target_free_working_area(target, crc_algorithm);
1388
1389 return ERROR_OK;
1390 }
1391
1392 /**
1393 * Runs ARM code in the target to check whether a memory block holds
1394 * all ones. NOR flash which has been erased, and thus may be written,
1395 * holds all ones.
1396 *
1397 */
1398 int arm_blank_check_memory(struct target *target,
1399 uint32_t address, uint32_t count, uint32_t *blank)
1400 {
1401 struct working_area *check_algorithm;
1402 struct reg_param reg_params[3];
1403 struct arm_algorithm armv4_5_info;
1404 struct arm *armv4_5 = target_to_arm(target);
1405 int retval;
1406 uint32_t i;
1407 uint32_t exit_var = 0;
1408
1409 static const uint32_t check_code[] = {
1410 /* loop: */
1411 0xe4d03001, /* ldrb r3, [r0], #1 */
1412 0xe0022003, /* and r2, r2, r3 */
1413 0xe2511001, /* subs r1, r1, #1 */
1414 0x1afffffb, /* bne loop */
1415 /* end: */
1416 0xe1200070, /* bkpt #0 */
1417 };
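/* r2 starts at 0xff and accumulates the AND of every byte read, so it can
 * only remain 0xff if the whole region is erased; the caller returns it
 * through *blank below.
 */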
1418
1419 /* make sure we have a working area */
1420 retval = target_alloc_working_area(target,
1421 sizeof(check_code), &check_algorithm);
1422 if (retval != ERROR_OK)
1423 return retval;
1424
1425 /* convert code into a buffer in target endianness */
1426 for (i = 0; i < ARRAY_SIZE(check_code); i++) {
1427 retval = target_write_u32(target,
1428 check_algorithm->address
1429 + i * sizeof(uint32_t),
1430 check_code[i]);
1431 if (retval != ERROR_OK)
1432 return retval;
1433 }
1434
1435 armv4_5_info.common_magic = ARM_COMMON_MAGIC;
1436 armv4_5_info.core_mode = ARM_MODE_SVC;
1437 armv4_5_info.core_state = ARM_STATE_ARM;
1438
1439 init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
1440 buf_set_u32(reg_params[0].value, 0, 32, address);
1441
1442 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1443 buf_set_u32(reg_params[1].value, 0, 32, count);
1444
1445 init_reg_param(&reg_params[2], "r2", 32, PARAM_IN_OUT);
1446 buf_set_u32(reg_params[2].value, 0, 32, 0xff);
1447
1448 /* armv4 must exit using a hardware breakpoint */
1449 if (armv4_5->is_armv4)
1450 exit_var = check_algorithm->address + sizeof(check_code) - 4;
1451
1452 retval = target_run_algorithm(target, 0, NULL, 3, reg_params,
1453 check_algorithm->address,
1454 exit_var,
1455 10000, &armv4_5_info);
1456 if (retval != ERROR_OK) {
1457 destroy_reg_param(&reg_params[0]);
1458 destroy_reg_param(&reg_params[1]);
1459 destroy_reg_param(&reg_params[2]);
1460 target_free_working_area(target, check_algorithm);
1461 return retval;
1462 }
1463
1464 *blank = buf_get_u32(reg_params[2].value, 0, 32);
1465
1466 destroy_reg_param(&reg_params[0]);
1467 destroy_reg_param(&reg_params[1]);
1468 destroy_reg_param(&reg_params[2]);
1469
1470 target_free_working_area(target, check_algorithm);
1471
1472 return ERROR_OK;
1473 }
1474
1475 static int arm_full_context(struct target *target)
1476 {
1477 struct arm *armv4_5 = target_to_arm(target);
1478 unsigned num_regs = armv4_5->core_cache->num_regs;
1479 struct reg *reg = armv4_5->core_cache->reg_list;
1480 int retval = ERROR_OK;
1481
1482 for (; num_regs && retval == ERROR_OK; num_regs--, reg++) {
1483 if (reg->valid)
1484 continue;
1485 retval = armv4_5_get_core_reg(reg);
1486 }
1487 return retval;
1488 }
1489
1490 static int arm_default_mrc(struct target *target, int cpnum,
1491 uint32_t op1, uint32_t op2,
1492 uint32_t CRn, uint32_t CRm,
1493 uint32_t *value)
1494 {
1495 LOG_ERROR("%s doesn't implement MRC", target_type_name(target));
1496 return ERROR_FAIL;
1497 }
1498
1499 static int arm_default_mcr(struct target *target, int cpnum,
1500 uint32_t op1, uint32_t op2,
1501 uint32_t CRn, uint32_t CRm,
1502 uint32_t value)
1503 {
1504 LOG_ERROR("%s doesn't implement MCR", target_type_name(target));
1505 return ERROR_FAIL;
1506 }
1507
1508 int arm_init_arch_info(struct target *target, struct arm *armv4_5)
1509 {
1510 target->arch_info = armv4_5;
1511 armv4_5->target = target;
1512
1513 armv4_5->common_magic = ARM_COMMON_MAGIC;
1514
1515 /* core_type may be overridden by subtype logic */
1516 if (armv4_5->core_type != ARM_MODE_THREAD) {
1517 armv4_5->core_type = ARM_MODE_ANY;
1518 arm_set_cpsr(armv4_5, ARM_MODE_USR);
1519 }
1520
1521 /* default full_context() has no core-specific optimizations */
1522 if (!armv4_5->full_context && armv4_5->read_core_reg)
1523 armv4_5->full_context = arm_full_context;
1524
1525 if (!armv4_5->mrc)
1526 armv4_5->mrc = arm_default_mrc;
1527 if (!armv4_5->mcr)
1528 armv4_5->mcr = arm_default_mcr;
1529
1530 return ERROR_OK;
1531 }
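/* Typical use by a core-specific driver (sketch only; the private state and
 * callback names below are hypothetical):
 *
 *	struct arm *arm = &my_target_private->arm;	// hypothetical container
 *	arm->read_core_reg = my_read_core_reg;		// hypothetical callbacks
 *	arm->write_core_reg = my_write_core_reg;
 *	arm->full_context = NULL;			// falls back to arm_full_context()
 *	arm_init_arch_info(target, arm);
 *	arm_build_reg_cache(target, arm);		// once a register cache is wanted
 */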
