semihosting: move semihosting cmd to arm cmd group
[openocd.git] src/target/armv4_5.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2008 by Spencer Oliver *
6 * spen@spen-soft.co.uk *
7 * *
8 * Copyright (C) 2008 by Oyvind Harboe *
9 * oyvind.harboe@zylin.com *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "arm.h"
31 #include "armv4_5.h"
32 #include "arm_jtag.h"
33 #include "breakpoints.h"
34 #include "arm_disassembler.h"
35 #include <helper/binarybuffer.h>
36 #include "algorithm.h"
37 #include "register.h"
38
39
40 /* offsets into armv4_5 core register cache */
41 enum {
42 // ARMV4_5_CPSR = 31,
43 ARMV4_5_SPSR_FIQ = 32,
44 ARMV4_5_SPSR_IRQ = 33,
45 ARMV4_5_SPSR_SVC = 34,
46 ARMV4_5_SPSR_ABT = 35,
47 ARMV4_5_SPSR_UND = 36,
48 ARM_SPSR_MON = 39,
49 };
50
51 static const uint8_t arm_usr_indices[17] = {
52 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ARMV4_5_CPSR,
53 };
54
55 static const uint8_t arm_fiq_indices[8] = {
56 16, 17, 18, 19, 20, 21, 22, ARMV4_5_SPSR_FIQ,
57 };
58
59 static const uint8_t arm_irq_indices[3] = {
60 23, 24, ARMV4_5_SPSR_IRQ,
61 };
62
63 static const uint8_t arm_svc_indices[3] = {
64 25, 26, ARMV4_5_SPSR_SVC,
65 };
66
67 static const uint8_t arm_abt_indices[3] = {
68 27, 28, ARMV4_5_SPSR_ABT,
69 };
70
71 static const uint8_t arm_und_indices[3] = {
72 29, 30, ARMV4_5_SPSR_UND,
73 };
74
75 static const uint8_t arm_mon_indices[3] = {
76 37, 38, ARM_SPSR_MON,
77 };
78
79 static const struct {
80 const char *name;
81 unsigned short psr;
82 /* For user and system modes, these list indices for all registers;
83 * otherwise they're just indices for the shadow registers and SPSR.
84 */
85 unsigned short n_indices;
86 const uint8_t *indices;
87 } arm_mode_data[] = {
88 /* Seven modes are standard from ARM7 on. "System" and "User" share
89 * the same registers; other modes shadow from 3 to 8 registers.
90 */
91 {
92 .name = "User",
93 .psr = ARM_MODE_USR,
94 .n_indices = ARRAY_SIZE(arm_usr_indices),
95 .indices = arm_usr_indices,
96 },
97 {
98 .name = "FIQ",
99 .psr = ARM_MODE_FIQ,
100 .n_indices = ARRAY_SIZE(arm_fiq_indices),
101 .indices = arm_fiq_indices,
102 },
103 {
104 .name = "Supervisor",
105 .psr = ARM_MODE_SVC,
106 .n_indices = ARRAY_SIZE(arm_svc_indices),
107 .indices = arm_svc_indices,
108 },
109 {
110 .name = "Abort",
111 .psr = ARM_MODE_ABT,
112 .n_indices = ARRAY_SIZE(arm_abt_indices),
113 .indices = arm_abt_indices,
114 },
115 {
116 .name = "IRQ",
117 .psr = ARM_MODE_IRQ,
118 .n_indices = ARRAY_SIZE(arm_irq_indices),
119 .indices = arm_irq_indices,
120 },
121 {
122 .name = "Undefined instruction",
123 .psr = ARM_MODE_UND,
124 .n_indices = ARRAY_SIZE(arm_und_indices),
125 .indices = arm_und_indices,
126 },
127 {
128 .name = "System",
129 .psr = ARM_MODE_SYS,
130 .n_indices = ARRAY_SIZE(arm_usr_indices),
131 .indices = arm_usr_indices,
132 },
133 /* TrustZone "Security Extensions" add a secure monitor mode.
134 * This is distinct from a "debug monitor" which can support
135 * non-halting debug, in conjunction with some debuggers.
136 */
137 {
138 .name = "Secure Monitor",
139 .psr = ARM_MODE_MON,
140 .n_indices = ARRAY_SIZE(arm_mon_indices),
141 .indices = arm_mon_indices,
142 },
143 };
144
145 /** Map PSR mode bits to the name of an ARM processor operating mode. */
146 const char *arm_mode_name(unsigned psr_mode)
147 {
148 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
149 if (arm_mode_data[i].psr == psr_mode)
150 return arm_mode_data[i].name;
151 }
152 LOG_ERROR("unrecognized psr mode: %#02x", psr_mode);
153 return "UNRECOGNIZED";
154 }
155
156 /** Return true iff the parameter denotes a valid ARM processor mode. */
157 bool is_arm_mode(unsigned psr_mode)
158 {
159 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
160 if (arm_mode_data[i].psr == psr_mode)
161 return true;
162 }
163 return false;
164 }
165
166 /** Map PSR mode bits to linear number indexing armv4_5_core_reg_map */
167 int arm_mode_to_number(enum arm_mode mode)
168 {
169 switch (mode) {
170 case ARM_MODE_ANY:
171 /* map MODE_ANY to user mode */
172 case ARM_MODE_USR:
173 return 0;
174 case ARM_MODE_FIQ:
175 return 1;
176 case ARM_MODE_IRQ:
177 return 2;
178 case ARM_MODE_SVC:
179 return 3;
180 case ARM_MODE_ABT:
181 return 4;
182 case ARM_MODE_UND:
183 return 5;
184 case ARM_MODE_SYS:
185 return 6;
186 case ARM_MODE_MON:
187 return 7;
188 default:
189 LOG_ERROR("invalid mode value encountered %d", mode);
190 return -1;
191 }
192 }
193
194 /** Map linear number indexing armv4_5_core_reg_map to PSR mode bits. */
195 enum arm_mode armv4_5_number_to_mode(int number)
196 {
197 switch (number) {
198 case 0:
199 return ARM_MODE_USR;
200 case 1:
201 return ARM_MODE_FIQ;
202 case 2:
203 return ARM_MODE_IRQ;
204 case 3:
205 return ARM_MODE_SVC;
206 case 4:
207 return ARM_MODE_ABT;
208 case 5:
209 return ARM_MODE_UND;
210 case 6:
211 return ARM_MODE_SYS;
212 case 7:
213 return ARM_MODE_MON;
214 default:
215 LOG_ERROR("mode index out of bounds %d", number);
216 return ARM_MODE_ANY;
217 }
218 }
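/* Hedged illustration (not part of the upstream file): how the three helpers
 * above fit together.  The values follow directly from the switch statements
 * and the arm_mode_data[] table; the function name is made up.
 */
#if 0	/* illustration only, never built */
static void arm_mode_mapping_example(void)
{
	int num = arm_mode_to_number(ARM_MODE_IRQ);		/* 2 */
	enum arm_mode mode = armv4_5_number_to_mode(num);	/* ARM_MODE_IRQ */
	const char *name = arm_mode_name(mode);			/* "IRQ" */

	(void) name;
}
#endif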
219
220 const char *arm_state_strings[] =
221 {
222 "ARM", "Thumb", "Jazelle", "ThumbEE",
223 };
224
225 /* Templates for ARM core registers.
226 *
227 * NOTE: offsets in this table are coupled to the arm_mode_data
228 * table above, the armv4_5_core_reg_map array below, and also to
229 * the ARMV4_5_CPSR symbol (which should vanish after ARM11 updates).
230 */
231 static const struct {
232 /* The name is used for e.g. the "regs" command. */
233 const char *name;
234
235 /* The {cookie, mode} tuple uniquely identifies one register.
236 * In a given mode, cookies 0..15 map to registers R0..R15,
237 * with R13..R15 usually called SP, LR, PC.
238 *
239 * MODE_ANY is used as *input* to the mapping, and indicates
240 * various special cases (sigh) and errors.
241 *
242 * Cookie 16 is (currently) confusing, since it indicates
243 * CPSR -or- SPSR depending on whether 'mode' is MODE_ANY.
244 * (Exception modes have both CPSR and SPSR registers ...)
245 */
246 unsigned cookie;
247 enum arm_mode mode;
248 } arm_core_regs[] = {
249 /* IMPORTANT: we guarantee that the first eight cached registers
250 * correspond to r0..r7, and that index 15 holds the PC, so that callers
251 * don't need to map them.
252 */
253 { .name = "r0", .cookie = 0, .mode = ARM_MODE_ANY, },
254 { .name = "r1", .cookie = 1, .mode = ARM_MODE_ANY, },
255 { .name = "r2", .cookie = 2, .mode = ARM_MODE_ANY, },
256 { .name = "r3", .cookie = 3, .mode = ARM_MODE_ANY, },
257 { .name = "r4", .cookie = 4, .mode = ARM_MODE_ANY, },
258 { .name = "r5", .cookie = 5, .mode = ARM_MODE_ANY, },
259 { .name = "r6", .cookie = 6, .mode = ARM_MODE_ANY, },
260 { .name = "r7", .cookie = 7, .mode = ARM_MODE_ANY, },
261
262 /* NOTE: regs 8..12 might be shadowed by FIQ ... flagging
263 * them as MODE_ANY creates special cases. (ANY means
264 * "not mapped" elsewhere; here it's "everything but FIQ".)
265 */
266 { .name = "r8", .cookie = 8, .mode = ARM_MODE_ANY, },
267 { .name = "r9", .cookie = 9, .mode = ARM_MODE_ANY, },
268 { .name = "r10", .cookie = 10, .mode = ARM_MODE_ANY, },
269 { .name = "r11", .cookie = 11, .mode = ARM_MODE_ANY, },
270 { .name = "r12", .cookie = 12, .mode = ARM_MODE_ANY, },
271
272 /* NOTE all MODE_USR registers are equivalent to MODE_SYS ones */
273 { .name = "sp_usr", .cookie = 13, .mode = ARM_MODE_USR, },
274 { .name = "lr_usr", .cookie = 14, .mode = ARM_MODE_USR, },
275
276 /* guaranteed to be at index 15 */
277 { .name = "pc", .cookie = 15, .mode = ARM_MODE_ANY, },
278
279 { .name = "r8_fiq", .cookie = 8, .mode = ARM_MODE_FIQ, },
280 { .name = "r9_fiq", .cookie = 9, .mode = ARM_MODE_FIQ, },
281 { .name = "r10_fiq", .cookie = 10, .mode = ARM_MODE_FIQ, },
282 { .name = "r11_fiq", .cookie = 11, .mode = ARM_MODE_FIQ, },
283 { .name = "r12_fiq", .cookie = 12, .mode = ARM_MODE_FIQ, },
284
285 { .name = "sp_fiq", .cookie = 13, .mode = ARM_MODE_FIQ, },
286 { .name = "lr_fiq", .cookie = 14, .mode = ARM_MODE_FIQ, },
287
288 { .name = "sp_irq", .cookie = 13, .mode = ARM_MODE_IRQ, },
289 { .name = "lr_irq", .cookie = 14, .mode = ARM_MODE_IRQ, },
290
291 { .name = "sp_svc", .cookie = 13, .mode = ARM_MODE_SVC, },
292 { .name = "lr_svc", .cookie = 14, .mode = ARM_MODE_SVC, },
293
294 { .name = "sp_abt", .cookie = 13, .mode = ARM_MODE_ABT, },
295 { .name = "lr_abt", .cookie = 14, .mode = ARM_MODE_ABT, },
296
297 { .name = "sp_und", .cookie = 13, .mode = ARM_MODE_UND, },
298 { .name = "lr_und", .cookie = 14, .mode = ARM_MODE_UND, },
299
300 { .name = "cpsr", .cookie = 16, .mode = ARM_MODE_ANY, },
301 { .name = "spsr_fiq", .cookie = 16, .mode = ARM_MODE_FIQ, },
302 { .name = "spsr_irq", .cookie = 16, .mode = ARM_MODE_IRQ, },
303 { .name = "spsr_svc", .cookie = 16, .mode = ARM_MODE_SVC, },
304 { .name = "spsr_abt", .cookie = 16, .mode = ARM_MODE_ABT, },
305 { .name = "spsr_und", .cookie = 16, .mode = ARM_MODE_UND, },
306
307 { .name = "sp_mon", .cookie = 13, .mode = ARM_MODE_MON, },
308 { .name = "lr_mon", .cookie = 14, .mode = ARM_MODE_MON, },
309 { .name = "spsr_mon", .cookie = 16, .mode = ARM_MODE_MON, },
310 };
311
312 /* map core mode (USR, FIQ, ...) and register number to
313 * indices into the register cache
314 */
315 const int armv4_5_core_reg_map[8][17] =
316 {
317 { /* USR */
318 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
319 },
320 { /* FIQ (8 shadows of USR, vs normal 3) */
321 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 15, 32
322 },
323 { /* IRQ */
324 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 23, 24, 15, 33
325 },
326 { /* SVC */
327 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 25, 26, 15, 34
328 },
329 { /* ABT */
330 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 27, 28, 15, 35
331 },
332 { /* UND */
333 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 29, 30, 15, 36
334 },
335 { /* SYS (same registers as USR) */
336 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
337 },
338 { /* MON */
339 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 37, 38, 15, 39,
340 }
341 };
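/* Hedged illustration (not part of the upstream file): how the map above is
 * read.  The row is arm_mode_to_number(mode), the column is the register
 * number 0..16 (16 selecting CPSR/SPSR), and the result is an index into the
 * cache built by arm_build_reg_cache() below.  The function name is made up.
 */
#if 0	/* illustration only, never built */
static struct reg *arm_banked_sp_irq_example(struct arm *arm)
{
	/* row 2 (IRQ), column 13 (SP) yields cache index 23, i.e. "sp_irq" */
	int idx = armv4_5_core_reg_map[arm_mode_to_number(ARM_MODE_IRQ)][13];

	return arm->core_cache->reg_list + idx;
}
#endif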
342
343 /**
344 * Configures host-side ARM records to reflect the specified CPSR.
345 * Later, code can use arm_reg_current() to map register numbers
346 * according to how they are exposed by this mode.
347 */
348 void arm_set_cpsr(struct arm *arm, uint32_t cpsr)
349 {
350 enum arm_mode mode = cpsr & 0x1f;
351 int num;
352
353 /* NOTE: this may be called very early, before the register
354 * cache is set up. We can't defend against many errors, in
355 * particular against CPSRs that aren't valid *here* ...
356 */
357 if (arm->cpsr) {
358 buf_set_u32(arm->cpsr->value, 0, 32, cpsr);
359 arm->cpsr->valid = 1;
360 arm->cpsr->dirty = 0;
361 }
362
363 arm->core_mode = mode;
364
365 /* mode_to_number() warned; set up a somewhat-sane mapping */
366 num = arm_mode_to_number(mode);
367 if (num < 0) {
368 mode = ARM_MODE_USR;
369 num = 0;
370 }
371
372 arm->map = &armv4_5_core_reg_map[num][0];
373 arm->spsr = (mode == ARM_MODE_USR || mode == ARM_MODE_SYS)
374 ? NULL
375 : arm->core_cache->reg_list + arm->map[16];
376
377 /* Older ARMs won't have the J bit */
378 enum arm_state state;
379
380 if (cpsr & (1 << 5)) { /* T */
381 if (cpsr & (1 << 24)) { /* J */
382 LOG_WARNING("ThumbEE -- incomplete support");
383 state = ARM_STATE_THUMB_EE;
384 } else
385 state = ARM_STATE_THUMB;
386 } else {
387 if (cpsr & (1 << 24)) { /* J */
388 LOG_ERROR("Jazelle state handling is BROKEN!");
389 state = ARM_STATE_JAZELLE;
390 } else
391 state = ARM_STATE_ARM;
392 }
393 arm->core_state = state;
394
395 LOG_DEBUG("set CPSR %#8.8x: %s mode, %s state", (unsigned) cpsr,
396 arm_mode_name(mode),
397 arm_state_strings[arm->core_state]);
398 }
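/* Hedged example (not part of the upstream file): decoding a typical
 * reset-time CPSR.  0x000000d3 carries mode bits 0x13 (Supervisor) with
 * T = 0 and J = 0, so after the call arm->core_mode is ARM_MODE_SVC,
 * arm->core_state is ARM_STATE_ARM, arm->map points at the SVC row of
 * armv4_5_core_reg_map, and arm->spsr refers to the cached spsr_svc.
 */
#if 0	/* illustration only, never built */
static void arm_set_cpsr_example(struct arm *arm)
{
	arm_set_cpsr(arm, 0x000000d3);
	/* arm->core_mode == ARM_MODE_SVC, arm->core_state == ARM_STATE_ARM */
}
#endif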
399
400 /**
401 * Returns handle to the register currently mapped to a given number.
402 * Someone must have called arm_set_cpsr() before.
403 *
404 * \param arm This core's state and registers are used.
405 * \param regnum From 0..15 corresponding to R0..R14 and PC.
406 * Note that R0..R7 don't require mapping; you may access those
407 * as the first eight entries in the register cache. Likewise
408 * R15 (PC) doesn't need mapping; you may also access it directly.
409 * However, R8..R14, and SPSR (arm->spsr) *must* be mapped.
410 * CPSR (arm->cpsr) is also not mapped.
411 */
412 struct reg *arm_reg_current(struct arm *arm, unsigned regnum)
413 {
414 struct reg *r;
415
416 if (regnum > 16)
417 return NULL;
418
419 r = arm->core_cache->reg_list + arm->map[regnum];
420
421 /* e.g. invalid CPSR said "secure monitor" mode on a core
422 * that doesn't support it...
423 */
424 if (!r) {
425 LOG_ERROR("Invalid CPSR mode");
426 r = arm->core_cache->reg_list + regnum;
427 }
428
429 return r;
430 }
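/* Hedged example (not part of the upstream file): reading the banked link
 * register for whatever mode the last arm_set_cpsr() call selected, using the
 * same buf_get_u32()/read_core_reg() helpers the rest of this file relies on.
 * The function name is made up.
 */
#if 0	/* illustration only, never built */
static uint32_t arm_current_lr_example(struct arm *arm)
{
	struct reg *lr = arm_reg_current(arm, 14);	/* LR is r14 */

	if (!lr->valid)
		arm->read_core_reg(arm->target, lr, 14, arm->core_mode);
	return buf_get_u32(lr->value, 0, 32);
}
#endif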
431
432 static const uint8_t arm_gdb_dummy_fp_value[12];
433
434 /**
435 * Dummy FPA registers are required to support GDB on ARM.
436 * Register packets require eight obsolete FPA register values.
437 * Modern ARM cores use Vector Floating Point (VFP), if they
438 * have any floating point support. VFP is not FPA-compatible.
439 */
440 struct reg arm_gdb_dummy_fp_reg =
441 {
442 .name = "GDB dummy FPA register",
443 .value = (uint8_t *) arm_gdb_dummy_fp_value,
444 .valid = 1,
445 .size = 96,
446 };
447
448 static const uint8_t arm_gdb_dummy_fps_value[4];
449
450 /**
451 * Dummy FPA status registers are required to support GDB on ARM.
452 * Register packets require an obsolete FPA status register.
453 */
454 struct reg arm_gdb_dummy_fps_reg =
455 {
456 .name = "GDB dummy FPA status register",
457 .value = (uint8_t *) arm_gdb_dummy_fps_value,
458 .valid = 1,
459 .size = 32,
460 };
461
462 static void arm_gdb_dummy_init(void) __attribute__ ((constructor));
463
464 static void arm_gdb_dummy_init(void)
465 {
466 register_init_dummy(&arm_gdb_dummy_fp_reg);
467 register_init_dummy(&arm_gdb_dummy_fps_reg);
468 }
469
470 static int armv4_5_get_core_reg(struct reg *reg)
471 {
472 int retval;
473 struct arm_reg *armv4_5 = reg->arch_info;
474 struct target *target = armv4_5->target;
475
476 if (target->state != TARGET_HALTED)
477 {
478 LOG_ERROR("Target not halted");
479 return ERROR_TARGET_NOT_HALTED;
480 }
481
482 retval = armv4_5->armv4_5_common->read_core_reg(target, reg, armv4_5->num, armv4_5->mode);
483 if (retval == ERROR_OK) {
484 reg->valid = 1;
485 reg->dirty = 0;
486 }
487
488 return retval;
489 }
490
491 static int armv4_5_set_core_reg(struct reg *reg, uint8_t *buf)
492 {
493 struct arm_reg *armv4_5 = reg->arch_info;
494 struct target *target = armv4_5->target;
495 struct arm *armv4_5_target = target_to_arm(target);
496 uint32_t value = buf_get_u32(buf, 0, 32);
497
498 if (target->state != TARGET_HALTED)
499 {
500 LOG_ERROR("Target not halted");
501 return ERROR_TARGET_NOT_HALTED;
502 }
503
504 /* Except for CPSR, the "reg" command exposes a writeback model
505 * for the register cache.
506 */
507 if (reg == armv4_5_target->cpsr) {
508 arm_set_cpsr(armv4_5_target, value);
509
510 /* Older cores need help to be in ARM mode during halt
511 * mode debug, so we clear the J and T bits if we flush.
512 * For newer cores (v6/v7a/v7r) we don't need that, but
513 * it won't hurt since CPSR is always flushed anyway.
514 */
515 if (armv4_5_target->core_mode !=
516 (enum arm_mode)(value & 0x1f)) {
517 LOG_DEBUG("changing ARM core mode to '%s'",
518 arm_mode_name(value & 0x1f));
519 value &= ~((1 << 24) | (1 << 5));
520 armv4_5_target->write_core_reg(target, reg,
521 16, ARM_MODE_ANY, value);
522 }
523 } else {
524 buf_set_u32(reg->value, 0, 32, value);
525 reg->valid = 1;
526 }
527 reg->dirty = 1;
528
529 return ERROR_OK;
530 }
531
532 static const struct reg_arch_type arm_reg_type = {
533 .get = armv4_5_get_core_reg,
534 .set = armv4_5_set_core_reg,
535 };
536
537 struct reg_cache *arm_build_reg_cache(struct target *target, struct arm *arm)
538 {
539 int num_regs = ARRAY_SIZE(arm_core_regs);
540 struct reg_cache *cache = malloc(sizeof(struct reg_cache));
541 struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
542 struct arm_reg *arch_info = calloc(num_regs, sizeof(struct arm_reg));
543 int i;
544
545 if (!cache || !reg_list || !arch_info) {
546 free(cache);
547 free(reg_list);
548 free(arch_info);
549 return NULL;
550 }
551
552 cache->name = "ARM registers";
553 cache->next = NULL;
554 cache->reg_list = reg_list;
555 cache->num_regs = 0;
556
557 for (i = 0; i < num_regs; i++)
558 {
559 /* Skip registers this core doesn't expose */
560 if (arm_core_regs[i].mode == ARM_MODE_MON
561 && arm->core_type != ARM_MODE_MON)
562 continue;
563
564 /* REVISIT handle Cortex-M, which only shadows R13/SP */
565
566 arch_info[i].num = arm_core_regs[i].cookie;
567 arch_info[i].mode = arm_core_regs[i].mode;
568 arch_info[i].target = target;
569 arch_info[i].armv4_5_common = arm;
570
571 reg_list[i].name = (char *) arm_core_regs[i].name;
572 reg_list[i].size = 32;
573 reg_list[i].value = &arch_info[i].value;
574 reg_list[i].type = &arm_reg_type;
575 reg_list[i].arch_info = &arch_info[i];
576
577 cache->num_regs++;
578 }
579
580 arm->pc = reg_list + 15;
581 arm->cpsr = reg_list + ARMV4_5_CPSR;
582 arm->core_cache = cache;
583 return cache;
584 }
585
586 int arm_arch_state(struct target *target)
587 {
588 struct arm *armv4_5 = target_to_arm(target);
589
590 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
591 {
592 LOG_ERROR("BUG: called for a non-ARM target");
593 return ERROR_FAIL;
594 }
595
596 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
597 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
598 arm_state_strings[armv4_5->core_state],
599 debug_reason_name(target),
600 arm_mode_name(armv4_5->core_mode),
601 buf_get_u32(armv4_5->cpsr->value, 0, 32),
602 buf_get_u32(armv4_5->pc->value, 0, 32),
603 armv4_5->is_semihosting ? ", semihosting" : "");
604
605 return ERROR_OK;
606 }
607
608 #define ARMV4_5_CORE_REG_MODENUM(cache, mode, num) \
609 cache->reg_list[armv4_5_core_reg_map[mode][num]]
610
611 COMMAND_HANDLER(handle_armv4_5_reg_command)
612 {
613 struct target *target = get_current_target(CMD_CTX);
614 struct arm *armv4_5 = target_to_arm(target);
615 unsigned num_regs;
616 struct reg *regs;
617
618 if (!is_arm(armv4_5))
619 {
620 command_print(CMD_CTX, "current target isn't an ARM");
621 return ERROR_FAIL;
622 }
623
624 if (target->state != TARGET_HALTED)
625 {
626 command_print(CMD_CTX, "error: target must be halted for register accesses");
627 return ERROR_FAIL;
628 }
629
630 if (armv4_5->core_type != ARM_MODE_ANY)
631 {
632 command_print(CMD_CTX, "Microcontroller Profile not supported - use standard reg cmd");
633 return ERROR_OK;
634 }
635
636 if (!is_arm_mode(armv4_5->core_mode))
637 return ERROR_FAIL;
638
639 if (!armv4_5->full_context) {
640 command_print(CMD_CTX, "error: target doesn't support %s",
641 CMD_NAME);
642 return ERROR_FAIL;
643 }
644
645 num_regs = armv4_5->core_cache->num_regs;
646 regs = armv4_5->core_cache->reg_list;
647
648 for (unsigned mode = 0; mode < ARRAY_SIZE(arm_mode_data); mode++) {
649 const char *name;
650 char *sep = "\n";
651 char *shadow = "";
652
653 /* label this bank of registers (or shadows) */
654 switch (arm_mode_data[mode].psr) {
655 case ARM_MODE_SYS:
656 continue;
657 case ARM_MODE_USR:
658 name = "System and User";
659 sep = "";
660 break;
661 case ARM_MODE_MON:
662 if (armv4_5->core_type != ARM_MODE_MON)
663 continue;
664 /* FALLTHROUGH */
665 default:
666 name = arm_mode_data[mode].name;
667 shadow = "shadow ";
668 break;
669 }
670 command_print(CMD_CTX, "%s%s mode %sregisters",
671 sep, name, shadow);
672
673 /* display N rows of up to 4 registers each */
674 for (unsigned i = 0; i < arm_mode_data[mode].n_indices;) {
675 char output[80];
676 int output_len = 0;
677
678 for (unsigned j = 0; j < 4; j++, i++) {
679 uint32_t value;
680 struct reg *reg = regs;
681
682 if (i >= arm_mode_data[mode].n_indices)
683 break;
684
685 reg += arm_mode_data[mode].indices[i];
686
687 /* REVISIT be smarter about faults... */
688 if (!reg->valid)
689 armv4_5->full_context(target);
690
691 value = buf_get_u32(reg->value, 0, 32);
692 output_len += snprintf(output + output_len,
693 sizeof(output) - output_len,
694 "%8s: %8.8" PRIx32 " ",
695 reg->name, value);
696 }
697 command_print(CMD_CTX, "%s", output);
698 }
699 }
700
701 return ERROR_OK;
702 }
703
704 COMMAND_HANDLER(handle_armv4_5_core_state_command)
705 {
706 struct target *target = get_current_target(CMD_CTX);
707 struct arm *armv4_5 = target_to_arm(target);
708
709 if (!is_arm(armv4_5))
710 {
711 command_print(CMD_CTX, "current target isn't an ARM");
712 return ERROR_FAIL;
713 }
714
715 if (armv4_5->core_type == ARM_MODE_THREAD)
716 {
717 /* armv7m not supported */
718 command_print(CMD_CTX, "Unsupported Command");
719 return ERROR_OK;
720 }
721
722 if (CMD_ARGC > 0)
723 {
724 if (strcmp(CMD_ARGV[0], "arm") == 0)
725 {
726 armv4_5->core_state = ARM_STATE_ARM;
727 }
728 if (strcmp(CMD_ARGV[0], "thumb") == 0)
729 {
730 armv4_5->core_state = ARM_STATE_THUMB;
731 }
732 }
733
734 command_print(CMD_CTX, "core state: %s", arm_state_strings[armv4_5->core_state]);
735
736 return ERROR_OK;
737 }
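/* Hedged usage note (not part of the upstream file): typical invocations of
 * the handler above from the OpenOCD command line:
 *
 *	arm core_state		# report the current state
 *	arm core_state arm	# record ARM state
 *	arm core_state thumb	# record Thumb state
 */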
738
739 COMMAND_HANDLER(handle_arm_disassemble_command)
740 {
741 int retval = ERROR_OK;
742 struct target *target = get_current_target(CMD_CTX);
743 struct arm *arm = target ? target_to_arm(target) : NULL;
744 uint32_t address;
745 int count = 1;
746 int thumb = 0;
747
748 if (!is_arm(arm)) {
749 command_print(CMD_CTX, "current target isn't an ARM");
750 return ERROR_FAIL;
751 }
752
753 if (arm->core_type == ARM_MODE_THREAD)
754 {
755 /* armv7m is always thumb mode */
756 thumb = 1;
757 }
758
759 switch (CMD_ARGC) {
760 case 3:
761 if (strcmp(CMD_ARGV[2], "thumb") != 0)
762 goto usage;
763 thumb = 1;
764 /* FALL THROUGH */
765 case 2:
766 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
767 /* FALL THROUGH */
768 case 1:
769 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
770 if (address & 0x01) {
771 if (!thumb) {
772 command_print(CMD_CTX, "Disassemble as Thumb");
773 thumb = 1;
774 }
775 address &= ~1;
776 }
777 break;
778 default:
779 usage:
780 command_print(CMD_CTX,
781 "usage: arm disassemble <address> [<count> ['thumb']]");
782 count = 0;
783 retval = ERROR_FAIL;
784 }
785
786 while (count-- > 0) {
787 struct arm_instruction cur_instruction;
788
789 if (thumb) {
790 /* Always use Thumb2 disassembly for best handling
791 * of 32-bit BL/BLX, and to work with newer cores
792 * (some ARMv6, all ARMv7) that use Thumb2.
793 */
794 retval = thumb2_opcode(target, address,
795 &cur_instruction);
796 if (retval != ERROR_OK)
797 break;
798 } else {
799 uint32_t opcode;
800
801 retval = target_read_u32(target, address, &opcode);
802 if (retval != ERROR_OK)
803 break;
804 retval = arm_evaluate_opcode(opcode, address,
805 &cur_instruction);
806 if (retval != ERROR_OK)
807 break;
808 }
809 command_print(CMD_CTX, "%s", cur_instruction.text);
810 address += cur_instruction.instruction_size;
811 }
812
813 return retval;
814 }
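/* Hedged usage note (not part of the upstream file): the handler above is
 * reached from the OpenOCD command line as, for example,
 *
 *	arm disassemble 0x00008000 10
 *	arm disassemble 0x00008000 10 thumb
 *
 * The addresses are placeholders; a set bit 0 in the address also forces
 * Thumb decoding, as the parsing code above shows.
 */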
815
816 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
817 {
818 struct command_context *context;
819 struct target *target;
820 struct arm *arm;
821 int retval;
822
823 context = Jim_GetAssocData(interp, "context");
824 if (context == NULL) {
825 LOG_ERROR("%s: no command context", __func__);
826 return JIM_ERR;
827 }
828 target = get_current_target(context);
829 if (target == NULL) {
830 LOG_ERROR("%s: no current target", __func__);
831 return JIM_ERR;
832 }
833 if (!target_was_examined(target)) {
834 LOG_ERROR("%s: not yet examined", target_name(target));
835 return JIM_ERR;
836 }
837 arm = target_to_arm(target);
838 if (!is_arm(arm)) {
839 LOG_ERROR("%s: not an ARM", target_name(target));
840 return JIM_ERR;
841 }
842
843 if (arm->core_type == ARM_MODE_THREAD)
844 {
845 /* armv7m not supported */
846 LOG_ERROR("Unsupported Command");
847 return JIM_OK;
848 }
849
850 if ((argc < 6) || (argc > 7)) {
851 /* FIXME use the command name to verify # params... */
852 LOG_ERROR("%s: wrong number of arguments", __func__);
853 return JIM_ERR;
854 }
855
856 int cpnum;
857 uint32_t op1;
858 uint32_t op2;
859 uint32_t CRn;
860 uint32_t CRm;
861 uint32_t value;
862 long l;
863
864 /* NOTE: parameter sequence matches ARM instruction set usage:
865 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
866 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
867 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
868 */
869 retval = Jim_GetLong(interp, argv[1], &l);
870 if (retval != JIM_OK)
871 return retval;
872 if (l & ~0xf) {
873 LOG_ERROR("%s: %s %d out of range", __func__,
874 "coprocessor", (int) l);
875 return JIM_ERR;
876 }
877 cpnum = l;
878
879 retval = Jim_GetLong(interp, argv[2], &l);
880 if (retval != JIM_OK)
881 return retval;
882 if (l & ~0x7) {
883 LOG_ERROR("%s: %s %d out of range", __func__,
884 "op1", (int) l);
885 return JIM_ERR;
886 }
887 op1 = l;
888
889 retval = Jim_GetLong(interp, argv[3], &l);
890 if (retval != JIM_OK)
891 return retval;
892 if (l & ~0xf) {
893 LOG_ERROR("%s: %s %d out of range", __func__,
894 "CRn", (int) l);
895 return JIM_ERR;
896 }
897 CRn = l;
898
899 retval = Jim_GetLong(interp, argv[4], &l);
900 if (retval != JIM_OK)
901 return retval;
902 if (l & ~0xf) {
903 LOG_ERROR("%s: %s %d out of range", __func__,
904 "CRm", (int) l);
905 return JIM_ERR;
906 }
907 CRm = l;
908
909 retval = Jim_GetLong(interp, argv[5], &l);
910 if (retval != JIM_OK)
911 return retval;
912 if (l & ~0x7) {
913 LOG_ERROR("%s: %s %d out of range", __func__,
914 "op2", (int) l);
915 return JIM_ERR;
916 }
917 op2 = l;
918
919 value = 0;
920
921 /* FIXME don't assume "mrc" vs "mcr" from the number of params;
922 * that could easily be a typo! Check both...
923 *
924 * FIXME change the call syntax here ... simplest to just pass
925 * the MRC() or MCR() instruction to be executed. That will also
926 * let us support the "mrc2" and "mcr2" opcodes (toggling one bit)
927 * if that's ever needed.
928 */
929 if (argc == 7) {
930 retval = Jim_GetLong(interp, argv[6], &l);
931 if (retval != JIM_OK) {
932 return retval;
933 }
934 value = l;
935
936 /* NOTE: parameters reordered! */
937 // ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2)
938 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
939 if (retval != ERROR_OK)
940 return JIM_ERR;
941 } else {
942 /* NOTE: parameters reordered! */
943 // ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2)
944 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
945 if (retval != ERROR_OK)
946 return JIM_ERR;
947
948 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
949 }
950
951 return JIM_OK;
952 }
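/* Hedged usage note (not part of the upstream file): the Tcl-level argument
 * order follows the parsing above (cpnum, op1, CRn, CRm, op2, and a value for
 * writes).  For instance, the familiar CP15 control register accesses
 *
 *	MRC p15, 0, <Rt>, c1, c0, 0
 *	MCR p15, 0, <Rt>, c1, c0, 0
 *
 * map onto
 *
 *	arm mrc 15 0 1 0 0
 *	arm mcr 15 0 1 0 0 <value>
 */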
953
954 COMMAND_HANDLER(handle_arm_semihosting_command)
955 {
956 struct target *target = get_current_target(CMD_CTX);
957 struct arm *arm = target ? target_to_arm(target) : NULL;
958
959 if (!is_arm(arm)) {
960 command_print(CMD_CTX, "current target isn't an ARM");
961 return ERROR_FAIL;
962 }
963
964 if (!arm->setup_semihosting)
965 {
966 command_print(CMD_CTX, "semihosting not supported for current target");
return ERROR_FAIL;
967 }
968
969 if (CMD_ARGC > 0)
970 {
971 int semihosting;
972
973 COMMAND_PARSE_ENABLE(CMD_ARGV[0], semihosting);
974
975 if (!target_was_examined(target))
976 {
977 LOG_ERROR("Target not examined yet");
978 return ERROR_FAIL;
979 }
980
981 if (arm->setup_semihosting(target, semihosting) != ERROR_OK) {
982 LOG_ERROR("Failed to configure semihosting");
983 return ERROR_FAIL;
984 }
985
986 /* FIXME never let that "catch" be dropped! */
987 arm->is_semihosting = semihosting;
988 }
989
990 command_print(CMD_CTX, "semihosting is %s",
991 arm->is_semihosting
992 ? "enabled" : "disabled");
993
994 return ERROR_OK;
995 }
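/* Hedged usage note (not part of the upstream file): typical use of the
 * handler above, once the target has been examined:
 *
 *	arm semihosting enable
 *	arm semihosting		# reports "semihosting is enabled"
 *	arm semihosting disable
 */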
996
997 static const struct command_registration arm_exec_command_handlers[] = {
998 {
999 .name = "reg",
1000 .handler = handle_armv4_5_reg_command,
1001 .mode = COMMAND_EXEC,
1002 .help = "display ARM core registers",
1003 },
1004 {
1005 .name = "core_state",
1006 .handler = handle_armv4_5_core_state_command,
1007 .mode = COMMAND_EXEC,
1008 .usage = "['arm'|'thumb']",
1009 .help = "display/change ARM core state",
1010 },
1011 {
1012 .name = "disassemble",
1013 .handler = handle_arm_disassemble_command,
1014 .mode = COMMAND_EXEC,
1015 .usage = "address [count ['thumb']]",
1016 .help = "disassemble instructions",
1017 },
1018 {
1019 .name = "mcr",
1020 .mode = COMMAND_EXEC,
1021 .jim_handler = &jim_mcrmrc,
1022 .help = "write coprocessor register",
1023 .usage = "cpnum op1 CRn CRm op2 value",
1024 },
1025 {
1026 .name = "mrc",
1027 .jim_handler = &jim_mcrmrc,
1028 .help = "read coprocessor register",
1029 .usage = "cpnum op1 CRn CRm op2",
1030 },
1031 {
1032 .name = "semihosting",
1033 .handler = handle_arm_semihosting_command,
1034 .mode = COMMAND_EXEC,
1035 .usage = "['enable'|'disable']",
1036 .help = "activate support for semihosting operations",
1037 },
1038
1039 COMMAND_REGISTRATION_DONE
1040 };
1041 const struct command_registration arm_command_handlers[] = {
1042 {
1043 .name = "arm",
1044 .mode = COMMAND_ANY,
1045 .help = "ARM command group",
1046 .chain = arm_exec_command_handlers,
1047 },
1048 COMMAND_REGISTRATION_DONE
1049 };
1050
1051 int arm_get_gdb_reg_list(struct target *target,
1052 struct reg **reg_list[], int *reg_list_size)
1053 {
1054 struct arm *armv4_5 = target_to_arm(target);
1055 int i;
1056
1057 if (!is_arm_mode(armv4_5->core_mode))
1058 return ERROR_FAIL;
1059
1060 *reg_list_size = 26;
1061 *reg_list = malloc(sizeof(struct reg*) * (*reg_list_size));
1062
1063 for (i = 0; i < 16; i++)
1064 (*reg_list)[i] = arm_reg_current(armv4_5, i);
1065
1066 for (i = 16; i < 24; i++)
1067 (*reg_list)[i] = &arm_gdb_dummy_fp_reg;
1068
1069 (*reg_list)[24] = &arm_gdb_dummy_fps_reg;
1070 (*reg_list)[25] = armv4_5->cpsr;
1071
1072 return ERROR_OK;
1073 }
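/* Hedged note (not part of the upstream file): the 26-entry list above
 * reproduces GDB's legacy ARM register layout: r0..r15, the eight obsolete
 * FPA registers (served by the dummy records defined earlier), the FPA
 * status register, and finally cpsr.
 */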
1074
1075 /* wait for execution to complete and check exit point */
1076 static int armv4_5_run_algorithm_completion(struct target *target, uint32_t exit_point, int timeout_ms, void *arch_info)
1077 {
1078 int retval;
1079 struct arm *armv4_5 = target_to_arm(target);
1080
1081 if ((retval = target_wait_state(target, TARGET_HALTED, timeout_ms)) != ERROR_OK)
1082 {
1083 return retval;
1084 }
1085 if (target->state != TARGET_HALTED)
1086 {
1087 if ((retval = target_halt(target)) != ERROR_OK)
1088 return retval;
1089 if ((retval = target_wait_state(target, TARGET_HALTED, 500)) != ERROR_OK)
1090 {
1091 return retval;
1092 }
1093 return ERROR_TARGET_TIMEOUT;
1094 }
1095
1096 /* fast exit: ARMv5+ code can use BKPT */
1097 if (exit_point && buf_get_u32(armv4_5->pc->value, 0, 32) != exit_point)
1098 {
1099 LOG_WARNING("target reentered debug state, but not at the desired exit point: 0x%4.4" PRIx32 "",
1100 buf_get_u32(armv4_5->pc->value, 0, 32));
1101 return ERROR_TARGET_TIMEOUT;
1102 }
1103
1104 return ERROR_OK;
1105 }
1106
1107 int armv4_5_run_algorithm_inner(struct target *target,
1108 int num_mem_params, struct mem_param *mem_params,
1109 int num_reg_params, struct reg_param *reg_params,
1110 uint32_t entry_point, uint32_t exit_point,
1111 int timeout_ms, void *arch_info,
1112 int (*run_it)(struct target *target, uint32_t exit_point,
1113 int timeout_ms, void *arch_info))
1114 {
1115 struct arm *armv4_5 = target_to_arm(target);
1116 struct arm_algorithm *arm_algorithm_info = arch_info;
1117 enum arm_state core_state = armv4_5->core_state;
1118 uint32_t context[17];
1119 uint32_t cpsr;
1120 int exit_breakpoint_size = 0;
1121 int i;
1122 int retval = ERROR_OK;
1123
1124 LOG_DEBUG("Running algorithm");
1125
1126 if (arm_algorithm_info->common_magic != ARM_COMMON_MAGIC)
1127 {
1128 LOG_ERROR("current target isn't an ARMV4/5 target");
1129 return ERROR_TARGET_INVALID;
1130 }
1131
1132 if (target->state != TARGET_HALTED)
1133 {
1134 LOG_WARNING("target not halted");
1135 return ERROR_TARGET_NOT_HALTED;
1136 }
1137
1138 if (!is_arm_mode(armv4_5->core_mode))
1139 return ERROR_FAIL;
1140
1141 /* armv5 and later can terminate with BKPT instruction; less overhead */
1142 if (!exit_point && armv4_5->is_armv4)
1143 {
1144 LOG_ERROR("ARMv4 target needs HW breakpoint location");
1145 return ERROR_FAIL;
1146 }
1147
1148 /* save r0..pc, cpsr-or-spsr, and then cpsr-for-sure;
1149 * they'll be restored later.
1150 */
1151 for (i = 0; i <= 16; i++)
1152 {
1153 struct reg *r;
1154
1155 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1156 arm_algorithm_info->core_mode, i);
1157 if (!r->valid)
1158 armv4_5->read_core_reg(target, r, i,
1159 arm_algorithm_info->core_mode);
1160 context[i] = buf_get_u32(r->value, 0, 32);
1161 }
1162 cpsr = buf_get_u32(armv4_5->cpsr->value, 0, 32);
1163
1164 for (i = 0; i < num_mem_params; i++)
1165 {
1166 if ((retval = target_write_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
1167 {
1168 return retval;
1169 }
1170 }
1171
1172 for (i = 0; i < num_reg_params; i++)
1173 {
1174 struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
1175 if (!reg)
1176 {
1177 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
1178 return ERROR_INVALID_ARGUMENTS;
1179 }
1180
1181 if (reg->size != reg_params[i].size)
1182 {
1183 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
1184 return ERROR_INVALID_ARGUMENTS;
1185 }
1186
1187 if ((retval = armv4_5_set_core_reg(reg, reg_params[i].value)) != ERROR_OK)
1188 {
1189 return retval;
1190 }
1191 }
1192
1193 armv4_5->core_state = arm_algorithm_info->core_state;
1194 if (armv4_5->core_state == ARM_STATE_ARM)
1195 exit_breakpoint_size = 4;
1196 else if (armv4_5->core_state == ARM_STATE_THUMB)
1197 exit_breakpoint_size = 2;
1198 else
1199 {
1200 LOG_ERROR("BUG: can't execute algorithms when not in ARM or Thumb state");
1201 return ERROR_INVALID_ARGUMENTS;
1202 }
1203
1204 if (arm_algorithm_info->core_mode != ARM_MODE_ANY)
1205 {
1206 LOG_DEBUG("setting core_mode: 0x%2.2x",
1207 arm_algorithm_info->core_mode);
1208 buf_set_u32(armv4_5->cpsr->value, 0, 5,
1209 arm_algorithm_info->core_mode);
1210 armv4_5->cpsr->dirty = 1;
1211 armv4_5->cpsr->valid = 1;
1212 }
1213
1214 /* terminate using a hardware or (ARMv5+) software breakpoint */
1215 if (exit_point && (retval = breakpoint_add(target, exit_point,
1216 exit_breakpoint_size, BKPT_HARD)) != ERROR_OK)
1217 {
1218 LOG_ERROR("can't add HW breakpoint to terminate algorithm");
1219 return ERROR_TARGET_FAILURE;
1220 }
1221
1222 if ((retval = target_resume(target, 0, entry_point, 1, 1)) != ERROR_OK)
1223 {
1224 return retval;
1225 }
1226 int retvaltemp;
1227 retval = run_it(target, exit_point, timeout_ms, arch_info);
1228
1229 if (exit_point)
1230 breakpoint_remove(target, exit_point);
1231
1232 if (retval != ERROR_OK)
1233 return retval;
1234
1235 for (i = 0; i < num_mem_params; i++)
1236 {
1237 if (mem_params[i].direction != PARAM_OUT)
1238 if ((retvaltemp = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
1239 {
1240 retval = retvaltemp;
1241 }
1242 }
1243
1244 for (i = 0; i < num_reg_params; i++)
1245 {
1246 if (reg_params[i].direction != PARAM_OUT)
1247 {
1248
1249 struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
1250 if (!reg)
1251 {
1252 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
1253 retval = ERROR_INVALID_ARGUMENTS;
1254 continue;
1255 }
1256
1257 if (reg->size != reg_params[i].size)
1258 {
1259 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
1260 retval = ERROR_INVALID_ARGUMENTS;
1261 continue;
1262 }
1263
1264 buf_set_u32(reg_params[i].value, 0, 32, buf_get_u32(reg->value, 0, 32));
1265 }
1266 }
1267
1268 /* restore everything we saved before (17 or 18 registers) */
1269 for (i = 0; i <= 16; i++)
1270 {
1271 uint32_t regvalue;
1272 regvalue = buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).value, 0, 32);
1273 if (regvalue != context[i])
1274 {
1275 LOG_DEBUG("restoring register %s with value 0x%8.8" PRIx32 "", ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).name, context[i]);
1276 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).value, 0, 32, context[i]);
1277 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).valid = 1;
1278 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).dirty = 1;
1279 }
1280 }
1281
1282 arm_set_cpsr(armv4_5, cpsr);
1283 armv4_5->cpsr->dirty = 1;
1284
1285 armv4_5->core_state = core_state;
1286
1287 return retval;
1288 }
1289
1290 int armv4_5_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info)
1291 {
1292 return armv4_5_run_algorithm_inner(target, num_mem_params, mem_params, num_reg_params, reg_params, entry_point, exit_point, timeout_ms, arch_info, armv4_5_run_algorithm_completion);
1293 }
1294
1295 /**
1296 * Runs ARM code in the target to calculate a CRC32 checksum.
1297 *
1298 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
1299 */
1300 int arm_checksum_memory(struct target *target,
1301 uint32_t address, uint32_t count, uint32_t *checksum)
1302 {
1303 struct working_area *crc_algorithm;
1304 struct arm_algorithm armv4_5_info;
1305 struct reg_param reg_params[2];
1306 int retval;
1307 uint32_t i;
1308
1309 static const uint32_t arm_crc_code[] = {
1310 0xE1A02000, /* mov r2, r0 */
1311 0xE3E00000, /* mov r0, #0xffffffff */
1312 0xE1A03001, /* mov r3, r1 */
1313 0xE3A04000, /* mov r4, #0 */
1314 0xEA00000B, /* b ncomp */
1315 /* nbyte: */
1316 0xE7D21004, /* ldrb r1, [r2, r4] */
1317 0xE59F7030, /* ldr r7, CRC32XOR */
1318 0xE0200C01, /* eor r0, r0, r1, asl 24 */
1319 0xE3A05000, /* mov r5, #0 */
1320 /* loop: */
1321 0xE3500000, /* cmp r0, #0 */
1322 0xE1A06080, /* mov r6, r0, asl #1 */
1323 0xE2855001, /* add r5, r5, #1 */
1324 0xE1A00006, /* mov r0, r6 */
1325 0xB0260007, /* eorlt r0, r6, r7 */
1326 0xE3550008, /* cmp r5, #8 */
1327 0x1AFFFFF8, /* bne loop */
1328 0xE2844001, /* add r4, r4, #1 */
1329 /* ncomp: */
1330 0xE1540003, /* cmp r4, r3 */
1331 0x1AFFFFF1, /* bne nbyte */
1332 /* end: */
1333 0xEAFFFFFE, /* b end */
1334 /* CRC32XOR: */
1335 0x04C11DB7 /* .word 0x04C11DB7 */
1336 };
1337
1338 retval = target_alloc_working_area(target,
1339 sizeof(arm_crc_code), &crc_algorithm);
1340 if (retval != ERROR_OK)
1341 return retval;
1342
1343 /* convert code into a buffer in target endianness */
1344 for (i = 0; i < ARRAY_SIZE(arm_crc_code); i++) {
1345 retval = target_write_u32(target,
1346 crc_algorithm->address + i * sizeof(uint32_t),
1347 arm_crc_code[i]);
1348 if (retval != ERROR_OK)
1349 return retval;
1350 }
1351
1352 armv4_5_info.common_magic = ARM_COMMON_MAGIC;
1353 armv4_5_info.core_mode = ARM_MODE_SVC;
1354 armv4_5_info.core_state = ARM_STATE_ARM;
1355
1356 init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
1357 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1358
1359 buf_set_u32(reg_params[0].value, 0, 32, address);
1360 buf_set_u32(reg_params[1].value, 0, 32, count);
1361
1362 /* 20 second timeout/megabyte */
1363 int timeout = 20000 * (1 + (count / (1024 * 1024)));
1364
1365 retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
1366 crc_algorithm->address,
1367 crc_algorithm->address + sizeof(arm_crc_code) - 8,
1368 timeout, &armv4_5_info);
1369 if (retval != ERROR_OK) {
1370 LOG_ERROR("error executing ARM crc algorithm");
1371 destroy_reg_param(&reg_params[0]);
1372 destroy_reg_param(&reg_params[1]);
1373 target_free_working_area(target, crc_algorithm);
1374 return retval;
1375 }
1376
1377 *checksum = buf_get_u32(reg_params[0].value, 0, 32);
1378
1379 destroy_reg_param(&reg_params[0]);
1380 destroy_reg_param(&reg_params[1]);
1381
1382 target_free_working_area(target, crc_algorithm);
1383
1384 return ERROR_OK;
1385 }
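/* Hedged cross-check (not part of the upstream file): the stub above computes
 * a non-reflected CRC-32 with polynomial 0x04C11DB7, initial value 0xFFFFFFFF
 * and no final XOR.  A host-side reference such as this sketch, run over the
 * same bytes, should match the value returned in r0; the function name is
 * made up.
 */
#if 0	/* illustration only, never built */
static uint32_t arm_crc32_host_reference(const uint8_t *buf, uint32_t len)
{
	uint32_t crc = 0xffffffff;

	while (len--) {
		crc ^= (uint32_t) *buf++ << 24;
		for (unsigned bit = 0; bit < 8; bit++)
			crc = (crc & 0x80000000)
				? (crc << 1) ^ 0x04c11db7
				: (crc << 1);
	}
	return crc;
}
#endif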
1386
1387 /**
1388 * Runs ARM code in the target to check whether a memory block holds
1389 * all ones. NOR flash which has been erased, and thus may be written,
1390 * holds all ones.
1391 *
1392 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
1393 */
1394 int arm_blank_check_memory(struct target *target,
1395 uint32_t address, uint32_t count, uint32_t *blank)
1396 {
1397 struct working_area *check_algorithm;
1398 struct reg_param reg_params[3];
1399 struct arm_algorithm armv4_5_info;
1400 int retval;
1401 uint32_t i;
1402
1403 static const uint32_t check_code[] = {
1404 /* loop: */
1405 0xe4d03001, /* ldrb r3, [r0], #1 */
1406 0xe0022003, /* and r2, r2, r3 */
1407 0xe2511001, /* subs r1, r1, #1 */
1408 0x1afffffb, /* bne loop */
1409 /* end: */
1410 0xeafffffe /* b end */
1411 };
1412
1413 /* make sure we have a working area */
1414 retval = target_alloc_working_area(target,
1415 sizeof(check_code), &check_algorithm);
1416 if (retval != ERROR_OK)
1417 return retval;
1418
1419 /* convert code into a buffer in target endianness */
1420 for (i = 0; i < ARRAY_SIZE(check_code); i++) {
1421 retval = target_write_u32(target,
1422 check_algorithm->address
1423 + i * sizeof(uint32_t),
1424 check_code[i]);
1425 if (retval != ERROR_OK)
1426 return retval;
1427 }
1428
1429 armv4_5_info.common_magic = ARM_COMMON_MAGIC;
1430 armv4_5_info.core_mode = ARM_MODE_SVC;
1431 armv4_5_info.core_state = ARM_STATE_ARM;
1432
1433 init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
1434 buf_set_u32(reg_params[0].value, 0, 32, address);
1435
1436 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1437 buf_set_u32(reg_params[1].value, 0, 32, count);
1438
1439 init_reg_param(&reg_params[2], "r2", 32, PARAM_IN_OUT);
1440 buf_set_u32(reg_params[2].value, 0, 32, 0xff);
1441
1442 retval = target_run_algorithm(target, 0, NULL, 3, reg_params,
1443 check_algorithm->address,
1444 check_algorithm->address + sizeof(check_code) - 4,
1445 10000, &armv4_5_info);
1446 if (retval != ERROR_OK) {
1447 destroy_reg_param(&reg_params[0]);
1448 destroy_reg_param(&reg_params[1]);
1449 destroy_reg_param(&reg_params[2]);
1450 target_free_working_area(target, check_algorithm);
1451 return retval;
1452 }
1453
1454 *blank = buf_get_u32(reg_params[2].value, 0, 32);
1455
1456 destroy_reg_param(&reg_params[0]);
1457 destroy_reg_param(&reg_params[1]);
1458 destroy_reg_param(&reg_params[2]);
1459
1460 target_free_working_area(target, check_algorithm);
1461
1462 return ERROR_OK;
1463 }
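/* Hedged example (not part of the upstream file): interpreting the result.
 * The stub ANDs every byte into an accumulator that starts at 0xff, so the
 * region is blank (erased NOR) exactly when the accumulator is still 0xff.
 * The function name is made up.
 */
#if 0	/* illustration only, never built */
static int arm_region_is_erased_example(struct target *target,
		uint32_t address, uint32_t count)
{
	uint32_t acc;

	if (arm_blank_check_memory(target, address, count, &acc) != ERROR_OK)
		return -1;	/* could not run the check */
	return acc == 0xff;
}
#endif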
1464
1465 static int arm_full_context(struct target *target)
1466 {
1467 struct arm *armv4_5 = target_to_arm(target);
1468 unsigned num_regs = armv4_5->core_cache->num_regs;
1469 struct reg *reg = armv4_5->core_cache->reg_list;
1470 int retval = ERROR_OK;
1471
1472 for (; num_regs && retval == ERROR_OK; num_regs--, reg++) {
1473 if (reg->valid)
1474 continue;
1475 retval = armv4_5_get_core_reg(reg);
1476 }
1477 return retval;
1478 }
1479
1480 static int arm_default_mrc(struct target *target, int cpnum,
1481 uint32_t op1, uint32_t op2,
1482 uint32_t CRn, uint32_t CRm,
1483 uint32_t *value)
1484 {
1485 LOG_ERROR("%s doesn't implement MRC", target_type_name(target));
1486 return ERROR_FAIL;
1487 }
1488
1489 static int arm_default_mcr(struct target *target, int cpnum,
1490 uint32_t op1, uint32_t op2,
1491 uint32_t CRn, uint32_t CRm,
1492 uint32_t value)
1493 {
1494 LOG_ERROR("%s doesn't implement MCR", target_type_name(target));
1495 return ERROR_FAIL;
1496 }
1497
1498 int arm_init_arch_info(struct target *target, struct arm *armv4_5)
1499 {
1500 target->arch_info = armv4_5;
1501 armv4_5->target = target;
1502
1503 armv4_5->common_magic = ARM_COMMON_MAGIC;
1504
1505 /* core_type may be overridden by subtype logic */
1506 if (armv4_5->core_type != ARM_MODE_THREAD) {
1507 armv4_5->core_type = ARM_MODE_ANY;
1508 arm_set_cpsr(armv4_5, ARM_MODE_USR);
1509 }
1510
1511 /* default full_context() has no core-specific optimizations */
1512 if (!armv4_5->full_context && armv4_5->read_core_reg)
1513 armv4_5->full_context = arm_full_context;
1514
1515 if (!armv4_5->mrc)
1516 armv4_5->mrc = arm_default_mrc;
1517 if (!armv4_5->mcr)
1518 armv4_5->mcr = arm_default_mcr;
1519
1520 return ERROR_OK;
1521 }
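/* Hedged sketch (not part of the upstream file) of the expected call pattern
 * for a core driver: fill in the hooks the driver implements, hand the struct
 * to arm_init_arch_info() during target setup, then build the register cache.
 * Everything prefixed "mycore_" is hypothetical, and a real driver would also
 * link the returned cache into the target's register cache list.
 */
#if 0	/* illustration only, never built */
static int mycore_init_arch_info(struct target *target, struct arm *arm)
{
	arm->read_core_reg = mycore_read_core_reg;
	arm->write_core_reg = mycore_write_core_reg;
	/* mrc/mcr/full_context are optional; defaults are installed above */

	int retval = arm_init_arch_info(target, arm);
	if (retval != ERROR_OK)
		return retval;

	if (!arm_build_reg_cache(target, arm))
		return ERROR_FAIL;
	return ERROR_OK;
}
#endif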
